diff --git a/Pipfile.lock b/Pipfile.lock index 626e56f4..11782aba 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -44,6 +44,14 @@ ], "version": "==1.8.2" }, + "beautifulsoup4": { + "hashes": [ + "sha256:05668158c7b85b791c5abde53e50265e16f98ad601c402ba44d70f96c4159612", + "sha256:25288c9e176f354bf277c0a10aa96c782a6a18a17122dba2e8cec4a97e03343b", + "sha256:f040590be10520f2ea4c2ae8c3dae441c7cfff5308ec9d58a0ec0c1b8f81d469" + ], + "version": "==4.8.0" + }, "black": { "hashes": [ "sha256:09a9dcb7c46ed496a9850b76e4e825d6049ecd38b611f1224857a79bd985a8cf", @@ -53,10 +61,10 @@ }, "certifi": { "hashes": [ - "sha256:046832c04d4e752f37383b628bc601a7ea7211496b4638f6514d0e5b9acc4939", - "sha256:945e3ba63a0b9f577b1395204e13c3a231f9bc0223888be653286534e5873695" + "sha256:e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50", + "sha256:fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef" ], - "version": "==2019.6.16" + "version": "==2019.9.11" }, "chardet": { "hashes": [ @@ -105,35 +113,42 @@ }, "coverage": { "hashes": [ - "sha256:108efa19b676e62590a7a13084098e35183479c0d9608131c20b0921c5a72dc0", - "sha256:16fe3ef881eff27bab287f91dadb4ff0ce4388b9e928d84cbf148a83cc70b3a1", - "sha256:1d0bbc11421827d1100da82ac8dc929532b97ad464038475a0f6505cbf83d6ea", - "sha256:23a8ca5b3c9673f775cc151e85a737f1a967df2ec02b09e8c5a3b606ff2050bf", - "sha256:24b890e51455276762b55cb06fa1c922066e8fc18d1deb1a6399b4d24dfa8ea2", - "sha256:2f0041757ca4801f3c6a74d1660862fdb18a25aea302dd0ce9b067ddbb06b667", - "sha256:3169aba03baddfccdab7cc04cf0878dbf76fc06d300bc35639129a6b794d6484", - "sha256:364fb1bf0f999af2e7f4b1a1e614b2af8c3e0017d11af716aad25f911b7cd0c7", - "sha256:5256856d23f3e45959e7e3a8f9d4cbad3d1613e5660cb8117cd1417798efc395", - "sha256:5b26daa1e1a1147455bf62cd682e504e68f1d1e04235374d50a5248a3c792b1c", - "sha256:60247c8f0c756732e2cfe21f03e6847b923b9a9eaff61f04dc64d3047ec1b669", - "sha256:6463d51507308eb3973340d903537f17ece2ee1e6513aa0c27548fc3a09b0471", - "sha256:64cbadf7a884b299794238bc4391752130e74f71e919993b50c1c431786ef2a2", - "sha256:6de85748ea39ce819ad6d90e660da43964457a1f5cd25262e962a7c7c87945b3", - "sha256:6f95b4794bd84f64aeca25087d8e3abc416aad76842afcac34fa6c3a6f61c62e", - "sha256:778fa184aa3079fa3cbd240e2f5b36771c3382db26bc7bf78aea9d06212c6c66", - "sha256:790a9c5e2dbdf6c41eec9776ed663e99bd36c1604e3bf2e8ae3b123181bfee9f", - "sha256:7d97c1aec0b68b4ea5e3c9edb9fc3f951e8a52360f4bad3aacab9a77defe5b17", - "sha256:93cefddcc0b541d3c52981a232947bf085a38092b0812317f1adb56f02869bcb", - "sha256:95e49867ac616ec63ecd69ea005e65e4b896a48b8db7f9f3ad69f37be29324b7", - "sha256:aca423563eafba66a7c15125391b267befd1e45238de5e1a119ae1fb4ea83b5c", - "sha256:baef7c35e7fce738d9637e9c7a6aa79cb79085e4de49c2ec517ce19239a660f6", - "sha256:c10ccf0797ffce85e93a40aff3a96a3adb63c734f95b59384a7c9522ed25c9e2", - "sha256:ca39704a05bba1886c384a4d7944fda72c53fe5e61979cd933d22084678ad4c1", - "sha256:f6e96d5eee578187f5b7e9266bf646b73de29e2dd7adca8bd83e383680ce1f4c", - "sha256:fc6524511fa664cb4e91401229eedd0dad4ba6ded9c4423fee2f698d78908d9c", - "sha256:fdf2e7e5f074495ad6ea796ca0d245aa6a8b9e4c546ffbf8d30aaaee6601af0f" - ], - "version": "==5.0a6" + "sha256:094378c3a35594335a840ea04d473c019e6d4fe10e343cd0d7fb5e87f8b7e926", + "sha256:10216222f3e4139910b6230d0ca0fe9d10ee98837eb83d29525722d628729d20", + "sha256:147478e21cba12c63b3454df5a2fb77b44df630428cffa3a36a6813e38157eab", + "sha256:230ce08965190c0f69196be34a07a795981b2b02b21419c2e1918a882b3eeab0", + "sha256:2469621d680a4c71cdbd3ea4dbed9d199bba93f21d2be1c107ded907b2db41a8", + 
"sha256:26526174d11fb2163832628d943edd452e07528b0ecc0c83c88256a59a32287c", + "sha256:2690bf0835f34ef3451860b02471e9560e4b3caf7413abeaa7544af72eb6d9ed", + "sha256:2b6f2d9a60413e75651cebe33c3f2f66d61209db44e8b9cf6d8d66fb0cb01fda", + "sha256:3ce91c6b92160ecefedf95a8c61fbf4fb36b0addef1a40c654acf1ad390653d0", + "sha256:43d16d7e9e9eaace3d9f1828b617b1be248f90d031a4b2dc1b6e1c88f1602dcf", + "sha256:52b6455da5f547cad72fd5cfc57a16678573fda6c695d257b5c266a44dbbd172", + "sha256:533f3036c8f58e6381fcca3306fe988740638c62c7fc86b7fae9c74b85ac3cdc", + "sha256:62d2abe5c733394058cb381d088bcab64a18da3ce9dc9a8ef2a18e122cbe47f1", + "sha256:72c34f99164679e44a5cbf19bf1a13be4e715c680816302b6ceca49b979fde91", + "sha256:81fc07feed4e40a7c0bdd266efa65e5afc83b5e0f1063007acc6759a957322a1", + "sha256:82093e673182c761ce54dfab17f026a06be3c011fee9b653855b9a2649f20232", + "sha256:87947fef728f72860407c446fd9b4a0f98e39e91ad7ae80803c02a85738e63ef", + "sha256:8b18c5a5a6b35b6311d2c356782ce3c7bacf6d987d9dc479178577391bf1c7dd", + "sha256:90e1850e993aa6b81bafaf672c8e508eaa17fbb5eb23aba93f7f4df822f3bd29", + "sha256:99f71e365bcb03a8debe1a75061329c9e45379f244a229442319d64c53c4e844", + "sha256:9b2c559104a90bf0043d6ef262ca205326d1fe6ec572dcf59e34be9289432793", + "sha256:ad22b073d92ea65b063e612154c72d6367dec3dd47ed33c02e3ab339eabe7bf3", + "sha256:bc3648da235fee2113a8cb80154d9fff4e2689d2d4a11ad35c1ecae23454b539", + "sha256:d0e2478bde68c5d853bcd306b5aae8fbe80417e87957a21fa6ee71edb90639f2", + "sha256:d3e6912d2370925222d2bfb3bd2ba02e9698b8da89cf7192ddf80cbb9f2455ee", + "sha256:d4fa98e3e15863568ea89eaec5e0866ca763980bdc56098dd9316865c111a28e", + "sha256:ee924a23457b373241ff39d21570360afd8ccb58520eb1e8e18eb00827b73e2d" + ], + "version": "==5.0a7" + }, + "dataclasses": { + "hashes": [ + "sha256:454a69d788c7fda44efd71e259be79577822f5e3f53f029a22d08004e951dc9f", + "sha256:6988bd2b895eef432d562370bb707d540f32f7360ab13da45340101bc2307d84" + ], + "version": "==0.6" }, "entrypoints": { "hashes": [ @@ -144,26 +159,26 @@ }, "fastavro": { "hashes": [ - "sha256:081fefefc63206eadb7a08aa6f30b2c1539300bfc296ae998942d42324e55a6f", - "sha256:0ace26d2e41c5679525fd322d68b2ad06309288fee175491cd24b7025d5fa406", - "sha256:346fc111e9b4254a40426adf00fbc1651f1d424c29ca1d50c525bd6df74d7008", - "sha256:56d1bd9c07322bc0f48dfbe09ea85054e2f12e60d64362af2677edcd9f0d3a41", - "sha256:68fdde518b78bc830711eba0ae70d0472dce892f96bc45967c6765acffdfd3ad", - "sha256:76f8e7e769439add934dcbc13ce9d206d49639349966be4ec1d869c7709654b9", - "sha256:7be96062a224bbca443fa07b59bb38164547fd8aac8dbf05e19c1a326c2171b3", - "sha256:80cbcf3d6014a2458093a23d2436a8f07ac2b5647d28a31281ce0857725d2818", - "sha256:b50653c55d790f9c4de5330db5d036668f1a52cf43fa391a8d4db92e2e92e6a6", - "sha256:c039f2da6223fec5a63028ded913e0d39e3f6ac3a3a7a7085d7d976e753ca155", - "sha256:c335327c5a84d37e2f2de16cea2b0da0925c356249da8623231e6dd41efedeef", - "sha256:c68d37dd600c2b814f7f8c32f9b001827a39d28c639855091b5f01d8836925a9", - "sha256:cb0268a031c0842f665858677e4e329089b6f815f9f07dca5d947c45d2d34b3f", - "sha256:cc4e63bc201b19bc2b138e8540800e968e9a033eae87110c6eaa6643282cbb8b", - "sha256:e3239017e32533c941967b5f73381e9d4c1279b5c6672055e6088ad09335877c", - "sha256:e668ea9d589a720f0c15972a7292c587127725f110f66c8a40a71f754d11f457", - "sha256:eb26427df42211fd820da486c7392f26ba7b165274a144bf6aa9a543c0254783", - "sha256:f1d26ef4fd6fb3c3659c223880c38484809e928e5d7eca15889813a3790136b8" - ], - "version": "==0.22.4" + "sha256:1ad80ce952b29624011ea2cde191a3a1f60d79938d3726322d7cb6d6b9edfee3", + 
"sha256:2986cbef684de1940548f083049100f80531ddacda4cf3f32d53f92ffa32b88f", + "sha256:3b912c12a2aead83e33dbac05039beff8e7d5faaa9881f4c7ff60c4a33d26ab1", + "sha256:552aa72b657ebde7ee79527936d49d58f7b0f6e47fa225d79ad848c64bef27da", + "sha256:57684a5ac249eb435af0192d4f7900236095c82964832f6ba4d11485408f0781", + "sha256:64c0f8c85dec719c595eb1188b85790af6a1c69d8482f536ef46fdeaeab1cd65", + "sha256:6a53ce6673a892521b34de0455f7e12e89d671a6acd6cfd3d0c740d7ea249306", + "sha256:79a067c3243d25f7459913e3bb3aad411e9a6d603c27b4dd7fec86c4445eff23", + "sha256:94eaf42dbeba01f261897db18d6d8386aa4aeca63d7d7e0927c56e9718dc809a", + "sha256:a8e4d19ac741f62129a96f12326bc85f4430a72626e3164e8177eea31e60c793", + "sha256:aa7743b75c65a643047cb55c35bd292fe572fa7b9c35e9991fc38ae4efd1c7dd", + "sha256:abc39183b7f0eedf36f49ad5b60bc451ac96c6514393a0ef4cded754f72550f4", + "sha256:cd2a9e8d4c75f186c19ae9e2e895ee2c784cac101c7f6e5a33fe06bddd199af1", + "sha256:d6b45665a9f99ae56186343aa69a318a07f964e8fc0d5bf73ed02bf395c017d6", + "sha256:e67595b42120f018a086e8646dd89ab2ae2c37f7b07db35dbcc967932a769154", + "sha256:e70ca8781e23cba8989800971d0843e4a9812bfdf999bff7d78fa45332e28b5c", + "sha256:eed7ee9103a9d61265507321d781b7a8661041841bb972132a01fd2adab71545", + "sha256:f7ccb202e71338600cdf2936d0293f4616bf34a79ccc089f186678de4f6574c7" + ], + "version": "==0.22.5" }, "flake8": { "hashes": [ @@ -181,11 +196,11 @@ }, "importlib-metadata": { "hashes": [ - "sha256:9ff1b1c5a354142de080b8a4e9803e5d0d59283c93aed808617c787d16768375", - "sha256:b7143592e374e50584564794fcb8aaf00a23025f9db866627f89a21491847a8d" + "sha256:aa18d7378b00b40847790e7c27e11673d7fed219354109d0e7b9e5b25dc3ad26", + "sha256:d5f18a79777f3aa179c145737780282e27b508fc8fd688cb17c7a813e8bd39af" ], "markers": "python_version < '3.8'", - "version": "==0.20" + "version": "==0.23" }, "kafkaesque": { "editable": true, @@ -218,10 +233,10 @@ }, "packaging": { "hashes": [ - "sha256:a7ac867b97fdc07ee80a8058fe4435ccd274ecc3b0ed61d852d7d53055528cf9", - "sha256:c491ca87294da7cc01902edbe30a5bc6c4c28172b5138ab4e4aa1b9d7bfaeafe" + "sha256:28b924174df7a2fa32c1953825ff29c61e2f5e082343165438812f00d3a7fc47", + "sha256:d9551545c6d761f3def1677baf08ab2a3ca17c56879e70fecba2fc4dde4ed108" ], - "version": "==19.1" + "version": "==19.2" }, "pendulum": { "hashes": [ @@ -236,10 +251,10 @@ }, "pluggy": { "hashes": [ - "sha256:0825a152ac059776623854c1543d65a4ad408eb3d33ee114dff91e57ec6ae6fc", - "sha256:b9817417e95936bf75d85d3f8767f7df6cdde751fc40aed3bb3074cbcb77757c" + "sha256:0db4b7601aae1d35b4a033282da476845aa19185c1e6964b25cf324b5e4ec3e6", + "sha256:fa5fa1622fa6dd5c030e9cad086fa19ef6a0cf6d7a2d12318e10cb49d6d68f34" ], - "version": "==0.12.0" + "version": "==0.13.0" }, "py": { "hashes": [ @@ -277,10 +292,10 @@ }, "pytest": { "hashes": [ - "sha256:95d13143cc14174ca1a01ec68e84d76ba5d9d493ac02716fd9706c949a505210", - "sha256:b78fe2881323bd44fd9bd76e5317173d4316577e7b1cddebae9136a4495ec865" + "sha256:813b99704b22c7d377bbd756ebe56c35252bb710937b46f207100e843440b3c2", + "sha256:cc6620b96bc667a0c8d4fa592a8c9c94178a1bd6cc799dbb057dfd9286d31a31" ], - "version": "==5.1.2" + "version": "==5.1.3" }, "pytest-cov": { "hashes": [ @@ -305,10 +320,10 @@ }, "pytzdata": { "hashes": [ - "sha256:c0c8316eaf6c25ba45816390a1a45c39790767069b3275c5f7de3ddf773eb810", - "sha256:e8a91952afd853642a49f0713caac3e15a5306855ff4a47af4ddec5b7dd23a09" + "sha256:84c52b9a47d097fcd483f047a544979de6c3a86e94c845e3569e9f8acd0fa071", + "sha256:fac06f7cdfa903188dc4848c655e4adaee67ee0f2fe08e7daf815cf2a761ee5e" ], - "version": "==2019.2" + "version": 
"==2019.3" }, "pyyaml": { "hashes": [ @@ -342,6 +357,13 @@ ], "version": "==1.12.0" }, + "soupsieve": { + "hashes": [ + "sha256:8662843366b8d8779dec4e2f921bebec9afd856a5ff2e82cd419acc5054a1a92", + "sha256:a5a6166b4767725fd52ae55fee8c8b6137d9a51e9f1edea461a062a759160118" + ], + "version": "==1.9.3" + }, "tabulate": { "hashes": [ "sha256:8af07a39377cee1103a5c8b3330a421c2d99b9141e9cc5ddd2e3263fea416943" @@ -357,10 +379,10 @@ }, "urllib3": { "hashes": [ - "sha256:b246607a25ac80bedac05c6f282e3cdaf3afb65420fd024ac94435cabe6e18d1", - "sha256:dbe59173209418ae49d485b87d1681aefa36252ee85884c31346debd19463232" + "sha256:2f3eadfea5d92bc7899e75b5968410b749a054b492d5a6379c1344a1481bc2cb", + "sha256:9c6c593cb28f52075016307fc26b0a0f8e82bc7d1ff19aaaa959b91710a56c47" ], - "version": "==1.25.3" + "version": "==1.25.5" }, "virtualenv": { "hashes": [ diff --git a/esque/cli/output.py b/esque/cli/output.py index 31160c89..c35da4e3 100644 --- a/esque/cli/output.py +++ b/esque/cli/output.py @@ -105,21 +105,23 @@ def pretty_bytes(value: bytes) -> str: return value.decode("UTF-8") -def pretty_duration(value: Any, *, multiplier: int = 1) -> str: - if not value: +def pretty_duration(orig_value: Any, *, multiplier: int = 1) -> str: + if not orig_value: return "" - if type(value) != int: - value = int(value) + if type(orig_value) != int: + value = int(orig_value) + else: + value = orig_value value *= multiplier # Fix for conversion errors of ms > C_MAX_INT in some internal lib if value > MILLISECONDS_PER_YEAR: value = int(value / MILLISECONDS_PER_YEAR) - return pendulum.duration(years=value).in_words() + return f"{orig_value} ({pendulum.duration(years=value).in_words()})" - return pendulum.duration(milliseconds=value).in_words() + return f"{orig_value} ({pendulum.duration(milliseconds=value).in_words()})" def pretty_topic_diffs(topics_config_diff: Dict[str, TopicDiff]) -> str: @@ -173,7 +175,7 @@ def pretty_size(value: Any) -> str: ] for sign, size in units: if value >= size: - return f"{pretty_float(value / size)} {sign}" + return f"{value} ({pretty_float(value / size)} {sign})" def bold(s: str) -> str: diff --git a/esque/protocol/__init__.py b/esque/protocol/__init__.py new file mode 100644 index 00000000..adc4e2f5 --- /dev/null +++ b/esque/protocol/__init__.py @@ -0,0 +1,100 @@ +from .connection import BrokerConnection + +from .api import ( + ApiKey, + ApiVersions, + Request, + RequestData, + ResponseData, + SUPPORTED_API_VERSIONS, + ProduceRequestData, + ProduceResponseData, + FetchRequestData, + FetchResponseData, + ListOffsetsRequestData, + ListOffsetsResponseData, + MetadataRequestData, + MetadataResponseData, + LeaderAndIsrRequestData, + LeaderAndIsrResponseData, + StopReplicaRequestData, + StopReplicaResponseData, + UpdateMetadataRequestData, + UpdateMetadataResponseData, + ControlledShutdownRequestData, + ControlledShutdownResponseData, + OffsetCommitRequestData, + OffsetCommitResponseData, + OffsetFetchRequestData, + OffsetFetchResponseData, + FindCoordinatorRequestData, + FindCoordinatorResponseData, + JoinGroupRequestData, + JoinGroupResponseData, + HeartbeatRequestData, + HeartbeatResponseData, + LeaveGroupRequestData, + LeaveGroupResponseData, + SyncGroupRequestData, + SyncGroupResponseData, + DescribeGroupsRequestData, + DescribeGroupsResponseData, + ListGroupsRequestData, + ListGroupsResponseData, + SaslHandshakeRequestData, + SaslHandshakeResponseData, + ApiVersionsRequestData, + ApiVersionsResponseData, + CreateTopicsRequestData, + CreateTopicsResponseData, + DeleteTopicsRequestData, + 
DeleteTopicsResponseData, + DeleteRecordsRequestData, + DeleteRecordsResponseData, + InitProducerIdRequestData, + InitProducerIdResponseData, + OffsetForLeaderEpochRequestData, + OffsetForLeaderEpochResponseData, + AddPartitionsToTxnRequestData, + AddPartitionsToTxnResponseData, + AddOffsetsToTxnRequestData, + AddOffsetsToTxnResponseData, + EndTxnRequestData, + EndTxnResponseData, + WriteTxnMarkersRequestData, + WriteTxnMarkersResponseData, + TxnOffsetCommitRequestData, + TxnOffsetCommitResponseData, + DescribeAclsRequestData, + DescribeAclsResponseData, + CreateAclsRequestData, + CreateAclsResponseData, + DeleteAclsRequestData, + DeleteAclsResponseData, + DescribeConfigsRequestData, + DescribeConfigsResponseData, + AlterConfigsRequestData, + AlterConfigsResponseData, + AlterReplicaLogDirsRequestData, + AlterReplicaLogDirsResponseData, + DescribeLogDirsRequestData, + DescribeLogDirsResponseData, + SaslAuthenticateRequestData, + SaslAuthenticateResponseData, + CreatePartitionsRequestData, + CreatePartitionsResponseData, + CreateDelegationTokenRequestData, + CreateDelegationTokenResponseData, + RenewDelegationTokenRequestData, + RenewDelegationTokenResponseData, + ExpireDelegationTokenRequestData, + ExpireDelegationTokenResponseData, + DescribeDelegationTokenRequestData, + DescribeDelegationTokenResponseData, + DeleteGroupsRequestData, + DeleteGroupsResponseData, + ElectPreferredLeadersRequestData, + ElectPreferredLeadersResponseData, + IncrementalAlterConfigsRequestData, + IncrementalAlterConfigsResponseData, +) diff --git a/esque/protocol/api/__init__.py b/esque/protocol/api/__init__.py new file mode 100644 index 00000000..f1294cd8 --- /dev/null +++ b/esque/protocol/api/__init__.py @@ -0,0 +1,438 @@ +from io import BytesIO +from typing import BinaryIO, Dict, Generic, Optional, TypeVar + +from esque.protocol.api.base import ( + ApiKey, + RequestData, + RequestHeader, + ResponseData, + ResponseHeader, + requestHeaderSerializer, + responseHeaderSerializer, +) +from esque.protocol.serializers import BaseSerializer +from .add_offsets_to_txn import ( + AddOffsetsToTxnRequestData, + AddOffsetsToTxnResponseData, + addOffsetsToTxnRequestDataSerializers, + addOffsetsToTxnResponseDataSerializers, +) +from .add_partitions_to_txn import ( + AddPartitionsToTxnRequestData, + AddPartitionsToTxnResponseData, + addPartitionsToTxnRequestDataSerializers, + addPartitionsToTxnResponseDataSerializers, +) +from .alter_configs import ( + AlterConfigsRequestData, + AlterConfigsResponseData, + alterConfigsRequestDataSerializers, + alterConfigsResponseDataSerializers, +) +from .alter_replica_log_dirs import ( + AlterReplicaLogDirsRequestData, + AlterReplicaLogDirsResponseData, + alterReplicaLogDirsRequestDataSerializers, + alterReplicaLogDirsResponseDataSerializers, +) +from .api_versions import ( + ApiVersions, + ApiVersionsRequestData, + ApiVersionsResponseData, + apiVersionsRequestDataSerializers, + apiVersionsResponseDataSerializers, +) +from .controlled_shutdown import ( + ControlledShutdownRequestData, + ControlledShutdownResponseData, + controlledShutdownRequestDataSerializers, + controlledShutdownResponseDataSerializers, +) +from .create_acls import ( + CreateAclsRequestData, + CreateAclsResponseData, + createAclsRequestDataSerializers, + createAclsResponseDataSerializers, +) +from .create_delegation_token import ( + CreateDelegationTokenRequestData, + CreateDelegationTokenResponseData, + createDelegationTokenRequestDataSerializers, + createDelegationTokenResponseDataSerializers, +) +from .create_partitions 
import ( + CreatePartitionsRequestData, + CreatePartitionsResponseData, + createPartitionsRequestDataSerializers, + createPartitionsResponseDataSerializers, +) +from .create_topics import ( + CreateTopicsRequestData, + CreateTopicsResponseData, + createTopicsRequestDataSerializers, + createTopicsResponseDataSerializers, +) +from .delete_acls import ( + DeleteAclsRequestData, + DeleteAclsResponseData, + deleteAclsRequestDataSerializers, + deleteAclsResponseDataSerializers, +) +from .delete_groups import ( + DeleteGroupsRequestData, + DeleteGroupsResponseData, + deleteGroupsRequestDataSerializers, + deleteGroupsResponseDataSerializers, +) +from .delete_records import ( + DeleteRecordsRequestData, + DeleteRecordsResponseData, + deleteRecordsRequestDataSerializers, + deleteRecordsResponseDataSerializers, +) +from .delete_topics import ( + DeleteTopicsRequestData, + DeleteTopicsResponseData, + deleteTopicsRequestDataSerializers, + deleteTopicsResponseDataSerializers, +) +from .describe_acls import ( + DescribeAclsRequestData, + DescribeAclsResponseData, + describeAclsRequestDataSerializers, + describeAclsResponseDataSerializers, +) +from .describe_configs import ( + DescribeConfigsRequestData, + DescribeConfigsResponseData, + describeConfigsRequestDataSerializers, + describeConfigsResponseDataSerializers, +) +from .describe_delegation_token import ( + DescribeDelegationTokenRequestData, + DescribeDelegationTokenResponseData, + describeDelegationTokenRequestDataSerializers, + describeDelegationTokenResponseDataSerializers, +) +from .describe_groups import ( + DescribeGroupsRequestData, + DescribeGroupsResponseData, + describeGroupsRequestDataSerializers, + describeGroupsResponseDataSerializers, +) +from .describe_log_dirs import ( + DescribeLogDirsRequestData, + DescribeLogDirsResponseData, + describeLogDirsRequestDataSerializers, + describeLogDirsResponseDataSerializers, +) +from .elect_preferred_leaders import ( + ElectPreferredLeadersRequestData, + ElectPreferredLeadersResponseData, + electPreferredLeadersRequestDataSerializers, + electPreferredLeadersResponseDataSerializers, +) +from .end_txn import EndTxnRequestData, EndTxnResponseData, endTxnRequestDataSerializers, endTxnResponseDataSerializers +from .expire_delegation_token import ( + ExpireDelegationTokenRequestData, + ExpireDelegationTokenResponseData, + expireDelegationTokenRequestDataSerializers, + expireDelegationTokenResponseDataSerializers, +) +from .fetch import FetchRequestData, FetchResponseData, fetchRequestDataSerializers, fetchResponseDataSerializers +from .find_coordinator import ( + FindCoordinatorRequestData, + FindCoordinatorResponseData, + findCoordinatorRequestDataSerializers, + findCoordinatorResponseDataSerializers, +) +from .heartbeat import ( + HeartbeatRequestData, + HeartbeatResponseData, + heartbeatRequestDataSerializers, + heartbeatResponseDataSerializers, +) +from .incremental_alter_configs import ( + IncrementalAlterConfigsRequestData, + IncrementalAlterConfigsResponseData, + incrementalAlterConfigsRequestDataSerializers, + incrementalAlterConfigsResponseDataSerializers, +) +from .init_producer_id import ( + InitProducerIdRequestData, + InitProducerIdResponseData, + initProducerIdRequestDataSerializers, + initProducerIdResponseDataSerializers, +) +from .join_group import ( + JoinGroupRequestData, + JoinGroupResponseData, + joinGroupRequestDataSerializers, + joinGroupResponseDataSerializers, +) +from .leader_and_isr import ( + LeaderAndIsrRequestData, + LeaderAndIsrResponseData, + 
leaderAndIsrRequestDataSerializers, + leaderAndIsrResponseDataSerializers, +) +from .leave_group import ( + LeaveGroupRequestData, + LeaveGroupResponseData, + leaveGroupRequestDataSerializers, + leaveGroupResponseDataSerializers, +) +from .list_groups import ( + ListGroupsRequestData, + ListGroupsResponseData, + listGroupsRequestDataSerializers, + listGroupsResponseDataSerializers, +) +from .list_offsets import ( + ListOffsetsRequestData, + ListOffsetsResponseData, + listOffsetsRequestDataSerializers, + listOffsetsResponseDataSerializers, +) +from .metadata import ( + MetadataRequestData, + MetadataResponseData, + metadataRequestDataSerializers, + metadataResponseDataSerializers, +) +from .offset_commit import ( + OffsetCommitRequestData, + OffsetCommitResponseData, + offsetCommitRequestDataSerializers, + offsetCommitResponseDataSerializers, +) +from .offset_fetch import ( + OffsetFetchRequestData, + OffsetFetchResponseData, + offsetFetchRequestDataSerializers, + offsetFetchResponseDataSerializers, +) +from .offset_for_leader_epoch import ( + OffsetForLeaderEpochRequestData, + OffsetForLeaderEpochResponseData, + offsetForLeaderEpochRequestDataSerializers, + offsetForLeaderEpochResponseDataSerializers, +) +from .produce import ( + ProduceRequestData, + ProduceResponseData, + produceRequestDataSerializers, + produceResponseDataSerializers, +) +from .renew_delegation_token import ( + RenewDelegationTokenRequestData, + RenewDelegationTokenResponseData, + renewDelegationTokenRequestDataSerializers, + renewDelegationTokenResponseDataSerializers, +) +from .sasl_authenticate import ( + SaslAuthenticateRequestData, + SaslAuthenticateResponseData, + saslAuthenticateRequestDataSerializers, + saslAuthenticateResponseDataSerializers, +) +from .sasl_handshake import ( + SaslHandshakeRequestData, + SaslHandshakeResponseData, + saslHandshakeRequestDataSerializers, + saslHandshakeResponseDataSerializers, +) +from .stop_replica import ( + StopReplicaRequestData, + StopReplicaResponseData, + stopReplicaRequestDataSerializers, + stopReplicaResponseDataSerializers, +) +from .sync_group import ( + SyncGroupRequestData, + SyncGroupResponseData, + syncGroupRequestDataSerializers, + syncGroupResponseDataSerializers, +) +from .txn_offset_commit import ( + TxnOffsetCommitRequestData, + TxnOffsetCommitResponseData, + txnOffsetCommitRequestDataSerializers, + txnOffsetCommitResponseDataSerializers, +) +from .update_metadata import ( + UpdateMetadataRequestData, + UpdateMetadataResponseData, + updateMetadataRequestDataSerializers, + updateMetadataResponseDataSerializers, +) +from .write_txn_markers import ( + WriteTxnMarkersRequestData, + WriteTxnMarkersResponseData, + writeTxnMarkersRequestDataSerializers, + writeTxnMarkersResponseDataSerializers, +) + +REQUEST_SERIALIZERS: Dict[ApiKey, Dict[int, BaseSerializer[RequestData]]] = { + ApiKey.PRODUCE: produceRequestDataSerializers, + ApiKey.FETCH: fetchRequestDataSerializers, + ApiKey.LIST_OFFSETS: listOffsetsRequestDataSerializers, + ApiKey.METADATA: metadataRequestDataSerializers, + ApiKey.LEADER_AND_ISR: leaderAndIsrRequestDataSerializers, + ApiKey.STOP_REPLICA: stopReplicaRequestDataSerializers, + ApiKey.UPDATE_METADATA: updateMetadataRequestDataSerializers, + ApiKey.CONTROLLED_SHUTDOWN: controlledShutdownRequestDataSerializers, + ApiKey.OFFSET_COMMIT: offsetCommitRequestDataSerializers, + ApiKey.OFFSET_FETCH: offsetFetchRequestDataSerializers, + ApiKey.FIND_COORDINATOR: findCoordinatorRequestDataSerializers, + ApiKey.JOIN_GROUP: joinGroupRequestDataSerializers, + 
ApiKey.HEARTBEAT: heartbeatRequestDataSerializers, + ApiKey.LEAVE_GROUP: leaveGroupRequestDataSerializers, + ApiKey.SYNC_GROUP: syncGroupRequestDataSerializers, + ApiKey.DESCRIBE_GROUPS: describeGroupsRequestDataSerializers, + ApiKey.LIST_GROUPS: listGroupsRequestDataSerializers, + ApiKey.SASL_HANDSHAKE: saslHandshakeRequestDataSerializers, + ApiKey.API_VERSIONS: apiVersionsRequestDataSerializers, + ApiKey.CREATE_TOPICS: createTopicsRequestDataSerializers, + ApiKey.DELETE_TOPICS: deleteTopicsRequestDataSerializers, + ApiKey.DELETE_RECORDS: deleteRecordsRequestDataSerializers, + ApiKey.INIT_PRODUCER_ID: initProducerIdRequestDataSerializers, + ApiKey.OFFSET_FOR_LEADER_EPOCH: offsetForLeaderEpochRequestDataSerializers, + ApiKey.ADD_PARTITIONS_TO_TXN: addPartitionsToTxnRequestDataSerializers, + ApiKey.ADD_OFFSETS_TO_TXN: addOffsetsToTxnRequestDataSerializers, + ApiKey.END_TXN: endTxnRequestDataSerializers, + ApiKey.WRITE_TXN_MARKERS: writeTxnMarkersRequestDataSerializers, + ApiKey.TXN_OFFSET_COMMIT: txnOffsetCommitRequestDataSerializers, + ApiKey.DESCRIBE_ACLS: describeAclsRequestDataSerializers, + ApiKey.CREATE_ACLS: createAclsRequestDataSerializers, + ApiKey.DELETE_ACLS: deleteAclsRequestDataSerializers, + ApiKey.DESCRIBE_CONFIGS: describeConfigsRequestDataSerializers, + ApiKey.ALTER_CONFIGS: alterConfigsRequestDataSerializers, + ApiKey.ALTER_REPLICA_LOG_DIRS: alterReplicaLogDirsRequestDataSerializers, + ApiKey.DESCRIBE_LOG_DIRS: describeLogDirsRequestDataSerializers, + ApiKey.SASL_AUTHENTICATE: saslAuthenticateRequestDataSerializers, + ApiKey.CREATE_PARTITIONS: createPartitionsRequestDataSerializers, + ApiKey.CREATE_DELEGATION_TOKEN: createDelegationTokenRequestDataSerializers, + ApiKey.RENEW_DELEGATION_TOKEN: renewDelegationTokenRequestDataSerializers, + ApiKey.EXPIRE_DELEGATION_TOKEN: expireDelegationTokenRequestDataSerializers, + ApiKey.DESCRIBE_DELEGATION_TOKEN: describeDelegationTokenRequestDataSerializers, + ApiKey.DELETE_GROUPS: deleteGroupsRequestDataSerializers, + ApiKey.ELECT_PREFERRED_LEADERS: electPreferredLeadersRequestDataSerializers, + ApiKey.INCREMENTAL_ALTER_CONFIGS: incrementalAlterConfigsRequestDataSerializers, +} + + +RESPONSE_SERIALIZERS: Dict[ApiKey, Dict[int, BaseSerializer[ResponseData]]] = { + ApiKey.PRODUCE: produceResponseDataSerializers, + ApiKey.FETCH: fetchResponseDataSerializers, + ApiKey.LIST_OFFSETS: listOffsetsResponseDataSerializers, + ApiKey.METADATA: metadataResponseDataSerializers, + ApiKey.LEADER_AND_ISR: leaderAndIsrResponseDataSerializers, + ApiKey.STOP_REPLICA: stopReplicaResponseDataSerializers, + ApiKey.UPDATE_METADATA: updateMetadataResponseDataSerializers, + ApiKey.CONTROLLED_SHUTDOWN: controlledShutdownResponseDataSerializers, + ApiKey.OFFSET_COMMIT: offsetCommitResponseDataSerializers, + ApiKey.OFFSET_FETCH: offsetFetchResponseDataSerializers, + ApiKey.FIND_COORDINATOR: findCoordinatorResponseDataSerializers, + ApiKey.JOIN_GROUP: joinGroupResponseDataSerializers, + ApiKey.HEARTBEAT: heartbeatResponseDataSerializers, + ApiKey.LEAVE_GROUP: leaveGroupResponseDataSerializers, + ApiKey.SYNC_GROUP: syncGroupResponseDataSerializers, + ApiKey.DESCRIBE_GROUPS: describeGroupsResponseDataSerializers, + ApiKey.LIST_GROUPS: listGroupsResponseDataSerializers, + ApiKey.SASL_HANDSHAKE: saslHandshakeResponseDataSerializers, + ApiKey.API_VERSIONS: apiVersionsResponseDataSerializers, + ApiKey.CREATE_TOPICS: createTopicsResponseDataSerializers, + ApiKey.DELETE_TOPICS: deleteTopicsResponseDataSerializers, + ApiKey.DELETE_RECORDS: 
deleteRecordsResponseDataSerializers, + ApiKey.INIT_PRODUCER_ID: initProducerIdResponseDataSerializers, + ApiKey.OFFSET_FOR_LEADER_EPOCH: offsetForLeaderEpochResponseDataSerializers, + ApiKey.ADD_PARTITIONS_TO_TXN: addPartitionsToTxnResponseDataSerializers, + ApiKey.ADD_OFFSETS_TO_TXN: addOffsetsToTxnResponseDataSerializers, + ApiKey.END_TXN: endTxnResponseDataSerializers, + ApiKey.WRITE_TXN_MARKERS: writeTxnMarkersResponseDataSerializers, + ApiKey.TXN_OFFSET_COMMIT: txnOffsetCommitResponseDataSerializers, + ApiKey.DESCRIBE_ACLS: describeAclsResponseDataSerializers, + ApiKey.CREATE_ACLS: createAclsResponseDataSerializers, + ApiKey.DELETE_ACLS: deleteAclsResponseDataSerializers, + ApiKey.DESCRIBE_CONFIGS: describeConfigsResponseDataSerializers, + ApiKey.ALTER_CONFIGS: alterConfigsResponseDataSerializers, + ApiKey.ALTER_REPLICA_LOG_DIRS: alterReplicaLogDirsResponseDataSerializers, + ApiKey.DESCRIBE_LOG_DIRS: describeLogDirsResponseDataSerializers, + ApiKey.SASL_AUTHENTICATE: saslAuthenticateResponseDataSerializers, + ApiKey.CREATE_PARTITIONS: createPartitionsResponseDataSerializers, + ApiKey.CREATE_DELEGATION_TOKEN: createDelegationTokenResponseDataSerializers, + ApiKey.RENEW_DELEGATION_TOKEN: renewDelegationTokenResponseDataSerializers, + ApiKey.EXPIRE_DELEGATION_TOKEN: expireDelegationTokenResponseDataSerializers, + ApiKey.DESCRIBE_DELEGATION_TOKEN: describeDelegationTokenResponseDataSerializers, + ApiKey.DELETE_GROUPS: deleteGroupsResponseDataSerializers, + ApiKey.ELECT_PREFERRED_LEADERS: electPreferredLeadersResponseDataSerializers, + ApiKey.INCREMENTAL_ALTER_CONFIGS: incrementalAlterConfigsResponseDataSerializers, +} + + +SUPPORTED_API_VERSIONS: Dict[ApiKey, ApiVersions] = { + api_key: ApiVersions(api_key, min(serializers.keys()), max(serializers.keys())) + for api_key, serializers in REQUEST_SERIALIZERS.items() +} + + +def get_request_serializer(api_key: ApiKey, api_version: int) -> BaseSerializer[RequestData]: + return REQUEST_SERIALIZERS[api_key][api_version] + + +def get_response_serializer(api_key: ApiKey, api_version: int) -> BaseSerializer[ResponseData]: + return RESPONSE_SERIALIZERS[api_key][api_version] + + +Req = TypeVar("Req") +Res = TypeVar("Res") + + +class Request(Generic[Req, Res]): + def __init__(self, request_data: Req, header: RequestHeader): + self.api_version = header.api_version + self.request_data = request_data + self.request_header = header + self.response_data: Optional[Res] = None + self.response_header: Optional[ResponseHeader] = None + + def encode_request(self) -> bytes: + data = requestHeaderSerializer.encode(self.request_header) + data += self.request_serializer.encode(self.request_data) + return data + + def decode_response(self, data: bytes) -> "Request": + return self.read_response(BytesIO(data)) + + def read_response(self, buffer: BinaryIO) -> "Request": + self.response_header = responseHeaderSerializer.read(buffer) + assert self.response_header.correlation_id == self.correlation_id, "Request and response order got messed up!" 
+        self.response_data = self.response_serializer.read(buffer)
+        return self
+
+    @property
+    def correlation_id(self) -> int:
+        return self.request_header.correlation_id
+
+    @property
+    def api_key(self) -> ApiKey:
+        return self.request_header.api_key
+
+    @property
+    def response_serializer(self) -> BaseSerializer[Res]:
+        return get_response_serializer(self.api_key, self.api_version)
+
+    @property
+    def request_serializer(self) -> BaseSerializer[Req]:
+        return get_request_serializer(self.api_key, self.api_version)
+
+    @classmethod
+    def from_request_data(
+        cls, request_data: Req, api_version: int, correlation_id: int, client_id: Optional[str]
+    ) -> "Request":
+        header = RequestHeader(
+            api_key=request_data.api_key(), api_version=api_version, correlation_id=correlation_id, client_id=client_id
+        )
+        return cls(request_data, header)
diff --git a/esque/protocol/api/add_offsets_to_txn.py b/esque/protocol/api/add_offsets_to_txn.py
new file mode 100644
index 00000000..e4393337
--- /dev/null
+++ b/esque/protocol/api/add_offsets_to_txn.py
@@ -0,0 +1,82 @@
+# FIXME autogenerated module, check for errors!
+from typing import Dict
+
+from dataclasses import dataclass
+
+from esque.protocol.api.base import ApiKey, RequestData, ResponseData
+from esque.protocol.serializers import (
+    BaseSerializer,
+    NamedTupleSerializer,
+    Schema,
+    int16Serializer,
+    int32Serializer,
+    int64Serializer,
+    stringSerializer,
+)
+
+
+@dataclass
+class AddOffsetsToTxnRequestData(RequestData):
+    # The transactional id corresponding to the transaction.
+    transactional_id: "str"  # STRING
+
+    # Current producer id in use by the transactional id.
+    producer_id: "int"  # INT64
+
+    # Current epoch associated with the producer id.
+    producer_epoch: "int"  # INT16
+
+    # The unique group identifier
+    group_id: "str"  # STRING
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.ADD_OFFSETS_TO_TXN  # == 25
+
+
+@dataclass
+class AddOffsetsToTxnResponseData(ResponseData):
+    # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the
+    # request did not violate any quota)
+    throttle_time_ms: "int"  # INT32
+
+    # Response error code
+    error_code: "int"  # INT16
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.ADD_OFFSETS_TO_TXN  # == 25
+
+
+addOffsetsToTxnRequestDataSchemas: Dict[int, Schema] = {
+    0: [
+        ("transactional_id", stringSerializer),
+        ("producer_id", int64Serializer),
+        ("producer_epoch", int16Serializer),
+        ("group_id", stringSerializer),
+    ],
+    1: [
+        ("transactional_id", stringSerializer),
+        ("producer_id", int64Serializer),
+        ("producer_epoch", int16Serializer),
+        ("group_id", stringSerializer),
+    ],
+}
+
+
+addOffsetsToTxnRequestDataSerializers: Dict[int, BaseSerializer[AddOffsetsToTxnRequestData]] = {
+    version: NamedTupleSerializer(AddOffsetsToTxnRequestData, schema)
+    for version, schema in addOffsetsToTxnRequestDataSchemas.items()
+}
+
+
+addOffsetsToTxnResponseDataSchemas: Dict[int, Schema] = {
+    0: [("throttle_time_ms", int32Serializer), ("error_code", int16Serializer)],
+    1: [("throttle_time_ms", int32Serializer), ("error_code", int16Serializer)],
+}
+
+
+addOffsetsToTxnResponseDataSerializers: Dict[int, BaseSerializer[AddOffsetsToTxnResponseData]] = {
+    version: NamedTupleSerializer(AddOffsetsToTxnResponseData, schema)
+    for version, schema in addOffsetsToTxnResponseDataSchemas.items()
+}
diff --git a/esque/protocol/api/add_partitions_to_txn.py b/esque/protocol/api/add_partitions_to_txn.py
new file mode 100644
index
00000000..fe2f6351 --- /dev/null +++ b/esque/protocol/api/add_partitions_to_txn.py @@ -0,0 +1,140 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + int64Serializer, + stringSerializer, +) + + +@dataclass +class Topics: + # Name of topic + topic: "str" # STRING + + partitions: List["int"] # INT32 + + +@dataclass +class AddPartitionsToTxnRequestData(RequestData): + # The transactional id corresponding to the transaction. + transactional_id: "str" # STRING + + # Current producer id in use by the transactional id. + producer_id: "int" # INT64 + + # Current epoch associated with the producer id. + producer_epoch: "int" # INT16 + + # The partitions to add to the transaction. + topics: List["Topics"] + + @staticmethod + def api_key() -> int: + return ApiKey.ADD_PARTITIONS_TO_TXN # == 24 + + +@dataclass +class PartitionErrors: + # Topic partition id + partition: "int" # INT32 + + # Response error code + error_code: "int" # INT16 + + +@dataclass +class Errors: + # Name of topic + topic: "str" # STRING + + partition_errors: List["PartitionErrors"] + + +@dataclass +class AddPartitionsToTxnResponseData(ResponseData): + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + errors: List["Errors"] + + @staticmethod + def api_key() -> int: + return ApiKey.ADD_PARTITIONS_TO_TXN # == 24 + + +topicsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partitions", ArraySerializer(int32Serializer))], + 1: [("topic", stringSerializer), ("partitions", ArraySerializer(int32Serializer))], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +addPartitionsToTxnRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("transactional_id", stringSerializer), + ("producer_id", int64Serializer), + ("producer_epoch", int16Serializer), + ("topics", ArraySerializer(topicsSerializers[0])), + ], + 1: [ + ("transactional_id", stringSerializer), + ("producer_id", int64Serializer), + ("producer_epoch", int16Serializer), + ("topics", ArraySerializer(topicsSerializers[1])), + ], +} + + +addPartitionsToTxnRequestDataSerializers: Dict[int, BaseSerializer[AddPartitionsToTxnRequestData]] = { + version: NamedTupleSerializer(AddPartitionsToTxnRequestData, schema) + for version, schema in addPartitionsToTxnRequestDataSchemas.items() +} + + +partitionErrorsSchemas: Dict[int, Schema] = { + 0: [("partition", int32Serializer), ("error_code", int16Serializer)], + 1: [("partition", int32Serializer), ("error_code", int16Serializer)], +} + + +partitionErrorsSerializers: Dict[int, BaseSerializer[PartitionErrors]] = { + version: NamedTupleSerializer(PartitionErrors, schema) for version, schema in partitionErrorsSchemas.items() +} + + +errorsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partition_errors", ArraySerializer(partitionErrorsSerializers[0]))], + 1: [("topic", stringSerializer), ("partition_errors", ArraySerializer(partitionErrorsSerializers[1]))], +} + + +errorsSerializers: Dict[int, BaseSerializer[Errors]] = { + version: NamedTupleSerializer(Errors, schema) for version, schema in 
errorsSchemas.items()
+}
+
+
+addPartitionsToTxnResponseDataSchemas: Dict[int, Schema] = {
+    0: [("throttle_time_ms", int32Serializer), ("errors", ArraySerializer(errorsSerializers[0]))],
+    1: [("throttle_time_ms", int32Serializer), ("errors", ArraySerializer(errorsSerializers[1]))],
+}
+
+
+addPartitionsToTxnResponseDataSerializers: Dict[int, BaseSerializer[AddPartitionsToTxnResponseData]] = {
+    version: NamedTupleSerializer(AddPartitionsToTxnResponseData, schema)
+    for version, schema in addPartitionsToTxnResponseDataSchemas.items()
+}
diff --git a/esque/protocol/api/alter_configs.py b/esque/protocol/api/alter_configs.py
new file mode 100644
index 00000000..993c84ef
--- /dev/null
+++ b/esque/protocol/api/alter_configs.py
@@ -0,0 +1,149 @@
+# FIXME autogenerated module, check for errors!
+from typing import Dict, List, Optional
+
+from dataclasses import dataclass
+
+from esque.protocol.api.base import ApiKey, RequestData, ResponseData
+from esque.protocol.serializers import (
+    ArraySerializer,
+    BaseSerializer,
+    NamedTupleSerializer,
+    Schema,
+    booleanSerializer,
+    int16Serializer,
+    int32Serializer,
+    int8Serializer,
+    nullableStringSerializer,
+    stringSerializer,
+)
+
+
+@dataclass
+class ConfigEntries:
+    # Configuration name
+    config_name: "str"  # STRING
+
+    # Configuration value
+    config_value: "Optional[str]"  # NULLABLE_STRING
+
+
+@dataclass
+class Resources:
+    resource_type: "int"  # INT8
+
+    resource_name: "str"  # STRING
+
+    config_entries: List["ConfigEntries"]
+
+
+@dataclass
+class AlterConfigsRequestData(RequestData):
+    # An array of resources to update with the provided configs.
+    resources: List["Resources"]
+
+    validate_only: "bool"  # BOOLEAN
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.ALTER_CONFIGS  # == 33
+
+
+@dataclass
+class ResponseResources:
+    # Response error code
+    error_code: "int"  # INT16
+
+    # Response error message
+    error_message: "Optional[str]"  # NULLABLE_STRING
+
+    resource_type: "int"  # INT8
+
+    resource_name: "str"  # STRING
+
+
+@dataclass
+class AlterConfigsResponseData(ResponseData):
+    # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the
+    # request did not violate any quota)
+    throttle_time_ms: "int"  # INT32
+
+    resources: List["ResponseResources"]
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.ALTER_CONFIGS  # == 33
+
+
+configEntriesSchemas: Dict[int, Schema] = {
+    0: [("config_name", stringSerializer), ("config_value", nullableStringSerializer)],
+    1: [("config_name", stringSerializer), ("config_value", nullableStringSerializer)],
+}
+
+
+configEntriesSerializers: Dict[int, BaseSerializer[ConfigEntries]] = {
+    version: NamedTupleSerializer(ConfigEntries, schema) for version, schema in configEntriesSchemas.items()
+}
+
+
+resourcesSchemas: Dict[int, Schema] = {
+    0: [
+        ("resource_type", int8Serializer),
+        ("resource_name", stringSerializer),
+        ("config_entries", ArraySerializer(configEntriesSerializers[0])),
+    ],
+    1: [
+        ("resource_type", int8Serializer),
+        ("resource_name", stringSerializer),
+        ("config_entries", ArraySerializer(configEntriesSerializers[1])),
+    ],
+}
+
+
+resourcesSerializers: Dict[int, BaseSerializer[Resources]] = {
+    version: NamedTupleSerializer(Resources, schema) for version, schema in resourcesSchemas.items()
+}
+
+
+alterConfigsRequestDataSchemas: Dict[int, Schema] = {
+    0: [("resources", ArraySerializer(resourcesSerializers[0])), ("validate_only", booleanSerializer)],
+    1: [("resources", ArraySerializer(resourcesSerializers[1])), ("validate_only", booleanSerializer)],
+}
+
+
+alterConfigsRequestDataSerializers: Dict[int, BaseSerializer[AlterConfigsRequestData]] = {
+    version: NamedTupleSerializer(AlterConfigsRequestData, schema)
+    for version, schema in alterConfigsRequestDataSchemas.items()
+}
+
+
+responseResourcesSchemas: Dict[int, Schema] = {
+    0: [
+        ("error_code", int16Serializer),
+        ("error_message", nullableStringSerializer),
+        ("resource_type", int8Serializer),
+        ("resource_name", stringSerializer),
+    ],
+    1: [
+        ("error_code", int16Serializer),
+        ("error_message", nullableStringSerializer),
+        ("resource_type", int8Serializer),
+        ("resource_name", stringSerializer),
+    ],
+}
+
+
+responseResourcesSerializers: Dict[int, BaseSerializer[ResponseResources]] = {
+    version: NamedTupleSerializer(ResponseResources, schema) for version, schema in responseResourcesSchemas.items()
+}
+
+
+alterConfigsResponseDataSchemas: Dict[int, Schema] = {
+    0: [("throttle_time_ms", int32Serializer), ("resources", ArraySerializer(responseResourcesSerializers[0]))],
+    1: [("throttle_time_ms", int32Serializer), ("resources", ArraySerializer(responseResourcesSerializers[1]))],
+}
+
+
+alterConfigsResponseDataSerializers: Dict[int, BaseSerializer[AlterConfigsResponseData]] = {
+    version: NamedTupleSerializer(AlterConfigsResponseData, schema)
+    for version, schema in alterConfigsResponseDataSchemas.items()
+}
diff --git a/esque/protocol/api/alter_replica_log_dirs.py b/esque/protocol/api/alter_replica_log_dirs.py
new file mode 100644
index 00000000..15c3f3bf
--- /dev/null
+++ b/esque/protocol/api/alter_replica_log_dirs.py
@@ -0,0 +1,139 @@
+# FIXME autogenerated module, check for errors!
+from typing import Dict, List
+
+from dataclasses import dataclass
+
+from esque.protocol.api.base import ApiKey, RequestData, ResponseData
+from esque.protocol.serializers import (
+    ArraySerializer,
+    BaseSerializer,
+    NamedTupleSerializer,
+    Schema,
+    int16Serializer,
+    int32Serializer,
+    stringSerializer,
+)
+
+
+@dataclass
+class Topics:
+    # Name of topic
+    topic: "str"  # STRING
+
+    # List of partition ids of the topic.
+    partitions: List["int"]  # INT32
+
+
+@dataclass
+class LogDirs:
+    # The absolute log directory path.
+    log_dir: "str"  # STRING
+
+    topics: List["Topics"]
+
+
+@dataclass
+class AlterReplicaLogDirsRequestData(RequestData):
+    log_dirs: List["LogDirs"]
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.ALTER_REPLICA_LOG_DIRS  # == 34
+
+
+@dataclass
+class Partitions:
+    # Topic partition id
+    partition: "int"  # INT32
+
+    # Response error code
+    error_code: "int"  # INT16
+
+
+@dataclass
+class ResponseTopics:
+    # Name of topic
+    topic: "str"  # STRING
+
+    partitions: List["Partitions"]
+
+
+@dataclass
+class AlterReplicaLogDirsResponseData(ResponseData):
+    # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the
+    # request did not violate any quota)
+    throttle_time_ms: "int"  # INT32
+
+    topics: List["ResponseTopics"]
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.ALTER_REPLICA_LOG_DIRS  # == 34
+
+
+topicsSchemas: Dict[int, Schema] = {
+    0: [("topic", stringSerializer), ("partitions", ArraySerializer(int32Serializer))],
+    1: [("topic", stringSerializer), ("partitions", ArraySerializer(int32Serializer))],
+}
+
+
+topicsSerializers: Dict[int, BaseSerializer[Topics]] = {
+    version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items()
+}
+
+
+logDirsSchemas: Dict[int, Schema] = {
+    0: [("log_dir", stringSerializer), ("topics", ArraySerializer(topicsSerializers[0]))],
+    1: [("log_dir", stringSerializer), ("topics", ArraySerializer(topicsSerializers[1]))],
+}
+
+
+logDirsSerializers: Dict[int, BaseSerializer[LogDirs]] = {
+    version: NamedTupleSerializer(LogDirs, schema) for version, schema in logDirsSchemas.items()
+}
+
+
+alterReplicaLogDirsRequestDataSchemas: Dict[int, Schema] = {
+    0: [("log_dirs", ArraySerializer(logDirsSerializers[0]))],
+    1: [("log_dirs", ArraySerializer(logDirsSerializers[1]))],
+}
+
+
+alterReplicaLogDirsRequestDataSerializers: Dict[int, BaseSerializer[AlterReplicaLogDirsRequestData]] = {
+    version: NamedTupleSerializer(AlterReplicaLogDirsRequestData, schema)
+    for version, schema in alterReplicaLogDirsRequestDataSchemas.items()
+}
+
+
+partitionsSchemas: Dict[int, Schema] = {
+    0: [("partition", int32Serializer), ("error_code", int16Serializer)],
+    1: [("partition", int32Serializer), ("error_code", int16Serializer)],
+}
+
+
+partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = {
+    version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items()
+}
+
+
+responseTopicsSchemas: Dict[int, Schema] = {
+    0: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[0]))],
+    1: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[1]))],
+}
+
+
+responseTopicsSerializers: Dict[int, BaseSerializer[ResponseTopics]] = {
+    version: NamedTupleSerializer(ResponseTopics, schema) for version, schema in responseTopicsSchemas.items()
+}
+
+
+alterReplicaLogDirsResponseDataSchemas: Dict[int, Schema] = {
+    0: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(responseTopicsSerializers[0]))],
+    1: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(responseTopicsSerializers[1]))],
+}
+
+
+alterReplicaLogDirsResponseDataSerializers: Dict[int, BaseSerializer[AlterReplicaLogDirsResponseData]] = {
+    version: NamedTupleSerializer(AlterReplicaLogDirsResponseData, schema)
+    for version, schema in alterReplicaLogDirsResponseDataSchemas.items()
+}
diff --git a/esque/protocol/api/api_versions.py b/esque/protocol/api/api_versions.py
new file mode 100644
index 00000000..a2a5879e
--- /dev/null
+++ b/esque/protocol/api/api_versions.py
@@ -0,0 +1,97 @@
+# FIXME
autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, +) + + +@dataclass +class ApiVersionsRequestData(RequestData): + @staticmethod + def api_key() -> int: + return ApiKey.API_VERSIONS # == 18 + + +@dataclass +class ApiVersions: + # API key. + api_key: "int" # INT16 + + # Minimum supported version. + min_version: "int" # INT16 + + # Maximum supported version. + max_version: "int" # INT16 + + +@dataclass +class ApiVersionsResponseData(ResponseData): + # Response error code + error_code: "int" # INT16 + + # API versions supported by the broker. + api_versions: List["ApiVersions"] + + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + @staticmethod + def api_key() -> int: + return ApiKey.API_VERSIONS # == 18 + + +apiVersionsRequestDataSchemas: Dict[int, Schema] = {0: [], 1: [], 2: []} + + +apiVersionsRequestDataSerializers: Dict[int, BaseSerializer[ApiVersionsRequestData]] = { + version: NamedTupleSerializer(ApiVersionsRequestData, schema) + for version, schema in apiVersionsRequestDataSchemas.items() +} + + +apiVersionsSchemas: Dict[int, Schema] = { + 0: [("api_key", int16Serializer), ("min_version", int16Serializer), ("max_version", int16Serializer)], + 1: [("api_key", int16Serializer), ("min_version", int16Serializer), ("max_version", int16Serializer)], + 2: [("api_key", int16Serializer), ("min_version", int16Serializer), ("max_version", int16Serializer)], +} + + +apiVersionsSerializers: Dict[int, BaseSerializer[ApiVersions]] = { + version: NamedTupleSerializer(ApiVersions, schema) for version, schema in apiVersionsSchemas.items() +} + + +apiVersionsResponseDataSchemas: Dict[int, Schema] = { + 0: [ + ("error_code", int16Serializer), + ("api_versions", ArraySerializer(apiVersionsSerializers[0])), + ("throttle_time_ms", DummySerializer(int())), + ], + 1: [ + ("error_code", int16Serializer), + ("api_versions", ArraySerializer(apiVersionsSerializers[1])), + ("throttle_time_ms", int32Serializer), + ], + 2: [ + ("error_code", int16Serializer), + ("api_versions", ArraySerializer(apiVersionsSerializers[2])), + ("throttle_time_ms", int32Serializer), + ], +} + + +apiVersionsResponseDataSerializers: Dict[int, BaseSerializer[ApiVersionsResponseData]] = { + version: NamedTupleSerializer(ApiVersionsResponseData, schema) + for version, schema in apiVersionsResponseDataSchemas.items() +} diff --git a/esque/protocol/api/base.py b/esque/protocol/api/base.py new file mode 100644 index 00000000..cba35ba3 --- /dev/null +++ b/esque/protocol/api/base.py @@ -0,0 +1,110 @@ +from enum import IntEnum +from typing import Optional + +from dataclasses import dataclass + +from ..constants import ErrorCode +from ..serializers import ( + BaseSerializer, + EnumSerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + nullableStringSerializer, +) + + +class ApiKey(IntEnum): + PRODUCE = 0 + FETCH = 1 + LIST_OFFSETS = 2 + METADATA = 3 + LEADER_AND_ISR = 4 + STOP_REPLICA = 5 + UPDATE_METADATA = 6 + CONTROLLED_SHUTDOWN = 7 + OFFSET_COMMIT = 8 + OFFSET_FETCH = 9 + FIND_COORDINATOR = 10 + JOIN_GROUP = 11 + HEARTBEAT = 12 + LEAVE_GROUP = 13 + SYNC_GROUP = 14 + DESCRIBE_GROUPS = 15 + 
LIST_GROUPS = 16 + SASL_HANDSHAKE = 17 + API_VERSIONS = 18 + CREATE_TOPICS = 19 + DELETE_TOPICS = 20 + DELETE_RECORDS = 21 + INIT_PRODUCER_ID = 22 + OFFSET_FOR_LEADER_EPOCH = 23 + ADD_PARTITIONS_TO_TXN = 24 + ADD_OFFSETS_TO_TXN = 25 + END_TXN = 26 + WRITE_TXN_MARKERS = 27 + TXN_OFFSET_COMMIT = 28 + DESCRIBE_ACLS = 29 + CREATE_ACLS = 30 + DELETE_ACLS = 31 + DESCRIBE_CONFIGS = 32 + ALTER_CONFIGS = 33 + ALTER_REPLICA_LOG_DIRS = 34 + DESCRIBE_LOG_DIRS = 35 + SASL_AUTHENTICATE = 36 + CREATE_PARTITIONS = 37 + CREATE_DELEGATION_TOKEN = 38 + RENEW_DELEGATION_TOKEN = 39 + EXPIRE_DELEGATION_TOKEN = 40 + DESCRIBE_DELEGATION_TOKEN = 41 + DELETE_GROUPS = 42 + ELECT_PREFERRED_LEADERS = 43 + INCREMENTAL_ALTER_CONFIGS = 44 + + +apiKeySerializer: BaseSerializer[ApiKey] = EnumSerializer(ApiKey, int16Serializer) + +errorCodeSerializer: BaseSerializer[ErrorCode] = EnumSerializer(ErrorCode, int16Serializer) + + +@dataclass +class RequestHeader: + api_key: ApiKey + api_version: int # INT16 + correlation_id: int # INT32 + client_id: Optional[str] + + +requestHeaderSchema: Schema = [ + ("api_key", apiKeySerializer), + ("api_version", int16Serializer), + ("correlation_id", int32Serializer), + ("client_id", nullableStringSerializer), +] + +requestHeaderSerializer: BaseSerializer[RequestHeader] = NamedTupleSerializer(RequestHeader, requestHeaderSchema) + + +@dataclass +class ResponseHeader: + correlation_id: int # INT32 + + +responseHeaderSchema: Schema = [("correlation_id", int32Serializer)] + +responseHeaderSerializer: BaseSerializer[ResponseHeader] = NamedTupleSerializer(ResponseHeader, responseHeaderSchema) + + +@dataclass +class RequestData: + @staticmethod + def api_key() -> ApiKey: + raise NotImplementedError() + + +@dataclass +class ResponseData: + @staticmethod + def api_key() -> ApiKey: + raise NotImplementedError() diff --git a/esque/protocol/api/controlled_shutdown.py b/esque/protocol/api/controlled_shutdown.py new file mode 100644 index 00000000..8a92870d --- /dev/null +++ b/esque/protocol/api/controlled_shutdown.py @@ -0,0 +1,91 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + int64Serializer, + stringSerializer, +) + + +@dataclass +class ControlledShutdownRequestData(RequestData): + # The id of the broker for which controlled shutdown has been requested. + broker_id: "int" # INT32 + + # The broker epoch. + broker_epoch: "int" # INT64 + + @staticmethod + def api_key() -> int: + return ApiKey.CONTROLLED_SHUTDOWN # == 7 + + +@dataclass +class RemainingPartitions: + # The name of the topic. + topic_name: "str" # STRING + + # The index of the partition. + partition_index: "int" # INT32 + + +@dataclass +class ControlledShutdownResponseData(ResponseData): + # The top-level error code. + error_code: "int" # INT16 + + # The partitions that the broker still leads. 
+ remaining_partitions: List["RemainingPartitions"] + + @staticmethod + def api_key() -> int: + return ApiKey.CONTROLLED_SHUTDOWN # == 7 + + +controlledShutdownRequestDataSchemas: Dict[int, Schema] = { + 0: [("broker_id", int32Serializer), ("broker_epoch", DummySerializer(int()))], + 1: [("broker_id", int32Serializer), ("broker_epoch", DummySerializer(int()))], + 2: [("broker_id", int32Serializer), ("broker_epoch", int64Serializer)], +} + + +controlledShutdownRequestDataSerializers: Dict[int, BaseSerializer[ControlledShutdownRequestData]] = { + version: NamedTupleSerializer(ControlledShutdownRequestData, schema) + for version, schema in controlledShutdownRequestDataSchemas.items() +} + + +remainingPartitionsSchemas: Dict[int, Schema] = { + 0: [("topic_name", stringSerializer), ("partition_index", int32Serializer)], + 1: [("topic_name", stringSerializer), ("partition_index", int32Serializer)], + 2: [("topic_name", stringSerializer), ("partition_index", int32Serializer)], +} + + +remainingPartitionsSerializers: Dict[int, BaseSerializer[RemainingPartitions]] = { + version: NamedTupleSerializer(RemainingPartitions, schema) + for version, schema in remainingPartitionsSchemas.items() +} + + +controlledShutdownResponseDataSchemas: Dict[int, Schema] = { + 0: [("error_code", int16Serializer), ("remaining_partitions", ArraySerializer(remainingPartitionsSerializers[0]))], + 1: [("error_code", int16Serializer), ("remaining_partitions", ArraySerializer(remainingPartitionsSerializers[1]))], + 2: [("error_code", int16Serializer), ("remaining_partitions", ArraySerializer(remainingPartitionsSerializers[2]))], +} + + +controlledShutdownResponseDataSerializers: Dict[int, BaseSerializer[ControlledShutdownResponseData]] = { + version: NamedTupleSerializer(ControlledShutdownResponseData, schema) + for version, schema in controlledShutdownResponseDataSchemas.items() +} diff --git a/esque/protocol/api/create_acls.py b/esque/protocol/api/create_acls.py new file mode 100644 index 00000000..55b9a700 --- /dev/null +++ b/esque/protocol/api/create_acls.py @@ -0,0 +1,141 @@ +# FIXME autogenerated module, check for errors! 
+from typing import Dict, List, Optional
+
+from dataclasses import dataclass
+
+from esque.protocol.api.base import ApiKey, RequestData, ResponseData
+from esque.protocol.serializers import (
+    ArraySerializer,
+    BaseSerializer,
+    DummySerializer,
+    NamedTupleSerializer,
+    Schema,
+    int16Serializer,
+    int32Serializer,
+    int8Serializer,
+    nullableStringSerializer,
+    stringSerializer,
+)
+
+
+@dataclass
+class Creations:
+    # The resource type
+    resource_type: "int"  # INT8
+
+    # The resource name
+    resource_name: "str"  # STRING
+
+    # The resource pattern type
+    resource_pattern_type: "int"  # INT8
+
+    # The ACL principal
+    principal: "str"  # STRING
+
+    # The ACL host
+    host: "str"  # STRING
+
+    # The ACL operation
+    operation: "int"  # INT8
+
+    # The ACL permission type
+    permission_type: "int"  # INT8
+
+
+@dataclass
+class CreateAclsRequestData(RequestData):
+    creations: List["Creations"]
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.CREATE_ACLS  # == 30
+
+
+@dataclass
+class CreationResponses:
+    # Response error code
+    error_code: "int"  # INT16
+
+    # Response error message
+    error_message: "Optional[str]"  # NULLABLE_STRING
+
+
+@dataclass
+class CreateAclsResponseData(ResponseData):
+    # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the
+    # request did not violate any quota)
+    throttle_time_ms: "int"  # INT32
+
+    creation_responses: List["CreationResponses"]
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.CREATE_ACLS  # == 30
+
+
+creationsSchemas: Dict[int, Schema] = {
+    0: [
+        ("resource_type", int8Serializer),
+        ("resource_name", stringSerializer),
+        ("principal", stringSerializer),
+        ("host", stringSerializer),
+        ("operation", int8Serializer),
+        ("permission_type", int8Serializer),
+        ("resource_pattern_type", DummySerializer(int())),
+    ],
+    1: [
+        ("resource_type", int8Serializer),
+        ("resource_name", stringSerializer),
+        ("resource_pattern_type", int8Serializer),
+        ("principal", stringSerializer),
+        ("host", stringSerializer),
+        ("operation", int8Serializer),
+        ("permission_type", int8Serializer),
+    ],
+}
+
+
+creationsSerializers: Dict[int, BaseSerializer[Creations]] = {
+    version: NamedTupleSerializer(Creations, schema) for version, schema in creationsSchemas.items()
+}
+
+
+createAclsRequestDataSchemas: Dict[int, Schema] = {
+    0: [("creations", ArraySerializer(creationsSerializers[0]))],
+    1: [("creations", ArraySerializer(creationsSerializers[1]))],
+}
+
+
+createAclsRequestDataSerializers: Dict[int, BaseSerializer[CreateAclsRequestData]] = {
+    version: NamedTupleSerializer(CreateAclsRequestData, schema)
+    for version, schema in createAclsRequestDataSchemas.items()
+}
+
+
+creationResponsesSchemas: Dict[int, Schema] = {
+    0: [("error_code", int16Serializer), ("error_message", nullableStringSerializer)],
+    1: [("error_code", int16Serializer), ("error_message", nullableStringSerializer)],
+}
+
+
+creationResponsesSerializers: Dict[int, BaseSerializer[CreationResponses]] = {
+    version: NamedTupleSerializer(CreationResponses, schema) for version, schema in creationResponsesSchemas.items()
+}
+
+
+createAclsResponseDataSchemas: Dict[int, Schema] = {
+    0: [
+        ("throttle_time_ms", int32Serializer),
+        ("creation_responses", ArraySerializer(creationResponsesSerializers[0])),
+    ],
+    1: [
+        ("throttle_time_ms", int32Serializer),
+        ("creation_responses", ArraySerializer(creationResponsesSerializers[1])),
+    ],
+}
+
+
+createAclsResponseDataSerializers: Dict[int, BaseSerializer[CreateAclsResponseData]] = {
+    version:
NamedTupleSerializer(CreateAclsResponseData, schema) + for version, schema in createAclsResponseDataSchemas.items() +} diff --git a/esque/protocol/api/create_delegation_token.py b/esque/protocol/api/create_delegation_token.py new file mode 100644 index 00000000..2594a31b --- /dev/null +++ b/esque/protocol/api/create_delegation_token.py @@ -0,0 +1,146 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + NamedTupleSerializer, + Schema, + bytesSerializer, + int16Serializer, + int32Serializer, + int64Serializer, + stringSerializer, +) + + +@dataclass +class Renewers: + # principalType of the Kafka principal + principal_type: "str" # STRING + + # name of the Kafka principal + name: "str" # STRING + + +@dataclass +class CreateDelegationTokenRequestData(RequestData): + # An array of token renewers. Renewer is an Kafka PrincipalType and name string, who is allowed to + # renew this token before the max lifetime expires. + renewers: List["Renewers"] + + # Max lifetime period for token in milli seconds. if value is -1, then max lifetime will default to a + # server side config value. + max_life_time: "int" # INT64 + + @staticmethod + def api_key() -> int: + return ApiKey.CREATE_DELEGATION_TOKEN # == 38 + + +@dataclass +class Owner: + # principalType of the Kafka principal + principal_type: "str" # STRING + + # name of the Kafka principal + name: "str" # STRING + + +@dataclass +class CreateDelegationTokenResponseData(ResponseData): + # Response error code + error_code: "int" # INT16 + + # token owner. + owner: "Owner" + + # timestamp (in msec) when this token was generated. + issue_timestamp: "int" # INT64 + + # timestamp (in msec) at which this token expires. + expiry_timestamp: "int" # INT64 + + # max life time of this token. + max_timestamp: "int" # INT64 + + # UUID to ensure uniqueness. + token_id: "str" # STRING + + # HMAC of the delegation token. 
+ hmac: "bytes" # BYTES + + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + @staticmethod + def api_key() -> int: + return ApiKey.CREATE_DELEGATION_TOKEN # == 38 + + +renewersSchemas: Dict[int, Schema] = { + 0: [("principal_type", stringSerializer), ("name", stringSerializer)], + 1: [("principal_type", stringSerializer), ("name", stringSerializer)], +} + + +renewersSerializers: Dict[int, BaseSerializer[Renewers]] = { + version: NamedTupleSerializer(Renewers, schema) for version, schema in renewersSchemas.items() +} + + +createDelegationTokenRequestDataSchemas: Dict[int, Schema] = { + 0: [("renewers", ArraySerializer(renewersSerializers[0])), ("max_life_time", int64Serializer)], + 1: [("renewers", ArraySerializer(renewersSerializers[1])), ("max_life_time", int64Serializer)], +} + + +createDelegationTokenRequestDataSerializers: Dict[int, BaseSerializer[CreateDelegationTokenRequestData]] = { + version: NamedTupleSerializer(CreateDelegationTokenRequestData, schema) + for version, schema in createDelegationTokenRequestDataSchemas.items() +} + + +ownerSchemas: Dict[int, Schema] = { + 0: [("principal_type", stringSerializer), ("name", stringSerializer)], + 1: [("principal_type", stringSerializer), ("name", stringSerializer)], +} + + +ownerSerializers: Dict[int, BaseSerializer[Owner]] = { + version: NamedTupleSerializer(Owner, schema) for version, schema in ownerSchemas.items() +} + + +createDelegationTokenResponseDataSchemas: Dict[int, Schema] = { + 0: [ + ("error_code", int16Serializer), + ("owner", ownerSerializers[0]), + ("issue_timestamp", int64Serializer), + ("expiry_timestamp", int64Serializer), + ("max_timestamp", int64Serializer), + ("token_id", stringSerializer), + ("hmac", bytesSerializer), + ("throttle_time_ms", int32Serializer), + ], + 1: [ + ("error_code", int16Serializer), + ("owner", ownerSerializers[1]), + ("issue_timestamp", int64Serializer), + ("expiry_timestamp", int64Serializer), + ("max_timestamp", int64Serializer), + ("token_id", stringSerializer), + ("hmac", bytesSerializer), + ("throttle_time_ms", int32Serializer), + ], +} + + +createDelegationTokenResponseDataSerializers: Dict[int, BaseSerializer[CreateDelegationTokenResponseData]] = { + version: NamedTupleSerializer(CreateDelegationTokenResponseData, schema) + for version, schema in createDelegationTokenResponseDataSchemas.items() +} diff --git a/esque/protocol/api/create_partitions.py b/esque/protocol/api/create_partitions.py new file mode 100644 index 00000000..7be48777 --- /dev/null +++ b/esque/protocol/api/create_partitions.py @@ -0,0 +1,141 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + NamedTupleSerializer, + Schema, + booleanSerializer, + int16Serializer, + int32Serializer, + nullableStringSerializer, + stringSerializer, +) + + +@dataclass +class NewPartitions: + # The new partition count. + count: "int" # INT32 + + # The assigned brokers. + assignment: List[List["int"]] # INT32 + + +@dataclass +class TopicPartitions: + # Name of topic + topic: "str" # STRING + + new_partitions: "NewPartitions" + + +@dataclass +class CreatePartitionsRequestData(RequestData): + # List of topic and the corresponding new partitions. 
+ topic_partitions: List["TopicPartitions"] + + # The time in ms to wait for the partitions to be created. + timeout: "int" # INT32 + + # If true then validate the request, but don't actually increase the number of partitions. + validate_only: "bool" # BOOLEAN + + @staticmethod + def api_key() -> int: + return ApiKey.CREATE_PARTITIONS # == 37 + + +@dataclass +class TopicErrors: + # Name of topic + topic: "str" # STRING + + # Response error code + error_code: "int" # INT16 + + # Response error message + error_message: "Optional[str]" # NULLABLE_STRING + + +@dataclass +class CreatePartitionsResponseData(ResponseData): + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + # Per topic results for the create partitions request + topic_errors: List["TopicErrors"] + + @staticmethod + def api_key() -> int: + return ApiKey.CREATE_PARTITIONS # == 37 + + +newPartitionsSchemas: Dict[int, Schema] = { + 0: [("count", int32Serializer), ("assignment", ArraySerializer(ArraySerializer(int32Serializer)))], + 1: [("count", int32Serializer), ("assignment", ArraySerializer(ArraySerializer(int32Serializer)))], +} + + +newPartitionsSerializers: Dict[int, BaseSerializer[NewPartitions]] = { + version: NamedTupleSerializer(NewPartitions, schema) for version, schema in newPartitionsSchemas.items() +} + + +topicPartitionsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("new_partitions", newPartitionsSerializers[0])], + 1: [("topic", stringSerializer), ("new_partitions", newPartitionsSerializers[1])], +} + + +topicPartitionsSerializers: Dict[int, BaseSerializer[TopicPartitions]] = { + version: NamedTupleSerializer(TopicPartitions, schema) for version, schema in topicPartitionsSchemas.items() +} + + +createPartitionsRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("topic_partitions", ArraySerializer(topicPartitionsSerializers[0])), + ("timeout", int32Serializer), + ("validate_only", booleanSerializer), + ], + 1: [ + ("topic_partitions", ArraySerializer(topicPartitionsSerializers[1])), + ("timeout", int32Serializer), + ("validate_only", booleanSerializer), + ], +} + + +createPartitionsRequestDataSerializers: Dict[int, BaseSerializer[CreatePartitionsRequestData]] = { + version: NamedTupleSerializer(CreatePartitionsRequestData, schema) + for version, schema in createPartitionsRequestDataSchemas.items() +} + + +topicErrorsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("error_code", int16Serializer), ("error_message", nullableStringSerializer)], + 1: [("topic", stringSerializer), ("error_code", int16Serializer), ("error_message", nullableStringSerializer)], +} + + +topicErrorsSerializers: Dict[int, BaseSerializer[TopicErrors]] = { + version: NamedTupleSerializer(TopicErrors, schema) for version, schema in topicErrorsSchemas.items() +} + + +createPartitionsResponseDataSchemas: Dict[int, Schema] = { + 0: [("throttle_time_ms", int32Serializer), ("topic_errors", ArraySerializer(topicErrorsSerializers[0]))], + 1: [("throttle_time_ms", int32Serializer), ("topic_errors", ArraySerializer(topicErrorsSerializers[1]))], +} + + +createPartitionsResponseDataSerializers: Dict[int, BaseSerializer[CreatePartitionsResponseData]] = { + version: NamedTupleSerializer(CreatePartitionsResponseData, schema) + for version, schema in createPartitionsResponseDataSchemas.items() +} diff --git a/esque/protocol/api/create_topics.py b/esque/protocol/api/create_topics.py new file mode 100644 
index 00000000..fbc65e1b --- /dev/null +++ b/esque/protocol/api/create_topics.py @@ -0,0 +1,218 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List, Optional + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + booleanSerializer, + int16Serializer, + int32Serializer, + nullableStringSerializer, + stringSerializer, +) + + +@dataclass +class Assignments: + # The partition index. + partition_index: "int" # INT32 + + # The brokers to place the partition on. + broker_ids: List["int"] # INT32 + + +@dataclass +class Configs: + # The configuration name. + name: "str" # STRING + + # The configuration value. + value: "Optional[str]" # NULLABLE_STRING + + +@dataclass +class Topics: + # The topic name. + name: "str" # STRING + + # The number of partitions to create in the topic, or -1 if we are specifying a manual partition + # assignment. + num_partitions: "int" # INT32 + + # The number of replicas to create for each partition in the topic, or -1 if we are specifying a + # manual partition assignment. + replication_factor: "int" # INT16 + + # The manual partition assignment, or the empty array if we are using automatic assignment. + assignments: List["Assignments"] + + # The custom topic configurations to set. + configs: List["Configs"] + + +@dataclass +class CreateTopicsRequestData(RequestData): + # The topics to create. + topics: List["Topics"] + + # How long to wait in milliseconds before timing out the request. + timeout_ms: "int" # INT32 + + # If true, check that the topics can be created as specified, but don't create anything. + validate_only: "bool" # BOOLEAN + + @staticmethod + def api_key() -> int: + return ApiKey.CREATE_TOPICS # == 19 + + +@dataclass +class Topics: + # The topic name. + name: "str" # STRING + + # The error code, or 0 if there was no error. + error_code: "int" # INT16 + + # The error message, or null if there was no error. + error_message: "Optional[str]" # NULLABLE_STRING + + +@dataclass +class CreateTopicsResponseData(ResponseData): + # The duration in milliseconds for which the request was throttled due to a quota violation, or zero + # if the request did not violate any quota. + throttle_time_ms: "int" # INT32 + + # Results for each topic we tried to create.
+ topics: List["Topics"] + + @staticmethod + def api_key() -> int: + return ApiKey.CREATE_TOPICS # == 19 + + +assignmentsSchemas: Dict[int, Schema] = { + 0: [("partition_index", int32Serializer), ("broker_ids", ArraySerializer(int32Serializer))], + 1: [("partition_index", int32Serializer), ("broker_ids", ArraySerializer(int32Serializer))], + 2: [("partition_index", int32Serializer), ("broker_ids", ArraySerializer(int32Serializer))], + 3: [("partition_index", int32Serializer), ("broker_ids", ArraySerializer(int32Serializer))], +} + + +assignmentsSerializers: Dict[int, BaseSerializer[Assignments]] = { + version: NamedTupleSerializer(Assignments, schema) for version, schema in assignmentsSchemas.items() +} + + +configsSchemas: Dict[int, Schema] = { + 0: [("name", stringSerializer), ("value", nullableStringSerializer)], + 1: [("name", stringSerializer), ("value", nullableStringSerializer)], + 2: [("name", stringSerializer), ("value", nullableStringSerializer)], + 3: [("name", stringSerializer), ("value", nullableStringSerializer)], +} + + +configsSerializers: Dict[int, BaseSerializer[Configs]] = { + version: NamedTupleSerializer(Configs, schema) for version, schema in configsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [ + ("name", stringSerializer), + ("num_partitions", int32Serializer), + ("replication_factor", int16Serializer), + ("assignments", ArraySerializer(assignmentsSerializers[0])), + ("configs", ArraySerializer(configsSerializers[0])), + ], + 1: [ + ("name", stringSerializer), + ("num_partitions", int32Serializer), + ("replication_factor", int16Serializer), + ("assignments", ArraySerializer(assignmentsSerializers[1])), + ("configs", ArraySerializer(configsSerializers[1])), + ], + 2: [ + ("name", stringSerializer), + ("num_partitions", int32Serializer), + ("replication_factor", int16Serializer), + ("assignments", ArraySerializer(assignmentsSerializers[2])), + ("configs", ArraySerializer(configsSerializers[2])), + ], + 3: [ + ("name", stringSerializer), + ("num_partitions", int32Serializer), + ("replication_factor", int16Serializer), + ("assignments", ArraySerializer(assignmentsSerializers[3])), + ("configs", ArraySerializer(configsSerializers[3])), + ], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +createTopicsRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("topics", ArraySerializer(topicsSerializers[0])), + ("timeout_ms", int32Serializer), + ("validate_only", DummySerializer(bool())), + ], + 1: [ + ("topics", ArraySerializer(topicsSerializers[1])), + ("timeout_ms", int32Serializer), + ("validate_only", booleanSerializer), + ], + 2: [ + ("topics", ArraySerializer(topicsSerializers[2])), + ("timeout_ms", int32Serializer), + ("validate_only", booleanSerializer), + ], + 3: [ + ("topics", ArraySerializer(topicsSerializers[3])), + ("timeout_ms", int32Serializer), + ("validate_only", booleanSerializer), + ], +} + + +createTopicsRequestDataSerializers: Dict[int, BaseSerializer[CreateTopicsRequestData]] = { + version: NamedTupleSerializer(CreateTopicsRequestData, schema) + for version, schema in createTopicsRequestDataSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [("name", stringSerializer), ("error_code", int16Serializer), ("error_message", DummySerializer(None))], + 1: [("name", stringSerializer), ("error_code", int16Serializer), ("error_message", nullableStringSerializer)], + 2: [("name", stringSerializer), ("error_code", 
int16Serializer), ("error_message", nullableStringSerializer)], + 3: [("name", stringSerializer), ("error_code", int16Serializer), ("error_message", nullableStringSerializer)], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +createTopicsResponseDataSchemas: Dict[int, Schema] = { + 0: [("topics", ArraySerializer(topicsSerializers[0])), ("throttle_time_ms", DummySerializer(int()))], + 1: [("topics", ArraySerializer(topicsSerializers[1])), ("throttle_time_ms", DummySerializer(int()))], + 2: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(topicsSerializers[2]))], + 3: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(topicsSerializers[3]))], +} + + +createTopicsResponseDataSerializers: Dict[int, BaseSerializer[CreateTopicsResponseData]] = { + version: NamedTupleSerializer(CreateTopicsResponseData, schema) + for version, schema in createTopicsResponseDataSchemas.items() +} diff --git a/esque/protocol/api/delete_acls.py b/esque/protocol/api/delete_acls.py new file mode 100644 index 00000000..b9fa85a1 --- /dev/null +++ b/esque/protocol/api/delete_acls.py @@ -0,0 +1,207 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + int8Serializer, + nullableStringSerializer, + stringSerializer, +) + + +@dataclass +class Filters: + # The resource type + resource_type: "int" # INT8 + + # The resource name filter + resource_name: "Optional[str]" # NULLABLE_STRING + + # The resource pattern type filter + resource_pattern_type_filter: "int" # INT8 + + # The ACL principal filter + principal: "Optional[str]" # NULLABLE_STRING + + # The ACL host filter + host: "Optional[str]" # NULLABLE_STRING + + # The ACL operation + operation: "int" # INT8 + + # The ACL permission type + permission_type: "int" # INT8 + + +@dataclass +class DeleteAclsRequestData(RequestData): + filters: List["Filters"] + + @staticmethod + def api_key() -> int: + return ApiKey.DELETE_ACLS # == 31 + + +@dataclass +class MatchingAcls: + # Response error code + error_code: "int" # INT16 + + # Response error message + error_message: "Optional[str]" # NULLABLE_STRING + + # The resource type + resource_type: "int" # INT8 + + # The resource name + resource_name: "str" # STRING + + # The resource pattern type + resource_pattern_type: "int" # INT8 + + # The ACL principal + principal: "str" # STRING + + # The ACL host + host: "str" # STRING + + # The ACL operation + operation: "int" # INT8 + + # The ACL permission type + permission_type: "int" # INT8 + + +@dataclass +class FilterResponses: + # Response error code + error_code: "int" # INT16 + + # Response error message + error_message: "Optional[str]" # NULLABLE_STRING + + # The matching ACLs + matching_acls: List["MatchingAcls"] + + +@dataclass +class DeleteAclsResponseData(ResponseData): + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + filter_responses: List["FilterResponses"] + + @staticmethod + def api_key() -> int: + return ApiKey.DELETE_ACLS # == 31 + + +filtersSchemas: Dict[int, Schema] = { + 0: [ + ("resource_type", 
int8Serializer), + ("resource_name", nullableStringSerializer), + ("principal", nullableStringSerializer), + ("host", nullableStringSerializer), + ("operation", int8Serializer), + ("permission_type", int8Serializer), + ("resource_pattern_type_filter", DummySerializer(int())), + ], + 1: [ + ("resource_type", int8Serializer), + ("resource_name", nullableStringSerializer), + ("resource_pattern_type_filter", int8Serializer), + ("principal", nullableStringSerializer), + ("host", nullableStringSerializer), + ("operation", int8Serializer), + ("permission_type", int8Serializer), + ], +} + + +filtersSerializers: Dict[int, BaseSerializer[Filters]] = { + version: NamedTupleSerializer(Filters, schema) for version, schema in filtersSchemas.items() +} + + +deleteAclsRequestDataSchemas: Dict[int, Schema] = { + 0: [("filters", ArraySerializer(filtersSerializers[0]))], + 1: [("filters", ArraySerializer(filtersSerializers[1]))], +} + + +deleteAclsRequestDataSerializers: Dict[int, BaseSerializer[DeleteAclsRequestData]] = { + version: NamedTupleSerializer(DeleteAclsRequestData, schema) + for version, schema in deleteAclsRequestDataSchemas.items() +} + + +matchingAclsSchemas: Dict[int, Schema] = { + 0: [ + ("error_code", int16Serializer), + ("error_message", nullableStringSerializer), + ("resource_type", int8Serializer), + ("resource_name", stringSerializer), + ("principal", stringSerializer), + ("host", stringSerializer), + ("operation", int8Serializer), + ("permission_type", int8Serializer), + ("resource_pattern_type", DummySerializer(int())), + ], + 1: [ + ("error_code", int16Serializer), + ("error_message", nullableStringSerializer), + ("resource_type", int8Serializer), + ("resource_name", stringSerializer), + ("resource_pattern_type", int8Serializer), + ("principal", stringSerializer), + ("host", stringSerializer), + ("operation", int8Serializer), + ("permission_type", int8Serializer), + ], +} + + +matchingAclsSerializers: Dict[int, BaseSerializer[MatchingAcls]] = { + version: NamedTupleSerializer(MatchingAcls, schema) for version, schema in matchingAclsSchemas.items() +} + + +filterResponsesSchemas: Dict[int, Schema] = { + 0: [ + ("error_code", int16Serializer), + ("error_message", nullableStringSerializer), + ("matching_acls", ArraySerializer(matchingAclsSerializers[0])), + ], + 1: [ + ("error_code", int16Serializer), + ("error_message", nullableStringSerializer), + ("matching_acls", ArraySerializer(matchingAclsSerializers[1])), + ], +} + + +filterResponsesSerializers: Dict[int, BaseSerializer[FilterResponses]] = { + version: NamedTupleSerializer(FilterResponses, schema) for version, schema in filterResponsesSchemas.items() +} + + +deleteAclsResponseDataSchemas: Dict[int, Schema] = { + 0: [("throttle_time_ms", int32Serializer), ("filter_responses", ArraySerializer(filterResponsesSerializers[0]))], + 1: [("throttle_time_ms", int32Serializer), ("filter_responses", ArraySerializer(filterResponsesSerializers[1]))], +} + + +deleteAclsResponseDataSerializers: Dict[int, BaseSerializer[DeleteAclsResponseData]] = { + version: NamedTupleSerializer(DeleteAclsResponseData, schema) + for version, schema in deleteAclsResponseDataSchemas.items() +} diff --git a/esque/protocol/api/delete_groups.py b/esque/protocol/api/delete_groups.py new file mode 100644 index 00000000..16120c6f --- /dev/null +++ b/esque/protocol/api/delete_groups.py @@ -0,0 +1,83 @@ +# FIXME autogenerated module, check for errors! 
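+# Review note: DeleteGroups (API key 42) versions 0 and 1 share the same wire format, so the schema dicts below simply repeat one field list per version. Illustrative sketch only, with a hypothetical group name: request = DeleteGroupsRequestData(groups=["my-group"])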
+from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + stringSerializer, +) + + +@dataclass +class DeleteGroupsRequestData(RequestData): + # An array of groups to be deleted. + groups: List["str"] # STRING + + @staticmethod + def api_key() -> int: + return ApiKey.DELETE_GROUPS # == 42 + + +@dataclass +class GroupErrorCodes: + # The unique group identifier + group_id: "str" # STRING + + # Response error code + error_code: "int" # INT16 + + +@dataclass +class DeleteGroupsResponseData(ResponseData): + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + # An array of per group error codes. + group_error_codes: List["GroupErrorCodes"] + + @staticmethod + def api_key() -> int: + return ApiKey.DELETE_GROUPS # == 42 + + +deleteGroupsRequestDataSchemas: Dict[int, Schema] = { + 0: [("groups", ArraySerializer(stringSerializer))], + 1: [("groups", ArraySerializer(stringSerializer))], +} + + +deleteGroupsRequestDataSerializers: Dict[int, BaseSerializer[DeleteGroupsRequestData]] = { + version: NamedTupleSerializer(DeleteGroupsRequestData, schema) + for version, schema in deleteGroupsRequestDataSchemas.items() +} + + +groupErrorCodesSchemas: Dict[int, Schema] = { + 0: [("group_id", stringSerializer), ("error_code", int16Serializer)], + 1: [("group_id", stringSerializer), ("error_code", int16Serializer)], +} + + +groupErrorCodesSerializers: Dict[int, BaseSerializer[GroupErrorCodes]] = { + version: NamedTupleSerializer(GroupErrorCodes, schema) for version, schema in groupErrorCodesSchemas.items() +} + + +deleteGroupsResponseDataSchemas: Dict[int, Schema] = { + 0: [("throttle_time_ms", int32Serializer), ("group_error_codes", ArraySerializer(groupErrorCodesSerializers[0]))], + 1: [("throttle_time_ms", int32Serializer), ("group_error_codes", ArraySerializer(groupErrorCodesSerializers[1]))], +} + + +deleteGroupsResponseDataSerializers: Dict[int, BaseSerializer[DeleteGroupsResponseData]] = { + version: NamedTupleSerializer(DeleteGroupsResponseData, schema) + for version, schema in deleteGroupsResponseDataSchemas.items() +} diff --git a/esque/protocol/api/delete_records.py b/esque/protocol/api/delete_records.py new file mode 100644 index 00000000..8a0533f7 --- /dev/null +++ b/esque/protocol/api/delete_records.py @@ -0,0 +1,146 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + int64Serializer, + stringSerializer, +) + + +@dataclass +class Partitions: + # Topic partition id + partition: "int" # INT32 + + # The offset before which the messages will be deleted. -1 means high-watermark for the partition. + offset: "int" # INT64 + + +@dataclass +class Topics: + # Name of topic + topic: "str" # STRING + + partitions: List["Partitions"] + + +@dataclass +class DeleteRecordsRequestData(RequestData): + topics: List["Topics"] + + # The maximum time to await a response in ms. 
+ timeout: "int" # INT32 + + @staticmethod + def api_key() -> int: + return ApiKey.DELETE_RECORDS # == 21 + + +@dataclass +class Partitions: + # Topic partition id + partition: "int" # INT32 + + # Smallest available offset of all live replicas + low_watermark: "int" # INT64 + + # Response error code + error_code: "int" # INT16 + + +@dataclass +class Topics: + # Name of topic + topic: "str" # STRING + + partitions: List["Partitions"] + + +@dataclass +class DeleteRecordsResponseData(ResponseData): + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + topics: List["Topics"] + + @staticmethod + def api_key() -> int: + return ApiKey.DELETE_RECORDS # == 21 + + +partitionsSchemas: Dict[int, Schema] = { + 0: [("partition", int32Serializer), ("offset", int64Serializer)], + 1: [("partition", int32Serializer), ("offset", int64Serializer)], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[0]))], + 1: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[1]))], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +deleteRecordsRequestDataSchemas: Dict[int, Schema] = { + 0: [("topics", ArraySerializer(topicsSerializers[0])), ("timeout", int32Serializer)], + 1: [("topics", ArraySerializer(topicsSerializers[1])), ("timeout", int32Serializer)], +} + + +deleteRecordsRequestDataSerializers: Dict[int, BaseSerializer[DeleteRecordsRequestData]] = { + version: NamedTupleSerializer(DeleteRecordsRequestData, schema) + for version, schema in deleteRecordsRequestDataSchemas.items() +} + + +partitionsSchemas: Dict[int, Schema] = { + 0: [("partition", int32Serializer), ("low_watermark", int64Serializer), ("error_code", int16Serializer)], + 1: [("partition", int32Serializer), ("low_watermark", int64Serializer), ("error_code", int16Serializer)], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[0]))], + 1: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[1]))], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +deleteRecordsResponseDataSchemas: Dict[int, Schema] = { + 0: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(topicsSerializers[0]))], + 1: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(topicsSerializers[1]))], +} + + +deleteRecordsResponseDataSerializers: Dict[int, BaseSerializer[DeleteRecordsResponseData]] = { + version: NamedTupleSerializer(DeleteRecordsResponseData, schema) + for version, schema in deleteRecordsResponseDataSchemas.items() +} diff --git a/esque/protocol/api/delete_topics.py b/esque/protocol/api/delete_topics.py new file mode 100644 index 00000000..e2dca73e --- /dev/null +++ b/esque/protocol/api/delete_topics.py @@ -0,0 +1,93 @@ +# FIXME 
autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + stringSerializer, +) + + +@dataclass +class DeleteTopicsRequestData(RequestData): + # The names of the topics to delete + topic_names: List["str"] # STRING + + # The length of time in milliseconds to wait for the deletions to complete. + timeout_ms: "int" # INT32 + + @staticmethod + def api_key() -> int: + return ApiKey.DELETE_TOPICS # == 20 + + +@dataclass +class Responses: + # The topic name + name: "str" # STRING + + # The deletion error, or 0 if the deletion succeeded. + error_code: "int" # INT16 + + +@dataclass +class DeleteTopicsResponseData(ResponseData): + # The duration in milliseconds for which the request was throttled due to a quota violation, or zero + # if the request did not violate any quota. + throttle_time_ms: "int" # INT32 + + # The results for each topic we tried to delete. + responses: List["Responses"] + + @staticmethod + def api_key() -> int: + return ApiKey.DELETE_TOPICS # == 20 + + +deleteTopicsRequestDataSchemas: Dict[int, Schema] = { + 0: [("topic_names", ArraySerializer(stringSerializer)), ("timeout_ms", int32Serializer)], + 1: [("topic_names", ArraySerializer(stringSerializer)), ("timeout_ms", int32Serializer)], + 2: [("topic_names", ArraySerializer(stringSerializer)), ("timeout_ms", int32Serializer)], + 3: [("topic_names", ArraySerializer(stringSerializer)), ("timeout_ms", int32Serializer)], +} + + +deleteTopicsRequestDataSerializers: Dict[int, BaseSerializer[DeleteTopicsRequestData]] = { + version: NamedTupleSerializer(DeleteTopicsRequestData, schema) + for version, schema in deleteTopicsRequestDataSchemas.items() +} + + +responsesSchemas: Dict[int, Schema] = { + 0: [("name", stringSerializer), ("error_code", int16Serializer)], + 1: [("name", stringSerializer), ("error_code", int16Serializer)], + 2: [("name", stringSerializer), ("error_code", int16Serializer)], + 3: [("name", stringSerializer), ("error_code", int16Serializer)], +} + + +responsesSerializers: Dict[int, BaseSerializer[Responses]] = { + version: NamedTupleSerializer(Responses, schema) for version, schema in responsesSchemas.items() +} + + +deleteTopicsResponseDataSchemas: Dict[int, Schema] = { + 0: [("responses", ArraySerializer(responsesSerializers[0])), ("throttle_time_ms", DummySerializer(int()))], + 1: [("throttle_time_ms", int32Serializer), ("responses", ArraySerializer(responsesSerializers[1]))], + 2: [("throttle_time_ms", int32Serializer), ("responses", ArraySerializer(responsesSerializers[2]))], + 3: [("throttle_time_ms", int32Serializer), ("responses", ArraySerializer(responsesSerializers[3]))], +} + + +deleteTopicsResponseDataSerializers: Dict[int, BaseSerializer[DeleteTopicsResponseData]] = { + version: NamedTupleSerializer(DeleteTopicsResponseData, schema) + for version, schema in deleteTopicsResponseDataSchemas.items() +} diff --git a/esque/protocol/api/describe_acls.py b/esque/protocol/api/describe_acls.py new file mode 100644 index 00000000..88749e0d --- /dev/null +++ b/esque/protocol/api/describe_acls.py @@ -0,0 +1,187 @@ +# FIXME autogenerated module, check for errors! 
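+# Review note: the two versions below differ only in where resource_pattern_type_filter sits. It is absent from the v0 wire format, so the v0 schema appends a DummySerializer(int()) entry, which presumably writes nothing and yields the given default on decode; in v1 it is a real int8 field. The same convention recurs across the generated modules.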
+from typing import Dict, List, Optional + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + int8Serializer, + nullableStringSerializer, + stringSerializer, +) + + +@dataclass +class DescribeAclsRequestData(RequestData): + # The resource type + resource_type: "int" # INT8 + + # The resource name filter + resource_name: "Optional[str]" # NULLABLE_STRING + + # The resource pattern type filter + resource_pattern_type_filter: "int" # INT8 + + # The ACL principal filter + principal: "Optional[str]" # NULLABLE_STRING + + # The ACL host filter + host: "Optional[str]" # NULLABLE_STRING + + # The ACL operation + operation: "int" # INT8 + + # The ACL permission type + permission_type: "int" # INT8 + + @staticmethod + def api_key() -> int: + return ApiKey.DESCRIBE_ACLS # == 29 + + +@dataclass +class Acls: + # The ACL principal + principal: "str" # STRING + + # The ACL host + host: "str" # STRING + + # The ACL operation + operation: "int" # INT8 + + # The ACL permission type + permission_type: "int" # INT8 + + +@dataclass +class Resources: + # The resource type + resource_type: "int" # INT8 + + # The resource name + resource_name: "str" # STRING + + # The resource pattern type + resource_pattern_type: "int" # INT8 + + acls: List["Acls"] + + +@dataclass +class DescribeAclsResponseData(ResponseData): + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + # Response error code + error_code: "int" # INT16 + + # Response error message + error_message: "Optional[str]" # NULLABLE_STRING + + # The resources and their associated ACLs.
+ resources: List["Resources"] + + @staticmethod + def api_key() -> int: + return ApiKey.DESCRIBE_ACLS # == 29 + + +describeAclsRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("resource_type", int8Serializer), + ("resource_name", nullableStringSerializer), + ("principal", nullableStringSerializer), + ("host", nullableStringSerializer), + ("operation", int8Serializer), + ("permission_type", int8Serializer), + ("resource_pattern_type_filter", DummySerializer(int())), + ], + 1: [ + ("resource_type", int8Serializer), + ("resource_name", nullableStringSerializer), + ("resource_pattern_type_filter", int8Serializer), + ("principal", nullableStringSerializer), + ("host", nullableStringSerializer), + ("operation", int8Serializer), + ("permission_type", int8Serializer), + ], +} + + +describeAclsRequestDataSerializers: Dict[int, BaseSerializer[DescribeAclsRequestData]] = { + version: NamedTupleSerializer(DescribeAclsRequestData, schema) + for version, schema in describeAclsRequestDataSchemas.items() +} + + +aclsSchemas: Dict[int, Schema] = { + 0: [ + ("principal", stringSerializer), + ("host", stringSerializer), + ("operation", int8Serializer), + ("permission_type", int8Serializer), + ], + 1: [ + ("principal", stringSerializer), + ("host", stringSerializer), + ("operation", int8Serializer), + ("permission_type", int8Serializer), + ], +} + + +aclsSerializers: Dict[int, BaseSerializer[Acls]] = { + version: NamedTupleSerializer(Acls, schema) for version, schema in aclsSchemas.items() +} + + +resourcesSchemas: Dict[int, Schema] = { + 0: [ + ("resource_type", int8Serializer), + ("resource_name", stringSerializer), + ("acls", ArraySerializer(aclsSerializers[0])), + ("resource_pattern_type", DummySerializer(int())), + ], + 1: [ + ("resource_type", int8Serializer), + ("resource_name", stringSerializer), + ("resource_pattern_type", int8Serializer), + ("acls", ArraySerializer(aclsSerializers[1])), + ], +} + + +resourcesSerializers: Dict[int, BaseSerializer[Resources]] = { + version: NamedTupleSerializer(Resources, schema) for version, schema in resourcesSchemas.items() +} + + +describeAclsResponseDataSchemas: Dict[int, Schema] = { + 0: [ + ("throttle_time_ms", int32Serializer), + ("error_code", int16Serializer), + ("error_message", nullableStringSerializer), + ("resources", ArraySerializer(resourcesSerializers[0])), + ], + 1: [ + ("throttle_time_ms", int32Serializer), + ("error_code", int16Serializer), + ("error_message", nullableStringSerializer), + ("resources", ArraySerializer(resourcesSerializers[1])), + ], +} + + +describeAclsResponseDataSerializers: Dict[int, BaseSerializer[DescribeAclsResponseData]] = { + version: NamedTupleSerializer(DescribeAclsResponseData, schema) + for version, schema in describeAclsResponseDataSchemas.items() +} diff --git a/esque/protocol/api/describe_configs.py b/esque/protocol/api/describe_configs.py new file mode 100644 index 00000000..a86b2275 --- /dev/null +++ b/esque/protocol/api/describe_configs.py @@ -0,0 +1,225 @@ +# FIXME autogenerated module, check for errors! 
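+# Review note: two quirks in this module. First, the v0 config entry schema below carries a (None, booleanSerializer) entry, which appears to parse and discard the legacy v0 boolean (is_default) that later versions replaced with config_source. Second, Resources is defined twice (request shape, then response shape); by the time the serializers below are built the name refers to the later definition - one more thing the FIXME header asks reviewers to check. The same name reuse occurs in create_topics, delete_records and describe_log_dirs.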
+from typing import Dict, List, Optional + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + booleanSerializer, + int16Serializer, + int32Serializer, + int8Serializer, + nullableStringSerializer, + stringSerializer, +) + + +@dataclass +class Resources: + resource_type: "int" # INT8 + + resource_name: "str" # STRING + + config_names: List["str"] # STRING + + +@dataclass +class DescribeConfigsRequestData(RequestData): + # An array of config resources to be returned. + resources: List["Resources"] + + include_synonyms: "bool" # BOOLEAN + + @staticmethod + def api_key() -> int: + return ApiKey.DESCRIBE_CONFIGS # == 32 + + +@dataclass +class ConfigSynonyms: + config_name: "str" # STRING + + config_value: "Optional[str]" # NULLABLE_STRING + + config_source: "int" # INT8 + + +@dataclass +class ConfigEntries: + config_name: "str" # STRING + + config_value: "Optional[str]" # NULLABLE_STRING + + read_only: "bool" # BOOLEAN + + config_source: "int" # INT8 + + is_sensitive: "bool" # BOOLEAN + + config_synonyms: List["ConfigSynonyms"] + + +@dataclass +class Resources: + # Response error code + error_code: "int" # INT16 + + # Response error message + error_message: "Optional[str]" # NULLABLE_STRING + + resource_type: "int" # INT8 + + resource_name: "str" # STRING + + config_entries: List["ConfigEntries"] + + +@dataclass +class DescribeConfigsResponseData(ResponseData): + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + resources: List["Resources"] + + @staticmethod + def api_key() -> int: + return ApiKey.DESCRIBE_CONFIGS # == 32 + + +resourcesSchemas: Dict[int, Schema] = { + 0: [ + ("resource_type", int8Serializer), + ("resource_name", stringSerializer), + ("config_names", ArraySerializer(stringSerializer)), + ], + 1: [ + ("resource_type", int8Serializer), + ("resource_name", stringSerializer), + ("config_names", ArraySerializer(stringSerializer)), + ], + 2: [ + ("resource_type", int8Serializer), + ("resource_name", stringSerializer), + ("config_names", ArraySerializer(stringSerializer)), + ], +} + + +resourcesSerializers: Dict[int, BaseSerializer[Resources]] = { + version: NamedTupleSerializer(Resources, schema) for version, schema in resourcesSchemas.items() +} + + +describeConfigsRequestDataSchemas: Dict[int, Schema] = { + 0: [("resources", ArraySerializer(resourcesSerializers[0])), ("include_synonyms", DummySerializer(bool()))], + 1: [("resources", ArraySerializer(resourcesSerializers[1])), ("include_synonyms", booleanSerializer)], + 2: [("resources", ArraySerializer(resourcesSerializers[2])), ("include_synonyms", booleanSerializer)], +} + + +describeConfigsRequestDataSerializers: Dict[int, BaseSerializer[DescribeConfigsRequestData]] = { + version: NamedTupleSerializer(DescribeConfigsRequestData, schema) + for version, schema in describeConfigsRequestDataSchemas.items() +} + + +configSynonymsSchemas: Dict[int, Schema] = { + 1: [ + ("config_name", stringSerializer), + ("config_value", nullableStringSerializer), + ("config_source", int8Serializer), + ], + 2: [ + ("config_name", stringSerializer), + ("config_value", nullableStringSerializer), + ("config_source", int8Serializer), + ], +} + + +configSynonymsSerializers: Dict[int, BaseSerializer[ConfigSynonyms]] = { + version:
NamedTupleSerializer(ConfigSynonyms, schema) for version, schema in configSynonymsSchemas.items() +} + + +configEntriesSchemas: Dict[int, Schema] = { + 0: [ + ("config_name", stringSerializer), + ("config_value", nullableStringSerializer), + ("read_only", booleanSerializer), + (None, booleanSerializer), + ("is_sensitive", booleanSerializer), + ("config_source", DummySerializer(int())), + ("config_synonyms", DummySerializer([])), + ], + 1: [ + ("config_name", stringSerializer), + ("config_value", nullableStringSerializer), + ("read_only", booleanSerializer), + ("config_source", int8Serializer), + ("is_sensitive", booleanSerializer), + ("config_synonyms", ArraySerializer(configSynonymsSerializers[1])), + ], + 2: [ + ("config_name", stringSerializer), + ("config_value", nullableStringSerializer), + ("read_only", booleanSerializer), + ("config_source", int8Serializer), + ("is_sensitive", booleanSerializer), + ("config_synonyms", ArraySerializer(configSynonymsSerializers[2])), + ], +} + + +configEntriesSerializers: Dict[int, BaseSerializer[ConfigEntries]] = { + version: NamedTupleSerializer(ConfigEntries, schema) for version, schema in configEntriesSchemas.items() +} + + +resourcesSchemas: Dict[int, Schema] = { + 0: [ + ("error_code", int16Serializer), + ("error_message", nullableStringSerializer), + ("resource_type", int8Serializer), + ("resource_name", stringSerializer), + ("config_entries", ArraySerializer(configEntriesSerializers[0])), + ], + 1: [ + ("error_code", int16Serializer), + ("error_message", nullableStringSerializer), + ("resource_type", int8Serializer), + ("resource_name", stringSerializer), + ("config_entries", ArraySerializer(configEntriesSerializers[1])), + ], + 2: [ + ("error_code", int16Serializer), + ("error_message", nullableStringSerializer), + ("resource_type", int8Serializer), + ("resource_name", stringSerializer), + ("config_entries", ArraySerializer(configEntriesSerializers[2])), + ], +} + + +resourcesSerializers: Dict[int, BaseSerializer[Resources]] = { + version: NamedTupleSerializer(Resources, schema) for version, schema in resourcesSchemas.items() +} + + +describeConfigsResponseDataSchemas: Dict[int, Schema] = { + 0: [("throttle_time_ms", int32Serializer), ("resources", ArraySerializer(resourcesSerializers[0]))], + 1: [("throttle_time_ms", int32Serializer), ("resources", ArraySerializer(resourcesSerializers[1]))], + 2: [("throttle_time_ms", int32Serializer), ("resources", ArraySerializer(resourcesSerializers[2]))], +} + + +describeConfigsResponseDataSerializers: Dict[int, BaseSerializer[DescribeConfigsResponseData]] = { + version: NamedTupleSerializer(DescribeConfigsResponseData, schema) + for version, schema in describeConfigsResponseDataSchemas.items() +} diff --git a/esque/protocol/api/describe_delegation_token.py b/esque/protocol/api/describe_delegation_token.py new file mode 100644 index 00000000..5767ecb2 --- /dev/null +++ b/esque/protocol/api/describe_delegation_token.py @@ -0,0 +1,187 @@ +# FIXME autogenerated module, check for errors! 
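+# Review note: in the token_details schemas below, a nested struct is embedded by referencing another field serializer directly (owner), while a repeated field is wrapped in ArraySerializer (renewers) - the same struct-versus-array convention used throughout the package.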
+from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + NamedTupleSerializer, + Schema, + bytesSerializer, + int16Serializer, + int32Serializer, + int64Serializer, + stringSerializer, +) + + +@dataclass +class Owners: + # principalType of the Kafka principal + principal_type: "str" # STRING + + # name of the Kafka principal + name: "str" # STRING + + +@dataclass +class DescribeDelegationTokenRequestData(RequestData): + # An array of token owners. + owners: List["Owners"] + + @staticmethod + def api_key() -> int: + return ApiKey.DESCRIBE_DELEGATION_TOKEN # == 41 + + +@dataclass +class Owner: + # principalType of the Kafka principal + principal_type: "str" # STRING + + # name of the Kafka principal + name: "str" # STRING + + +@dataclass +class Renewers: + # principalType of the Kafka principal + principal_type: "str" # STRING + + # name of the Kafka principal + name: "str" # STRING + + +@dataclass +class TokenDetails: + # token owner. + owner: "Owner" + + # timestamp (in msec) when this token was generated. + issue_timestamp: "int" # INT64 + + # timestamp (in msec) at which this token expires. + expiry_timestamp: "int" # INT64 + + # max life time of this token. + max_timestamp: "int" # INT64 + + # UUID to ensure uniqueness. + token_id: "str" # STRING + + # HMAC of the delegation token to be expired. + hmac: "bytes" # BYTES + + # An array of token renewers. Renewer is an Kafka PrincipalType and name string, who is allowed to + # renew this token before the max lifetime expires. + renewers: List["Renewers"] + + +@dataclass +class DescribeDelegationTokenResponseData(ResponseData): + # Response error code + error_code: "int" # INT16 + + token_details: List["TokenDetails"] + + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + @staticmethod + def api_key() -> int: + return ApiKey.DESCRIBE_DELEGATION_TOKEN # == 41 + + +ownersSchemas: Dict[int, Schema] = { + 0: [("principal_type", stringSerializer), ("name", stringSerializer)], + 1: [("principal_type", stringSerializer), ("name", stringSerializer)], +} + + +ownersSerializers: Dict[int, BaseSerializer[Owners]] = { + version: NamedTupleSerializer(Owners, schema) for version, schema in ownersSchemas.items() +} + + +describeDelegationTokenRequestDataSchemas: Dict[int, Schema] = { + 0: [("owners", ArraySerializer(ownersSerializers[0]))], + 1: [("owners", ArraySerializer(ownersSerializers[1]))], +} + + +describeDelegationTokenRequestDataSerializers: Dict[int, BaseSerializer[DescribeDelegationTokenRequestData]] = { + version: NamedTupleSerializer(DescribeDelegationTokenRequestData, schema) + for version, schema in describeDelegationTokenRequestDataSchemas.items() +} + + +ownerSchemas: Dict[int, Schema] = { + 0: [("principal_type", stringSerializer), ("name", stringSerializer)], + 1: [("principal_type", stringSerializer), ("name", stringSerializer)], +} + + +ownerSerializers: Dict[int, BaseSerializer[Owner]] = { + version: NamedTupleSerializer(Owner, schema) for version, schema in ownerSchemas.items() +} + + +renewersSchemas: Dict[int, Schema] = { + 0: [("principal_type", stringSerializer), ("name", stringSerializer)], + 1: [("principal_type", stringSerializer), ("name", stringSerializer)], +} + + +renewersSerializers: Dict[int, BaseSerializer[Renewers]] = { + version: 
NamedTupleSerializer(Renewers, schema) for version, schema in renewersSchemas.items() +} + + +tokenDetailsSchemas: Dict[int, Schema] = { + 0: [ + ("owner", ownerSerializers[0]), + ("issue_timestamp", int64Serializer), + ("expiry_timestamp", int64Serializer), + ("max_timestamp", int64Serializer), + ("token_id", stringSerializer), + ("hmac", bytesSerializer), + ("renewers", ArraySerializer(renewersSerializers[0])), + ], + 1: [ + ("owner", ownerSerializers[1]), + ("issue_timestamp", int64Serializer), + ("expiry_timestamp", int64Serializer), + ("max_timestamp", int64Serializer), + ("token_id", stringSerializer), + ("hmac", bytesSerializer), + ("renewers", ArraySerializer(renewersSerializers[1])), + ], +} + + +tokenDetailsSerializers: Dict[int, BaseSerializer[TokenDetails]] = { + version: NamedTupleSerializer(TokenDetails, schema) for version, schema in tokenDetailsSchemas.items() +} + + +describeDelegationTokenResponseDataSchemas: Dict[int, Schema] = { + 0: [ + ("error_code", int16Serializer), + ("token_details", ArraySerializer(tokenDetailsSerializers[0])), + ("throttle_time_ms", int32Serializer), + ], + 1: [ + ("error_code", int16Serializer), + ("token_details", ArraySerializer(tokenDetailsSerializers[1])), + ("throttle_time_ms", int32Serializer), + ], +} + + +describeDelegationTokenResponseDataSerializers: Dict[int, BaseSerializer[DescribeDelegationTokenResponseData]] = { + version: NamedTupleSerializer(DescribeDelegationTokenResponseData, schema) + for version, schema in describeDelegationTokenResponseDataSchemas.items() +} diff --git a/esque/protocol/api/describe_groups.py b/esque/protocol/api/describe_groups.py new file mode 100644 index 00000000..6e1e5446 --- /dev/null +++ b/esque/protocol/api/describe_groups.py @@ -0,0 +1,197 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + booleanSerializer, + bytesSerializer, + int16Serializer, + int32Serializer, + stringSerializer, +) + + +@dataclass +class DescribeGroupsRequestData(RequestData): + # The names of the groups to describe + groups: List["str"] # STRING + + # Whether to include authorized operations. + include_authorized_operations: "bool" # BOOLEAN + + @staticmethod + def api_key() -> int: + return ApiKey.DESCRIBE_GROUPS # == 15 + + +@dataclass +class Members: + # The member ID assigned by the group coordinator. + member_id: "str" # STRING + + # The client ID used in the member's latest join group request. + client_id: "str" # STRING + + # The client host. + client_host: "str" # STRING + + # The metadata corresponding to the current group protocol in use. + member_metadata: "bytes" # BYTES + + # The current assignment provided by the group leader. + member_assignment: "bytes" # BYTES + + +@dataclass +class Groups: + # The describe error, or 0 if there was no error. + error_code: "int" # INT16 + + # The group ID string. + group_id: "str" # STRING + + # The group state string, or the empty string. + group_state: "str" # STRING + + # The group protocol type, or the empty string. + protocol_type: "str" # STRING + + # The group protocol data, or the empty string. + protocol_data: "str" # STRING + + # The group members. + members: List["Members"] + + # 32-bit bitfield to represent authorized operations for this group. 
+ authorized_operations: "int" # INT32 + + +@dataclass +class DescribeGroupsResponseData(ResponseData): + # The duration in milliseconds for which the request was throttled due to a quota violation, or zero + # if the request did not violate any quota. + throttle_time_ms: "int" # INT32 + + # Each described group. + groups: List["Groups"] + + @staticmethod + def api_key() -> int: + return ApiKey.DESCRIBE_GROUPS # == 15 + + +describeGroupsRequestDataSchemas: Dict[int, Schema] = { + 0: [("groups", ArraySerializer(stringSerializer)), ("include_authorized_operations", DummySerializer(bool()))], + 1: [("groups", ArraySerializer(stringSerializer)), ("include_authorized_operations", DummySerializer(bool()))], + 2: [("groups", ArraySerializer(stringSerializer)), ("include_authorized_operations", DummySerializer(bool()))], + 3: [("groups", ArraySerializer(stringSerializer)), ("include_authorized_operations", booleanSerializer)], +} + + +describeGroupsRequestDataSerializers: Dict[int, BaseSerializer[DescribeGroupsRequestData]] = { + version: NamedTupleSerializer(DescribeGroupsRequestData, schema) + for version, schema in describeGroupsRequestDataSchemas.items() +} + + +membersSchemas: Dict[int, Schema] = { + 0: [ + ("member_id", stringSerializer), + ("client_id", stringSerializer), + ("client_host", stringSerializer), + ("member_metadata", bytesSerializer), + ("member_assignment", bytesSerializer), + ], + 1: [ + ("member_id", stringSerializer), + ("client_id", stringSerializer), + ("client_host", stringSerializer), + ("member_metadata", bytesSerializer), + ("member_assignment", bytesSerializer), + ], + 2: [ + ("member_id", stringSerializer), + ("client_id", stringSerializer), + ("client_host", stringSerializer), + ("member_metadata", bytesSerializer), + ("member_assignment", bytesSerializer), + ], + 3: [ + ("member_id", stringSerializer), + ("client_id", stringSerializer), + ("client_host", stringSerializer), + ("member_metadata", bytesSerializer), + ("member_assignment", bytesSerializer), + ], +} + + +membersSerializers: Dict[int, BaseSerializer[Members]] = { + version: NamedTupleSerializer(Members, schema) for version, schema in membersSchemas.items() +} + + +groupsSchemas: Dict[int, Schema] = { + 0: [ + ("error_code", int16Serializer), + ("group_id", stringSerializer), + ("group_state", stringSerializer), + ("protocol_type", stringSerializer), + ("protocol_data", stringSerializer), + ("members", ArraySerializer(membersSerializers[0])), + ("authorized_operations", DummySerializer(int())), + ], + 1: [ + ("error_code", int16Serializer), + ("group_id", stringSerializer), + ("group_state", stringSerializer), + ("protocol_type", stringSerializer), + ("protocol_data", stringSerializer), + ("members", ArraySerializer(membersSerializers[1])), + ("authorized_operations", DummySerializer(int())), + ], + 2: [ + ("error_code", int16Serializer), + ("group_id", stringSerializer), + ("group_state", stringSerializer), + ("protocol_type", stringSerializer), + ("protocol_data", stringSerializer), + ("members", ArraySerializer(membersSerializers[2])), + ("authorized_operations", DummySerializer(int())), + ], + 3: [ + ("error_code", int16Serializer), + ("group_id", stringSerializer), + ("group_state", stringSerializer), + ("protocol_type", stringSerializer), + ("protocol_data", stringSerializer), + ("members", ArraySerializer(membersSerializers[3])), + ("authorized_operations", int32Serializer), + ], +} + + +groupsSerializers: Dict[int, BaseSerializer[Groups]] = { + version: NamedTupleSerializer(Groups, schema) for 
version, schema in groupsSchemas.items() +} + + +describeGroupsResponseDataSchemas: Dict[int, Schema] = { + 0: [("groups", ArraySerializer(groupsSerializers[0])), ("throttle_time_ms", DummySerializer(int()))], + 1: [("throttle_time_ms", int32Serializer), ("groups", ArraySerializer(groupsSerializers[1]))], + 2: [("throttle_time_ms", int32Serializer), ("groups", ArraySerializer(groupsSerializers[2]))], + 3: [("throttle_time_ms", int32Serializer), ("groups", ArraySerializer(groupsSerializers[3]))], +} + + +describeGroupsResponseDataSerializers: Dict[int, BaseSerializer[DescribeGroupsResponseData]] = { + version: NamedTupleSerializer(DescribeGroupsResponseData, schema) + for version, schema in describeGroupsResponseDataSchemas.items() +} diff --git a/esque/protocol/api/describe_log_dirs.py b/esque/protocol/api/describe_log_dirs.py new file mode 100644 index 00000000..bd1e0dd3 --- /dev/null +++ b/esque/protocol/api/describe_log_dirs.py @@ -0,0 +1,170 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + NamedTupleSerializer, + Schema, + booleanSerializer, + int16Serializer, + int32Serializer, + int64Serializer, + stringSerializer, +) + + +@dataclass +class Topics: + # Name of topic + topic: "str" # STRING + + # List of partition ids of the topic. + partitions: List["int"] # INT32 + + +@dataclass +class DescribeLogDirsRequestData(RequestData): + topics: List["Topics"] + + @staticmethod + def api_key() -> int: + return ApiKey.DESCRIBE_LOG_DIRS # == 35 + + +@dataclass +class Partitions: + # Topic partition id + partition: "int" # INT32 + + # The size of the log segments of the partition in bytes. + size: "int" # INT64 + + # The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or + # current replica's LEO (if it is the future log for the partition) + offset_lag: "int" # INT64 + + # True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of the + # replica in the future. + is_future: "bool" # BOOLEAN + + +@dataclass +class Topics: + # Name of topic + topic: "str" # STRING + + partitions: List["Partitions"] + + +@dataclass +class LogDirs: + # Response error code + error_code: "int" # INT16 + + # The absolute log directory path. 
+ log_dir: "str" # STRING + + topics: List["Topics"] + + +@dataclass +class DescribeLogDirsResponseData(ResponseData): + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + log_dirs: List["LogDirs"] + + @staticmethod + def api_key() -> int: + return ApiKey.DESCRIBE_LOG_DIRS # == 35 + + +topicsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partitions", ArraySerializer(int32Serializer))], + 1: [("topic", stringSerializer), ("partitions", ArraySerializer(int32Serializer))], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +describeLogDirsRequestDataSchemas: Dict[int, Schema] = { + 0: [("topics", ArraySerializer(topicsSerializers[0]))], + 1: [("topics", ArraySerializer(topicsSerializers[1]))], +} + + +describeLogDirsRequestDataSerializers: Dict[int, BaseSerializer[DescribeLogDirsRequestData]] = { + version: NamedTupleSerializer(DescribeLogDirsRequestData, schema) + for version, schema in describeLogDirsRequestDataSchemas.items() +} + + +partitionsSchemas: Dict[int, Schema] = { + 0: [ + ("partition", int32Serializer), + ("size", int64Serializer), + ("offset_lag", int64Serializer), + ("is_future", booleanSerializer), + ], + 1: [ + ("partition", int32Serializer), + ("size", int64Serializer), + ("offset_lag", int64Serializer), + ("is_future", booleanSerializer), + ], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[0]))], + 1: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[1]))], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +logDirsSchemas: Dict[int, Schema] = { + 0: [ + ("error_code", int16Serializer), + ("log_dir", stringSerializer), + ("topics", ArraySerializer(topicsSerializers[0])), + ], + 1: [ + ("error_code", int16Serializer), + ("log_dir", stringSerializer), + ("topics", ArraySerializer(topicsSerializers[1])), + ], +} + + +logDirsSerializers: Dict[int, BaseSerializer[LogDirs]] = { + version: NamedTupleSerializer(LogDirs, schema) for version, schema in logDirsSchemas.items() +} + + +describeLogDirsResponseDataSchemas: Dict[int, Schema] = { + 0: [("throttle_time_ms", int32Serializer), ("log_dirs", ArraySerializer(logDirsSerializers[0]))], + 1: [("throttle_time_ms", int32Serializer), ("log_dirs", ArraySerializer(logDirsSerializers[1]))], +} + + +describeLogDirsResponseDataSerializers: Dict[int, BaseSerializer[DescribeLogDirsResponseData]] = { + version: NamedTupleSerializer(DescribeLogDirsResponseData, schema) + for version, schema in describeLogDirsResponseDataSchemas.items() +} diff --git a/esque/protocol/api/elect_preferred_leaders.py b/esque/protocol/api/elect_preferred_leaders.py new file mode 100644 index 00000000..ac5162a5 --- /dev/null +++ b/esque/protocol/api/elect_preferred_leaders.py @@ -0,0 +1,134 @@ +# FIXME autogenerated module, check for errors! 
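+# NOTE: every module in this package follows the same layout: one dataclass per
+# protocol structure, plus per-version Schema and serializer dicts keyed by API
+# version. A rough usage sketch (illustrative only -- the exact BaseSerializer
+# method names are an assumption, check esque.protocol.serializers for the real
+# API):
+#
+#     request = ElectPreferredLeadersRequestData(
+#         topic_partitions=[TopicPartitions(topic="my-topic", partition_id=[0, 1])],
+#         timeout_ms=30000,
+#     )
+#     serializer = electPreferredLeadersRequestDataSerializers[0]  # API version 0
+#     payload = serializer.encode(request)  # assumed method name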
+from typing import Dict, List, Optional
+
+from dataclasses import dataclass
+
+from esque.protocol.api.base import ApiKey, RequestData, ResponseData
+from esque.protocol.serializers import (
+    ArraySerializer,
+    BaseSerializer,
+    NamedTupleSerializer,
+    Schema,
+    int16Serializer,
+    int32Serializer,
+    nullableStringSerializer,
+    stringSerializer,
+)
+
+
+@dataclass
+class TopicPartitions:
+    # The name of a topic.
+    topic: "str"  # STRING
+
+    # The partitions of this topic whose preferred leader should be elected
+    partition_id: List["int"]  # INT32
+
+
+@dataclass
+class ElectPreferredLeadersRequestData(RequestData):
+    # The topic partitions to elect the preferred leader of.
+    topic_partitions: List["TopicPartitions"]
+
+    # The time in ms to wait for the election to complete.
+    timeout_ms: "int"  # INT32
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.ELECT_PREFERRED_LEADERS  # == 43
+
+
+@dataclass
+class PartitionResult:
+    # The partition id
+    partition_id: "int"  # INT32
+
+    # The result error, or zero if there was no error.
+    error_code: "int"  # INT16
+
+    # The result message, or null if there was no error.
+    error_message: "Optional[str]"  # NULLABLE_STRING
+
+
+@dataclass
+class ReplicaElectionResults:
+    # The topic name
+    topic: "str"  # STRING
+
+    # The results for each partition
+    partition_result: List["PartitionResult"]
+
+
+@dataclass
+class ElectPreferredLeadersResponseData(ResponseData):
+    # The duration in milliseconds for which the request was throttled due to a quota violation, or zero
+    # if the request did not violate any quota.
+    throttle_time_ms: "int"  # INT32
+
+    # The election results, or an empty array if the requester did not have permission and the request
+    # asks for all partitions.
+    replica_election_results: List["ReplicaElectionResults"]
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.ELECT_PREFERRED_LEADERS  # == 43
+
+
+topicPartitionsSchemas: Dict[int, Schema] = {
+    0: [("topic", stringSerializer), ("partition_id", ArraySerializer(int32Serializer))]
+}
+
+
+topicPartitionsSerializers: Dict[int, BaseSerializer[TopicPartitions]] = {
+    version: NamedTupleSerializer(TopicPartitions, schema) for version, schema in topicPartitionsSchemas.items()
+}
+
+
+electPreferredLeadersRequestDataSchemas: Dict[int, Schema] = {
+    0: [("topic_partitions", ArraySerializer(topicPartitionsSerializers[0])), ("timeout_ms", int32Serializer)]
+}
+
+
+electPreferredLeadersRequestDataSerializers: Dict[int, BaseSerializer[ElectPreferredLeadersRequestData]] = {
+    version: NamedTupleSerializer(ElectPreferredLeadersRequestData, schema)
+    for version, schema in electPreferredLeadersRequestDataSchemas.items()
+}
+
+
+partitionResultSchemas: Dict[int, Schema] = {
+    0: [
+        ("partition_id", int32Serializer),
+        ("error_code", int16Serializer),
+        ("error_message", nullableStringSerializer),
+    ]
+}
+
+
+partitionResultSerializers: Dict[int, BaseSerializer[PartitionResult]] = {
+    version: NamedTupleSerializer(PartitionResult, schema) for version, schema in partitionResultSchemas.items()
+}
+
+
+replicaElectionResultsSchemas: Dict[int, Schema] = {
+    0: [("topic", stringSerializer), ("partition_result", ArraySerializer(partitionResultSerializers[0]))]
+}
+
+
+replicaElectionResultsSerializers: Dict[int, BaseSerializer[ReplicaElectionResults]] = {
+    version: NamedTupleSerializer(ReplicaElectionResults, schema)
+    for version, schema in replicaElectionResultsSchemas.items()
+}
+
+
+electPreferredLeadersResponseDataSchemas: Dict[int, Schema] = {
+    0: [
+        ("throttle_time_ms", int32Serializer),
+        ("replica_election_results", ArraySerializer(replicaElectionResultsSerializers[0])),
+    ]
+}
+
+
+electPreferredLeadersResponseDataSerializers: Dict[int, BaseSerializer[ElectPreferredLeadersResponseData]] = {
+    version: NamedTupleSerializer(ElectPreferredLeadersResponseData, schema)
+    for version, schema in electPreferredLeadersResponseDataSchemas.items()
+}
diff --git a/esque/protocol/api/end_txn.py b/esque/protocol/api/end_txn.py
new file mode 100644
index 00000000..e7da736e
--- /dev/null
+++ b/esque/protocol/api/end_txn.py
@@ -0,0 +1,81 @@
+# FIXME autogenerated module, check for errors!
+from typing import Dict
+
+from dataclasses import dataclass
+
+from esque.protocol.api.base import ApiKey, RequestData, ResponseData
+from esque.protocol.serializers import (
+    BaseSerializer,
+    NamedTupleSerializer,
+    Schema,
+    booleanSerializer,
+    int16Serializer,
+    int32Serializer,
+    int64Serializer,
+    stringSerializer,
+)
+
+
+@dataclass
+class EndTxnRequestData(RequestData):
+    # The transactional id corresponding to the transaction.
+    transactional_id: "str"  # STRING
+
+    # Current producer id in use by the transactional id.
+    producer_id: "int"  # INT64
+
+    # Current epoch associated with the producer id.
+    producer_epoch: "int"  # INT16
+
+    # The result of the transaction (0 = ABORT, 1 = COMMIT)
+    transaction_result: "bool"  # BOOLEAN
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.END_TXN  # == 26
+
+
+@dataclass
+class EndTxnResponseData(ResponseData):
+    # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the
+    # request did not violate any quota)
+    throttle_time_ms: "int"  # INT32
+
+    # Response error code
+    error_code: "int"  # INT16
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.END_TXN  # == 26
+
+
+endTxnRequestDataSchemas: Dict[int, Schema] = {
+    0: [
+        ("transactional_id", stringSerializer),
+        ("producer_id", int64Serializer),
+        ("producer_epoch", int16Serializer),
+        ("transaction_result", booleanSerializer),
+    ],
+    1: [
+        ("transactional_id", stringSerializer),
+        ("producer_id", int64Serializer),
+        ("producer_epoch", int16Serializer),
+        ("transaction_result", booleanSerializer),
+    ],
+}
+
+
+endTxnRequestDataSerializers: Dict[int, BaseSerializer[EndTxnRequestData]] = {
+    version: NamedTupleSerializer(EndTxnRequestData, schema) for version, schema in endTxnRequestDataSchemas.items()
+}
+
+
+endTxnResponseDataSchemas: Dict[int, Schema] = {
+    0: [("throttle_time_ms", int32Serializer), ("error_code", int16Serializer)],
+    1: [("throttle_time_ms", int32Serializer), ("error_code", int16Serializer)],
+}
+
+
+endTxnResponseDataSerializers: Dict[int, BaseSerializer[EndTxnResponseData]] = {
+    version: NamedTupleSerializer(EndTxnResponseData, schema) for version, schema in endTxnResponseDataSchemas.items()
+}
diff --git a/esque/protocol/api/expire_delegation_token.py b/esque/protocol/api/expire_delegation_token.py
new file mode 100644
index 00000000..089a3994
--- /dev/null
+++ b/esque/protocol/api/expire_delegation_token.py
@@ -0,0 +1,69 @@
+# FIXME autogenerated module, check for errors!
+from typing import Dict
+
+from dataclasses import dataclass
+
+from esque.protocol.api.base import ApiKey, RequestData, ResponseData
+from esque.protocol.serializers import (
+    BaseSerializer,
+    NamedTupleSerializer,
+    Schema,
+    bytesSerializer,
+    int16Serializer,
+    int32Serializer,
+    int64Serializer,
+)
+
+
+@dataclass
+class ExpireDelegationTokenRequestData(RequestData):
+    # HMAC of the delegation token to be expired.
+    hmac: "bytes"  # BYTES
+
+    # Expiry time period in milliseconds.
+    expiry_time_period: "int"  # INT64
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.EXPIRE_DELEGATION_TOKEN  # == 40
+
+
+@dataclass
+class ExpireDelegationTokenResponseData(ResponseData):
+    # Response error code
+    error_code: "int"  # INT16
+
+    # Timestamp (in milliseconds) at which this token expires.
+    expiry_timestamp: "int"  # INT64
+
+    # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the
+    # request did not violate any quota)
+    throttle_time_ms: "int"  # INT32
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.EXPIRE_DELEGATION_TOKEN  # == 40
+
+
+expireDelegationTokenRequestDataSchemas: Dict[int, Schema] = {
+    0: [("hmac", bytesSerializer), ("expiry_time_period", int64Serializer)],
+    1: [("hmac", bytesSerializer), ("expiry_time_period", int64Serializer)],
+}
+
+
+expireDelegationTokenRequestDataSerializers: Dict[int, BaseSerializer[ExpireDelegationTokenRequestData]] = {
+    version: NamedTupleSerializer(ExpireDelegationTokenRequestData, schema)
+    for version, schema in expireDelegationTokenRequestDataSchemas.items()
+}
+
+
+expireDelegationTokenResponseDataSchemas: Dict[int, Schema] = {
+    0: [("error_code", int16Serializer), ("expiry_timestamp", int64Serializer), ("throttle_time_ms", int32Serializer)],
+    1: [("error_code", int16Serializer), ("expiry_timestamp", int64Serializer), ("throttle_time_ms", int32Serializer)],
+}
+
+
+expireDelegationTokenResponseDataSerializers: Dict[int, BaseSerializer[ExpireDelegationTokenResponseData]] = {
+    version: NamedTupleSerializer(ExpireDelegationTokenResponseData, schema)
+    for version, schema in expireDelegationTokenResponseDataSchemas.items()
+}
diff --git a/esque/protocol/api/fetch.py b/esque/protocol/api/fetch.py
new file mode 100644
index 00000000..45fb8314
--- /dev/null
+++ b/esque/protocol/api/fetch.py
@@ -0,0 +1,710 @@
+# FIXME autogenerated module, check for errors!
+from typing import Dict, List
+
+from dataclasses import dataclass
+
+from esque.protocol.api.base import ApiKey, RequestData, ResponseData
+from esque.protocol.serializers import (
+    ArraySerializer,
+    BaseSerializer,
+    DummySerializer,
+    NamedTupleSerializer,
+    Schema,
+    int16Serializer,
+    int32Serializer,
+    int64Serializer,
+    int8Serializer,
+    recordsSerializer,
+    stringSerializer,
+)
+
+
+@dataclass
+class Partitions:
+    # Topic partition id
+    partition: "int"  # INT32
+
+    # The current leader epoch, if provided, is used to fence consumers/replicas with old metadata. If the
+    # epoch provided by the client is larger than the current epoch known to the broker, then the
+    # UNKNOWN_LEADER_EPOCH error code will be returned. If the provided epoch is smaller, then the
+    # FENCED_LEADER_EPOCH error code will be returned.
+    current_leader_epoch: "int"  # INT32
+
+    # Message offset.
+    fetch_offset: "int"  # INT64
+
+    # Earliest available offset of the follower replica. The field is only used when request is sent by
+    # follower.
+    log_start_offset: "int"  # INT64
+
+    # Maximum bytes to fetch.
+    partition_max_bytes: "int"  # INT32
+
+
+@dataclass
+class Topics:
+    # Name of topic
+    topic: "str"  # STRING
+
+    # Partitions to remove from the fetch session.
+    partitions: List["Partitions"]
+
+
+@dataclass
+class ForgottenTopicsData:
+    # Name of topic
+    topic: "str"  # STRING
+
+    # Partitions to remove from the fetch session.
+    partitions: List["int"]  # INT32
+
+
+@dataclass
+class FetchRequestData(RequestData):
+    # Broker id of the follower. For normal consumers, use -1.
+ replica_id: "int" # INT32 + + # Maximum time in ms to wait for the response. + max_wait_time: "int" # INT32 + + # Minimum bytes to accumulate in the response. + min_bytes: "int" # INT32 + + # Maximum bytes to accumulate in the response. Note that this is not an absolute maximum, if the first + # message in the first non-empty partition of the fetch is larger than this value, the message will + # still be returned to ensure that progress can be made. + max_bytes: "int" # INT32 + + # This setting controls the visibility of transactional records. Using READ_UNCOMMITTED + # (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non- + # transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED + # returns all data from offsets smaller than the current LSO (last stable offset), and enables the + # inclusion of the list of aborted transactions in the result, which allows consumers to discard + # ABORTED transactional records + isolation_level: "int" # INT8 + + # The fetch session ID + session_id: "int" # INT32 + + # The fetch session epoch + session_epoch: "int" # INT32 + + # Topics to fetch in the order provided. + topics: List["Topics"] + + # Topics to remove from the fetch session. + forgotten_topics_data: List["ForgottenTopicsData"] + + # The consumer's rack id + rack_id: "str" # STRING + + @staticmethod + def api_key() -> int: + return ApiKey.FETCH # == 1 + + +@dataclass +class AbortedTransactions: + # The producer id associated with the aborted transactions + producer_id: "int" # INT64 + + # The first offset in the aborted transaction + first_offset: "int" # INT64 + + +@dataclass +class PartitionHeader: + # Topic partition id + partition: "int" # INT32 + + # Response error code + error_code: "int" # INT16 + + # Last committed offset. + high_watermark: "int" # INT64 + + # The last stable offset (or LSO) of the partition. This is the last offset such that the state of all + # transactional records prior to this offset have been decided (ABORTED or COMMITTED) + last_stable_offset: "int" # INT64 + + # Earliest available offset. + log_start_offset: "int" # INT64 + + aborted_transactions: List["AbortedTransactions"] + + # The ID of the replica that the consumer should prefer. 
+ preferred_read_replica: "int" # INT32 + + +@dataclass +class PartitionResponses: + partition_header: "PartitionHeader" + + record_set: "Records" # RECORDS + + +@dataclass +class Responses: + # Name of topic + topic: "str" # STRING + + partition_responses: List["PartitionResponses"] + + +@dataclass +class FetchResponseData(ResponseData): + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + # Response error code + error_code: "int" # INT16 + + # The fetch session ID + session_id: "int" # INT32 + + responses: List["Responses"] + + @staticmethod + def api_key() -> int: + return ApiKey.FETCH # == 1 + + +partitionsSchemas: Dict[int, Schema] = { + 0: [ + ("partition", int32Serializer), + ("fetch_offset", int64Serializer), + ("partition_max_bytes", int32Serializer), + ("current_leader_epoch", DummySerializer(int())), + ("log_start_offset", DummySerializer(int())), + ], + 1: [ + ("partition", int32Serializer), + ("fetch_offset", int64Serializer), + ("partition_max_bytes", int32Serializer), + ("current_leader_epoch", DummySerializer(int())), + ("log_start_offset", DummySerializer(int())), + ], + 2: [ + ("partition", int32Serializer), + ("fetch_offset", int64Serializer), + ("partition_max_bytes", int32Serializer), + ("current_leader_epoch", DummySerializer(int())), + ("log_start_offset", DummySerializer(int())), + ], + 3: [ + ("partition", int32Serializer), + ("fetch_offset", int64Serializer), + ("partition_max_bytes", int32Serializer), + ("current_leader_epoch", DummySerializer(int())), + ("log_start_offset", DummySerializer(int())), + ], + 4: [ + ("partition", int32Serializer), + ("fetch_offset", int64Serializer), + ("partition_max_bytes", int32Serializer), + ("current_leader_epoch", DummySerializer(int())), + ("log_start_offset", DummySerializer(int())), + ], + 5: [ + ("partition", int32Serializer), + ("fetch_offset", int64Serializer), + ("log_start_offset", int64Serializer), + ("partition_max_bytes", int32Serializer), + ("current_leader_epoch", DummySerializer(int())), + ], + 6: [ + ("partition", int32Serializer), + ("fetch_offset", int64Serializer), + ("log_start_offset", int64Serializer), + ("partition_max_bytes", int32Serializer), + ("current_leader_epoch", DummySerializer(int())), + ], + 7: [ + ("partition", int32Serializer), + ("fetch_offset", int64Serializer), + ("log_start_offset", int64Serializer), + ("partition_max_bytes", int32Serializer), + ("current_leader_epoch", DummySerializer(int())), + ], + 8: [ + ("partition", int32Serializer), + ("fetch_offset", int64Serializer), + ("log_start_offset", int64Serializer), + ("partition_max_bytes", int32Serializer), + ("current_leader_epoch", DummySerializer(int())), + ], + 9: [ + ("partition", int32Serializer), + ("current_leader_epoch", int32Serializer), + ("fetch_offset", int64Serializer), + ("log_start_offset", int64Serializer), + ("partition_max_bytes", int32Serializer), + ], + 10: [ + ("partition", int32Serializer), + ("current_leader_epoch", int32Serializer), + ("fetch_offset", int64Serializer), + ("log_start_offset", int64Serializer), + ("partition_max_bytes", int32Serializer), + ], + 11: [ + ("partition", int32Serializer), + ("current_leader_epoch", int32Serializer), + ("fetch_offset", int64Serializer), + ("log_start_offset", int64Serializer), + ("partition_max_bytes", int32Serializer), + ], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) 
for version, schema in partitionsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[0]))], + 1: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[1]))], + 2: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[2]))], + 3: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[3]))], + 4: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[4]))], + 5: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[5]))], + 6: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[6]))], + 7: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[7]))], + 8: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[8]))], + 9: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[9]))], + 10: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[10]))], + 11: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[11]))], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +forgottenTopicsDataSchemas: Dict[int, Schema] = { + 7: [("topic", stringSerializer), ("partitions", ArraySerializer(int32Serializer))], + 8: [("topic", stringSerializer), ("partitions", ArraySerializer(int32Serializer))], + 9: [("topic", stringSerializer), ("partitions", ArraySerializer(int32Serializer))], + 10: [("topic", stringSerializer), ("partitions", ArraySerializer(int32Serializer))], + 11: [("topic", stringSerializer), ("partitions", ArraySerializer(int32Serializer))], +} + + +forgottenTopicsDataSerializers: Dict[int, BaseSerializer[ForgottenTopicsData]] = { + version: NamedTupleSerializer(ForgottenTopicsData, schema) + for version, schema in forgottenTopicsDataSchemas.items() +} + + +fetchRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("replica_id", int32Serializer), + ("max_wait_time", int32Serializer), + ("min_bytes", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[0])), + ("max_bytes", DummySerializer(int())), + ("isolation_level", DummySerializer(int())), + ("session_id", DummySerializer(int())), + ("session_epoch", DummySerializer(int())), + ("forgotten_topics_data", DummySerializer([])), + ("rack_id", DummySerializer(str())), + ], + 1: [ + ("replica_id", int32Serializer), + ("max_wait_time", int32Serializer), + ("min_bytes", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[1])), + ("max_bytes", DummySerializer(int())), + ("isolation_level", DummySerializer(int())), + ("session_id", DummySerializer(int())), + ("session_epoch", DummySerializer(int())), + ("forgotten_topics_data", DummySerializer([])), + ("rack_id", DummySerializer(str())), + ], + 2: [ + ("replica_id", int32Serializer), + ("max_wait_time", int32Serializer), + ("min_bytes", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[2])), + ("max_bytes", DummySerializer(int())), + ("isolation_level", DummySerializer(int())), + ("session_id", DummySerializer(int())), + ("session_epoch", DummySerializer(int())), + ("forgotten_topics_data", DummySerializer([])), + ("rack_id", DummySerializer(str())), + ], + 3: [ + ("replica_id", int32Serializer), + ("max_wait_time", int32Serializer), + 
("min_bytes", int32Serializer), + ("max_bytes", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[3])), + ("isolation_level", DummySerializer(int())), + ("session_id", DummySerializer(int())), + ("session_epoch", DummySerializer(int())), + ("forgotten_topics_data", DummySerializer([])), + ("rack_id", DummySerializer(str())), + ], + 4: [ + ("replica_id", int32Serializer), + ("max_wait_time", int32Serializer), + ("min_bytes", int32Serializer), + ("max_bytes", int32Serializer), + ("isolation_level", int8Serializer), + ("topics", ArraySerializer(topicsSerializers[4])), + ("session_id", DummySerializer(int())), + ("session_epoch", DummySerializer(int())), + ("forgotten_topics_data", DummySerializer([])), + ("rack_id", DummySerializer(str())), + ], + 5: [ + ("replica_id", int32Serializer), + ("max_wait_time", int32Serializer), + ("min_bytes", int32Serializer), + ("max_bytes", int32Serializer), + ("isolation_level", int8Serializer), + ("topics", ArraySerializer(topicsSerializers[5])), + ("session_id", DummySerializer(int())), + ("session_epoch", DummySerializer(int())), + ("forgotten_topics_data", DummySerializer([])), + ("rack_id", DummySerializer(str())), + ], + 6: [ + ("replica_id", int32Serializer), + ("max_wait_time", int32Serializer), + ("min_bytes", int32Serializer), + ("max_bytes", int32Serializer), + ("isolation_level", int8Serializer), + ("topics", ArraySerializer(topicsSerializers[6])), + ("session_id", DummySerializer(int())), + ("session_epoch", DummySerializer(int())), + ("forgotten_topics_data", DummySerializer([])), + ("rack_id", DummySerializer(str())), + ], + 7: [ + ("replica_id", int32Serializer), + ("max_wait_time", int32Serializer), + ("min_bytes", int32Serializer), + ("max_bytes", int32Serializer), + ("isolation_level", int8Serializer), + ("session_id", int32Serializer), + ("session_epoch", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[7])), + ("forgotten_topics_data", ArraySerializer(forgottenTopicsDataSerializers[7])), + ("rack_id", DummySerializer(str())), + ], + 8: [ + ("replica_id", int32Serializer), + ("max_wait_time", int32Serializer), + ("min_bytes", int32Serializer), + ("max_bytes", int32Serializer), + ("isolation_level", int8Serializer), + ("session_id", int32Serializer), + ("session_epoch", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[8])), + ("forgotten_topics_data", ArraySerializer(forgottenTopicsDataSerializers[8])), + ("rack_id", DummySerializer(str())), + ], + 9: [ + ("replica_id", int32Serializer), + ("max_wait_time", int32Serializer), + ("min_bytes", int32Serializer), + ("max_bytes", int32Serializer), + ("isolation_level", int8Serializer), + ("session_id", int32Serializer), + ("session_epoch", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[9])), + ("forgotten_topics_data", ArraySerializer(forgottenTopicsDataSerializers[9])), + ("rack_id", DummySerializer(str())), + ], + 10: [ + ("replica_id", int32Serializer), + ("max_wait_time", int32Serializer), + ("min_bytes", int32Serializer), + ("max_bytes", int32Serializer), + ("isolation_level", int8Serializer), + ("session_id", int32Serializer), + ("session_epoch", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[10])), + ("forgotten_topics_data", ArraySerializer(forgottenTopicsDataSerializers[10])), + ("rack_id", DummySerializer(str())), + ], + 11: [ + ("replica_id", int32Serializer), + ("max_wait_time", int32Serializer), + ("min_bytes", int32Serializer), + ("max_bytes", int32Serializer), + ("isolation_level", int8Serializer), + 
("session_id", int32Serializer), + ("session_epoch", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[11])), + ("forgotten_topics_data", ArraySerializer(forgottenTopicsDataSerializers[11])), + ("rack_id", stringSerializer), + ], +} + + +fetchRequestDataSerializers: Dict[int, BaseSerializer[FetchRequestData]] = { + version: NamedTupleSerializer(FetchRequestData, schema) for version, schema in fetchRequestDataSchemas.items() +} + + +abortedTransactionsSchemas: Dict[int, Schema] = { + 4: [("producer_id", int64Serializer), ("first_offset", int64Serializer)], + 5: [("producer_id", int64Serializer), ("first_offset", int64Serializer)], + 6: [("producer_id", int64Serializer), ("first_offset", int64Serializer)], + 7: [("producer_id", int64Serializer), ("first_offset", int64Serializer)], + 8: [("producer_id", int64Serializer), ("first_offset", int64Serializer)], + 9: [("producer_id", int64Serializer), ("first_offset", int64Serializer)], + 10: [("producer_id", int64Serializer), ("first_offset", int64Serializer)], + 11: [("producer_id", int64Serializer), ("first_offset", int64Serializer)], +} + + +abortedTransactionsSerializers: Dict[int, BaseSerializer[AbortedTransactions]] = { + version: NamedTupleSerializer(AbortedTransactions, schema) + for version, schema in abortedTransactionsSchemas.items() +} + + +partitionHeaderSchemas: Dict[int, Schema] = { + 0: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("high_watermark", int64Serializer), + ("last_stable_offset", DummySerializer(int())), + ("log_start_offset", DummySerializer(int())), + ("aborted_transactions", DummySerializer([])), + ("preferred_read_replica", DummySerializer(int())), + ], + 1: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("high_watermark", int64Serializer), + ("last_stable_offset", DummySerializer(int())), + ("log_start_offset", DummySerializer(int())), + ("aborted_transactions", DummySerializer([])), + ("preferred_read_replica", DummySerializer(int())), + ], + 2: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("high_watermark", int64Serializer), + ("last_stable_offset", DummySerializer(int())), + ("log_start_offset", DummySerializer(int())), + ("aborted_transactions", DummySerializer([])), + ("preferred_read_replica", DummySerializer(int())), + ], + 3: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("high_watermark", int64Serializer), + ("last_stable_offset", DummySerializer(int())), + ("log_start_offset", DummySerializer(int())), + ("aborted_transactions", DummySerializer([])), + ("preferred_read_replica", DummySerializer(int())), + ], + 4: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("high_watermark", int64Serializer), + ("last_stable_offset", int64Serializer), + ("aborted_transactions", ArraySerializer(abortedTransactionsSerializers[4])), + ("log_start_offset", DummySerializer(int())), + ("preferred_read_replica", DummySerializer(int())), + ], + 5: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("high_watermark", int64Serializer), + ("last_stable_offset", int64Serializer), + ("log_start_offset", int64Serializer), + ("aborted_transactions", ArraySerializer(abortedTransactionsSerializers[5])), + ("preferred_read_replica", DummySerializer(int())), + ], + 6: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("high_watermark", int64Serializer), + ("last_stable_offset", int64Serializer), + ("log_start_offset", int64Serializer), + 
("aborted_transactions", ArraySerializer(abortedTransactionsSerializers[6])), + ("preferred_read_replica", DummySerializer(int())), + ], + 7: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("high_watermark", int64Serializer), + ("last_stable_offset", int64Serializer), + ("log_start_offset", int64Serializer), + ("aborted_transactions", ArraySerializer(abortedTransactionsSerializers[7])), + ("preferred_read_replica", DummySerializer(int())), + ], + 8: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("high_watermark", int64Serializer), + ("last_stable_offset", int64Serializer), + ("log_start_offset", int64Serializer), + ("aborted_transactions", ArraySerializer(abortedTransactionsSerializers[8])), + ("preferred_read_replica", DummySerializer(int())), + ], + 9: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("high_watermark", int64Serializer), + ("last_stable_offset", int64Serializer), + ("log_start_offset", int64Serializer), + ("aborted_transactions", ArraySerializer(abortedTransactionsSerializers[9])), + ("preferred_read_replica", DummySerializer(int())), + ], + 10: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("high_watermark", int64Serializer), + ("last_stable_offset", int64Serializer), + ("log_start_offset", int64Serializer), + ("aborted_transactions", ArraySerializer(abortedTransactionsSerializers[10])), + ("preferred_read_replica", DummySerializer(int())), + ], + 11: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("high_watermark", int64Serializer), + ("last_stable_offset", int64Serializer), + ("log_start_offset", int64Serializer), + ("aborted_transactions", ArraySerializer(abortedTransactionsSerializers[11])), + ("preferred_read_replica", int32Serializer), + ], +} + + +partitionHeaderSerializers: Dict[int, BaseSerializer[PartitionHeader]] = { + version: NamedTupleSerializer(PartitionHeader, schema) for version, schema in partitionHeaderSchemas.items() +} + + +partitionResponsesSchemas: Dict[int, Schema] = { + 0: [("partition_header", partitionHeaderSerializers[0]), ("record_set", recordsSerializer)], + 1: [("partition_header", partitionHeaderSerializers[1]), ("record_set", recordsSerializer)], + 2: [("partition_header", partitionHeaderSerializers[2]), ("record_set", recordsSerializer)], + 3: [("partition_header", partitionHeaderSerializers[3]), ("record_set", recordsSerializer)], + 4: [("partition_header", partitionHeaderSerializers[4]), ("record_set", recordsSerializer)], + 5: [("partition_header", partitionHeaderSerializers[5]), ("record_set", recordsSerializer)], + 6: [("partition_header", partitionHeaderSerializers[6]), ("record_set", recordsSerializer)], + 7: [("partition_header", partitionHeaderSerializers[7]), ("record_set", recordsSerializer)], + 8: [("partition_header", partitionHeaderSerializers[8]), ("record_set", recordsSerializer)], + 9: [("partition_header", partitionHeaderSerializers[9]), ("record_set", recordsSerializer)], + 10: [("partition_header", partitionHeaderSerializers[10]), ("record_set", recordsSerializer)], + 11: [("partition_header", partitionHeaderSerializers[11]), ("record_set", recordsSerializer)], +} + + +partitionResponsesSerializers: Dict[int, BaseSerializer[PartitionResponses]] = { + version: NamedTupleSerializer(PartitionResponses, schema) for version, schema in partitionResponsesSchemas.items() +} + + +responsesSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partition_responses", 
ArraySerializer(partitionResponsesSerializers[0]))], + 1: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[1]))], + 2: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[2]))], + 3: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[3]))], + 4: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[4]))], + 5: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[5]))], + 6: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[6]))], + 7: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[7]))], + 8: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[8]))], + 9: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[9]))], + 10: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[10]))], + 11: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[11]))], +} + + +responsesSerializers: Dict[int, BaseSerializer[Responses]] = { + version: NamedTupleSerializer(Responses, schema) for version, schema in responsesSchemas.items() +} + + +fetchResponseDataSchemas: Dict[int, Schema] = { + 0: [ + ("responses", ArraySerializer(responsesSerializers[0])), + ("throttle_time_ms", DummySerializer(int())), + ("error_code", DummySerializer(int())), + ("session_id", DummySerializer(int())), + ], + 1: [ + ("throttle_time_ms", int32Serializer), + ("responses", ArraySerializer(responsesSerializers[1])), + ("error_code", DummySerializer(int())), + ("session_id", DummySerializer(int())), + ], + 2: [ + ("throttle_time_ms", int32Serializer), + ("responses", ArraySerializer(responsesSerializers[2])), + ("error_code", DummySerializer(int())), + ("session_id", DummySerializer(int())), + ], + 3: [ + ("throttle_time_ms", int32Serializer), + ("responses", ArraySerializer(responsesSerializers[3])), + ("error_code", DummySerializer(int())), + ("session_id", DummySerializer(int())), + ], + 4: [ + ("throttle_time_ms", int32Serializer), + ("responses", ArraySerializer(responsesSerializers[4])), + ("error_code", DummySerializer(int())), + ("session_id", DummySerializer(int())), + ], + 5: [ + ("throttle_time_ms", int32Serializer), + ("responses", ArraySerializer(responsesSerializers[5])), + ("error_code", DummySerializer(int())), + ("session_id", DummySerializer(int())), + ], + 6: [ + ("throttle_time_ms", int32Serializer), + ("responses", ArraySerializer(responsesSerializers[6])), + ("error_code", DummySerializer(int())), + ("session_id", DummySerializer(int())), + ], + 7: [ + ("throttle_time_ms", int32Serializer), + ("error_code", int16Serializer), + ("session_id", int32Serializer), + ("responses", ArraySerializer(responsesSerializers[7])), + ], + 8: [ + ("throttle_time_ms", int32Serializer), + ("error_code", int16Serializer), + ("session_id", int32Serializer), + ("responses", ArraySerializer(responsesSerializers[8])), + ], + 9: [ + ("throttle_time_ms", int32Serializer), + ("error_code", int16Serializer), + ("session_id", int32Serializer), + ("responses", ArraySerializer(responsesSerializers[9])), + ], + 10: [ + ("throttle_time_ms", int32Serializer), + ("error_code", int16Serializer), + ("session_id", 
int32Serializer),
+        ("responses", ArraySerializer(responsesSerializers[10])),
+    ],
+    11: [
+        ("throttle_time_ms", int32Serializer),
+        ("error_code", int16Serializer),
+        ("session_id", int32Serializer),
+        ("responses", ArraySerializer(responsesSerializers[11])),
+    ],
+}
+
+
+fetchResponseDataSerializers: Dict[int, BaseSerializer[FetchResponseData]] = {
+    version: NamedTupleSerializer(FetchResponseData, schema) for version, schema in fetchResponseDataSchemas.items()
+}
diff --git a/esque/protocol/api/find_coordinator.py b/esque/protocol/api/find_coordinator.py
new file mode 100644
index 00000000..be62bb4a
--- /dev/null
+++ b/esque/protocol/api/find_coordinator.py
@@ -0,0 +1,103 @@
+# FIXME autogenerated module, check for errors!
+from typing import Dict, Optional
+
+from dataclasses import dataclass
+
+from esque.protocol.api.base import ApiKey, RequestData, ResponseData
+from esque.protocol.serializers import (
+    BaseSerializer,
+    DummySerializer,
+    NamedTupleSerializer,
+    Schema,
+    int16Serializer,
+    int32Serializer,
+    int8Serializer,
+    nullableStringSerializer,
+    stringSerializer,
+)
+
+
+@dataclass
+class FindCoordinatorRequestData(RequestData):
+    # The coordinator key.
+    key: "str"  # STRING
+
+    # The coordinator key type. (Group, transaction, etc.)
+    key_type: "int"  # INT8
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.FIND_COORDINATOR  # == 10
+
+
+@dataclass
+class FindCoordinatorResponseData(ResponseData):
+    # The duration in milliseconds for which the request was throttled due to a quota violation, or zero
+    # if the request did not violate any quota.
+    throttle_time_ms: "int"  # INT32
+
+    # The error code, or 0 if there was no error.
+    error_code: "int"  # INT16
+
+    # The error message, or null if there was no error.
+    error_message: "Optional[str]"  # NULLABLE_STRING
+
+    # The node id.
+    node_id: "int"  # INT32
+
+    # The host name.
+    host: "str"  # STRING
+
+    # The port.
+    port: "int"  # INT32
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.FIND_COORDINATOR  # == 10
+
+
+findCoordinatorRequestDataSchemas: Dict[int, Schema] = {
+    0: [("key", stringSerializer), ("key_type", DummySerializer(int()))],
+    1: [("key", stringSerializer), ("key_type", int8Serializer)],
+    2: [("key", stringSerializer), ("key_type", int8Serializer)],
+}
+
+
+findCoordinatorRequestDataSerializers: Dict[int, BaseSerializer[FindCoordinatorRequestData]] = {
+    version: NamedTupleSerializer(FindCoordinatorRequestData, schema)
+    for version, schema in findCoordinatorRequestDataSchemas.items()
+}
+
+
+findCoordinatorResponseDataSchemas: Dict[int, Schema] = {
+    0: [
+        ("error_code", int16Serializer),
+        ("node_id", int32Serializer),
+        ("host", stringSerializer),
+        ("port", int32Serializer),
+        ("throttle_time_ms", DummySerializer(int())),
+        ("error_message", DummySerializer(None)),
+    ],
+    1: [
+        ("throttle_time_ms", int32Serializer),
+        ("error_code", int16Serializer),
+        ("error_message", nullableStringSerializer),
+        ("node_id", int32Serializer),
+        ("host", stringSerializer),
+        ("port", int32Serializer),
+    ],
+    2: [
+        ("throttle_time_ms", int32Serializer),
+        ("error_code", int16Serializer),
+        ("error_message", nullableStringSerializer),
+        ("node_id", int32Serializer),
+        ("host", stringSerializer),
+        ("port", int32Serializer),
+    ],
+}
+
+
+findCoordinatorResponseDataSerializers: Dict[int, BaseSerializer[FindCoordinatorResponseData]] = {
+    version: NamedTupleSerializer(FindCoordinatorResponseData, schema)
+    for version, schema in findCoordinatorResponseDataSchemas.items()
+}
diff --git a/esque/protocol/api/heartbeat.py b/esque/protocol/api/heartbeat.py
new file mode 100644
index 00000000..cb784028
--- /dev/null
+++ b/esque/protocol/api/heartbeat.py
@@ -0,0 +1,97 @@
+# FIXME autogenerated module, check for errors!
+from typing import Dict, Optional
+
+from dataclasses import dataclass
+
+from esque.protocol.api.base import ApiKey, RequestData, ResponseData
+from esque.protocol.serializers import (
+    BaseSerializer,
+    DummySerializer,
+    NamedTupleSerializer,
+    Schema,
+    int16Serializer,
+    int32Serializer,
+    nullableStringSerializer,
+    stringSerializer,
+)
+
+
+@dataclass
+class HeartbeatRequestData(RequestData):
+    # The group id.
+    group_id: "str"  # STRING
+
+    # The generation of the group.
+    generation_id: "int"  # INT32
+
+    # The member ID.
+    member_id: "str"  # STRING
+
+    # The unique identifier of the consumer instance provided by end user.
+    group_instance_id: "Optional[str]"  # NULLABLE_STRING
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.HEARTBEAT  # == 12
+
+
+@dataclass
+class HeartbeatResponseData(ResponseData):
+    # The duration in milliseconds for which the request was throttled due to a quota violation, or zero
+    # if the request did not violate any quota.
+    throttle_time_ms: "int"  # INT32
+
+    # The error code, or 0 if there was no error.
+    error_code: "int"  # INT16
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.HEARTBEAT  # == 12
+
+
+heartbeatRequestDataSchemas: Dict[int, Schema] = {
+    0: [
+        ("group_id", stringSerializer),
+        ("generation_id", int32Serializer),
+        ("member_id", stringSerializer),
+        ("group_instance_id", DummySerializer(None)),
+    ],
+    1: [
+        ("group_id", stringSerializer),
+        ("generation_id", int32Serializer),
+        ("member_id", stringSerializer),
+        ("group_instance_id", DummySerializer(None)),
+    ],
+    2: [
+        ("group_id", stringSerializer),
+        ("generation_id", int32Serializer),
+        ("member_id", stringSerializer),
+        ("group_instance_id", DummySerializer(None)),
+    ],
+    3: [
+        ("group_id", stringSerializer),
+        ("generation_id", int32Serializer),
+        ("member_id", stringSerializer),
+        ("group_instance_id", nullableStringSerializer),
+    ],
+}
+
+
+heartbeatRequestDataSerializers: Dict[int, BaseSerializer[HeartbeatRequestData]] = {
+    version: NamedTupleSerializer(HeartbeatRequestData, schema)
+    for version, schema in heartbeatRequestDataSchemas.items()
+}
+
+
+heartbeatResponseDataSchemas: Dict[int, Schema] = {
+    0: [("error_code", int16Serializer), ("throttle_time_ms", DummySerializer(int()))],
+    1: [("throttle_time_ms", int32Serializer), ("error_code", int16Serializer)],
+    2: [("throttle_time_ms", int32Serializer), ("error_code", int16Serializer)],
+    3: [("throttle_time_ms", int32Serializer), ("error_code", int16Serializer)],
+}
+
+
+heartbeatResponseDataSerializers: Dict[int, BaseSerializer[HeartbeatResponseData]] = {
+    version: NamedTupleSerializer(HeartbeatResponseData, schema)
+    for version, schema in heartbeatResponseDataSchemas.items()
+}
diff --git a/esque/protocol/api/incremental_alter_configs.py b/esque/protocol/api/incremental_alter_configs.py
new file mode 100644
index 00000000..c2790c93
--- /dev/null
+++ b/esque/protocol/api/incremental_alter_configs.py
@@ -0,0 +1,145 @@
+# FIXME autogenerated module, check for errors!
+from typing import Dict, List, Optional
+
+from dataclasses import dataclass
+
+from esque.protocol.api.base import ApiKey, RequestData, ResponseData
+from esque.protocol.serializers import (
+    ArraySerializer,
+    BaseSerializer,
+    NamedTupleSerializer,
+    Schema,
+    booleanSerializer,
+    int16Serializer,
+    int32Serializer,
+    int8Serializer,
+    nullableStringSerializer,
+    stringSerializer,
+)
+
+
+@dataclass
+class Configs:
+    # The configuration key name.
+    name: "str"  # STRING
+
+    # The type (Set, Delete, Append, Subtract) of operation.
+    config_operation: "int"  # INT8
+
+    # The value to set for the configuration key.
+    value: "Optional[str]"  # NULLABLE_STRING
+
+
+@dataclass
+class Resources:
+    # The resource type.
+    resource_type: "int"  # INT8
+
+    # The resource name.
+    resource_name: "str"  # STRING
+
+    # The configurations.
+    configs: List["Configs"]
+
+
+@dataclass
+class IncrementalAlterConfigsRequestData(RequestData):
+    # The incremental updates for each resource.
+    resources: List["Resources"]
+
+    # True if we should validate the request, but not change the configurations.
+    validate_only: "bool"  # BOOLEAN
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.INCREMENTAL_ALTER_CONFIGS  # == 44
+
+
+@dataclass
+class Responses:
+    # The resource error code.
+    error_code: "int"  # INT16
+
+    # The resource error message, or null if there was no error.
+    error_message: "Optional[str]"  # NULLABLE_STRING
+
+    # The resource type.
+    resource_type: "int"  # INT8
+
+    # The resource name.
+    resource_name: "str"  # STRING
+
+
+@dataclass
+class IncrementalAlterConfigsResponseData(ResponseData):
+    # Duration in milliseconds for which the request was throttled due to a quota violation, or zero if
+    # the request did not violate any quota.
+    throttle_time_ms: "int"  # INT32
+
+    # The responses for each resource.
+    responses: List["Responses"]
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.INCREMENTAL_ALTER_CONFIGS  # == 44
+
+
+configsSchemas: Dict[int, Schema] = {
+    0: [("name", stringSerializer), ("config_operation", int8Serializer), ("value", nullableStringSerializer)]
+}
+
+
+configsSerializers: Dict[int, BaseSerializer[Configs]] = {
+    version: NamedTupleSerializer(Configs, schema) for version, schema in configsSchemas.items()
+}
+
+
+resourcesSchemas: Dict[int, Schema] = {
+    0: [
+        ("resource_type", int8Serializer),
+        ("resource_name", stringSerializer),
+        ("configs", ArraySerializer(configsSerializers[0])),
+    ]
+}
+
+
+resourcesSerializers: Dict[int, BaseSerializer[Resources]] = {
+    version: NamedTupleSerializer(Resources, schema) for version, schema in resourcesSchemas.items()
+}
+
+
+incrementalAlterConfigsRequestDataSchemas: Dict[int, Schema] = {
+    0: [("resources", ArraySerializer(resourcesSerializers[0])), ("validate_only", booleanSerializer)]
+}
+
+
+incrementalAlterConfigsRequestDataSerializers: Dict[int, BaseSerializer[IncrementalAlterConfigsRequestData]] = {
+    version: NamedTupleSerializer(IncrementalAlterConfigsRequestData, schema)
+    for version, schema in incrementalAlterConfigsRequestDataSchemas.items()
+}
+
+
+responsesSchemas: Dict[int, Schema] = {
+    0: [
+        ("error_code", int16Serializer),
+        ("error_message", nullableStringSerializer),
+        ("resource_type", int8Serializer),
+        ("resource_name", stringSerializer),
+    ]
+}
+
+
+responsesSerializers: Dict[int, BaseSerializer[Responses]] = {
+    version: NamedTupleSerializer(Responses, schema) for version, schema in responsesSchemas.items()
+}
+
+
+incrementalAlterConfigsResponseDataSchemas: Dict[int, Schema] = {
+    0: [("throttle_time_ms", int32Serializer), ("responses", ArraySerializer(responsesSerializers[0]))]
+}
+
+
+incrementalAlterConfigsResponseDataSerializers: Dict[int, BaseSerializer[IncrementalAlterConfigsResponseData]] = {
+    version: NamedTupleSerializer(IncrementalAlterConfigsResponseData, schema)
+    for version, schema in incrementalAlterConfigsResponseDataSchemas.items()
+}
diff --git a/esque/protocol/api/init_producer_id.py b/esque/protocol/api/init_producer_id.py
new file mode 100644
index 00000000..b8e765e8
--- /dev/null
+++ b/esque/protocol/api/init_producer_id.py
@@ -0,0 +1,83 @@
+# FIXME autogenerated module, check for errors!
+from typing import Dict, Optional
+
+from dataclasses import dataclass
+
+from esque.protocol.api.base import ApiKey, RequestData, ResponseData
+from esque.protocol.serializers import (
+    BaseSerializer,
+    NamedTupleSerializer,
+    Schema,
+    int16Serializer,
+    int32Serializer,
+    int64Serializer,
+    nullableStringSerializer,
+)
+
+
+@dataclass
+class InitProducerIdRequestData(RequestData):
+    # The transactional id, or null if the producer is not transactional.
+    transactional_id: "Optional[str]"  # NULLABLE_STRING
+
+    # The time in ms to wait before aborting idle transactions sent by this producer. This is only
+    # relevant if a TransactionalId has been defined.
+    transaction_timeout_ms: "int"  # INT32
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.INIT_PRODUCER_ID  # == 22
+
+
+@dataclass
+class InitProducerIdResponseData(ResponseData):
+    # The duration in milliseconds for which the request was throttled due to a quota violation, or zero
+    # if the request did not violate any quota.
+    throttle_time_ms: "int"  # INT32
+
+    # The error code, or 0 if there was no error.
+    error_code: "int"  # INT16
+
+    # The current producer id.
+    producer_id: "int"  # INT64
+
+    # The current epoch associated with the producer id.
+    producer_epoch: "int"  # INT16
+
+    @staticmethod
+    def api_key() -> int:
+        return ApiKey.INIT_PRODUCER_ID  # == 22
+
+
+initProducerIdRequestDataSchemas: Dict[int, Schema] = {
+    0: [("transactional_id", nullableStringSerializer), ("transaction_timeout_ms", int32Serializer)],
+    1: [("transactional_id", nullableStringSerializer), ("transaction_timeout_ms", int32Serializer)],
+}
+
+
+initProducerIdRequestDataSerializers: Dict[int, BaseSerializer[InitProducerIdRequestData]] = {
+    version: NamedTupleSerializer(InitProducerIdRequestData, schema)
+    for version, schema in initProducerIdRequestDataSchemas.items()
+}
+
+
+initProducerIdResponseDataSchemas: Dict[int, Schema] = {
+    0: [
+        ("throttle_time_ms", int32Serializer),
+        ("error_code", int16Serializer),
+        ("producer_id", int64Serializer),
+        ("producer_epoch", int16Serializer),
+    ],
+    1: [
+        ("throttle_time_ms", int32Serializer),
+        ("error_code", int16Serializer),
+        ("producer_id", int64Serializer),
+        ("producer_epoch", int16Serializer),
+    ],
+}
+
+
+initProducerIdResponseDataSerializers: Dict[int, BaseSerializer[InitProducerIdResponseData]] = {
+    version: NamedTupleSerializer(InitProducerIdResponseData, schema)
+    for version, schema in initProducerIdResponseDataSchemas.items()
+}
diff --git a/esque/protocol/api/join_group.py b/esque/protocol/api/join_group.py
new file mode 100644
index 00000000..10a858d5
--- /dev/null
+++ b/esque/protocol/api/join_group.py
@@ -0,0 +1,259 @@
+# FIXME autogenerated module, check for errors!
+from typing import Dict, List, Optional
+
+from dataclasses import dataclass
+
+from esque.protocol.api.base import ApiKey, RequestData, ResponseData
+from esque.protocol.serializers import (
+    ArraySerializer,
+    BaseSerializer,
+    DummySerializer,
+    NamedTupleSerializer,
+    Schema,
+    bytesSerializer,
+    int16Serializer,
+    int32Serializer,
+    nullableStringSerializer,
+    stringSerializer,
+)
+
+
+@dataclass
+class Protocols:
+    # The protocol name.
+    name: "str"  # STRING
+
+    # The protocol metadata.
+    metadata: "bytes"  # BYTES
+
+
+@dataclass
+class JoinGroupRequestData(RequestData):
+    # The group identifier.
+    group_id: "str"  # STRING
+
+    # The coordinator considers the consumer dead if it receives no heartbeat after this timeout in
+    # milliseconds.
+    session_timeout_ms: "int"  # INT32
+
+    # The maximum time in milliseconds that the coordinator will wait for each member to rejoin when
+    # rebalancing the group.
+    rebalance_timeout_ms: "int"  # INT32
+
+    # The member id assigned by the group coordinator.
+    member_id: "str"  # STRING
+
+    # The unique identifier of the consumer instance provided by end user.
+    group_instance_id: "Optional[str]"  # NULLABLE_STRING
+
+    # The unique name for the class of protocols implemented by the group we want to join.
+    protocol_type: "str"  # STRING
+
+    # The list of protocols that the member supports.
+ protocols: List["Protocols"] + + @staticmethod + def api_key() -> int: + return ApiKey.JOIN_GROUP # == 11 + + +@dataclass +class Members: + # The group member ID. + member_id: "str" # STRING + + # The unique identifier of the consumer instance provided by end user. + group_instance_id: "Optional[str]" # NULLABLE_STRING + + # The group member metadata. + metadata: "bytes" # BYTES + + +@dataclass +class JoinGroupResponseData(ResponseData): + # The duration in milliseconds for which the request was throttled due to a quota violation, or zero + # if the request did not violate any quota. + throttle_time_ms: "int" # INT32 + + # The error code, or 0 if there was no error. + error_code: "int" # INT16 + + # The generation ID of the group. + generation_id: "int" # INT32 + + # The group protocol selected by the coordinator. + protocol_name: "str" # STRING + + # The leader of the group. + leader: "str" # STRING + + # The group member ID. + member_id: "str" # STRING + + members: List["Members"] + + @staticmethod + def api_key() -> int: + return ApiKey.JOIN_GROUP # == 11 + + +protocolsSchemas: Dict[int, Schema] = { + 0: [("name", stringSerializer), ("metadata", bytesSerializer)], + 1: [("name", stringSerializer), ("metadata", bytesSerializer)], + 2: [("name", stringSerializer), ("metadata", bytesSerializer)], + 3: [("name", stringSerializer), ("metadata", bytesSerializer)], + 4: [("name", stringSerializer), ("metadata", bytesSerializer)], + 5: [("name", stringSerializer), ("metadata", bytesSerializer)], +} + + +protocolsSerializers: Dict[int, BaseSerializer[Protocols]] = { + version: NamedTupleSerializer(Protocols, schema) for version, schema in protocolsSchemas.items() +} + + +joinGroupRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("group_id", stringSerializer), + ("session_timeout_ms", int32Serializer), + ("member_id", stringSerializer), + ("protocol_type", stringSerializer), + ("protocols", ArraySerializer(protocolsSerializers[0])), + ("rebalance_timeout_ms", DummySerializer(int())), + ("group_instance_id", DummySerializer(None)), + ], + 1: [ + ("group_id", stringSerializer), + ("session_timeout_ms", int32Serializer), + ("rebalance_timeout_ms", int32Serializer), + ("member_id", stringSerializer), + ("protocol_type", stringSerializer), + ("protocols", ArraySerializer(protocolsSerializers[1])), + ("group_instance_id", DummySerializer(None)), + ], + 2: [ + ("group_id", stringSerializer), + ("session_timeout_ms", int32Serializer), + ("rebalance_timeout_ms", int32Serializer), + ("member_id", stringSerializer), + ("protocol_type", stringSerializer), + ("protocols", ArraySerializer(protocolsSerializers[2])), + ("group_instance_id", DummySerializer(None)), + ], + 3: [ + ("group_id", stringSerializer), + ("session_timeout_ms", int32Serializer), + ("rebalance_timeout_ms", int32Serializer), + ("member_id", stringSerializer), + ("protocol_type", stringSerializer), + ("protocols", ArraySerializer(protocolsSerializers[3])), + ("group_instance_id", DummySerializer(None)), + ], + 4: [ + ("group_id", stringSerializer), + ("session_timeout_ms", int32Serializer), + ("rebalance_timeout_ms", int32Serializer), + ("member_id", stringSerializer), + ("protocol_type", stringSerializer), + ("protocols", ArraySerializer(protocolsSerializers[4])), + ("group_instance_id", DummySerializer(None)), + ], + 5: [ + ("group_id", stringSerializer), + ("session_timeout_ms", int32Serializer), + ("rebalance_timeout_ms", int32Serializer), + ("member_id", stringSerializer), + ("group_instance_id", nullableStringSerializer), + 
("protocol_type", stringSerializer), + ("protocols", ArraySerializer(protocolsSerializers[5])), + ], +} + + +joinGroupRequestDataSerializers: Dict[int, BaseSerializer[JoinGroupRequestData]] = { + version: NamedTupleSerializer(JoinGroupRequestData, schema) + for version, schema in joinGroupRequestDataSchemas.items() +} + + +membersSchemas: Dict[int, Schema] = { + 0: [("member_id", stringSerializer), ("metadata", bytesSerializer), ("group_instance_id", DummySerializer(None))], + 1: [("member_id", stringSerializer), ("metadata", bytesSerializer), ("group_instance_id", DummySerializer(None))], + 2: [("member_id", stringSerializer), ("metadata", bytesSerializer), ("group_instance_id", DummySerializer(None))], + 3: [("member_id", stringSerializer), ("metadata", bytesSerializer), ("group_instance_id", DummySerializer(None))], + 4: [("member_id", stringSerializer), ("metadata", bytesSerializer), ("group_instance_id", DummySerializer(None))], + 5: [ + ("member_id", stringSerializer), + ("group_instance_id", nullableStringSerializer), + ("metadata", bytesSerializer), + ], +} + + +membersSerializers: Dict[int, BaseSerializer[Members]] = { + version: NamedTupleSerializer(Members, schema) for version, schema in membersSchemas.items() +} + + +joinGroupResponseDataSchemas: Dict[int, Schema] = { + 0: [ + ("error_code", int16Serializer), + ("generation_id", int32Serializer), + ("protocol_name", stringSerializer), + ("leader", stringSerializer), + ("member_id", stringSerializer), + ("members", ArraySerializer(membersSerializers[0])), + ("throttle_time_ms", DummySerializer(int())), + ], + 1: [ + ("error_code", int16Serializer), + ("generation_id", int32Serializer), + ("protocol_name", stringSerializer), + ("leader", stringSerializer), + ("member_id", stringSerializer), + ("members", ArraySerializer(membersSerializers[1])), + ("throttle_time_ms", DummySerializer(int())), + ], + 2: [ + ("throttle_time_ms", int32Serializer), + ("error_code", int16Serializer), + ("generation_id", int32Serializer), + ("protocol_name", stringSerializer), + ("leader", stringSerializer), + ("member_id", stringSerializer), + ("members", ArraySerializer(membersSerializers[2])), + ], + 3: [ + ("throttle_time_ms", int32Serializer), + ("error_code", int16Serializer), + ("generation_id", int32Serializer), + ("protocol_name", stringSerializer), + ("leader", stringSerializer), + ("member_id", stringSerializer), + ("members", ArraySerializer(membersSerializers[3])), + ], + 4: [ + ("throttle_time_ms", int32Serializer), + ("error_code", int16Serializer), + ("generation_id", int32Serializer), + ("protocol_name", stringSerializer), + ("leader", stringSerializer), + ("member_id", stringSerializer), + ("members", ArraySerializer(membersSerializers[4])), + ], + 5: [ + ("throttle_time_ms", int32Serializer), + ("error_code", int16Serializer), + ("generation_id", int32Serializer), + ("protocol_name", stringSerializer), + ("leader", stringSerializer), + ("member_id", stringSerializer), + ("members", ArraySerializer(membersSerializers[5])), + ], +} + + +joinGroupResponseDataSerializers: Dict[int, BaseSerializer[JoinGroupResponseData]] = { + version: NamedTupleSerializer(JoinGroupResponseData, schema) + for version, schema in joinGroupResponseDataSchemas.items() +} diff --git a/esque/protocol/api/leader_and_isr.py b/esque/protocol/api/leader_and_isr.py new file mode 100644 index 00000000..b79fbe4b --- /dev/null +++ b/esque/protocol/api/leader_and_isr.py @@ -0,0 +1,234 @@ +# FIXME autogenerated module, check for errors! 
+from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + booleanSerializer, + int16Serializer, + int32Serializer, + int64Serializer, + stringSerializer, +) + + +@dataclass +class PartitionStates: + # Topic partition id + partition: "int" # INT32 + + # The controller epoch + controller_epoch: "int" # INT32 + + # The broker id for the leader. + leader: "int" # INT32 + + # The leader epoch. + leader_epoch: "int" # INT32 + + # The in sync replica ids. + isr: List["int"] # INT32 + + # The ZK version. + zk_version: "int" # INT32 + + # The replica ids. + replicas: List["int"] # INT32 + + # Whether the replica should have existed on the broker or not + is_new: "bool" # BOOLEAN + + +@dataclass +class TopicStates: + # Name of topic + topic: "str" # STRING + + # Partition states + partition_states: List["PartitionStates"] + + +@dataclass +class LiveLeaders: + # The broker id + id: "int" # INT32 + + # The hostname of the broker. + host: "str" # STRING + + # The port on which the broker accepts requests. + port: "int" # INT32 + + +@dataclass +class LeaderAndIsrRequestData(RequestData): + # The controller id + controller_id: "int" # INT32 + + # The controller epoch + controller_epoch: "int" # INT32 + + # The broker epoch + broker_epoch: "int" # INT64 + + # Topic states + topic_states: List["TopicStates"] + + # Live leaders + live_leaders: List["LiveLeaders"] + + @staticmethod + def api_key() -> int: + return ApiKey.LEADER_AND_ISR # == 4 + + +@dataclass +class Partitions: + # Name of topic + topic: "str" # STRING + + # Topic partition id + partition: "int" # INT32 + + # Response error code + error_code: "int" # INT16 + + +@dataclass +class LeaderAndIsrResponseData(ResponseData): + # Response error code + error_code: "int" # INT16 + + # Response for the requests partitions + partitions: List["Partitions"] + + @staticmethod + def api_key() -> int: + return ApiKey.LEADER_AND_ISR # == 4 + + +partitionStatesSchemas: Dict[int, Schema] = { + 0: [ + (None, stringSerializer), + ("partition", int32Serializer), + ("controller_epoch", int32Serializer), + ("leader", int32Serializer), + ("leader_epoch", int32Serializer), + ("isr", ArraySerializer(int32Serializer)), + ("zk_version", int32Serializer), + ("replicas", ArraySerializer(int32Serializer)), + ("is_new", DummySerializer(bool())), + ], + 1: [ + (None, stringSerializer), + ("partition", int32Serializer), + ("controller_epoch", int32Serializer), + ("leader", int32Serializer), + ("leader_epoch", int32Serializer), + ("isr", ArraySerializer(int32Serializer)), + ("zk_version", int32Serializer), + ("replicas", ArraySerializer(int32Serializer)), + ("is_new", booleanSerializer), + ], + 2: [ + ("partition", int32Serializer), + ("controller_epoch", int32Serializer), + ("leader", int32Serializer), + ("leader_epoch", int32Serializer), + ("isr", ArraySerializer(int32Serializer)), + ("zk_version", int32Serializer), + ("replicas", ArraySerializer(int32Serializer)), + ("is_new", booleanSerializer), + ], +} + + +partitionStatesSerializers: Dict[int, BaseSerializer[PartitionStates]] = { + version: NamedTupleSerializer(PartitionStates, schema) for version, schema in partitionStatesSchemas.items() +} + + +liveLeadersSchemas: Dict[int, Schema] = { + 0: [("id", int32Serializer), ("host", stringSerializer), ("port", int32Serializer)], + 1: [("id", int32Serializer), 
("host", stringSerializer), ("port", int32Serializer)], + 2: [("id", int32Serializer), ("host", stringSerializer), ("port", int32Serializer)], +} + + +liveLeadersSerializers: Dict[int, BaseSerializer[LiveLeaders]] = { + version: NamedTupleSerializer(LiveLeaders, schema) for version, schema in liveLeadersSchemas.items() +} + + +topicStatesSchemas: Dict[int, Schema] = { + 2: [("topic", stringSerializer), ("partition_states", ArraySerializer(partitionStatesSerializers[2]))] +} + + +topicStatesSerializers: Dict[int, BaseSerializer[TopicStates]] = { + version: NamedTupleSerializer(TopicStates, schema) for version, schema in topicStatesSchemas.items() +} + + +leaderAndIsrRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("controller_id", int32Serializer), + ("controller_epoch", int32Serializer), + (None, ArraySerializer(partitionStatesSerializers[0])), + ("live_leaders", ArraySerializer(liveLeadersSerializers[0])), + ("broker_epoch", DummySerializer(int())), + ("topic_states", DummySerializer([])), + ], + 1: [ + ("controller_id", int32Serializer), + ("controller_epoch", int32Serializer), + (None, ArraySerializer(partitionStatesSerializers[1])), + ("live_leaders", ArraySerializer(liveLeadersSerializers[1])), + ("broker_epoch", DummySerializer(int())), + ("topic_states", DummySerializer([])), + ], + 2: [ + ("controller_id", int32Serializer), + ("controller_epoch", int32Serializer), + ("broker_epoch", int64Serializer), + ("topic_states", ArraySerializer(topicStatesSerializers[2])), + ("live_leaders", ArraySerializer(liveLeadersSerializers[2])), + ], +} + + +leaderAndIsrRequestDataSerializers: Dict[int, BaseSerializer[LeaderAndIsrRequestData]] = { + version: NamedTupleSerializer(LeaderAndIsrRequestData, schema) + for version, schema in leaderAndIsrRequestDataSchemas.items() +} + + +partitionsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partition", int32Serializer), ("error_code", int16Serializer)], + 1: [("topic", stringSerializer), ("partition", int32Serializer), ("error_code", int16Serializer)], + 2: [("topic", stringSerializer), ("partition", int32Serializer), ("error_code", int16Serializer)], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +leaderAndIsrResponseDataSchemas: Dict[int, Schema] = { + 0: [("error_code", int16Serializer), ("partitions", ArraySerializer(partitionsSerializers[0]))], + 1: [("error_code", int16Serializer), ("partitions", ArraySerializer(partitionsSerializers[1]))], + 2: [("error_code", int16Serializer), ("partitions", ArraySerializer(partitionsSerializers[2]))], +} + + +leaderAndIsrResponseDataSerializers: Dict[int, BaseSerializer[LeaderAndIsrResponseData]] = { + version: NamedTupleSerializer(LeaderAndIsrResponseData, schema) + for version, schema in leaderAndIsrResponseDataSchemas.items() +} diff --git a/esque/protocol/api/leave_group.py b/esque/protocol/api/leave_group.py new file mode 100644 index 00000000..bcd8cf77 --- /dev/null +++ b/esque/protocol/api/leave_group.py @@ -0,0 +1,68 @@ +# FIXME autogenerated module, check for errors! 
+from typing import Dict + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + stringSerializer, +) + + +@dataclass +class LeaveGroupRequestData(RequestData): + # The ID of the group to leave. + group_id: "str" # STRING + + # The member ID to remove from the group. + member_id: "str" # STRING + + @staticmethod + def api_key() -> int: + return ApiKey.LEAVE_GROUP # == 13 + + +@dataclass +class LeaveGroupResponseData(ResponseData): + # The duration in milliseconds for which the request was throttled due to a quota violation, or zero + # if the request did not violate any quota. + throttle_time_ms: "int" # INT32 + + # The error code, or 0 if there was no error. + error_code: "int" # INT16 + + @staticmethod + def api_key() -> int: + return ApiKey.LEAVE_GROUP # == 13 + + +leaveGroupRequestDataSchemas: Dict[int, Schema] = { + 0: [("group_id", stringSerializer), ("member_id", stringSerializer)], + 1: [("group_id", stringSerializer), ("member_id", stringSerializer)], + 2: [("group_id", stringSerializer), ("member_id", stringSerializer)], +} + + +leaveGroupRequestDataSerializers: Dict[int, BaseSerializer[LeaveGroupRequestData]] = { + version: NamedTupleSerializer(LeaveGroupRequestData, schema) + for version, schema in leaveGroupRequestDataSchemas.items() +} + + +leaveGroupResponseDataSchemas: Dict[int, Schema] = { + 0: [("error_code", int16Serializer), ("throttle_time_ms", DummySerializer(int()))], + 1: [("throttle_time_ms", int32Serializer), ("error_code", int16Serializer)], + 2: [("throttle_time_ms", int32Serializer), ("error_code", int16Serializer)], +} + + +leaveGroupResponseDataSerializers: Dict[int, BaseSerializer[LeaveGroupResponseData]] = { + version: NamedTupleSerializer(LeaveGroupResponseData, schema) + for version, schema in leaveGroupResponseDataSchemas.items() +} diff --git a/esque/protocol/api/list_groups.py b/esque/protocol/api/list_groups.py new file mode 100644 index 00000000..b69f2b4a --- /dev/null +++ b/esque/protocol/api/list_groups.py @@ -0,0 +1,93 @@ +# FIXME autogenerated module, check for errors! 
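+#
+# Kafka ListGroups API (api_key 16). The request carries no fields at all,
+# hence the empty per-version schemas below. In the response, version 0
+# serializes error_code then groups (throttle_time_ms is dummied in), while
+# versions 1 and 2 put a real throttle_time_ms INT32 first.
+#
+# Construction sketch:
+#
+#     request = ListGroupsRequestData()
+#     serializer = listGroupsRequestDataSerializers[1]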
+from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + stringSerializer, +) + + +@dataclass +class ListGroupsRequestData(RequestData): + @staticmethod + def api_key() -> int: + return ApiKey.LIST_GROUPS # == 16 + + +@dataclass +class Groups: + # The unique group identifier + group_id: "str" # STRING + + protocol_type: "str" # STRING + + +@dataclass +class ListGroupsResponseData(ResponseData): + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + # Response error code + error_code: "int" # INT16 + + groups: List["Groups"] + + @staticmethod + def api_key() -> int: + return ApiKey.LIST_GROUPS # == 16 + + +listGroupsRequestDataSchemas: Dict[int, Schema] = {0: [], 1: [], 2: []} + + +listGroupsRequestDataSerializers: Dict[int, BaseSerializer[ListGroupsRequestData]] = { + version: NamedTupleSerializer(ListGroupsRequestData, schema) + for version, schema in listGroupsRequestDataSchemas.items() +} + + +groupsSchemas: Dict[int, Schema] = { + 0: [("group_id", stringSerializer), ("protocol_type", stringSerializer)], + 1: [("group_id", stringSerializer), ("protocol_type", stringSerializer)], + 2: [("group_id", stringSerializer), ("protocol_type", stringSerializer)], +} + + +groupsSerializers: Dict[int, BaseSerializer[Groups]] = { + version: NamedTupleSerializer(Groups, schema) for version, schema in groupsSchemas.items() +} + + +listGroupsResponseDataSchemas: Dict[int, Schema] = { + 0: [ + ("error_code", int16Serializer), + ("groups", ArraySerializer(groupsSerializers[0])), + ("throttle_time_ms", DummySerializer(int())), + ], + 1: [ + ("throttle_time_ms", int32Serializer), + ("error_code", int16Serializer), + ("groups", ArraySerializer(groupsSerializers[1])), + ], + 2: [ + ("throttle_time_ms", int32Serializer), + ("error_code", int16Serializer), + ("groups", ArraySerializer(groupsSerializers[2])), + ], +} + + +listGroupsResponseDataSerializers: Dict[int, BaseSerializer[ListGroupsResponseData]] = { + version: NamedTupleSerializer(ListGroupsResponseData, schema) + for version, schema in listGroupsResponseDataSchemas.items() +} diff --git a/esque/protocol/api/list_offsets.py b/esque/protocol/api/list_offsets.py new file mode 100644 index 00000000..46baf103 --- /dev/null +++ b/esque/protocol/api/list_offsets.py @@ -0,0 +1,274 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + int64Serializer, + int8Serializer, + stringSerializer, +) + + +@dataclass +class Partitions: + # Topic partition id + partition: "int" # INT32 + + # The current leader epoch, if provided, is used to fence consumers/replicas with old metadata. If the + # epoch provided by the client is larger than the current epoch known to the broker, then the + # UNKNOWN_LEADER_EPOCH error code will be returned. If the provided epoch is smaller, then the + # FENCED_LEADER_EPOCH error code will be returned. 
+ current_leader_epoch: "int" # INT32 + + # The target timestamp for the partition. + timestamp: "int" # INT64 + + +@dataclass +class Topics: + # Name of topic + topic: "str" # STRING + + # Partitions to list offsets. + partitions: List["Partitions"] + + +@dataclass +class ListOffsetsRequestData(RequestData): + # Broker id of the follower. For normal consumers, use -1. + replica_id: "int" # INT32 + + # This setting controls the visibility of transactional records. Using READ_UNCOMMITTED + # (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non- + # transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED + # returns all data from offsets smaller than the current LSO (last stable offset), and enables the + # inclusion of the list of aborted transactions in the result, which allows consumers to discard + # ABORTED transactional records + isolation_level: "int" # INT8 + + # Topics to list offsets. + topics: List["Topics"] + + @staticmethod + def api_key() -> int: + return ApiKey.LIST_OFFSETS # == 2 + + +@dataclass +class PartitionResponses: + # Topic partition id + partition: "int" # INT32 + + # Response error code + error_code: "int" # INT16 + + # The timestamp associated with the returned offset + timestamp: "int" # INT64 + + # The offset found + offset: "int" # INT64 + + # The leader epoch + leader_epoch: "int" # INT32 + + +@dataclass +class Responses: + # Name of topic + topic: "str" # STRING + + # The listed offsets by partition + partition_responses: List["PartitionResponses"] + + +@dataclass +class ListOffsetsResponseData(ResponseData): + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + # The listed offsets by topic + responses: List["Responses"] + + @staticmethod + def api_key() -> int: + return ApiKey.LIST_OFFSETS # == 2 + + +partitionsSchemas: Dict[int, Schema] = { + 0: [ + ("partition", int32Serializer), + ("timestamp", int64Serializer), + (None, int32Serializer), + ("current_leader_epoch", DummySerializer(int())), + ], + 1: [ + ("partition", int32Serializer), + ("timestamp", int64Serializer), + ("current_leader_epoch", DummySerializer(int())), + ], + 2: [ + ("partition", int32Serializer), + ("timestamp", int64Serializer), + ("current_leader_epoch", DummySerializer(int())), + ], + 3: [ + ("partition", int32Serializer), + ("timestamp", int64Serializer), + ("current_leader_epoch", DummySerializer(int())), + ], + 4: [("partition", int32Serializer), ("current_leader_epoch", int32Serializer), ("timestamp", int64Serializer)], + 5: [("partition", int32Serializer), ("current_leader_epoch", int32Serializer), ("timestamp", int64Serializer)], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[0]))], + 1: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[1]))], + 2: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[2]))], + 3: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[3]))], + 4: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[4]))], + 5: [("topic", stringSerializer), ("partitions", 
ArraySerializer(partitionsSerializers[5]))], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +listOffsetsRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("replica_id", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[0])), + ("isolation_level", DummySerializer(int())), + ], + 1: [ + ("replica_id", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[1])), + ("isolation_level", DummySerializer(int())), + ], + 2: [ + ("replica_id", int32Serializer), + ("isolation_level", int8Serializer), + ("topics", ArraySerializer(topicsSerializers[2])), + ], + 3: [ + ("replica_id", int32Serializer), + ("isolation_level", int8Serializer), + ("topics", ArraySerializer(topicsSerializers[3])), + ], + 4: [ + ("replica_id", int32Serializer), + ("isolation_level", int8Serializer), + ("topics", ArraySerializer(topicsSerializers[4])), + ], + 5: [ + ("replica_id", int32Serializer), + ("isolation_level", int8Serializer), + ("topics", ArraySerializer(topicsSerializers[5])), + ], +} + + +listOffsetsRequestDataSerializers: Dict[int, BaseSerializer[ListOffsetsRequestData]] = { + version: NamedTupleSerializer(ListOffsetsRequestData, schema) + for version, schema in listOffsetsRequestDataSchemas.items() +} + + +partitionResponsesSchemas: Dict[int, Schema] = { + 0: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + (None, ArraySerializer(int64Serializer)), + ("timestamp", DummySerializer(int())), + ("offset", DummySerializer(int())), + ("leader_epoch", DummySerializer(int())), + ], + 1: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("timestamp", int64Serializer), + ("offset", int64Serializer), + ("leader_epoch", DummySerializer(int())), + ], + 2: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("timestamp", int64Serializer), + ("offset", int64Serializer), + ("leader_epoch", DummySerializer(int())), + ], + 3: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("timestamp", int64Serializer), + ("offset", int64Serializer), + ("leader_epoch", DummySerializer(int())), + ], + 4: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("timestamp", int64Serializer), + ("offset", int64Serializer), + ("leader_epoch", int32Serializer), + ], + 5: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("timestamp", int64Serializer), + ("offset", int64Serializer), + ("leader_epoch", int32Serializer), + ], +} + + +partitionResponsesSerializers: Dict[int, BaseSerializer[PartitionResponses]] = { + version: NamedTupleSerializer(PartitionResponses, schema) for version, schema in partitionResponsesSchemas.items() +} + + +responsesSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[0]))], + 1: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[1]))], + 2: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[2]))], + 3: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[3]))], + 4: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[4]))], + 5: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[5]))], +} + + +responsesSerializers: Dict[int, 
BaseSerializer[Responses]] = { + version: NamedTupleSerializer(Responses, schema) for version, schema in responsesSchemas.items() +} + + +listOffsetsResponseDataSchemas: Dict[int, Schema] = { + 0: [("responses", ArraySerializer(responsesSerializers[0])), ("throttle_time_ms", DummySerializer(int()))], + 1: [("responses", ArraySerializer(responsesSerializers[1])), ("throttle_time_ms", DummySerializer(int()))], + 2: [("throttle_time_ms", int32Serializer), ("responses", ArraySerializer(responsesSerializers[2]))], + 3: [("throttle_time_ms", int32Serializer), ("responses", ArraySerializer(responsesSerializers[3]))], + 4: [("throttle_time_ms", int32Serializer), ("responses", ArraySerializer(responsesSerializers[4]))], + 5: [("throttle_time_ms", int32Serializer), ("responses", ArraySerializer(responsesSerializers[5]))], +} + + +listOffsetsResponseDataSerializers: Dict[int, BaseSerializer[ListOffsetsResponseData]] = { + version: NamedTupleSerializer(ListOffsetsResponseData, schema) + for version, schema in listOffsetsResponseDataSchemas.items() +} diff --git a/esque/protocol/api/metadata.py b/esque/protocol/api/metadata.py new file mode 100644 index 00000000..f81cca3c --- /dev/null +++ b/esque/protocol/api/metadata.py @@ -0,0 +1,516 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + booleanSerializer, + int16Serializer, + int32Serializer, + nullableStringSerializer, + stringSerializer, +) + + +@dataclass +class Topics: + # The topic name. + name: "str" # STRING + + +@dataclass +class MetadataRequestData(RequestData): + # The topics to fetch metadata for. + topics: List["Topics"] + + # If this is true, the broker may auto-create topics that we requested which do not already exist, if + # it is configured to do so. + allow_auto_topic_creation: "bool" # BOOLEAN + + # Whether to include cluster authorized operations. + include_cluster_authorized_operations: "bool" # BOOLEAN + + # Whether to include topic authorized operations. + include_topic_authorized_operations: "bool" # BOOLEAN + + @staticmethod + def api_key() -> int: + return ApiKey.METADATA # == 3 + + +@dataclass +class Brokers: + # The broker ID. + node_id: "int" # INT32 + + # The broker hostname. + host: "str" # STRING + + # The broker port. + port: "int" # INT32 + + # The rack of the broker, or null if it has not been assigned to a rack. + rack: "Optional[str]" # NULLABLE_STRING + + +@dataclass +class Partitions: + # The partition error, or 0 if there was no error. + error_code: "int" # INT16 + + # The partition index. + partition_index: "int" # INT32 + + # The ID of the leader broker. + leader_id: "int" # INT32 + + # The leader epoch of this partition. + leader_epoch: "int" # INT32 + + # The set of all nodes that host this partition. + replica_nodes: List["int"] # INT32 + + # The set of nodes that are in sync with the leader for this partition. + isr_nodes: List["int"] # INT32 + + # The set of offline replicas of this partition. + offline_replicas: List["int"] # INT32 + + +@dataclass +class Topics: + # The partition error, or 0 if there was no error. + error_code: "int" # INT16 + + # The topic name. + name: "str" # STRING + + # True if the topic is internal. + is_internal: "bool" # BOOLEAN + + # Each partition in the topic. 
+ partitions: List["Partitions"] + + # 32-bit bitfield to represent authorized operations for this topic. + topic_authorized_operations: "int" # INT32 + + +@dataclass +class MetadataResponseData(ResponseData): + # The duration in milliseconds for which the request was throttled due to a quota violation, or zero + # if the request did not violate any quota. + throttle_time_ms: "int" # INT32 + + # Each broker in the response. + brokers: List["Brokers"] + + # The cluster ID that responding broker belongs to. + cluster_id: "Optional[str]" # NULLABLE_STRING + + # The ID of the controller broker. + controller_id: "int" # INT32 + + # Each topic in the response. + topics: List["Topics"] + + # 32-bit bitfield to represent authorized operations for this cluster. + cluster_authorized_operations: "int" # INT32 + + @staticmethod + def api_key() -> int: + return ApiKey.METADATA # == 3 + + +topicsSchemas: Dict[int, Schema] = { + 0: [("name", stringSerializer)], + 1: [("name", stringSerializer)], + 2: [("name", stringSerializer)], + 3: [("name", stringSerializer)], + 4: [("name", stringSerializer)], + 5: [("name", stringSerializer)], + 6: [("name", stringSerializer)], + 7: [("name", stringSerializer)], + 8: [("name", stringSerializer)], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +metadataRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("topics", ArraySerializer(topicsSerializers[0])), + ("allow_auto_topic_creation", DummySerializer(bool())), + ("include_cluster_authorized_operations", DummySerializer(bool())), + ("include_topic_authorized_operations", DummySerializer(bool())), + ], + 1: [ + ("topics", ArraySerializer(topicsSerializers[1])), + ("allow_auto_topic_creation", DummySerializer(bool())), + ("include_cluster_authorized_operations", DummySerializer(bool())), + ("include_topic_authorized_operations", DummySerializer(bool())), + ], + 2: [ + ("topics", ArraySerializer(topicsSerializers[2])), + ("allow_auto_topic_creation", DummySerializer(bool())), + ("include_cluster_authorized_operations", DummySerializer(bool())), + ("include_topic_authorized_operations", DummySerializer(bool())), + ], + 3: [ + ("topics", ArraySerializer(topicsSerializers[3])), + ("allow_auto_topic_creation", DummySerializer(bool())), + ("include_cluster_authorized_operations", DummySerializer(bool())), + ("include_topic_authorized_operations", DummySerializer(bool())), + ], + 4: [ + ("topics", ArraySerializer(topicsSerializers[4])), + ("allow_auto_topic_creation", booleanSerializer), + ("include_cluster_authorized_operations", DummySerializer(bool())), + ("include_topic_authorized_operations", DummySerializer(bool())), + ], + 5: [ + ("topics", ArraySerializer(topicsSerializers[5])), + ("allow_auto_topic_creation", booleanSerializer), + ("include_cluster_authorized_operations", DummySerializer(bool())), + ("include_topic_authorized_operations", DummySerializer(bool())), + ], + 6: [ + ("topics", ArraySerializer(topicsSerializers[6])), + ("allow_auto_topic_creation", booleanSerializer), + ("include_cluster_authorized_operations", DummySerializer(bool())), + ("include_topic_authorized_operations", DummySerializer(bool())), + ], + 7: [ + ("topics", ArraySerializer(topicsSerializers[7])), + ("allow_auto_topic_creation", booleanSerializer), + ("include_cluster_authorized_operations", DummySerializer(bool())), + ("include_topic_authorized_operations", DummySerializer(bool())), + ], + 8: [ + ("topics", 
ArraySerializer(topicsSerializers[8])), + ("allow_auto_topic_creation", booleanSerializer), + ("include_cluster_authorized_operations", booleanSerializer), + ("include_topic_authorized_operations", booleanSerializer), + ], +} + + +metadataRequestDataSerializers: Dict[int, BaseSerializer[MetadataRequestData]] = { + version: NamedTupleSerializer(MetadataRequestData, schema) + for version, schema in metadataRequestDataSchemas.items() +} + + +brokersSchemas: Dict[int, Schema] = { + 0: [ + ("node_id", int32Serializer), + ("host", stringSerializer), + ("port", int32Serializer), + ("rack", DummySerializer(None)), + ], + 1: [ + ("node_id", int32Serializer), + ("host", stringSerializer), + ("port", int32Serializer), + ("rack", nullableStringSerializer), + ], + 2: [ + ("node_id", int32Serializer), + ("host", stringSerializer), + ("port", int32Serializer), + ("rack", nullableStringSerializer), + ], + 3: [ + ("node_id", int32Serializer), + ("host", stringSerializer), + ("port", int32Serializer), + ("rack", nullableStringSerializer), + ], + 4: [ + ("node_id", int32Serializer), + ("host", stringSerializer), + ("port", int32Serializer), + ("rack", nullableStringSerializer), + ], + 5: [ + ("node_id", int32Serializer), + ("host", stringSerializer), + ("port", int32Serializer), + ("rack", nullableStringSerializer), + ], + 6: [ + ("node_id", int32Serializer), + ("host", stringSerializer), + ("port", int32Serializer), + ("rack", nullableStringSerializer), + ], + 7: [ + ("node_id", int32Serializer), + ("host", stringSerializer), + ("port", int32Serializer), + ("rack", nullableStringSerializer), + ], + 8: [ + ("node_id", int32Serializer), + ("host", stringSerializer), + ("port", int32Serializer), + ("rack", nullableStringSerializer), + ], +} + + +brokersSerializers: Dict[int, BaseSerializer[Brokers]] = { + version: NamedTupleSerializer(Brokers, schema) for version, schema in brokersSchemas.items() +} + + +partitionsSchemas: Dict[int, Schema] = { + 0: [ + ("error_code", int16Serializer), + ("partition_index", int32Serializer), + ("leader_id", int32Serializer), + ("replica_nodes", ArraySerializer(int32Serializer)), + ("isr_nodes", ArraySerializer(int32Serializer)), + ("leader_epoch", DummySerializer(int())), + ("offline_replicas", DummySerializer([])), + ], + 1: [ + ("error_code", int16Serializer), + ("partition_index", int32Serializer), + ("leader_id", int32Serializer), + ("replica_nodes", ArraySerializer(int32Serializer)), + ("isr_nodes", ArraySerializer(int32Serializer)), + ("leader_epoch", DummySerializer(int())), + ("offline_replicas", DummySerializer([])), + ], + 2: [ + ("error_code", int16Serializer), + ("partition_index", int32Serializer), + ("leader_id", int32Serializer), + ("replica_nodes", ArraySerializer(int32Serializer)), + ("isr_nodes", ArraySerializer(int32Serializer)), + ("leader_epoch", DummySerializer(int())), + ("offline_replicas", DummySerializer([])), + ], + 3: [ + ("error_code", int16Serializer), + ("partition_index", int32Serializer), + ("leader_id", int32Serializer), + ("replica_nodes", ArraySerializer(int32Serializer)), + ("isr_nodes", ArraySerializer(int32Serializer)), + ("leader_epoch", DummySerializer(int())), + ("offline_replicas", DummySerializer([])), + ], + 4: [ + ("error_code", int16Serializer), + ("partition_index", int32Serializer), + ("leader_id", int32Serializer), + ("replica_nodes", ArraySerializer(int32Serializer)), + ("isr_nodes", ArraySerializer(int32Serializer)), + ("leader_epoch", DummySerializer(int())), + ("offline_replicas", DummySerializer([])), + ], + 5: [ + 
("error_code", int16Serializer), + ("partition_index", int32Serializer), + ("leader_id", int32Serializer), + ("replica_nodes", ArraySerializer(int32Serializer)), + ("isr_nodes", ArraySerializer(int32Serializer)), + ("offline_replicas", ArraySerializer(int32Serializer)), + ("leader_epoch", DummySerializer(int())), + ], + 6: [ + ("error_code", int16Serializer), + ("partition_index", int32Serializer), + ("leader_id", int32Serializer), + ("replica_nodes", ArraySerializer(int32Serializer)), + ("isr_nodes", ArraySerializer(int32Serializer)), + ("offline_replicas", ArraySerializer(int32Serializer)), + ("leader_epoch", DummySerializer(int())), + ], + 7: [ + ("error_code", int16Serializer), + ("partition_index", int32Serializer), + ("leader_id", int32Serializer), + ("leader_epoch", int32Serializer), + ("replica_nodes", ArraySerializer(int32Serializer)), + ("isr_nodes", ArraySerializer(int32Serializer)), + ("offline_replicas", ArraySerializer(int32Serializer)), + ], + 8: [ + ("error_code", int16Serializer), + ("partition_index", int32Serializer), + ("leader_id", int32Serializer), + ("leader_epoch", int32Serializer), + ("replica_nodes", ArraySerializer(int32Serializer)), + ("isr_nodes", ArraySerializer(int32Serializer)), + ("offline_replicas", ArraySerializer(int32Serializer)), + ], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [ + ("error_code", int16Serializer), + ("name", stringSerializer), + ("partitions", ArraySerializer(partitionsSerializers[0])), + ("is_internal", DummySerializer(bool())), + ("topic_authorized_operations", DummySerializer(int())), + ], + 1: [ + ("error_code", int16Serializer), + ("name", stringSerializer), + ("is_internal", booleanSerializer), + ("partitions", ArraySerializer(partitionsSerializers[1])), + ("topic_authorized_operations", DummySerializer(int())), + ], + 2: [ + ("error_code", int16Serializer), + ("name", stringSerializer), + ("is_internal", booleanSerializer), + ("partitions", ArraySerializer(partitionsSerializers[2])), + ("topic_authorized_operations", DummySerializer(int())), + ], + 3: [ + ("error_code", int16Serializer), + ("name", stringSerializer), + ("is_internal", booleanSerializer), + ("partitions", ArraySerializer(partitionsSerializers[3])), + ("topic_authorized_operations", DummySerializer(int())), + ], + 4: [ + ("error_code", int16Serializer), + ("name", stringSerializer), + ("is_internal", booleanSerializer), + ("partitions", ArraySerializer(partitionsSerializers[4])), + ("topic_authorized_operations", DummySerializer(int())), + ], + 5: [ + ("error_code", int16Serializer), + ("name", stringSerializer), + ("is_internal", booleanSerializer), + ("partitions", ArraySerializer(partitionsSerializers[5])), + ("topic_authorized_operations", DummySerializer(int())), + ], + 6: [ + ("error_code", int16Serializer), + ("name", stringSerializer), + ("is_internal", booleanSerializer), + ("partitions", ArraySerializer(partitionsSerializers[6])), + ("topic_authorized_operations", DummySerializer(int())), + ], + 7: [ + ("error_code", int16Serializer), + ("name", stringSerializer), + ("is_internal", booleanSerializer), + ("partitions", ArraySerializer(partitionsSerializers[7])), + ("topic_authorized_operations", DummySerializer(int())), + ], + 8: [ + ("error_code", int16Serializer), + ("name", stringSerializer), + ("is_internal", booleanSerializer), + ("partitions", 
ArraySerializer(partitionsSerializers[8])), + ("topic_authorized_operations", int32Serializer), + ], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +metadataResponseDataSchemas: Dict[int, Schema] = { + 0: [ + ("brokers", ArraySerializer(brokersSerializers[0])), + ("topics", ArraySerializer(topicsSerializers[0])), + ("throttle_time_ms", DummySerializer(int())), + ("cluster_id", DummySerializer(None)), + ("controller_id", DummySerializer(int())), + ("cluster_authorized_operations", DummySerializer(int())), + ], + 1: [ + ("brokers", ArraySerializer(brokersSerializers[1])), + ("controller_id", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[1])), + ("throttle_time_ms", DummySerializer(int())), + ("cluster_id", DummySerializer(None)), + ("cluster_authorized_operations", DummySerializer(int())), + ], + 2: [ + ("brokers", ArraySerializer(brokersSerializers[2])), + ("cluster_id", nullableStringSerializer), + ("controller_id", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[2])), + ("throttle_time_ms", DummySerializer(int())), + ("cluster_authorized_operations", DummySerializer(int())), + ], + 3: [ + ("throttle_time_ms", int32Serializer), + ("brokers", ArraySerializer(brokersSerializers[3])), + ("cluster_id", nullableStringSerializer), + ("controller_id", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[3])), + ("cluster_authorized_operations", DummySerializer(int())), + ], + 4: [ + ("throttle_time_ms", int32Serializer), + ("brokers", ArraySerializer(brokersSerializers[4])), + ("cluster_id", nullableStringSerializer), + ("controller_id", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[4])), + ("cluster_authorized_operations", DummySerializer(int())), + ], + 5: [ + ("throttle_time_ms", int32Serializer), + ("brokers", ArraySerializer(brokersSerializers[5])), + ("cluster_id", nullableStringSerializer), + ("controller_id", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[5])), + ("cluster_authorized_operations", DummySerializer(int())), + ], + 6: [ + ("throttle_time_ms", int32Serializer), + ("brokers", ArraySerializer(brokersSerializers[6])), + ("cluster_id", nullableStringSerializer), + ("controller_id", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[6])), + ("cluster_authorized_operations", DummySerializer(int())), + ], + 7: [ + ("throttle_time_ms", int32Serializer), + ("brokers", ArraySerializer(brokersSerializers[7])), + ("cluster_id", nullableStringSerializer), + ("controller_id", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[7])), + ("cluster_authorized_operations", DummySerializer(int())), + ], + 8: [ + ("throttle_time_ms", int32Serializer), + ("brokers", ArraySerializer(brokersSerializers[8])), + ("cluster_id", nullableStringSerializer), + ("controller_id", int32Serializer), + ("topics", ArraySerializer(topicsSerializers[8])), + ("cluster_authorized_operations", int32Serializer), + ], +} + + +metadataResponseDataSerializers: Dict[int, BaseSerializer[MetadataResponseData]] = { + version: NamedTupleSerializer(MetadataResponseData, schema) + for version, schema in metadataResponseDataSchemas.items() +} diff --git a/esque/protocol/api/offset_commit.py b/esque/protocol/api/offset_commit.py new file mode 100644 index 00000000..9f5568f6 --- /dev/null +++ b/esque/protocol/api/offset_commit.py @@ -0,0 +1,292 @@ +# FIXME autogenerated module, check for errors! 
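+#
+# NOTE: Partitions and Topics are each defined twice in this module (request
+# side first, response side second), so the later definitions shadow the
+# earlier ones at module scope; worth checking when cleaning up the generated
+# code.
+#
+# Construction sketch for the request, with illustrative field values and the
+# nested topics list elided:
+#
+#     request = OffsetCommitRequestData(group_id="my-group", generation_id=1,
+#                                       member_id="member-1",
+#                                       group_instance_id=None, topics=[...])
+#     serializer = offsetCommitRequestDataSerializers[7]
+#
+from typing import Optional  # FIXME: added by hand, "Optional" is used in annotations below but was not emitted by the generator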
+from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + int64Serializer, + nullableStringSerializer, + stringSerializer, +) + + +@dataclass +class Partitions: + # The partition index. + partition_index: "int" # INT32 + + # The message offset to be committed. + committed_offset: "int" # INT64 + + # The leader epoch of this partition. + committed_leader_epoch: "int" # INT32 + + # Any associated metadata the client wants to keep. + committed_metadata: "Optional[str]" # NULLABLE_STRING + + +@dataclass +class Topics: + # The topic name. + name: "str" # STRING + + # Each partition to commit offsets for. + partitions: List["Partitions"] + + +@dataclass +class OffsetCommitRequestData(RequestData): + # The unique group identifier. + group_id: "str" # STRING + + # The generation of the group. + generation_id: "int" # INT32 + + # The member ID assigned by the group coordinator. + member_id: "str" # STRING + + # The unique identifier of the consumer instance provided by end user. + group_instance_id: "Optional[str]" # NULLABLE_STRING + + # The topics to commit offsets for. + topics: List["Topics"] + + @staticmethod + def api_key() -> int: + return ApiKey.OFFSET_COMMIT # == 8 + + +@dataclass +class Partitions: + # The partition index. + partition_index: "int" # INT32 + + # The error code, or 0 if there was no error. + error_code: "int" # INT16 + + +@dataclass +class Topics: + # The topic name. + name: "str" # STRING + + # The responses for each partition in the topic. + partitions: List["Partitions"] + + +@dataclass +class OffsetCommitResponseData(ResponseData): + # The duration in milliseconds for which the request was throttled due to a quota violation, or zero + # if the request did not violate any quota. + throttle_time_ms: "int" # INT32 + + # The responses for each topic. 
+ topics: List["Topics"] + + @staticmethod + def api_key() -> int: + return ApiKey.OFFSET_COMMIT # == 8 + + +partitionsSchemas: Dict[int, Schema] = { + 0: [ + ("partition_index", int32Serializer), + ("committed_offset", int64Serializer), + ("committed_metadata", nullableStringSerializer), + ("committed_leader_epoch", DummySerializer(int())), + ], + 1: [ + ("partition_index", int32Serializer), + ("committed_offset", int64Serializer), + (None, int64Serializer), + ("committed_metadata", nullableStringSerializer), + ("committed_leader_epoch", DummySerializer(int())), + ], + 2: [ + ("partition_index", int32Serializer), + ("committed_offset", int64Serializer), + ("committed_metadata", nullableStringSerializer), + ("committed_leader_epoch", DummySerializer(int())), + ], + 3: [ + ("partition_index", int32Serializer), + ("committed_offset", int64Serializer), + ("committed_metadata", nullableStringSerializer), + ("committed_leader_epoch", DummySerializer(int())), + ], + 4: [ + ("partition_index", int32Serializer), + ("committed_offset", int64Serializer), + ("committed_metadata", nullableStringSerializer), + ("committed_leader_epoch", DummySerializer(int())), + ], + 5: [ + ("partition_index", int32Serializer), + ("committed_offset", int64Serializer), + ("committed_metadata", nullableStringSerializer), + ("committed_leader_epoch", DummySerializer(int())), + ], + 6: [ + ("partition_index", int32Serializer), + ("committed_offset", int64Serializer), + ("committed_leader_epoch", int32Serializer), + ("committed_metadata", nullableStringSerializer), + ], + 7: [ + ("partition_index", int32Serializer), + ("committed_offset", int64Serializer), + ("committed_leader_epoch", int32Serializer), + ("committed_metadata", nullableStringSerializer), + ], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[0]))], + 1: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[1]))], + 2: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[2]))], + 3: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[3]))], + 4: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[4]))], + 5: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[5]))], + 6: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[6]))], + 7: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[7]))], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +offsetCommitRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("group_id", stringSerializer), + ("topics", ArraySerializer(topicsSerializers[0])), + ("generation_id", DummySerializer(int())), + ("member_id", DummySerializer(str())), + ("group_instance_id", DummySerializer(None)), + ], + 1: [ + ("group_id", stringSerializer), + ("generation_id", int32Serializer), + ("member_id", stringSerializer), + ("topics", ArraySerializer(topicsSerializers[1])), + ("group_instance_id", DummySerializer(None)), + ], + 2: [ + ("group_id", stringSerializer), + ("generation_id", int32Serializer), + ("member_id", stringSerializer), + (None, int64Serializer), + ("topics", 
ArraySerializer(topicsSerializers[2])), + ("group_instance_id", DummySerializer(None)), + ], + 3: [ + ("group_id", stringSerializer), + ("generation_id", int32Serializer), + ("member_id", stringSerializer), + (None, int64Serializer), + ("topics", ArraySerializer(topicsSerializers[3])), + ("group_instance_id", DummySerializer(None)), + ], + 4: [ + ("group_id", stringSerializer), + ("generation_id", int32Serializer), + ("member_id", stringSerializer), + (None, int64Serializer), + ("topics", ArraySerializer(topicsSerializers[4])), + ("group_instance_id", DummySerializer(None)), + ], + 5: [ + ("group_id", stringSerializer), + ("generation_id", int32Serializer), + ("member_id", stringSerializer), + ("topics", ArraySerializer(topicsSerializers[5])), + ("group_instance_id", DummySerializer(None)), + ], + 6: [ + ("group_id", stringSerializer), + ("generation_id", int32Serializer), + ("member_id", stringSerializer), + ("topics", ArraySerializer(topicsSerializers[6])), + ("group_instance_id", DummySerializer(None)), + ], + 7: [ + ("group_id", stringSerializer), + ("generation_id", int32Serializer), + ("member_id", stringSerializer), + ("group_instance_id", nullableStringSerializer), + ("topics", ArraySerializer(topicsSerializers[7])), + ], +} + + +offsetCommitRequestDataSerializers: Dict[int, BaseSerializer[OffsetCommitRequestData]] = { + version: NamedTupleSerializer(OffsetCommitRequestData, schema) + for version, schema in offsetCommitRequestDataSchemas.items() +} + + +partitionsSchemas: Dict[int, Schema] = { + 0: [("partition_index", int32Serializer), ("error_code", int16Serializer)], + 1: [("partition_index", int32Serializer), ("error_code", int16Serializer)], + 2: [("partition_index", int32Serializer), ("error_code", int16Serializer)], + 3: [("partition_index", int32Serializer), ("error_code", int16Serializer)], + 4: [("partition_index", int32Serializer), ("error_code", int16Serializer)], + 5: [("partition_index", int32Serializer), ("error_code", int16Serializer)], + 6: [("partition_index", int32Serializer), ("error_code", int16Serializer)], + 7: [("partition_index", int32Serializer), ("error_code", int16Serializer)], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[0]))], + 1: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[1]))], + 2: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[2]))], + 3: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[3]))], + 4: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[4]))], + 5: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[5]))], + 6: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[6]))], + 7: [("name", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[7]))], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +offsetCommitResponseDataSchemas: Dict[int, Schema] = { + 0: [("topics", ArraySerializer(topicsSerializers[0])), ("throttle_time_ms", DummySerializer(int()))], + 1: [("topics", ArraySerializer(topicsSerializers[1])), ("throttle_time_ms", DummySerializer(int()))], + 
2: [("topics", ArraySerializer(topicsSerializers[2])), ("throttle_time_ms", DummySerializer(int()))], + 3: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(topicsSerializers[3]))], + 4: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(topicsSerializers[4]))], + 5: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(topicsSerializers[5]))], + 6: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(topicsSerializers[6]))], + 7: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(topicsSerializers[7]))], +} + + +offsetCommitResponseDataSerializers: Dict[int, BaseSerializer[OffsetCommitResponseData]] = { + version: NamedTupleSerializer(OffsetCommitResponseData, schema) + for version, schema in offsetCommitResponseDataSchemas.items() +} diff --git a/esque/protocol/api/offset_fetch.py b/esque/protocol/api/offset_fetch.py new file mode 100644 index 00000000..47dc04f4 --- /dev/null +++ b/esque/protocol/api/offset_fetch.py @@ -0,0 +1,244 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + int64Serializer, + nullableStringSerializer, + stringSerializer, +) + + +@dataclass +class Partitions: + # Topic partition id + partition: "int" # INT32 + + +@dataclass +class Topics: + # Name of topic + topic: "str" # STRING + + # Partitions to fetch offsets. + partitions: List["Partitions"] + + +@dataclass +class OffsetFetchRequestData(RequestData): + # The unique group identifier + group_id: "str" # STRING + + # Topics to fetch offsets. If the topic array is null fetch offsets for all topics. + topics: List["Topics"] + + @staticmethod + def api_key() -> int: + return ApiKey.OFFSET_FETCH # == 9 + + +@dataclass +class PartitionResponses: + # Topic partition id + partition: "int" # INT32 + + # Message offset to be committed + offset: "int" # INT64 + + # The leader epoch, if provided is derived from the last consumed record. This is used by the consumer + # to check for log truncation and to ensure partition metadata is up to date following a group + # rebalance. + leader_epoch: "int" # INT32 + + # Any associated metadata the client wants to keep. 
+ metadata: "Optional[str]" # NULLABLE_STRING + + # Response error code + error_code: "int" # INT16 + + +@dataclass +class Responses: + # Name of topic + topic: "str" # STRING + + # Responses by partition for fetched offsets + partition_responses: List["PartitionResponses"] + + +@dataclass +class OffsetFetchResponseData(ResponseData): + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + # Responses by topic for fetched offsets + responses: List["Responses"] + + # Response error code + error_code: "int" # INT16 + + @staticmethod + def api_key() -> int: + return ApiKey.OFFSET_FETCH # == 9 + + +partitionsSchemas: Dict[int, Schema] = { + 0: [("partition", int32Serializer)], + 1: [("partition", int32Serializer)], + 2: [("partition", int32Serializer)], + 3: [("partition", int32Serializer)], + 4: [("partition", int32Serializer)], + 5: [("partition", int32Serializer)], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[0]))], + 1: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[1]))], + 2: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[2]))], + 3: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[3]))], + 4: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[4]))], + 5: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[5]))], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +offsetFetchRequestDataSchemas: Dict[int, Schema] = { + 0: [("group_id", stringSerializer), ("topics", ArraySerializer(topicsSerializers[0]))], + 1: [("group_id", stringSerializer), ("topics", ArraySerializer(topicsSerializers[1]))], + 2: [("group_id", stringSerializer), ("topics", ArraySerializer(topicsSerializers[2]))], + 3: [("group_id", stringSerializer), ("topics", ArraySerializer(topicsSerializers[3]))], + 4: [("group_id", stringSerializer), ("topics", ArraySerializer(topicsSerializers[4]))], + 5: [("group_id", stringSerializer), ("topics", ArraySerializer(topicsSerializers[5]))], +} + + +offsetFetchRequestDataSerializers: Dict[int, BaseSerializer[OffsetFetchRequestData]] = { + version: NamedTupleSerializer(OffsetFetchRequestData, schema) + for version, schema in offsetFetchRequestDataSchemas.items() +} + + +partitionResponsesSchemas: Dict[int, Schema] = { + 0: [ + ("partition", int32Serializer), + ("offset", int64Serializer), + ("metadata", nullableStringSerializer), + ("error_code", int16Serializer), + ("leader_epoch", DummySerializer(int())), + ], + 1: [ + ("partition", int32Serializer), + ("offset", int64Serializer), + ("metadata", nullableStringSerializer), + ("error_code", int16Serializer), + ("leader_epoch", DummySerializer(int())), + ], + 2: [ + ("partition", int32Serializer), + ("offset", int64Serializer), + ("metadata", nullableStringSerializer), + ("error_code", int16Serializer), + ("leader_epoch", DummySerializer(int())), + ], + 3: [ + ("partition", int32Serializer), + ("offset", int64Serializer), + ("metadata", nullableStringSerializer), + ("error_code", 
int16Serializer), + ("leader_epoch", DummySerializer(int())), + ], + 4: [ + ("partition", int32Serializer), + ("offset", int64Serializer), + ("metadata", nullableStringSerializer), + ("error_code", int16Serializer), + ("leader_epoch", DummySerializer(int())), + ], + 5: [ + ("partition", int32Serializer), + ("offset", int64Serializer), + ("leader_epoch", int32Serializer), + ("metadata", nullableStringSerializer), + ("error_code", int16Serializer), + ], +} + + +partitionResponsesSerializers: Dict[int, BaseSerializer[PartitionResponses]] = { + version: NamedTupleSerializer(PartitionResponses, schema) for version, schema in partitionResponsesSchemas.items() +} + + +responsesSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[0]))], + 1: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[1]))], + 2: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[2]))], + 3: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[3]))], + 4: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[4]))], + 5: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[5]))], +} + + +responsesSerializers: Dict[int, BaseSerializer[Responses]] = { + version: NamedTupleSerializer(Responses, schema) for version, schema in responsesSchemas.items() +} + + +offsetFetchResponseDataSchemas: Dict[int, Schema] = { + 0: [ + ("responses", ArraySerializer(responsesSerializers[0])), + ("throttle_time_ms", DummySerializer(int())), + ("error_code", DummySerializer(int())), + ], + 1: [ + ("responses", ArraySerializer(responsesSerializers[1])), + ("throttle_time_ms", DummySerializer(int())), + ("error_code", DummySerializer(int())), + ], + 2: [ + ("responses", ArraySerializer(responsesSerializers[2])), + ("error_code", int16Serializer), + ("throttle_time_ms", DummySerializer(int())), + ], + 3: [ + ("throttle_time_ms", int32Serializer), + ("responses", ArraySerializer(responsesSerializers[3])), + ("error_code", int16Serializer), + ], + 4: [ + ("throttle_time_ms", int32Serializer), + ("responses", ArraySerializer(responsesSerializers[4])), + ("error_code", int16Serializer), + ], + 5: [ + ("throttle_time_ms", int32Serializer), + ("responses", ArraySerializer(responsesSerializers[5])), + ("error_code", int16Serializer), + ], +} + + +offsetFetchResponseDataSerializers: Dict[int, BaseSerializer[OffsetFetchResponseData]] = { + version: NamedTupleSerializer(OffsetFetchResponseData, schema) + for version, schema in offsetFetchResponseDataSchemas.items() +} diff --git a/esque/protocol/api/offset_for_leader_epoch.py b/esque/protocol/api/offset_for_leader_epoch.py new file mode 100644 index 00000000..413e2e86 --- /dev/null +++ b/esque/protocol/api/offset_for_leader_epoch.py @@ -0,0 +1,200 @@ +# FIXME autogenerated module, check for errors! 
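+#
+# Kafka OffsetForLeaderEpoch API (api_key 23). As in the sibling modules,
+# Partitions and Topics are defined once for the request and redefined for the
+# response, so the second definitions shadow the first at module scope.
+#
+# Construction sketch for the request side (nested topics elided):
+#
+#     request = OffsetForLeaderEpochRequestData(replica_id=-1, topics=[...])
+#     serializer = offsetForLeaderEpochRequestDataSerializers[3]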
+from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + int64Serializer, + stringSerializer, +) + + +@dataclass +class Partitions: + # Topic partition id + partition: "int" # INT32 + + # The current leader epoch, if provided, is used to fence consumers/replicas with old metadata. If the + # epoch provided by the client is larger than the current epoch known to the broker, then the + # UNKNOWN_LEADER_EPOCH error code will be returned. If the provided epoch is smaller, then the + # FENCED_LEADER_EPOCH error code will be returned. + current_leader_epoch: "int" # INT32 + + # The epoch to lookup an offset for. + leader_epoch: "int" # INT32 + + +@dataclass +class Topics: + # Name of topic + topic: "str" # STRING + + # An array of partitions to get epochs for + partitions: List["Partitions"] + + +@dataclass +class OffsetForLeaderEpochRequestData(RequestData): + # Broker id of the follower. For normal consumers, use -1. + replica_id: "int" # INT32 + + # An array of topics to get epochs for + topics: List["Topics"] + + @staticmethod + def api_key() -> int: + return ApiKey.OFFSET_FOR_LEADER_EPOCH # == 23 + + +@dataclass +class Partitions: + # Response error code + error_code: "int" # INT16 + + # Topic partition id + partition: "int" # INT32 + + # The leader epoch + leader_epoch: "int" # INT32 + + # The end offset + end_offset: "int" # INT64 + + +@dataclass +class Topics: + # Name of topic + topic: "str" # STRING + + # An array of offsets by partition + partitions: List["Partitions"] + + +@dataclass +class OffsetForLeaderEpochResponseData(ResponseData): + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + # An array of topics for which we have leader offsets for some requested partition leader epoch + topics: List["Topics"] + + @staticmethod + def api_key() -> int: + return ApiKey.OFFSET_FOR_LEADER_EPOCH # == 23 + + +partitionsSchemas: Dict[int, Schema] = { + 0: [ + ("partition", int32Serializer), + ("leader_epoch", int32Serializer), + ("current_leader_epoch", DummySerializer(int())), + ], + 1: [ + ("partition", int32Serializer), + ("leader_epoch", int32Serializer), + ("current_leader_epoch", DummySerializer(int())), + ], + 2: [("partition", int32Serializer), ("current_leader_epoch", int32Serializer), ("leader_epoch", int32Serializer)], + 3: [("partition", int32Serializer), ("current_leader_epoch", int32Serializer), ("leader_epoch", int32Serializer)], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[0]))], + 1: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[1]))], + 2: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[2]))], + 3: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[3]))], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + 
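+# Versions 0 through 2 of this request do not carry replica_id on the wire;
+# the schemas below keep the dataclass shape uniform by filling it in with a
+# DummySerializer(int()) default, while version 3 reads a real INT32 first.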
+offsetForLeaderEpochRequestDataSchemas: Dict[int, Schema] = { + 0: [("topics", ArraySerializer(topicsSerializers[0])), ("replica_id", DummySerializer(int()))], + 1: [("topics", ArraySerializer(topicsSerializers[1])), ("replica_id", DummySerializer(int()))], + 2: [("topics", ArraySerializer(topicsSerializers[2])), ("replica_id", DummySerializer(int()))], + 3: [("replica_id", int32Serializer), ("topics", ArraySerializer(topicsSerializers[3]))], +} + + +offsetForLeaderEpochRequestDataSerializers: Dict[int, BaseSerializer[OffsetForLeaderEpochRequestData]] = { + version: NamedTupleSerializer(OffsetForLeaderEpochRequestData, schema) + for version, schema in offsetForLeaderEpochRequestDataSchemas.items() +} + + +partitionsSchemas: Dict[int, Schema] = { + 0: [ + ("error_code", int16Serializer), + ("partition", int32Serializer), + ("end_offset", int64Serializer), + ("leader_epoch", DummySerializer(int())), + ], + 1: [ + ("error_code", int16Serializer), + ("partition", int32Serializer), + ("leader_epoch", int32Serializer), + ("end_offset", int64Serializer), + ], + 2: [ + ("error_code", int16Serializer), + ("partition", int32Serializer), + ("leader_epoch", int32Serializer), + ("end_offset", int64Serializer), + ], + 3: [ + ("error_code", int16Serializer), + ("partition", int32Serializer), + ("leader_epoch", int32Serializer), + ("end_offset", int64Serializer), + ], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[0]))], + 1: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[1]))], + 2: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[2]))], + 3: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[3]))], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +offsetForLeaderEpochResponseDataSchemas: Dict[int, Schema] = { + 0: [("topics", ArraySerializer(topicsSerializers[0])), ("throttle_time_ms", DummySerializer(int()))], + 1: [("topics", ArraySerializer(topicsSerializers[1])), ("throttle_time_ms", DummySerializer(int()))], + 2: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(topicsSerializers[2]))], + 3: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(topicsSerializers[3]))], +} + + +offsetForLeaderEpochResponseDataSerializers: Dict[int, BaseSerializer[OffsetForLeaderEpochResponseData]] = { + version: NamedTupleSerializer(OffsetForLeaderEpochResponseData, schema) + for version, schema in offsetForLeaderEpochResponseDataSchemas.items() +} diff --git a/esque/protocol/api/produce.py b/esque/protocol/api/produce.py new file mode 100644 index 00000000..8372cc47 --- /dev/null +++ b/esque/protocol/api/produce.py @@ -0,0 +1,286 @@ +# FIXME autogenerated module, check for errors! 
+from typing import Dict, List, Optional + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData + +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + int64Serializer, + nullableStringSerializer, + recordsSerializer, + stringSerializer, +) + + +@dataclass +class Data: + # Topic partition id + partition: "int" # INT32 + + record_set: "Records" # RECORDS + + +@dataclass +class TopicData: + # Name of topic + topic: "str" # STRING + + data: List["Data"] + + +@dataclass +class ProduceRequestData(RequestData): + # The transactional id or null if the producer is not transactional + transactional_id: "Optional[str]" # NULLABLE_STRING + + # The number of acknowledgments the producer requires the leader to have received before considering a + # request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the + # full ISR. + acks: "int" # INT16 + + # The time to await a response in ms. + timeout: "int" # INT32 + + topic_data: List["TopicData"] + + @staticmethod + def api_key() -> int: + return ApiKey.PRODUCE # == 0 + + +@dataclass +class PartitionResponses: + # Topic partition id + partition: "int" # INT32 + + # Response error code + error_code: "int" # INT16 + + base_offset: "int" # INT64 + + # The timestamp returned by broker after appending the messages. If CreateTime is used for the topic, + # the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker + # local time when the messages are appended. + log_append_time: "int" # INT64 + + # The start offset of the log at the time this produce response was created + log_start_offset: "int" # INT64 + + +@dataclass +class Responses: + # Name of topic + topic: "str" # STRING + + partition_responses: List["PartitionResponses"] + + +@dataclass +class ProduceResponseData(ResponseData): + responses: List["Responses"] + + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + @staticmethod + def api_key() -> int: + return ApiKey.PRODUCE # == 0 + + +dataSchemas: Dict[int, Schema] = { + 0: [("partition", int32Serializer), ("record_set", recordsSerializer)], + 1: [("partition", int32Serializer), ("record_set", recordsSerializer)], + 2: [("partition", int32Serializer), ("record_set", recordsSerializer)], + 3: [("partition", int32Serializer), ("record_set", recordsSerializer)], + 4: [("partition", int32Serializer), ("record_set", recordsSerializer)], + 5: [("partition", int32Serializer), ("record_set", recordsSerializer)], + 6: [("partition", int32Serializer), ("record_set", recordsSerializer)], + 7: [("partition", int32Serializer), ("record_set", recordsSerializer)], +} + + +dataSerializers: Dict[int, BaseSerializer[Data]] = { + version: NamedTupleSerializer(Data, schema) for version, schema in dataSchemas.items() +} + + +topicDataSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("data", ArraySerializer(dataSerializers[0]))], + 1: [("topic", stringSerializer), ("data", ArraySerializer(dataSerializers[1]))], + 2: [("topic", stringSerializer), ("data", ArraySerializer(dataSerializers[2]))], + 3: [("topic", stringSerializer), ("data", ArraySerializer(dataSerializers[3]))], + 4: [("topic", stringSerializer), ("data", ArraySerializer(dataSerializers[4]))], + 5: [("topic", stringSerializer), ("data",
ArraySerializer(dataSerializers[5]))], + 6: [("topic", stringSerializer), ("data", ArraySerializer(dataSerializers[6]))], + 7: [("topic", stringSerializer), ("data", ArraySerializer(dataSerializers[7]))], +} + + +topicDataSerializers: Dict[int, BaseSerializer[TopicData]] = { + version: NamedTupleSerializer(TopicData, schema) for version, schema in topicDataSchemas.items() +} + + +produceRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("acks", int16Serializer), + ("timeout", int32Serializer), + ("topic_data", ArraySerializer(topicDataSerializers[0])), + ("transactional_id", DummySerializer(None)), + ], + 1: [ + ("acks", int16Serializer), + ("timeout", int32Serializer), + ("topic_data", ArraySerializer(topicDataSerializers[1])), + ("transactional_id", DummySerializer(None)), + ], + 2: [ + ("acks", int16Serializer), + ("timeout", int32Serializer), + ("topic_data", ArraySerializer(topicDataSerializers[2])), + ("transactional_id", DummySerializer(None)), + ], + 3: [ + ("transactional_id", nullableStringSerializer), + ("acks", int16Serializer), + ("timeout", int32Serializer), + ("topic_data", ArraySerializer(topicDataSerializers[3])), + ], + 4: [ + ("transactional_id", nullableStringSerializer), + ("acks", int16Serializer), + ("timeout", int32Serializer), + ("topic_data", ArraySerializer(topicDataSerializers[4])), + ], + 5: [ + ("transactional_id", nullableStringSerializer), + ("acks", int16Serializer), + ("timeout", int32Serializer), + ("topic_data", ArraySerializer(topicDataSerializers[5])), + ], + 6: [ + ("transactional_id", nullableStringSerializer), + ("acks", int16Serializer), + ("timeout", int32Serializer), + ("topic_data", ArraySerializer(topicDataSerializers[6])), + ], + 7: [ + ("transactional_id", nullableStringSerializer), + ("acks", int16Serializer), + ("timeout", int32Serializer), + ("topic_data", ArraySerializer(topicDataSerializers[7])), + ], +} + + +produceRequestDataSerializers: Dict[int, BaseSerializer[ProduceRequestData]] = { + version: NamedTupleSerializer(ProduceRequestData, schema) for version, schema in produceRequestDataSchemas.items() +} + + +partitionResponsesSchemas: Dict[int, Schema] = { + 0: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("base_offset", int64Serializer), + ("log_append_time", DummySerializer(int())), + ("log_start_offset", DummySerializer(int())), + ], + 1: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("base_offset", int64Serializer), + ("log_append_time", DummySerializer(int())), + ("log_start_offset", DummySerializer(int())), + ], + 2: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("base_offset", int64Serializer), + ("log_append_time", int64Serializer), + ("log_start_offset", DummySerializer(int())), + ], + 3: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("base_offset", int64Serializer), + ("log_append_time", int64Serializer), + ("log_start_offset", DummySerializer(int())), + ], + 4: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("base_offset", int64Serializer), + ("log_append_time", int64Serializer), + ("log_start_offset", DummySerializer(int())), + ], + 5: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("base_offset", int64Serializer), + ("log_append_time", int64Serializer), + ("log_start_offset", int64Serializer), + ], + 6: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("base_offset", int64Serializer), + ("log_append_time", int64Serializer), + 
("log_start_offset", int64Serializer), + ], + 7: [ + ("partition", int32Serializer), + ("error_code", int16Serializer), + ("base_offset", int64Serializer), + ("log_append_time", int64Serializer), + ("log_start_offset", int64Serializer), + ], +} + + +partitionResponsesSerializers: Dict[int, BaseSerializer[PartitionResponses]] = { + version: NamedTupleSerializer(PartitionResponses, schema) for version, schema in partitionResponsesSchemas.items() +} + + +responsesSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[0]))], + 1: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[1]))], + 2: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[2]))], + 3: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[3]))], + 4: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[4]))], + 5: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[5]))], + 6: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[6]))], + 7: [("topic", stringSerializer), ("partition_responses", ArraySerializer(partitionResponsesSerializers[7]))], +} + + +responsesSerializers: Dict[int, BaseSerializer[Responses]] = { + version: NamedTupleSerializer(Responses, schema) for version, schema in responsesSchemas.items() +} + + +produceResponseDataSchemas: Dict[int, Schema] = { + 0: [("responses", ArraySerializer(responsesSerializers[0])), ("throttle_time_ms", DummySerializer(int()))], + 1: [("responses", ArraySerializer(responsesSerializers[1])), ("throttle_time_ms", int32Serializer)], + 2: [("responses", ArraySerializer(responsesSerializers[2])), ("throttle_time_ms", int32Serializer)], + 3: [("responses", ArraySerializer(responsesSerializers[3])), ("throttle_time_ms", int32Serializer)], + 4: [("responses", ArraySerializer(responsesSerializers[4])), ("throttle_time_ms", int32Serializer)], + 5: [("responses", ArraySerializer(responsesSerializers[5])), ("throttle_time_ms", int32Serializer)], + 6: [("responses", ArraySerializer(responsesSerializers[6])), ("throttle_time_ms", int32Serializer)], + 7: [("responses", ArraySerializer(responsesSerializers[7])), ("throttle_time_ms", int32Serializer)], +} + + +produceResponseDataSerializers: Dict[int, BaseSerializer[ProduceResponseData]] = { + version: NamedTupleSerializer(ProduceResponseData, schema) + for version, schema in produceResponseDataSchemas.items() +} diff --git a/esque/protocol/api/renew_delegation_token.py b/esque/protocol/api/renew_delegation_token.py new file mode 100644 index 00000000..c04d2e89 --- /dev/null +++ b/esque/protocol/api/renew_delegation_token.py @@ -0,0 +1,69 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + BaseSerializer, + NamedTupleSerializer, + Schema, + bytesSerializer, + int16Serializer, + int32Serializer, + int64Serializer, +) + + +@dataclass +class RenewDelegationTokenRequestData(RequestData): + # HMAC of the delegation token to be renewed. + hmac: "bytes" # BYTES + + # Renew time period in milli seconds. 
+ renew_time_period: "int" # INT64 + + @staticmethod + def api_key() -> int: + return ApiKey.RENEW_DELEGATION_TOKEN # == 39 + + +@dataclass +class RenewDelegationTokenResponseData(ResponseData): + # Response error code + error_code: "int" # INT16 + + # Timestamp (in msec) at which this token expires. + expiry_timestamp: "int" # INT64 + + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + @staticmethod + def api_key() -> int: + return ApiKey.RENEW_DELEGATION_TOKEN # == 39 + + +renewDelegationTokenRequestDataSchemas: Dict[int, Schema] = { + 0: [("hmac", bytesSerializer), ("renew_time_period", int64Serializer)], + 1: [("hmac", bytesSerializer), ("renew_time_period", int64Serializer)], +} + + +renewDelegationTokenRequestDataSerializers: Dict[int, BaseSerializer[RenewDelegationTokenRequestData]] = { + version: NamedTupleSerializer(RenewDelegationTokenRequestData, schema) + for version, schema in renewDelegationTokenRequestDataSchemas.items() +} + + +renewDelegationTokenResponseDataSchemas: Dict[int, Schema] = { + 0: [("error_code", int16Serializer), ("expiry_timestamp", int64Serializer), ("throttle_time_ms", int32Serializer)], + 1: [("error_code", int16Serializer), ("expiry_timestamp", int64Serializer), ("throttle_time_ms", int32Serializer)], +} + + +renewDelegationTokenResponseDataSerializers: Dict[int, BaseSerializer[RenewDelegationTokenResponseData]] = { + version: NamedTupleSerializer(RenewDelegationTokenResponseData, schema) + for version, schema in renewDelegationTokenResponseDataSchemas.items() +} diff --git a/esque/protocol/api/sasl_authenticate.py b/esque/protocol/api/sasl_authenticate.py new file mode 100644 index 00000000..4523862a --- /dev/null +++ b/esque/protocol/api/sasl_authenticate.py @@ -0,0 +1,79 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, Optional + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData + +from esque.protocol.serializers import ( + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + bytesSerializer, + int16Serializer, + int64Serializer, + nullableStringSerializer, +) + + +@dataclass +class SaslAuthenticateRequestData(RequestData): + # The SASL authentication bytes from the client, as defined by the SASL mechanism. + auth_bytes: "bytes" # BYTES + + @staticmethod + def api_key() -> int: + return ApiKey.SASL_AUTHENTICATE # == 36 + + +@dataclass +class SaslAuthenticateResponseData(ResponseData): + # The error code, or 0 if there was no error. + error_code: "int" # INT16 + + # The error message, or null if there was no error. + error_message: "Optional[str]" # NULLABLE_STRING + + # The SASL authentication bytes from the server, as defined by the SASL mechanism. + auth_bytes: "bytes" # BYTES + + # The SASL authentication session lifetime in milliseconds.
+ session_lifetime_ms: "int" # INT64 + + @staticmethod + def api_key() -> int: + return ApiKey.SASL_AUTHENTICATE # == 36 + + +saslAuthenticateRequestDataSchemas: Dict[int, Schema] = { + 0: [("auth_bytes", bytesSerializer)], + 1: [("auth_bytes", bytesSerializer)], +} + + +saslAuthenticateRequestDataSerializers: Dict[int, BaseSerializer[SaslAuthenticateRequestData]] = { + version: NamedTupleSerializer(SaslAuthenticateRequestData, schema) + for version, schema in saslAuthenticateRequestDataSchemas.items() +} + + +saslAuthenticateResponseDataSchemas: Dict[int, Schema] = { + 0: [ + ("error_code", int16Serializer), + ("error_message", nullableStringSerializer), + ("auth_bytes", bytesSerializer), + ("session_lifetime_ms", DummySerializer(int())), + ], + 1: [ + ("error_code", int16Serializer), + ("error_message", nullableStringSerializer), + ("auth_bytes", bytesSerializer), + ("session_lifetime_ms", int64Serializer), + ], +} + + +saslAuthenticateResponseDataSerializers: Dict[int, BaseSerializer[SaslAuthenticateResponseData]] = { + version: NamedTupleSerializer(SaslAuthenticateResponseData, schema) + for version, schema in saslAuthenticateResponseDataSchemas.items() +} diff --git a/esque/protocol/api/sasl_handshake.py b/esque/protocol/api/sasl_handshake.py new file mode 100644 index 00000000..442f2472 --- /dev/null +++ b/esque/protocol/api/sasl_handshake.py @@ -0,0 +1,61 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + stringSerializer, +) + + +@dataclass +class SaslHandshakeRequestData(RequestData): + # The SASL mechanism chosen by the client. + mechanism: "str" # STRING + + @staticmethod + def api_key() -> int: + return ApiKey.SASL_HANDSHAKE # == 17 + + +@dataclass +class SaslHandshakeResponseData(ResponseData): + # The error code, or 0 if there was no error. + error_code: "int" # INT16 + + # The mechanisms enabled in the server. + mechanisms: List["str"] # STRING + + @staticmethod + def api_key() -> int: + return ApiKey.SASL_HANDSHAKE # == 17 + + +saslHandshakeRequestDataSchemas: Dict[int, Schema] = { + 0: [("mechanism", stringSerializer)], + 1: [("mechanism", stringSerializer)], +} + + +saslHandshakeRequestDataSerializers: Dict[int, BaseSerializer[SaslHandshakeRequestData]] = { + version: NamedTupleSerializer(SaslHandshakeRequestData, schema) + for version, schema in saslHandshakeRequestDataSchemas.items() +} + + +saslHandshakeResponseDataSchemas: Dict[int, Schema] = { + 0: [("error_code", int16Serializer), ("mechanisms", ArraySerializer(stringSerializer))], + 1: [("error_code", int16Serializer), ("mechanisms", ArraySerializer(stringSerializer))], +} + + +saslHandshakeResponseDataSerializers: Dict[int, BaseSerializer[SaslHandshakeResponseData]] = { + version: NamedTupleSerializer(SaslHandshakeResponseData, schema) + for version, schema in saslHandshakeResponseDataSchemas.items() +} diff --git a/esque/protocol/api/stop_replica.py b/esque/protocol/api/stop_replica.py new file mode 100644 index 00000000..7a2153d7 --- /dev/null +++ b/esque/protocol/api/stop_replica.py @@ -0,0 +1,132 @@ +# FIXME autogenerated module, check for errors! 
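[Editor's note: the DummySerializer(...) entries in these schemas (session_lifetime_ms in the v0 SaslAuthenticate response above, partition_ids in the v0 StopReplica request below) stand in for fields that are absent from the wire format of that API version. A sketch of the contract the schemas appear to rely on; the real class lives in esque.protocol.serializers and may differ:

    class DummySerializerSketch:
        # Hypothetical illustration: never touches the wire, always yields its default.
        def __init__(self, default):
            self.default = default

        def encode(self, value) -> bytes:
            return b""  # the field does not exist in this API version, write nothing

        def read(self, stream):
            return self.default  # nothing to read either, so fill in the default
]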
+from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + booleanSerializer, + int16Serializer, + int32Serializer, + int64Serializer, + stringSerializer, +) + + +@dataclass +class Partitions: + # Name of topic + topic: "str" # STRING + + # The partition ids of a topic + partition_ids: List["int"] # INT32 + + +@dataclass +class StopReplicaRequestData(RequestData): + # The controller id + controller_id: "int" # INT32 + + # The controller epoch + controller_epoch: "int" # INT32 + + # The broker epoch + broker_epoch: "int" # INT64 + + # Boolean which indicates if replica's partitions must be deleted. + delete_partitions: "bool" # BOOLEAN + + # The partitions + partitions: List["Partitions"] + + @staticmethod + def api_key() -> int: + return ApiKey.STOP_REPLICA # == 5 + + +@dataclass +class Partitions: + # Name of topic + topic: "str" # STRING + + # Topic partition id + partition: "int" # INT32 + + # Response error code + error_code: "int" # INT16 + + +@dataclass +class StopReplicaResponseData(ResponseData): + # Response error code + error_code: "int" # INT16 + + # Response for the requests partitions + partitions: List["Partitions"] + + @staticmethod + def api_key() -> int: + return ApiKey.STOP_REPLICA # == 5 + + +partitionsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), (None, int32Serializer), ("partition_ids", DummySerializer([]))], + 1: [("topic", stringSerializer), ("partition_ids", ArraySerializer(int32Serializer))], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +stopReplicaRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("controller_id", int32Serializer), + ("controller_epoch", int32Serializer), + ("delete_partitions", booleanSerializer), + ("partitions", ArraySerializer(partitionsSerializers[0])), + ("broker_epoch", DummySerializer(int())), + ], + 1: [ + ("controller_id", int32Serializer), + ("controller_epoch", int32Serializer), + ("broker_epoch", int64Serializer), + ("delete_partitions", booleanSerializer), + ("partitions", ArraySerializer(partitionsSerializers[1])), + ], +} + + +stopReplicaRequestDataSerializers: Dict[int, BaseSerializer[StopReplicaRequestData]] = { + version: NamedTupleSerializer(StopReplicaRequestData, schema) + for version, schema in stopReplicaRequestDataSchemas.items() +} + + +partitionsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partition", int32Serializer), ("error_code", int16Serializer)], + 1: [("topic", stringSerializer), ("partition", int32Serializer), ("error_code", int16Serializer)], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +stopReplicaResponseDataSchemas: Dict[int, Schema] = { + 0: [("error_code", int16Serializer), ("partitions", ArraySerializer(partitionsSerializers[0]))], + 1: [("error_code", int16Serializer), ("partitions", ArraySerializer(partitionsSerializers[1]))], +} + + +stopReplicaResponseDataSerializers: Dict[int, BaseSerializer[StopReplicaResponseData]] = { + version: NamedTupleSerializer(StopReplicaResponseData, schema) + for version, schema in stopReplicaResponseDataSchemas.items() +} diff --git 
a/esque/protocol/api/sync_group.py b/esque/protocol/api/sync_group.py new file mode 100644 index 00000000..825f6b4c --- /dev/null +++ b/esque/protocol/api/sync_group.py @@ -0,0 +1,135 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List, Optional + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData + +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + bytesSerializer, + int16Serializer, + int32Serializer, + nullableStringSerializer, + stringSerializer, +) + + +@dataclass +class Assignments: + # The ID of the member to assign. + member_id: "str" # STRING + + # The member assignment. + assignment: "bytes" # BYTES + + +@dataclass +class SyncGroupRequestData(RequestData): + # The unique group identifier. + group_id: "str" # STRING + + # The generation of the group. + generation_id: "int" # INT32 + + # The ID of the member to assign. + member_id: "str" # STRING + + # The unique identifier of the consumer instance provided by the end user. + group_instance_id: "Optional[str]" # NULLABLE_STRING + + # Each assignment. + assignments: List["Assignments"] + + @staticmethod + def api_key() -> int: + return ApiKey.SYNC_GROUP # == 14 + + +@dataclass +class SyncGroupResponseData(ResponseData): + # The duration in milliseconds for which the request was throttled due to a quota violation, or zero + # if the request did not violate any quota. + throttle_time_ms: "int" # INT32 + + # The error code, or 0 if there was no error. + error_code: "int" # INT16 + + # The member assignment. + assignment: "bytes" # BYTES + + @staticmethod + def api_key() -> int: + return ApiKey.SYNC_GROUP # == 14 + + +assignmentsSchemas: Dict[int, Schema] = { + 0: [("member_id", stringSerializer), ("assignment", bytesSerializer)], + 1: [("member_id", stringSerializer), ("assignment", bytesSerializer)], + 2: [("member_id", stringSerializer), ("assignment", bytesSerializer)], + 3: [("member_id", stringSerializer), ("assignment", bytesSerializer)], +} + + +assignmentsSerializers: Dict[int, BaseSerializer[Assignments]] = { + version: NamedTupleSerializer(Assignments, schema) for version, schema in assignmentsSchemas.items() +} + + +syncGroupRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("group_id", stringSerializer), + ("generation_id", int32Serializer), + ("member_id", stringSerializer), + ("assignments", ArraySerializer(assignmentsSerializers[0])), + ("group_instance_id", DummySerializer(None)), + ], + 1: [ + ("group_id", stringSerializer), + ("generation_id", int32Serializer), + ("member_id", stringSerializer), + ("assignments", ArraySerializer(assignmentsSerializers[1])), + ("group_instance_id", DummySerializer(None)), + ], + 2: [ + ("group_id", stringSerializer), + ("generation_id", int32Serializer), + ("member_id", stringSerializer), + ("assignments", ArraySerializer(assignmentsSerializers[2])), + ("group_instance_id", DummySerializer(None)), + ], + 3: [ + ("group_id", stringSerializer), + ("generation_id", int32Serializer), + ("member_id", stringSerializer), + ("group_instance_id", nullableStringSerializer), + ("assignments", ArraySerializer(assignmentsSerializers[3])), + ], +} + + +syncGroupRequestDataSerializers: Dict[int, BaseSerializer[SyncGroupRequestData]] = { + version: NamedTupleSerializer(SyncGroupRequestData, schema) + for version, schema in syncGroupRequestDataSchemas.items() +} + + +syncGroupResponseDataSchemas: Dict[int, Schema] = { + 0: [ + ("error_code",
int16Serializer), + ("assignment", bytesSerializer), + ("throttle_time_ms", DummySerializer(int())), + ], + 1: [("throttle_time_ms", int32Serializer), ("error_code", int16Serializer), ("assignment", bytesSerializer)], + 2: [("throttle_time_ms", int32Serializer), ("error_code", int16Serializer), ("assignment", bytesSerializer)], + 3: [("throttle_time_ms", int32Serializer), ("error_code", int16Serializer), ("assignment", bytesSerializer)], +} + + +syncGroupResponseDataSerializers: Dict[int, BaseSerializer[SyncGroupResponseData]] = { + version: NamedTupleSerializer(SyncGroupResponseData, schema) + for version, schema in syncGroupResponseDataSchemas.items() +} diff --git a/esque/protocol/api/txn_offset_commit.py b/esque/protocol/api/txn_offset_commit.py new file mode 100644 index 00000000..de17297a --- /dev/null +++ b/esque/protocol/api/txn_offset_commit.py @@ -0,0 +1,205 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List, Optional + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData + +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + int64Serializer, + nullableStringSerializer, + stringSerializer, +) + + +@dataclass +class Partitions: + # Topic partition id + partition: "int" # INT32 + + # Message offset to be committed + offset: "int" # INT64 + + # The leader epoch, if provided, is derived from the last consumed record. This is used by the consumer + # to check for log truncation and to ensure partition metadata is up to date following a group + # rebalance. + leader_epoch: "int" # INT32 + + # Any associated metadata the client wants to keep. + metadata: "Optional[str]" # NULLABLE_STRING + + +@dataclass +class Topics: + # Name of topic + topic: "str" # STRING + + # Partitions to commit offsets + partitions: List["Partitions"] + + +@dataclass +class TxnOffsetCommitRequestData(RequestData): + # The transactional id corresponding to the transaction. + transactional_id: "str" # STRING + + # The unique group identifier + group_id: "str" # STRING + + # Current producer id in use by the transactional id. + producer_id: "int" # INT64 + + # Current epoch associated with the producer id.
+ producer_epoch: "int" # INT16 + + # Topics to commit offsets + topics: List["Topics"] + + @staticmethod + def api_key() -> int: + return ApiKey.TXN_OFFSET_COMMIT # == 28 + + +@dataclass +class Partitions: + # Topic partition id + partition: "int" # INT32 + + # Response error code + error_code: "int" # INT16 + + +@dataclass +class Topics: + # Name of topic + topic: "str" # STRING + + # Responses by partition for committed offsets + partitions: List["Partitions"] + + +@dataclass +class TxnOffsetCommitResponseData(ResponseData): + # Duration in milliseconds for which the request was throttled due to quota violation (Zero if the + # request did not violate any quota) + throttle_time_ms: "int" # INT32 + + # Responses by topic for committed offsets + topics: List["Topics"] + + @staticmethod + def api_key() -> int: + return ApiKey.TXN_OFFSET_COMMIT # == 28 + + +partitionsSchemas: Dict[int, Schema] = { + 0: [ + ("partition", int32Serializer), + ("offset", int64Serializer), + ("metadata", nullableStringSerializer), + ("leader_epoch", DummySerializer(int())), + ], + 1: [ + ("partition", int32Serializer), + ("offset", int64Serializer), + ("metadata", nullableStringSerializer), + ("leader_epoch", DummySerializer(int())), + ], + 2: [ + ("partition", int32Serializer), + ("offset", int64Serializer), + ("leader_epoch", int32Serializer), + ("metadata", nullableStringSerializer), + ], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[0]))], + 1: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[1]))], + 2: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[2]))], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +txnOffsetCommitRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("transactional_id", stringSerializer), + ("group_id", stringSerializer), + ("producer_id", int64Serializer), + ("producer_epoch", int16Serializer), + ("topics", ArraySerializer(topicsSerializers[0])), + ], + 1: [ + ("transactional_id", stringSerializer), + ("group_id", stringSerializer), + ("producer_id", int64Serializer), + ("producer_epoch", int16Serializer), + ("topics", ArraySerializer(topicsSerializers[1])), + ], + 2: [ + ("transactional_id", stringSerializer), + ("group_id", stringSerializer), + ("producer_id", int64Serializer), + ("producer_epoch", int16Serializer), + ("topics", ArraySerializer(topicsSerializers[2])), + ], +} + + +txnOffsetCommitRequestDataSerializers: Dict[int, BaseSerializer[TxnOffsetCommitRequestData]] = { + version: NamedTupleSerializer(TxnOffsetCommitRequestData, schema) + for version, schema in txnOffsetCommitRequestDataSchemas.items() +} + + +partitionsSchemas: Dict[int, Schema] = { + 0: [("partition", int32Serializer), ("error_code", int16Serializer)], + 1: [("partition", int32Serializer), ("error_code", int16Serializer)], + 2: [("partition", int32Serializer), ("error_code", int16Serializer)], +} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partitions", 
ArraySerializer(partitionsSerializers[0]))], + 1: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[1]))], + 2: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[2]))], +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +txnOffsetCommitResponseDataSchemas: Dict[int, Schema] = { + 0: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(topicsSerializers[0]))], + 1: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(topicsSerializers[1]))], + 2: [("throttle_time_ms", int32Serializer), ("topics", ArraySerializer(topicsSerializers[2]))], +} + + +txnOffsetCommitResponseDataSerializers: Dict[int, BaseSerializer[TxnOffsetCommitResponseData]] = { + version: NamedTupleSerializer(TxnOffsetCommitResponseData, schema) + for version, schema in txnOffsetCommitResponseDataSchemas.items() +} diff --git a/esque/protocol/api/update_metadata.py b/esque/protocol/api/update_metadata.py new file mode 100644 index 00000000..363e39de --- /dev/null +++ b/esque/protocol/api/update_metadata.py @@ -0,0 +1,350 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List, Optional + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData + +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + DummySerializer, + NamedTupleSerializer, + Schema, + int16Serializer, + int32Serializer, + int64Serializer, + nullableStringSerializer, + stringSerializer, +) + + +@dataclass +class PartitionStates: + # Topic partition id + partition: "int" # INT32 + + # The controller epoch + controller_epoch: "int" # INT32 + + # The broker id for the leader. + leader: "int" # INT32 + + # The leader epoch. + leader_epoch: "int" # INT32 + + # The in sync replica ids. + isr: List["int"] # INT32 + + # The ZK version. + zk_version: "int" # INT32 + + # The replica ids. + replicas: List["int"] # INT32 + + # The offline replica ids + offline_replicas: List["int"] # INT32 + + +@dataclass +class TopicStates: + # Name of topic + topic: "str" # STRING + + # Partition states + partition_states: List["PartitionStates"] + + +@dataclass +class EndPoints: + # The port on which the broker accepts requests. + port: "int" # INT32 + + # The hostname of the broker. + host: "str" # STRING + + # The listener name. + listener_name: "str" # STRING + + # The security protocol type.
+ security_protocol_type: "int" # INT16 + + +@dataclass +class LiveBrokers: + # The broker id + id: "int" # INT32 + + # The endpoints + end_points: List["EndPoints"] + + # The rack + rack: "Optional[str]" # NULLABLE_STRING + + +@dataclass +class UpdateMetadataRequestData(RequestData): + # The controller id + controller_id: "int" # INT32 + + # The controller epoch + controller_epoch: "int" # INT32 + + # The broker epoch + broker_epoch: "int" # INT64 + + # Topic states + topic_states: List["TopicStates"] + + # Live brokers + live_brokers: List["LiveBrokers"] + + @staticmethod + def api_key() -> int: + return ApiKey.UPDATE_METADATA # == 6 + + +@dataclass +class UpdateMetadataResponseData(ResponseData): + # Response error code + error_code: "int" # INT16 + + @staticmethod + def api_key() -> int: + return ApiKey.UPDATE_METADATA # == 6 + + +partitionStatesSchemas: Dict[int, Schema] = { + 0: [ + (None, stringSerializer), + ("partition", int32Serializer), + ("controller_epoch", int32Serializer), + ("leader", int32Serializer), + ("leader_epoch", int32Serializer), + ("isr", ArraySerializer(int32Serializer)), + ("zk_version", int32Serializer), + ("replicas", ArraySerializer(int32Serializer)), + ("offline_replicas", DummySerializer([])), + ], + 1: [ + (None, stringSerializer), + ("partition", int32Serializer), + ("controller_epoch", int32Serializer), + ("leader", int32Serializer), + ("leader_epoch", int32Serializer), + ("isr", ArraySerializer(int32Serializer)), + ("zk_version", int32Serializer), + ("replicas", ArraySerializer(int32Serializer)), + ("offline_replicas", DummySerializer([])), + ], + 2: [ + (None, stringSerializer), + ("partition", int32Serializer), + ("controller_epoch", int32Serializer), + ("leader", int32Serializer), + ("leader_epoch", int32Serializer), + ("isr", ArraySerializer(int32Serializer)), + ("zk_version", int32Serializer), + ("replicas", ArraySerializer(int32Serializer)), + ("offline_replicas", DummySerializer([])), + ], + 3: [ + (None, stringSerializer), + ("partition", int32Serializer), + ("controller_epoch", int32Serializer), + ("leader", int32Serializer), + ("leader_epoch", int32Serializer), + ("isr", ArraySerializer(int32Serializer)), + ("zk_version", int32Serializer), + ("replicas", ArraySerializer(int32Serializer)), + ("offline_replicas", DummySerializer([])), + ], + 4: [ + (None, stringSerializer), + ("partition", int32Serializer), + ("controller_epoch", int32Serializer), + ("leader", int32Serializer), + ("leader_epoch", int32Serializer), + ("isr", ArraySerializer(int32Serializer)), + ("zk_version", int32Serializer), + ("replicas", ArraySerializer(int32Serializer)), + ("offline_replicas", ArraySerializer(int32Serializer)), + ], + 5: [ + ("partition", int32Serializer), + ("controller_epoch", int32Serializer), + ("leader", int32Serializer), + ("leader_epoch", int32Serializer), + ("isr", ArraySerializer(int32Serializer)), + ("zk_version", int32Serializer), + ("replicas", ArraySerializer(int32Serializer)), + ("offline_replicas", ArraySerializer(int32Serializer)), + ], +} + + +partitionStatesSerializers: Dict[int, BaseSerializer[PartitionStates]] = { + version: NamedTupleSerializer(PartitionStates, schema) for version, schema in partitionStatesSchemas.items() +} + + +endPointsSchemas: Dict[int, Schema] = { + 1: [ + ("port", int32Serializer), + ("host", stringSerializer), + ("security_protocol_type", int16Serializer), + ("listener_name", DummySerializer(str())), + ], + 2: [ + ("port", int32Serializer), + ("host", stringSerializer), + ("security_protocol_type",
int16Serializer), + ("listener_name", DummySerializer(str())), + ], + 3: [ + ("port", int32Serializer), + ("host", stringSerializer), + ("listener_name", stringSerializer), + ("security_protocol_type", int16Serializer), + ], + 4: [ + ("port", int32Serializer), + ("host", stringSerializer), + ("listener_name", stringSerializer), + ("security_protocol_type", int16Serializer), + ], + 5: [ + ("port", int32Serializer), + ("host", stringSerializer), + ("listener_name", stringSerializer), + ("security_protocol_type", int16Serializer), + ], +} + + +endPointsSerializers: Dict[int, BaseSerializer[EndPoints]] = { + version: NamedTupleSerializer(EndPoints, schema) for version, schema in endPointsSchemas.items() +} + + +liveBrokersSchemas: Dict[int, Schema] = { + 0: [ + ("id", int32Serializer), + (None, stringSerializer), + (None, int32Serializer), + ("end_points", DummySerializer([])), + ("rack", DummySerializer(None)), + ], + 1: [ + ("id", int32Serializer), + ("end_points", ArraySerializer(endPointsSerializers[1])), + ("rack", DummySerializer(None)), + ], + 2: [ + ("id", int32Serializer), + ("end_points", ArraySerializer(endPointsSerializers[2])), + ("rack", nullableStringSerializer), + ], + 3: [ + ("id", int32Serializer), + ("end_points", ArraySerializer(endPointsSerializers[3])), + ("rack", nullableStringSerializer), + ], + 4: [ + ("id", int32Serializer), + ("end_points", ArraySerializer(endPointsSerializers[4])), + ("rack", nullableStringSerializer), + ], + 5: [ + ("id", int32Serializer), + ("end_points", ArraySerializer(endPointsSerializers[5])), + ("rack", nullableStringSerializer), + ], +} + + +liveBrokersSerializers: Dict[int, BaseSerializer[LiveBrokers]] = { + version: NamedTupleSerializer(LiveBrokers, schema) for version, schema in liveBrokersSchemas.items() +} + + +topicStatesSchemas: Dict[int, Schema] = { + 5: [("topic", stringSerializer), ("partition_states", ArraySerializer(partitionStatesSerializers[5]))] +} + + +topicStatesSerializers: Dict[int, BaseSerializer[TopicStates]] = { + version: NamedTupleSerializer(TopicStates, schema) for version, schema in topicStatesSchemas.items() +} + + +updateMetadataRequestDataSchemas: Dict[int, Schema] = { + 0: [ + ("controller_id", int32Serializer), + ("controller_epoch", int32Serializer), + (None, ArraySerializer(partitionStatesSerializers[0])), + ("live_brokers", ArraySerializer(liveBrokersSerializers[0])), + ("broker_epoch", DummySerializer(int())), + ("topic_states", DummySerializer([])), + ], + 1: [ + ("controller_id", int32Serializer), + ("controller_epoch", int32Serializer), + (None, ArraySerializer(partitionStatesSerializers[1])), + ("live_brokers", ArraySerializer(liveBrokersSerializers[1])), + ("broker_epoch", DummySerializer(int())), + ("topic_states", DummySerializer([])), + ], + 2: [ + ("controller_id", int32Serializer), + ("controller_epoch", int32Serializer), + (None, ArraySerializer(partitionStatesSerializers[2])), + ("live_brokers", ArraySerializer(liveBrokersSerializers[2])), + ("broker_epoch", DummySerializer(int())), + ("topic_states", DummySerializer([])), + ], + 3: [ + ("controller_id", int32Serializer), + ("controller_epoch", int32Serializer), + (None, ArraySerializer(partitionStatesSerializers[3])), + ("live_brokers", ArraySerializer(liveBrokersSerializers[3])), + ("broker_epoch", DummySerializer(int())), + ("topic_states", DummySerializer([])), + ], + 4: [ + ("controller_id", int32Serializer), + ("controller_epoch", int32Serializer), + (None, ArraySerializer(partitionStatesSerializers[4])), + ("live_brokers", 
ArraySerializer(liveBrokersSerializers[4])), + ("broker_epoch", DummySerializer(int())), + ("topic_states", DummySerializer([])), + ], + 5: [ + ("controller_id", int32Serializer), + ("controller_epoch", int32Serializer), + ("broker_epoch", int64Serializer), + ("topic_states", ArraySerializer(topicStatesSerializers[5])), + ("live_brokers", ArraySerializer(liveBrokersSerializers[5])), + ], +} + + +updateMetadataRequestDataSerializers: Dict[int, BaseSerializer[UpdateMetadataRequestData]] = { + version: NamedTupleSerializer(UpdateMetadataRequestData, schema) + for version, schema in updateMetadataRequestDataSchemas.items() +} + + +updateMetadataResponseDataSchemas: Dict[int, Schema] = { + 0: [("error_code", int16Serializer)], + 1: [("error_code", int16Serializer)], + 2: [("error_code", int16Serializer)], + 3: [("error_code", int16Serializer)], + 4: [("error_code", int16Serializer)], + 5: [("error_code", int16Serializer)], +} + + +updateMetadataResponseDataSerializers: Dict[int, BaseSerializer[UpdateMetadataResponseData]] = { + version: NamedTupleSerializer(UpdateMetadataResponseData, schema) + for version, schema in updateMetadataResponseDataSchemas.items() +} diff --git a/esque/protocol/api/write_txn_markers.py b/esque/protocol/api/write_txn_markers.py new file mode 100644 index 00000000..71d595e8 --- /dev/null +++ b/esque/protocol/api/write_txn_markers.py @@ -0,0 +1,163 @@ +# FIXME autogenerated module, check for errors! +from typing import Dict, List + +from dataclasses import dataclass + +from esque.protocol.api.base import ApiKey, RequestData, ResponseData +from esque.protocol.serializers import ( + ArraySerializer, + BaseSerializer, + NamedTupleSerializer, + Schema, + booleanSerializer, + int16Serializer, + int32Serializer, + int64Serializer, + stringSerializer, +) + + +@dataclass +class Topics: + # Name of topic + topic: "str" # STRING + + partitions: List["int"] # INT32 + + +@dataclass +class TransactionMarkers: + # Current producer id in use by the transactional id. + producer_id: "int" # INT64 + + # Current epoch associated with the producer id. + producer_epoch: "int" # INT16 + + # The result of the transaction to write to the partitions (false = ABORT, true = COMMIT). + transaction_result: "bool" # BOOLEAN + + # The partitions to write markers for. + topics: List["Topics"] + + # Epoch associated with the transaction state partition hosted by this transaction coordinator + coordinator_epoch: "int" # INT32 + + +@dataclass +class WriteTxnMarkersRequestData(RequestData): + # The transaction markers to be written. + transaction_markers: List["TransactionMarkers"] + + @staticmethod + def api_key() -> int: + return ApiKey.WRITE_TXN_MARKERS # == 27 + + +@dataclass +class Partitions: + # Topic partition id + partition: "int" # INT32 + + # Response error code + error_code: "int" # INT16 + + +@dataclass +class Topics: + # Name of topic + topic: "str" # STRING + + partitions: List["Partitions"] + + +@dataclass +class TransactionMarkers: + # Current producer id in use by the transactional id. + producer_id: "int" # INT64 + + # Errors per partition from writing markers. + topics: List["Topics"] + + +@dataclass +class WriteTxnMarkersResponseData(ResponseData): + # Errors per partition from writing markers. 
+ transaction_markers: List["TransactionMarkers"] + + @staticmethod + def api_key() -> int: + return ApiKey.WRITE_TXN_MARKERS # == 27 + + +topicsSchemas: Dict[int, Schema] = {0: [("topic", stringSerializer), ("partitions", ArraySerializer(int32Serializer))]} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +transactionMarkersSchemas: Dict[int, Schema] = { + 0: [ + ("producer_id", int64Serializer), + ("producer_epoch", int16Serializer), + ("transaction_result", booleanSerializer), + ("topics", ArraySerializer(topicsSerializers[0])), + ("coordinator_epoch", int32Serializer), + ] +} + + +transactionMarkersSerializers: Dict[int, BaseSerializer[TransactionMarkers]] = { + version: NamedTupleSerializer(TransactionMarkers, schema) for version, schema in transactionMarkersSchemas.items() +} + + +writeTxnMarkersRequestDataSchemas: Dict[int, Schema] = { + 0: [("transaction_markers", ArraySerializer(transactionMarkersSerializers[0]))] +} + + +writeTxnMarkersRequestDataSerializers: Dict[int, BaseSerializer[WriteTxnMarkersRequestData]] = { + version: NamedTupleSerializer(WriteTxnMarkersRequestData, schema) + for version, schema in writeTxnMarkersRequestDataSchemas.items() +} + + +partitionsSchemas: Dict[int, Schema] = {0: [("partition", int32Serializer), ("error_code", int16Serializer)]} + + +partitionsSerializers: Dict[int, BaseSerializer[Partitions]] = { + version: NamedTupleSerializer(Partitions, schema) for version, schema in partitionsSchemas.items() +} + + +topicsSchemas: Dict[int, Schema] = { + 0: [("topic", stringSerializer), ("partitions", ArraySerializer(partitionsSerializers[0]))] +} + + +topicsSerializers: Dict[int, BaseSerializer[Topics]] = { + version: NamedTupleSerializer(Topics, schema) for version, schema in topicsSchemas.items() +} + + +transactionMarkersSchemas: Dict[int, Schema] = { + 0: [("producer_id", int64Serializer), ("topics", ArraySerializer(topicsSerializers[0]))] +} + + +transactionMarkersSerializers: Dict[int, BaseSerializer[TransactionMarkers]] = { + version: NamedTupleSerializer(TransactionMarkers, schema) for version, schema in transactionMarkersSchemas.items() +} + + +writeTxnMarkersResponseDataSchemas: Dict[int, Schema] = { + 0: [("transaction_markers", ArraySerializer(transactionMarkersSerializers[0]))] +} + + +writeTxnMarkersResponseDataSerializers: Dict[int, BaseSerializer[WriteTxnMarkersResponseData]] = { + version: NamedTupleSerializer(WriteTxnMarkersResponseData, schema) + for version, schema in writeTxnMarkersResponseDataSchemas.items() +} diff --git a/esque/protocol/connection.py b/esque/protocol/connection.py new file mode 100644 index 00000000..71c3f95b --- /dev/null +++ b/esque/protocol/connection.py @@ -0,0 +1,488 @@ +# noqa: F811,F401 + +import itertools as it +import queue +import socket +import warnings +from typing import BinaryIO, Dict, List, Tuple, overload + +from .api import ( + ApiKey, + ApiVersions, + Request, + RequestData, + ResponseData, + SUPPORTED_API_VERSIONS, + ProduceRequestData, + ProduceResponseData, + FetchRequestData, + FetchResponseData, + ListOffsetsRequestData, + ListOffsetsResponseData, + MetadataRequestData, + MetadataResponseData, + LeaderAndIsrRequestData, + LeaderAndIsrResponseData, + StopReplicaRequestData, + StopReplicaResponseData, + UpdateMetadataRequestData, + UpdateMetadataResponseData, + ControlledShutdownRequestData, + ControlledShutdownResponseData, + OffsetCommitRequestData, + OffsetCommitResponseData, + 
OffsetFetchRequestData, + OffsetFetchResponseData, + FindCoordinatorRequestData, + FindCoordinatorResponseData, + JoinGroupRequestData, + JoinGroupResponseData, + HeartbeatRequestData, + HeartbeatResponseData, + LeaveGroupRequestData, + LeaveGroupResponseData, + SyncGroupRequestData, + SyncGroupResponseData, + DescribeGroupsRequestData, + DescribeGroupsResponseData, + ListGroupsRequestData, + ListGroupsResponseData, + SaslHandshakeRequestData, + SaslHandshakeResponseData, + ApiVersionsRequestData, + ApiVersionsResponseData, + CreateTopicsRequestData, + CreateTopicsResponseData, + DeleteTopicsRequestData, + DeleteTopicsResponseData, + DeleteRecordsRequestData, + DeleteRecordsResponseData, + InitProducerIdRequestData, + InitProducerIdResponseData, + OffsetForLeaderEpochRequestData, + OffsetForLeaderEpochResponseData, + AddPartitionsToTxnRequestData, + AddPartitionsToTxnResponseData, + AddOffsetsToTxnRequestData, + AddOffsetsToTxnResponseData, + EndTxnRequestData, + EndTxnResponseData, + WriteTxnMarkersRequestData, + WriteTxnMarkersResponseData, + TxnOffsetCommitRequestData, + TxnOffsetCommitResponseData, + DescribeAclsRequestData, + DescribeAclsResponseData, + CreateAclsRequestData, + CreateAclsResponseData, + DeleteAclsRequestData, + DeleteAclsResponseData, + DescribeConfigsRequestData, + DescribeConfigsResponseData, + AlterConfigsRequestData, + AlterConfigsResponseData, + AlterReplicaLogDirsRequestData, + AlterReplicaLogDirsResponseData, + DescribeLogDirsRequestData, + DescribeLogDirsResponseData, + SaslAuthenticateRequestData, + SaslAuthenticateResponseData, + CreatePartitionsRequestData, + CreatePartitionsResponseData, + CreateDelegationTokenRequestData, + CreateDelegationTokenResponseData, + RenewDelegationTokenRequestData, + RenewDelegationTokenResponseData, + ExpireDelegationTokenRequestData, + ExpireDelegationTokenResponseData, + DescribeDelegationTokenRequestData, + DescribeDelegationTokenResponseData, + DeleteGroupsRequestData, + DeleteGroupsResponseData, + ElectPreferredLeadersRequestData, + ElectPreferredLeadersResponseData, + IncrementalAlterConfigsRequestData, + IncrementalAlterConfigsResponseData, +) +from .serializers import int32Serializer + + +class ApiNotSupportedWarning(UserWarning): + pass + + +class BrokerConnection: + def __init__(self, address: Tuple[str, int], client_id: str): + self.kafka_io = KafkaIO.from_address(address) + self.client_id = client_id + self._correlation_id_counter = it.count() + self.api_versions: Dict[ApiKey, int] = {ApiKey.API_VERSIONS: 1} + self._query_api_versions() + + def _query_api_versions(self) -> None: + request = self.send(ApiVersionsRequestData()) + all_server_supported_versions = { + ApiKey(support_range.api_key): support_range for support_range in request.response_data.api_versions + } + server_api_keys = set(all_server_supported_versions) + client_api_keys = set(SUPPORTED_API_VERSIONS) + for api_key in server_api_keys | client_api_keys: + client_supported_version = SUPPORTED_API_VERSIONS.get(api_key, ApiVersions(api_key, -2, -1)) + server_supported_version = all_server_supported_versions.get(api_key, ApiVersions(api_key, -4, -3)) + effective_version = min(client_supported_version.max_version, server_supported_version.max_version) + + # TODO messages say something like server only supports api ... up to version -4 + # better say server doesn't support api ... PERIOD + # I'd like to do warnings/exceptions during runtime once a feature is actually needed.
This makes sure the + # client can be used for everything where the API versions match and/or are high enough. + # In the high level part, I imagine function annotations like @requires(ApiKey.LIST_OFFSETS, 2) if a + # function requires the server to support API LIST_OFFSETS of at least version 2 (see the editor's sketch below) + if effective_version < client_supported_version.min_version: + if server_supported_version.max_version == -3: + warnings.warn( + ApiNotSupportedWarning( + f"Client supports API {api_key.name} up to version {client_supported_version.max_version}, " + + f"but server does not support the API at all. You cannot use this API." + ) + ) + else: + warnings.warn( + ApiNotSupportedWarning( + f"Server only supports API {api_key.name} up to version " + f"{server_supported_version.max_version}, but client needs at least " + f"{client_supported_version.min_version}. You cannot use this API." + ) + ) + if effective_version < server_supported_version.min_version: + if client_supported_version.max_version == -1: + warnings.warn( + ApiNotSupportedWarning( + f"Server supports API {api_key.name} up to version {server_supported_version.max_version}, " + + f"but client does not support the API at all. You cannot use this API." + ) + ) + else: + warnings.warn( + ApiNotSupportedWarning( + f"Client only supports API {api_key.name} up to version " + f"{client_supported_version.max_version}, but server needs at least " + f"{server_supported_version.min_version}. You cannot use this API." + ) + ) + self.api_versions[api_key] = effective_version + + @overload + def send(self, data: ProduceRequestData) -> Request[ProduceRequestData, ProduceResponseData]: + ... + + @overload + def send(self, data: FetchRequestData) -> Request[FetchRequestData, FetchResponseData]: + ... + + @overload + def send(self, data: ListOffsetsRequestData) -> Request[ListOffsetsRequestData, ListOffsetsResponseData]: + ... + + @overload + def send(self, data: MetadataRequestData) -> Request[MetadataRequestData, MetadataResponseData]: + ... + + @overload + def send(self, data: LeaderAndIsrRequestData) -> Request[LeaderAndIsrRequestData, LeaderAndIsrResponseData]: + ... + + @overload + def send(self, data: StopReplicaRequestData) -> Request[StopReplicaRequestData, StopReplicaResponseData]: + ... + + @overload + def send(self, data: UpdateMetadataRequestData) -> Request[UpdateMetadataRequestData, UpdateMetadataResponseData]: + ... + + @overload + def send( + self, data: ControlledShutdownRequestData + ) -> Request[ControlledShutdownRequestData, ControlledShutdownResponseData]: + ... + + @overload + def send(self, data: OffsetCommitRequestData) -> Request[OffsetCommitRequestData, OffsetCommitResponseData]: + ... + + @overload + def send(self, data: OffsetFetchRequestData) -> Request[OffsetFetchRequestData, OffsetFetchResponseData]: + ... + + @overload + def send( + self, data: FindCoordinatorRequestData + ) -> Request[FindCoordinatorRequestData, FindCoordinatorResponseData]: + ... + + @overload + def send(self, data: JoinGroupRequestData) -> Request[JoinGroupRequestData, JoinGroupResponseData]: + ... + + @overload + def send(self, data: HeartbeatRequestData) -> Request[HeartbeatRequestData, HeartbeatResponseData]: + ... + + @overload + def send(self, data: LeaveGroupRequestData) -> Request[LeaveGroupRequestData, LeaveGroupResponseData]: + ... + + @overload + def send(self, data: SyncGroupRequestData) -> Request[SyncGroupRequestData, SyncGroupResponseData]: + ...
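+    # Editor's note: a hypothetical sketch of the @requires idea from the TODO in
+    # _query_api_versions above (assuming functools is imported; names and behavior
+    # are assumptions, not part of this patch):
+    #
+    #     def requires(api_key: ApiKey, min_version: int):
+    #         def decorator(func):
+    #             @functools.wraps(func)
+    #             def wrapper(self: "BrokerConnection", *args, **kwargs):
+    #                 if self.api_versions.get(api_key, -1) < min_version:
+    #                     raise RuntimeError(f"broker does not support {api_key.name} >= v{min_version}")
+    #                 return func(self, *args, **kwargs)
+    #             return wrapper
+    #         return decorator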
+ + @overload + def send(self, data: DescribeGroupsRequestData) -> Request[DescribeGroupsRequestData, DescribeGroupsResponseData]: + ... + + @overload + def send(self, data: ListGroupsRequestData) -> Request[ListGroupsRequestData, ListGroupsResponseData]: + ... + + @overload + def send(self, data: SaslHandshakeRequestData) -> Request[SaslHandshakeRequestData, SaslHandshakeResponseData]: + ... + + @overload + def send(self, data: ApiVersionsRequestData) -> Request[ApiVersionsRequestData, ApiVersionsResponseData]: + ... + + @overload + def send(self, data: CreateTopicsRequestData) -> Request[CreateTopicsRequestData, CreateTopicsResponseData]: + ... + + @overload + def send(self, data: DeleteTopicsRequestData) -> Request[DeleteTopicsRequestData, DeleteTopicsResponseData]: + ... + + @overload + def send(self, data: DeleteRecordsRequestData) -> Request[DeleteRecordsRequestData, DeleteRecordsResponseData]: + ... + + @overload + def send(self, data: InitProducerIdRequestData) -> Request[InitProducerIdRequestData, InitProducerIdResponseData]: + ... + + @overload + def send( + self, data: OffsetForLeaderEpochRequestData + ) -> Request[OffsetForLeaderEpochRequestData, OffsetForLeaderEpochResponseData]: + ... + + @overload + def send( + self, data: AddPartitionsToTxnRequestData + ) -> Request[AddPartitionsToTxnRequestData, AddPartitionsToTxnResponseData]: + ... + + @overload + def send( + self, data: AddOffsetsToTxnRequestData + ) -> Request[AddOffsetsToTxnRequestData, AddOffsetsToTxnResponseData]: + ... + + @overload + def send(self, data: EndTxnRequestData) -> Request[EndTxnRequestData, EndTxnResponseData]: + ... + + @overload + def send( + self, data: WriteTxnMarkersRequestData + ) -> Request[WriteTxnMarkersRequestData, WriteTxnMarkersResponseData]: + ... + + @overload + def send( + self, data: TxnOffsetCommitRequestData + ) -> Request[TxnOffsetCommitRequestData, TxnOffsetCommitResponseData]: + ... + + @overload + def send(self, data: DescribeAclsRequestData) -> Request[DescribeAclsRequestData, DescribeAclsResponseData]: + ... + + @overload + def send(self, data: CreateAclsRequestData) -> Request[CreateAclsRequestData, CreateAclsResponseData]: + ... + + @overload + def send(self, data: DeleteAclsRequestData) -> Request[DeleteAclsRequestData, DeleteAclsResponseData]: + ... + + @overload + def send( + self, data: DescribeConfigsRequestData + ) -> Request[DescribeConfigsRequestData, DescribeConfigsResponseData]: + ... + + @overload + def send(self, data: AlterConfigsRequestData) -> Request[AlterConfigsRequestData, AlterConfigsResponseData]: + ... + + @overload + def send( + self, data: AlterReplicaLogDirsRequestData + ) -> Request[AlterReplicaLogDirsRequestData, AlterReplicaLogDirsResponseData]: + ... + + @overload + def send( + self, data: DescribeLogDirsRequestData + ) -> Request[DescribeLogDirsRequestData, DescribeLogDirsResponseData]: + ... + + @overload + def send( + self, data: SaslAuthenticateRequestData + ) -> Request[SaslAuthenticateRequestData, SaslAuthenticateResponseData]: + ... + + @overload + def send( + self, data: CreatePartitionsRequestData + ) -> Request[CreatePartitionsRequestData, CreatePartitionsResponseData]: + ... + + @overload + def send( + self, data: CreateDelegationTokenRequestData + ) -> Request[CreateDelegationTokenRequestData, CreateDelegationTokenResponseData]: + ... + + @overload + def send( + self, data: RenewDelegationTokenRequestData + ) -> Request[RenewDelegationTokenRequestData, RenewDelegationTokenResponseData]: + ... 
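+    # Editor's note: a minimal usage sketch (hypothetical broker address; this mirrors
+    # the ApiVersions round-trip that _query_api_versions performs on connect):
+    #
+    #     with BrokerConnection(("localhost", 9092), client_id="esque-example") as conn:
+    #         request = conn.send(ApiVersionsRequestData())  # typed via the overloads
+    #         print(request.response_data.api_versions)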
+ + @overload + def send( + self, data: ExpireDelegationTokenRequestData + ) -> Request[ExpireDelegationTokenRequestData, ExpireDelegationTokenResponseData]: + ... + + @overload + def send( + self, data: DescribeDelegationTokenRequestData + ) -> Request[DescribeDelegationTokenRequestData, DescribeDelegationTokenResponseData]: + ... + + @overload + def send(self, data: DeleteGroupsRequestData) -> Request[DeleteGroupsRequestData, DeleteGroupsResponseData]: + ... + + @overload + def send( + self, data: ElectPreferredLeadersRequestData + ) -> Request[ElectPreferredLeadersRequestData, ElectPreferredLeadersResponseData]: + ... + + @overload + def send( + self, data: IncrementalAlterConfigsRequestData + ) -> Request[IncrementalAlterConfigsRequestData, IncrementalAlterConfigsResponseData]: + ... + + def send(self, request_data: RequestData) -> Request[RequestData, ResponseData]: + return self.send_many([request_data])[0] + + def send_many(self, request_data_to_send: List[RequestData]) -> List[Request]: + requests_to_send = [self._request_from_data(data) for data in request_data_to_send] + + received_requests: List[Request] = [] + + len_ = len(request_data_to_send) + if len_ == 0: + return [] + + while len(received_requests) < len_: + self._try_send_and_pop_from(requests_to_send) + self._try_receive_and_append_to(received_requests) + return received_requests + + def _try_send_and_pop_from(self, requests_to_send: List[Request]) -> None: + if requests_to_send: + try: + self.kafka_io.send(requests_to_send[0]) + del requests_to_send[0] + + if not requests_to_send: # we're now empty, flush all messages + self.kafka_io.flush() + except queue.Full: # make sure we flush so some messages can be read to make place for new ones + self.kafka_io.flush() + + def _try_receive_and_append_to(self, received_requests: List[Request]) -> None: + try: + received_requests.append(self.kafka_io.receive()) + except queue.Empty: + pass + + def _request_from_data(self, request_data: RequestData) -> Request: + api_key = request_data.api_key() + api_version = self.api_versions[api_key] + return Request.from_request_data(request_data, api_version, next(self._correlation_id_counter), self.client_id) + + def close(self): + self.kafka_io.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return self.close() + + +class KafkaIO: + def __init__(self, in_stream: BinaryIO, out_stream: BinaryIO): + self._in_flight: "queue.Queue[Request]" = queue.Queue(maxsize=10) # TODO make this configurable + self._in_stream: BinaryIO = in_stream + self._out_stream: BinaryIO = out_stream + + def send(self, request: Request) -> None: + data = request.encode_request() + self._send_req_data(request, data) + + def _send_req_data(self, request: Request, data: bytes) -> None: + self._in_flight.put(request, block=False) + self._out_stream.write(int32Serializer.encode(len(data))) + self._out_stream.write(data) + + def receive(self) -> Request: + request, data = self._receive_req_data() + request.decode_response(data) + self._in_flight.task_done() + return request + + def _receive_req_data(self) -> Tuple[Request, bytes]: + request = self._in_flight.get(block=False) + len_ = int32Serializer.read(self._in_stream) + data = self._in_stream.read(len_) + return request, data + + def flush(self): + self._out_stream.flush() + + @classmethod + def from_socket(cls, io_socket: socket.SocketType) -> "KafkaIO": + in_stream = io_socket.makefile(mode="rb", buffering=4096) + out_stream = io_socket.makefile(mode="wb", 
buffering=4096) + return cls(in_stream, out_stream) + + @classmethod + def from_address(cls, address: Tuple[str, int]) -> "KafkaIO": + io_socket = socket.create_connection(address) + return cls.from_socket(io_socket) + + def close(self): + self._in_stream.close() + self._out_stream.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return self.close() diff --git a/esque/protocol/constants.py b/esque/protocol/constants.py new file mode 100644 index 00000000..4cdcb2e5 --- /dev/null +++ b/esque/protocol/constants.py @@ -0,0 +1,240 @@ +from enum import Enum +from typing import Dict, Tuple + +_ERROR_METADATA: Dict[int, Tuple[bool, str]] = { + -1: (False, "The server experienced an unexpected error when processing the request."), + 0: (False, "No Error"), + 1: (False, "The requested offset is not within the range of offsets maintained by the server."), + 2: ( + True, + "This message has failed its CRC checksum, exceeds the valid size, has a null key for a compacted topic, " + "or is otherwise corrupt.", + ), + 3: (True, "This server does not host this topic-partition."), + 4: (False, "The requested fetch size is invalid."), + 5: (True, "There is no leader for this topic-partition as we are in the middle of a leadership election."), + 6: (True, "This server is not the leader for that topic-partition."), + 7: (True, "The request timed out."), + 8: (False, "The broker is not available."), + 9: (False, "The replica is not available for the requested topic-partition."), + 10: (False, "The request included a message larger than the max message size the server will accept."), + 11: (False, "The controller moved to another broker."), + 12: (False, "The metadata field of the offset request was too large."), + 13: (True, "The server disconnected before a response was received."), + 14: (True, "The coordinator is loading and hence can't process requests."), + 15: (True, "The coordinator is not available."), + 16: (True, "This is not the correct coordinator."), + 17: (False, "The request attempted to perform an operation on an invalid topic."), + 18: (False, "The request included message batch larger than the configured segment size on the server."), + 19: (True, "Messages are rejected since there are fewer in-sync replicas than required."), + 20: (True, "Messages are written to the log, but to fewer in-sync replicas than required."), + 21: (False, "Produce request specified an invalid value for required acks."), + 22: (False, "Specified group generation id is not valid."), + 23: ( + False, + "The group member's supported protocols are incompatible with those of existing members or first group " + "member tried to join with empty protocol type or empty protocol list.", + ), + 24: (False, "The configured groupId is invalid."), + 25: (False, "The coordinator is not aware of this member."), + 26: ( + False, + "The session timeout is not within the range allowed by the broker (as configured by " + "group.min.session.timeout.ms and group.max.session.timeout.ms).", + ), + 27: (False, "The group is rebalancing, so a rejoin is needed."), + 28: (False, "The committing offset data size is not valid."), + 29: (False, "Not authorized to access topics: [Topic authorization failed.]"), + 30: (False, "Not authorized to access group: Group authorization failed."), + 31: (False, "Cluster authorization failed."), + 32: (False, "The timestamp of the message is out of acceptable range."), + 33: (False, "The broker does not support the requested SASL mechanism."), + 34: (False, 
"Request is not valid given the current SASL state."), + 35: (False, "The version of API is not supported."), + 36: (False, "Topic with this name already exists."), + 37: (False, "Number of partitions is below 1."), + 38: (False, "Replication factor is below 1 or larger than the number of available brokers."), + 39: (False, "Replica assignment is invalid."), + 40: (False, "Configuration is invalid."), + 41: (True, "This is not the correct controller for this cluster."), + 42: ( + False, + "This most likely occurs because of a request being malformed by the client library or the message was " + "sent to an incompatible broker. See the broker logs for more details.", + ), + 43: (False, "The message format version on the broker does not support the request."), + 44: (False, "Request parameters do not satisfy the configured policy."), + 45: (False, "The broker received an out of order sequence number."), + 46: (False, "The broker received a duplicate sequence number."), + 47: ( + False, + "Producer attempted an operation with an old epoch. Either there is a newer producer with the same " + "transactionalId, or the producer's transaction has been expired by the broker.", + ), + 48: (False, "The producer attempted a transactional operation in an invalid state."), + 49: ( + False, + "The producer attempted to use a producer id which is not currently assigned to its transactional id.", + ), + 50: ( + False, + "The transaction timeout is larger than the maximum value allowed by the broker (as configured by " + "transaction.max.timeout.ms).", + ), + 51: ( + False, + "The producer attempted to update a transaction while another concurrent operation on the same " + "transaction was ongoing.", + ), + 52: ( + False, + "Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator " + "for a given producer.", + ), + 53: (False, "Transactional Id authorization failed."), + 54: (False, "Security features are disabled."), + 55: ( + False, + "The broker did not attempt to execute this operation. This may happen for batched RPCs where some " + "operations in the batch failed, causing the broker to respond without trying the rest.", + ), + 56: (True, "Disk error when trying to access log file on the disk."), + 57: (False, "The user-specified log directory is not found in the broker config."), + 58: (False, "SASL Authentication failed."), + 59: ( + False, + "This exception is raised by the broker if it could not locate the producer metadata associated with the " + "producerId in question. This could happen if, for instance, the producer's records were deleted because " + "their retention time had elapsed. 
Once the last records of the producerId are removed, the producer's "
+        "metadata is removed from the broker, and future appends by the producer will return this exception.",
+    ),
+    60: (False, "A partition reassignment is in progress."),
+    61: (False, "Delegation Token feature is not enabled."),
+    62: (False, "Delegation Token is not found on server."),
+    63: (False, "Specified Principal is not valid Owner/Renewer."),
+    64: (
+        False,
+        "Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token "
+        "authenticated channels.",
+    ),
+    65: (False, "Delegation Token authorization failed."),
+    66: (False, "Delegation Token is expired."),
+    67: (False, "Supplied principalType is not supported."),
+    68: (False, "The group is not empty."),
+    69: (False, "The group id does not exist."),
+    70: (True, "The fetch session ID was not found."),
+    71: (True, "The fetch session epoch is invalid."),
+    72: (
+        True,
+        "There is no listener on the leader broker that matches the listener on which metadata request was "
+        "processed.",
+    ),
+    73: (False, "Topic deletion is disabled."),
+    74: (True, "The leader epoch in the request is older than the epoch on the broker."),
+    75: (True, "The leader epoch in the request is newer than the epoch on the broker."),
+    76: (False, "The requesting client does not support the compression type of given partition."),
+    77: (False, "Broker epoch has changed."),
+    78: (
+        True,
+        "The leader high watermark has not caught up from a recent leader election so the offsets cannot be "
+        "guaranteed to be monotonically increasing.",
+    ),
+    79: (False, "The group member needs to have a valid member id before actually entering a consumer group."),
+    80: (True, "The preferred leader was not available."),
+    81: (
+        False,
+        "The consumer group 
already has the configured maximum number of" + " members.", + ), +} + + +class ErrorCode(Enum): + UNKNOWN_SERVER_ERROR = -1 + NONE = 0 + OFFSET_OUT_OF_RANGE = 1 + CORRUPT_MESSAGE = 2 + UNKNOWN_TOPIC_OR_PARTITION = 3 + INVALID_FETCH_SIZE = 4 + LEADER_NOT_AVAILABLE = 5 + NOT_LEADER_FOR_PARTITION = 6 + REQUEST_TIMED_OUT = 7 + BROKER_NOT_AVAILABLE = 8 + REPLICA_NOT_AVAILABLE = 9 + MESSAGE_TOO_LARGE = 10 + STALE_CONTROLLER_EPOCH = 11 + OFFSET_METADATA_TOO_LARGE = 12 + NETWORK_EXCEPTION = 13 + COORDINATOR_LOAD_IN_PROGRESS = 14 + COORDINATOR_NOT_AVAILABLE = 15 + NOT_COORDINATOR = 16 + INVALID_TOPIC_EXCEPTION = 17 + RECORD_LIST_TOO_LARGE = 18 + NOT_ENOUGH_REPLICAS = 19 + NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20 + INVALID_REQUIRED_ACKS = 21 + ILLEGAL_GENERATION = 22 + INCONSISTENT_GROUP_PROTOCOL = 23 + INVALID_GROUP_ID = 24 + UNKNOWN_MEMBER_ID = 25 + INVALID_SESSION_TIMEOUT = 26 + REBALANCE_IN_PROGRESS = 27 + INVALID_COMMIT_OFFSET_SIZE = 28 + TOPIC_AUTHORIZATION_FAILED = 29 + GROUP_AUTHORIZATION_FAILED = 30 + CLUSTER_AUTHORIZATION_FAILED = 31 + INVALID_TIMESTAMP = 32 + UNSUPPORTED_SASL_MECHANISM = 33 + ILLEGAL_SASL_STATE = 34 + UNSUPPORTED_VERSION = 35 + TOPIC_ALREADY_EXISTS = 36 + INVALID_PARTITIONS = 37 + INVALID_REPLICATION_FACTOR = 38 + INVALID_REPLICA_ASSIGNMENT = 39 + INVALID_CONFIG = 40 + NOT_CONTROLLER = 41 + INVALID_REQUEST = 42 + UNSUPPORTED_FOR_MESSAGE_FORMAT = 43 + POLICY_VIOLATION = 44 + OUT_OF_ORDER_SEQUENCE_NUMBER = 45 + DUPLICATE_SEQUENCE_NUMBER = 46 + INVALID_PRODUCER_EPOCH = 47 + INVALID_TXN_STATE = 48 + INVALID_PRODUCER_ID_MAPPING = 49 + INVALID_TRANSACTION_TIMEOUT = 50 + CONCURRENT_TRANSACTIONS = 51 + TRANSACTION_COORDINATOR_FENCED = 52 + TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53 + SECURITY_DISABLED = 54 + OPERATION_NOT_ATTEMPTED = 55 + KAFKA_STORAGE_ERROR = 56 + LOG_DIR_NOT_FOUND = 57 + SASL_AUTHENTICATION_FAILED = 58 + UNKNOWN_PRODUCER_ID = 59 + REASSIGNMENT_IN_PROGRESS = 60 + DELEGATION_TOKEN_AUTH_DISABLED = 61 + DELEGATION_TOKEN_NOT_FOUND = 62 + DELEGATION_TOKEN_OWNER_MISMATCH = 63 + DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64 + DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65 + DELEGATION_TOKEN_EXPIRED = 66 + INVALID_PRINCIPAL_TYPE = 67 + NON_EMPTY_GROUP = 68 + GROUP_ID_NOT_FOUND = 69 + FETCH_SESSION_ID_NOT_FOUND = 70 + INVALID_FETCH_SESSION_EPOCH = 71 + LISTENER_NOT_FOUND = 72 + TOPIC_DELETION_DISABLED = 73 + FENCED_LEADER_EPOCH = 74 + UNKNOWN_LEADER_EPOCH = 75 + UNSUPPORTED_COMPRESSION_TYPE = 76 + STALE_BROKER_EPOCH = 77 + OFFSET_NOT_AVAILABLE = 78 + MEMBER_ID_REQUIRED = 79 + PREFERRED_LEADER_NOT_AVAILABLE = 80 + GROUP_MAX_SIZE_REACHED = 81 + + def __init__(self, code: int): + retryable, description = _ERROR_METADATA[code] + self.retryable = retryable + self.description = description diff --git a/esque/protocol/serializers/__init__.py b/esque/protocol/serializers/__init__.py new file mode 100644 index 00000000..6a5f4abf --- /dev/null +++ b/esque/protocol/serializers/__init__.py @@ -0,0 +1,47 @@ +from .generic import ArraySerializer, DictSerializer, DummySerializer, EnumSerializer, NamedTupleSerializer +from .primitive import ( + BaseSerializer, + GenericSerializer, + PrimitiveType, + Schema, + booleanSerializer, + bytesSerializer, + get_serializer, + int16Serializer, + int32Serializer, + int64Serializer, + int8Serializer, + nullableBytesSerializer, + nullableStringSerializer, + recordsSerializer, + stringSerializer, + uint32Serializer, + varIntSerializer, + varLongSerializer, +) + +__all__ = [ + "Schema", + "GenericSerializer", + "BaseSerializer", + "booleanSerializer", + 
"bytesSerializer", + "int8Serializer", + "int16Serializer", + "int32Serializer", + "int64Serializer", + "nullableBytesSerializer", + "nullableStringSerializer", + "recordsSerializer", + "stringSerializer", + "uint32Serializer", + "varIntSerializer", + "varLongSerializer", + "get_serializer", + "PrimitiveType", + "NamedTupleSerializer", + "ArraySerializer", + "DictSerializer", + "EnumSerializer", + "DummySerializer", +] diff --git a/esque/protocol/serializers/generic.py b/esque/protocol/serializers/generic.py new file mode 100644 index 00000000..486069ff --- /dev/null +++ b/esque/protocol/serializers/generic.py @@ -0,0 +1,71 @@ +from typing import BinaryIO, Dict, Generic, List, Type, TypeVar + +from esque.protocol.serializers.primitive import BaseSerializer, Schema, int16Serializer, int32Serializer + +T = TypeVar("T") + + +class DictSerializer(Generic[T], BaseSerializer): + def __init__(self, schema: Schema): + self._schema = schema.copy() + + def encode(self, value: Dict) -> bytes: + return b"".join(serializer.encode(value[field]) for field, serializer in self._schema) + + def read(self, buffer: BinaryIO) -> Dict: + data = {} + for field, serializer in self._schema: + data[field] = serializer.read(buffer) + return data + + +class ArraySerializer(Generic[T], BaseSerializer): + def __init__(self, elem_serializer: BaseSerializer[T]): + self._elem_serializer: BaseSerializer[T] = elem_serializer + + def encode(self, elems: List[T]) -> bytes: + return int16Serializer.encode(len(elems)) + b"".join(self._elem_serializer.encode(elem) for elem in elems) + + def read(self, buffer: BinaryIO) -> List[T]: + len_ = int32Serializer.read(buffer) + return [self._elem_serializer.read(buffer) for _ in range(len_)] + + +# TODO: figure out how to properly bind T to NamedTuple, doesn't seem to work with an additional +# TypeVar(..., bound=NamedTuple) +class NamedTupleSerializer(Generic[T], DictSerializer): + def __init__(self, tuple_class: Type[T], schema: Schema): + super().__init__(schema) + self.tuple_class = tuple_class + + def encode(self, value: T) -> bytes: + return b"".join(serializer.encode(getattr(value, field)) for field, serializer in self._schema) + + def read(self, buffer: BinaryIO) -> T: + data = super().read(buffer) + data.pop(None, None) # None fields are supposed to be ignored, pop the field if one is there + return self.tuple_class(**data) + + +# TODO: see above +class EnumSerializer(Generic[T], BaseSerializer): + def __init__(self, enum_class: Type[T], serializer: BaseSerializer): + self.enum_class = enum_class + self.serializer = serializer + + def encode(self, value: T) -> bytes: + return self.serializer.encode(value.value) + + def read(self, buffer: BinaryIO) -> T: + return self.enum_class(self.serializer.read(buffer)) + + +class DummySerializer(Generic[T], BaseSerializer): + def __init__(self, value: T): + self.value = value + + def encode(self, value: T) -> bytes: + return b"" + + def read(self, buffer: BinaryIO) -> T: + return self.value diff --git a/esque/protocol/serializers/primitive.py b/esque/protocol/serializers/primitive.py new file mode 100644 index 00000000..3e5b33b4 --- /dev/null +++ b/esque/protocol/serializers/primitive.py @@ -0,0 +1,217 @@ +import struct +from abc import ABCMeta +from enum import Enum +from typing import Any, BinaryIO, Callable, Dict, Generic, List, Optional, Tuple, TypeVar + +T = TypeVar("T") +Schema = List[Tuple[Optional[str], "BaseSerializer"]] + + +class PrimitiveType(Enum): + Boolean = 1 + Int8 = 2 + Int16 = 3 + Int32 = 4 + UInt32 = 5 + Int64 = 6 + 
+    VarInt = 7
+    VarLong = 8
+    String = 9
+    NullableString = 10
+    Bytes = 11
+    NullableBytes = 12
+    Records = 13
+
+
+class BaseSerializer(Generic[T], metaclass=ABCMeta):
+    def write(self, buffer: BinaryIO, value: T):
+        buffer.write(self.encode(value))
+
+    def encode(self, value: T) -> bytes:
+        raise NotImplementedError()
+
+    def read(self, buffer: BinaryIO) -> T:
+        raise NotImplementedError()
+
+
+class PrimitiveSerializer(Generic[T], BaseSerializer):
+    def __init__(self, format_: str):
+        self._struct = struct.Struct(format_)
+
+    def encode(self, value: T) -> bytes:
+        return self._struct.pack(value)
+
+    def read(self, buffer: BinaryIO) -> T:
+        return self._struct.unpack(buffer.read(self._struct.size))[0]
+
+
+class GenericSerializer(Generic[T], BaseSerializer):
+    def __init__(self, encoder: Callable[[T], bytes], reader: Callable[[BinaryIO], T]):
+        self._encoder: Callable[[Any], bytes] = encoder
+        self._reader: Callable[[BinaryIO], Any] = reader
+
+    def encode(self, value: T) -> bytes:
+        return self._encoder(value)
+
+    def read(self, buffer: BinaryIO) -> T:
+        return self._reader(buffer)
+
+
+class VarlenZigZagSerializer(BaseSerializer):
+    def __init__(self, bits: int):
+        self.bits = bits - 1
+
+    def encode(self, value: int) -> bytes:
+        assert (
+            -2 ** self.bits <= value <= (2 ** self.bits - 1)
+        ), f"Number not in range! {-2 ** self.bits}(min) <= {value}(value) <= {(2 ** self.bits - 1)}(max)"
+
+        value = (value << 1) ^ (value >> self.bits)
+        return self.varlen_encode(value)
+
+    def read(self, buffer: BinaryIO) -> int:
+        zigzag = self.varlen_decode(buffer)
+
+        # now undo the ZigZag encoding
+        value = (zigzag // 2) ^ (-1 * (zigzag & 1))
+        return value
+
+    @staticmethod
+    def varlen_encode(value: int) -> bytes:
+        # protobuf-style varint as used by Kafka: emit the least significant 7 bits first,
+        # the msb of each byte indicates whether another byte is following
+        arr = bytearray()
+        while value > 0x7F:
+            arr.append((value & 0x7F) | 0x80)
+            value >>= 7
+        arr.append(value)
+        return bytes(arr)
+
+    @staticmethod
+    def varlen_decode(buffer: BinaryIO) -> int:
+        value = 0
+        shift = 0
+        while True:
+            # read one byte
+            byte = int8Serializer.read(buffer)
+
+            # append this byte's 7 data bits at the current position
+            value |= (byte & 0x7F) << shift
+            shift += 7
+
+            # check the msb to see if we need to continue reading
+            if not (byte & 0x80):
+                break
+        return value
+
+
+# Represents a raw sequence of bytes. First the length N is given as an INT32. Then N bytes follow.
+def encode_bytes(value: bytes) -> bytes:
+    assert value is not None, "Value cannot be None!"
+    len_ = len(value)
+    return int32Serializer.encode(len_) + value
+
+
+def read_bytes(buffer: BinaryIO) -> bytes:
+    len_ = int32Serializer.read(buffer)
+    return buffer.read(len_)
+
+
+# Represents a raw sequence of bytes or null. For non-null values, first the length N is given as an
+# INT32. Then N bytes follow. A null value is encoded with length of -1 and there are no following
+# bytes.
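To make the length-prefixed framing concrete before the implementations below, a hand-checked illustration using struct directly, matching the byte samples in the unit tests further down:

import struct

def frame_nullable_bytes(value):
    # null is encoded as the INT32 length -1 with no payload bytes
    if value is None:
        return struct.pack(">i", -1)
    return struct.pack(">i", len(value)) + value

assert frame_nullable_bytes(None) == b"\xff\xff\xff\xff"
assert frame_nullable_bytes(b"") == b"\x00\x00\x00\x00"
assert frame_nullable_bytes(b"1234") == b"\x00\x00\x00\x041234"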
+def encode_nullable_bytes(value: Optional[bytes]) -> bytes: + if value is None: + return int32Serializer.encode(-1) + return encode_bytes(value) + + +def read_nullable_bytes(buffer: BinaryIO) -> Optional[bytes]: + len_ = int32Serializer.read(buffer) + if len_ == -1: + return None + return buffer.read(len_) + + +# Represents a sequence of characters. First the length N is given as an INT16. Then N bytes follow +# which are the UTF-8 encoding of the character sequence. Length must not be negative. +def encode_string(value: str) -> bytes: + assert value is not None, "Value cannot be None!" + len_ = len(value) + return int16Serializer.encode(len_) + value.encode("utf-8") + + +def read_string(buffer: BinaryIO) -> str: + len_ = int16Serializer.read(buffer) + return buffer.read(len_).decode("utf-8") + + +# Represents a sequence of characters or null. For non-null strings, first the length N is given as an +# INT16. Then N bytes follow which are the UTF-8 encoding of the character sequence. A null value is +# encoded with length of -1 and there are no following bytes. +def encode_nullable_string(value: Optional[str]) -> bytes: + if value is None: + return int16Serializer.encode(-1) + return encode_string(value) + + +def read_nullable_string(buffer: BinaryIO) -> Optional[str]: + len_ = int16Serializer.read(buffer) + if len_ == -1: + return None + return buffer.read(len_).decode("utf-8") + + +booleanSerializer: BaseSerializer[bool] = PrimitiveSerializer("?") +int8Serializer: BaseSerializer[int] = PrimitiveSerializer(">b") +int16Serializer: BaseSerializer[int] = PrimitiveSerializer(">h") +int32Serializer: BaseSerializer[int] = PrimitiveSerializer(">i") +int64Serializer: BaseSerializer[int] = PrimitiveSerializer(">q") +uint32Serializer: BaseSerializer[int] = PrimitiveSerializer(">I") +varIntSerializer: BaseSerializer[int] = VarlenZigZagSerializer(32) +varLongSerializer: BaseSerializer[int] = VarlenZigZagSerializer(64) +nullableStringSerializer: BaseSerializer[Optional[str]] = GenericSerializer( + encode_nullable_string, read_nullable_string +) +stringSerializer: BaseSerializer[str] = GenericSerializer(encode_string, read_string) +nullableBytesSerializer: BaseSerializer[Optional[bytes]] = GenericSerializer( + encode_nullable_bytes, read_nullable_bytes +) +bytesSerializer: BaseSerializer[bytes] = GenericSerializer(encode_bytes, read_bytes) + +# Represents a sequence of Kafka records as NULLABLE_BYTES. For a detailed description of records see +# Message Sets. 
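The module-level instances above are the public vocabulary the rest of the protocol code builds on. A quick sanity check of two of them (the records alias defined just below reuses the nullable-bytes framing wholesale):

from io import BytesIO

from esque.protocol.serializers import int32Serializer, stringSerializer

assert int32Serializer.encode(42) == b"\x00\x00\x00\x2a"
assert stringSerializer.encode("ok") == b"\x00\x02ok"
assert stringSerializer.read(BytesIO(b"\x00\x02ok")) == "ok"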
+recordsSerializer: BaseSerializer[bytes] = nullableBytesSerializer
+
+SERIALIZER_MAP: Dict[PrimitiveType, BaseSerializer] = {
+    PrimitiveType.Boolean: booleanSerializer,
+    PrimitiveType.Int8: int8Serializer,
+    PrimitiveType.Int16: int16Serializer,
+    PrimitiveType.Int32: int32Serializer,
+    PrimitiveType.UInt32: uint32Serializer,
+    PrimitiveType.Int64: int64Serializer,
+    PrimitiveType.VarInt: varIntSerializer,
+    PrimitiveType.VarLong: varLongSerializer,
+    PrimitiveType.NullableString: nullableStringSerializer,
+    PrimitiveType.String: stringSerializer,
+    PrimitiveType.NullableBytes: nullableBytesSerializer,
+    PrimitiveType.Bytes: bytesSerializer,
+    PrimitiveType.Records: recordsSerializer,
+}
+
+get_serializer: Callable[[PrimitiveType], BaseSerializer] = SERIALIZER_MAP.get
diff --git a/scripts/.gitignore b/scripts/.gitignore
new file mode 100644
index 00000000..6caf68af
--- /dev/null
+++ b/scripts/.gitignore
@@ -0,0 +1 @@
+output
\ No newline at end of file
diff --git a/scripts/schema_generator.py b/scripts/schema_generator.py
new file mode 100644
index 00000000..0961e8ae
--- /dev/null
+++ b/scripts/schema_generator.py
@@ -0,0 +1,471 @@
+import subprocess
+from collections import defaultdict
+from operator import itemgetter
+from typing import Union, List, Iterator, Optional, Dict, Tuple
+from dataclasses import dataclass
+from enum import IntEnum
+
+import bs4
+import requests
+import re
+import textwrap
+
+
+class ApiKey(IntEnum):
+    PRODUCE = 0
+    FETCH = 1
+    LIST_OFFSETS = 2
+    METADATA = 3
+    LEADER_AND_ISR = 4
+    STOP_REPLICA = 5
+    UPDATE_METADATA = 6
+    CONTROLLED_SHUTDOWN = 7
+    OFFSET_COMMIT = 8
+    OFFSET_FETCH = 9
+    FIND_COORDINATOR = 10
+    JOIN_GROUP = 11
+    HEARTBEAT = 12
+    LEAVE_GROUP = 13
+    SYNC_GROUP = 14
+    DESCRIBE_GROUPS = 15
+    LIST_GROUPS = 16
+    SASL_HANDSHAKE = 17
+    API_VERSIONS = 18
+    CREATE_TOPICS = 19
+    DELETE_TOPICS = 20
+    DELETE_RECORDS = 21
+    INIT_PRODUCER_ID = 22
+    OFFSET_FOR_LEADER_EPOCH = 23
+    ADD_PARTITIONS_TO_TXN = 24
+    ADD_OFFSETS_TO_TXN = 25
+    END_TXN = 26
+    WRITE_TXN_MARKERS = 27
+    TXN_OFFSET_COMMIT = 28
+    DESCRIBE_ACLS = 29
+    CREATE_ACLS = 30
+    DELETE_ACLS = 31
+    DESCRIBE_CONFIGS = 32
+    ALTER_CONFIGS = 33
+    ALTER_REPLICA_LOG_DIRS = 34
+    DESCRIBE_LOG_DIRS = 35
+    SASL_AUTHENTICATE = 36
+    CREATE_PARTITIONS = 37
+    CREATE_DELEGATION_TOKEN = 38
+    RENEW_DELEGATION_TOKEN = 39
+    EXPIRE_DELEGATION_TOKEN = 40
+    DESCRIBE_DELEGATION_TOKEN = 41
+    DELETE_GROUPS = 42
+    ELECT_PREFERRED_LEADERS = 43
+    INCREMENTAL_ALTER_CONFIGS = 44
+
+
+heading_pattern = re.compile(r"(?P<name>\w+) API[^\(]*\(Key: (?P<api_version>\d+)\).*")
+schema_pattern = re.compile(
+    r"\s*(?P<name>\w+?)(?: (?P<kind>Request|Response) \(Version: (?P<version>\d+)\))? => (?P<field_names>.*)"
+)
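For a sense of what these two patterns capture: applied to the first line of a schema block from the protocol page, schema_pattern splits it into name, kind, version and the field list; the indented nested definition lines also match, with kind and version left as None:

m = schema_pattern.match("Produce Request (Version: 0) => acks timeout [topic_data]")
assert m.group("name") == "Produce"
assert m.group("kind") == "Request"
assert m.group("version") == "0"
assert m.group("field_names") == "acks timeout [topic_data]"

m = schema_pattern.match("  topic_data => topic [data]")
assert m.group("name") == "topic_data"
assert m.group("kind") is None and m.group("version") is None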
=> (?P.*)" +) + + +def find_api_schemas_and_descriptions(): + res = requests.get("https://kafka.apache.org/protocol") + html_doc = res.text + + soup = bs4.BeautifulSoup(html_doc, "html.parser") + + sections = soup.find_all("h5", string=heading_pattern) + + request_schemas = None + response_schemas = None + request_section = True + data = {} + + for child in sections[0].previous_sibling.next_siblings: + if child.name == "b": + if child.get_text() == "Requests:": + request_section = True + elif child.get_text() == "Responses:": + request_section = False + + if child.name == "h5": + mtch = heading_pattern.match(child.get_text()) + if not mtch: + continue + api_name = mtch.group("name") + + api_key = int(mtch.group("api_version")) + request_schemas = {} + response_schemas = {} + data[api_key] = { + "name": api_name, + "request_schemas": request_schemas, + "response_schemas": response_schemas, + } + + if child.name == "p" and child.pre: + schema = child.pre.get_text() + version = int(schema_pattern.match(schema).group("version")) + + cells = child.table.select("tr > td") + cell_texts = [c.get_text().strip() for c in cells] + rows = zip(cell_texts[0::2], cell_texts[1::2]) + description = dict(rows) + info = {"schema": schema, "description": description} + + if request_section: + request_schemas[version] = info + else: + response_schemas[version] = info + + return data + + +def count_leading_space(line): + return len(line) - len(line.lstrip()) + + +def snake_to_camelcase(name: str) -> str: + return "".join(part.capitalize() for part in name.split("_")) + + +def camel_to_snakecase(name: str) -> str: + s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() + + +# Schem example: + +# Produce Request (Version: 0) => acks timeout [topic_data] +# acks => INT16 +# timeout => INT32 +# topic_data => topic [data] +# topic => STRING +# data => partition record_set +# partition => INT32 +# record_set => RECORDS +def yield_schemas(lines: List[str], descriptions: Dict[str, str], api_key: Optional[int] = None) -> Iterator["Schema"]: + first_line = lines.pop(0) + mtch = schema_pattern.match(first_line) + if not mtch: + raise ValueError(f"line {repr(first_line)} doesn't match {repr(schema_pattern)}") + + field_names = mtch.group("field_names").split() + name = mtch.group("name") + + kind = mtch.group("kind") + if kind: + name += kind + "Data" + + fields: List["Field"] = [] + array_dimensions = [int(f.startswith("[")) for f in field_names] + + last_indent = None + while lines and field_names: + line = lines[0] + current_indent = count_leading_space(line) + line = line.strip() + if last_indent is None: + last_indent = current_indent + elif current_indent < last_indent: + assert not array_dimensions, f"{len(array_dimensions)} fields were not parsed!" + break + + field_name, type_ = line.split(" => ") + if len(lines) > 1 and (count_leading_space(lines[1]) > current_indent): + yield from yield_schemas(lines, descriptions) + type_ = snake_to_camelcase(field_name) + else: + del lines[0] + while type_.startswith("ARRAY("): + array_dimensions[-1] += 1 + type_ = type_[6:-1] + + fields.append(Field(field_name, type_, descriptions.get(field_name, None), array_dimensions.pop(0))) + + if kind: + assert api_key is not None, "Parsing api schema but no api key provided!" 
+        yield ApiSchema(name, fields, api_key, int(mtch.group("version")), kind)
+    else:
+        yield Schema(snake_to_camelcase(name), fields)
+
+
+def parse_schema(schema: str, descriptions: Dict[str, str], api_key: int) -> "List[Union[Schema, ApiSchema]]":
+    return list(yield_schemas(schema.splitlines(), descriptions, api_key))
+
+
+@dataclass(unsafe_hash=True)
+class Field:
+    name: str
+    type: str
+    description: Optional[str]
+    array_dimensions: int
+
+
+@dataclass(unsafe_hash=True)
+class Schema:
+    name: str
+    fields: List["Field"]
+
+    @property
+    def field_names(self) -> List[str]:
+        return [f.name for f in self.fields]
+
+
+@dataclass(unsafe_hash=True)
+class ApiSchema(Schema):
+    api_key: int
+    version: int
+    kind: str
+
+
+TYPEMAP = {
+    "BOOLEAN": "bool",
+    "INT8": "int",
+    "INT16": "int",
+    "INT32": "int",
+    "INT64": "int",
+    "UINT32": "int",
+    "VARINT": "int",
+    "VARLONG": "int",
+    "STRING": "str",
+    "NULLABLE_STRING": "Optional[str]",
+    "BYTES": "bytes",
+    "NULLABLE_BYTES": "Optional[bytes]",
+    "RECORDS": "Records",
+}
+
+
+def render_schema(schema: Union[ApiSchema, Schema]) -> str:
+    lines: List[str] = ["", "@dataclass"]
+    if isinstance(schema, ApiSchema):
+        lines.append(f"class {schema.name}({schema.kind}Data):")
+    else:
+        lines.append(f"class {schema.name}:")
+
+    def render_type(field: Field) -> str:
+        type_ = field.type
+        real_type = TYPEMAP.get(type_, type_)
+
+        type_str = f'"{real_type}"'
+        for _ in range(field.array_dimensions):
+            type_str = f"List[{type_str}]"
+
+        if real_type != type_:
+            type_str += f"  # {type_}"
+        return type_str
+
+    def render_description(description: Optional[str]) -> List[str]:
+        if not description or description == "null":
+            return []
+        return [f"    # {line}" for line in textwrap.wrap(description, width=100)]
+
+    def render_field(field: Field) -> List[str]:
+        lines = []
+        lines.extend(render_description(field.description))
+        lines.append(f"    {field.name}: {render_type(field)}")
+        lines.append("")
+        return lines
+
+    for field in schema.fields:
+        lines.extend(render_field(field))
+
+    if isinstance(schema, ApiSchema):
+        lines.extend(
+            [
+                "    @staticmethod",
+                "    def api_key() -> int:",
+                f"        return ApiKey.{ApiKey(schema.api_key).name}  # == {schema.api_key}",
+                "",
+            ]
+        )
+
+    return "\n".join(lines)
+
+
+def main():
+    data = find_api_schemas_and_descriptions()
+    names = []
+    for api_key in ApiKey:
+        name_snake = api_key.name.lower()
+        name_camel = snake_to_camelcase(name_snake)
+        name_camel_lower = name_camel[0].lower() + name_camel[1:]
+        names.append((name_snake, name_camel, name_camel_lower))
+
+    with open("./output/__init__.py", "w") as o:
+        o.write("from io import BytesIO\n")
+        o.write("from typing import BinaryIO, Dict, Generic, Optional, TypeVar\n")
+        o.write("from .base import (\n")
+        o.write("    ApiKey,\n")
+        o.write("    RequestData,\n")
+        o.write("    RequestHeader,\n")
+        o.write("    ResponseData,\n")
+        o.write("    ResponseHeader,\n")
+        o.write("    requestHeaderSerializer,\n")
+        o.write("    responseHeaderSerializer,\n")
+        o.write(")\n")
+        o.write("from ..serializers import BaseSerializer\n")
+        for name_snake, name_camel, name_camel_lower in names:
+            o.write(f"from .{name_snake} import (\n")
+            o.write(f"    {name_camel}RequestData,\n")
+            o.write(f"    {name_camel}ResponseData,\n")
+            o.write(f"    {name_camel_lower}RequestDataSerializers,\n")
+            o.write(f"    {name_camel_lower}ResponseDataSerializers,\n")
+            o.write(")\n")
+        o.write("\n\n")
+
+        o.write("REQUEST_SERIALIZERS: Dict[ApiKey, Dict[int, BaseSerializer[RequestData]]] = {\n")
+        for name_snake, name_camel, name_camel_lower in names:
o.write(f" ApiKey.{name_snake.upper()}: {name_camel_lower}RequestDataSerializers,\n") + o.write("}\n\n") + + o.write("RESPONSE_SERIALIZERS: Dict[ApiKey, Dict[int, BaseSerializer[ResponseData]]] = {\n") + for name_snake, name_camel, name_camel_lower in names: + o.write(f" ApiKey.{name_snake.upper()}: {name_camel_lower}ResponseDataSerializers,\n") + o.write("}\n\n") + + with open("./output/overload.py", "w") as o: + o.write("from .api import (\n") + o.write("ApiKey,\n") + o.write("ApiVersions,\n") + o.write("Request,\n") + o.write("RequestData,\n") + o.write("ResponseData,\n") + o.write("SUPPORTED_API_VERSIONS,\n") + for _, name_camel, _ in names: + o.write(f"{name_camel}RequestData,\n") + o.write(f"{name_camel}ResponseData,\n") + o.write(")\n\n") + o.write("class BrokerConnection:\n") + for name_snake, name_camel, name_camel_lower in names: + o.write(" @overload\n") + o.write( + f" def send(self, data: {name_camel}RequestData) -> Request[{name_camel}RequestData, {name_camel}ResponseData]:\n" + ) + o.write(" ...\n\n") + + for api_key, api_data in data.items(): + name = api_data["name"] + max_version = max(api_data["request_schemas"]) + assert max_version == max( + api_data["response_schemas"] + ), f"Max request version {max_version} != max response version {max(api_data['response_schemas'])}!" + + request_data = api_data["request_schemas"][max_version] + response_data = api_data["response_schemas"][max_version] + + filename = f"./output/{camel_to_snakecase(name)}.py" + with open(filename, "w") as fp: + fp.write("# FIXME autogenerated module, check for errors!\n") + fp.write("from dataclasses import dataclass\n") + fp.write("from typing import Dict, Tuple, List, Optional\n") + fp.write("\n") + fp.write("from esque.protocol.api.base import *\n") + fp.write("from esque.protocol.serializers import *\n") + fp.write("\n") + try: + schemas = parse_schema(request_data["schema"], request_data["description"], api_key) + except: + print(request_data["schema"]) + raise + try: + response_schemas = parse_schema(response_data["schema"], response_data["description"], api_key) + schemas.extend(response_schemas) + except: + print(response_data["schema"]) + raise + + for schema in schemas: + fp.write(render_schema(schema) + "\n") + + all_schemas: Dict[str, List[Tuple[int, Schema]]] = defaultdict(list) + + for version, d in api_data["request_schemas"].items(): + for schema in parse_schema(d["schema"], d["description"], api_key): + all_schemas[schema.name].append((version, schema)) + + for version_schemas in all_schemas.values(): + fp.write(render_serializer_schemas(version_schemas) + "\n") + + all_schemas: Dict[str, List[Tuple[int, Schema]]] = defaultdict(list) + + for version, d in api_data["response_schemas"].items(): + for schema in parse_schema(d["schema"], d["description"], api_key): + all_schemas[schema.name].append((version, schema)) + + for version_schemas in all_schemas.values(): + fp.write(render_serializer_schemas(version_schemas) + "\n") + subprocess.call(["black", f"./output"]) + + +SERIALIZER_MAP = { + "BOOLEAN": "booleanSerializer", + "INT8": "int8Serializer", + "INT16": "int16Serializer", + "INT32": "int32Serializer", + "INT64": "int64Serializer", + "UINT32": "uint32Serializer", + "VARINT": "varIntSerializer", + "VARLONG": "varLongSerializer", + "STRING": "stringSerializer", + "NULLABLE_STRING": "nullableStringSerializer", + "BYTES": "bytesSerializer", + "NULLABLE_BYTES": "nullableBytesSerializer", + "RECORDS": "recordsSerializer", +} + + +def render_serializer(field: Field, version: int) -> 
str:
+    if field.type in SERIALIZER_MAP:
+        serializer_name = SERIALIZER_MAP[field.type]
+    else:
+        serializer_name = field.type[0].lower() + field.type[1:] + f"Serializers[{version}]"
+
+    for _ in range(field.array_dimensions):
+        serializer_name = f"ArraySerializer({serializer_name})"
+    return serializer_name
+
+
+def render_dummy(field: Field) -> str:
+    if field.array_dimensions:
+        return "DummySerializer([])"
+    # nullable types (and RECORDS, which is an alias for NULLABLE_BYTES) default to None,
+    # since their Optional[...] annotations cannot be instantiated
+    if field.type in ("NULLABLE_STRING", "NULLABLE_BYTES", "RECORDS"):
+        return "DummySerializer(None)"
+    if field.type in TYPEMAP:
+        return f"DummySerializer({TYPEMAP[field.type]}())"
+    return "DummySerializer(None)"
+
+
+def render_serializer_schemas(schemas: List[Tuple[int, Schema]]) -> str:
+    schemas = sorted(schemas, key=itemgetter(0))
+    tgt_schema = schemas[-1][1]
+    name = tgt_schema.name
+    lines = ["", name[0].lower() + name[1:] + "Schemas: Dict[int, Schema] = {"]
+
+    for version, schema in schemas:
+        lines.append(f"    {version}: [")
+        for field in schema.fields:
+            if field.name not in tgt_schema.field_names:
+                line = f"(None, {render_serializer(field, version)}),"
+            else:
+                line = f"({field.name!r}, {render_serializer(field, version)}),"
+
+            lines.append("        " + line)
+
+        for field in tgt_schema.fields:
+            if field.name not in schema.field_names:
+                lines.append(f"        ({field.name!r}, {render_dummy(field)}),")
+
+        lines.append("    ],")
+    lines.append("}")
+    lines.append("")
+    lines.append("")
+    lines.extend(
+        [
+            name[0].lower() + name[1:] + f"Serializers: Dict[int, BaseSerializer[{name}]] = {{",
+            f"    version: NamedTupleSerializer({name}, schema)",
+            f"    for version, schema in {name[0].lower() + name[1:]}Schemas.items()",
+            "}",
+            "",
+        ]
+    )
+    return "\n".join(lines)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/setup.py b/setup.py
index 54d2f8a8..c89be3e1 100644
--- a/setup.py
+++ b/setup.py
@@ -36,6 +36,9 @@
     "avro-python3==1.8.2",
 ]
 
+if sys.version_info < (3, 7, 0):
+    required.append("dataclasses")
+
 
 class InstallWithPostCommand(install):
     """Post-installation for installation mode."""
@@ -61,7 +64,10 @@ def run(self):
     python_requires=">=3.6",
     setup_requires=[],
     install_requires=required,
-    extras_require={"test": ["pytest", "pytest-mock", "pytest-cov"], "dev": ["black", "flake8"]},
+    extras_require={
+        "test": ["pytest", "pytest-mock", "pytest-cov"],
+        "dev": ["black", "flake8", "beautifulsoup4", "requests"],
+    },
     include_package_data=True,
     license="MIT",
     classifiers=[
diff --git a/tests/integration/test_protocol_messages.py b/tests/integration/test_protocol_messages.py
new file mode 100644
index 00000000..99a48083
--- /dev/null
+++ b/tests/integration/test_protocol_messages.py
@@ -0,0 +1,11 @@
+from esque.protocol import ApiVersionsRequestData, BrokerConnection
+
+
+def test_simple(test_config):
+    server = test_config.bootstrap_hosts[0]
+    port = int(test_config.bootstrap_port)
+    with BrokerConnection((server, port), "esque_integration_test") as connection:
+        data = ApiVersionsRequestData()
+
+        request = connection.send(data)
+        assert len(request.response_data.api_versions) > 0
diff --git a/tests/unit/protocol/__init__.py b/tests/unit/protocol/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unit/protocol/serializers/__init__.py b/tests/unit/protocol/serializers/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unit/protocol/serializers/test_primitive.py b/tests/unit/protocol/serializers/test_primitive.py
new file mode 100644
index 00000000..0bc6ae42
--- /dev/null
+++ b/tests/unit/protocol/serializers/test_primitive.py
@@ -0,0 +1,427 @@
+from io import BytesIO
+from typing import Dict, Generic, List, Optional, TypeVar
+
+import pytest
+
+from esque.protocol import serializers
+
+T = TypeVar("T")
+
+
+class Sample(Generic[T]):
+    def __init__(self, encoded_value: bytes, decoded_value: T):
+        self.encoded_value: bytes = encoded_value
+        self.decoded_value: T = decoded_value
+
+
+SAMPLES: Dict[str, List[Sample]] = {
+    "BOOLEAN": [
+        Sample[bool](encoded_value=b"\00", decoded_value=False),
+        Sample[bool](encoded_value=b"\01", decoded_value=True),
+    ],
+    "INT8": [
+        Sample[int](encoded_value=b"\x00", decoded_value=0),
+        Sample[int](encoded_value=b"\x7f", decoded_value=127),
+        Sample[int](encoded_value=b"\x80", decoded_value=-128),
+    ],
+    "INT16": [
+        Sample[int](encoded_value=b"\x00\x00", decoded_value=0),
+        Sample[int](encoded_value=b"\x7f\xff", decoded_value=32767),
+        Sample[int](encoded_value=b"\x80\x00", decoded_value=-32768),
+    ],
+    "INT32": [
+        Sample[int](encoded_value=b"\x00\x00\x00\x00", decoded_value=0),
+        Sample[int](encoded_value=b"\x7f\xff\xff\xff", decoded_value=2147483647),
+        Sample[int](encoded_value=b"\x80\x00\x00\x00", decoded_value=-2147483648),
+    ],
+    "INT64": [
+        Sample[int](encoded_value=b"\x00\x00\x00\x00\x00\x00\x00\x00", decoded_value=0),
+        Sample[int](encoded_value=b"\x7f\xff\xff\xff\xff\xff\xff\xff", decoded_value=9223372036854775807),
+        Sample[int](encoded_value=b"\x80\x00\x00\x00\x00\x00\x00\x00", decoded_value=-9223372036854775808),
+    ],
+    "UINT32": [
+        Sample[int](encoded_value=b"\x00\x00\x00\x00", decoded_value=0),
+        Sample[int](encoded_value=b"\x7f\xff\xff\xff", decoded_value=2147483647),
+        Sample[int](encoded_value=b"\x80\x00\x00\x00", decoded_value=2147483648),
+    ],
+    "VARINT": [
+        Sample[int](encoded_value=b"\x00", decoded_value=0),
+        Sample[int](encoded_value=b"\x01", decoded_value=-1),
+        Sample[int](encoded_value=b"\x02", decoded_value=1),
+        Sample[int](encoded_value=b"\x03", decoded_value=-2),
+        Sample[int](encoded_value=b"\xfe\xff\xff\xff\x0f", decoded_value=2147483647),
+        Sample[int](encoded_value=b"\xff\xff\xff\xff\x0f", decoded_value=-2147483648),
+    ],
+    "VARLONG": [
+        Sample[int](encoded_value=b"\x00", decoded_value=0),
+        Sample[int](encoded_value=b"\x01", decoded_value=-1),
+        Sample[int](encoded_value=b"\x02", decoded_value=1),
+        Sample[int](encoded_value=b"\x03", decoded_value=-2),
+        Sample[int](encoded_value=b"\xfe\xff\xff\xff\x0f", decoded_value=2147483647),
+        Sample[int](encoded_value=b"\xff\xff\xff\xff\x0f", decoded_value=-2147483648),
+        Sample[int](encoded_value=b"\xfe\xff\xff\xff\xff\xff\xff\xff\xff\x01", decoded_value=9223372036854775807),
+        Sample[int](encoded_value=b"\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01", decoded_value=-9223372036854775808),
+    ],
+    "STRING": [
+        Sample[str](encoded_value=b"\x00\x0812345678", decoded_value="12345678"),
+        Sample[str](encoded_value=b"\x00\x00", decoded_value=""),
+    ],
+    "NULLABLE_STRING": [
+        Sample[Optional[str]](encoded_value=b"\x00\x0812345678", decoded_value="12345678"),
+        Sample[Optional[str]](encoded_value=b"\x00\x00", decoded_value=""),
+        Sample[Optional[str]](encoded_value=b"\xff\xff", decoded_value=None),
+    ],
+    "BYTES": [
+        Sample[bytes](encoded_value=b"\x00\x00\x00\x041234", decoded_value=b"1234"),
+        Sample[bytes](encoded_value=b"\x00\x00\x00\x00", decoded_value=b""),
+    ],
+    "NULLABLE_BYTES": [
+        Sample[Optional[bytes]](encoded_value=b"\x00\x00\x00\x041234", decoded_value=b"1234"),
+        Sample[Optional[bytes]](encoded_value=b"\x00\x00\x00\x00", decoded_value=b""),
+        Sample[Optional[bytes]](encoded_value=b"\xff\xff\xff\xff", decoded_value=None),
+    ],
+    "RECORDS": [
+        # TODO we're pretending this is the same as 
NULLABLE_BYTES, don't know if that's true though... + Sample[Optional[bytes]](encoded_value=b"\x00\x00\x00\x041234", decoded_value=b"1234"), + Sample[Optional[bytes]](encoded_value=b"\x00\x00\x00\x00", decoded_value=b""), + Sample[Optional[bytes]](encoded_value=b"\xff\xff\xff\xff", decoded_value=None), + ], +} + + +# Represents a boolean value in a byte. Values 0 and 1 are used to represent false and true +# respectively. When reading a boolean value, any non-zero value is considered true. +@pytest.mark.parametrize("sample", SAMPLES["BOOLEAN"]) +def test_encode_boolean(sample: Sample[bool]) -> None: + actual_encoded_value = serializers.booleanSerializer.encode(sample.decoded_value) + + assert actual_encoded_value == sample.encoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["BOOLEAN"]) +def test_decode_boolean(sample: Sample[bool]) -> None: + actual_decoded_value = serializers.booleanSerializer.read(BytesIO(sample.encoded_value)) + + assert actual_decoded_value == sample.decoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["BOOLEAN"]) +def test_serde_boolean(sample: Sample[bool]) -> None: + + recreated_original_value = serializers.booleanSerializer.read( + BytesIO(serializers.booleanSerializer.encode(sample.decoded_value)) + ) + + assert sample.decoded_value == recreated_original_value + + +# Represents an integer between -2**7 and 2**7-1 inclusive. +@pytest.mark.parametrize("sample", SAMPLES["INT8"]) +def test_encode_int8(sample: Sample[int]) -> None: + actual_encoded_value = serializers.int8Serializer.encode(sample.decoded_value) + + assert actual_encoded_value == sample.encoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["INT8"]) +def test_decode_int8(sample: Sample[int]) -> None: + actual_decoded_value = serializers.int8Serializer.read(BytesIO(sample.encoded_value)) + + assert actual_decoded_value == sample.decoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["INT8"]) +def test_serde_int8(sample: Sample[int]) -> None: + + recreated_original_value = serializers.int8Serializer.read( + BytesIO(serializers.int8Serializer.encode(sample.decoded_value)) + ) + + assert sample.decoded_value == recreated_original_value + + +# Represents an integer between -2**15 and 2**15-1 inclusive. The values are encoded using two bytes +# in network byte order (big-endian). +@pytest.mark.parametrize("sample", SAMPLES["INT16"]) +def test_encode_int16(sample: Sample[int]) -> None: + actual_encoded_value = serializers.int16Serializer.encode(sample.decoded_value) + + assert actual_encoded_value == sample.encoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["INT16"]) +def test_decode_int16(sample: Sample[int]) -> None: + actual_decoded_value = serializers.int16Serializer.read(BytesIO(sample.encoded_value)) + + assert actual_decoded_value == sample.decoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["INT16"]) +def test_serde_int16(sample: Sample[int]) -> None: + + recreated_original_value = serializers.int16Serializer.read( + BytesIO(serializers.int16Serializer.encode(sample.decoded_value)) + ) + + assert sample.decoded_value == recreated_original_value + + +# Represents an integer between -2**31 and 2**31-1 inclusive. The values are encoded using four bytes +# in network byte order (big-endian). 
+@pytest.mark.parametrize("sample", SAMPLES["INT32"]) +def test_encode_int32(sample: Sample[int]) -> None: + actual_encoded_value = serializers.int32Serializer.encode(sample.decoded_value) + + assert actual_encoded_value == sample.encoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["INT32"]) +def test_decode_int32(sample: Sample[int]) -> None: + actual_decoded_value = serializers.int32Serializer.read(BytesIO(sample.encoded_value)) + + assert actual_decoded_value == sample.decoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["INT32"]) +def test_serde_int32(sample: Sample[int]) -> None: + + recreated_original_value = serializers.int32Serializer.read( + BytesIO(serializers.int32Serializer.encode(sample.decoded_value)) + ) + + assert sample.decoded_value == recreated_original_value + + +# Represents an integer between -2**63 and 2**63-1 inclusive. The values are encoded using eight bytes +# in network byte order (big-endian). +@pytest.mark.parametrize("sample", SAMPLES["INT64"]) +def test_encode_int64(sample: Sample[int]) -> None: + actual_encoded_value = serializers.int64Serializer.encode(sample.decoded_value) + + assert actual_encoded_value == sample.encoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["INT64"]) +def test_decode_int64(sample: Sample[int]) -> None: + actual_decoded_value = serializers.int64Serializer.read(BytesIO(sample.encoded_value)) + + assert actual_decoded_value == sample.decoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["INT64"]) +def test_serde_int64(sample: Sample[int]) -> None: + + recreated_original_value = serializers.int64Serializer.read( + BytesIO(serializers.int64Serializer.encode(sample.decoded_value)) + ) + + assert sample.decoded_value == recreated_original_value + + +# Represents an integer between 0 and 2**32-1 inclusive. The values are encoded using four bytes in +# network byte order (big-endian). +@pytest.mark.parametrize("sample", SAMPLES["UINT32"]) +def test_encode_uint32(sample: Sample[int]) -> None: + actual_encoded_value = serializers.uint32Serializer.encode(sample.decoded_value) + + assert actual_encoded_value == sample.encoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["UINT32"]) +def test_decode_uint32(sample: Sample[int]) -> None: + actual_decoded_value = serializers.uint32Serializer.read(BytesIO(sample.encoded_value)) + + assert actual_decoded_value == sample.decoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["UINT32"]) +def test_serde_uint32(sample: Sample[int]) -> None: + + recreated_original_value = serializers.uint32Serializer.read( + BytesIO(serializers.uint32Serializer.encode(sample.decoded_value)) + ) + + assert sample.decoded_value == recreated_original_value + + +# Represents an integer between -2**31 and 2**31-1 inclusive. Encoding follows the variable-length +# zig-zag encoding from Google Protocol Buffers. 
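A worked example of the two steps (the ZigZag fold, then 7-bit groups with continuation bits, least significant group first), checked against the serializer itself:

from io import BytesIO

from esque.protocol import serializers

# ZigZag folds the sign: 0, -1, 1, -2, 2, ... map to 0, 1, 2, 3, 4, ...
assert serializers.varIntSerializer.encode(-1) == b"\x01"
assert serializers.varIntSerializer.encode(1) == b"\x02"

# 300 zig-zags to 600 = 0b100_1011000; the low 7 bits 0b1011000 (0x58) are sent first
# with the continuation bit set (giving 0xd8), then the remaining 0b100 (0x04)
assert serializers.varIntSerializer.encode(300) == b"\xd8\x04"
assert serializers.varIntSerializer.read(BytesIO(b"\xd8\x04")) == 300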
+@pytest.mark.parametrize("sample", SAMPLES["VARINT"]) +def test_encode_varint(sample: Sample[int]) -> None: + actual_encoded_value = serializers.varIntSerializer.encode(sample.decoded_value) + + assert actual_encoded_value == sample.encoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["VARINT"]) +def test_decode_varint(sample: Sample[int]) -> None: + actual_decoded_value = serializers.varIntSerializer.read(BytesIO(sample.encoded_value)) + + assert actual_decoded_value == sample.decoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["VARINT"]) +def test_serde_varint(sample: Sample[int]) -> None: + + recreated_original_value = serializers.varIntSerializer.read( + BytesIO(serializers.varIntSerializer.encode(sample.decoded_value)) + ) + + assert sample.decoded_value == recreated_original_value + + +# Represents an integer between -2**63 and 2**63-1 inclusive. Encoding follows the variable-length +# zig-zag encoding from Google Protocol Buffers. +@pytest.mark.parametrize("sample", SAMPLES["VARLONG"]) +def test_encode_varlong(sample: Sample[int]) -> None: + actual_encoded_value = serializers.varLongSerializer.encode(sample.decoded_value) + + assert actual_encoded_value == sample.encoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["VARLONG"]) +def test_decode_varlong(sample: Sample[int]) -> None: + actual_decoded_value = serializers.varLongSerializer.read(BytesIO(sample.encoded_value)) + + assert actual_decoded_value == sample.decoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["VARLONG"]) +def test_serde_varlong(sample: Sample[int]) -> None: + + recreated_original_value = serializers.varLongSerializer.read( + BytesIO(serializers.varLongSerializer.encode(sample.decoded_value)) + ) + + assert sample.decoded_value == recreated_original_value + + +# Represents a sequence of characters. First the length N is given as an INT16. Then N bytes follow +# which are the UTF-8 encoding of the character sequence. Length must not be negative. +@pytest.mark.parametrize("sample", SAMPLES["STRING"]) +def test_encode_string(sample: Sample[str]) -> None: + actual_encoded_value = serializers.stringSerializer.encode(sample.decoded_value) + + assert actual_encoded_value == sample.encoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["STRING"]) +def test_decode_string(sample: Sample[str]) -> None: + actual_decoded_value = serializers.stringSerializer.read(BytesIO(sample.encoded_value)) + + assert actual_decoded_value == sample.decoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["STRING"]) +def test_serde_string(sample: Sample[str]) -> None: + + recreated_original_value = serializers.stringSerializer.read( + BytesIO(serializers.stringSerializer.encode(sample.decoded_value)) + ) + + assert sample.decoded_value == recreated_original_value + + +# Represents a sequence of characters or null. For non-null strings, first the length N is given as an +# INT16. Then N bytes follow which are the UTF-8 encoding of the character sequence. A null value is +# encoded with length of -1 and there are no following bytes. 
+@pytest.mark.parametrize("sample", SAMPLES["NULLABLE_STRING"]) +def test_encode_nullable_string(sample: Sample[Optional[str]]) -> None: + actual_encoded_value = serializers.nullableStringSerializer.encode(sample.decoded_value) + + assert actual_encoded_value == sample.encoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["NULLABLE_STRING"]) +def test_decode_nullable_string(sample: Sample[Optional[str]]) -> None: + actual_decoded_value = serializers.nullableStringSerializer.read(BytesIO(sample.encoded_value)) + + assert actual_decoded_value == sample.decoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["NULLABLE_STRING"]) +def test_serde_nullable_string(sample: Sample[Optional[str]]) -> None: + + recreated_original_value = serializers.nullableStringSerializer.read( + BytesIO(serializers.nullableStringSerializer.encode(sample.decoded_value)) + ) + + assert sample.decoded_value == recreated_original_value + + +# Represents a raw sequence of bytes. First the length N is given as an INT32. Then N bytes follow. +@pytest.mark.parametrize("sample", SAMPLES["BYTES"]) +def test_encode_bytes(sample: Sample[bytes]) -> None: + actual_encoded_value = serializers.bytesSerializer.encode(sample.decoded_value) + + assert actual_encoded_value == sample.encoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["BYTES"]) +def test_decode_bytes(sample: Sample[bytes]) -> None: + actual_decoded_value = serializers.bytesSerializer.read(BytesIO(sample.encoded_value)) + + assert actual_decoded_value == sample.decoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["BYTES"]) +def test_serde_bytes(sample: Sample[bytes]) -> None: + + recreated_original_value = serializers.bytesSerializer.read( + BytesIO(serializers.bytesSerializer.encode(sample.decoded_value)) + ) + + assert sample.decoded_value == recreated_original_value + + +# Represents a raw sequence of bytes or null. For non-null values, first the length N is given as an +# INT32. Then N bytes follow. A null value is encoded with length of -1 and there are no following +# bytes. +@pytest.mark.parametrize("sample", SAMPLES["NULLABLE_BYTES"]) +def test_encode_nullable_bytes(sample: Sample[Optional[bytes]]) -> None: + actual_encoded_value = serializers.nullableBytesSerializer.encode(sample.decoded_value) + + assert actual_encoded_value == sample.encoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["NULLABLE_BYTES"]) +def test_decode_nullable_bytes(sample: Sample[Optional[bytes]]) -> None: + actual_decoded_value = serializers.nullableBytesSerializer.read(BytesIO(sample.encoded_value)) + + assert actual_decoded_value == sample.decoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["NULLABLE_BYTES"]) +def test_serde_nullable_bytes(sample: Sample[Optional[bytes]]) -> None: + + recreated_original_value = serializers.nullableBytesSerializer.read( + BytesIO(serializers.nullableBytesSerializer.encode(sample.decoded_value)) + ) + + assert sample.decoded_value == recreated_original_value + + +# Represents a sequence of Kafka records as NULLABLE_BYTES. For a detailed description of records see +# Message Sets. 
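Since recordsSerializer is defined as a plain alias, the RECORDS tests below simply re-run the NULLABLE_BYTES samples; the delegation itself can be pinned down directly:

from esque.protocol import serializers

assert serializers.recordsSerializer is serializers.nullableBytesSerializer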
+@pytest.mark.parametrize("sample", SAMPLES["RECORDS"]) +def test_encode_records(sample: Sample[Optional[bytes]]) -> None: + actual_encoded_value = serializers.recordsSerializer.encode(sample.decoded_value) + + assert actual_encoded_value == sample.encoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["RECORDS"]) +def test_decode_records(sample: Sample[Optional[bytes]]) -> None: + actual_decoded_value = serializers.recordsSerializer.read(BytesIO(sample.encoded_value)) + + assert actual_decoded_value == sample.decoded_value + + +@pytest.mark.parametrize("sample", SAMPLES["RECORDS"]) +def test_serde_records(sample: Sample[Optional[bytes]]) -> None: + + recreated_original_value = serializers.recordsSerializer.read( + BytesIO(serializers.recordsSerializer.encode(sample.decoded_value)) + ) + + assert sample.decoded_value == recreated_original_value
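As a closing illustration of how the pieces compose: a schema pairs field names with serializers, and NamedTupleSerializer maps those fields onto a tuple class. The Pair type and its schema below are made up for this example; the byte layout follows from the STRING and INT32 encodings above:

from io import BytesIO
from typing import NamedTuple

from esque.protocol.serializers import NamedTupleSerializer, int32Serializer, stringSerializer


class Pair(NamedTuple):
    key: str
    count: int


# schema entries are (field name, serializer) in wire order
pair_serializer = NamedTupleSerializer(Pair, [("key", stringSerializer), ("count", int32Serializer)])

encoded = pair_serializer.encode(Pair(key="clicks", count=42))
assert encoded == b"\x00\x06clicks\x00\x00\x00\x2a"  # INT16 length + utf-8, then big-endian INT32
assert pair_serializer.read(BytesIO(encoded)) == Pair(key="clicks", count=42)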