diff --git a/Cargo.lock b/Cargo.lock index 3264c677..6137c615 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -241,7 +241,7 @@ dependencies = [ "futures-util", "handlebars", "http", - "indexmap 2.12.1", + "indexmap 2.13.0", "mime", "multer", "num-traits", @@ -284,9 +284,9 @@ dependencies = [ "darling 0.23.0", "proc-macro-crate", "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "strum", - "syn 2.0.111", + "syn 2.0.114", "thiserror 2.0.17", ] @@ -309,7 +309,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "527a4c6022fc4dac57b4f03f12395e9a391512e85ba98230b93315f8f45f27fc" dependencies = [ "bytes", - "indexmap 2.12.1", + "indexmap 2.13.0", "serde", "serde_json", ] @@ -334,9 +334,9 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.4.1" +version = "3.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc" +checksum = "290f7f2596bd5b78a9fec8088ccd89180d7f9f55b94b0576823bbbdc72ee8311" dependencies = [ "event-listener", "event-listener-strategy", @@ -445,8 +445,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -462,8 +462,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -582,9 +582,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22" +checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" dependencies = [ 
"bytes", "futures-core", @@ -631,9 +631,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.8.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" +checksum = "7d809780667f4410e7c41b07f52439b94d2bdf8528eeedc287fa38d3b7f95d82" [[package]] name = "bech32" @@ -682,26 +682,19 @@ dependencies = [ [[package]] name = "bip39" -version = "2.2.0" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43d193de1f7487df1914d3a568b772458861d33f9c54249612cc2893d6915054" +checksum = "90dbd31c98227229239363921e60fcf5e558e43ec69094d46fc4996f08d1d5bc" dependencies = [ "bitcoin_hashes", ] -[[package]] -name = "bitcoin-internals" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" - [[package]] name = "bitcoin_hashes" -version = "0.13.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" +checksum = "26ec84b80c482df901772e931a9a681e26a1b9ee2302edeff23cb30328745c8b" dependencies = [ - "bitcoin-internals", "hex-conservative", ] @@ -790,6 +783,36 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blockfrost" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22041e445c9606e04f7177f2dbedaef0ecded9bf34e97d7509e909828dfa7e8f" +dependencies = [ + "blockfrost-openapi", + "futures", + "futures-timer", + "reqwest 0.12.28", + "serde", + "serde_json", + "thiserror 2.0.17", + "url", +] + +[[package]] +name = "blockfrost-openapi" +version = "0.1.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62cc79368dcc9d7119091630b6a7b2b2801591836678261b4a79b7a62b559778" 
+dependencies = [ + "reqwest 0.12.28", + "serde", + "serde_json", + "serde_repr", + "serde_with", + "url", +] + [[package]] name = "blocking" version = "1.6.2" @@ -844,7 +867,7 @@ dependencies = [ "pin-project-lite", "rand 0.9.2", "rustls", - "rustls-native-certs 0.8.2", + "rustls-native-certs 0.8.3", "rustls-pemfile", "rustls-pki-types", "serde", @@ -911,8 +934,8 @@ dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -927,9 +950,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.19.0" +version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" [[package]] name = "byte-slice-cast" @@ -944,7 +967,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c6d47a4e2961fb8721bcfc54feae6455f2f64e7054f9bc67e875f0e77f4c58d" dependencies = [ "rust_decimal", - "schemars 1.1.0", + "schemars 1.2.0", "serde", "utf8-width", ] @@ -978,7 +1001,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "syn 1.0.109", ] @@ -1005,9 +1028,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.48" +version = "1.2.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c481bdbf0ed3b892f6f806287d72acd515b352a4ec27a208489b8c1bc839633a" +checksum = "cd4932aefd12402b36c60956a4fe0035421f544799057659ff86f923657aada3" dependencies = [ "find-msvc-tools", "jobserver", @@ -1138,8 +1161,8 @@ checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" dependencies = [ "heck", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ 
-1225,7 +1248,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" dependencies = [ "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "unicode-xid 0.2.6", ] @@ -1424,8 +1447,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -1467,9 +1490,9 @@ dependencies = [ "fnv", "ident_case", "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "strsim", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1481,9 +1504,9 @@ dependencies = [ "fnv", "ident_case", "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "strsim", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1494,9 +1517,9 @@ checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0" dependencies = [ "ident_case", "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "strsim", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1506,8 +1529,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -1517,8 +1540,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -1528,8 +1551,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" dependencies = [ "darling_core 0.23.0", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -1580,8 +1603,8 
@@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -1601,8 +1624,8 @@ checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" dependencies = [ "darling 0.20.11", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -1612,7 +1635,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1640,8 +1663,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -1652,9 +1675,9 @@ checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" dependencies = [ "convert_case", "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "rustc_version", - "syn 2.0.111", + "syn 2.0.114", "unicode-xid 0.2.6", ] @@ -1692,8 +1715,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -1737,8 +1760,8 @@ checksum = "1cac124e13ae9aa56acc4241f8c8207501d93afdd8d8e62f0c1f2e12f6508c65" dependencies = [ "darling 0.20.11", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -2000,8 +2023,8 @@ checksum = "fc6ca71d5dfcbab9e09b19b5248e06a4bd8ac779055491fb47e6ebbdd20e5567" dependencies = [ "proc-macro-error2", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + 
"quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -2025,9 +2048,9 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "ferroid" -version = "0.8.7" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0e9414a6ae93ef993ce40a1e02944f13d4508e2bf6f1ced1580ce6910f08253" +checksum = "bb330bbd4cb7a5b9f559427f06f98a4f853a137c8298f3bd3f8ca57663e21986" dependencies = [ "portable-atomic", "rand 0.9.2", @@ -2079,9 +2102,9 @@ dependencies = [ [[package]] name = "find-msvc-tools" -version = "0.1.5" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" +checksum = "f449e6c6c08c865631d4890cfacf252b3d396c9bcc83adb6623cdb02a8336c41" [[package]] name = "finito" @@ -2144,6 +2167,21 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.2" @@ -2171,9 +2209,9 @@ dependencies = [ [[package]] name = "frame-metadata" -version = "23.0.0" +version = "23.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c26fcb0454397c522c05fdad5380c4e622f8a875638af33bff5a320d1fc965" +checksum = "9ba5be0edbdb824843a0f9c6f0906ecfc66c5316218d74457003218b24909ed0" dependencies = [ "cfg-if", "parity-scale-codec", @@ -2272,8 +2310,8 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -2407,10 +2445,10 @@ dependencies = [ "heck", "lazy_static", "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "serde", "serde_json", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2421,7 +2459,7 @@ checksum = "b684c77d1b5f9c6006068852e0e0e80c6df3ef85c24fe81ef26fbadbd595af77" dependencies = [ "graphql_client_codegen", "proc-macro2", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2439,9 +2477,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" dependencies = [ "atomic-waker", "bytes", @@ -2449,7 +2487,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.12.1", + "indexmap 2.13.0", "slab", "tokio", "tokio-util", @@ -2466,15 +2504,15 @@ dependencies = [ "num-integer", "num-traits", "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "syn 1.0.109", ] [[package]] name = "handlebars" -version = "6.3.2" +version = "6.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759e2d5aea3287cb1190c8ec394f42866cb5bf74fcbf213f354e3c856ea26098" +checksum = "9b3f9296c208515b87bd915a2f5d1163d4b3f863ba83337d7713cf478055948e" dependencies = [ "derive_builder", "log", @@ -2560,9 +2598,12 @@ dependencies = [ [[package]] name = "hex-conservative" -version = "0.1.2" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" +checksum = "fda06d18ac606267c40c04e41b9947729bf8b9efe74bd4e82b61a5f26a510b9f" +dependencies = [ + 
"arrayvec 0.7.6", +] [[package]] name = "hkdf" @@ -2721,12 +2762,12 @@ dependencies = [ "hyper", "hyper-util", "rustls", - "rustls-native-certs 0.8.2", + "rustls-native-certs 0.8.3", "rustls-pki-types", "tokio", "tokio-rustls", "tower-service", - "webpki-roots 1.0.4", + "webpki-roots 1.0.5", ] [[package]] @@ -2742,11 +2783,27 @@ dependencies = [ "tower-service", ] +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + [[package]] name = "hyper-util" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56" +checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ "base64 0.22.1", "bytes", @@ -2761,9 +2818,11 @@ dependencies = [ "percent-encoding", "pin-project-lite", "socket2", + "system-configuration", "tokio", "tower-service", "tracing", + "windows-registry", ] [[package]] @@ -2793,7 +2852,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core", + "windows-core 0.62.2", ] [[package]] @@ -2853,9 +2912,9 @@ checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" dependencies = [ "icu_collections", "icu_locale_core", @@ -2867,9 +2926,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "2.1.1" +version = "2.1.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" [[package]] name = "icu_provider" @@ -2938,8 +2997,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -2960,8 +3019,8 @@ dependencies = [ "itertools 0.13.0", "manyhow", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -3103,9 +3162,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.1" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", "hashbrown 0.16.1", @@ -3166,9 +3225,9 @@ checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "iri-string" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" dependencies = [ "memchr", "serde", @@ -3206,15 +3265,15 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = "jiff" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"49cce2b81f2098e7e3efc35bc2e0a6b7abec9d34128283d7a26fa8f32a6dbb35" +checksum = "e67e8da4c49d6d9909fe03361f9b620f58898859f5c7aded68351e85e71ecf50" dependencies = [ "jiff-static", "jiff-tzdb-platform", @@ -3227,20 +3286,20 @@ dependencies = [ [[package]] name = "jiff-static" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "980af8b43c3ad5d8d349ace167ec8170839f753a42d233ba19e08afe1850fa69" +checksum = "e0c84ee7f197eca9a86c6fd6cb771e55eb991632f15f2bc3ca6ec838929e6e78" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] name = "jiff-tzdb" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1283705eb0a21404d2bfd6eef2a7593d240bc42a0bdb39db0ad6fa2ec026524" +checksum = "68971ebff725b9e2ca27a601c5eb38a4c5d64422c4cbab0c535f248087eda5c2" [[package]] name = "jiff-tzdb-platform" @@ -3445,9 +3504,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.178" +version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" +checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" [[package]] name = "libm" @@ -3457,13 +3516,13 @@ checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libredox" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" dependencies = [ "bitflags", "libc", - "redox_syscall", + "redox_syscall 0.7.0", ] [[package]] @@ -3657,9 +3716,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"96051b46fc183dc9cd4a223960ef37b9af631b55191852a8274bfef064cda20f" +checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593" dependencies = [ "hashbrown 0.16.1", ] @@ -3678,8 +3737,8 @@ checksum = "b33efb3ca6d3b07393750d4030418d594ab1139cee518f0dc88db70fec873587" dependencies = [ "manyhow-macros", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -3690,7 +3749,7 @@ checksum = "46fce34d199b78b6e6073abf984c9cf5fd3e9330145a93ee0738a7443e371495" dependencies = [ "proc-macro-utils", "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", ] [[package]] @@ -3747,7 +3806,7 @@ dependencies = [ "http-body-util", "hyper", "hyper-util", - "indexmap 2.12.1", + "indexmap 2.13.0", "ipnet", "metrics", "metrics-util", @@ -3810,7 +3869,7 @@ version = "1.0.0-rc.3" source = "git+https://github.com/midnightntwrk/midnight-ledger?tag=ledger-7.0.0-alpha.1#f286bab87276136b2142932a4759ede03b800b03" dependencies = [ "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "syn 1.0.109", ] @@ -3918,7 +3977,7 @@ version = "1.0.0-rc.2" source = "git+https://github.com/midnightntwrk/midnight-ledger?tag=ledger-7.0.0-alpha.1#f286bab87276136b2142932a4759ede03b800b03" dependencies = [ "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", ] [[package]] @@ -4024,8 +4083,8 @@ version = "1.0.0-rc.3" source = "git+https://github.com/midnightntwrk/midnight-ledger?tag=ledger-7.0.0-alpha.1#f286bab87276136b2142932a4759ede03b800b03" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -4040,7 +4099,7 @@ dependencies = [ "hex", "itertools 0.14.0", "konst", - "lru 0.16.2", + "lru 0.16.3", "midnight-base-crypto", "midnight-serialize", "midnight-storage-macros", @@ -4059,8 +4118,8 @@ source = "git+https://github.com/midnightntwrk/midnight-ledger?tag=ledger-7.0.0- dependencies = [ "midnight-serialize-macros", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 
2.0.114", ] [[package]] @@ -4079,7 +4138,7 @@ dependencies = [ "group", "k256", "lazy_static", - "lru 0.16.2", + "lru 0.16.3", "midnight-base-crypto", "midnight-base-crypto-derive", "midnight-circuits", @@ -4151,6 +4210,16 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "mime_guess" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" +dependencies = [ + "mime", + "unicase", +] + [[package]] name = "miniz_oxide" version = "0.8.9" @@ -4163,9 +4232,9 @@ dependencies = [ [[package]] name = "mio" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" dependencies = [ "libc", "wasi", @@ -4195,6 +4264,23 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "685a9ac4b61f4e728e1d2c6a7844609c16527aeb5e6c865915c08e619c16410f" +[[package]] +name = "native-tls" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe 0.1.6", + "openssl-sys", + "schannel", + "security-framework 2.11.1", + "security-framework-sys", + "tempfile", +] + [[package]] name = "nix" version = "0.29.0" @@ -4251,9 +4337,9 @@ dependencies = [ [[package]] name = "ntapi" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" +checksum = "c70f219e21142367c70c0b30c6a9e3a14d55b4d12a204d897fbec83a0363f081" dependencies = [ 
"winapi", ] @@ -4424,12 +4510,56 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +[[package]] +name = "openssl" +version = "0.10.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote 1.0.43", + "syn 2.0.114", +] + [[package]] name = "openssl-probe" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-probe" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391" + +[[package]] +name = "openssl-sys" +version = "0.9.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "opentelemetry" version = "0.31.0" @@ -4543,8 +4673,8 @@ checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" dependencies = [ "proc-macro-crate", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -4571,7 +4701,7 @@ checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.18", "smallvec", "windows-link", ] @@ -4594,11 +4724,11 @@ source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ae7800a4c974efd12df917266338e79a7a74415173caf7e70aa0a0707345281" dependencies = [ "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "regex", "regex-syntax", "structmeta", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4642,8 +4772,8 @@ checksum = "4bab5b985dc082b345f812b7df84e1bef27e7207b39e448439ba8bd69c93f147" dependencies = [ "proc-macro2", "proc-macro2-diagnostics", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -4663,9 +4793,9 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pest" -version = "2.8.4" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbcfd20a6d4eeba40179f05735784ad32bdaef05ce8e8af05f180d45bb3e7e22" +checksum = "2c9eb05c21a464ea704b53158d358a31e6425db2f63a1a7312268b05fe2b75f7" dependencies = [ "memchr", "ucd-trie", @@ -4673,9 +4803,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.8.4" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f72981ade67b1ca6adc26ec221be9f463f2b5839c7508998daa17c23d94d7f" +checksum = "68f9dbced329c441fa79d80472764b1a2c7e57123553b8519b36663a2fb234ed" dependencies = [ "pest", "pest_generator", @@ -4683,22 +4813,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.8.4" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee9efd8cdb50d719a80088b76f81aec7c41ed6d522ee750178f83883d271625" +checksum = "3bb96d5051a78f44f43c8f712d8e810adb0ebf923fc9ed2655a7f66f63ba8ee5" dependencies = [ "pest", "pest_meta", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] name = "pest_meta" -version = "2.8.4" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bf1d70880e76bdc13ba52eafa6239ce793d85c8e43896507e43dd8984ff05b82" +checksum = "602113b5b5e8621770cfd490cfd90b9f84ab29bd2b0e49ad83eb6d186cef2365" dependencies = [ "pest", "sha2 0.10.9", @@ -4720,8 +4850,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -4807,9 +4937,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" +checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950" [[package]] name = "portable-atomic-util" @@ -4873,7 +5003,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" dependencies = [ "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", ] [[package]] @@ -4884,8 +5014,8 @@ checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" dependencies = [ "proc-macro-error-attr2", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -4895,15 +5025,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eeaf08a13de400bc215877b5bdc088f241b12eb42f0a548d3390dc1c56bb7071" dependencies = [ "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "smallvec", ] [[package]] name = "proc-macro2" -version = "1.0.103" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" dependencies = [ "unicode-ident", ] @@ -4915,8 +5045,8 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", "version_check", "yansi", ] @@ -4953,10 +5083,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -4984,7 +5114,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "syn 1.0.109", ] @@ -5067,9 +5197,9 @@ checksum = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" [[package]] name = "quote" -version = "1.0.42" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a" dependencies = [ "proc-macro2", ] @@ -5210,6 +5340,15 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f3fe0889e69e2ae9e41f4d6c4c0181701d00e4697b356fb1f74173a5e0ee27" +dependencies = [ + "bitflags", +] + [[package]] name = "ref-cast" version = "1.0.25" @@ -5226,8 +5365,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -5276,28 +5415,35 @@ checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" 
dependencies = [ "base64 0.22.1", "bytes", + "encoding_rs", "futures-channel", "futures-core", "futures-util", + "h2", "http", "http-body", "http-body-util", "hyper", "hyper-rustls", + "hyper-tls", "hyper-util", "js-sys", "log", + "mime", + "mime_guess", + "native-tls", "percent-encoding", "pin-project-lite", "quinn", "rustls", - "rustls-native-certs 0.8.2", + "rustls-native-certs 0.8.3", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "tokio", + "tokio-native-tls", "tokio-rustls", "tokio-util", "tower", @@ -5308,7 +5454,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.4", + "webpki-roots 1.0.5", ] [[package]] @@ -5383,9 +5529,9 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.45" +version = "0.7.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" +checksum = "2297bf9c81a3f0dc96bc9521370b88f054168c29826a75e89c55ff196e7ed6a1" dependencies = [ "bitvec", "bytecheck", @@ -5401,12 +5547,12 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.45" +version = "0.7.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" +checksum = "84d7b42d4b8d06048d3ac8db0eb31bcb942cbeb709f0b5f2b2ebde398d3038f5" dependencies = [ "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "syn 1.0.109", ] @@ -5497,9 +5643,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.35" +version = "0.23.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ "aws-lc-rs", "log", @@ -5517,7 +5663,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ - "openssl-probe", + "openssl-probe 0.1.6", "rustls-pemfile", "rustls-pki-types", "schannel", @@ -5526,11 +5672,11 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe", + "openssl-probe 0.2.0", "rustls-pki-types", "schannel", "security-framework 3.5.1", @@ -5547,9 +5693,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" +checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" dependencies = [ "web-time", "zeroize", @@ -5567,7 +5713,7 @@ dependencies = [ "log", "once_cell", "rustls", - "rustls-native-certs 0.8.2", + "rustls-native-certs 0.8.3", "rustls-platform-verifier-android", "rustls-webpki 0.103.8", "security-framework 3.5.1", @@ -5588,12 +5734,12 @@ dependencies = [ "log", "once_cell", "rustls", - "rustls-native-certs 0.8.2", + "rustls-native-certs 0.8.3", "rustls-platform-verifier-android", "rustls-webpki 0.103.8", "security-framework 3.5.1", "security-framework-sys", - "webpki-root-certs 1.0.4", + "webpki-root-certs 1.0.5", "windows-sys 0.61.2", ] @@ -5639,9 +5785,9 @@ checksum = "e5ff0cc5e135c8870a775d3320910cd9b564ec036b4dc0b8741629020be63f01" [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" [[package]] name = "same-file" @@ 
-5687,15 +5833,15 @@ checksum = "65cb245f7fdb489e7ba43a616cbd34427fe3ba6fe0edc1d0d250085e6c84f3ec" dependencies = [ "darling 0.20.11", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] name = "scale-encode" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64901733157f9d25ef86843bd783eda439fac7efb0ad5a615d12d2cf3a29464b" +checksum = "f2a976d73564a59e482b74fd5d95f7518b79ca8c8ca5865398a4d629dd15ee50" dependencies = [ "parity-scale-codec", "primitive-types", @@ -5708,15 +5854,15 @@ dependencies = [ [[package]] name = "scale-encode-derive" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78a3993a13b4eafa89350604672c8757b7ea84c7c5947d4b3691e3169c96379b" +checksum = "17020f2d59baabf2ddcdc20a4e567f8210baf089b8a8d4785f5fd5e716f92038" dependencies = [ "darling 0.20.11", "proc-macro-crate", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -5741,8 +5887,8 @@ checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" dependencies = [ "proc-macro-crate", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -5762,9 +5908,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05c61b6b706a3eaad63b506ab50a1d2319f817ae01cf753adcc3f055f9f0fcd6" dependencies = [ "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "scale-info", - "syn 2.0.111", + "syn 2.0.114", "thiserror 2.0.17", ] @@ -5810,9 +5956,9 @@ dependencies = [ [[package]] name = "schemars" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289" +checksum = "54e910108742c57a770f492731f99be216a52fadd361b06c8fb59d74ccc267d2" dependencies = [ "dyn-clone", "ref-cast", @@ -5990,8 
+6136,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -6043,8 +6189,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -6069,9 +6215,9 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.12.1", + "indexmap 2.13.0", "schemars 0.9.0", - "schemars 1.1.0", + "schemars 1.2.0", "serde_core", "serde_json", "serde_with_macros", @@ -6086,8 +6232,8 @@ checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" dependencies = [ "darling 0.21.3", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -6096,7 +6242,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "itoa", "ryu", "serde", @@ -6189,10 +6335,11 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.7" +version = "1.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" dependencies = [ + "errno", "libc", ] @@ -6220,9 +6367,9 @@ dependencies = [ [[package]] name = "simd-adler32" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" +checksum = 
"e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" [[package]] name = "simdutf8" @@ -6428,6 +6575,42 @@ dependencies = [ "der", ] +[[package]] +name = "spo-indexer" +version = "3.0.0" +dependencies = [ + "anyhow", + "async-stream", + "blake2", + "blockfrost", + "byte-unit", + "clap", + "derive_more 2.1.1", + "fake", + "fastrace", + "futures", + "hex", + "humantime-serde", + "indexer-common", + "indoc", + "itertools 0.14.0", + "log", + "metrics", + "parity-scale-codec", + "parking_lot", + "paste", + "reqwest 0.13.1", + "secrecy", + "serde", + "serde_json", + "serde_with", + "sqlx", + "subxt", + "thiserror 2.0.17", + "tokio", + "trait-variant", +] + [[package]] name = "sqlx" version = "0.8.6" @@ -6449,6 +6632,7 @@ checksum = "ee6798b1838b6a0f69c007c133b8df5866302197e404e8b6ee8ed3e3a5e68dc6" dependencies = [ "base64 0.22.1", "bytes", + "chrono", "crc", "crossbeam-queue", "either", @@ -6459,7 +6643,7 @@ dependencies = [ "futures-util", "hashbrown 0.15.5", "hashlink", - "indexmap 2.12.1", + "indexmap 2.13.0", "log", "memchr", "once_cell", @@ -6486,10 +6670,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2d452988ccaacfbf5e0bdbc348fb91d7c8af5bee192173ac3636b5fb6e6715d" dependencies = [ "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "sqlx-core", "sqlx-macros-core", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -6504,7 +6688,7 @@ dependencies = [ "hex", "once_cell", "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "serde", "serde_json", "sha2 0.10.9", @@ -6512,7 +6696,7 @@ dependencies = [ "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", - "syn 2.0.111", + "syn 2.0.114", "tokio", "url", ] @@ -6528,6 +6712,7 @@ dependencies = [ "bitflags", "byteorder", "bytes", + "chrono", "crc", "digest 0.10.7", "dotenvy", @@ -6571,6 +6756,7 @@ dependencies = [ "base64 0.22.1", "bitflags", "byteorder", + "chrono", "crc", "dotenvy", "etcetera 0.8.0", @@ -6607,6 +6793,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c2d12fe70b2c1b4401038055f90f151b78208de1f9f89a7dbfd41587a10c3eea" dependencies = [ "atoi", + "chrono", "flume", "futures-channel", "futures-core", @@ -6679,9 +6866,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e1575d8d40908d70f6fd05537266b90ae71b15dbbe7a8b7dffa2b759306d329" dependencies = [ "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "structmeta-derive", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -6691,8 +6878,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -6712,8 +6899,8 @@ checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ "heck", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -6724,9 +6911,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "subxt" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b4bad7e66234d9a2b0ea54ccaa2f6d8455e42593d6b85a01b45e36e5f099cf0" +checksum = "3e689b7f5635ffd08301b1b7d427300f7c10bc0e66069c4068d36ce6921bc736" dependencies = [ "async-trait", "derive-where", @@ -6761,26 +6948,26 @@ dependencies = [ [[package]] name = "subxt-codegen" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33a146a51746d7a4aa5e2b1c1762f5f99b7cee09ac26f7cc4fa7cb719dc88236" +checksum = "740eedc385673e6c5e0de60d2ea6d12d311359d3ccea35b86b9161e3acaf938f" dependencies = [ "heck", "parity-scale-codec", "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "scale-info", "scale-typegen", "subxt-metadata", - "syn 2.0.111", + "syn 2.0.114", "thiserror 2.0.17", 
] [[package]] name = "subxt-core" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfc94815338f3c695156f1267955b3842d7b97f3b57313c468933d6118399f45" +checksum = "1f2f40f6145c1805e37339c4e460c4a18fcafae913b15d2c648b7cac991fd903" dependencies = [ "base58", "blake2", @@ -6808,9 +6995,9 @@ dependencies = [ [[package]] name = "subxt-lightclient" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ecf4cc8179ae36d31d8aee9fd887eadc977fda6334b57bf659526a3895e5277" +checksum = "61321269d3dcc65b8f884eb4d10e393f7bca22b0688d373a0285d4e8ad7221be" dependencies = [ "futures", "futures-util", @@ -6825,26 +7012,26 @@ dependencies = [ [[package]] name = "subxt-macro" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e66911c3e9ba992086e4ec56749508cd1d9e883c274268e1c9cb55ab464859bd" +checksum = "efc6c5054278308a2b01804f00676ece77270a358a2caee6df1358cf81ec0cd5" dependencies = [ "darling 0.20.11", "parity-scale-codec", "proc-macro-error2", - "quote 1.0.42", + "quote 1.0.43", "scale-typegen", "subxt-codegen", "subxt-metadata", "subxt-utils-fetchmetadata", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "subxt-metadata" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "121d9a39a3fd0dd6479b37254d3590bc01cbd6085c3311ad5eacd5e939ca9bef" +checksum = "bc80c07a71e180a42ba0f12727b1f9f39bf03746df6d546d24edbbc137f64fa1" dependencies = [ "frame-decode", "frame-metadata", @@ -6857,9 +7044,9 @@ dependencies = [ [[package]] name = "subxt-rpcs" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919e94a90f21da9a98822bb5b26af6ddd1b475a090277859f894bb8126ed5698" +checksum = "3fe65228472ea5a6bd23d8f2cd12833706466d2425805b2a38ecedc258df141a" dependencies = [ 
"derive-where", "finito", @@ -6883,9 +7070,9 @@ dependencies = [ [[package]] name = "subxt-utils-fetchmetadata" -version = "0.44.1" +version = "0.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d52451ea9de5238f0908e82df99c29e9919cbe72fe878cff36143633c0541c5" +checksum = "a26ed947c63b4620429465c9f7e1f346433ddc21780c4bfcfade1e3a4dcdfab8" dependencies = [ "hex", "parity-scale-codec", @@ -6988,18 +7175,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.111" +version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", - "quote 1.0.42", + "quote 1.0.43", "unicode-ident", ] @@ -7028,8 +7215,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -7045,6 +7232,27 @@ dependencies = [ "windows", ] +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags", + "core-foundation 0.9.4", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tap" version = 
"1.0.1" @@ -7128,8 +7336,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -7139,8 +7347,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -7249,8 +7457,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", ] [[package]] @@ -7293,9 +7511,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.17" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" dependencies = [ "bytes", "futures-core", @@ -7328,20 +7546,20 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.7.3" +version = "0.7.5+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" dependencies = [ "serde_core", ] [[package]] name = "toml_edit" -version = "0.23.7" +version = "0.23.10+spec-1.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "toml_datetime", "toml_parser", "winnow", @@ -7349,9 +7567,9 @@ dependencies = [ [[package]] name = "toml_parser" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" dependencies = [ "winnow", ] @@ -7404,7 +7622,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.12.1", + "indexmap 2.13.0", "pin-project-lite", "slab", "sync_wrapper", @@ -7448,9 +7666,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "log", "pin-project-lite", @@ -7465,15 +7683,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] name = "tracing-core" -version = "0.1.35" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", "valuable", @@ -7511,8 +7729,8 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -7628,6 +7846,12 @@ dependencies = [ "version_check", ] +[[package]] +name = "unicase" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" + [[package]] name = "unicode-bidi" version = "0.3.18" @@ -7714,7 +7938,7 @@ dependencies = [ "rustls-pki-types", "ureq-proto", "utf-8", - "webpki-roots 1.0.4", + "webpki-roots 1.0.5", ] [[package]] @@ -7731,14 +7955,15 @@ dependencies = [ [[package]] name = "url" -version = "2.5.7" +version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", "percent-encoding", "serde", + "serde_derive", ] [[package]] @@ -7934,7 +8159,7 @@ version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ - "quote 1.0.42", + "quote 1.0.43", "wasm-bindgen-macro-support", ] @@ -7946,8 +8171,8 @@ checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ "bumpalo", "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", "wasm-bindgen-shared", ] @@ -8049,14 +8274,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" dependencies = [ - "webpki-root-certs 1.0.4", + "webpki-root-certs 1.0.5", ] [[package]] name = "webpki-root-certs" -version = "1.0.4" +version = "1.0.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee3e3b5f5e80bc89f30ce8d0343bf4e5f12341c51f3e26cbeecbc7c85443e85b" +checksum = "36a29fc0408b113f68cf32637857ab740edfafdf460c326cd2afaa2d84cc05dc" dependencies = [ "rustls-pki-types", ] @@ -8067,14 +8292,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.4", + "webpki-roots 1.0.5", ] [[package]] name = "webpki-roots" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" +checksum = "12bed680863276c63889429bfd6cab3b99943659923822de1c8a39c49e4d722c" dependencies = [ "rustls-pki-types", ] @@ -8126,7 +8351,7 @@ version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" dependencies = [ - "windows-core", + "windows-core 0.57.0", "windows-targets 0.52.6", ] @@ -8136,12 +8361,25 @@ version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" dependencies = [ - "windows-implement", - "windows-interface", - "windows-result", + "windows-implement 0.57.0", + "windows-interface 0.57.0", + "windows-result 0.1.2", "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement 0.60.2", + "windows-interface 0.59.3", + "windows-link", + "windows-result 0.4.1", + "windows-strings", +] + [[package]] name = "windows-implement" version = "0.57.0" @@ -8149,8 +8387,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -8160,8 +8409,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -8170,6 +8430,17 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720" +dependencies = [ + "windows-link", + "windows-result 0.4.1", + "windows-strings", +] + [[package]] name = "windows-result" version = "0.1.2" @@ -8179,6 +8450,24 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-sys" version = "0.45.0" @@ -8558,29 +8847,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.31" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" +checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.31" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" +checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -8599,8 +8888,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", "synstructure", ] @@ -8615,13 +8904,13 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] @@ -8653,12 +8942,12 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", - "quote 1.0.42", - "syn 2.0.111", + "quote 1.0.43", + "syn 2.0.114", ] [[package]] name = "zmij" -version = "1.0.2" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f4a4e8e9dc5c62d159f04fcdbe07f4c3fb710415aab4754bf11505501e3251d" +checksum = "2fc5a66a20078bf1251bde995aa2fdcc4b800c70b5d92dd2c62abc5c60f679f8" diff --git a/Cargo.toml b/Cargo.toml index 4ed1d605..d6e6e4d9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,6 @@ members = [ "indexer-standalone", "indexer-tests", "spo-indexer", - "spo-api", ] [workspace.package] diff --git a/MIGRATION.md b/MIGRATION.md deleted file mode 100644 index 4081a45a..00000000 --- a/MIGRATION.md +++ /dev/null @@ -1,673 +0,0 @@ -# SPO Services Migration Documentation - -## Overview - -This document details the migration of SPO (Stake Pool Operator) services from the `midnight-indexer-spo-extension` repository into the main `midnight-indexer` repository (v3.0.0-alpha.9). - -**Migration Date**: November 17, 2025 -**Source Repository**: midnight-indexer-spo-extension (based on midnight-indexer from months ago) -**Target Repository**: midnight-indexer v3.0.0-alpha.9 -**Target Network**: Midnight Preview Network (`wss://rpc.preview.midnight.network`) - -## Background - -The SPO extension was originally developed on an older version of midnight-indexer. The main midnight-indexer repository has since been updated with 100+ commits, including critical changes for Preview network compatibility. The migration was necessary to: - -1. Get latest improvements and bug fixes from upstream -2. Support the Midnight Preview network (previous version only supported older dev networks) -3. 
Resolve NetworkId compatibility issues that prevented connection to Preview network - -### Key Version Changes - -| Component | Old Version (spo-extension) | New Version (midnight-indexer) | -|-----------|----------------------------|--------------------------------| -| midnight-ledger | alpha.2 | alpha.5 | -| async-nats | 0.42 | 0.45 | -| NetworkId | Enum-based | String-based wrapper | - -## Migration Strategy - -**Approach Selected**: Integrate SPO services into midnight-indexer (Option A) - -**Rationale**: -- Midnight-indexer is the canonical upstream repository -- Easier to maintain going forward -- Access to latest improvements and Preview network support -- NetworkId changes in midnight-indexer v3.0.0 required for Preview network - -## Phase 1: Repository Setup - -### 1.1 Branch Creation -```bash -git checkout -b feature/integrate-spo-services -``` - -### 1.2 Service Directories Copied -From `midnight-indexer-spo-extension` to `midnight-indexer`: -- `spo-indexer/` - Complete directory (38 files) -- `spo-api/` - Complete directory - -## Phase 2: Workspace Configuration - -### 2.1 Updated Root Cargo.toml - -**Added Workspace Members**: -```toml -members = [ - # ... 
existing members - "spo-indexer", - "spo-api", -] -``` - -**Added Workspace Dependencies**: -```toml -[workspace.dependencies] -blake2 = { version = "0.10.6" } -blockfrost = { version = "1.1.0" } -hex = { version = "0.4.3" } -once_cell = { version = "1.19" } -paste = { version = "1.0" } -regex = { version = "1.11" } -``` - -### 2.2 Updated Service Dependencies - -**Files Modified**: -- `spo-indexer/Cargo.toml` -- `spo-api/Cargo.toml` - -**Changes**: Updated dependencies to use workspace versions: -```toml -blake2 = { workspace = true } -blockfrost = { workspace = true } -hex = { workspace = true } -once_cell = { workspace = true } -paste = { workspace = true } -regex = { workspace = true } -``` - -## Phase 3: Database Migrations - -### 3.1 Migration Files Copied - -Copied from `spo-extension/indexer-common/migrations/postgres/` to `midnight-indexer/indexer-common/migrations/postgres/`: - -1. **002_spo_initial.sql** (84 lines) - - Creates `epochs` table - - Creates `pool_metadata_cache` table - - Creates `spo_identity` table - - Creates `committee_membership` table - - Creates `spo_epoch_performance` table - - Creates `spo_history` table - -2. **003_drop_stg_committee.sql** (4 lines) - - Drops staging table - -3. **004_spo_stake.sql** (17 lines) - - Creates `spo_stake_snapshot` table - -4. **005_spo_stake_history.sql** (29 lines) - - Creates `spo_stake_history` table - - Creates `spo_stake_refresh_state` table - -## Phase 4: Docker Configuration - -### 4.1 Updated docker-compose.yaml - -**Added Services**: - -```yaml -spo-indexer: - profiles: - - cloud - depends_on: - postgres: - condition: "service_healthy" - nats: - condition: "service_started" - build: - context: . 
- dockerfile: spo-indexer/Dockerfile - image: "spo-indexer:local" - restart: "no" - environment: - RUST_LOG: "spo_indexer=debug,indexer_common=debug,fastrace_opentelemetry=off,info" - APP__APPLICATION__NETWORK_ID: "preview" - APP__INFRA__NODE__URL: "wss://rpc.preview.midnight.network" - APP__INFRA__NODE__BLOCKFROST_ID: $APP__INFRA__NODE__BLOCKFROST_ID - APP__INFRA__STORAGE__HOST: "postgres" - APP__INFRA__STORAGE__PASSWORD: $APP__INFRA__STORAGE__PASSWORD - APP__INFRA__PUB_SUB__URL: "nats:4222" - APP__INFRA__PUB_SUB__PASSWORD: $APP__INFRA__PUB_SUB__PASSWORD - healthcheck: - test: ["CMD-SHELL", "cat /var/run/spo-indexer/running || exit 0"] - -spo-api: - profiles: - - cloud - depends_on: - postgres: - condition: "service_healthy" - nats: - condition: "service_started" - build: - context: . - dockerfile: spo-api/Dockerfile - image: "spo-api:local" - restart: "no" - ports: - - "8090:8090" - environment: - RUST_LOG: "spo_api=debug,indexer_common=debug,fastrace_opentelemetry=off,info" - APP__APPLICATION__NETWORK_ID: "preview" - APP__INFRA__STORAGE__HOST: "postgres" - APP__INFRA__STORAGE__PASSWORD: $APP__INFRA__STORAGE__PASSWORD - APP__INFRA__API__PORT: "8090" - APP__INFRA__API__MAX_COMPLEXITY: "2000" - APP__INFRA__API__MAX_DEPTH: "50" - healthcheck: - test: ["CMD-SHELL", "cat /var/run/spo-api/running || exit 0"] -``` - -**Key Changes from Original**: -- Changed from pulling pre-built images to local builds -- Updated network configuration to use Preview network -- Updated database user from "postgres" to "indexer" for consistency - -### 4.2 Environment Variables - -**Updated .envrc.local**: -```bash -export APP__INFRA__NODE__BLOCKFROST_ID="previewukkFxumNW31cXmsBtKI1JTnbxvcVCbCj" -export APP__INFRA__STORAGE__PASSWORD="indexer" -export APP__INFRA__PUB_SUB__PASSWORD="indexer" -``` - -## Phase 5: Configuration Updates - -### 5.1 SPO Indexer Configuration - -**File**: `spo-indexer/config.yaml` - -**Changes**: -```yaml -application: - network_id: "preview" # Changed from 
"Undeployed" - -infra: - storage: - user: "indexer" # Changed from "postgres" - - node: - url: "wss://rpc.preview.midnight.network" # Changed from dev network -``` - -### 5.2 SPO API Configuration - -**File**: `spo-api/config.yaml` - -**Changes**: -```yaml -application: - network_id: "preview" # Changed from "Undeployed" -``` - -## Phase 6: Code Compatibility Fixes - -### 6.1 NetworkId Type Change - -**Issue**: NetworkId changed from `Copy` trait enum to String-based wrapper (non-Copy) - -**Files Modified**: - -1. **spo-api/src/application.rs:15** - ```rust - // BEFORE - #[derive(Debug, Clone, Copy, Deserialize)] - pub struct Config { - pub network_id: NetworkId, - } - - // AFTER - #[derive(Debug, Clone, Deserialize)] // Removed Copy - pub struct Config { - pub network_id: NetworkId, - } - ``` - -2. **spo-api/src/infra/api/mod.rs:191** - ```rust - // BEFORE - fn get_network_id(&self) -> NetworkId { - self.data::() - .copied() - .expect("NetworkId is stored in Context") - } - - // AFTER - fn get_network_id(&self) -> NetworkId { - self.data::() - .cloned() // Changed from .copied() - .expect("NetworkId is stored in Context") - } - ``` - -### 6.2 Preview Network API Compatibility - -**Issue**: Midnight Preview network RPC API changed between alpha.2 and alpha.5 - -#### Change 1: `auraPubKey` Field Removed - -**Files Modified**: - -1. **spo-indexer/src/domain/rpc.rs:94-108** - ```rust - // BEFORE - pub struct CandidateRegistration { - pub sidechain_pub_key: String, - pub sidechain_account_id: String, - pub mainchain_pub_key: String, - pub cross_chain_pub_key: String, - pub aura_pub_key: String, - pub grandpa_pub_key: String, - // ... 
rest of fields - } - - // AFTER - pub struct CandidateRegistration { - pub sidechain_pub_key: String, - pub sidechain_account_id: String, - pub mainchain_pub_key: String, - pub cross_chain_pub_key: String, - #[serde(default)] - pub aura_pub_key: Option, // Made optional - #[serde(default)] - pub grandpa_pub_key: Option, // Made optional - // ... rest of fields - } - ``` - -2. **spo-indexer/src/domain/rpc.rs:120-126** (Display impl) - ```rust - // BEFORE - writeln!(f, " Aura Pub Key: {}", self.aura_pub_key)?; - writeln!(f, " Grandpa Pub Key: {}", self.grandpa_pub_key)?; - - // AFTER - if let Some(aura_key) = &self.aura_pub_key { - writeln!(f, " Aura Pub Key: {}", aura_key)?; - } - if let Some(grandpa_key) = &self.grandpa_pub_key { - writeln!(f, " Grandpa Pub Key: {}", grandpa_key)?; - } - ``` - -3. **spo-indexer/src/infra/subxt_node.rs:191-192** - ```rust - // BEFORE - aura_pub_key: remove_hex_prefix(reg.aura_pub_key), - grandpa_pub_key: remove_hex_prefix(reg.grandpa_pub_key), - - // AFTER - aura_pub_key: reg.aura_pub_key.map(remove_hex_prefix), - grandpa_pub_key: reg.grandpa_pub_key.map(remove_hex_prefix), - ``` - -4. 
**spo-indexer/src/application.rs:266** - ```rust - // BEFORE - let aura_pk = remove_hex_prefix(raw_spo.aura_pub_key.to_string()); - - // AFTER - let aura_pk = raw_spo.aura_pub_key.as_ref() - .map(|k| remove_hex_prefix(k.to_string())) - .unwrap_or_default(); - ``` - -## Phase 7: Build and Testing - -### 7.1 Compilation - -**Command**: -```bash -source .envrc.local && docker compose --profile cloud build spo-indexer spo-api -``` - -**Results**: -- ✅ spo-indexer builds successfully -- ✅ spo-api builds successfully -- Build time: ~4 minutes (first build), ~3 minutes (incremental) - -### 7.2 Container Startup - -**Command**: -```bash -source .envrc.local && docker compose up -d postgres nats spo-indexer spo-api -``` - -**Results**: -- ✅ postgres: Started and healthy -- ✅ nats: Started -- ✅ spo-api: Started and healthy (port 8090) -- ✅ spo-indexer: Started successfully - -### 7.3 Initial Testing Results - -**Successful Operations**: -1. ✅ Connected to Preview network RPC (`wss://rpc.preview.midnight.network`) -2. ✅ Created database connection pool -3. ✅ Applied database migrations -4. ✅ Successfully processed epoch 979338 -5. ✅ Started processing epoch 979339 - -**Sample Log Output**: -```json -{"timestamp":"2025-11-17T23:05:44.259759+00:00","level":"INFO","target":"spo_indexer","file":"spo-indexer/src/main.rs","line":52,"message":"starting"} -{"timestamp":"2025-11-17T23:05:48.341566+00:00","level":"DEBUG","target":"indexer_common::infra::pool::postgres","file":"/build/indexer-common/src/infra/pool/postgres.rs","line":60,"message":"created pool"} -processing epoch 979338 -processed epoch 979338 -processing epoch 979339 -``` - -## Current Status - -### ✅ Completed - -1. Repository structure migrated -2. Workspace configuration updated -3. Database migrations integrated -4. Docker configuration updated -5. Configuration files updated for Preview network -6. NetworkId compatibility fixes applied -7. Preview network API compatibility fixes (aura_pub_key, grandpa_pub_key) -8. 
Successful compilation of both services -9. Successful connection to Preview network -10. Successfully processing SPO registration data from Preview network - -### ⚠️ Known Issues - -#### Issue #1: Missing RPC Method `sidechain_getEpochCommittee` - -**Error**: -```json -{"timestamp":"2025-11-17T23:05:51.417863+00:00","level":"ERROR","target":"spo_indexer","file":"spo-indexer/src/main.rs","line":31,"message":"process exited with ERROR","kvs":{"backtrace":"disabled backtrace","error":"cannot make rpc call: sidechain_getEpochCommittee"}} -``` - -**Root Cause**: The `sidechain_getEpochCommittee` RPC method does not exist in the Midnight Preview network API (likely removed or renamed between alpha.2 and alpha.5). - -**Impact**: -- spo-indexer processes a few epochs successfully -- Crashes when it tries to fetch committee information -- Prevents continuous operation - -**Location**: `spo-indexer/src/infra/subxt_node.rs:220` - -**Current Implementation**: -```rust -pub async fn get_committee(&self, epoch_number: u32) -> Result, SPOClientError> { - let rpc_params = RawValue::from_string(format!("[{}]", epoch_number))?; - - loop { - let raw_response = self - .rpc_client - .request( - "sidechain_getEpochCommittee".to_string(), // This method doesn't exist - Some(rpc_params.clone()), - ) - .await - .map_err(|_| SPOClientError::RpcCall("sidechain_getEpochCommittee".to_string()))?; - // ... - } -} -``` - -**Potential Solutions**: - -1. **Option A - Find Alternative RPC Method**: - - Research Midnight Preview network API documentation - - Find the new method name for fetching epoch committee - - Update the RPC call - -2. **Option B - Derive from Alternative Data**: - - Check if committee information is available through `sidechain_getAriadneParameters` response - - Extract committee from candidate registrations if possible - -3. 
**Option C - Make Committee Optional**: - - Modify application logic to handle missing committee data - - Skip committee-related operations if API unavailable - - **Note**: This may impact functionality that depends on committee data - -**Recommended Next Steps**: -1. Research Midnight Preview network RPC API documentation -2. Check if there's an alternative method to get committee data -3. Test if the application can function without committee data -4. If committee data is optional, implement graceful degradation - -## Testing Checklist - -### Completed Tests -- [x] Docker build succeeds for spo-indexer -- [x] Docker build succeeds for spo-api -- [x] Containers start without errors -- [x] Database connection established -- [x] Database migrations apply successfully -- [x] Connection to Preview network RPC successful -- [x] SPO registration data fetching works -- [x] Epoch processing works (at least partially) - -### Pending Tests -- [ ] Full epoch processing without errors -- [ ] Committee data retrieval (blocked by missing RPC) -- [ ] Pool metadata fetching from Blockfrost -- [ ] spo-api GraphQL queries -- [ ] Stake refresh functionality -- [ ] End-to-end data flow from indexer to API - -## Environment Setup - -### Required Environment Variables - -```bash -# Database -export APP__INFRA__STORAGE__PASSWORD="indexer" - -# NATS -export APP__INFRA__PUB_SUB__PASSWORD="indexer" -export APP__INFRA__LEDGER_STATE_STORAGE__PASSWORD="indexer" - -# Blockfrost (for Cardano pool metadata) -export APP__INFRA__NODE__BLOCKFROST_ID="previewukkFxumNW31cXmsBtKI1JTnbxvcVCbCj" - -# Optional: Encryption secret for wallet indexer -export APP__INFRA__SECRET="303132333435363738393031323334353637383930313233343536373839303132" -``` - -### Running Services - -**Start all services**: -```bash -source .envrc.local && docker compose --profile cloud up -d -``` - -**Start only SPO services**: -```bash -source .envrc.local && docker compose up -d postgres nats spo-indexer spo-api -``` - -**View 
logs**: -```bash -# All logs -docker compose logs -f - -# SPO Indexer only -docker compose logs -f spo-indexer - -# SPO API only -docker compose logs -f spo-api -``` - -**Rebuild after code changes**: -```bash -source .envrc.local && docker compose build spo-indexer spo-api -source .envrc.local && docker compose up -d spo-indexer spo-api -``` - -## API Endpoints - -### SPO API -- **GraphQL Endpoint**: http://localhost:8090/api/v1/graphql -- **GraphQL Playground**: http://localhost:8090/api/v1/playground -- **Health Check**: http://localhost:8090/ready - -### Indexer API (if running full stack) -- **GraphQL Endpoint**: http://localhost:8088/api/v1/graphql -- **Health Check**: http://localhost:8088/ready - -## Files Modified Summary - -### New Files -- None (all files copied from spo-extension) - -### Modified Files - -| File | Lines Changed | Purpose | -|------|---------------|---------| -| `Cargo.toml` | +8 | Added workspace members and dependencies | -| `docker-compose.yaml` | +60 | Added spo-indexer and spo-api services | -| `.envrc.local` | +3 | Added Blockfrost ID and credentials | -| `spo-indexer/config.yaml` | 3 | Updated network_id, user, RPC URL | -| `spo-api/config.yaml` | 1 | Updated network_id | -| `spo-indexer/Cargo.toml` | 3 | Updated to use workspace dependencies | -| `spo-api/Cargo.toml` | 1 | Updated to use workspace dependencies | -| `spo-api/src/application.rs` | 1 | Removed Copy trait from Config | -| `spo-api/src/infra/api/mod.rs` | 1 | Changed .copied() to .cloned() | -| `spo-indexer/src/domain/rpc.rs` | 8 | Made aura_pub_key and grandpa_pub_key optional | -| `spo-indexer/src/infra/subxt_node.rs` | 2 | Handle optional aura/grandpa keys | -| `spo-indexer/src/application.rs` | 1 | Handle optional aura_pub_key | - -**Total Files Modified**: 13 -**Total New Files**: 4 (migration SQL files) - -## Dependency Changes - -### Updated Dependencies - -| Dependency | Old Version | New Version | Reason | -|------------|-------------|-------------|--------| 
-| midnight-ledger | alpha.2 | alpha.5 | Preview network support | -| async-nats | 0.42 | 0.45 | Compatibility with midnight-ledger | -| blake2 | - | 0.10.6 | Added to workspace | -| blockfrost | - | 1.1.0 | Added to workspace | -| hex | - | 0.4.3 | Added to workspace | -| once_cell | - | 1.19 | Added to workspace | -| paste | - | 1.0 | Added to workspace | -| regex | - | 1.11 | Added to workspace | - -## Breaking Changes from alpha.2 to alpha.5 - -### 1. NetworkId Type System - -**Before (alpha.2)**: -```rust -#[derive(Copy, Clone)] -pub enum NetworkId { - Undeployed, - DevNet, - TestNet, - MainNet, -} -``` - -**After (alpha.5)**: -```rust -pub struct NetworkId(pub String); -``` - -**Impact**: -- NetworkId no longer implements Copy trait -- Supports arbitrary network names ("preview", "qanet", etc.) -- Configuration changed from enum variant to string value - -### 2. Midnight RPC API Changes - -**Removed Fields in `CandidateRegistration`**: -- `auraPubKey` - No longer returned by `sidechain_getAriadneParameters` -- `grandpaPubKey` - No longer returned by `sidechain_getAriadneParameters` - -**Missing RPC Methods**: -- `sidechain_getEpochCommittee` - Method not available in Preview network - -**Impact**: -- Code must handle optional consensus keys -- Committee data retrieval needs alternative approach - -## Recommendations for Future Work - -### Immediate Priority - -1. **Resolve Committee Data Issue**: - - Contact Midnight team for Preview network RPC documentation - - Identify correct method to fetch committee information - - Or implement graceful handling if committee data is not critical - -2. **End-to-End Testing**: - - Test full epoch processing cycle - - Verify data persists correctly to database - - Test GraphQL queries through spo-api - -### Medium Priority - -3. **Documentation Updates**: - - Update README with SPO services documentation - - Document GraphQL schema - - Add examples for common queries - -4. 
**Monitoring**: - - Add health metrics for SPO services - - Monitor epoch processing performance - - Track Blockfrost API usage - -### Low Priority - -5. **Optimization**: - - Review and optimize database queries - - Consider caching strategies for pool metadata - - Optimize Docker build times with better layer caching - -6. **Code Cleanup**: - - Remove dead code if any - - Consolidate duplicate logic - - Update comments to reflect Preview network specifics - -## Appendix - -### A. Network Configuration Comparison - -| Config Item | Old (Dev Network) | New (Preview Network) | -|-------------|-------------------|----------------------| -| network_id | "Undeployed" | "preview" | -| RPC URL | ws://node:9944 | wss://rpc.preview.midnight.network | -| Database User | postgres | indexer | -| Blockfrost Network | mainnet | preview | - -### B. Database Schema - -See migration files in `indexer-common/migrations/postgres/`: -- `002_spo_initial.sql` - Core SPO tables -- `003_drop_stg_committee.sql` - Cleanup -- `004_spo_stake.sql` - Stake tracking -- `005_spo_stake_history.sql` - Historical stake data - -### C. References - -- **Midnight Documentation**: https://docs.midnight.network/ -- **midnight-indexer Repository**: https://github.com/midnightntwrk/midnight-indexer -- **midnight-ledger Repository**: https://github.com/midnightntwrk/midnight-ledger -- **Blockfrost API**: https://docs.blockfrost.io/ - ---- - -**Document Version**: 1.0 -**Last Updated**: November 17, 2025 -**Authors**: Migration performed with assistance from Claude (Anthropic) diff --git a/docker-compose.yaml b/docker-compose.yaml index 384b0925..8f224059 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -99,37 +99,6 @@ services: timeout: "2s" retries: 2 - spo-api: - profiles: - - cloud - depends_on: - postgres: - condition: "service_healthy" - nats: - condition: "service_started" - build: - context: . 
- dockerfile: spo-api/Dockerfile - image: "spo-api:local" - restart: "no" - ports: - - "8090:8090" - environment: - RUST_LOG: "spo_api=debug,indexer_common=debug,fastrace_opentelemetry=off,info" - APP__APPLICATION__NETWORK_ID: "preview" - APP__INFRA__STORAGE__HOST: "postgres" - APP__INFRA__STORAGE__PASSWORD: $APP__INFRA__STORAGE__PASSWORD - APP__INFRA__API__PORT: "8090" - APP__INFRA__API__MAX_COMPLEXITY: "2000" - APP__INFRA__API__MAX_DEPTH: "50" - healthcheck: - test: ["CMD-SHELL", "cat /var/run/spo-api/running || exit 0"] - start_interval: "2s" - start_period: "30s" - interval: "5s" - timeout: "2s" - retries: 2 - indexer-api: profiles: - cloud diff --git a/indexer-api/graphql/schema-v3.graphql b/indexer-api/graphql/schema-v3.graphql index 4e4fe531..06e01cea 100644 --- a/indexer-api/graphql/schema-v3.graphql +++ b/indexer-api/graphql/schema-v3.graphql @@ -75,6 +75,19 @@ type CollapsedMerkleTree { protocolVersion: Int! } +""" +Committee member for an epoch. +""" +type CommitteeMember { + epochNo: Int! + position: Int! + sidechainPubkeyHex: String! + expectedSlots: Int! + auraPubkeyHex: String + poolIdHex: String + spoSkHex: String +} + """ A contract action. """ @@ -354,6 +367,37 @@ type DustSpendProcessed implements DustLedgerEvent { maxId: Int! } +""" +Current epoch information. +""" +type EpochInfo { + epochNo: Int! + durationSeconds: Int! + elapsedSeconds: Int! +} + +""" +SPO performance for an epoch. +""" +type EpochPerf { + epochNo: Int! + spoSkHex: String! + produced: Int! + expected: Int! + identityLabel: String + stakeSnapshot: String + poolIdHex: String + validatorClass: String +} + +""" +First valid epoch for an SPO identity. +""" +type FirstValidEpoch { + idKey: String! + firstValidEpoch: Int! +} + scalar HexEncoded type Mutation { @@ -382,6 +426,28 @@ type ParamChange implements DustLedgerEvent { maxId: Int! } +""" +Pool metadata from Cardano. +""" +type PoolMetadata { + poolIdHex: String! 
+ hexId: String + name: String + ticker: String + homepageUrl: String + logoUrl: String +} + +""" +Presence event for an SPO in an epoch. +""" +type PresenceEvent { + epochNo: Int! + idKey: String! + source: String! + status: String +} + type Query { """ Find a block for the given optional offset; if not present, the latest block is returned. @@ -407,6 +473,107 @@ type Query { Get the full history of Terms and Conditions changes for governance auditability. """ termsAndConditionsHistory: [TermsAndConditionsChange!]! + """ + List SPO identities with pagination. + """ + spoIdentities(limit: Int, offset: Int): [SpoIdentity!]! + """ + Get SPO identity by pool ID. + """ + spoIdentityByPoolId(poolIdHex: String!): SpoIdentity + """ + Get total count of SPOs. + """ + spoCount: Int + """ + Get pool metadata by pool ID. + """ + poolMetadata(poolIdHex: String!): PoolMetadata + """ + List pool metadata with pagination. + """ + poolMetadataList(limit: Int, offset: Int, withNameOnly: Boolean): [PoolMetadata!]! + """ + Get SPO with metadata by pool ID. + """ + spoByPoolId(poolIdHex: String!): Spo + """ + List SPOs with optional search. + """ + spoList(limit: Int, offset: Int, search: String): [Spo!]! + """ + Get composite SPO data (identity + metadata + performance). + """ + spoCompositeByPoolId(poolIdHex: String!): SpoComposite + """ + Get SPO identifiers ordered by performance. + """ + stakePoolOperators(limit: Int): [String!]! + """ + Get latest SPO performance entries. + """ + spoPerformanceLatest(limit: Int, offset: Int): [EpochPerf!]! + """ + Get SPO performance by SPO key. + """ + spoPerformanceBySpoSk(spoSkHex: String!, limit: Int, offset: Int): [EpochPerf!]! + """ + Get epoch performance for all SPOs. + """ + epochPerformance(epoch: Int!, limit: Int, offset: Int): [EpochPerf!]! + """ + Get current epoch information. + """ + currentEpochInfo: EpochInfo + """ + Get epoch utilization (produced/expected ratio). 
+ """ + epochUtilization(epoch: Int!): Float + """ + Get committee membership for an epoch. + """ + committee(epoch: Int!): [CommitteeMember!]! + """ + Get cumulative registration totals for an epoch range. + """ + registeredTotalsSeries(fromEpoch: Int!, toEpoch: Int!): [RegisteredTotals!]! + """ + Get registration statistics for an epoch range. + """ + registeredSpoSeries(fromEpoch: Int!, toEpoch: Int!): [RegisteredStat!]! + """ + Get raw presence events for an epoch range. + """ + registeredPresence(fromEpoch: Int!, toEpoch: Int!): [PresenceEvent!]! + """ + Get first valid epoch for each SPO identity. + """ + registeredFirstValidEpochs(uptoEpoch: Int): [FirstValidEpoch!]! + """ + Get stake distribution with search and ordering. + """ + stakeDistribution(limit: Int, offset: Int, search: String, orderByStakeDesc: Boolean): [StakeShare!]! +} + +""" +Registration statistics for an epoch. +""" +type RegisteredStat { + epochNo: Int! + federatedValidCount: Int! + federatedInvalidCount: Int! + registeredValidCount: Int! + registeredInvalidCount: Int! + dparam: Float +} + +""" +Cumulative registration totals for an epoch. +""" +type RegisteredTotals { + epochNo: Int! + totalRegistered: Int! + newlyRegistered: Int! } """ @@ -539,6 +706,96 @@ type ShieldedTransactionsProgress { highestRelevantEndIndex: Int! } +""" +SPO with optional metadata. +""" +type Spo { + poolIdHex: String! + validatorClass: String! + sidechainPubkeyHex: String! + auraPubkeyHex: String + name: String + ticker: String + homepageUrl: String + logoUrl: String +} + +""" +Composite SPO data (identity + metadata + performance). +""" +type SpoComposite { + identity: SpoIdentity + metadata: PoolMetadata + performance: [EpochPerf!]! +} + +""" +SPO identity information. +""" +type SpoIdentity { + poolIdHex: String! + mainchainPubkeyHex: String! + sidechainPubkeyHex: String! + auraPubkeyHex: String + validatorClass: String! +} + +""" +Stake share information for an SPO. 
+ +Values are sourced from mainchain pool data (e.g., Blockfrost) and keyed by Cardano pool_id. +""" +type StakeShare { + """ + Cardano pool ID (56-character hex string). + """ + poolIdHex: String! + """ + Pool name from metadata. + """ + name: String + """ + Pool ticker from metadata. + """ + ticker: String + """ + Pool homepage URL from metadata. + """ + homepageUrl: String + """ + Pool logo URL from metadata. + """ + logoUrl: String + """ + Current live stake in lovelace. + """ + liveStake: String + """ + Current active stake in lovelace. + """ + activeStake: String + """ + Number of live delegators. + """ + liveDelegators: Int + """ + Saturation ratio (0.0 to 1.0+). + """ + liveSaturation: Float + """ + Declared pledge in lovelace. + """ + declaredPledge: String + """ + Current live pledge in lovelace. + """ + livePledge: String + """ + Stake share as a fraction of total stake. + """ + stakeShare: Float +} + type Subscription { """ Subscribe to blocks starting at the given offset or at the latest block if the offset is diff --git a/indexer-api/src/domain.rs b/indexer-api/src/domain.rs index 81093208..f69f88c2 100644 --- a/indexer-api/src/domain.rs +++ b/indexer-api/src/domain.rs @@ -19,6 +19,7 @@ mod contract_action; pub mod dust; mod ledger_event; mod ledger_state; +pub mod spo; pub mod system_parameters; mod transaction; mod unshielded; diff --git a/indexer-api/src/domain/spo.rs b/indexer-api/src/domain/spo.rs new file mode 100644 index 00000000..e6a5da4a --- /dev/null +++ b/indexer-api/src/domain/spo.rs @@ -0,0 +1,139 @@ +// This file is part of midnight-indexer. +// Copyright (C) 2025 Midnight Foundation +// SPDX-License-Identifier: Apache-2.0 +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/// SPO identity information.
+#[derive(Debug, Clone)]
+pub struct SpoIdentity {
+    pub pool_id_hex: String,
+    pub mainchain_pubkey_hex: String,
+    pub sidechain_pubkey_hex: String,
+    pub aura_pubkey_hex: Option<String>,
+    pub validator_class: String,
+}
+
+/// Pool metadata from Cardano.
+#[derive(Debug, Clone)]
+pub struct PoolMetadata {
+    pub pool_id_hex: String,
+    pub hex_id: Option<String>,
+    pub name: Option<String>,
+    pub ticker: Option<String>,
+    pub homepage_url: Option<String>,
+    pub logo_url: Option<String>,
+}
+
+/// SPO with optional metadata.
+#[derive(Debug, Clone)]
+pub struct Spo {
+    pub pool_id_hex: String,
+    pub validator_class: String,
+    pub sidechain_pubkey_hex: String,
+    pub aura_pubkey_hex: Option<String>,
+    pub name: Option<String>,
+    pub ticker: Option<String>,
+    pub homepage_url: Option<String>,
+    pub logo_url: Option<String>,
+}
+
+/// Composite SPO data (identity + metadata + performance).
+#[derive(Debug, Clone)]
+pub struct SpoComposite {
+    pub identity: Option<SpoIdentity>,
+    pub metadata: Option<PoolMetadata>,
+    pub performance: Vec<EpochPerf>,
+}
+
+/// SPO performance for an epoch.
+#[derive(Debug, Clone)]
+pub struct EpochPerf {
+    pub epoch_no: i64,
+    pub spo_sk_hex: String,
+    pub produced: i64,
+    pub expected: i64,
+    pub identity_label: Option<String>,
+    pub stake_snapshot: Option<String>,
+    pub pool_id_hex: Option<String>,
+    pub validator_class: Option<String>,
+}
+
+/// Current epoch information.
+#[derive(Debug, Clone)]
+pub struct EpochInfo {
+    pub epoch_no: i64,
+    pub duration_seconds: i64,
+    pub elapsed_seconds: i64,
+}
+
+/// Committee member for an epoch.
+#[derive(Debug, Clone)]
+pub struct CommitteeMember {
+    pub epoch_no: i64,
+    pub position: i32,
+    pub sidechain_pubkey_hex: String,
+    pub expected_slots: i32,
+    pub aura_pubkey_hex: Option<String>,
+    pub pool_id_hex: Option<String>,
+    pub spo_sk_hex: Option<String>,
+}
+
+/// Registration statistics for an epoch.
+#[derive(Debug, Clone)]
+pub struct RegisteredStat {
+    pub epoch_no: i64,
+    pub federated_valid_count: i64,
+    pub federated_invalid_count: i64,
+    pub registered_valid_count: i64,
+    pub registered_invalid_count: i64,
+    pub dparam: Option<f64>,
+}
+
+/// Cumulative registration totals for an epoch.
+#[derive(Debug, Clone)]
+pub struct RegisteredTotals {
+    pub epoch_no: i64,
+    pub total_registered: i64,
+    pub newly_registered: i64,
+}
+
+/// Presence event for an SPO in an epoch.
+#[derive(Debug, Clone)]
+pub struct PresenceEvent {
+    pub epoch_no: i64,
+    pub id_key: String,
+    pub source: String,
+    pub status: Option<String>,
+}
+
+/// First valid epoch for an SPO identity.
+#[derive(Debug, Clone)]
+pub struct FirstValidEpoch {
+    pub id_key: String,
+    pub first_valid_epoch: i64,
+}
+
+/// Stake share information for an SPO.
+#[derive(Debug, Clone)]
+pub struct StakeShare {
+    pub pool_id_hex: String,
+    pub name: Option<String>,
+    pub ticker: Option<String>,
+    pub homepage_url: Option<String>,
+    pub logo_url: Option<String>,
+    pub live_stake: Option<String>,
+    pub active_stake: Option<String>,
+    pub live_delegators: Option<i64>,
+    pub live_saturation: Option<f64>,
+    pub declared_pledge: Option<String>,
+    pub live_pledge: Option<String>,
+    pub stake_share: Option<f64>,
+}
diff --git a/indexer-api/src/domain/storage.rs b/indexer-api/src/domain/storage.rs
index 83780676..8f0978cc 100644
--- a/indexer-api/src/domain/storage.rs
+++ b/indexer-api/src/domain/storage.rs
@@ -18,6 +18,7 @@ pub mod contract_action;
 pub mod dust;
 pub mod ledger_events;
 pub mod ledger_state;
+pub mod spo;
 pub mod system_parameters;
 pub mod transaction;
 pub mod unshielded;
@@ -25,7 +26,7 @@ pub mod wallet;
 use crate::domain::storage::{
     block::BlockStorage, contract_action::ContractActionStorage, dust::DustStorage,
-    ledger_events::LedgerEventStorage, ledger_state::LedgerStateStorage,
+    ledger_events::LedgerEventStorage, ledger_state::LedgerStateStorage, spo::SpoStorage,
     system_parameters::SystemParametersStorage, transaction::TransactionStorage,
     unshielded::UnshieldedUtxoStorage, wallet::WalletStorage,
 };
@@ -39,6 +40,7 @@ where
         + DustStorage
         + LedgerEventStorage
         + LedgerStateStorage
+        + SpoStorage
         + SystemParametersStorage
         + TransactionStorage
         + UnshieldedUtxoStorage
diff --git a/indexer-api/src/domain/storage/spo.rs b/indexer-api/src/domain/storage/spo.rs
new file mode 100644
index 00000000..d7c574e4
--- /dev/null
+++ b/indexer-api/src/domain/storage/spo.rs
@@ -0,0 +1,283 @@
+// This file is part of midnight-indexer.
+// Copyright (C) 2025 Midnight Foundation
+// SPDX-License-Identifier: Apache-2.0
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::domain::{
+    spo::{
+        CommitteeMember, EpochInfo, EpochPerf, FirstValidEpoch, PoolMetadata, PresenceEvent,
+        RegisteredStat, RegisteredTotals, Spo, SpoComposite, SpoIdentity, StakeShare,
+    },
+    storage::NoopStorage,
+};
+
+/// Storage abstraction for SPO data.
+#[trait_variant::make(Send)]
+pub trait SpoStorage
+where
+    Self: Clone + Send + Sync + 'static,
+{
+    /// Get SPO identities with pagination.
+    async fn get_spo_identities(
+        &self,
+        limit: i64,
+        offset: i64,
+    ) -> Result<Vec<SpoIdentity>, sqlx::Error>;
+
+    /// Get SPO identity by pool ID.
+    async fn get_spo_identity_by_pool_id(
+        &self,
+        pool_id: &str,
+    ) -> Result<Option<SpoIdentity>, sqlx::Error>;
+
+    /// Get total count of SPOs.
+    async fn get_spo_count(&self) -> Result<i64, sqlx::Error>;
+
+    /// Get pool metadata by pool ID.
+    async fn get_pool_metadata(&self, pool_id: &str) -> Result<Option<PoolMetadata>, sqlx::Error>;
+
+    /// Get pool metadata list with pagination.
+    async fn get_pool_metadata_list(
+        &self,
+        limit: i64,
+        offset: i64,
+        with_name_only: bool,
+    ) -> Result<Vec<PoolMetadata>, sqlx::Error>;
+
+    /// Get SPO with metadata by pool ID.
+    async fn get_spo_by_pool_id(&self, pool_id: &str) -> Result<Option<Spo>, sqlx::Error>;
+
+    /// Get SPO list with optional search.
+    async fn get_spo_list(
+        &self,
+        limit: i64,
+        offset: i64,
+        search: Option<&str>,
+    ) -> Result<Vec<Spo>, sqlx::Error>;
+
+    /// Get composite SPO data (identity + metadata + performance).
+    async fn get_spo_composite_by_pool_id(
+        &self,
+        pool_id: &str,
+        perf_limit: i64,
+    ) -> Result<Option<SpoComposite>, sqlx::Error>;
+
+    /// Get SPO identifiers ordered by performance.
+    async fn get_stake_pool_operator_ids(&self, limit: i64) -> Result<Vec<String>, sqlx::Error>;
+
+    /// Get latest SPO performance entries.
+    async fn get_spo_performance_latest(
+        &self,
+        limit: i64,
+        offset: i64,
+    ) -> Result<Vec<EpochPerf>, sqlx::Error>;
+
+    /// Get SPO performance by SPO key.
+    async fn get_spo_performance_by_spo_sk(
+        &self,
+        spo_sk: &str,
+        limit: i64,
+        offset: i64,
+    ) -> Result<Vec<EpochPerf>, sqlx::Error>;
+
+    /// Get epoch performance for all SPOs.
+    async fn get_epoch_performance(
+        &self,
+        epoch: i64,
+        limit: i64,
+        offset: i64,
+    ) -> Result<Vec<EpochPerf>, sqlx::Error>;
+
+    /// Get current epoch information.
+    async fn get_current_epoch_info(&self) -> Result<Option<EpochInfo>, sqlx::Error>;
+
+    /// Get epoch utilization (produced/expected ratio).
+    async fn get_epoch_utilization(&self, epoch: i64) -> Result<Option<f64>, sqlx::Error>;
+
+    /// Get committee membership for an epoch.
+    async fn get_committee(&self, epoch: i64) -> Result<Vec<CommitteeMember>, sqlx::Error>;
+
+    /// Get cumulative registration totals for an epoch range.
+    async fn get_registered_totals_series(
+        &self,
+        from_epoch: i64,
+        to_epoch: i64,
+    ) -> Result<Vec<RegisteredTotals>, sqlx::Error>;
+
+    /// Get registration statistics for an epoch range.
+    async fn get_registered_spo_series(
+        &self,
+        from_epoch: i64,
+        to_epoch: i64,
+    ) -> Result<Vec<RegisteredStat>, sqlx::Error>;
+
+    /// Get raw presence events for an epoch range.
+    async fn get_registered_presence(
+        &self,
+        from_epoch: i64,
+        to_epoch: i64,
+    ) -> Result<Vec<PresenceEvent>, sqlx::Error>;
+
+    /// Get first valid epoch for each SPO identity.
+    async fn get_registered_first_valid_epochs(
+        &self,
+        upto_epoch: Option<i64>,
+    ) -> Result<Vec<FirstValidEpoch>, sqlx::Error>;
+
+    /// Get stake distribution with search and ordering.
+    /// Returns (stake_shares, total_live_stake).
+    async fn get_stake_distribution(
+        &self,
+        limit: i64,
+        offset: i64,
+        search: Option<&str>,
+        order_desc: bool,
+    ) -> Result<(Vec<StakeShare>, f64), sqlx::Error>;
+}
+
+#[allow(unused_variables)]
+impl SpoStorage for NoopStorage {
+    async fn get_spo_identities(
+        &self,
+        limit: i64,
+        offset: i64,
+    ) -> Result<Vec<SpoIdentity>, sqlx::Error> {
+        unimplemented!()
+    }
+
+    async fn get_spo_identity_by_pool_id(
+        &self,
+        pool_id: &str,
+    ) -> Result<Option<SpoIdentity>, sqlx::Error> {
+        unimplemented!()
+    }
+
+    async fn get_spo_count(&self) -> Result<i64, sqlx::Error> {
+        unimplemented!()
+    }
+
+    async fn get_pool_metadata(&self, pool_id: &str) -> Result<Option<PoolMetadata>, sqlx::Error> {
+        unimplemented!()
+    }
+
+    async fn get_pool_metadata_list(
+        &self,
+        limit: i64,
+        offset: i64,
+        with_name_only: bool,
+    ) -> Result<Vec<PoolMetadata>, sqlx::Error> {
+        unimplemented!()
+    }
+
+    async fn get_spo_by_pool_id(&self, pool_id: &str) -> Result<Option<Spo>, sqlx::Error> {
+        unimplemented!()
+    }
+
+    async fn get_spo_list(
+        &self,
+        limit: i64,
+        offset: i64,
+        search: Option<&str>,
+    ) -> Result<Vec<Spo>, sqlx::Error> {
+        unimplemented!()
+    }
+
+    async fn get_spo_composite_by_pool_id(
+        &self,
+        pool_id: &str,
+        perf_limit: i64,
+    ) -> Result<Option<SpoComposite>, sqlx::Error> {
+        unimplemented!()
+    }
+
+    async fn get_stake_pool_operator_ids(&self, limit: i64) -> Result<Vec<String>, sqlx::Error> {
+        unimplemented!()
+    }
+
+    async fn get_spo_performance_latest(
+        &self,
+        limit: i64,
+        offset: i64,
+    ) -> Result<Vec<EpochPerf>, sqlx::Error> {
+        unimplemented!()
+    }
+
+    async fn get_spo_performance_by_spo_sk(
+        &self,
+        spo_sk: &str,
+        limit: i64,
+        offset: i64,
+    ) -> Result<Vec<EpochPerf>, sqlx::Error> {
+        unimplemented!()
+    }
+
+    async fn get_epoch_performance(
+        &self,
+        epoch: i64,
+        limit: i64,
+        offset: i64,
+    ) -> Result<Vec<EpochPerf>, sqlx::Error> {
+        unimplemented!()
+    }
+
+    async fn get_current_epoch_info(&self) -> Result<Option<EpochInfo>, sqlx::Error> {
+        unimplemented!()
+    }
+
+    async fn get_epoch_utilization(&self, epoch: i64) -> Result<Option<f64>, sqlx::Error> {
+        unimplemented!()
+    }
+
+    async fn get_committee(&self, epoch: i64) -> Result<Vec<CommitteeMember>, sqlx::Error> {
+        unimplemented!()
+    }
+ + async fn get_registered_totals_series( + &self, + from_epoch: i64, + to_epoch: i64, + ) -> Result, sqlx::Error> { + unimplemented!() + } + + async fn get_registered_spo_series( + &self, + from_epoch: i64, + to_epoch: i64, + ) -> Result, sqlx::Error> { + unimplemented!() + } + + async fn get_registered_presence( + &self, + from_epoch: i64, + to_epoch: i64, + ) -> Result, sqlx::Error> { + unimplemented!() + } + + async fn get_registered_first_valid_epochs( + &self, + upto_epoch: Option, + ) -> Result, sqlx::Error> { + unimplemented!() + } + + async fn get_stake_distribution( + &self, + limit: i64, + offset: i64, + search: Option<&str>, + order_desc: bool, + ) -> Result<(Vec, f64), sqlx::Error> { + unimplemented!() + } +} diff --git a/indexer-api/src/infra/api/v3.rs b/indexer-api/src/infra/api/v3.rs index 3782266a..84e36f4b 100644 --- a/indexer-api/src/infra/api/v3.rs +++ b/indexer-api/src/infra/api/v3.rs @@ -17,6 +17,7 @@ pub mod dust; pub mod ledger_events; pub mod mutation; pub mod query; +pub mod spo; pub mod subscription; pub mod system_parameters; pub mod transaction; diff --git a/indexer-api/src/infra/api/v3/query.rs b/indexer-api/src/infra/api/v3/query.rs index f7da9188..b9d1128a 100644 --- a/indexer-api/src/infra/api/v3/query.rs +++ b/indexer-api/src/infra/api/v3/query.rs @@ -20,6 +20,11 @@ use crate::{ block::{Block, BlockOffset}, contract_action::{ContractAction, ContractActionOffset}, dust::DustGenerationStatus, + spo::{ + CommitteeMember, EpochInfo, EpochPerf, FirstValidEpoch, PoolMetadata, + PresenceEvent, RegisteredStat, RegisteredTotals, Spo, SpoComposite, SpoIdentity, + StakeShare, + }, system_parameters::{DParameterChange, TermsAndConditionsChange}, transaction::{Transaction, TransactionOffset}, }, @@ -29,6 +34,8 @@ use async_graphql::{Context, Object}; use fastrace::trace; use std::marker::PhantomData; +const DEFAULT_PERFORMANCE_LIMIT: i64 = 20; + /// GraphQL queries. 
pub struct Query { _s: PhantomData, @@ -268,4 +275,394 @@ where .map(TermsAndConditionsChange::from) .collect()) } + + /// List SPO identities with pagination. + #[trace] + async fn spo_identities( + &self, + cx: &Context<'_>, + limit: Option, + offset: Option, + ) -> ApiResult> { + let storage = cx.get_storage::(); + let limit = limit.unwrap_or(50).clamp(1, 500) as i64; + let offset = offset.unwrap_or(0).max(0) as i64; + + let identities = storage + .get_spo_identities(limit, offset) + .await + .map_err_into_server_error(|| "get SPO identities")?; + + Ok(identities.into_iter().map(Into::into).collect()) + } + + /// Get SPO identity by pool ID. + #[trace] + async fn spo_identity_by_pool_id( + &self, + cx: &Context<'_>, + pool_id_hex: String, + ) -> ApiResult> { + let pool_id = normalize_hex(&pool_id_hex); + let storage = cx.get_storage::(); + + let identity = storage + .get_spo_identity_by_pool_id(&pool_id) + .await + .map_err_into_server_error(|| "get SPO identity by pool ID")?; + + Ok(identity.map(Into::into)) + } + + /// Get total count of SPOs. + #[trace] + async fn spo_count(&self, cx: &Context<'_>) -> ApiResult> { + let storage = cx.get_storage::(); + + let count = storage + .get_spo_count() + .await + .map_err_into_server_error(|| "get SPO count")?; + + Ok(Some(count)) + } + + /// Get pool metadata by pool ID. + #[trace] + async fn pool_metadata( + &self, + cx: &Context<'_>, + pool_id_hex: String, + ) -> ApiResult> { + let pool_id = normalize_hex(&pool_id_hex); + let storage = cx.get_storage::(); + + let metadata = storage + .get_pool_metadata(&pool_id) + .await + .map_err_into_server_error(|| "get pool metadata")?; + + Ok(metadata.map(Into::into)) + } + + /// List pool metadata with pagination. 
+ #[trace] + async fn pool_metadata_list( + &self, + cx: &Context<'_>, + limit: Option, + offset: Option, + with_name_only: Option, + ) -> ApiResult> { + let storage = cx.get_storage::(); + let limit = limit.unwrap_or(50).clamp(1, 500) as i64; + let offset = offset.unwrap_or(0).max(0) as i64; + let with_name_only = with_name_only.unwrap_or(false); + + let metadata = storage + .get_pool_metadata_list(limit, offset, with_name_only) + .await + .map_err_into_server_error(|| "get pool metadata list")?; + + Ok(metadata.into_iter().map(Into::into).collect()) + } + + /// Get SPO with metadata by pool ID. + #[trace] + async fn spo_by_pool_id( + &self, + cx: &Context<'_>, + pool_id_hex: String, + ) -> ApiResult> { + let pool_id = normalize_hex(&pool_id_hex); + let storage = cx.get_storage::(); + + let spo = storage + .get_spo_by_pool_id(&pool_id) + .await + .map_err_into_server_error(|| "get SPO by pool ID")?; + + Ok(spo.map(Into::into)) + } + + /// List SPOs with optional search. + #[trace] + async fn spo_list( + &self, + cx: &Context<'_>, + limit: Option, + offset: Option, + search: Option, + ) -> ApiResult> { + let storage = cx.get_storage::(); + let limit = limit.unwrap_or(20).clamp(1, 200) as i64; + let offset = offset.unwrap_or(0).max(0) as i64; + let search_ref = search.as_deref().and_then(|s| { + let trimmed = s.trim(); + if trimmed.is_empty() { + None + } else { + Some(trimmed) + } + }); + + let spos = storage + .get_spo_list(limit, offset, search_ref) + .await + .map_err_into_server_error(|| "get SPO list")?; + + Ok(spos.into_iter().map(Into::into).collect()) + } + + /// Get composite SPO data (identity + metadata + performance). 
+ #[trace] + async fn spo_composite_by_pool_id( + &self, + cx: &Context<'_>, + pool_id_hex: String, + ) -> ApiResult> { + let pool_id = normalize_hex(&pool_id_hex); + let storage = cx.get_storage::(); + + let composite = storage + .get_spo_composite_by_pool_id(&pool_id, DEFAULT_PERFORMANCE_LIMIT) + .await + .map_err_into_server_error(|| "get SPO composite by pool ID")?; + + Ok(composite.map(Into::into)) + } + + /// Get SPO identifiers ordered by performance. + #[trace] + async fn stake_pool_operators( + &self, + cx: &Context<'_>, + limit: Option, + ) -> ApiResult> { + let storage = cx.get_storage::(); + let limit = limit.unwrap_or(20).clamp(1, 100) as i64; + + let ids = storage + .get_stake_pool_operator_ids(limit) + .await + .map_err_into_server_error(|| "get stake pool operators")?; + + Ok(ids) + } + + /// Get latest SPO performance entries. + #[trace] + async fn spo_performance_latest( + &self, + cx: &Context<'_>, + limit: Option, + offset: Option, + ) -> ApiResult> { + let storage = cx.get_storage::(); + let limit = limit + .unwrap_or(DEFAULT_PERFORMANCE_LIMIT as i32) + .clamp(1, 500) as i64; + let offset = offset.unwrap_or(0).max(0) as i64; + + let perfs = storage + .get_spo_performance_latest(limit, offset) + .await + .map_err_into_server_error(|| "get SPO performance latest")?; + + Ok(perfs.into_iter().map(Into::into).collect()) + } + + /// Get SPO performance by SPO key. 
+ #[trace] + async fn spo_performance_by_spo_sk( + &self, + cx: &Context<'_>, + spo_sk_hex: String, + limit: Option, + offset: Option, + ) -> ApiResult> { + let spo_sk = normalize_hex(&spo_sk_hex); + let storage = cx.get_storage::(); + let limit = limit.unwrap_or(100).clamp(1, 500) as i64; + let offset = offset.unwrap_or(0).max(0) as i64; + + let perfs = storage + .get_spo_performance_by_spo_sk(&spo_sk, limit, offset) + .await + .map_err_into_server_error(|| "get SPO performance by SPO key")?; + + Ok(perfs.into_iter().map(Into::into).collect()) + } + + /// Get epoch performance for all SPOs. + #[trace] + async fn epoch_performance( + &self, + cx: &Context<'_>, + epoch: i64, + limit: Option, + offset: Option, + ) -> ApiResult> { + let storage = cx.get_storage::(); + let limit = limit.unwrap_or(100).clamp(1, 500) as i64; + let offset = offset.unwrap_or(0).max(0) as i64; + + let perfs = storage + .get_epoch_performance(epoch, limit, offset) + .await + .map_err_into_server_error(|| "get epoch performance")?; + + Ok(perfs.into_iter().map(Into::into).collect()) + } + + /// Get current epoch information. + #[trace] + async fn current_epoch_info(&self, cx: &Context<'_>) -> ApiResult> { + let storage = cx.get_storage::(); + + let info = storage + .get_current_epoch_info() + .await + .map_err_into_server_error(|| "get current epoch info")?; + + Ok(info.map(Into::into)) + } + + /// Get epoch utilization (produced/expected ratio). + #[trace] + async fn epoch_utilization(&self, cx: &Context<'_>, epoch: i32) -> ApiResult> { + let storage = cx.get_storage::(); + + let utilization = storage + .get_epoch_utilization(epoch as i64) + .await + .map_err_into_server_error(|| "get epoch utilization")?; + + Ok(utilization) + } + + /// Get committee membership for an epoch. 
+ #[trace] + async fn committee(&self, cx: &Context<'_>, epoch: i64) -> ApiResult> { + let storage = cx.get_storage::(); + + let members = storage + .get_committee(epoch) + .await + .map_err_into_server_error(|| "get committee")?; + + Ok(members.into_iter().map(Into::into).collect()) + } + + /// Get cumulative registration totals for an epoch range. + #[trace] + async fn registered_totals_series( + &self, + cx: &Context<'_>, + from_epoch: i64, + to_epoch: i64, + ) -> ApiResult> { + let storage = cx.get_storage::(); + + let totals = storage + .get_registered_totals_series(from_epoch, to_epoch) + .await + .map_err_into_server_error(|| "get registered totals series")?; + + Ok(totals.into_iter().map(Into::into).collect()) + } + + /// Get registration statistics for an epoch range. + #[trace] + async fn registered_spo_series( + &self, + cx: &Context<'_>, + from_epoch: i64, + to_epoch: i64, + ) -> ApiResult> { + let storage = cx.get_storage::(); + + let stats = storage + .get_registered_spo_series(from_epoch, to_epoch) + .await + .map_err_into_server_error(|| "get registered SPO series")?; + + Ok(stats.into_iter().map(Into::into).collect()) + } + + /// Get raw presence events for an epoch range. + #[trace] + async fn registered_presence( + &self, + cx: &Context<'_>, + from_epoch: i64, + to_epoch: i64, + ) -> ApiResult> { + let storage = cx.get_storage::(); + + let events = storage + .get_registered_presence(from_epoch, to_epoch) + .await + .map_err_into_server_error(|| "get registered presence")?; + + Ok(events.into_iter().map(Into::into).collect()) + } + + /// Get first valid epoch for each SPO identity. 
+ #[trace] + async fn registered_first_valid_epochs( + &self, + cx: &Context<'_>, + upto_epoch: Option, + ) -> ApiResult> { + let storage = cx.get_storage::(); + + let epochs = storage + .get_registered_first_valid_epochs(upto_epoch) + .await + .map_err_into_server_error(|| "get registered first valid epochs")?; + + Ok(epochs.into_iter().map(Into::into).collect()) + } + + /// Get stake distribution with search and ordering. + #[trace] + async fn stake_distribution( + &self, + cx: &Context<'_>, + limit: Option, + offset: Option, + search: Option, + order_by_stake_desc: Option, + ) -> ApiResult> { + let storage = cx.get_storage::(); + let limit = limit.unwrap_or(50).clamp(1, 500) as i64; + let offset = offset.unwrap_or(0).max(0) as i64; + let search_ref = search.as_deref().and_then(|s| { + let trimmed = s.trim(); + if trimmed.is_empty() { + None + } else { + Some(trimmed) + } + }); + let order_desc = order_by_stake_desc.unwrap_or(true); + + let (shares, _total) = storage + .get_stake_distribution(limit, offset, search_ref, order_desc) + .await + .map_err_into_server_error(|| "get stake distribution")?; + + Ok(shares.into_iter().map(Into::into).collect()) + } +} + +/// Normalize hex string by stripping 0x prefix and lowercasing. +fn normalize_hex(input: &str) -> String { + let s = input + .strip_prefix("0x") + .unwrap_or(input) + .strip_prefix("0X") + .unwrap_or(input); + s.to_ascii_lowercase() } diff --git a/indexer-api/src/infra/api/v3/spo.rs b/indexer-api/src/infra/api/v3/spo.rs new file mode 100644 index 00000000..e29caa48 --- /dev/null +++ b/indexer-api/src/infra/api/v3/spo.rs @@ -0,0 +1,326 @@ +// This file is part of midnight-indexer. +// Copyright (C) 2025 Midnight Foundation +// SPDX-License-Identifier: Apache-2.0 +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::domain::spo::{ + CommitteeMember as DomainCommitteeMember, EpochInfo as DomainEpochInfo, + EpochPerf as DomainEpochPerf, FirstValidEpoch as DomainFirstValidEpoch, + PoolMetadata as DomainPoolMetadata, PresenceEvent as DomainPresenceEvent, + RegisteredStat as DomainRegisteredStat, RegisteredTotals as DomainRegisteredTotals, + Spo as DomainSpo, SpoComposite as DomainSpoComposite, SpoIdentity as DomainSpoIdentity, + StakeShare as DomainStakeShare, +}; +use async_graphql::SimpleObject; + +/// SPO identity information. +#[derive(SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct SpoIdentity { + pub pool_id_hex: String, + pub mainchain_pubkey_hex: String, + pub sidechain_pubkey_hex: String, + pub aura_pubkey_hex: Option, + pub validator_class: String, +} + +impl From for SpoIdentity { + fn from(d: DomainSpoIdentity) -> Self { + Self { + pool_id_hex: d.pool_id_hex, + mainchain_pubkey_hex: d.mainchain_pubkey_hex, + sidechain_pubkey_hex: d.sidechain_pubkey_hex, + aura_pubkey_hex: d.aura_pubkey_hex, + validator_class: d.validator_class, + } + } +} + +/// Pool metadata from Cardano. 
+#[derive(SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct PoolMetadata { + pub pool_id_hex: String, + pub hex_id: Option, + pub name: Option, + pub ticker: Option, + pub homepage_url: Option, + pub logo_url: Option, +} + +impl From for PoolMetadata { + fn from(d: DomainPoolMetadata) -> Self { + Self { + pool_id_hex: d.pool_id_hex, + hex_id: d.hex_id, + name: d.name, + ticker: d.ticker, + homepage_url: d.homepage_url, + logo_url: d.logo_url, + } + } +} + +/// SPO with optional metadata. +#[derive(SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct Spo { + pub pool_id_hex: String, + pub validator_class: String, + pub sidechain_pubkey_hex: String, + pub aura_pubkey_hex: Option, + pub name: Option, + pub ticker: Option, + pub homepage_url: Option, + pub logo_url: Option, +} + +impl From for Spo { + fn from(d: DomainSpo) -> Self { + Self { + pool_id_hex: d.pool_id_hex, + validator_class: d.validator_class, + sidechain_pubkey_hex: d.sidechain_pubkey_hex, + aura_pubkey_hex: d.aura_pubkey_hex, + name: d.name, + ticker: d.ticker, + homepage_url: d.homepage_url, + logo_url: d.logo_url, + } + } +} + +/// Composite SPO data (identity + metadata + performance). +#[derive(SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct SpoComposite { + pub identity: Option, + pub metadata: Option, + pub performance: Vec, +} + +impl From for SpoComposite { + fn from(d: DomainSpoComposite) -> Self { + Self { + identity: d.identity.map(Into::into), + metadata: d.metadata.map(Into::into), + performance: d.performance.into_iter().map(Into::into).collect(), + } + } +} + +/// SPO performance for an epoch. 
+#[derive(SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct EpochPerf { + pub epoch_no: i64, + pub spo_sk_hex: String, + pub produced: i64, + pub expected: i64, + pub identity_label: Option, + pub stake_snapshot: Option, + pub pool_id_hex: Option, + pub validator_class: Option, +} + +impl From for EpochPerf { + fn from(d: DomainEpochPerf) -> Self { + Self { + epoch_no: d.epoch_no, + spo_sk_hex: d.spo_sk_hex, + produced: d.produced, + expected: d.expected, + identity_label: d.identity_label, + stake_snapshot: d.stake_snapshot, + pool_id_hex: d.pool_id_hex, + validator_class: d.validator_class, + } + } +} + +/// Current epoch information. +#[derive(SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct EpochInfo { + pub epoch_no: i64, + pub duration_seconds: i64, + pub elapsed_seconds: i64, +} + +impl From for EpochInfo { + fn from(d: DomainEpochInfo) -> Self { + Self { + epoch_no: d.epoch_no, + duration_seconds: d.duration_seconds, + elapsed_seconds: d.elapsed_seconds, + } + } +} + +/// Committee member for an epoch. +#[derive(SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct CommitteeMember { + pub epoch_no: i64, + pub position: i32, + pub sidechain_pubkey_hex: String, + pub expected_slots: i32, + pub aura_pubkey_hex: Option, + pub pool_id_hex: Option, + pub spo_sk_hex: Option, +} + +impl From for CommitteeMember { + fn from(d: DomainCommitteeMember) -> Self { + Self { + epoch_no: d.epoch_no, + position: d.position, + sidechain_pubkey_hex: d.sidechain_pubkey_hex, + expected_slots: d.expected_slots, + aura_pubkey_hex: d.aura_pubkey_hex, + pool_id_hex: d.pool_id_hex, + spo_sk_hex: d.spo_sk_hex, + } + } +} + +/// Registration statistics for an epoch. 
+#[derive(SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct RegisteredStat { + pub epoch_no: i64, + pub federated_valid_count: i64, + pub federated_invalid_count: i64, + pub registered_valid_count: i64, + pub registered_invalid_count: i64, + pub dparam: Option, +} + +impl From for RegisteredStat { + fn from(d: DomainRegisteredStat) -> Self { + Self { + epoch_no: d.epoch_no, + federated_valid_count: d.federated_valid_count, + federated_invalid_count: d.federated_invalid_count, + registered_valid_count: d.registered_valid_count, + registered_invalid_count: d.registered_invalid_count, + dparam: d.dparam, + } + } +} + +/// Cumulative registration totals for an epoch. +#[derive(SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct RegisteredTotals { + pub epoch_no: i64, + pub total_registered: i64, + pub newly_registered: i64, +} + +impl From for RegisteredTotals { + fn from(d: DomainRegisteredTotals) -> Self { + Self { + epoch_no: d.epoch_no, + total_registered: d.total_registered, + newly_registered: d.newly_registered, + } + } +} + +/// Presence event for an SPO in an epoch. +#[derive(SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct PresenceEvent { + pub epoch_no: i64, + pub id_key: String, + pub source: String, + pub status: Option, +} + +impl From for PresenceEvent { + fn from(d: DomainPresenceEvent) -> Self { + Self { + epoch_no: d.epoch_no, + id_key: d.id_key, + source: d.source, + status: d.status, + } + } +} + +/// First valid epoch for an SPO identity. +#[derive(SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct FirstValidEpoch { + pub id_key: String, + pub first_valid_epoch: i64, +} + +impl From for FirstValidEpoch { + fn from(d: DomainFirstValidEpoch) -> Self { + Self { + id_key: d.id_key, + first_valid_epoch: d.first_valid_epoch, + } + } +} + +/// Stake share information for an SPO. +/// +/// Values are sourced from mainchain pool data (e.g., Blockfrost) and keyed by Cardano pool_id. 
+#[derive(SimpleObject)] +#[graphql(rename_fields = "camelCase")] +pub struct StakeShare { + /// Cardano pool ID (56-character hex string). + pub pool_id_hex: String, + /// Pool name from metadata. + pub name: Option, + /// Pool ticker from metadata. + pub ticker: Option, + /// Pool homepage URL from metadata. + pub homepage_url: Option, + /// Pool logo URL from metadata. + pub logo_url: Option, + /// Current live stake in lovelace. + pub live_stake: Option, + /// Current active stake in lovelace. + pub active_stake: Option, + /// Number of live delegators. + pub live_delegators: Option, + /// Saturation ratio (0.0 to 1.0+). + pub live_saturation: Option, + /// Declared pledge in lovelace. + pub declared_pledge: Option, + /// Current live pledge in lovelace. + pub live_pledge: Option, + /// Stake share as a fraction of total stake. + pub stake_share: Option, +} + +impl From for StakeShare { + fn from(d: DomainStakeShare) -> Self { + Self { + pool_id_hex: d.pool_id_hex, + name: d.name, + ticker: d.ticker, + homepage_url: d.homepage_url, + logo_url: d.logo_url, + live_stake: d.live_stake, + active_stake: d.active_stake, + live_delegators: d.live_delegators, + live_saturation: d.live_saturation, + declared_pledge: d.declared_pledge, + live_pledge: d.live_pledge, + stake_share: d.stake_share, + } + } +} diff --git a/indexer-api/src/infra/storage.rs b/indexer-api/src/infra/storage.rs index eb169f0f..81132bc6 100644 --- a/indexer-api/src/infra/storage.rs +++ b/indexer-api/src/infra/storage.rs @@ -16,6 +16,7 @@ mod contract_action; mod dust; mod ledger_events; mod ledger_state; +mod spo; mod system_parameters; mod transaction; mod unshielded; diff --git a/indexer-api/src/infra/storage/spo.rs b/indexer-api/src/infra/storage/spo.rs new file mode 100644 index 00000000..f6477d7e --- /dev/null +++ b/indexer-api/src/infra/storage/spo.rs @@ -0,0 +1,1133 @@ +// This file is part of midnight-indexer. 
+// Copyright (C) 2025 Midnight Foundation +// SPDX-License-Identifier: Apache-2.0 +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + domain::{ + spo::{ + CommitteeMember, EpochInfo, EpochPerf, FirstValidEpoch, PoolMetadata, PresenceEvent, + RegisteredStat, RegisteredTotals, Spo, SpoComposite, SpoIdentity, StakeShare, + }, + storage::spo::SpoStorage, + }, + infra::storage::Storage, +}; +use fastrace::trace; +use indoc::indoc; + +impl SpoStorage for Storage { + #[trace] + async fn get_spo_identities( + &self, + limit: i64, + offset: i64, + ) -> Result, sqlx::Error> { + let query = indoc! {" + SELECT pool_id AS pool_id_hex, + mainchain_pubkey AS mainchain_pubkey_hex, + sidechain_pubkey AS sidechain_pubkey_hex, + aura_pubkey AS aura_pubkey_hex, + 'UNKNOWN' AS validator_class + FROM spo_identity + WHERE pool_id IS NOT NULL + ORDER BY mainchain_pubkey + LIMIT $1 OFFSET $2 + "}; + + sqlx::query_as::<_, (String, String, String, Option, String)>(query) + .bind(limit) + .bind(offset) + .fetch_all(&*self.pool) + .await + .map(|rows| { + rows.into_iter() + .map( + |( + pool_id_hex, + mainchain_pubkey_hex, + sidechain_pubkey_hex, + aura_pubkey_hex, + validator_class, + )| SpoIdentity { + pool_id_hex, + mainchain_pubkey_hex, + sidechain_pubkey_hex, + aura_pubkey_hex, + validator_class, + }, + ) + .collect() + }) + } + + #[trace] + async fn get_spo_identity_by_pool_id( + &self, + pool_id: &str, + ) -> Result, sqlx::Error> { + let query = indoc! 
{" + SELECT pool_id AS pool_id_hex, + mainchain_pubkey AS mainchain_pubkey_hex, + sidechain_pubkey AS sidechain_pubkey_hex, + aura_pubkey AS aura_pubkey_hex, + 'UNKNOWN' AS validator_class + FROM spo_identity + WHERE pool_id = $1 + LIMIT 1 + "}; + + sqlx::query_as::<_, (String, String, String, Option, String)>(query) + .bind(pool_id) + .fetch_optional(&*self.pool) + .await + .map(|opt| { + opt.map( + |( + pool_id_hex, + mainchain_pubkey_hex, + sidechain_pubkey_hex, + aura_pubkey_hex, + validator_class, + )| SpoIdentity { + pool_id_hex, + mainchain_pubkey_hex, + sidechain_pubkey_hex, + aura_pubkey_hex, + validator_class, + }, + ) + }) + } + + #[trace] + async fn get_spo_count(&self) -> Result { + let query = indoc! {" + SELECT COUNT(1)::BIGINT FROM spo_stake_snapshot + "}; + + sqlx::query_scalar::<_, i64>(query) + .fetch_one(&*self.pool) + .await + } + + #[trace] + async fn get_pool_metadata(&self, pool_id: &str) -> Result, sqlx::Error> { + let query = indoc! {" + SELECT pool_id AS pool_id_hex, + hex_id AS hex_id, + name, ticker, homepage_url, url AS logo_url + FROM pool_metadata_cache + WHERE pool_id = $1 + LIMIT 1 + "}; + + sqlx::query_as::< + _, + ( + String, + Option, + Option, + Option, + Option, + Option, + ), + >(query) + .bind(pool_id) + .fetch_optional(&*self.pool) + .await + .map(|opt| { + opt.map( + |(pool_id_hex, hex_id, name, ticker, homepage_url, logo_url)| PoolMetadata { + pool_id_hex, + hex_id, + name, + ticker, + homepage_url, + logo_url, + }, + ) + }) + } + + #[trace] + async fn get_pool_metadata_list( + &self, + limit: i64, + offset: i64, + with_name_only: bool, + ) -> Result, sqlx::Error> { + let query = if with_name_only { + indoc! {" + SELECT pool_id AS pool_id_hex, + hex_id AS hex_id, + name, ticker, homepage_url, url AS logo_url + FROM pool_metadata_cache + WHERE name IS NOT NULL OR ticker IS NOT NULL + ORDER BY pool_id + LIMIT $1 OFFSET $2 + "} + } else { + indoc! 
{" + SELECT pool_id AS pool_id_hex, + hex_id AS hex_id, + name, ticker, homepage_url, url AS logo_url + FROM pool_metadata_cache + ORDER BY pool_id + LIMIT $1 OFFSET $2 + "} + }; + + sqlx::query_as::< + _, + ( + String, + Option, + Option, + Option, + Option, + Option, + ), + >(query) + .bind(limit) + .bind(offset) + .fetch_all(&*self.pool) + .await + .map(|rows| { + rows.into_iter() + .map( + |(pool_id_hex, hex_id, name, ticker, homepage_url, logo_url)| PoolMetadata { + pool_id_hex, + hex_id, + name, + ticker, + homepage_url, + logo_url, + }, + ) + .collect() + }) + } + + #[trace] + async fn get_spo_by_pool_id(&self, pool_id: &str) -> Result, sqlx::Error> { + let query = indoc! {" + SELECT si.pool_id AS pool_id_hex, + 'UNKNOWN' AS validator_class, + si.sidechain_pubkey AS sidechain_pubkey_hex, + si.aura_pubkey AS aura_pubkey_hex, + pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url + FROM spo_identity si + LEFT JOIN pool_metadata_cache pm ON pm.pool_id = si.pool_id + WHERE si.pool_id = $1 + LIMIT 1 + "}; + + sqlx::query_as::< + _, + ( + String, + String, + String, + Option, + Option, + Option, + Option, + Option, + ), + >(query) + .bind(pool_id) + .fetch_optional(&*self.pool) + .await + .map(|opt| { + opt.map( + |( + pool_id_hex, + validator_class, + sidechain_pubkey_hex, + aura_pubkey_hex, + name, + ticker, + homepage_url, + logo_url, + )| Spo { + pool_id_hex, + validator_class, + sidechain_pubkey_hex, + aura_pubkey_hex, + name, + ticker, + homepage_url, + logo_url, + }, + ) + }) + } + + #[trace] + async fn get_spo_list( + &self, + limit: i64, + offset: i64, + search: Option<&str>, + ) -> Result, sqlx::Error> { + let rows = if let Some(s) = search { + let s_like = format!("%{s}%"); + let s_hex = normalize_hex(s).unwrap_or_else(|| s.to_ascii_lowercase()); + let s_hex_like = format!("%{s_hex}%"); + + let query = indoc! 
{" + SELECT s.pool_id AS pool_id_hex, + 'UNKNOWN' AS validator_class, + si.sidechain_pubkey AS sidechain_pubkey_hex, + si.aura_pubkey AS aura_pubkey_hex, + pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url + FROM spo_stake_snapshot s + LEFT JOIN spo_identity si ON si.pool_id = s.pool_id + LEFT JOIN pool_metadata_cache pm ON pm.pool_id = s.pool_id + WHERE ( + pm.name ILIKE $3 OR pm.ticker ILIKE $3 OR pm.homepage_url ILIKE $3 OR s.pool_id ILIKE $4 + OR si.sidechain_pubkey ILIKE $4 OR si.aura_pubkey ILIKE $4 OR si.mainchain_pubkey ILIKE $4 + ) + ORDER BY COALESCE(si.mainchain_pubkey, s.pool_id) + LIMIT $1 OFFSET $2 + "}; + + sqlx::query_as::< + _, + ( + String, + String, + String, + Option, + Option, + Option, + Option, + Option, + ), + >(query) + .bind(limit) + .bind(offset) + .bind(s_like) + .bind(s_hex_like) + .fetch_all(&*self.pool) + .await? + } else { + let query = indoc! {" + SELECT s.pool_id AS pool_id_hex, + 'UNKNOWN' AS validator_class, + si.sidechain_pubkey AS sidechain_pubkey_hex, + si.aura_pubkey AS aura_pubkey_hex, + pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url + FROM spo_stake_snapshot s + LEFT JOIN spo_identity si ON si.pool_id = s.pool_id + LEFT JOIN pool_metadata_cache pm ON pm.pool_id = s.pool_id + ORDER BY COALESCE(si.mainchain_pubkey, s.pool_id) + LIMIT $1 OFFSET $2 + "}; + + sqlx::query_as::< + _, + ( + String, + String, + String, + Option, + Option, + Option, + Option, + Option, + ), + >(query) + .bind(limit) + .bind(offset) + .fetch_all(&*self.pool) + .await? + }; + + Ok(rows + .into_iter() + .map( + |( + pool_id_hex, + validator_class, + sidechain_pubkey_hex, + aura_pubkey_hex, + name, + ticker, + homepage_url, + logo_url, + )| Spo { + pool_id_hex, + validator_class, + sidechain_pubkey_hex, + aura_pubkey_hex, + name, + ticker, + homepage_url, + logo_url, + }, + ) + .collect()) + } + + #[trace] + async fn get_spo_composite_by_pool_id( + &self, + pool_id: &str, + perf_limit: i64, + ) -> Result, sqlx::Error> { + // Get identity. 
+ let identity = self.get_spo_identity_by_pool_id(pool_id).await?; + + // Get metadata. + let metadata = self.get_pool_metadata(pool_id).await?; + + // Get performance if identity exists. + let performance = if let Some(ref id) = identity { + self.get_spo_performance_by_spo_sk(&id.sidechain_pubkey_hex, perf_limit, 0) + .await? + } else { + vec![] + }; + + // Return None only if both identity and metadata are missing. + if identity.is_none() && metadata.is_none() { + return Ok(None); + } + + Ok(Some(SpoComposite { + identity, + metadata, + performance, + })) + } + + #[trace] + async fn get_stake_pool_operator_ids(&self, limit: i64) -> Result, sqlx::Error> { + let query = indoc! {" + SELECT encode(sep.spo_sk,'hex') AS spo_sk_hex + FROM spo_epoch_performance sep + GROUP BY sep.spo_sk + ORDER BY MAX(sep.produced_blocks) DESC + LIMIT $1 + "}; + + sqlx::query_scalar::<_, String>(query) + .bind(limit) + .fetch_all(&*self.pool) + .await + } + + #[trace] + async fn get_spo_performance_latest( + &self, + limit: i64, + offset: i64, + ) -> Result, sqlx::Error> { + let query = indoc! {" + SELECT sep.epoch_no, + sep.spo_sk AS spo_sk_hex, + sep.produced_blocks, + sep.expected_blocks, + sep.identity_label, + NULL::TEXT AS stake_snapshot, + si.pool_id AS pool_id_hex, + 'UNKNOWN' AS validator_class + FROM spo_epoch_performance sep + LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk + ORDER BY sep.epoch_no DESC, sep.produced_blocks DESC + LIMIT $1 OFFSET $2 + "}; + + sqlx::query_as::< + _, + ( + i64, + String, + i32, + i32, + Option, + Option, + Option, + Option, + ), + >(query) + .bind(limit) + .bind(offset) + .fetch_all(&*self.pool) + .await + .map(|rows| rows.into_iter().map(epoch_perf_from_row).collect()) + } + + #[trace] + async fn get_spo_performance_by_spo_sk( + &self, + spo_sk: &str, + limit: i64, + offset: i64, + ) -> Result, sqlx::Error> { + let query = indoc! 
{" + SELECT sep.epoch_no, + sep.spo_sk AS spo_sk_hex, + sep.produced_blocks, + sep.expected_blocks, + sep.identity_label, + NULL::TEXT AS stake_snapshot, + si.pool_id AS pool_id_hex, + 'UNKNOWN' AS validator_class + FROM spo_epoch_performance sep + LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk + WHERE sep.spo_sk = $1 + ORDER BY sep.epoch_no DESC + LIMIT $2 OFFSET $3 + "}; + + sqlx::query_as::< + _, + ( + i64, + String, + i32, + i32, + Option, + Option, + Option, + Option, + ), + >(query) + .bind(spo_sk) + .bind(limit) + .bind(offset) + .fetch_all(&*self.pool) + .await + .map(|rows| rows.into_iter().map(epoch_perf_from_row).collect()) + } + + #[trace] + async fn get_epoch_performance( + &self, + epoch: i64, + limit: i64, + offset: i64, + ) -> Result, sqlx::Error> { + let query = indoc! {" + SELECT sep.epoch_no, + sep.spo_sk AS spo_sk_hex, + sep.produced_blocks, + sep.expected_blocks, + sep.identity_label, + NULL::TEXT AS stake_snapshot, + si.pool_id AS pool_id_hex, + 'UNKNOWN' AS validator_class + FROM spo_epoch_performance sep + LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk + WHERE sep.epoch_no = $1 + ORDER BY sep.produced_blocks DESC + LIMIT $2 OFFSET $3 + "}; + + sqlx::query_as::< + _, + ( + i64, + String, + i32, + i32, + Option, + Option, + Option, + Option, + ), + >(query) + .bind(epoch) + .bind(limit) + .bind(offset) + .fetch_all(&*self.pool) + .await + .map(|rows| rows.into_iter().map(epoch_perf_from_row).collect()) + } + + #[trace] + async fn get_current_epoch_info(&self) -> Result, sqlx::Error> { + let query = indoc! 
{" + WITH last AS ( + SELECT + epoch_no, + EXTRACT(EPOCH FROM starts_at)::BIGINT AS starts_s, + EXTRACT(EPOCH FROM ends_at)::BIGINT AS ends_s, + EXTRACT(EPOCH FROM (ends_at - starts_at))::BIGINT AS dur_s, + EXTRACT(EPOCH FROM NOW())::BIGINT AS now_s + FROM epochs + ORDER BY epoch_no DESC + LIMIT 1 + ), calc AS ( + SELECT + epoch_no, starts_s, ends_s, dur_s, now_s, + CASE WHEN ends_s > now_s THEN 0 + ELSE ((now_s - ends_s) / dur_s)::BIGINT + 1 END AS n + FROM last + ), synth AS ( + SELECT + (epoch_no + n) AS epoch_no, + dur_s AS duration_seconds, + CASE WHEN n = 0 THEN LEAST(GREATEST(now_s - starts_s, 0), dur_s) + ELSE LEAST(GREATEST(now_s - (ends_s + (n - 1) * dur_s), 0), dur_s) + END AS elapsed_seconds + FROM calc + ) + SELECT epoch_no, duration_seconds, elapsed_seconds FROM synth + "}; + + sqlx::query_as::<_, (i64, i64, i64)>(query) + .fetch_optional(&*self.pool) + .await + .map(|opt| { + opt.map(|(epoch_no, duration_seconds, elapsed_seconds)| EpochInfo { + epoch_no, + duration_seconds, + elapsed_seconds, + }) + }) + } + + #[trace] + async fn get_epoch_utilization(&self, epoch: i64) -> Result, sqlx::Error> { + let query = indoc! {" + SELECT COALESCE( + CASE WHEN SUM(expected_blocks) > 0 + THEN SUM(produced_blocks)::DOUBLE PRECISION / SUM(expected_blocks) + ELSE 0.0 END, + 0.0) AS utilization + FROM spo_epoch_performance + WHERE epoch_no = $1 + "}; + + sqlx::query_scalar::<_, Option>(query) + .bind(epoch) + .fetch_one(&*self.pool) + .await + .map(|v| v.or(Some(0.0))) + } + + #[trace] + async fn get_committee(&self, epoch: i64) -> Result, sqlx::Error> { + let query = indoc! 
{" + SELECT + cm.epoch_no, + cm.position, + cm.sidechain_pubkey AS sidechain_pubkey_hex, + cm.expected_slots, + si.aura_pubkey AS aura_pubkey_hex, + si.pool_id AS pool_id_hex, + si.spo_sk AS spo_sk_hex + FROM committee_membership cm + LEFT JOIN spo_identity si ON si.sidechain_pubkey = cm.sidechain_pubkey + WHERE cm.epoch_no = $1 + ORDER BY cm.position + "}; + + sqlx::query_as::< + _, + ( + i64, + i32, + String, + i32, + Option, + Option, + Option, + ), + >(query) + .bind(epoch) + .fetch_all(&*self.pool) + .await + .map(|rows| { + rows.into_iter() + .map( + |( + epoch_no, + position, + sidechain_pubkey_hex, + expected_slots, + aura_pubkey_hex, + pool_id_hex, + spo_sk_hex, + )| CommitteeMember { + epoch_no, + position, + sidechain_pubkey_hex, + expected_slots, + aura_pubkey_hex, + pool_id_hex, + spo_sk_hex, + }, + ) + .collect() + }) + } + + #[trace] + async fn get_registered_totals_series( + &self, + from_epoch: i64, + to_epoch: i64, + ) -> Result, sqlx::Error> { + let start = from_epoch.min(to_epoch); + let end = to_epoch.max(from_epoch); + + let query = indoc! 
{" + WITH rng AS ( + SELECT generate_series($1::BIGINT, $2::BIGINT) AS epoch_no + ), + cur AS ( + SELECT s.pool_id + FROM spo_stake_snapshot s + ), + union_firsts AS ( + SELECT si.pool_id AS pool_id, MIN(sh.epoch_no)::BIGINT AS first_seen_epoch + FROM spo_history sh + LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk + WHERE si.pool_id IS NOT NULL + GROUP BY si.pool_id + UNION ALL + SELECT si.pool_id AS pool_id, MIN(cm.epoch_no)::BIGINT AS first_seen_epoch + FROM committee_membership cm + LEFT JOIN spo_identity si ON si.sidechain_pubkey = cm.sidechain_pubkey + WHERE si.pool_id IS NOT NULL + GROUP BY si.pool_id + UNION ALL + SELECT si.pool_id AS pool_id, MIN(sep.epoch_no)::BIGINT AS first_seen_epoch + FROM spo_epoch_performance sep + LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk + WHERE si.pool_id IS NOT NULL + GROUP BY si.pool_id + ), + firsts0 AS ( + SELECT pool_id, MIN(first_seen_epoch)::BIGINT AS first_seen_epoch + FROM union_firsts + GROUP BY pool_id + ), + firsts_cur AS ( + SELECT c.pool_id, + COALESCE(f0.first_seen_epoch, $2::BIGINT) AS first_seen_epoch + FROM cur c + LEFT JOIN firsts0 f0 ON f0.pool_id = c.pool_id + ), + agg AS ( + SELECT r.epoch_no, + COUNT(*) FILTER (WHERE fc.first_seen_epoch <= r.epoch_no) AS total_registered, + COUNT(*) FILTER (WHERE fc.first_seen_epoch = r.epoch_no) AS newly_registered + FROM rng r + CROSS JOIN firsts_cur fc + GROUP BY r.epoch_no + ) + SELECT epoch_no, total_registered, newly_registered + FROM agg + ORDER BY epoch_no + "}; + + sqlx::query_as::<_, (i64, i64, i64)>(query) + .bind(start) + .bind(end) + .fetch_all(&*self.pool) + .await + .map(|rows| { + rows.into_iter() + .map( + |(epoch_no, total_registered, newly_registered)| RegisteredTotals { + epoch_no, + total_registered, + newly_registered, + }, + ) + .collect() + }) + } + + #[trace] + async fn get_registered_spo_series( + &self, + from_epoch: i64, + to_epoch: i64, + ) -> Result, sqlx::Error> { + let start = from_epoch.min(to_epoch); + let end = 
to_epoch.max(from_epoch); + + let query = indoc! {" + WITH rng AS ( + SELECT generate_series($1::BIGINT, $2::BIGINT) AS epoch_no + ), + hist_valid AS ( + SELECT sh.epoch_no, + COUNT(DISTINCT si.pool_id) AS cnt + FROM spo_history sh + LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk + WHERE sh.status IN ('VALID','Valid') + AND sh.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT + AND si.pool_id IS NOT NULL + GROUP BY sh.epoch_no + ), + hist_invalid AS ( + SELECT sh.epoch_no, + COUNT(DISTINCT si.pool_id) AS cnt + FROM spo_history sh + LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk + WHERE sh.status IN ('INVALID','Invalid') + AND sh.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT + AND si.pool_id IS NOT NULL + GROUP BY sh.epoch_no + ), + fed AS ( + SELECT c.epoch_no, + COUNT(DISTINCT c.sidechain_pubkey) FILTER (WHERE c.expected_slots > 0) AS federated_valid_count, + 0::BIGINT AS federated_invalid_count + FROM committee_membership c + WHERE c.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT + GROUP BY c.epoch_no + ) + SELECT r.epoch_no, + COALESCE(f.federated_valid_count, 0) AS federated_valid_count, + COALESCE(f.federated_invalid_count, 0) AS federated_invalid_count, + COALESCE(hv.cnt, 0) AS registered_valid_count, + COALESCE(hi.cnt, 0) AS registered_invalid_count, + COALESCE(hv.cnt, 0)::DOUBLE PRECISION AS dparam + FROM rng r + LEFT JOIN hist_valid hv ON hv.epoch_no = r.epoch_no + LEFT JOIN hist_invalid hi ON hi.epoch_no = r.epoch_no + LEFT JOIN fed f ON f.epoch_no = r.epoch_no + ORDER BY r.epoch_no + "}; + + sqlx::query_as::<_, (i64, i64, i64, i64, i64, Option)>(query) + .bind(start) + .bind(end) + .fetch_all(&*self.pool) + .await + .map(|rows| { + rows.into_iter() + .map( + |( + epoch_no, + federated_valid_count, + federated_invalid_count, + registered_valid_count, + registered_invalid_count, + dparam, + )| RegisteredStat { + epoch_no, + federated_valid_count, + federated_invalid_count, + registered_valid_count, + registered_invalid_count, + dparam, + }, + ) + .collect() + }) 
+ } + + #[trace] + async fn get_registered_presence( + &self, + from_epoch: i64, + to_epoch: i64, + ) -> Result, sqlx::Error> { + let start = from_epoch.min(to_epoch); + let end = to_epoch.max(from_epoch); + + let query = indoc! {" + WITH history AS ( + SELECT sh.epoch_no::BIGINT AS epoch_no, + COALESCE(si.pool_id, sh.spo_sk) AS id_key, + 'history'::TEXT AS source, + sh.status::TEXT AS status + FROM spo_history sh + LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk + WHERE sh.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT + ), + committee AS ( + SELECT cm.epoch_no::BIGINT AS epoch_no, + COALESCE(si.pool_id, cm.sidechain_pubkey) AS id_key, + 'committee'::TEXT AS source, + NULL::TEXT AS status + FROM committee_membership cm + LEFT JOIN spo_identity si ON si.sidechain_pubkey = cm.sidechain_pubkey + WHERE cm.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT + ), + performance AS ( + SELECT sep.epoch_no::BIGINT AS epoch_no, + COALESCE(si.pool_id, sep.spo_sk) AS id_key, + 'performance'::TEXT AS source, + NULL::TEXT AS status + FROM spo_epoch_performance sep + LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk + WHERE sep.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT + ) + SELECT epoch_no, id_key, source, status FROM history + UNION ALL + SELECT epoch_no, id_key, source, status FROM committee + UNION ALL + SELECT epoch_no, id_key, source, status FROM performance + ORDER BY epoch_no, source, id_key + "}; + + sqlx::query_as::<_, (i64, String, String, Option)>(query) + .bind(start) + .bind(end) + .fetch_all(&*self.pool) + .await + .map(|rows| { + rows.into_iter() + .map(|(epoch_no, id_key, source, status)| PresenceEvent { + epoch_no, + id_key, + source, + status, + }) + .collect() + }) + } + + #[trace] + async fn get_registered_first_valid_epochs( + &self, + upto_epoch: Option, + ) -> Result, sqlx::Error> { + let query = indoc! 
{" + SELECT COALESCE(si.pool_id, sh.spo_sk) AS id_key, + MIN(sh.epoch_no)::BIGINT AS first_valid_epoch + FROM spo_history sh + LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk + WHERE sh.status IN ('VALID','Valid') + AND ($1::BIGINT IS NULL OR sh.epoch_no <= $1::BIGINT) + GROUP BY 1 + ORDER BY first_valid_epoch + "}; + + sqlx::query_as::<_, (String, i64)>(query) + .bind(upto_epoch) + .fetch_all(&*self.pool) + .await + .map(|rows| { + rows.into_iter() + .map(|(id_key, first_valid_epoch)| FirstValidEpoch { + id_key, + first_valid_epoch, + }) + .collect() + }) + } + + #[trace] + async fn get_stake_distribution( + &self, + limit: i64, + offset: i64, + search: Option<&str>, + order_desc: bool, + ) -> Result<(Vec, f64), sqlx::Error> { + // First get total live stake. + let total_query = indoc! {" + SELECT COALESCE(SUM(s.live_stake), 0)::TEXT + FROM spo_stake_snapshot s + "}; + let total_live_str: String = sqlx::query_scalar(total_query) + .fetch_one(&*self.pool) + .await?; + let total_live_f64: f64 = total_live_str.parse().unwrap_or(0.0); + + // Build the main query. + let base_select = if search.is_some() { + indoc! {" + SELECT + pm.pool_id AS pool_id_hex, + pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url, + (s.live_stake)::TEXT, (s.active_stake)::TEXT, s.live_delegators, s.live_saturation, + (s.declared_pledge)::TEXT, (s.live_pledge)::TEXT + FROM spo_stake_snapshot s + JOIN pool_metadata_cache pm ON pm.pool_id = s.pool_id + WHERE ( + pm.name ILIKE $3 OR pm.ticker ILIKE $3 OR pm.homepage_url ILIKE $3 OR pm.pool_id ILIKE $4 + ) + ORDER BY COALESCE(s.live_stake, 0) DESC, pm.pool_id + LIMIT $1 OFFSET $2 + "} + } else { + indoc! 
{" + SELECT + pm.pool_id AS pool_id_hex, + pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url, + (s.live_stake)::TEXT, (s.active_stake)::TEXT, s.live_delegators, s.live_saturation, + (s.declared_pledge)::TEXT, (s.live_pledge)::TEXT + FROM spo_stake_snapshot s + JOIN pool_metadata_cache pm ON pm.pool_id = s.pool_id + ORDER BY COALESCE(s.live_stake, 0) DESC, pm.pool_id + LIMIT $1 OFFSET $2 + "} + }; + + let sql = if order_desc { + base_select.to_string() + } else { + base_select.replace("DESC", "ASC") + }; + + let rows = if let Some(s) = search { + let s_like = format!("%{s}%"); + sqlx::query_as::< + _, + ( + String, // pool_id_hex + Option, // name + Option, // ticker + Option, // homepage_url + Option, // logo_url + Option, // live_stake + Option, // active_stake + Option, // live_delegators + Option, // live_saturation + Option, // declared_pledge + Option, // live_pledge + ), + >(&sql) + .bind(limit) + .bind(offset) + .bind(s_like.clone()) + .bind(s_like) + .fetch_all(&*self.pool) + .await? + } else { + sqlx::query_as::< + _, + ( + String, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + Option, + ), + >(&sql) + .bind(limit) + .bind(offset) + .fetch_all(&*self.pool) + .await? 
+ }; + + let stake_shares = rows + .into_iter() + .map( + |( + pool_id_hex, + name, + ticker, + homepage_url, + logo_url, + live_stake, + active_stake, + live_delegators, + live_saturation, + declared_pledge, + live_pledge, + )| { + let share = { + let ls = live_stake.as_deref().unwrap_or("0"); + let lv = ls.parse::().unwrap_or(0.0); + if total_live_f64 > 0.0 { + lv / total_live_f64 + } else { + 0.0 + } + }; + let live_delegators_i64 = live_delegators.map(|v| v as i64); + StakeShare { + pool_id_hex, + name, + ticker, + homepage_url, + logo_url, + live_stake, + active_stake, + live_delegators: live_delegators_i64, + live_saturation, + declared_pledge, + live_pledge, + stake_share: Some(share), + } + }, + ) + .collect(); + + Ok((stake_shares, total_live_f64)) + } +} + +/// Row type for epoch performance query results. +type EpochPerfRow = ( + i64, + String, + i32, + i32, + Option, + Option, + Option, + Option, +); + +/// Helper to convert epoch performance row to domain type. +fn epoch_perf_from_row(row: EpochPerfRow) -> EpochPerf { + let ( + epoch_no, + spo_sk_hex, + produced_i32, + expected_i32, + identity_label, + stake_snapshot, + pool_id_hex, + validator_class, + ) = row; + EpochPerf { + epoch_no, + spo_sk_hex, + produced: produced_i32 as i64, + expected: expected_i32 as i64, + identity_label, + stake_snapshot, + pool_id_hex, + validator_class, + } +} + +/// Normalize hex string by stripping 0x prefix and lowercasing. +fn normalize_hex(input: &str) -> Option { + if input.is_empty() { + return None; + } + let s = input + .strip_prefix("0x") + .unwrap_or(input) + .strip_prefix("0X") + .unwrap_or(input); + if !s.len().is_multiple_of(2) || s.len() > 256 { + return None; + } + // Validate hex characters. 
+ if !s.chars().all(|c| c.is_ascii_hexdigit()) { + return None; + } + Some(s.to_ascii_lowercase()) +} diff --git a/indexer-common/migrations/postgres/001_initial.sql b/indexer-common/migrations/postgres/001_initial.sql index ad4cc188..2fe4c24e 100644 --- a/indexer-common/migrations/postgres/001_initial.sql +++ b/indexer-common/migrations/postgres/001_initial.sql @@ -197,3 +197,127 @@ CREATE TABLE system_parameters_d ( num_registered_candidates INTEGER NOT NULL ); CREATE INDEX ON system_parameters_d (block_height DESC); +-------------------------------------------------------------------------------- +-- epochs +-------------------------------------------------------------------------------- +CREATE TABLE epochs ( + epoch_no BIGINT PRIMARY KEY, + starts_at TIMESTAMPTZ NOT NULL, + ends_at TIMESTAMPTZ NOT NULL +); +-------------------------------------------------------------------------------- +-- pool_metadata_cache +-------------------------------------------------------------------------------- +CREATE TABLE pool_metadata_cache ( + pool_id VARCHAR PRIMARY KEY, + hex_id VARCHAR UNIQUE, + name TEXT, + ticker TEXT, + homepage_url TEXT, + updated_at TIMESTAMPTZ, + url TEXT +); +-- TODO: Move updated_at trigger logic to application layer (PM-21550). 
+CREATE OR REPLACE FUNCTION set_updated_at_timestamp() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = NOW(); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +CREATE TRIGGER update_pool_metadata_cache_updated_at +BEFORE UPDATE ON pool_metadata_cache +FOR EACH ROW +EXECUTE FUNCTION set_updated_at_timestamp(); +-------------------------------------------------------------------------------- +-- spo_identity +-------------------------------------------------------------------------------- +CREATE TABLE spo_identity ( + spo_sk VARCHAR PRIMARY KEY, + sidechain_pubkey VARCHAR UNIQUE, + pool_id VARCHAR REFERENCES pool_metadata_cache(pool_id), + mainchain_pubkey VARCHAR UNIQUE, + aura_pubkey VARCHAR UNIQUE +); +CREATE INDEX IF NOT EXISTS spo_identity_pk ON spo_identity (pool_id, sidechain_pubkey, aura_pubkey); +-------------------------------------------------------------------------------- +-- committee_membership +-------------------------------------------------------------------------------- +CREATE TABLE committee_membership ( + spo_sk VARCHAR, + sidechain_pubkey VARCHAR, + epoch_no BIGINT NOT NULL, + position INT NOT NULL, + expected_slots INT NOT NULL, + PRIMARY KEY (epoch_no, position) +); +CREATE INDEX IF NOT EXISTS committee_membership_epoch_no_idx ON committee_membership (epoch_no); +-------------------------------------------------------------------------------- +-- spo_epoch_performance +-------------------------------------------------------------------------------- +CREATE TABLE spo_epoch_performance ( + spo_sk VARCHAR REFERENCES spo_identity(spo_sk), + identity_label VARCHAR, + epoch_no BIGINT NOT NULL, + expected_blocks INT NOT NULL, + produced_blocks INT NOT NULL, + PRIMARY KEY (epoch_no, spo_sk) +); +CREATE INDEX IF NOT EXISTS spo_epoch_performance_identity_pk ON spo_epoch_performance (epoch_no, identity_label); +CREATE INDEX IF NOT EXISTS spo_epoch_performance_epoch_no_idx ON spo_epoch_performance (epoch_no); 
+-------------------------------------------------------------------------------- +-- spo_history +-------------------------------------------------------------------------------- +CREATE TABLE spo_history ( + spo_hist_sk BIGSERIAL PRIMARY KEY, + spo_sk VARCHAR REFERENCES spo_identity(spo_sk), + epoch_no BIGINT NOT NULL, + status TEXT NOT NULL, + valid_from BIGINT NOT NULL, + valid_to BIGINT NOT NULL, + UNIQUE (spo_sk, epoch_no) +); +CREATE INDEX IF NOT EXISTS spo_history_epoch_no_idx ON spo_history (epoch_no); +-------------------------------------------------------------------------------- +-- spo_stake_snapshot +-------------------------------------------------------------------------------- +CREATE TABLE spo_stake_snapshot ( + pool_id VARCHAR PRIMARY KEY REFERENCES pool_metadata_cache(pool_id) ON DELETE CASCADE, + live_stake NUMERIC, + active_stake NUMERIC, + live_delegators INT, + live_saturation DOUBLE PRECISION, + declared_pledge NUMERIC, + live_pledge NUMERIC, + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS spo_stake_snapshot_updated_at_idx ON spo_stake_snapshot (updated_at DESC); +CREATE INDEX IF NOT EXISTS spo_stake_snapshot_live_stake_idx ON spo_stake_snapshot ((COALESCE(live_stake, 0)) DESC); +-------------------------------------------------------------------------------- +-- spo_stake_history +-------------------------------------------------------------------------------- +CREATE TABLE spo_stake_history ( + id BIGSERIAL PRIMARY KEY, + pool_id VARCHAR NOT NULL REFERENCES pool_metadata_cache(pool_id) ON DELETE CASCADE, + recorded_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + mainchain_epoch INTEGER, + live_stake NUMERIC, + active_stake NUMERIC, + live_delegators INTEGER, + live_saturation DOUBLE PRECISION, + declared_pledge NUMERIC, + live_pledge NUMERIC +); +CREATE INDEX IF NOT EXISTS spo_stake_history_pool_time_idx ON spo_stake_history (pool_id, recorded_at DESC); +CREATE INDEX IF NOT EXISTS spo_stake_history_epoch_idx ON 
spo_stake_history (mainchain_epoch); +-------------------------------------------------------------------------------- +-- spo_stake_refresh_state +-------------------------------------------------------------------------------- +CREATE TABLE spo_stake_refresh_state ( + id BOOLEAN PRIMARY KEY DEFAULT TRUE, + last_pool_id VARCHAR, + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); +INSERT INTO spo_stake_refresh_state (id) +VALUES (TRUE) +ON CONFLICT (id) DO NOTHING; diff --git a/indexer-common/migrations/postgres/002_spo_initial.sql b/indexer-common/migrations/postgres/002_spo_initial.sql deleted file mode 100644 index c50a5cd1..00000000 --- a/indexer-common/migrations/postgres/002_spo_initial.sql +++ /dev/null @@ -1,84 +0,0 @@ -CREATE TABLE epochs ( - epoch_no BIGINT PRIMARY KEY, - starts_at TIMESTAMPTZ NOT NULL, - ends_at TIMESTAMPTZ NOT NULL -); - -CREATE TABLE pool_metadata_cache ( - pool_id VARCHAR PRIMARY KEY, - hex_id VARCHAR UNIQUE, - name TEXT, - ticker TEXT, - homepage_url TEXT, - updated_at TIMESTAMPTZ, - url TEXT -); - -CREATE TABLE spo_identity ( - spo_sk VARCHAR PRIMARY KEY, - sidechain_pubkey VARCHAR UNIQUE, - - pool_id VARCHAR REFERENCES pool_metadata_cache(pool_id), - mainchain_pubkey VARCHAR UNIQUE, - aura_pubkey VARCHAR UNIQUE -); - -CREATE TABLE stg_committee ( - epoch_no BIGINT NOT NULL, - position INT NOT NULL, - sidechain_pubkey VARCHAR NOT NULL, - arrived_at TIMESTAMPTZ NOT NULL -); - -CREATE TABLE committee_membership ( - spo_sk VARCHAR, - sidechain_pubkey VARCHAR, - - epoch_no BIGINT NOT NULL, - position INT NOT NULL, - expected_slots INT NOT NULL, - PRIMARY KEY (epoch_no, position) -); - -CREATE TABLE spo_epoch_performance ( - spo_sk VARCHAR REFERENCES spo_identity(spo_sk), - identity_label VARCHAR, - epoch_no BIGINT NOT NULL, - expected_blocks INT NOT NULL, - produced_blocks INT NOT NULL, - PRIMARY KEY (epoch_no, spo_sk) -); - -CREATE TABLE spo_history ( - spo_hist_sk BIGSERIAL PRIMARY KEY, - spo_sk VARCHAR REFERENCES 
spo_identity(spo_sk), - epoch_no BIGINT NOT NULL, - status TEXT NOT NULL, - valid_from BIGINT NOT NULL, - valid_to BIGINT NOT NULL, - UNIQUE (spo_sk, epoch_no) -); - --- Update "updated_at" field each time the record is updated -CREATE OR REPLACE FUNCTION set_updated_at_timestamp() -RETURNS TRIGGER AS $$ -BEGIN - NEW.updated_at = NOW(); - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - -CREATE TRIGGER update_pool_metadata_cache_updated_at -BEFORE UPDATE ON pool_metadata_cache -FOR EACH ROW -EXECUTE FUNCTION set_updated_at_timestamp(); - --- indexes -CREATE INDEX IF NOT EXISTS spo_identity_pk ON spo_identity (pool_id, sidechain_pubkey, aura_pubkey); - -CREATE INDEX IF NOT EXISTS spo_history_epoch_no_idx ON spo_history (epoch_no); - -CREATE INDEX IF NOT EXISTS committee_membership_epoch_no_idx ON committee_membership (epoch_no); - -CREATE INDEX IF NOT EXISTS spo_epoch_performance_identity_pk ON spo_epoch_performance (epoch_no, identity_label); -CREATE INDEX IF NOT EXISTS spo_epoch_performance_epoch_no_idx ON spo_epoch_performance (epoch_no); diff --git a/indexer-common/migrations/postgres/003_drop_stg_committee.sql b/indexer-common/migrations/postgres/003_drop_stg_committee.sql deleted file mode 100644 index 4031f2aa..00000000 --- a/indexer-common/migrations/postgres/003_drop_stg_committee.sql +++ /dev/null @@ -1,4 +0,0 @@ --- This migration drops the legacy staging table added in 002 to maintain checksum compatibility. --- Safe to run even if the table doesn't exist. - -DROP TABLE IF EXISTS stg_committee; diff --git a/indexer-common/migrations/postgres/004_spo_stake.sql b/indexer-common/migrations/postgres/004_spo_stake.sql deleted file mode 100644 index 82cdd609..00000000 --- a/indexer-common/migrations/postgres/004_spo_stake.sql +++ /dev/null @@ -1,17 +0,0 @@ --- Stake snapshot per pool (latest values). This supports explorer stake distribution views. --- Values are sourced from mainchain pool data (e.g., Blockfrost) and keyed by Cardano pool_id (56-hex string). 
- -CREATE TABLE IF NOT EXISTS spo_stake_snapshot ( - pool_id VARCHAR PRIMARY KEY REFERENCES pool_metadata_cache(pool_id) ON DELETE CASCADE, - live_stake NUMERIC, -- current live stake (lovelace-like units) as big numeric - active_stake NUMERIC, -- current active stake - live_delegators INT, -- number of live delegators - live_saturation DOUBLE PRECISION, -- saturation ratio (0..1+) - declared_pledge NUMERIC, -- declared pledge - live_pledge NUMERIC, -- current pledge - updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() -); - --- Helpful indexes for ordering/filtering -CREATE INDEX IF NOT EXISTS spo_stake_snapshot_updated_at_idx ON spo_stake_snapshot (updated_at DESC); -CREATE INDEX IF NOT EXISTS spo_stake_snapshot_live_stake_idx ON spo_stake_snapshot ((COALESCE(live_stake, 0)) DESC); diff --git a/indexer-common/migrations/postgres/005_spo_stake_history.sql b/indexer-common/migrations/postgres/005_spo_stake_history.sql deleted file mode 100644 index cd2aaef5..00000000 --- a/indexer-common/migrations/postgres/005_spo_stake_history.sql +++ /dev/null @@ -1,29 +0,0 @@ --- Stake history table and refresh state cursor - -CREATE TABLE IF NOT EXISTS spo_stake_history ( - id BIGSERIAL PRIMARY KEY, - pool_id VARCHAR NOT NULL REFERENCES pool_metadata_cache(pool_id) ON DELETE CASCADE, - recorded_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), - mainchain_epoch INTEGER, - - live_stake NUMERIC, - active_stake NUMERIC, - live_delegators INTEGER, - live_saturation DOUBLE PRECISION, - declared_pledge NUMERIC, - live_pledge NUMERIC -); - -CREATE INDEX IF NOT EXISTS spo_stake_history_pool_time_idx ON spo_stake_history (pool_id, recorded_at DESC); -CREATE INDEX IF NOT EXISTS spo_stake_history_epoch_idx ON spo_stake_history (mainchain_epoch); - --- Single-row state table to track paging cursor for stake refresh -CREATE TABLE IF NOT EXISTS spo_stake_refresh_state ( - id BOOLEAN PRIMARY KEY DEFAULT TRUE, - last_pool_id VARCHAR, - updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() -); - -INSERT INTO 
spo_stake_refresh_state (id) -VALUES (TRUE) -ON CONFLICT (id) DO NOTHING; diff --git a/indexer-common/migrations/sqlite/001_initial.sql b/indexer-common/migrations/sqlite/001_initial.sql index ce0e62e3..dd16b849 100644 --- a/indexer-common/migrations/sqlite/001_initial.sql +++ b/indexer-common/migrations/sqlite/001_initial.sql @@ -198,3 +198,115 @@ CREATE TABLE system_parameters_d ( num_registered_candidates INTEGER NOT NULL ); CREATE INDEX system_parameters_d_block_height_idx ON system_parameters_d (block_height DESC); +-------------------------------------------------------------------------------- +-- epochs +-------------------------------------------------------------------------------- +CREATE TABLE epochs ( + epoch_no INTEGER PRIMARY KEY, + starts_at TEXT NOT NULL, + ends_at TEXT NOT NULL +); +-------------------------------------------------------------------------------- +-- pool_metadata_cache +-------------------------------------------------------------------------------- +CREATE TABLE pool_metadata_cache ( + pool_id TEXT PRIMARY KEY, + hex_id TEXT UNIQUE, + name TEXT, + ticker TEXT, + homepage_url TEXT, + updated_at TEXT, + url TEXT +); +-------------------------------------------------------------------------------- +-- spo_identity +-------------------------------------------------------------------------------- +CREATE TABLE spo_identity ( + spo_sk TEXT PRIMARY KEY, + sidechain_pubkey TEXT UNIQUE, + pool_id TEXT REFERENCES pool_metadata_cache(pool_id), + mainchain_pubkey TEXT UNIQUE, + aura_pubkey TEXT UNIQUE +); +CREATE INDEX spo_identity_pk ON spo_identity (pool_id, sidechain_pubkey, aura_pubkey); +-------------------------------------------------------------------------------- +-- committee_membership +-------------------------------------------------------------------------------- +CREATE TABLE committee_membership ( + spo_sk TEXT, + sidechain_pubkey TEXT, + epoch_no INTEGER NOT NULL, + position INTEGER NOT NULL, + expected_slots INTEGER NOT 
NULL, + PRIMARY KEY (epoch_no, position) +); +CREATE INDEX committee_membership_epoch_no_idx ON committee_membership (epoch_no); +-------------------------------------------------------------------------------- +-- spo_epoch_performance +-------------------------------------------------------------------------------- +CREATE TABLE spo_epoch_performance ( + spo_sk TEXT REFERENCES spo_identity(spo_sk), + identity_label TEXT, + epoch_no INTEGER NOT NULL, + expected_blocks INTEGER NOT NULL, + produced_blocks INTEGER NOT NULL, + PRIMARY KEY (epoch_no, spo_sk) +); +CREATE INDEX spo_epoch_performance_identity_pk ON spo_epoch_performance (epoch_no, identity_label); +CREATE INDEX spo_epoch_performance_epoch_no_idx ON spo_epoch_performance (epoch_no); +-------------------------------------------------------------------------------- +-- spo_history +-------------------------------------------------------------------------------- +CREATE TABLE spo_history ( + spo_hist_sk INTEGER PRIMARY KEY, + spo_sk TEXT REFERENCES spo_identity(spo_sk), + epoch_no INTEGER NOT NULL, + status TEXT NOT NULL, + valid_from INTEGER NOT NULL, + valid_to INTEGER NOT NULL, + UNIQUE (spo_sk, epoch_no) +); +CREATE INDEX spo_history_epoch_no_idx ON spo_history (epoch_no); +-------------------------------------------------------------------------------- +-- spo_stake_snapshot +-------------------------------------------------------------------------------- +CREATE TABLE spo_stake_snapshot ( + pool_id TEXT PRIMARY KEY REFERENCES pool_metadata_cache(pool_id) ON DELETE CASCADE, + live_stake REAL, + active_stake REAL, + live_delegators INTEGER, + live_saturation REAL, + declared_pledge REAL, + live_pledge REAL, + updated_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP +); +CREATE INDEX spo_stake_snapshot_updated_at_idx ON spo_stake_snapshot (updated_at DESC); +CREATE INDEX spo_stake_snapshot_live_stake_idx ON spo_stake_snapshot (COALESCE(live_stake, 0) DESC); 
+-------------------------------------------------------------------------------- +-- spo_stake_history +-------------------------------------------------------------------------------- +CREATE TABLE spo_stake_history ( + id INTEGER PRIMARY KEY, + pool_id TEXT NOT NULL REFERENCES pool_metadata_cache(pool_id) ON DELETE CASCADE, + recorded_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP, + mainchain_epoch INTEGER, + live_stake REAL, + active_stake REAL, + live_delegators INTEGER, + live_saturation REAL, + declared_pledge REAL, + live_pledge REAL +); +CREATE INDEX spo_stake_history_pool_time_idx ON spo_stake_history (pool_id, recorded_at DESC); +CREATE INDEX spo_stake_history_epoch_idx ON spo_stake_history (mainchain_epoch); +-------------------------------------------------------------------------------- +-- spo_stake_refresh_state +-------------------------------------------------------------------------------- +CREATE TABLE spo_stake_refresh_state ( + id INTEGER PRIMARY KEY DEFAULT 1, + last_pool_id TEXT, + updated_at TEXT NOT NULL DEFAULT CURRENT_TIMESTAMP +); +INSERT INTO spo_stake_refresh_state (id) +VALUES (1) +ON CONFLICT (id) DO NOTHING; diff --git a/justfile b/justfile index 8fc09ade..5bfb48aa 100644 --- a/justfile +++ b/justfile @@ -75,6 +75,10 @@ generate-indexer-api-schema: cargo run -p indexer-api --bin indexer-api-cli print-api-schema-v3 > \ indexer-api/graphql/schema-v3.graphql +generate-spo-api-schema: + cargo run -p spo-api --features cloud --bin spo-api-cli print-api-schema-v1 > \ + spo-api/graphql/schema-v1.graphql + build-docker-image package profile="dev": tag=$(git rev-parse --short=8 HEAD) && \ docker build \ diff --git a/spo-api/ARCHITECTURE.md b/spo-api/ARCHITECTURE.md deleted file mode 100644 index 7247e35f..00000000 --- a/spo-api/ARCHITECTURE.md +++ /dev/null @@ -1,175 +0,0 @@ -# SPO API Architecture - -This document describes the architecture of the `spo-api` service: its purpose, components, data / request flow, dependencies, operational 
concerns, and planned evolution. It mirrors the structure used by `indexer-api`, adapted for a Stake Pool Operator (SPO) focused read-only API MVP. - -## Purpose & Scope - -Expose Stake Pool Operator (SPO) and per-epoch performance / identity data to Midnight Explorer (and other clients) over GraphQL (HTTP + WebSocket). For the current MVP: - -- Read-only queries backed by the existing Postgres database used by the indexer. -- No NATS dependency yet (live updates & catch-up tracking to be added later). -- Simple readiness: DB connectivity + (placeholder) caught-up flag (always `true` for now). - -Future iterations will introduce: - -- NATS subscription to indexer events for real caught-up gating and push updates. -- Subscriptions for live SPO / epoch performance changes. -- Repository layer with richer projections / aggregations. - -## High-level Data / Control Flow - -```text -+----------------+ +----------------------+ +----------------------+ +------------------+ -| Postgres (SQL) | SQL | spo-api Application | HTTP | Clients (GraphQL/WS) | Admin | /ready endpoint | -+----------------+ <------ +----------------------+ <----->+ Queries / Future WS | check | DB + caught_up | - | Axum + async-graphql +----------------------+ +------------------+ - | (v1 schema) - | State: { caught_up, db } - +---------------------------+ -``` - -Mermaid (future NATS integration indicated but not active yet): - -```mermaid -flowchart LR - DB[(Postgres)] --> API[SPO API] - API -->|HTTP| Client[GraphQL Clients] - subgraph Future - NATS[(NATS Events)] --> API - end - API -.-> Ready[/ready health/] -``` - -## Components & Responsibilities - -- Application (`src/application.rs`) - - Loads network settings, spawns API server. - - Maintains `caught_up: AtomicBool` (placeholder `true` until NATS integration). - - Handles SIGTERM for graceful shutdown. - -- Infra API (`src/infra/api/*`) - - `AxumApi`: builds the router and runs the server. 
- - `AppState`: composite state (caught_up + optional Db pool) with `FromRef` splits for Axum extractors. - - `/ready`: returns `503` if (future) not caught up or DB ping fails; else `200`. - - GraphQL v1 schema mounted at `/api/v1/graphql` (and future WS at `/api/v1/graphql/ws`). - - Middleware: tracing (`FastraceLayer`), CORS (permissive), request body size limiting + custom 400->413 remap. - -- Domain (`src/domain.rs` and future domain modules) - - Defines the `Api` trait consumed by application orchestration. - - Will host SPO-specific domain models (stake pool operator identity, performance snapshots, epoch aggregates). - -- Repository Layer (planned: `src/infra/repo.rs`) - - SQL query abstraction over Postgres using `sqlx`. - - Provide typed return models consumed by GraphQL resolvers. - - Encapsulate pagination, filtering, and performance queries. - -## Readiness & Health - -Current readiness logic: - -1. `caught_up` flag (always `true` in MVP). -2. Lightweight DB health ping `SELECT 1` inside `/ready`. - - -If either fails (in the future, actual catch-up check), returns `503 Service Unavailable`. - -## GraphQL Schema (v1 MVP) - -- `Query.serviceInfo`: returns service name, version, network. -- Future additions: - - `stakePoolOperators(limit, offset, filters)` - - `stakePoolOperator(id)` - - `epochPerformance(epochNumber)` - - Aggregations (top K by performance, delegation composition, historical trend) -- Subscriptions (deferred until NATS): live operator performance updates, epoch rollovers. 
- -## Data Model (Planned) - -Conceptual entities (not yet implemented): - -- StakePoolOperator { id, identityKey, displayName, createdAt, lastActiveAt, performanceScore, commissions, totalStake } -- EpochPerformance { epoch, operatorId, blocksProduced, blocksExpected, performanceRatio, stakeShare } -- DelegatorStake (optional for explorer pivot views) - -Indexes / queries to optimize: - -- By operator id -- By epoch + operator id -- Top N operators by performance for a given epoch - -## Error Handling - -- `/ready` returns targeted messages: "database not ready" vs. future "indexer not caught up". -- GraphQL resolvers (when added) will map domain errors into structured GraphQL errors with categories (e.g. NOT_FOUND, INTERNAL). - -## Telemetry & Metrics - -(Planned) - -- Request tracing via existing fastrace integration. -- Gauge for connected WS clients (already scaffolded; currently unused field in `Metrics`). -- Counters for query types and DB latency histograms (to be added alongside repo layer). - -## Configuration - -`config.yaml` (MVP subset): - -- `infra.api`: address, port, body limits, complexity, depth. -- `infra.storage`: Postgres connection pool config. -- `application.network_id`: network enumeration. -- Telemetry config (tracing + metrics) reused from `indexer-common`. 
- -## Build & Run - -```sh -# Build -cargo build -p spo-api --features cloud - -# Run (ensure Postgres env / config is valid) -cargo run -p spo-api --features cloud - -# Health -curl -i http://localhost:/ready -``` - -## Evolution Roadmap - -| Milestone | Description | Status | -|-----------|-------------|--------| -| MVP scaffold | Service, config, basic GraphQL, readiness, DB pool | DONE | -| Repo layer | Introduce `repo` module with first SPO queries | PENDING | -| SPO domain models | Define core structs + mapping | PENDING | -| GraphQL SPO queries | `stakePoolOperators`, `stakePoolOperator` | PENDING | -| Performance endpoints | Epoch performance aggregates | PENDING | -| NATS integration | Real catch-up + subscriptions | PENDING | -| Subscriptions | Live operator performance stream | PENDING | -| Metrics expansion | DB/query metrics, WS client gauge | PENDING | -| Hardening | Auth (if required), pagination policies, limits | FUTURE | - -## Design Principles - -- Start minimal: add complexity (NATS, subscriptions) only when data feed is ready. -- Keep GraphQL boundary stable; evolve underlying queries behind repository abstraction. -- Prefer explicit typed models over ad-hoc JSON for performance data. -- Systematically enforce limits (complexity, depth, pagination) for resilience. - -## Open Questions / Future Decisions - -- Exact schema for performance scoring (source of truth & calculation timing). -- Need for caching layer (in-memory epoch aggregates) vs. pure SQL queries. -- Security / auth requirements for future administrative fields (if any). 
- -## Status Summary (Current) - -- Server & routing: READY -- Readiness endpoint: READY (DB ping + placeholder caught_up) -- DB pool: READY -- GraphQL base schema: READY (serviceInfo) -- Repo layer: NOT IMPLEMENTED -- SPO domain models: NOT IMPLEMENTED -- Subscriptions: NOT IMPLEMENTED -- Metrics enrichment: NOT IMPLEMENTED -- NATS integration: NOT IMPLEMENTED - ---- -Last updated: 2025-09-18 diff --git a/spo-api/Cargo.toml b/spo-api/Cargo.toml deleted file mode 100644 index 5de3279e..00000000 --- a/spo-api/Cargo.toml +++ /dev/null @@ -1,51 +0,0 @@ -[package] -name = "spo-api" -description = "SPO API" -version = { workspace = true } -edition = { workspace = true } -license = { workspace = true } -readme = { workspace = true } -homepage = { workspace = true } -repository = { workspace = true } -documentation = { workspace = true } -publish = { workspace = true } - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = [ "--cfg", "docsrs" ] - -[dependencies] -anyhow = { workspace = true } -async-graphql = { workspace = true, features = [ "uuid" ] } -async-graphql-axum = { workspace = true } -axum = { workspace = true, features = [ "http2" ] } -byte-unit-serde = { workspace = true } -clap = { workspace = true, features = [ "derive" ] } -derive_more = { workspace = true, features = [ "debug", "display", "from" ] } -fastrace = { workspace = true, features = [ "enable" ] } -fastrace-axum = { workspace = true } -futures = { workspace = true } -indexer-common = { path = "../indexer-common" } -indoc = { workspace = true } -log = { workspace = true, features = [ "kv" ] } -metrics = { workspace = true } -serde = { workspace = true, features = [ "derive" ] } -serde_with = { workspace = true } -thiserror = { workspace = true } -tokio = { workspace = true, features = [ "macros", "rt-multi-thread", "time", "signal" ] } -tower = { workspace = true } -tower-http = { workspace = true, features = [ "cors", "limit" ] } -trait-variant = { workspace = true } -uuid = { 
workspace = true, features = [ "v7" ], optional = true } -sqlx = { workspace = true, features = [ "runtime-tokio", "postgres" ] } -regex = { workspace = true } -once_cell = { workspace = true } - -[features] -cloud = [ "indexer-common/cloud", "uuid" ] - -[lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = [ 'cfg(coverage_nightly)' ] } - -[package.metadata.cargo-shear] -ignored = [ "uuid" ] diff --git a/spo-api/Dockerfile b/spo-api/Dockerfile deleted file mode 100644 index 470273e5..00000000 --- a/spo-api/Dockerfile +++ /dev/null @@ -1,33 +0,0 @@ -ARG RUST_VERSION=1.89 -FROM rust:${RUST_VERSION}-bookworm AS chef -WORKDIR /build -RUN cargo install cargo-chef --version 0.1.72 - -FROM chef AS planner -COPY . . -RUN cargo chef prepare --recipe-path recipe.json - -FROM chef AS builder -SHELL ["/bin/bash", "-c"] -ARG PROFILE=release -RUN git config --global url."https://@github.com".insteadOf "ssh://git@github.com" -COPY --from=planner /build/recipe.json recipe.json -RUN --mount=type=secret,id=netrc,target=/root/.netrc \ - cargo chef cook --profile $PROFILE --recipe-path recipe.json -COPY . . 
-RUN --mount=type=secret,id=netrc,target=/root/.netrc \ - cargo build -p spo-api --locked --features cloud --profile $PROFILE && \ - mkdir -p /runtime/usr/local/bin /runtime/opt/spo-api && \ - mv "./target/${PROFILE/dev/debug}/spo-api" /runtime/usr/local/bin/ && \ - install -Dm755 spo-api/bin/entrypoint.sh /runtime/usr/local/bin/entrypoint.sh && \ - install -Dm644 spo-api/config.yaml /runtime/opt/spo-api/config.yaml - -FROM debian:bookworm-slim@sha256:b1a741487078b369e78119849663d7f1a5341ef2768798f7b7406c4240f86aef AS runtime -RUN adduser --disabled-password --gecos "" --home "/nonexistent" --shell "/sbin/nologin" --no-create-home --uid "10001" appuser && \ - mkdir /var/run/spo-api && \ - chown appuser:appuser /var/run/spo-api -COPY --from=builder --chown=appuser:appuser /runtime / -USER appuser -WORKDIR /opt/spo-api -ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] -EXPOSE 8090 diff --git a/spo-api/README.md b/spo-api/README.md deleted file mode 100644 index 41fb3fd3..00000000 --- a/spo-api/README.md +++ /dev/null @@ -1,163 +0,0 @@ -# SPO API - -GraphQL API exposing SPO identity, pool metadata, and per-epoch performance. 
- -- HTTP (GraphQL + GraphiQL UI): /api/v1/graphql -- WebSocket (GraphQL WS): /api/v1/graphql/ws -- Readiness: /ready - -Open GraphiQL at - -## Quick start - -Option A — cargo (local) - -```bash -# Build -cargo build -p spo-api --features cloud - -# Run (requires Postgres and password) -export APP__INFRA__STORAGE__PASSWORD=indexer -CONFIG_FILE=spo-api/config.yaml cargo run -p spo-api --features cloud -``` - -Option B — Docker Compose - -```bash -# Ensure .env contains APP__INFRA__STORAGE__PASSWORD -echo APP__INFRA__STORAGE__PASSWORD=indexer >> .env - -# Bring up DB and API -docker compose up -d postgres spo-api - -# Health -curl -f http://localhost:8090/ready -``` - -## Handy queries - -```graphql -query ServiceInfo { - serviceInfo { name version network } -} - -query LatestPerformance { - spoPerformanceLatest(limit: 10, offset: 0) { - epochNo - spoSkHex - produced - expected - poolIdHex - } -} - -query PerformanceBySPO($spoSk: String!) { - spoPerformanceBySpoSk(spoSkHex: $spoSk, limit: 5, offset: 0) { - epochNo - produced - expected - identityLabel - } -} - -query EpochPerformance($epoch: Int!) { - epochPerformance(epoch: $epoch, limit: 20, offset: 0) { - spoSkHex - produced - expected - poolIdHex - } -} - -query SpoByPoolId($poolId: String!) { - spoByPoolId(poolIdHex: $poolId) { - poolIdHex - sidechainPubkeyHex - name - ticker - } -} - -query SpoList { - spoList(limit: 10, offset: 0) { - poolIdHex - sidechainPubkeyHex - name - ticker - homepageUrl - logoUrl - } -} - -query CurrentEpochInfo { - currentEpochInfo { - epochNo - durationSeconds - elapsedSeconds - } -} - -query EpochUtilization($epoch: Int!) { - epochUtilization(epoch: $epoch) -} - -query SpoCount { - spoCount -} -``` - -## Operation reference (v1) - -- serviceInfo: ServiceInfo! -- spoIdentities(limit: Int = 50, offset: Int = 0): [SpoIdentity!]! 
-- spoIdentityByPoolId(poolIdHex: String!): SpoIdentity -- poolMetadata(poolIdHex: String!): PoolMetadata -- poolMetadataList(limit: Int = 50, offset: Int = 0, withNameOnly: Boolean = false): [PoolMetadata!]! -- spoList(limit: Int = 20, offset: Int = 0): [Spo!]! -- spoByPoolId(poolIdHex: String!): Spo -- spoCompositeByPoolId(poolIdHex: String!): SpoComposite -- stakePoolOperators(limit: Int = 20): [String!]! -- spoPerformanceLatest(limit: Int = 20, offset: Int = 0): [EpochPerf!]! -- spoPerformanceBySpoSk(spoSkHex: String!, limit: Int = 100, offset: Int = 0): [EpochPerf!]! -- epochPerformance(epoch: Int!, limit: Int = 100, offset: Int = 0): [EpochPerf!]! -- currentEpochInfo: EpochInfo -- epochUtilization(epoch: Int!): Float -- spoCount: BigInt - -Key return types (selected fields): - -- SpoIdentity: poolIdHex, mainchainPubkeyHex, sidechainPubkeyHex, auraPubkeyHex -- PoolMetadata: poolIdHex, hexId, name, ticker, homepageUrl, logoUrl -- Spo: poolIdHex, sidechainPubkeyHex, auraPubkeyHex, name, ticker, homepageUrl, logoUrl -- EpochPerf: epochNo, spoSkHex, produced, expected, identityLabel, poolIdHex -- EpochInfo: epochNo, durationSeconds, elapsedSeconds - -Notes - -- Identifiers are stored as plain strings (hex text), not BYTEA. Supply lowercase hex without 0x where possible. -- Performance joins use spo_sk (sidechain key) as the canonical identity. -- Subscriptions will be added later (NATS integration). - -## Configuration - -Excerpt (see spo-api/config.yaml): - -```yaml -infra: - storage: - host: localhost - port: 5432 - dbname: indexer - user: indexer - sslmode: prefer - max_connections: 10 - idle_timeout: 1m - max_lifetime: 5m - api: - address: 0.0.0.0 - port: 8090 - max_complexity: 2000 - max_depth: 50 -``` - -Provide the password via env: APP__INFRA__STORAGE__PASSWORD. 
diff --git a/spo-api/bin/entrypoint.sh b/spo-api/bin/entrypoint.sh deleted file mode 100644 index 2919086c..00000000 --- a/spo-api/bin/entrypoint.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -trap 'rm /var/run/spo-api/running' EXIT -trap 'kill -SIGINT $PID' INT -trap 'kill -SIGTERM $PID' TERM - -touch /var/run/spo-api/running -spo-api & -PID=$! -wait $PID diff --git a/spo-api/config.yaml b/spo-api/config.yaml deleted file mode 100644 index b6d398f9..00000000 --- a/spo-api/config.yaml +++ /dev/null @@ -1,36 +0,0 @@ -run_migrations: false - -application: - network_id: "preview" - -infra: - storage: - host: "localhost" - port: 5432 - dbname: "indexer" - user: "indexer" - sslmode: "prefer" - max_connections: 10 - idle_timeout: "1m" - max_lifetime: "5m" - - pub_sub: - url: "localhost:4222" - username: "indexer" - - api: - address: "0.0.0.0" - port: 8090 - request_body_limit: "1MiB" - max_complexity: 200 - max_depth: 15 - -telemetry: - tracing: - enabled: false - service_name: "spo-api" - otlp_exporter_endpoint: "http://localhost:4317" - metrics: - enabled: false - address: "0.0.0.0" - port: 9000 diff --git a/spo-api/src/application.rs b/spo-api/src/application.rs deleted file mode 100644 index 63635199..00000000 --- a/spo-api/src/application.rs +++ /dev/null @@ -1,49 +0,0 @@ -// This file is part of midnight-indexer. 
-// Copyright (C) 2025 Midnight Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::domain::Api; -use anyhow::Context as AnyhowContext; -use indexer_common::domain::{NetworkId, Subscriber}; -use log::warn; -use serde::Deserialize; -use serde_with::{DisplayFromStr, serde_as}; -use std::sync::{Arc, atomic::AtomicBool}; -use tokio::{select, signal::unix::Signal, task}; - -#[serde_as] -#[derive(Debug, Clone, Deserialize)] -pub struct Config { - #[serde_as(as = "DisplayFromStr")] - pub network_id: NetworkId, -} - -pub async fn run( - config: Config, - api: impl Api, - _subscriber: impl Subscriber, - mut sigterm: Signal, -) -> anyhow::Result<()> { - let Config { network_id } = config; - - // For now we don't track catch-up; expose ready immediately. We'll wire NATS later. - let caught_up = Arc::new(AtomicBool::new(true)); - - let serve_api_task = { - task::spawn(async move { - api.serve(network_id, caught_up) - .await - .context("serving SPO API") - }) - }; - - select! { - result = serve_api_task => result - .context("serve_api_task panicked") - .and_then(|r| r.context("serve_api_task failed")), - _ = sigterm.recv() => { - warn!("SIGTERM received"); - Ok(()) - } - } -} diff --git a/spo-api/src/config.rs b/spo-api/src/config.rs deleted file mode 100644 index 0ffdcc47..00000000 --- a/spo-api/src/config.rs +++ /dev/null @@ -1,19 +0,0 @@ -// This file is part of midnight-indexer. 
-// Copyright (C) 2025 Midnight Foundation -// SPDX-License-Identifier: Apache-2.0 - -use crate::{application, infra}; -#[cfg(feature = "cloud")] -#[derive(Debug, Clone, serde::Deserialize)] -pub struct Config { - pub run_migrations: bool, - - #[serde(rename = "application")] - pub application_config: application::Config, - - #[serde(rename = "infra")] - pub infra_config: infra::Config, - - #[serde(rename = "telemetry")] - pub telemetry_config: indexer_common::telemetry::Config, -} diff --git a/spo-api/src/domain.rs b/spo-api/src/domain.rs deleted file mode 100644 index 3d3f6827..00000000 --- a/spo-api/src/domain.rs +++ /dev/null @@ -1,47 +0,0 @@ -// This file is part of midnight-indexer. -// Copyright (C) 2025 Midnight Foundation -// SPDX-License-Identifier: Apache-2.0 - -use indexer_common::domain::NetworkId; -use std::{ - error::Error as StdError, - sync::{Arc, atomic::AtomicBool}, -}; - -#[trait_variant::make(Send)] -pub trait Api -where - Self: 'static, -{ - type Error: StdError + Send + Sync + 'static; - - async fn serve( - self, - network_id: NetworkId, - caught_up: Arc, - ) -> Result<(), Self::Error>; -} - -// --- SPO domain types (initial draft) --- - -#[derive(Debug, Clone)] -pub struct StakePoolOperator { - pub id: String, // canonical operator id (e.g. 
hash or bech32) - pub identity_key: Option, // optional identity / metadata key - pub display_name: Option, - pub created_at_epoch: Option, - pub last_active_epoch: Option, - pub performance_score: Option, - pub commission_rate: Option, - pub total_stake: Option, // string to avoid premature big-int choice -} - -#[derive(Debug, Clone)] -pub struct EpochPerformance { - pub epoch: i64, - pub operator_id: String, - pub blocks_produced: Option, - pub blocks_expected: Option, - pub performance_ratio: Option, - pub stake_share: Option, -} diff --git a/spo-api/src/infra.rs b/spo-api/src/infra.rs deleted file mode 100644 index 3308ecd4..00000000 --- a/spo-api/src/infra.rs +++ /dev/null @@ -1,16 +0,0 @@ -// This file is part of midnight-indexer. -// Copyright (C) 2025 Midnight Foundation -// SPDX-License-Identifier: Apache-2.0 - -pub mod api; - -#[cfg_attr(docsrs, doc(cfg(feature = "cloud")))] -#[cfg(feature = "cloud")] -#[derive(Debug, Clone, serde::Deserialize)] -pub struct Config { - #[serde(rename = "api")] - pub api_config: api::Config, - - #[serde(rename = "storage")] - pub storage_config: indexer_common::infra::pool::postgres::Config, -} diff --git a/spo-api/src/infra/api/mod.rs b/spo-api/src/infra/api/mod.rs deleted file mode 100644 index b172fdff..00000000 --- a/spo-api/src/infra/api/mod.rs +++ /dev/null @@ -1,198 +0,0 @@ -// This file is part of midnight-indexer. 
-// Copyright (C) 2025 Midnight Foundation -// SPDX-License-Identifier: Apache-2.0 - -pub mod v1; - -use crate::domain::Api; -use async_graphql::Context; -use axum::{ - Router, - extract::{FromRef, State}, - http::StatusCode, - response::IntoResponse, - routing::get, -}; -use indexer_common::{domain::NetworkId, infra::pool::postgres::PostgresPool}; -use log::info; -use serde::Deserialize; -use std::{ - io, - net::IpAddr, - sync::{ - Arc, - atomic::{AtomicBool, Ordering}, - }, -}; -use thiserror::Error; -use tokio::signal::unix::{SignalKind, signal}; -use tower::ServiceBuilder; -use tower_http::{cors::CorsLayer, limit::RequestBodyLimitLayer}; - -#[derive(Clone)] -pub struct Db(pub PostgresPool); - -#[derive(Clone)] -pub struct AppState { - pub caught_up: Arc, - pub db: Option, -} - -impl FromRef for Arc { - fn from_ref(s: &AppState) -> Arc { - s.caught_up.clone() - } -} -impl FromRef for Option { - fn from_ref(s: &AppState) -> Option { - s.db.clone() - } -} - -pub struct AxumApi { - config: Config, - db: Option, -} - -impl AxumApi { - pub fn new(config: Config) -> Self { - Self { config, db: None } - } - pub fn with_db(mut self, db: Db) -> Self { - self.db = Some(db); - self - } -} - -impl Api for AxumApi { - type Error = AxumApiError; - - async fn serve( - self, - network_id: NetworkId, - caught_up: Arc, - ) -> Result<(), Self::Error> { - let Config { - address, - port, - request_body_limit, - max_complexity, - max_depth, - } = self.config; - - // In the current shape AxumApi doesn't own the pool; we keep readiness simple (caught_up only). 
- let app = make_app( - caught_up, - self.db, - network_id, - max_complexity, - max_depth, - request_body_limit as usize, - ); - - let listener = tokio::net::TcpListener::bind((address, port)) - .await - .map_err(AxumApiError::Bind)?; - info!(address:?, port; "listening to TCP connections"); - axum::serve(listener, app) - .with_graceful_shutdown(shutdown_signal()) - .await - .map_err(AxumApiError::Serve) - } -} - -#[derive(Debug, Clone, Deserialize)] -pub struct Config { - pub address: IpAddr, - pub port: u16, - #[serde(with = "byte_unit_serde")] - pub request_body_limit: u64, - pub max_complexity: usize, - pub max_depth: usize, -} - -#[derive(Debug, Error)] -pub enum AxumApiError { - #[error("cannot bind tcp listener")] - Bind(#[source] io::Error), - #[error("cannot serve API")] - Serve(#[source] io::Error), -} - -pub struct Metrics; -impl Default for Metrics { - fn default() -> Self { Self } -} - -#[allow(clippy::too_many_arguments)] -fn make_app( - caught_up: Arc, - db: Option, - network_id: NetworkId, - max_complexity: usize, - max_depth: usize, - request_body_limit: usize, -) -> Router { - let app_state = AppState { caught_up, db }; - let v1_app = v1::make_app(network_id, max_complexity, max_depth, app_state.db.clone()) - .with_state(app_state.clone()); - - Router::new() - .route("/ready", get(ready)) - .nest("/api/v1", v1_app) - .with_state(app_state) - .layer( - ServiceBuilder::new() - .layer(RequestBodyLimitLayer::new(request_body_limit)) - .layer(CorsLayer::permissive()), - ) -} - -async fn ready( - State(caught_up): State>, - State(db): State>, -) -> impl IntoResponse { - if !caught_up.load(Ordering::Acquire) { - ( - StatusCode::SERVICE_UNAVAILABLE, - "indexer has not yet caught up with the node", - ) - .into_response() - } else { - // if a DB is provided, try a lightweight ping - if let Some(Db(pool)) = db { - if let Err(_e) = sqlx::query_scalar::<_, i32>("SELECT 1") - .fetch_one(&*pool) - .await - { - return (StatusCode::SERVICE_UNAVAILABLE, "database 
not ready").into_response(); - } - } - StatusCode::OK.into_response() - } -} - -// removed custom 400->413 transform; default behavior is acceptable for MVP - -async fn shutdown_signal() { - signal(SignalKind::terminate()) - .expect("install SIGTERM handler") - .recv() - .await; -} - -pub trait ContextExt { - fn get_network_id(&self) -> NetworkId; - fn get_metrics(&self) -> &Metrics; -} -impl ContextExt for Context<'_> { - fn get_network_id(&self) -> NetworkId { - self.data::() - .cloned() - .expect("NetworkId is stored in Context") - } - fn get_metrics(&self) -> &Metrics { - self.data::() - .expect("Metrics is stored in Context") - } -} diff --git a/spo-api/src/infra/api/v1/mod.rs b/spo-api/src/infra/api/v1/mod.rs deleted file mode 100644 index d506af29..00000000 --- a/spo-api/src/infra/api/v1/mod.rs +++ /dev/null @@ -1,1543 +0,0 @@ -// This file is part of midnight-indexer. -// Copyright (C) 2025 Midnight Foundation -// SPDX-License-Identifier: Apache-2.0 - -use super::{AppState, ContextExt, Db, Metrics}; -use async_graphql::{Context, EmptyMutation, EmptySubscription, Object, Schema}; -use async_graphql_axum::{GraphQL, GraphQLSubscription}; -use axum::{ - Router, - response::IntoResponse, - routing::{get, post_service}, -}; -use indexer_common::domain::NetworkId; -use log::{info, warn}; -// no extra imports needed here -use regex::Regex; -// no rust_decimal to keep sqlx decoding simple; we parse numerics as strings when needed - -const DEFAULT_PERFORMANCE_LIMIT: i64 = 20; - -type EpochPerfRow = ( - i64, // epoch_no (BIGINT) - String, // spo_sk_hex - i32, // produced_blocks (INT) - i32, // expected_blocks (INT) - Option, // identity_label - Option, // stake_snapshot - Option, // pool_id_hex - Option, // validator_class -); - -pub fn make_app( - network_id: NetworkId, - max_complexity: usize, - max_depth: usize, - db: Option, -) -> Router { - let schema = Schema::build(Query::default(), EmptyMutation, EmptySubscription) - .limit_complexity(max_complexity) - 
.limit_depth(max_depth) - .data(network_id) - .data(Metrics::default()) - .data(db) - // Inject optional Db from AppState via Router state in handlers - .finish(); - - // Runtime confirmation that extended schema is present. - if schema.sdl().contains("spoCompositeByPoolId") { - info!("GraphQL schema includes spoCompositeByPoolId"); - } else { - warn!("spoCompositeByPoolId missing from schema – ensure service rebuilt without cache"); - } - - Router::new() - // Support both /graphql and /graphql/ to avoid 404 (empty body -> GraphiQL JSON parse error) - .route("/graphql", get(graphiql)) - .route("/graphql/", get(graphiql)) - .route("/graphql", post_service(GraphQL::new(schema.clone()))) - .route("/graphql/", post_service(GraphQL::new(schema.clone()))) - .route_service("/graphql/ws", GraphQLSubscription::new(schema.clone())) - .route_service("/graphql/ws/", GraphQLSubscription::new(schema)) -} - -#[derive(Default)] -pub struct Query; - -#[Object(rename_fields = "camelCase")] -impl Query { - async fn service_info(&self, cx: &Context<'_>) -> ServiceInfo { - let network = format!("{}", cx.get_network_id()); - ServiceInfo { - name: "spo-api".into(), - version: env!("CARGO_PKG_VERSION").into(), - network, - } - } - - /// Cumulative total of currently registered SPOs over an epoch range, using first-seen epochs. - /// - /// Semantics: - /// - Domain is limited to pools present in spo_stake_snapshot ("current" pools), so the final - /// value equals spo_count by construction. - /// - First-seen epoch per pool is computed as the minimum epoch where that pool_id appears in any of: - /// spo_history (via spo_identity), committee_membership (via spo_identity), spo_epoch_performance (via spo_identity). - /// - If a current pool has no appearances in those sources, it is assigned first_seen_epoch = to_epoch - /// (it will enter at the end of the requested window so totals match spo_count). 
- async fn registered_totals_series( - &self, - cx: &Context<'_>, - from_epoch: i64, - to_epoch: i64, - ) -> Vec { - let start = from_epoch.min(to_epoch); - let end = to_epoch.max(from_epoch); - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - let sql = r#" - WITH rng AS ( - SELECT generate_series($1::BIGINT, $2::BIGINT) AS epoch_no - ), - cur AS ( - SELECT s.pool_id - FROM spo_stake_snapshot s - ), - union_firsts AS ( - SELECT si.pool_id AS pool_id, MIN(sh.epoch_no)::BIGINT AS first_seen_epoch - FROM spo_history sh - LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk - WHERE si.pool_id IS NOT NULL - GROUP BY si.pool_id - UNION ALL - SELECT si.pool_id AS pool_id, MIN(cm.epoch_no)::BIGINT AS first_seen_epoch - FROM committee_membership cm - LEFT JOIN spo_identity si ON si.sidechain_pubkey = cm.sidechain_pubkey - WHERE si.pool_id IS NOT NULL - GROUP BY si.pool_id - UNION ALL - SELECT si.pool_id AS pool_id, MIN(sep.epoch_no)::BIGINT AS first_seen_epoch - FROM spo_epoch_performance sep - LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk - WHERE si.pool_id IS NOT NULL - GROUP BY si.pool_id - ), - firsts0 AS ( - SELECT pool_id, MIN(first_seen_epoch)::BIGINT AS first_seen_epoch - FROM union_firsts - GROUP BY pool_id - ), - firsts_cur AS ( - SELECT c.pool_id, - COALESCE(f0.first_seen_epoch, $2::BIGINT) AS first_seen_epoch - FROM cur c - LEFT JOIN firsts0 f0 ON f0.pool_id = c.pool_id - ), - agg AS ( - SELECT r.epoch_no, - COUNT(*) FILTER (WHERE fc.first_seen_epoch <= r.epoch_no) AS total_registered, - COUNT(*) FILTER (WHERE fc.first_seen_epoch = r.epoch_no) AS newly_registered - FROM rng r - CROSS JOIN firsts_cur fc - GROUP BY r.epoch_no - ) - SELECT epoch_no, total_registered, newly_registered - FROM agg - ORDER BY epoch_no - "#; - match sqlx::query_as::<_, (i64, i64, i64)>(sql) - .bind(start) - .bind(end) - .fetch_all(&**pool) - .await - { - Ok(rows) => rows - .into_iter() - .map(|(epoch_no, total_registered, newly_registered)| RegisteredTotals { - 
epoch_no, - total_registered, - newly_registered, - }) - .collect(), - Err(e) => { - warn!("registered_totals_series query failed: {e}"); - vec![] - } - } - } else { - vec![] - } - } - - // ------------------------------------------------- - // Identity (no metadata) queries - // ------------------------------------------------- - async fn spo_identities( - &self, - cx: &Context<'_>, - limit: Option, - offset: Option, - ) -> Vec { - let limit = limit.unwrap_or(50).clamp(1, 500) as i64; - let offset = offset.unwrap_or(0).max(0) as i64; - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - let sql = r#" - SELECT pool_id AS pool_id_hex, - mainchain_pubkey AS mainchain_pubkey_hex, - sidechain_pubkey AS sidechain_pubkey_hex, - aura_pubkey AS aura_pubkey_hex, - 'UNKNOWN' AS validator_class - FROM spo_identity - WHERE pool_id IS NOT NULL - ORDER BY mainchain_pubkey - LIMIT $1 OFFSET $2 - "#; - match sqlx::query_as::<_, (String, String, String, Option, String)>(sql) - .bind(limit) - .bind(offset) - .fetch_all(&**pool) - .await - { - Ok(rows) => rows - .into_iter() - .map( - |( - pool_id_hex, - mainchain_pubkey_hex, - sidechain_pubkey_hex, - aura_pubkey_hex, - validator_class, - )| SpoIdentity { - pool_id_hex, - mainchain_pubkey_hex, - sidechain_pubkey_hex, - aura_pubkey_hex, - validator_class, - }, - ) - .collect(), - Err(e) => { - warn!("spo_identities query failed: {e}"); - vec![] - } - } - } else { - vec![] - } - } - - async fn spo_identity_by_pool_id( - &self, - cx: &Context<'_>, - pool_id_hex: String, - ) -> Option { - let pool_id_hex = normalize_hex(&pool_id_hex)?; - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - let sql = r#" - SELECT pool_id AS pool_id_hex, - mainchain_pubkey AS mainchain_pubkey_hex, - sidechain_pubkey AS sidechain_pubkey_hex, - aura_pubkey AS aura_pubkey_hex, - 'UNKNOWN' AS validator_class - FROM spo_identity - WHERE pool_id = $1 - LIMIT 1 - "#; - match sqlx::query_as::<_, (String, String, String, Option, 
String)>(sql) - .bind(&pool_id_hex) - .fetch_optional(&**pool) - .await - { - Ok(Some(( - pool_id_hex, - mainchain_pubkey_hex, - sidechain_pubkey_hex, - aura_pubkey_hex, - validator_class, - ))) => Some(SpoIdentity { - pool_id_hex, - mainchain_pubkey_hex, - sidechain_pubkey_hex, - aura_pubkey_hex, - validator_class, - }), - Ok(None) => None, - Err(e) => { - warn!("spo_identity_by_pool_id query failed: {e}"); - None - } - } - } else { - None - } - } - - // ------------------------------------------------- - // Metadata queries - // ------------------------------------------------- - async fn pool_metadata(&self, cx: &Context<'_>, pool_id_hex: String) -> Option { - let pool_id_hex = normalize_hex(&pool_id_hex)?; - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - let sql = r#" - SELECT pool_id AS pool_id_hex, - hex_id AS hex_id, - name, ticker, homepage_url, url AS logo_url - FROM pool_metadata_cache - WHERE pool_id = $1 - LIMIT 1 - "#; - match sqlx::query_as::< - _, - ( - String, - Option, - Option, - Option, - Option, - Option, - ), - >(sql) - .bind(&pool_id_hex) - .fetch_optional(&**pool) - .await - { - Ok(Some((pool_id_hex, hex_id, name, ticker, homepage_url, logo_url))) => { - Some(PoolMetadata { - pool_id_hex, - hex_id, - name, - ticker, - homepage_url, - logo_url, - }) - } - Ok(None) => None, - Err(e) => { - warn!("pool_metadata query failed: {e}"); - None - } - } - } else { - None - } - } - - async fn pool_metadata_list( - &self, - cx: &Context<'_>, - limit: Option, - offset: Option, - with_name_only: Option, - ) -> Vec { - let limit = limit.unwrap_or(50).clamp(1, 500) as i64; - let offset = offset.unwrap_or(0).max(0) as i64; - let name_only = with_name_only.unwrap_or(false); - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - let sql = if name_only { - r#" - SELECT pool_id AS pool_id_hex, - hex_id AS hex_id, - name, ticker, homepage_url, url AS logo_url - FROM pool_metadata_cache - WHERE name IS NOT NULL OR ticker IS 
NOT NULL - ORDER BY pool_id - LIMIT $1 OFFSET $2 - "# - } else { - r#" - SELECT pool_id AS pool_id_hex, - hex_id AS hex_id, - name, ticker, homepage_url, url AS logo_url - FROM pool_metadata_cache - ORDER BY pool_id - LIMIT $1 OFFSET $2 - "# - }; - match sqlx::query_as::< - _, - ( - String, - Option, - Option, - Option, - Option, - Option, - ), - >(sql) - .bind(limit) - .bind(offset) - .fetch_all(&**pool) - .await - { - Ok(rows) => rows - .into_iter() - .map( - |(pool_id_hex, hex_id, name, ticker, homepage_url, logo_url)| { - PoolMetadata { - pool_id_hex, - hex_id, - name, - ticker, - homepage_url, - logo_url, - } - }, - ) - .collect(), - Err(e) => { - warn!("pool_metadata_list query failed: {e}"); - vec![] - } - } - } else { - vec![] - } - } - - // ------------------------------------------------- - // Composite query - // ------------------------------------------------- - async fn spo_composite_by_pool_id( - &self, - cx: &Context<'_>, - pool_id_hex: String, - ) -> Option { - let pool_id_hex = normalize_hex(&pool_id_hex)?; - let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) else { - return None; - }; - - let identity_sql = r#" - SELECT pool_id AS pool_id_hex, - mainchain_pubkey AS mainchain_pubkey_hex, - sidechain_pubkey AS sidechain_pubkey_hex, - aura_pubkey AS aura_pubkey_hex, - 'UNKNOWN' AS validator_class - FROM spo_identity - WHERE pool_id = $1 - LIMIT 1 - "#; - let identity = match sqlx::query_as::< - _, - (String, String, String, Option, String), - >(identity_sql) - .bind(&pool_id_hex) - .fetch_optional(&**pool) - .await - { - Ok(Some(( - pool_id_hex, - mainchain_pubkey_hex, - sidechain_pubkey_hex, - aura_pubkey_hex, - validator_class, - ))) => Some(SpoIdentity { - pool_id_hex, - mainchain_pubkey_hex, - sidechain_pubkey_hex, - aura_pubkey_hex, - validator_class, - }), - Ok(None) => None, - Err(e) => { - warn!("spo_composite_by_pool_id identity query failed: {e}"); - None - } - }; - - let metadata_sql = r#" - SELECT pool_id AS pool_id_hex, - 
hex_id AS hex_id, - name, ticker, homepage_url, url AS logo_url - FROM pool_metadata_cache - WHERE pool_id = $1 - LIMIT 1 - "#; - let metadata = match sqlx::query_as::< - _, - ( - String, - Option, - Option, - Option, - Option, - Option, - ), - >(metadata_sql) - .bind(&pool_id_hex) - .fetch_optional(&**pool) - .await - { - Ok(Some((pool_id_hex, hex_id, name, ticker, homepage_url, logo_url))) => { - Some(PoolMetadata { - pool_id_hex, - hex_id, - name, - ticker, - homepage_url, - logo_url, - }) - } - Ok(None) => None, - Err(e) => { - warn!("spo_composite_by_pool_id metadata query failed: {e}"); - None - } - }; - - let performance = if let Some(identity_ref) = identity.as_ref() { - // Performance rows are keyed by sidechain_pubkey (sep.spo_sk). - let sk_hex = &identity_ref.sidechain_pubkey_hex; - let perf_sql = r#" - SELECT sep.epoch_no, - sep.spo_sk AS spo_sk_hex, - sep.produced_blocks, - sep.expected_blocks, - sep.identity_label, - NULL::TEXT AS stake_snapshot, - si.pool_id AS pool_id_hex, - 'UNKNOWN' AS validator_class - FROM spo_epoch_performance sep - LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk - WHERE sep.spo_sk = $1 - ORDER BY sep.epoch_no DESC - LIMIT $2 - "#; - match sqlx::query_as::< - _, - ( - i64, - String, - i32, - i32, - Option, - Option, - Option, - Option, - ), - >(perf_sql) - .bind(sk_hex) - .bind(DEFAULT_PERFORMANCE_LIMIT) - .fetch_all(&**pool) - .await - { - Ok(rows) => rows.into_iter().map(EpochPerf::from_tuple).collect(), - Err(e) => { - warn!("spo_composite_by_pool_id performance query failed: {e}"); - vec![] - } - } - } else { - vec![] - }; - - Some(SpoComposite { - identity, - metadata, - performance, - }) - } - - /// List stake pool operator identifiers (placeholder – returns empty if table missing / error). 
- async fn stake_pool_operators(&self, cx: &Context<'_>, limit: Option) -> Vec { - let limit = limit.unwrap_or(20).clamp(1, 100) as i64; - // Access optional Db from Router state (AppState) - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - let sql = r#" - SELECT encode(sep.spo_sk,'hex') AS spo_sk_hex - FROM spo_epoch_performance sep - GROUP BY sep.spo_sk - ORDER BY MAX(sep.produced_blocks) DESC - LIMIT $1 - "#; - match sqlx::query_scalar::<_, String>(sql) - .bind(limit) - .fetch_all(&**pool) - .await - { - Ok(rows) => return rows, - Err(e) => { - warn!("stake_pool_operators query failed: {e}"); - return vec![]; - } - } - } - vec![] - } - - /// Latest SPO performance entries ordered by epoch (desc) and produced blocks (desc). - async fn spo_performance_latest( - &self, - cx: &Context<'_>, - limit: Option, - offset: Option, - ) -> Vec { - let limit = limit - .unwrap_or(DEFAULT_PERFORMANCE_LIMIT as i32) - .clamp(1, 500) as i64; - let offset = offset.unwrap_or(0).max(0) as i64; - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - let sql = r#" - SELECT sep.epoch_no, - sep.spo_sk AS spo_sk_hex, - sep.produced_blocks, - sep.expected_blocks, - sep.identity_label, - NULL::TEXT AS stake_snapshot, - si.pool_id AS pool_id_hex, - 'UNKNOWN' AS validator_class - FROM spo_epoch_performance sep - LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk - ORDER BY sep.epoch_no DESC, sep.produced_blocks DESC - LIMIT $1 OFFSET $2 - "#; - match sqlx::query_as::<_, EpochPerfRow>(sql) - .bind(limit) - .bind(offset) - .fetch_all(&**pool) - .await - { - Ok(rows) => rows.into_iter().map(EpochPerf::from_tuple).collect(), - Err(e) => { - warn!("spo_performance_latest query failed: {e}"); - vec![] - } - } - } else { - vec![] - } - } - - /// Performance history for a single SPO (identified by its side/mainchain key hex representation). 
- async fn spo_performance_by_spo_sk( - &self, - cx: &Context<'_>, - spo_sk_hex: String, - limit: Option, - offset: Option, - ) -> Vec { - let spo_sk_hex = match normalize_hex(&spo_sk_hex) { - Some(hex) => hex, - None => return vec![], - }; - let limit = limit.unwrap_or(100).clamp(1, 500) as i64; - let offset = offset.unwrap_or(0).max(0) as i64; - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - let sql = r#" - SELECT sep.epoch_no, - sep.spo_sk AS spo_sk_hex, - sep.produced_blocks, - sep.expected_blocks, - sep.identity_label, - NULL::TEXT AS stake_snapshot, - si.pool_id AS pool_id_hex, - 'UNKNOWN' AS validator_class - FROM spo_epoch_performance sep - LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk - WHERE sep.spo_sk = $1 - ORDER BY sep.epoch_no DESC - LIMIT $2 OFFSET $3 - "#; - match sqlx::query_as::<_, EpochPerfRow>(sql) - .bind(&spo_sk_hex) - .bind(limit) - .bind(offset) - .fetch_all(&**pool) - .await - { - Ok(rows) => rows.into_iter().map(EpochPerf::from_tuple).collect(), - Err(e) => { - warn!("spo_performance_by_spo_sk query failed: {e}"); - vec![] - } - } - } else { - vec![] - } - } - - /// Epoch performance for a given epoch, tolerant of missing identity records. 
- async fn epoch_performance( - &self, - cx: &Context<'_>, - epoch: i64, - limit: Option, - offset: Option, - ) -> Vec { - let limit = limit.unwrap_or(100).clamp(1, 500) as i64; - let offset = offset.unwrap_or(0).max(0) as i64; - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - let sql = r#" - SELECT sep.epoch_no, - sep.spo_sk AS spo_sk_hex, - sep.produced_blocks, - sep.expected_blocks, - sep.identity_label, - NULL::TEXT AS stake_snapshot, - si.pool_id AS pool_id_hex, - 'UNKNOWN' AS validator_class - FROM spo_epoch_performance sep - LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk - WHERE sep.epoch_no = $1 - ORDER BY sep.produced_blocks DESC - LIMIT $2 OFFSET $3 - "#; - match sqlx::query_as::<_, EpochPerfRow>(sql) - .bind(epoch) - .bind(limit) - .bind(offset) - .fetch_all(&**pool) - .await - { - Ok(rows) => rows.into_iter().map(EpochPerf::from_tuple).collect(), - Err(e) => { - warn!("epoch_performance query failed: {e}"); - vec![] - } - } - } else { - vec![] - } - } - - /// List SPOs with optional metadata, paginated. - async fn spo_list( - &self, - cx: &Context<'_>, - limit: Option, - offset: Option, - search: Option, - ) -> Vec { - let limit = limit.unwrap_or(20).clamp(1, 200) as i64; - let offset = offset.unwrap_or(0).max(0) as i64; - let search = search.as_ref().and_then(|s| { - let trimmed = s.trim(); - if trimmed.is_empty() { None } else { Some(trimmed.to_string()) } - }); - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - // Use spo_stake_snapshot as the canonical current set to align counts with spo_count. 
- let sql = if search.is_some() { - r#" - SELECT s.pool_id AS pool_id_hex, - 'UNKNOWN' AS validator_class, - si.sidechain_pubkey AS sidechain_pubkey_hex, - si.aura_pubkey AS aura_pubkey_hex, - pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url - FROM spo_stake_snapshot s - LEFT JOIN spo_identity si ON si.pool_id = s.pool_id - LEFT JOIN pool_metadata_cache pm ON pm.pool_id = s.pool_id - WHERE ( - pm.name ILIKE $3 OR pm.ticker ILIKE $3 OR pm.homepage_url ILIKE $3 OR s.pool_id ILIKE $4 - OR si.sidechain_pubkey ILIKE $4 OR si.aura_pubkey ILIKE $4 OR si.mainchain_pubkey ILIKE $4 - ) - ORDER BY COALESCE(si.mainchain_pubkey, s.pool_id) - LIMIT $1 OFFSET $2 - "# - } else { - r#" - SELECT s.pool_id AS pool_id_hex, - 'UNKNOWN' AS validator_class, - si.sidechain_pubkey AS sidechain_pubkey_hex, - si.aura_pubkey AS aura_pubkey_hex, - pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url - FROM spo_stake_snapshot s - LEFT JOIN spo_identity si ON si.pool_id = s.pool_id - LEFT JOIN pool_metadata_cache pm ON pm.pool_id = s.pool_id - ORDER BY COALESCE(si.mainchain_pubkey, s.pool_id) - LIMIT $1 OFFSET $2 - "# - }; - - // Build bind params - let mut q = sqlx::query_as::< - _, - ( - String, - String, - String, - Option, - Option, - Option, - Option, - Option, - ), - >(sql); - q = q.bind(limit).bind(offset); - if let Some(s) = search { - // For text fields use %term% ; for hex-like identifiers also use %term_no_0x% - let s_like = format!("%{}%", s); - let s_hex = normalize_hex(&s).unwrap_or_else(|| s.to_ascii_lowercase()); - let s_hex_like = format!("%{}%", s_hex); - q = q.bind(s_like).bind(s_hex_like); - } - - match q.fetch_all(&**pool).await { - Ok(rows) => rows - .into_iter() - .map( - |( - pool_id_hex, - validator_class, - sidechain_pubkey_hex, - aura_pubkey_hex, - name, - ticker, - homepage_url, - logo_url, - )| Spo { - pool_id_hex, - validator_class, - sidechain_pubkey_hex, - aura_pubkey_hex, - name, - ticker, - homepage_url, - logo_url, - }, - ) - .collect(), - Err(e) => { 
- warn!("spo_list query failed: {e}"); - vec![] - } - } - } else { - vec![] - } - } - - /// Stake distribution for registered SPOs, based on spo_stake_snapshot (latest values). - async fn stake_distribution( - &self, - cx: &Context<'_>, - limit: Option, - offset: Option, - search: Option, - order_by_stake_desc: Option, - ) -> Vec { - let limit = limit.unwrap_or(50).clamp(1, 500) as i64; - let offset = offset.unwrap_or(0).max(0) as i64; - let search = search.as_ref().and_then(|s| { - let t = s.trim(); - if t.is_empty() { None } else { Some(t.to_string()) } - }); - - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - // Compute total across pools first for share calculation - let total_sql = r#" - SELECT COALESCE(SUM(s.live_stake), 0)::TEXT - FROM spo_stake_snapshot s - "#; - let total_live_str: String = match sqlx::query_scalar(total_sql) - .fetch_one(&**pool) - .await - { - Ok(v) => v, - Err(e) => { - warn!("stake_distribution total stake query failed: {e}"); - "0".to_string() - } - }; - let total_live_f64: f64 = total_live_str.parse::().unwrap_or(0.0); - - let order_desc = order_by_stake_desc.unwrap_or(true); - let base_select = if search.is_some() { - r#" - SELECT - pm.pool_id AS pool_id_hex, - pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url, - (s.live_stake)::TEXT, (s.active_stake)::TEXT, s.live_delegators, s.live_saturation, - (s.declared_pledge)::TEXT, (s.live_pledge)::TEXT - FROM spo_stake_snapshot s - JOIN pool_metadata_cache pm ON pm.pool_id = s.pool_id - WHERE ( - pm.name ILIKE $3 OR pm.ticker ILIKE $3 OR pm.homepage_url ILIKE $3 OR pm.pool_id ILIKE $4 - ) - ORDER BY COALESCE(s.live_stake, 0) DESC, pm.pool_id - LIMIT $1 OFFSET $2 - "# - } else { - r#" - SELECT - pm.pool_id AS pool_id_hex, - pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url, - (s.live_stake)::TEXT, (s.active_stake)::TEXT, s.live_delegators, s.live_saturation, - (s.declared_pledge)::TEXT, (s.live_pledge)::TEXT - FROM spo_stake_snapshot s - JOIN 
pool_metadata_cache pm ON pm.pool_id = s.pool_id - ORDER BY COALESCE(s.live_stake, 0) DESC, pm.pool_id - LIMIT $1 OFFSET $2 - "# - }; - - // optionally flip order if ascending requested - let sql = if order_desc { base_select.to_string() } else { base_select.replace("DESC", "ASC") }; - - let mut q = sqlx::query_as::< - _, - ( - String, // pool_id_hex - Option, // name - Option, // ticker - Option, // homepage_url - Option, // logo_url - Option, // live_stake (TEXT) - Option, // active_stake (TEXT) - Option, // live_delegators - Option, // live_saturation - Option, // declared_pledge (TEXT) - Option, // live_pledge (TEXT) - ), - >(&sql) - .bind(limit) - .bind(offset); - - if let Some(s) = search { - let s_like = format!("%{}%", s); - q = q.bind(s_like.clone()).bind(s_like); - } - - match q.fetch_all(&**pool).await { - Ok(rows) => rows - .into_iter() - .map(|(pool_id_hex, name, ticker, homepage_url, logo_url, live_stake, active_stake, live_delegators, live_saturation, declared_pledge, live_pledge)| { - // Compute share = live_stake / total_live - let share = { - let ls = live_stake.as_deref().unwrap_or("0"); - let lv = ls.parse::().unwrap_or(0.0); - if total_live_f64 > 0.0 { lv / total_live_f64 } else { 0.0 } - }; - let live_delegators_i64 = live_delegators.map(|v| v as i64); - StakeShare { - pool_id_hex, - name, - ticker, - homepage_url, - logo_url, - live_stake, - active_stake, - live_delegators: live_delegators_i64, - live_saturation, - declared_pledge, - live_pledge, - stake_share: Some(share), - } - }) - .collect(), - Err(e) => { - warn!("stake_distribution query failed: {e}"); - vec![] - } - } - } else { - vec![] - } - } - - /// Find single SPO by pool ID (hex string). - async fn spo_by_pool_id(&self, cx: &Context<'_>, pool_id_hex: String) -> Option { - let pool_id_hex = normalize_hex(&pool_id_hex)?; - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - // Accept hex string; decode on DB side. pool_id is BYTEA. 
- let query = r#" - SELECT si.pool_id AS pool_id_hex, - 'UNKNOWN' AS validator_class, - si.sidechain_pubkey AS sidechain_pubkey_hex, - si.aura_pubkey AS aura_pubkey_hex, - pm.name, pm.ticker, pm.homepage_url, pm.url AS logo_url - FROM spo_identity si - LEFT JOIN pool_metadata_cache pm ON pm.pool_id = si.pool_id - WHERE si.pool_id = $1 - LIMIT 1 - "#; - match sqlx::query_as::< - _, - ( - String, - String, - String, - Option, - Option, - Option, - Option, - Option, - ), - >(query) - .bind(&pool_id_hex) - .fetch_optional(&**pool) - .await - { - Ok(Some(( - pool_id_hex, - validator_class, - sidechain_pubkey_hex, - aura_pubkey_hex, - name, - ticker, - homepage_url, - logo_url, - ))) => Some(Spo { - pool_id_hex, - validator_class, - sidechain_pubkey_hex, - aura_pubkey_hex, - name, - ticker, - homepage_url, - logo_url, - }), - Err(e) => { - warn!("spo_by_pool_id query failed: {e}"); - None - } - Ok(None) => None, - } - } else { - None - } - } - - // ------------------------------------------------- - // KPI / Dashboard helpers - // ------------------------------------------------- - /// Current epoch info with duration and elapsed seconds. 
- async fn current_epoch_info(&self, cx: &Context<'_>) -> Option { - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - let sql = r#" - WITH last AS ( - SELECT - epoch_no, - EXTRACT(EPOCH FROM starts_at)::BIGINT AS starts_s, - EXTRACT(EPOCH FROM ends_at)::BIGINT AS ends_s, - EXTRACT(EPOCH FROM (ends_at - starts_at))::BIGINT AS dur_s, - EXTRACT(EPOCH FROM NOW())::BIGINT AS now_s - FROM epochs - ORDER BY epoch_no DESC - LIMIT 1 - ), calc AS ( - SELECT - epoch_no, starts_s, ends_s, dur_s, now_s, - CASE WHEN ends_s > now_s THEN 0 - ELSE ((now_s - ends_s) / dur_s)::BIGINT + 1 END AS n - FROM last - ), synth AS ( - SELECT - (epoch_no + n) AS epoch_no, - dur_s AS duration_seconds, - CASE WHEN n = 0 THEN LEAST(GREATEST(now_s - starts_s, 0), dur_s) - ELSE LEAST(GREATEST(now_s - (ends_s + (n - 1) * dur_s), 0), dur_s) - END AS elapsed_seconds - FROM calc - ) - SELECT epoch_no, duration_seconds, elapsed_seconds FROM synth - "#; - match sqlx::query_as::<_, (i64, i64, i64)>(sql).fetch_optional(&**pool).await { - Ok(Some((epoch_no, duration_seconds, elapsed_seconds))) => Some(EpochInfo { - epoch_no, - duration_seconds, - elapsed_seconds, - }), - Ok(None) => None, - Err(e) => { - warn!("current_epoch_info query failed: {e}"); - None - } - } - } else { - None - } - } - - /// Epoch-wide block utilization = sum(produced) / sum(expected) (0.0 if no data or expected == 0). 
- async fn epoch_utilization(&self, cx: &Context<'_>, epoch: i32) -> Option { - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - let sql = r#" - SELECT COALESCE( - CASE WHEN SUM(expected_blocks) > 0 - THEN SUM(produced_blocks)::DOUBLE PRECISION / SUM(expected_blocks) - ELSE 0.0 END, - 0.0) AS utilization - FROM spo_epoch_performance - WHERE epoch_no = $1 - "#; - match sqlx::query_scalar::<_, Option>(sql) - .bind(epoch as i64) - .fetch_one(&**pool) - .await - { - Ok(v) => v.or(Some(0.0)), - Err(e) => { - warn!("epoch_utilization query failed: {e}"); - None - } - } - } else { - None - } - } - - /// Number of SPO identities (with a pool_id present). - async fn spo_count(&self, cx: &Context<'_>) -> Option { - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - // Single source of truth for current SPOs: spo_stake_snapshot - let sql = r#" - SELECT COUNT(1)::BIGINT FROM spo_stake_snapshot - "#; - match sqlx::query_scalar::<_, i64>(sql).fetch_one(&**pool).await { - Ok(count) => Some(count), - Err(e) => { - warn!("spo_count query failed: {e}"); - None - } - } - } else { - None - } - } - - /// Committee membership for an epoch (ordered by position), with identity enrichment when available. 
- async fn committee( - &self, - cx: &Context<'_>, - epoch: i64, - ) -> Vec { - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - let sql = r#" - SELECT - cm.epoch_no, - cm.position, - cm.sidechain_pubkey AS sidechain_pubkey_hex, - cm.expected_slots, - si.aura_pubkey AS aura_pubkey_hex, - si.pool_id AS pool_id_hex, - si.spo_sk AS spo_sk_hex - FROM committee_membership cm - LEFT JOIN spo_identity si ON si.sidechain_pubkey = cm.sidechain_pubkey - WHERE cm.epoch_no = $1 - ORDER BY cm.position - "#; - match sqlx::query_as::< - _, - ( - i64, // epoch_no - i32, // position - String, // sidechain_pubkey_hex - i32, // expected_slots - Option, // aura_pubkey_hex - Option, // pool_id_hex - Option, // spo_sk_hex - ), - >(sql) - .bind(epoch) - .fetch_all(&**pool) - .await - { - Ok(rows) => rows - .into_iter() - .map(|(epoch_no, position, sidechain_pubkey_hex, expected_slots, aura_pubkey_hex, pool_id_hex, spo_sk_hex)| CommitteeMember { - epoch_no, - position, - sidechain_pubkey_hex, - expected_slots, - aura_pubkey_hex, - pool_id_hex, - spo_sk_hex, - }) - .collect(), - Err(e) => { - warn!("committee query failed: {e}"); - vec![] - } - } - } else { - vec![] - } - } - - /// Registration counts series for an epoch range. Uses DB when possible. - async fn registered_spo_series( - &self, - cx: &Context<'_>, - from_epoch: i64, - to_epoch: i64, - ) -> Vec { - let start = from_epoch.min(to_epoch); - let end = to_epoch.max(from_epoch); - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - // Simplified: return raw per-epoch counts directly from DB sources. 
- // - federated_valid_count: distinct committee members with expected_slots > 0 - // - registered_valid_count: distinct VALID in spo_history per epoch - // - registered_invalid_count: distinct INVALID in spo_history per epoch - // - federated_invalid_count: 0 (not tracked) - // - dparam: same as registered_valid_count as DOUBLE PRECISION (frontend can derive other metrics) - let sql = r#" - WITH rng AS ( - SELECT generate_series($1::BIGINT, $2::BIGINT) AS epoch_no - ), - hist_valid AS ( - SELECT sh.epoch_no, - COUNT(DISTINCT si.pool_id) AS cnt - FROM spo_history sh - LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk - WHERE sh.status IN ('VALID','Valid') - AND sh.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT - AND si.pool_id IS NOT NULL - GROUP BY sh.epoch_no - ), - hist_invalid AS ( - SELECT sh.epoch_no, - COUNT(DISTINCT si.pool_id) AS cnt - FROM spo_history sh - LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk - WHERE sh.status IN ('INVALID','Invalid') - AND sh.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT - AND si.pool_id IS NOT NULL - GROUP BY sh.epoch_no - ), - fed AS ( - SELECT c.epoch_no, - COUNT(DISTINCT c.sidechain_pubkey) FILTER (WHERE c.expected_slots > 0) AS federated_valid_count, - 0::BIGINT AS federated_invalid_count - FROM committee_membership c - WHERE c.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT - GROUP BY c.epoch_no - ) - SELECT r.epoch_no, - COALESCE(f.federated_valid_count, 0) AS federated_valid_count, - COALESCE(f.federated_invalid_count, 0) AS federated_invalid_count, - COALESCE(hv.cnt, 0) AS registered_valid_count, - COALESCE(hi.cnt, 0) AS registered_invalid_count, - COALESCE(hv.cnt, 0)::DOUBLE PRECISION AS dparam - FROM rng r - LEFT JOIN hist_valid hv ON hv.epoch_no = r.epoch_no - LEFT JOIN hist_invalid hi ON hi.epoch_no = r.epoch_no - LEFT JOIN fed f ON f.epoch_no = r.epoch_no - ORDER BY r.epoch_no - "#; - match sqlx::query_as::<_, (i64, i64, i64, i64, i64, Option)>(sql) - .bind(start) - .bind(end) - .fetch_all(&**pool) - .await - { - 
Ok(rows) => rows - .into_iter() - .map(|(epoch_no, f_valid, f_invalid, r_valid, r_invalid, dparam)| RegisteredStat { - epoch_no, - federated_valid_count: f_valid, - federated_invalid_count: f_invalid, - registered_valid_count: r_valid, - registered_invalid_count: r_invalid, - dparam, - }) - .collect(), - Err(e) => { - warn!("registered_spo_series query failed: {e}"); - vec![] - } - } - } else { - vec![] - } - } - - /// Raw presence events for SPO identity per epoch across sources (history, committee, performance). - /// Frontend can reconstruct totals/new registrations from these events. - async fn registered_presence( - &self, - cx: &Context<'_>, - from_epoch: i64, - to_epoch: i64, - ) -> Vec { - let start = from_epoch.min(to_epoch); - let end = to_epoch.max(from_epoch); - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - let sql = r#" - WITH history AS ( - SELECT sh.epoch_no::BIGINT AS epoch_no, - COALESCE(si.pool_id, sh.spo_sk) AS id_key, - 'history'::TEXT AS source, - sh.status::TEXT AS status - FROM spo_history sh - LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk - WHERE sh.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT - ), - committee AS ( - SELECT cm.epoch_no::BIGINT AS epoch_no, - COALESCE(si.pool_id, cm.sidechain_pubkey) AS id_key, - 'committee'::TEXT AS source, - NULL::TEXT AS status - FROM committee_membership cm - LEFT JOIN spo_identity si ON si.sidechain_pubkey = cm.sidechain_pubkey - WHERE cm.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT - ), - performance AS ( - SELECT sep.epoch_no::BIGINT AS epoch_no, - COALESCE(si.pool_id, sep.spo_sk) AS id_key, - 'performance'::TEXT AS source, - NULL::TEXT AS status - FROM spo_epoch_performance sep - LEFT JOIN spo_identity si ON si.spo_sk = sep.spo_sk - WHERE sep.epoch_no BETWEEN $1::BIGINT AND $2::BIGINT - ) - SELECT epoch_no, id_key, source, status FROM history - UNION ALL - SELECT epoch_no, id_key, source, status FROM committee - UNION ALL - SELECT epoch_no, id_key, source, status FROM 
performance - ORDER BY epoch_no, source, id_key - "#; - match sqlx::query_as::<_, (i64, String, String, Option)>(sql) - .bind(start) - .bind(end) - .fetch_all(&**pool) - .await - { - Ok(rows) => rows - .into_iter() - .map(|(epoch_no, id_key, source, status)| PresenceEvent { epoch_no, id_key, source, status }) - .collect(), - Err(e) => { - warn!("registered_presence query failed: {e}"); - vec![] - } - } - } else { - vec![] - } - } - - /// First valid epoch per identity (based on spo_history status VALID). Optional cutoff to bound the scan. - async fn registered_first_valid_epochs( - &self, - cx: &Context<'_>, - upto_epoch: Option, - ) -> Vec { - if let Some(Db(pool)) = cx.data_opt::>().and_then(|o| o.as_ref()) { - let sql = r#" - SELECT COALESCE(si.pool_id, sh.spo_sk) AS id_key, - MIN(sh.epoch_no)::BIGINT AS first_valid_epoch - FROM spo_history sh - LEFT JOIN spo_identity si ON si.spo_sk = sh.spo_sk - WHERE sh.status IN ('VALID','Valid') - AND ($1::BIGINT IS NULL OR sh.epoch_no <= $1::BIGINT) - GROUP BY 1 - ORDER BY first_valid_epoch - "#; - match sqlx::query_as::<_, (String, i64)>(sql) - .bind(upto_epoch) - .fetch_all(&**pool) - .await - { - Ok(rows) => rows - .into_iter() - .map(|(id_key, first_valid_epoch)| FirstValidEpoch { id_key, first_valid_epoch }) - .collect(), - Err(e) => { - warn!("registered_first_valid_epochs query failed: {e}"); - vec![] - } - } - } else { - vec![] - } - } -} - -#[derive(async_graphql::SimpleObject)] -#[graphql(rename_fields = "camelCase")] -pub struct ServiceInfo { - pub name: String, - pub version: String, - pub network: String, -} - -#[derive(async_graphql::SimpleObject)] -#[graphql(rename_fields = "camelCase")] -pub struct EpochInfo { - pub epoch_no: i64, - pub duration_seconds: i64, - pub elapsed_seconds: i64, -} - -#[derive(async_graphql::SimpleObject)] -#[graphql(rename_fields = "camelCase")] -pub struct EpochPerf { - pub epoch_no: i64, - pub spo_sk_hex: String, - pub produced: i64, - pub expected: i64, - pub identity_label: 
Option, - pub stake_snapshot: Option, - pub pool_id_hex: Option, - pub validator_class: Option, -} - -#[derive(async_graphql::SimpleObject)] -#[graphql(rename_fields = "camelCase")] -pub struct Spo { - pub pool_id_hex: String, - pub validator_class: String, - pub sidechain_pubkey_hex: String, - pub aura_pubkey_hex: Option, - pub name: Option, - pub ticker: Option, - pub homepage_url: Option, - pub logo_url: Option, -} - -#[derive(async_graphql::SimpleObject)] -#[graphql(rename_fields = "camelCase")] -pub struct SpoIdentity { - pub pool_id_hex: String, - pub mainchain_pubkey_hex: String, - pub sidechain_pubkey_hex: String, - pub aura_pubkey_hex: Option, - pub validator_class: String, -} - -#[derive(async_graphql::SimpleObject)] -#[graphql(rename_fields = "camelCase")] -pub struct PoolMetadata { - pub pool_id_hex: String, - pub hex_id: Option, - pub name: Option, - pub ticker: Option, - pub homepage_url: Option, - pub logo_url: Option, -} - -#[derive(async_graphql::SimpleObject)] -#[graphql(rename_fields = "camelCase")] -pub struct SpoComposite { - pub identity: Option, - pub metadata: Option, - pub performance: Vec, -} - -#[derive(async_graphql::SimpleObject)] -#[graphql(rename_fields = "camelCase")] -pub struct PresenceEvent { - pub epoch_no: i64, - pub id_key: String, - pub source: String, - pub status: Option, -} - -#[derive(async_graphql::SimpleObject)] -#[graphql(rename_fields = "camelCase")] -pub struct FirstValidEpoch { - pub id_key: String, - pub first_valid_epoch: i64, -} - -#[derive(async_graphql::SimpleObject)] -#[graphql(rename_fields = "camelCase")] -pub struct CommitteeMember { - pub epoch_no: i64, - pub position: i32, - pub sidechain_pubkey_hex: String, - pub expected_slots: i32, - pub aura_pubkey_hex: Option, - pub pool_id_hex: Option, - pub spo_sk_hex: Option, -} - -#[derive(async_graphql::SimpleObject)] -#[graphql(rename_fields = "camelCase")] -pub struct RegisteredStat { - pub epoch_no: i64, - pub federated_valid_count: i64, - pub 
federated_invalid_count: i64, - pub registered_valid_count: i64, - pub registered_invalid_count: i64, - pub dparam: Option, -} - -#[derive(async_graphql::SimpleObject)] -#[graphql(rename_fields = "camelCase")] -pub struct RegisteredTotals { - pub epoch_no: i64, - pub total_registered: i64, - pub newly_registered: i64, -} - -impl EpochPerf { - fn from_tuple(row: EpochPerfRow) -> Self { - let ( - epoch_no, - spo_sk_hex, - produced_i32, - expected_i32, - identity_label, - stake_snapshot, - pool_id_hex, - validator_class, - ) = row; - Self { - epoch_no, - spo_sk_hex, - produced: produced_i32 as i64, - expected: expected_i32 as i64, - identity_label, - stake_snapshot, - pool_id_hex, - validator_class, - } - } -} - -#[derive(async_graphql::SimpleObject)] -#[graphql(rename_fields = "camelCase")] -pub struct StakeShare { - pub pool_id_hex: String, - pub name: Option, - pub ticker: Option, - pub homepage_url: Option, - pub logo_url: Option, - pub live_stake: Option, - pub active_stake: Option, - pub live_delegators: Option, - pub live_saturation: Option, - pub declared_pledge: Option, - pub live_pledge: Option, - pub stake_share: Option, -} - - -async fn graphiql() -> impl IntoResponse { - use async_graphql::http::GraphiQLSource; - use axum::response::Html; - info!("Serving GraphiQL at /graphql"); - // Because this router is nested under /api/v1, we must point the JS client to the fully-qualified path. - // Otherwise the generated GraphiQL page will attempt requests to /graphql (404) -> empty body -> JSON parse error. 
- Html( - GraphiQLSource::build() - .endpoint("/api/v1/graphql") - .subscription_endpoint("/api/v1/graphql/ws") - .finish(), - ) -} - -// ------------------------------------------------- -// Helpers -// ------------------------------------------------- -fn normalize_hex(input: &str) -> Option { - if input.is_empty() { - return None; - } - let s = input - .strip_prefix("0x") - .unwrap_or(input) - .strip_prefix("0X") - .unwrap_or(input); - // Accept only even-length hex (bytea) and reasonable size (<= 256 chars to avoid abuse) - if s.len() % 2 != 0 || s.len() > 256 { - return None; - } - // Cheap validation (compiled once at runtime). If regex creation fails, we fallback to returning original. - static HEX_RE: once_cell::sync::Lazy = - once_cell::sync::Lazy::new(|| Regex::new("^[0-9a-fA-F]+$").unwrap()); - if !HEX_RE.is_match(s) { - return None; - } - Some(s.to_ascii_lowercase()) -} diff --git a/spo-api/src/infra/repo.rs b/spo-api/src/infra/repo.rs deleted file mode 100644 index bd3b797d..00000000 --- a/spo-api/src/infra/repo.rs +++ /dev/null @@ -1,29 +0,0 @@ -// This file is part of midnight-indexer. -// Copyright (C) 2025 Midnight Foundation -// SPDX-License-Identifier: Apache-2.0 - -use indexer_common::infra::pool::postgres::PostgresPool; -use anyhow::Context; - -#[derive(Debug, Clone)] -pub struct SpoRepository { - pool: PostgresPool, -} - -impl SpoRepository { - pub fn new(pool: PostgresPool) -> Self { Self { pool } } - - /// List stake pool operator identifiers (placeholder implementation). - pub async fn list_stake_pool_operator_ids(&self, limit: i64) -> anyhow::Result> { - // TODO: Replace with real schema/table once defined (e.g., spo_operators) - // For now we query a non-existent placeholder; when integrated this will be updated. 
- let rows = sqlx::query_scalar::<_, String>("SELECT id FROM spo_operators ORDER BY id LIMIT $1") - .bind(limit) - .fetch_all(&*self.pool) - .await - .with_context(|| "query stake pool operator ids")?; - Ok(rows) - } -} - -// Future: introduce a trait abstraction if multiple backends are needed. diff --git a/spo-api/src/lib.rs b/spo-api/src/lib.rs deleted file mode 100644 index b69e98b5..00000000 --- a/spo-api/src/lib.rs +++ /dev/null @@ -1,10 +0,0 @@ -// This file is part of midnight-indexer. -// Copyright (C) 2025 Midnight Foundation -// SPDX-License-Identifier: Apache-2.0 - -pub mod application; -#[cfg(feature = "cloud")] -pub mod config; -pub mod domain; -#[cfg(feature = "cloud")] -pub mod infra; diff --git a/spo-api/src/main.rs b/spo-api/src/main.rs deleted file mode 100644 index 362738b3..00000000 --- a/spo-api/src/main.rs +++ /dev/null @@ -1,71 +0,0 @@ -// This file is part of midnight-indexer. -// Copyright (C) 2025 Midnight Foundation -// SPDX-License-Identifier: Apache-2.0 - -#[cfg(feature = "cloud")] -#[tokio::main] -async fn main() { - use log::error; - use indexer_common::telemetry; - use std::panic; - - telemetry::init_logging(); - panic::set_hook(Box::new(|panic| error!(panic:%; "process panicked"))); - - if let Err(error) = run().await { - let backtrace = error.backtrace(); - let error = format!("{error:#}"); - error!(error, backtrace:%; "process exited with ERROR") - } -} - -#[cfg(feature = "cloud")] -async fn run() -> anyhow::Result<()> { - use anyhow::Context; - use indexer_common::{config::ConfigExt, domain::NoopSubscriber, infra::pool, telemetry}; - use log::info; - use spo_api::{application, config::Config, infra, infra::api::{AxumApi, Db}}; - use tokio::signal::unix::{SignalKind, signal}; - - let sigterm = signal(SignalKind::terminate()).expect("SIGTERM handler can be registered"); - let config = Config::load().context("load configuration")?; - info!(config:?; "starting"); - let Config { - run_migrations: _, - application_config, - 
infra_config, - telemetry_config: - telemetry::Config { - tracing_config, - metrics_config, - }, - } = config; - - telemetry::init_tracing(tracing_config); - telemetry::init_metrics(metrics_config); - - let infra::Config { - api_config, - storage_config, - } = infra_config; - - // Create Postgres pool (for read-only access initially) and run migrations if/when added later. - let pool = pool::postgres::PostgresPool::new(storage_config) - .await - .context("create DB pool for Postgres")?; - - // Build API without NATS for now. - let api = AxumApi::new(api_config).with_db(Db(pool)); - - // Until we have a catch-up signal, application::run will just serve the API and listen for SIGTERM. - // Pass a no-op subscriber for now. - let subscriber = NoopSubscriber::default(); - application::run(application_config, api, subscriber, sigterm) - .await - .context("run SPO API application") -} - -#[cfg(not(feature = "cloud"))] -fn main() { - unimplemented!() -} diff --git a/spo-indexer/Cargo.toml b/spo-indexer/Cargo.toml index 32d5cc8f..34ef9bc2 100644 --- a/spo-indexer/Cargo.toml +++ b/spo-indexer/Cargo.toml @@ -42,7 +42,7 @@ subxt = { workspace = true, features = [ "reconnecting-rpc-client" thiserror = { workspace = true } tokio = { workspace = true, features = [ "macros", "rt-multi-thread", "time", "signal" ] } trait-variant = { workspace = true } -reqwest = { workspace = true, features = ["json", "rustls-tls"] } +reqwest = { workspace = true, features = ["json", "rustls"] } [dev-dependencies] clap = { workspace = true, features = [ "derive" ] } diff --git a/spo-indexer/config.yaml b/spo-indexer/config.yaml index d0c111a3..3daf328d 100644 --- a/spo-indexer/config.yaml +++ b/spo-indexer/config.yaml @@ -18,7 +18,7 @@ infra: port: 5432 dbname: "indexer" user: "indexer" # matches postgres service (POSTGRES_USER) - sslmode: "prefer" # consistent with spo-api + sslmode: "prefer" max_connections: 10 idle_timeout: "1m" max_lifetime: "5m" @@ -36,12 +36,12 @@ infra: 
genesis_protocol_version: 16000 reconnect_max_delay: "10s" # 10ms, 100ms, 1s, 10s reconnect_max_attempts: 30 # Roughly 5m - blockfrost_id: "previewukkFxumNW31cXmsBtKI1JTnbxvcVCbCj" + blockfrost_id: "${BLOCKFROST_PROJECT_ID}" telemetry: tracing: enabled: false - service_name: "chain-indexer" + service_name: "spo-indexer" otlp_exporter_endpoint: "http://localhost:4317" metrics: enabled: false diff --git a/spo-indexer/src/application.rs b/spo-indexer/src/application.rs index 41d8545b..b05d1041 100644 --- a/spo-indexer/src/application.rs +++ b/spo-indexer/src/application.rs @@ -24,121 +24,146 @@ use blake2::{ Blake2bVar, digest::{Update, VariableOutput}, }; +use log::{debug, error, info, warn}; use serde::Deserialize; use std::{cmp, collections::HashMap, time::Duration}; use subxt::utils::to_hex; -use tokio::time; +use tokio::{ + select, + signal::unix::Signal, + time::{interval, sleep}, +}; #[derive(Debug, Clone, Deserialize)] pub struct Config { pub interval: u32, - /// Stake refresh config (mandatory) + /// Stake refresh config (mandatory). pub stake_refresh: StakeRefreshConfig, } #[derive(Debug, Clone, Deserialize)] pub struct StakeRefreshConfig { - /// How often to refresh stake data in seconds + /// How often to refresh stake data in seconds. pub period_secs: u64, - /// Number of pools to fetch per cycle + /// Number of pools to fetch per cycle. pub page_size: u32, - /// Max requests per second to Blockfrost (rudimentary rate limit) + /// Max requests per second to Blockfrost (rudimentary rate limit). pub max_rps: u32, } -pub async fn run(config: Config, client: SPOClient, storage: impl Storage) -> anyhow::Result<()> { - // Mandatory background task: refresh stake snapshots periodically using Blockfrost +pub async fn run( + config: Config, + client: SPOClient, + storage: impl Storage, + mut sigterm: Signal, +) -> anyhow::Result<()> { + // Mandatory background task: refresh stake snapshots periodically using Blockfrost. 
let st_cfg = config.stake_refresh.clone(); let storage_bg = storage.clone(); let client_bg = client.clone(); tokio::spawn(async move { - let mut ticker = time::interval(Duration::from_secs(st_cfg.period_secs.max(60))); - // initial delay to avoid hammering on startup + let mut ticker = interval(Duration::from_secs(st_cfg.period_secs.max(60))); + // Initial delay to avoid hammering on startup. ticker.tick().await; loop { ticker.tick().await; - if let Err(e) = refresh_stake_snapshots(&client_bg, &storage_bg, &st_cfg).await { - eprintln!("stake refresh failed: {e:?}"); + if let Err(error) = refresh_stake_snapshots(&client_bg, &storage_bg, &st_cfg).await { + error!("stake refresh failed: {error:?}"); } } }); - loop { - let cur_epoch = get_epoch_to_process(&client, &storage).await?; + let poll_interval = Duration::from_secs(config.interval.into()); - if cur_epoch.is_none() { - println!("latest epoch reached"); - time::sleep(Duration::new(config.interval.into(), 0)).await; - continue; + loop { + select! { + result = process_next_epoch(poll_interval, &client, &storage) => { + result?; + } + _ = sigterm.recv() => { + warn!("SIGTERM received"); + return Ok(()); + } } + } +} - let epoch = cur_epoch.unwrap(); - println!("processing epoch {}", epoch.epoch_no); +async fn process_next_epoch( + poll_interval: Duration, + client: &SPOClient, + storage: &impl Storage, +) -> anyhow::Result<()> { + let Some(epoch) = get_epoch_to_process(client, storage).await? 
else { + debug!("latest epoch reached"); + sleep(poll_interval).await; + return Ok(()); + }; + info!(epoch_no = epoch.epoch_no; "processing epoch"); - let mut tx = storage.create_tx().await?; - let committee = client.get_committee(epoch.epoch_no).await?; - let raw_spos = client.get_spo_registrations(epoch.epoch_no).await?; - let membership = committee_to_membership(&client, &committee); + let mut tx = storage.create_tx().await?; + let committee = client.get_committee(epoch.epoch_no).await?; + let raw_spos = client.get_spo_registrations(epoch.epoch_no).await?; + let membership = committee_to_membership(client, &committee); - storage.save_epoch(&epoch, &mut tx).await?; - storage.save_membership(&membership, &mut tx).await?; + storage.save_epoch(&epoch, &mut tx).await?; + storage.save_membership(&membership, &mut tx).await?; - let mut blocks_produced: HashMap = HashMap::new(); - let mut val_to_registration: HashMap = HashMap::new(); + let mut blocks_produced: HashMap = HashMap::new(); + let mut val_to_registration: HashMap = HashMap::new(); - for (_, registrations) in raw_spos.candidate_registrations { - for raw_spo in ®istrations { - let cardano_id = get_cardano_id(&raw_spo.mainchain_pub_key); - // Normalize all keys by stripping optional 0x prefix for consistency with DB values - let spo_sk = remove_hex_prefix(raw_spo.sidechain_pub_key.to_string()); + for (_, registrations) in raw_spos.candidate_registrations { + for raw_spo in ®istrations { + let cardano_id = get_cardano_id(&raw_spo.mainchain_pub_key); + // Normalize all keys by stripping optional 0x prefix for consistency with DB values. 
+ let spo_sk = remove_hex_prefix(&raw_spo.sidechain_pub_key).to_owned(); - val_to_registration.insert(spo_sk.clone(), raw_spo.clone()); - save_pool_metadata(&client, &storage, &mut tx, cardano_id.clone()).await?; - save_spo_identity(&storage, &raw_spo, cardano_id, &mut tx).await?; - save_spo_history(&storage, &raw_spo, epoch.epoch_no.into(), &mut tx).await?; + val_to_registration.insert(spo_sk.clone(), raw_spo.clone()); + save_pool_metadata(client, storage, &mut tx, cardano_id.clone()).await?; + save_spo_identity(storage, raw_spo, cardano_id, &mut tx).await?; + save_spo_history(storage, raw_spo, epoch.epoch_no.into(), &mut tx).await?; - let count_mk = blocks_produced.entry(spo_sk).or_insert(0); - *count_mk += 1; - } + let count_mk = blocks_produced.entry(spo_sk).or_insert(0); + *count_mk += 1; } + } - println!("\tcommittee size: {}", committee.len()); - if committee.len() > 0 { - let blocks_remainder = client.epoch_duration % committee.len() as u32; - let expected_blocks = get_expected_blocks(&client, &epoch, committee.len() as u32); - - for (index, spo) in committee.iter().enumerate() { - let spo_sk = remove_hex_prefix(spo.sidechain_pubkey.to_string()); - let produced = blocks_produced.get(&spo_sk); - - // only count if the validator has produced a block - if produced.is_some() { - let raw_spo = val_to_registration.get(&spo_sk).unwrap(); - let cardano_id = get_cardano_id(&raw_spo.mainchain_pub_key); - - let spo_performance = SPOEpochPerformance { - spo_sk, - epoch_no: epoch.epoch_no as u64, - expected_blocks: expected_blocks - + (if (index as u32) < blocks_remainder { - 1 - } else { - 0 - }), - produced_blocks: *produced.unwrap() as u64, - identity_label: cardano_id, - }; - - storage - .save_spo_performance(&spo_performance, &mut tx) - .await?; - } + debug!(committee_size = committee.len(); "committee"); + if !committee.is_empty() { + let blocks_remainder = client.epoch_duration % committee.len() as u32; + let expected_blocks = get_expected_blocks(client, &epoch, 
committee.len() as u32); + + for (index, spo) in committee.iter().enumerate() { + let spo_sk = remove_hex_prefix(&spo.sidechain_pubkey).to_owned(); + // Only count if the validator has produced a block. + if let Some(&produced_count) = blocks_produced.get(&spo_sk) { + let raw_spo = val_to_registration + .get(&spo_sk) + .expect("validator should have registration"); + let cardano_id = get_cardano_id(&raw_spo.mainchain_pub_key); + + let spo_performance = SPOEpochPerformance { + spo_sk, + epoch_no: epoch.epoch_no as u64, + expected_blocks: expected_blocks + + (if (index as u32) < blocks_remainder { + 1 + } else { + 0 + }), + produced_blocks: produced_count as u64, + identity_label: cardano_id, + }; + + storage + .save_spo_performance(&spo_performance, &mut tx) + .await?; } } - - tx.commit().await?; - println!("processed epoch {}", epoch.epoch_no); } + + tx.commit().await?; + info!(epoch_no = epoch.epoch_no; "processed epoch"); + Ok(()) } async fn refresh_stake_snapshots( @@ -154,27 +179,27 @@ async fn refresh_stake_snapshots( .ok() .map(|s| s.mainchain.epoch as i64); - // Cursor-based paging: resume after last_pool_id, then wrap to start + // Cursor-based paging: resume after last_pool_id, then wrap to start. let start_after = storage.get_stake_refresh_cursor().await?; let after = start_after.clone(); - // First page: after last_pool_id + // First page: after last_pool_id. let mut pool_ids = if let Some(ref last) = after { - storage.list_pool_ids_after(last, limit).await? + storage.get_pool_ids_after(last, limit).await? } else { - storage.list_pool_ids(limit, 0).await? + storage.get_pool_ids(limit, 0).await? }; - // If empty, wrap-around from beginning + // If empty, wrap-around from beginning. if pool_ids.is_empty() { - pool_ids = storage.list_pool_ids(limit, 0).await?; + pool_ids = storage.get_pool_ids(limit, 0).await?; } if pool_ids.is_empty() { return Ok(()); } - // Rate limiting + // Rate limiting. 
let sleep_per_req_ms = if cfg.max_rps == 0 { 0 } else { @@ -212,12 +237,12 @@ async fn refresh_stake_snapshots( .await?; total_updated += 1; } - Err(err) => { - eprintln!("stake refresh for {pid} failed: {err:?}"); + Err(error) => { + error!("stake refresh for {pid} failed: {error:?}"); } } if sleep_per_req_ms > 0 { - time::sleep(Duration::from_millis(sleep_per_req_ms)).await; + sleep(Duration::from_millis(sleep_per_req_ms)).await; } } tx.commit().await?; @@ -227,10 +252,7 @@ async fn refresh_stake_snapshots( storage.set_stake_refresh_cursor(last_id).await?; if total_updated > 0 { - println!( - "stake refresh: updated {} pools (cursor at {:?})", - total_updated, last_id - ); + info!(total_updated, cursor:? = last_id; "stake refresh completed"); } Ok(()) } @@ -241,8 +263,8 @@ async fn save_spo_history( epoch: u64, tx: &mut SqlxTransaction, ) -> anyhow::Result<()> { - // Normalize to hex without 0x - let spo_sk = remove_hex_prefix(raw_spo.sidechain_pub_key.to_string()); + // Normalize to hex without 0x. + let spo_sk = remove_hex_prefix(&raw_spo.sidechain_pub_key).to_owned(); let spo = SPOHistory { spo_sk: spo_sk.clone(), @@ -264,16 +286,16 @@ async fn save_spo_identity( cardano_id: String, tx: &mut SqlxTransaction, ) -> anyhow::Result<()> { - // Normalize all hex-like identifiers to avoid mixed representations - let spo_sk = remove_hex_prefix(raw_spo.sidechain_pub_key.to_string()); - let aura_pk = remove_hex_prefix(raw_spo.keys.aura.to_string()); - let main_pk = remove_hex_prefix(raw_spo.mainchain_pub_key.to_string()); + // Normalize all hex-like identifiers to avoid mixed representations. 
+ let spo_sk = remove_hex_prefix(&raw_spo.sidechain_pub_key).to_owned(); + let aura_pk = remove_hex_prefix(&raw_spo.keys.aura).to_owned(); + let main_pk = remove_hex_prefix(&raw_spo.mainchain_pub_key).to_owned(); let spo = SPO { spo_sk: spo_sk.clone(), - sidechain_pubkey: spo_sk.clone(), - pool_id: cardano_id.clone().to_string(), - aura_pubkey: aura_pk.clone(), + sidechain_pubkey: spo_sk, + pool_id: cardano_id, + aura_pubkey: aura_pk, mainchain_pubkey: main_pk, }; @@ -287,20 +309,17 @@ async fn save_pool_metadata( tx: &mut SqlxTransaction, cardano_id: String, ) -> anyhow::Result<()> { - let meta = client.get_pool_metadata(cardano_id.clone()).await; - - let saved_meta = if meta.is_ok() { - meta? - } else { - PoolMetadata { - pool_id: cardano_id.to_string(), - hex_id: cardano_id.to_string(), - name: "".to_string(), - ticker: "".to_string(), - homepage_url: "".to_string(), - url: "".to_string(), - } - }; + let saved_meta = client + .get_pool_metadata(cardano_id.clone()) + .await + .unwrap_or_else(|_| PoolMetadata { + pool_id: cardano_id.clone(), + hex_id: cardano_id, + name: String::new(), + ticker: String::new(), + homepage_url: String::new(), + url: String::new(), + }); storage.save_pool_meta(&saved_meta, tx).await?; Ok(()) @@ -316,9 +335,9 @@ fn get_expected_blocks(client: &SPOClient, epoch: &Epoch, committee_size: u32) - fn committee_to_membership( client: &SPOClient, - committee: &Vec, + committee: &[Validator], ) -> Vec { - if committee.len() == 0 { + if committee.is_empty() { return vec![]; } @@ -327,16 +346,16 @@ fn committee_to_membership( let leftover = slots_per_epoch % num_validators; committee - .into_iter() + .iter() .enumerate() .map(|(index, c)| ValidatorMembership { epoch_no: c.epoch_no, position: c.position, - // Normalize to hex without 0x for consistency with identity/performance - spo_sk: remove_hex_prefix(c.sidechain_pubkey.clone()), - sidechain_pubkey: remove_hex_prefix(c.sidechain_pubkey.clone()), + // Normalize to hex without 0x for 
consistency with identity/performance. + spo_sk: remove_hex_prefix(&c.sidechain_pubkey).to_owned(), + sidechain_pubkey: remove_hex_prefix(&c.sidechain_pubkey).to_owned(), expected_slots: slots_per_epoch / num_validators - + if leftover > index.try_into().unwrap() { + + if leftover > index.try_into().expect("index should fit in u32") { 1 } else { 0 @@ -345,17 +364,16 @@ fn committee_to_membership( .collect() } -/// if option is None, it means that we are already at the latest epoch +/// If option is None, it means that we are already at the latest epoch. async fn get_epoch_to_process( client: &SPOClient, storage: &impl Storage, ) -> anyhow::Result> { let latest_processed = storage.get_latest_epoch().await?; let current_epoch = client.get_current_epoch().await?; - let latest_epoch_num = if latest_processed.is_some() { - latest_processed.unwrap().epoch_no - } else { - client.get_first_epoch_num().await? + let latest_epoch_num = match latest_processed { + Some(epoch) => epoch.epoch_no, + None => client.get_first_epoch_num().await?, }; let time_offset: i64 = @@ -372,15 +390,17 @@ async fn get_epoch_to_process( } } -fn get_cardano_id(mainchain_pk: &String) -> String { - let mainchain_pk = hex_to_bytes(&mainchain_pk); - let mut hasher = Blake2bVar::new(28).unwrap(); +fn get_cardano_id(mainchain_pk: &str) -> String { + let mainchain_pk = hex_to_bytes(mainchain_pk); + let mut hasher = Blake2bVar::new(28).expect("blake2b output size 28 is valid"); hasher.update(&mainchain_pk); let mut buffer = [0u8; 28]; - hasher.finalize_variable(&mut buffer).unwrap(); + hasher + .finalize_variable(&mut buffer) + .expect("blake2b finalize should succeed with valid buffer"); - let hex_hash = to_hex(&buffer); + let hex_hash = to_hex(buffer); - remove_hex_prefix(hex_hash.to_string()) -} \ No newline at end of file + remove_hex_prefix(&hex_hash).to_owned() +} diff --git a/spo-indexer/src/domain/rpc.rs b/spo-indexer/src/domain/rpc.rs index 1e99f9bc..b572bdd5 100644 --- 
a/spo-indexer/src/domain/rpc.rs +++ b/spo-indexer/src/domain/rpc.rs @@ -255,4 +255,4 @@ pub enum BodyItem { pub struct TimestampExtrinsic { #[serde(rename = "Timestamp")] pub timestamp_ms: u64, -} \ No newline at end of file +} diff --git a/spo-indexer/src/domain/spo.rs b/spo-indexer/src/domain/spo.rs index 420398f7..c71e9e39 100644 --- a/spo-indexer/src/domain/spo.rs +++ b/spo-indexer/src/domain/spo.rs @@ -23,6 +23,7 @@ pub struct SPOEpochPerformance { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub enum SPOStatus { Valid, + Invalid, } diff --git a/spo-indexer/src/domain/storage.rs b/spo-indexer/src/domain/storage.rs index 514ac883..3c072518 100644 --- a/spo-indexer/src/domain/storage.rs +++ b/spo-indexer/src/domain/storage.rs @@ -24,7 +24,7 @@ pub type SqlxTransaction = sqlx::Transaction<'static, sqlx::Postgres>; pub type SqlxTransaction = sqlx::Transaction<'static, sqlx::Sqlite>; #[cfg(not(any(feature = "cloud", feature = "standalone")))] -/// Default to Postgres when no feature is explicitly enabled (workspace builds) +/// Default to Postgres when no feature is explicitly enabled (workspace builds). pub type SqlxTransaction = sqlx::Transaction<'static, sqlx::Postgres>; /// Storage abstraction. @@ -43,7 +43,7 @@ where async fn save_membership( &self, - memberships: &Vec, + memberships: &[ValidatorMembership], tx: &mut SqlxTransaction, ) -> Result<(), sqlx::Error>; @@ -67,9 +67,11 @@ where /// Return a page of pool_ids known to the system (for stake refreshers). /// Implementations should order by most recently updated metadata first when possible. 
- async fn list_pool_ids(&self, limit: i64, offset: i64) -> Result, sqlx::Error>; - /// Return pool_ids after a given id, lexicographically, for cursor-based rotation - async fn list_pool_ids_after(&self, after: &str, limit: i64) -> Result, sqlx::Error>; + async fn get_pool_ids(&self, limit: i64, offset: i64) -> Result, sqlx::Error>; + + /// Return pool_ids after a given id, lexicographically, for cursor-based rotation. + async fn get_pool_ids_after(&self, after: &str, limit: i64) + -> Result, sqlx::Error>; /// Upsert latest stake snapshot for a pool. async fn save_stake_snapshot( @@ -84,7 +86,7 @@ where tx: &mut SqlxTransaction, ) -> Result<(), sqlx::Error>; - /// Append a history row for stake + /// Append a history row for stake. async fn insert_stake_history( &self, pool_id: &str, @@ -98,7 +100,7 @@ where tx: &mut SqlxTransaction, ) -> Result<(), sqlx::Error>; - /// Refresh cursor helpers + /// Refresh cursor helpers. async fn get_stake_refresh_cursor(&self) -> Result, sqlx::Error>; async fn set_stake_refresh_cursor(&self, pool_id: Option<&str>) -> Result<(), sqlx::Error>; } diff --git a/spo-indexer/src/infra/storage.rs b/spo-indexer/src/infra/storage.rs index bc92c053..bc9da2bf 100644 --- a/spo-indexer/src/infra/storage.rs +++ b/spo-indexer/src/infra/storage.rs @@ -72,7 +72,7 @@ impl domain::storage::Storage for Storage { .map(|(epoch_no, starts_at, ends_at)| { Ok(Epoch { epoch_no: epoch_no as u32, - // return millis to domain + // Return millis to domain. starts_at: starts_at.timestamp_millis(), ends_at: ends_at.timestamp_millis(), }) @@ -87,7 +87,7 @@ impl domain::storage::Storage for Storage { VALUES ($1, $2, $3)" }) .bind(epoch.epoch_no as i64) - // epoch.starts_at/ends_at are in millis; store as timestamptz + // Epoch starts_at/ends_at are in millis; store as timestamptz. 
.bind( DateTime::from_timestamp( epoch.starts_at / 1000, @@ -142,7 +142,7 @@ impl domain::storage::Storage for Storage { #[trace] async fn save_membership( &self, - memberships: &Vec, + memberships: &[ValidatorMembership], tx: &mut SqlxTransaction, ) -> Result<(), sqlx::Error> { for member in memberships.iter() { @@ -156,7 +156,7 @@ impl domain::storage::Storage for Storage { expected_slots ) VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (epoch_no, position) DO NOTHING" // Prevents re-insertion errors + ON CONFLICT (epoch_no, position) DO NOTHING" // Prevents re-insertion errors. }) .bind(&member.spo_sk) .bind(&member.sidechain_pubkey) @@ -259,7 +259,7 @@ impl domain::storage::Storage for Storage { Ok(()) } - async fn list_pool_ids(&self, limit: i64, offset: i64) -> Result, sqlx::Error> { + async fn get_pool_ids(&self, limit: i64, offset: i64) -> Result, sqlx::Error> { let query = indoc! {" SELECT pool_id FROM pool_metadata_cache @@ -276,7 +276,11 @@ impl domain::storage::Storage for Storage { Ok(rows.into_iter().map(|(pid,)| pid).collect()) } - async fn list_pool_ids_after(&self, after: &str, limit: i64) -> Result, sqlx::Error> { + async fn get_pool_ids_after( + &self, + after: &str, + limit: i64, + ) -> Result, sqlx::Error> { let query = indoc! {" SELECT pool_id FROM pool_metadata_cache @@ -304,7 +308,7 @@ impl domain::storage::Storage for Storage { live_pledge: Option<&str>, tx: &mut SqlxTransaction, ) -> Result<(), sqlx::Error> { - // Call the inherent implementation to avoid recursive call to the trait method + // Call the inherent implementation to avoid recursive call to the trait method. 
Storage::save_stake_snapshot( self, pool_id, @@ -370,7 +374,6 @@ impl domain::storage::Storage for Storage { .await?; Ok(()) } - } impl Storage { @@ -411,5 +414,4 @@ impl Storage { Ok(()) } - } diff --git a/spo-indexer/src/infra/subxt_node.rs b/spo-indexer/src/infra/subxt_node.rs index 4cc858dd..6572b027 100644 --- a/spo-indexer/src/infra/subxt_node.rs +++ b/spo-indexer/src/infra/subxt_node.rs @@ -20,19 +20,19 @@ use crate::{ }; use blockfrost::{BlockfrostAPI, BlockfrostError}; use indexer_common::error::BoxError; -use log::error; -use polkadot::sidechain::storage::types::epoch_number::EpochNumber; use reqwest::Client as HttpClient; use secrecy::{ExposeSecret, SecretString}; use serde_json::value::RawValue; -use std::collections::HashMap; +use std::{collections::HashMap, time::Duration}; use subxt::{ OnlineClient, PolkadotConfig, - backend::rpc::reconnecting_rpc_client::{ExponentialBackoff, RpcClient}, - utils::H256, + backend::{ + legacy::LegacyRpcMethods, + rpc::reconnecting_rpc_client::{ExponentialBackoff, RpcClient}, + }, }; use thiserror::Error; -use tokio::time; +use tokio::time::sleep; const SLOT_PER_EPOCH_KEY: &str = "3eaeb1cee77dc09baac326e5a1d29726f38178a5f54bee65a8446a55b585f261"; const MIN_COMMITTEE_SIZE: usize = 300; @@ -63,11 +63,12 @@ pub struct SPOClient { rpc_client: RpcClient, blockfrost: BlockfrostAPI, http: HttpClient, - config: Config, + reconnect_delay: Duration, + blockfrost_id: SecretString, api: OnlineClient, } -// we will try to eliminate the 0x from any hex string out of this function +// We will try to eliminate the 0x from any hex string out of this function. impl SPOClient { /// Create a new [SPOClient] with the given [Config]. 
pub async fn new(config: Config) -> Result { @@ -83,11 +84,11 @@ impl SPOClient { .await .map_err(|error| SPOClientError::Subtx(error.into()))?; let blockfrost = - BlockfrostAPI::new(&config.blockfrost_id.expose_secret(), Default::default()); + BlockfrostAPI::new(config.blockfrost_id.expose_secret(), Default::default()); let http = HttpClient::builder() .user_agent("midnight-spo-indexer/1.0") .build() - .map_err(|e| SPOClientError::UnexpectedResponse(e.to_string()))?; + .map_err(|error| SPOClientError::UnexpectedResponse(error.to_string()))?; let (epoch_duration, slots_per_epoch) = get_epoch_duration(&api).await?; Ok(Self { @@ -96,48 +97,53 @@ impl SPOClient { http, epoch_duration, slots_per_epoch, - config, - api: api, + reconnect_delay: config.reconnect_max_delay, + blockfrost_id: config.blockfrost_id, + api, }) } pub async fn get_sidechain_status(&self) -> Result { let raw_response = self .rpc_client - .request("sidechain_getStatus".to_string(), None) + .request("sidechain_getStatus".to_owned(), None) .await - .map_err(|e| { - SPOClientError::RpcCall("sidechain_getStatus".to_string(), e.to_string()) + .map_err(|error| { + SPOClientError::RpcCall("sidechain_getStatus".to_owned(), error.to_string()) })?; let response: SidechainStatusResponse = serde_json::from_str(raw_response.get()) .map_err(|error| SPOClientError::UnexpectedResponse(error.to_string()))?; - return Ok(response); + Ok(response) } pub async fn get_block_timestamp(&self, block_number: u32) -> Result { - let params_blockhash = RawValue::from_string(format!("[{}]", block_number)).unwrap(); - let blockhash_res = self - .rpc_client - .request("chain_getBlockHash".to_string(), Some(params_blockhash)) + let legacy_rpc = LegacyRpcMethods::::new(self.rpc_client.clone().into()); + let blockhash = legacy_rpc + .chain_get_block_hash(Some(block_number.into())) .await - .map_err(|e| { - SPOClientError::RpcCall("chain_getBlockHash".to_string(), e.to_string()) + .map_err(|error| { + 
SPOClientError::RpcCall("chain_getBlockHash".to_owned(), error.to_string()) + })? + .ok_or_else(|| { + SPOClientError::UnexpectedResponse(format!( + "block hash not found for block {block_number}" + )) })?; - let str_blockhash = remove_hex_prefix(blockhash_res.get().to_string().replace("\"", "")); - let raw_blockhash = H256::from_slice(hex::decode(str_blockhash).unwrap().as_slice()); let storage_query = polkadot::storage().timestamp().now(); let result = self .api .storage() - .at(raw_blockhash) + .at(blockhash) .fetch(&storage_query) .await - .unwrap(); - let timestamp = result.unwrap(); + .map_err(|error| SPOClientError::Subtx(error.into()))?; + let timestamp = result.ok_or_else(|| { + SPOClientError::UnexpectedResponse("timestamp not found in block storage".to_owned()) + })?; Ok(timestamp) } @@ -148,7 +154,7 @@ impl SPOClient { let epoch_duration = self.epoch_duration; let num_epochs: u64 = - (current_epoch.ends_at as u64 - block_timestamp as u64) / (epoch_duration as u64); + (current_epoch.ends_at as u64 - block_timestamp) / (epoch_duration as u64); Ok(current_epoch.epoch_no - num_epochs as u32) } @@ -161,156 +167,154 @@ impl SPOClient { ends_at: sidechain_status.sidechain.next_epoch_timestamp, }; - return Ok(epoch); + Ok(epoch) } pub async fn get_spo_registrations( &self, epoch_number: u32, ) -> Result { - let rpc_params = RawValue::from_string(format!("[{}]", epoch_number)).unwrap(); + let rpc_params = RawValue::from_string(format!("[{epoch_number}]")).map_err(|error| { + SPOClientError::UnexpectedResponse(format!("failed to create RPC params: {error}")) + })?; let raw_response = self .rpc_client .request( - "sidechain_getAriadneParameters".to_string(), + "sidechain_getAriadneParameters".to_owned(), Some(rpc_params), ) .await - .map_err(|e| { - SPOClientError::RpcCall("sidechain_getAriadneParameters".to_string(), e.to_string()) + .map_err(|error| { + SPOClientError::RpcCall( + "sidechain_getAriadneParameters".to_owned(), + error.to_string(), + ) })?; let mut 
reg_response: SPORegistrationResponse = serde_json::from_str(raw_response.get()) .map_err(|error| SPOClientError::UnexpectedResponse(error.to_string()))?; let mut response: HashMap> = HashMap::new(); - for (mut key, registrations) in reg_response.clone().candidate_registrations { - key = remove_hex_prefix(key); + for (key, registrations) in reg_response.clone().candidate_registrations { + let key = remove_hex_prefix(&key).to_owned(); - let cleaned_registrations: Vec = registrations + let cleaned_registrations = registrations .into_iter() .map(|reg| CandidateRegistration { - sidechain_pub_key: remove_hex_prefix(reg.sidechain_pub_key), + sidechain_pub_key: remove_hex_prefix(®.sidechain_pub_key).to_owned(), sidechain_account_id: reg.sidechain_account_id, - mainchain_pub_key: remove_hex_prefix(reg.mainchain_pub_key), - cross_chain_pub_key: remove_hex_prefix(reg.cross_chain_pub_key), + mainchain_pub_key: remove_hex_prefix(®.mainchain_pub_key).to_owned(), + cross_chain_pub_key: remove_hex_prefix(®.cross_chain_pub_key).to_owned(), keys: CandidateKeys { - aura: remove_hex_prefix(reg.keys.aura), - gran: remove_hex_prefix(reg.keys.gran), + aura: remove_hex_prefix(®.keys.aura).to_owned(), + gran: remove_hex_prefix(®.keys.gran).to_owned(), }, - sidechain_signature: remove_hex_prefix(reg.sidechain_signature), - mainchain_signature: remove_hex_prefix(reg.mainchain_signature), - cross_chain_signature: remove_hex_prefix(reg.cross_chain_signature), + sidechain_signature: remove_hex_prefix(®.sidechain_signature).to_owned(), + mainchain_signature: remove_hex_prefix(®.mainchain_signature).to_owned(), + cross_chain_signature: remove_hex_prefix(®.cross_chain_signature).to_owned(), utxo: reg.utxo, is_valid: reg.is_valid, invalid_reasons: reg.invalid_reasons, }) - .collect(); + .collect::>(); response.insert(key, cleaned_registrations); } reg_response.candidate_registrations = response; - return Ok(reg_response); + Ok(reg_response) } pub async fn get_committee(&self, epoch_number: u32) -> 
Result, SPOClientError> { - let rpc_params = RawValue::from_string(format!("[{}]", epoch_number)).map_err(|e| { - SPOClientError::UnexpectedResponse(format!("Failed to create RPC params: {}", e)) + let rpc_params = RawValue::from_string(format!("[{epoch_number}]")).map_err(|error| { + SPOClientError::UnexpectedResponse(format!("failed to create RPC params: {error}")) })?; loop { let raw_response = self .rpc_client .request( - "sidechain_getEpochCommittee".to_string(), + "sidechain_getEpochCommittee".to_owned(), Some(rpc_params.clone()), ) .await - .map_err(|e| { + .map_err(|error| { SPOClientError::RpcCall( - "sidechain_getEpochCommittee".to_string(), - e.to_string(), + "sidechain_getEpochCommittee".to_owned(), + error.to_string(), ) }); - if raw_response.is_err() { + let Ok(raw_response) = raw_response else { return Ok(vec![]); - } + }; - let response: EpochCommitteeResponse = serde_json::from_str(raw_response?.get()) + let response: EpochCommitteeResponse = serde_json::from_str(raw_response.get()) .map_err(|error| SPOClientError::UnexpectedResponse(error.to_string()))?; - let committee_size = response.committee.len(); - if committee_size >= MIN_COMMITTEE_SIZE { - let mut committee = vec![]; - for (index, pk) in response.committee.iter().enumerate() { - committee.push(Validator { + if response.committee.len() >= MIN_COMMITTEE_SIZE { + let committee = response + .committee + .iter() + .enumerate() + .map(|(index, pk)| Validator { epoch_no: response.sidechain_epoch, position: index as u64, - sidechain_pubkey: remove_hex_prefix(pk.sidechain_pub_key.clone()), - }); - } + sidechain_pubkey: remove_hex_prefix(&pk.sidechain_pub_key).to_owned(), + }) + .collect::>(); return Ok(committee); } - time::sleep(time::Duration::from_secs( - self.config.reconnect_max_delay.as_secs(), - )) - .await; + sleep(self.reconnect_delay).await; } } pub async fn get_pool_metadata(&self, pool_id: String) -> Result { - let raw_meta = self - .blockfrost - .pools_metadata(&pool_id) - .await - 
.map_err(|error| SPOClientError::Blockfrost(error))?; + let raw_meta = self.blockfrost.pools_metadata(&pool_id).await?; let meta = PoolMetadata { - pool_id: pool_id, - hex_id: remove_hex_prefix(raw_meta.hex), - name: raw_meta.name.unwrap_or("".to_string()), - ticker: raw_meta.ticker.unwrap_or("".to_string()), - homepage_url: raw_meta.homepage.unwrap_or("".to_string()), - url: raw_meta.url.unwrap_or("".to_string()), + pool_id, + hex_id: remove_hex_prefix(&raw_meta.hex).to_owned(), + name: raw_meta.name.unwrap_or_default(), + ticker: raw_meta.ticker.unwrap_or_default(), + homepage_url: raw_meta.homepage.unwrap_or_default(), + url: raw_meta.url.unwrap_or_default(), }; Ok(meta) } - /// Minimal pool stake data from Blockfrost /pools/{pool_id} + /// Minimal pool stake data from Blockfrost /pools/{pool_id}. pub async fn get_pool_data(&self, pool_id: &str) -> Result { let base = self.blockfrost_base_url(); - let url = format!("{}/pools/{}", base, pool_id); + let url = format!("{base}/pools/{pool_id}"); let resp = self .http .get(&url) - .header("project_id", self.config.blockfrost_id.expose_secret()) + .header("project_id", self.blockfrost_id.expose_secret()) .send() .await - .map_err(|e| SPOClientError::UnexpectedResponse(e.to_string()))?; + .map_err(|error| SPOClientError::UnexpectedResponse(error.to_string()))?; let status = resp.status(); if !status.is_success() { let txt = resp.text().await.unwrap_or_default(); return Err(SPOClientError::UnexpectedResponse(format!( - "Blockfrost GET /pools failed: {} {}", - status, txt + "blockfrost GET /pools failed: {status} {txt}" ))); } let v: serde_json::Value = resp .json() .await - .map_err(|e| SPOClientError::UnexpectedResponse(e.to_string()))?; + .map_err(|error| SPOClientError::UnexpectedResponse(error.to_string()))?; Ok(PoolStakeData::from_json(&v)) } fn blockfrost_base_url(&self) -> &'static str { - let id = self.config.blockfrost_id.expose_secret(); + let id = self.blockfrost_id.expose_secret(); if 
id.starts_with("mainnet") { "https://cardano-mainnet.blockfrost.io/api/v0" } else if id.starts_with("preprod") { @@ -320,7 +324,7 @@ impl SPOClient { } else if id.starts_with("testnet") { "https://cardano-testnet.blockfrost.io/api/v0" } else { - // default to preview + // Default to preview. "https://cardano-preview.blockfrost.io/api/v0" } } @@ -341,18 +345,18 @@ impl PoolStakeData { Self { live_stake: v .get("live_stake") - .and_then(|x| x.as_str().map(|s| s.to_string())), + .and_then(|x| x.as_str().map(|s| s.to_owned())), active_stake: v .get("active_stake") - .and_then(|x| x.as_str().map(|s| s.to_string())), + .and_then(|x| x.as_str().map(|s| s.to_owned())), live_delegators: v.get("live_delegators").and_then(|x| x.as_i64()), live_saturation: v.get("live_saturation").and_then(|x| x.as_f64()), declared_pledge: v .get("declared_pledge") - .and_then(|x| x.as_str().map(|s| s.to_string())), + .and_then(|x| x.as_str().map(|s| s.to_owned())), live_pledge: v .get("live_pledge") - .and_then(|x| x.as_str().map(|s| s.to_string())), + .and_then(|x| x.as_str().map(|s| s.to_owned())), } } } @@ -360,18 +364,23 @@ impl PoolStakeData { async fn get_epoch_duration( api: &OnlineClient, ) -> Result<(u32, u32), SPOClientError> { - let slot: Vec = hex::decode(SLOT_PER_EPOCH_KEY).unwrap(); - let storage_cli = api + let slot = + hex::decode(SLOT_PER_EPOCH_KEY).expect("SLOT_PER_EPOCH_KEY constant should be valid hex"); + let storage = api .storage() .at_latest() .await .map_err(|error| SPOClientError::Subtx(error.into()))?; - let res = storage_cli - .fetch_raw(slot) - .await - .map_err(|_| SPOClientError::UnexpectedResponse("".to_string()))?; - let raw_response: [u8; 4] = res.unwrap().try_into().unwrap(); + let res = storage.fetch_raw(slot).await.map_err(|_| { + SPOClientError::UnexpectedResponse("failed to fetch slots per epoch".to_owned()) + })?; + let raw_bytes = res.ok_or_else(|| { + SPOClientError::UnexpectedResponse("slots per epoch storage value not found".to_owned()) + })?; + let 
raw_response: [u8; 4] = raw_bytes.try_into().map_err(|_| { + SPOClientError::UnexpectedResponse("slots per epoch should be 4 bytes".to_owned()) + })?; let slots_per_epoch = u32::from_le_bytes(raw_response); Ok((SLOT_DURATION * slots_per_epoch, slots_per_epoch)) @@ -390,4 +399,4 @@ pub enum SPOClientError { #[error("unexpected error {0}")] UnexpectedResponse(String), -} \ No newline at end of file +} diff --git a/spo-indexer/src/main.rs b/spo-indexer/src/main.rs index e558fca6..0ad4abff 100644 --- a/spo-indexer/src/main.rs +++ b/spo-indexer/src/main.rs @@ -28,7 +28,8 @@ async fn main() { if let Err(error) = run().await { let backtrace = error.backtrace(); let error = format!("{error:#}"); - error!(error, backtrace:%; "process exited with ERROR") + error!(error, backtrace:%; "process exited with ERROR"); + std::process::exit(1); } } @@ -46,6 +47,10 @@ async fn run() -> anyhow::Result<()> { config::Config, infra::{self, subxt_node::SPOClient}, }; + use tokio::signal::unix::{SignalKind, signal}; + + // Register SIGTERM handler. + let sigterm = signal(SignalKind::terminate()).expect("SIGTERM handler can be registered"); // Load configuration. 
let config = Config::load().context("load configuration")?; @@ -79,7 +84,7 @@ async fn run() -> anyhow::Result<()> { } let storage = infra::storage::Storage::new(pool); - application::run(application_config, node, storage).await + application::run(application_config, node, storage, sigterm).await } #[cfg(not(feature = "cloud"))] diff --git a/spo-indexer/src/utils.rs b/spo-indexer/src/utils.rs index e6890be7..1e4e65dc 100644 --- a/spo-indexer/src/utils.rs +++ b/spo-indexer/src/utils.rs @@ -1,12 +1,8 @@ -pub fn remove_hex_prefix(s: String) -> String { - if s.starts_with("0x") { - s[2..].to_string() - } else { - s.to_string() - } +pub fn remove_hex_prefix(s: &str) -> &str { + s.strip_prefix("0x").unwrap_or(s) } pub fn hex_to_bytes(s: &str) -> Vec { - let hex_str = remove_hex_prefix(s.to_string()); - hex::decode(hex_str).unwrap() + let hex = remove_hex_prefix(s); + hex::decode(hex).expect("input should be valid hex string") }