diff --git a/test/OVMF-1.55.fd b/OVMF-1.55.fd similarity index 100% rename from test/OVMF-1.55.fd rename to OVMF-1.55.fd diff --git a/erlang_ls.config b/erlang_ls.config index f5621bee0..16dda9163 100644 --- a/erlang_ls.config +++ b/erlang_ls.config @@ -6,11 +6,11 @@ diagnostics: apps_dirs: - "src" - "src/*" -include_dirs: - - "src/include" include_dirs: - "src" - "src/include" + - "_build/default/lib" + - "_build/default/lib/*/include" lenses: enabled: - ct-run-test diff --git a/native/dev_snp_nif/.gitignore b/native/dev_snp_nif/.gitignore deleted file mode 100644 index be2bbcfd0..000000000 --- a/native/dev_snp_nif/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -files -target -Cargo.lock \ No newline at end of file diff --git a/native/dev_snp_nif/Cargo.lock b/native/dev_snp_nif/Cargo.lock deleted file mode 100644 index 01d2cb41c..000000000 --- a/native/dev_snp_nif/Cargo.lock +++ /dev/null @@ -1,1711 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 4 - -[[package]] -name = "addr2line" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler2" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" - -[[package]] -name = "autocfg" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" - -[[package]] -name = "backtrace" -version = "0.3.74" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-targets 0.52.6", -] - -[[package]] -name = "base64" -version = "0.21.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" - -[[package]] -name = "base64" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" - -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - -[[package]] -name = "bitfield" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c821a6e124197eb56d907ccc2188eab1038fb919c914f47976e64dd8dbc855d1" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - 
-[[package]] -name = "bitflags" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" - -[[package]] -name = "bumpalo" -version = "3.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - -[[package]] -name = "bytes" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" - -[[package]] -name = "cc" -version = "1.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" -dependencies = [ - "shlex", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "codicon" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12170080f3533d6f09a19f81596f836854d0fa4867dc32c8172b8474b4e9de61" - -[[package]] -name = "core-foundation" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" - -[[package]] -name = "dev_snp_nif" -version = "0.1.0" -dependencies = [ - "bincode", - "hex", - "openssl", - "reqwest", - 
"rustler", - "serde", - "serde_json", - "sev", - "snafu", - "tokio", -] - -[[package]] -name = "dirs" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" -dependencies = [ - "dirs-sys", -] - -[[package]] -name = "dirs-sys" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" -dependencies = [ - "libc", - "option-ext", - "redox_users", - "windows-sys 0.48.0", -] - -[[package]] -name = "displaydoc" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "encoding_rs" -version = "0.8.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - -[[package]] -name = "errno" -version = "0.3.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" -dependencies = [ - "libc", - "windows-sys 0.59.0", -] - -[[package]] -name = "fastrand" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "foreign-types" -version = "0.3.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - -[[package]] -name = "form_urlencoded" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" -dependencies = [ - "percent-encoding", -] - -[[package]] -name = "futures-channel" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" -dependencies = [ - "futures-core", -] - -[[package]] -name = "futures-core" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" - -[[package]] -name = "futures-io" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" - -[[package]] -name = "futures-sink" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" - -[[package]] -name = "futures-task" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" - -[[package]] -name = "futures-util" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" -dependencies = [ - "futures-core", - "futures-io", - "futures-task", - "memchr", - 
"pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "getrandom" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "gimli" -version = "0.31.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" - -[[package]] -name = "h2" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "hashbrown" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" - -[[package]] -name = "heck" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "http" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" -dependencies = [ - "bytes", - "http", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = 
"1.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" - -[[package]] -name = "httpdate" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" - -[[package]] -name = "hyper" -version = "0.14.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper", - "native-tls", - "tokio", - "tokio-native-tls", -] - -[[package]] -name = "icu_collections" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" -dependencies = [ - "displaydoc", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - 
-[[package]] -name = "icu_locid_transform_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" - -[[package]] -name = "icu_normalizer" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "utf16_iter", - "utf8_iter", - "write16", - "zerovec", -] - -[[package]] -name = "icu_normalizer_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" - -[[package]] -name = "icu_properties" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_locid_transform", - "icu_properties_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_properties_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" - -[[package]] -name = "icu_provider" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr", - "writeable", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - 
"quote", - "syn", -] - -[[package]] -name = "idna" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" -dependencies = [ - "idna_adapter", - "smallvec", - "utf8_iter", -] - -[[package]] -name = "idna_adapter" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" -dependencies = [ - "icu_normalizer", - "icu_properties", -] - -[[package]] -name = "indexmap" -version = "2.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" -dependencies = [ - "equivalent", - "hashbrown", -] - -[[package]] -name = "inventory" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b31349d02fe60f80bbbab1a9402364cad7460626d6030494b08ac4a2075bf81" -dependencies = [ - "rustversion", -] - -[[package]] -name = "iocuddle" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8972d5be69940353d5347a1344cb375d9b457d6809b428b05bb1ca2fb9ce007" - -[[package]] -name = "ipnet" -version = "2.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" - -[[package]] -name = "itoa" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" - -[[package]] -name = "js-sys" -version = "0.3.77" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" -dependencies = [ - "once_cell", - "wasm-bindgen", -] - -[[package]] -name = "lazy_static" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" - -[[package]] -name = "libc" -version = "0.2.169" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" - -[[package]] -name = "libloading" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" -dependencies = [ - "cfg-if", - "windows-targets 0.52.6", -] - -[[package]] -name = "libredox" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" -dependencies = [ - "bitflags 2.8.0", - "libc", -] - -[[package]] -name = "linux-raw-sys" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" - -[[package]] -name = "litemap" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" - -[[package]] -name = "log" -version = "0.4.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" - -[[package]] -name = "memchr" -version = "2.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" - -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - -[[package]] -name = "miniz_oxide" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" -dependencies = [ - 
"adler2", -] - -[[package]] -name = "mio" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" -dependencies = [ - "libc", - "wasi", - "windows-sys 0.52.0", -] - -[[package]] -name = "native-tls" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "object" -version = "0.36.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" -dependencies = [ - "memchr", -] - -[[package]] -name = "once_cell" -version = "1.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" - -[[package]] -name = "openssl" -version = "0.10.68" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" -dependencies = [ - "bitflags 2.8.0", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "openssl-sys" -version = "0.9.104" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "option-ext" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" - -[[package]] -name = "percent-encoding" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" - -[[package]] -name = "pin-project-lite" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pkg-config" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" - -[[package]] -name = "ppv-lite86" -version = "0.2.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" -dependencies = [ - "zerocopy", -] - -[[package]] -name = "proc-macro2" -version = "1.0.93" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.38" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "redox_users" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" -dependencies = [ - "getrandom", - "libredox", - "thiserror", -] - -[[package]] -name = "regex-lite" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" - -[[package]] -name = "reqwest" -version = "0.11.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - "base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-tls", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls-pemfile", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper", - "system-configuration", - "tokio", - "tokio-native-tls", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" - -[[package]] -name = "rustix" -version = "0.38.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" -dependencies = [ - "bitflags 2.8.0", - "errno", - "libc", - "linux-raw-sys", - "windows-sys 0.59.0", -] - -[[package]] -name = "rustler" -version = "0.36.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f7b219d7473cf473409665a4898d66688b34736e51bb5791098b0d3390e4c98" -dependencies = [ - "inventory", - "libloading", - "regex-lite", - "rustler_codegen", -] - -[[package]] -name = "rustler_codegen" -version = "0.36.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "743ec5267bd5f18fd88d89f7e729c0f43b97d9c2539959915fa1f234300bb621" -dependencies = [ - "heck", - "inventory", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - -[[package]] -name = "rustversion" -version = "1.0.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" - -[[package]] -name = "ryu" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" - -[[package]] -name = "schannel" -version = "0.1.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" -dependencies = [ - "windows-sys 0.59.0", -] - -[[package]] -name = "security-framework" -version = "2.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" -dependencies = [ - "bitflags 2.8.0", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "serde" -version = "1.0.217" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde-big-array" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_bytes" -version = "0.11.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_derive" -version = "1.0.217" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.137" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "930cfb6e6abf99298aaad7d29abbef7a9999a9a8806a40088f55f0dcec03146b" -dependencies = [ - "itoa", - "memchr", - "ryu", - "serde", -] - -[[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = 
"sev" -version = "5.0.0" -source = "git+https://github.com/PeterFarber/sev.git#436e0faec7fa4010e36a44b59508b00571fb1b5a" -dependencies = [ - "base64 0.22.1", - "bincode", - "bitfield", - "bitflags 1.3.2", - "byteorder", - "codicon", - "dirs", - "hex", - "iocuddle", - "lazy_static", - "libc", - "openssl", - "rand", - "serde", - "serde-big-array", - "serde_bytes", - "static_assertions", - "uuid", -] - -[[package]] -name = "shlex" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" - -[[package]] -name = "slab" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] - -[[package]] -name = "smallvec" -version = "1.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" - -[[package]] -name = "snafu" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "223891c85e2a29c3fe8fb900c1fae5e69c2e42415e3177752e8718475efa5019" -dependencies = [ - "snafu-derive", -] - -[[package]] -name = "snafu-derive" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c3c6b7927ffe7ecaa769ee0e3994da3b8cafc8f444578982c83ecb161af917" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "socket2" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - 
-[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "syn" -version = "2.0.96" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - -[[package]] -name = "synstructure" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "system-configuration-sys", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "tempfile" -version = "3.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" -dependencies = [ - "cfg-if", - "fastrand", - "getrandom", - "once_cell", - "rustix", - "windows-sys 0.59.0", -] - -[[package]] -name = "thiserror" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tinystr" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" -dependencies = [ - "displaydoc", - "zerovec", -] - -[[package]] -name = "tokio" -version = "1.43.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" -dependencies = [ - "backtrace", - "bytes", - "libc", - "mio", - "pin-project-lite", - "socket2", - "windows-sys 0.52.0", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - -[[package]] -name = "tokio-util" -version = "0.7.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tower-service" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" - -[[package]] -name = "tracing" -version = "0.1.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" -dependencies = [ - "pin-project-lite", - "tracing-core", -] - -[[package]] -name = "tracing-core" 
-version = "0.1.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" -dependencies = [ - "once_cell", -] - -[[package]] -name = "try-lock" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" - -[[package]] -name = "unicode-ident" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" - -[[package]] -name = "url" -version = "2.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" -dependencies = [ - "form_urlencoded", - "idna", - "percent-encoding", -] - -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - -[[package]] -name = "utf8_iter" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" - -[[package]] -name = "uuid" -version = "1.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3758f5e68192bb96cc8f9b7e2c2cfdabb435499a28499a42f8f984092adad4b" -dependencies = [ - "serde", -] - -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - -[[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" -dependencies = [ - "cfg-if", - "once_cell", - "rustversion", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" -dependencies = [ - "cfg-if", - "js-sys", - "once_cell", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "web-sys" -version = "0.3.77" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets 0.48.5", -] - -[[package]] -name = "windows-sys" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 0.48.5", -] - -[[package]] -name = "windows-targets" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" -dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - -[[package]] -name = "winreg" -version = "0.50.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - -[[package]] -name = "writeable" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - -[[package]] -name = "yoke" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" -dependencies = [ - "serde", - 
"stable_deref_trait", - "yoke-derive", - "zerofrom", -] - -[[package]] -name = "yoke-derive" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] - -[[package]] -name = "zerocopy" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" -dependencies = [ - "byteorder", - "zerocopy-derive", -] - -[[package]] -name = "zerocopy-derive" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "zerofrom" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" -dependencies = [ - "zerofrom-derive", -] - -[[package]] -name = "zerofrom-derive" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] - -[[package]] -name = "zerovec" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" -dependencies = [ - "yoke", - "zerofrom", - "zerovec-derive", -] - -[[package]] -name = "zerovec-derive" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] diff --git a/native/dev_snp_nif/Cargo.toml b/native/dev_snp_nif/Cargo.toml deleted file mode 100644 
index 1179031e9..000000000 --- a/native/dev_snp_nif/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "dev_snp_nif" -version = "0.1.0" -edition = "2021" - -[lib] -name = "dev_snp_nif" -path = "src/lib.rs" -crate-type = ["dylib"] - -[dependencies] -rustler = "0.36.0" -sev = { git = "https://github.com/PeterFarber/sev.git", features = ["openssl"] } -openssl = "0.10.66" -bincode = "1.3" -snafu = "0.8.2" -hex = "0.4.3" -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -reqwest = { version="0.11.10", features = ["blocking"]} -tokio = {version = "1.29.1", features =["rt-multi-thread"] } \ No newline at end of file diff --git a/native/dev_snp_nif/src/attestation.rs b/native/dev_snp_nif/src/attestation.rs deleted file mode 100644 index eb5507fcc..000000000 --- a/native/dev_snp_nif/src/attestation.rs +++ /dev/null @@ -1,89 +0,0 @@ -use rustler::{Binary, Encoder, Env, NifResult, Term}; -use rustler::types::atom::{self, ok}; -use sev::firmware::guest::{Firmware, AttestationReport}; -use serde_json::to_string; -use crate::logging::log_message; - -/// Generates an attestation report using the provided unique data and VMPL value. -/// -/// # Arguments -/// * `env` - The Rustler environment, used to encode the return value. -/// * `unique_data` - A 64-byte binary input containing unique data for the attestation report. -/// * `vmpl` - The Virtual Machine Privilege Level (VMPL) to be used in the report. -/// -/// # Returns -/// A tuple containing an `ok` atom and the serialized attestation report in JSON format. -/// If an error occurs during the generation or serialization process, an error is returned. -/// -/// # Example -/// ```erlang -/// {ok, JsonReport} = dev_snp_nif:generate_attestation_report(UniqueDataBinary, VMPL). 
-/// ``` -#[rustler::nif] -pub fn generate_attestation_report<'a>( - env: Env<'a>, - unique_data: Binary, - vmpl: u32, -) -> NifResult> { - log_message("INFO", file!(), line!(), "Starting attestation report generation..."); - - // Step 1: Convert the binary input to a fixed-size array. - let unique_data_array: [u8; 64] = match unique_data.as_slice().try_into() { - Ok(data) => data, - Err(_) => { - let msg = "Input binary must be exactly 64 bytes long."; - log_message("ERROR", file!(), line!(), msg); - return Err(rustler::Error::BadArg); - } - }; - - // Step 2: Open the firmware interface. - let mut firmware = match Firmware::open() { - Ok(fw) => { - log_message("INFO", file!(), line!(), "Firmware opened successfully."); - fw - } - Err(err) => { - let msg = format!("Failed to open firmware: {:?}", err); - log_message("ERROR", file!(), line!(), &msg); - return Ok((atom::error(), msg).encode(env)); - } - }; - - // Step 3: Generate the attestation report. - let report: AttestationReport = match firmware.get_report(None, Some(unique_data_array), Some(vmpl)) { - Ok(report) => { - log_message("INFO", file!(), line!(), "Attestation report generated successfully."); - report - } - Err(err) => { - let msg = format!("Failed to generate attestation report: {:?}", err); - log_message("ERROR", file!(), line!(), &msg); - return Ok((atom::error(), msg).encode(env)); - } - }; - - // Step 4: Serialize the report into a JSON string for output. - let report_json = match to_string(&report) { - Ok(json) => { - log_message("INFO", file!(), line!(), "Attestation report serialized to JSON format."); - json - } - Err(err) => { - let msg = format!("Failed to serialize attestation report: {:?}", err); - log_message("ERROR", file!(), line!(), &msg); - return Ok((atom::error(), msg).encode(env)); - } - }; - - // Step 5: Log the serialized JSON for debugging purposes. 
- // log_message( - // "INFO", - // file!(), - // line!(), - // &format!("Generated report JSON: {:?}", report_json), - // ); - - // Step 6: Return the result as a tuple with the `ok` atom. - Ok((ok(), report_json).encode(env)) -} diff --git a/native/dev_snp_nif/src/digest.rs b/native/dev_snp_nif/src/digest.rs deleted file mode 100644 index 483c026f6..000000000 --- a/native/dev_snp_nif/src/digest.rs +++ /dev/null @@ -1,145 +0,0 @@ -use rustler::{Encoder, Env, MapIterator, NifResult, Term}; -use rustler::types::atom::{self, ok}; -use sev::measurement::snp::{snp_calc_launch_digest, SnpMeasurementArgs}; -use sev::measurement::vcpu_types::CpuType; -use sev::measurement::vmsa::{GuestFeatures, VMMType}; -use crate::logging::log_message; -use std::path::PathBuf; -use bincode; - -/// Struct to hold launch digest arguments passed from Erlang -#[derive(Debug)] -struct LaunchDigestArgs { - vcpus: u32, - vcpu_type: u8, - vmm_type: u8, - guest_features: u64, - ovmf_hash_str: String, - kernel_hash: String, - initrd_hash: String, - append_hash: String, -} - -/// Computes the launch digest using the input arguments provided as an Erlang map. -/// -/// # Arguments -/// * `env` - The Rustler environment, used to encode the return value. -/// * `input_map` - An Erlang map containing the input parameters required for the calculation. -/// -/// # Returns -/// A tuple containing an `ok` atom and the calculated and serialized launch digest. -/// If the input is invalid or an error occurs during calculation, an error is returned. -/// -/// # Expected Input Map Keys: -/// - `"vcpus"`: Number of virtual CPUs (u32). -/// - `"vcpu_type"`: Type of the virtual CPU (u8). -/// - `"vmm_type"`: Type of the Virtual Machine Monitor (u8). -/// - `"guest_features"`: Features of the guest (u64). -/// - `"ovmf_hash_str"`: Hash of the OVMF firmware (String). -/// - `"kernel_hash"`: Hash of the kernel (String). -/// - `"initrd_hash"`: Hash of the initrd (String). 
-/// - `"append_hash"`: Hash of the kernel command line arguments (String). -/// -/// # Example -/// ```erlang -/// {ok, LaunchDigest} = dev_snp_nif:compute_launch_digest(InputMap). -/// ``` -#[rustler::nif] -pub fn compute_launch_digest<'a>(env: Env<'a>, input_map: Term<'a>) -> NifResult> { - //log_message("INFO", file!(), line!(), "Starting launch digest calculation..."); - - // Step 1: Validate that the input is a map. - if !input_map.is_map() { - log_message("ERROR", file!(), line!(), "Provided input is not a map."); - return Err(rustler::Error::BadArg); - } - - // Step 2: Helper function to decode string values from the map. - fn decode_string(value: Term) -> NifResult { - match value.get_type() { - rustler::TermType::List => { - let list: Vec = value.decode()?; - String::from_utf8(list).map_err(|_| rustler::Error::BadArg) - } - _ => value.decode(), - } - } - - // Step 3: Parse input map into LaunchDigestArgs. - let mut args = LaunchDigestArgs { - vcpus: 0, - vcpu_type: 0, - vmm_type: 0, - guest_features: 0, - ovmf_hash_str: String::new(), - kernel_hash: String::new(), - initrd_hash: String::new(), - append_hash: String::new(), - }; - - let map_iter = MapIterator::new(input_map).unwrap(); - for (key, value) in map_iter { - let key_str = key.atom_to_string()?.to_string(); - match key_str.as_str() { - "vcpus" => args.vcpus = value.decode()?, - "vcpu_type" => args.vcpu_type = value.decode()?, - "vmm_type" => args.vmm_type = value.decode()?, - "guest_features" => args.guest_features = value.decode()?, - "firmware" => args.ovmf_hash_str = decode_string(value)?, - "kernel" => args.kernel_hash = decode_string(value)?, - "initrd" => args.initrd_hash = decode_string(value)?, - "append" => args.append_hash = decode_string(value)?, - _ => log_message("WARN", file!(), line!(), &format!("Unexpected key: {}", key_str)), - } - } - - //log_message("INFO", file!(), line!(), &format!("Parsed arguments: {:?}", args)); - - // Step 4: Prepare SnpMeasurementArgs for digest 
calculation. - let ovmf_file = "test/OVMF-1.55.fd".to_owned(); - let measurement_args = SnpMeasurementArgs { - ovmf_file: Some(PathBuf::from(ovmf_file)), - kernel_file: None, - initrd_file: None, - append: None, - - vcpus: args.vcpus, - vcpu_type: CpuType::try_from(args.vcpu_type).unwrap(), - vmm_type: Some(VMMType::try_from(args.vmm_type).unwrap()), - guest_features: GuestFeatures(args.guest_features), - ovmf_hash_str: Some(args.ovmf_hash_str.as_str()), - kernel_hash: Some(hex::decode(args.kernel_hash).unwrap().try_into().unwrap()), - initrd_hash: Some(hex::decode(args.initrd_hash).unwrap().try_into().unwrap()), - append_hash: Some(hex::decode(args.append_hash).unwrap().try_into().unwrap()), - }; - - // Step 5: Compute the launch digest. - let digest = match snp_calc_launch_digest(measurement_args) { - Ok(digest) => digest, - Err(err) => { - let msg = format!("Failed to compute launch digest: {:?}", err); - log_message("ERROR", file!(), line!(), &msg); - return Ok((atom::error(), msg).encode(env)); - } - }; - - // Step 6: Serialize the digest. - let serialized_digest = match bincode::serialize(&digest) { - Ok(serialized) => serialized, - Err(err) => { - let msg = format!("Failed to serialize launch digest: {:?}", err); - log_message("ERROR", file!(), line!(), &msg); - return Ok((atom::error(), msg).encode(env)); - } - }; - - //log_message( - // "INFO", - // file!(), - // line!(), - // "Launch digest successfully computed and serialized.", - //); - - // Step 7: Return the calculated and serialized digest. - Ok((ok(), serialized_digest).encode(env)) -} diff --git a/native/dev_snp_nif/src/helpers.rs b/native/dev_snp_nif/src/helpers.rs deleted file mode 100644 index b74482264..000000000 --- a/native/dev_snp_nif/src/helpers.rs +++ /dev/null @@ -1,110 +0,0 @@ -use sev::certs::snp::{ca, Certificate}; -use sev::firmware::host::TcbVersion; -use crate::logging::log_message; -use reqwest::blocking::get; - -/// Base URL for AMD's Key Distribution Service (KDS). 
-const KDS_CERT_SITE: &str = "https://kdsintf.amd.com"; -/// Endpoint for the VCEK API. -const KDS_VCEK: &str = "/vcek/v1"; -/// Endpoint for the Certificate Chain API. -const KDS_CERT_CHAIN: &str = "cert_chain"; - -/// Requests the AMD certificate chain (ASK + ARK) for the given SEV product name. -/// -/// # Arguments -/// * `sev_prod_name` - The SEV product name (e.g., "Milan"). -/// -/// # Returns -/// A `ca::Chain` containing the ASK and ARK certificates. -/// -/// # Errors -/// Returns an error if the request fails, the response is invalid, or the certificate parsing fails. -/// -/// # Example -/// ```erlang -/// {ok, CertChain} = dev_snp_nif:request_cert_chain("Milan"). -pub fn request_cert_chain(sev_prod_name: &str) -> Result> { -// Blocking version of reqwest - let url = format!("{KDS_CERT_SITE}{KDS_VCEK}/{sev_prod_name}/{KDS_CERT_CHAIN}"); - // log_message( - // "INFO", - // file!(), - // line!(), - // &format!("Requesting AMD certificate chain from: {url}"), - // ); - - // Perform the blocking GET request - let response = get(&url)?; - let body = response.bytes()?; - - // Parse the response as a PEM-encoded certificate chain - let chain = openssl::x509::X509::stack_from_pem(&body)?; - if chain.len() < 2 { - return Err("Expected at least two certificates (ARK and ASK) in the chain".into()); - } - - // Convert ARK and ASK into the `ca::Chain` structure required by the SEV crate - let ark = chain[1].to_pem()?; - let ask = chain[0].to_pem()?; - let ca_chain = ca::Chain::from_pem(&ark, &ask)?; - - //log_message( - // "INFO", - // file!(), - // line!(), - // "Successfully fetched AMD certificate chain.", - //); - - Ok(ca_chain) -} - -/// Requests the VCEK for the given chip ID and reported TCB. -/// -/// # Arguments -/// * `chip_id` - The unique 64-byte chip ID. -/// * `reported_tcb` - The TCB version of the platform. -/// -/// # Returns -/// A `Certificate` representing the VCEK. 
-/// -/// # Errors -/// Returns an error if the request fails, the response is invalid, or the certificate parsing fails. -/// -/// # Example -/// ```erlang -/// {ok, VcekCert} = dev_snp_nif:request_vcek(ChipIdBinary, ReportedTcbMap). -/// ``` -pub fn request_vcek( - chip_id: [u8; 64], - reported_tcb: TcbVersion, -) -> Result> { - use reqwest::blocking::get; // Blocking version of reqwest - - let hw_id = chip_id - .iter() - .map(|byte| format!("{:02x}", byte)) - .collect::(); - - let url = format!( - "{KDS_CERT_SITE}{KDS_VCEK}/Milan/{hw_id}?blSPL={:02}&teeSPL={:02}&snpSPL={:02}&ucodeSPL={:02}", - reported_tcb.bootloader, reported_tcb.tee, reported_tcb.snp, reported_tcb.microcode - ); - - // log_message( - // "INFO", - // file!(), - // line!(), - // &format!("Requesting VCEK from: {url}"), - // ); - - // Perform the blocking GET request - let response = get(&url)?; - let rsp_bytes = response.bytes()?; - - // Parse the VCEK response as a DER-encoded certificate - let vcek_cert = Certificate::from_der(&rsp_bytes)?; - - // log_message("INFO", file!(), line!(), "Successfully fetched VCEK."); - Ok(vcek_cert) -} diff --git a/native/dev_snp_nif/src/lib.rs b/native/dev_snp_nif/src/lib.rs deleted file mode 100644 index abfc92abc..000000000 --- a/native/dev_snp_nif/src/lib.rs +++ /dev/null @@ -1,13 +0,0 @@ -/// Entry point for the Rustler NIF module. -/// This file defines the available NIF functions and organizes them into modules. - -mod logging; -mod snp_support; -mod attestation; -mod digest; -mod verification; -mod helpers; - -rustler::init!( - "dev_snp_nif"// Module name as used in Erlang. -); diff --git a/native/dev_snp_nif/src/logging.rs b/native/dev_snp_nif/src/logging.rs deleted file mode 100644 index 31be106fa..000000000 --- a/native/dev_snp_nif/src/logging.rs +++ /dev/null @@ -1,28 +0,0 @@ -use std::thread; -use std::time::SystemTime; - -/// Logs messages with details including thread ID, timestamp, file, and line number. 
-/// -/// # Arguments -/// - `log_level`: The log level (e.g., "INFO", "ERROR"). -/// - `file`: The file where the log is being generated. -/// - `line`: The line number of the log statement. -/// - `message`: The log message. -/// -/// # Example -/// ```rust -/// log_message("INFO", file!(), line!(), "This is a log message."); -/// ``` -pub fn log_message(log_level: &str, file: &str, line: u32, message: &str) { - let thread_id = thread::current().id(); - let now = SystemTime::now(); - let timestamp = now - .duration_since(SystemTime::UNIX_EPOCH) - .map(|d| d.as_secs()) - .unwrap_or(0); - - println!( - "[{}#{:?} @ {}:{}] [{}] {}", - log_level, thread_id, file, line, timestamp, message - ); -} diff --git a/native/dev_snp_nif/src/snp_support.rs b/native/dev_snp_nif/src/snp_support.rs deleted file mode 100644 index 0ca9da69c..000000000 --- a/native/dev_snp_nif/src/snp_support.rs +++ /dev/null @@ -1,44 +0,0 @@ -use rustler::{Encoder, Env, NifResult, Term}; -use rustler::types::atom::ok; -use sev::firmware::guest::Firmware; -use crate::logging::log_message; - -/// Checks if Secure Nested Paging (SNP) is supported by the system. -/// -/// # Arguments -/// * `env` - The Rustler environment, used to encode the return value. -/// -/// # Returns -/// A tuple containing an `ok` atom and a boolean value: -/// - `true` if the firmware indicates that SNP is supported. -/// - `false` if SNP is not supported or if the firmware cannot be accessed. -/// -/// # Example -/// ```erlang -/// {ok, Supported} = dev_snp_nif:check_snp_support(). -/// ``` -#[rustler::nif] -pub fn check_snp_support<'a>(env: Env<'a>) -> NifResult> { - //log_message("INFO", file!(), line!(), "Checking SNP support..."); - - // Step 1: Attempt to open the firmware interface. - // If the firmware is accessible, SNP is supported; otherwise, it is not. - let is_supported = match Firmware::open() { - Ok(_) => { - //log_message("INFO", file!(), line!(), "SNP is supported."); - true // SNP is supported. 
- } - Err(_) => { - // log_message( - // "ERROR", - // file!(), - // line!(), - // "Failed to open firmware. SNP is not supported.", - // ); - false // SNP is not supported. - } - }; - - // Step 2: Return the result as a tuple with the `ok` atom and the boolean value. - Ok((ok(), is_supported).encode(env)) -} diff --git a/native/dev_snp_nif/src/verification.rs b/native/dev_snp_nif/src/verification.rs deleted file mode 100644 index e8636e851..000000000 --- a/native/dev_snp_nif/src/verification.rs +++ /dev/null @@ -1,310 +0,0 @@ -use rustler::{Binary, Encoder, Env, NifResult, Term}; -use rustler::types::atom::{self, ok}; -use serde_json::Value; -use serde::Deserialize; -use sev::certs::snp::{ecdsa::Signature, Chain, Verifiable}; -use sev::firmware::host::TcbVersion; -use sev::firmware::guest::{AttestationReport, GuestPolicy, PlatformInfo}; -use crate::helpers::{request_cert_chain, request_vcek}; -use crate::logging::log_message; - -/// Verifies whether the measurement in the attestation report matches the expected measurement. -/// -/// # Arguments -/// * `env` - The Rustler environment, used to encode the return value. -/// * `_report` - A binary containing the serialized attestation report (JSON format). -/// * `_expected_measurement` - A binary containing the expected measurement (as a byte array). -/// -/// # Returns -/// A tuple with: -/// - `ok` atom and a success message if the measurements match. -/// - `error` atom and an error message if the measurements do not match. -#[rustler::nif] -fn verify_measurement<'a>( - env: Env<'a>, - _report: Binary, - _expected_measurement: Binary, -) -> NifResult> { - //log_message("INFO", file!(), line!(), "Starting measurement verification..."); - - // Define a struct for deserializing the attestation report. - #[derive(Debug, Deserialize)] - struct AttestationReport { - measurement: Vec, - // Additional fields can be added here if needed. - } - - // Step 1: Deserialize the JSON report. 
- let report: AttestationReport = match serde_json::from_slice(_report.as_slice()) { - Ok(parsed_report) => { - //log_message( - // "INFO", - // file!(), - // line!(), - // &format!("Successfully parsed report: {:?}", parsed_report), - //); - parsed_report - } - Err(err) => { - log_message( - "ERROR", - file!(), - line!(), - &format!("Failed to deserialize report: {:?}", err), - ); - return Ok((atom::error(), "Invalid report format").encode(env)); - } - }; - - // Step 2: Extract the actual measurement from the report. - let actual_measurement = &report.measurement; - // log_message( - // "INFO", - // file!(), - // line!(), - // &format!("Extracted actual measurement: {:?}", actual_measurement), - // ); - - // Step 3: Decode the expected measurement from the input binary. - let expected_measurement: Vec = _expected_measurement.as_slice().to_vec(); - // log_message( - // "INFO", - // file!(), - // line!(), - // &format!("Decoded expected measurement: {:?}", expected_measurement), - // ); - - // Step 4: Compare the actual and expected measurements. - if actual_measurement == &expected_measurement { - //log_message("INFO", file!(), line!(), "Measurements match."); - Ok((atom::ok(), true).encode(env)) - } else { - //log_message("ERROR", file!(), line!(), "Measurements do not match."); - Ok((atom::error(), false).encode(env)) - } -} - - -/// Verifies the signature of an attestation report. -/// -/// # Arguments -/// * `env` - The Rustler environment, used to encode the return value. -/// * `report` - A binary containing the serialized attestation report. -/// -/// # Returns -/// A tuple with: -/// - `ok` atom and a success message if the signature is valid. -/// - `error` atom and an error message if the signature verification fails. -#[rustler::nif] -fn verify_signature<'a>( - env: Env<'a>, - report: Binary<'a>, -) -> NifResult> { - // log_message("INFO", file!(), line!(), "Verifying signature..."); - - // Step 1: Parse the report JSON into a serde Value object. 
- let json_data = match serde_json::from_slice::(report.as_slice()) { - Ok(data) => data, - Err(err) => { - return Ok(( - rustler::types::atom::error(), - format!("Failed to parse JSON: {}", err), - ) - .encode(env)); - } - }; - - // Step 2: Map JSON fields to the AttestationReport struct. - // Each field is individually parsed to ensure type safety. - let attestation_report = AttestationReport { - version: json_data["version"].as_u64().unwrap_or(0) as u32, - guest_svn: json_data["guest_svn"].as_u64().unwrap_or(0) as u32, - policy: GuestPolicy(json_data["policy"].as_u64().unwrap_or(0)), - family_id: json_data["family_id"] - .as_array() - .unwrap_or(&vec![]) - .iter() - .map(|v| v.as_u64().unwrap_or(0) as u8) - .collect::>() - .try_into() - .unwrap_or([0; 16]), - image_id: json_data["image_id"] - .as_array() - .unwrap_or(&vec![]) - .iter() - .map(|v| v.as_u64().unwrap_or(0) as u8) - .collect::>() - .try_into() - .unwrap_or([0; 16]), - vmpl: json_data["vmpl"].as_u64().unwrap_or(0) as u32, - sig_algo: json_data["sig_algo"].as_u64().unwrap_or(0) as u32, - current_tcb: TcbVersion { - bootloader: json_data["current_tcb"]["bootloader"].as_u64().unwrap_or(0) as u8, - tee: json_data["current_tcb"]["tee"].as_u64().unwrap_or(0) as u8, - snp: json_data["current_tcb"]["snp"].as_u64().unwrap_or(0) as u8, - microcode: json_data["current_tcb"]["microcode"].as_u64().unwrap_or(0) as u8, - _reserved: [0; 4], - }, - plat_info: PlatformInfo(json_data["plat_info"].as_u64().unwrap_or(0)), - _author_key_en: json_data["_author_key_en"].as_u64().unwrap_or(0) as u32, - _reserved_0: json_data["_reserved_0"].as_u64().unwrap_or(0) as u32, - report_data: json_data["report_data"] - .as_array() - .unwrap_or(&vec![]) - .iter() - .map(|v| v.as_u64().unwrap_or(0) as u8) - .collect::>() - .try_into() - .unwrap_or([0; 64]), - measurement: json_data["measurement"] - .as_array() - .unwrap_or(&vec![]) - .iter() - .map(|v| v.as_u64().unwrap_or(0) as u8) - .collect::>() - .try_into() - .unwrap_or([0; 48]), 
- host_data: json_data["host_data"] - .as_array() - .unwrap_or(&vec![]) - .iter() - .map(|v| v.as_u64().unwrap_or(0) as u8) - .collect::>() - .try_into() - .unwrap_or([0; 32]), - id_key_digest: json_data["id_key_digest"] - .as_array() - .unwrap_or(&vec![]) - .iter() - .map(|v| v.as_u64().unwrap_or(0) as u8) - .collect::>() - .try_into() - .unwrap_or([0; 48]), - author_key_digest: json_data["author_key_digest"] - .as_array() - .unwrap_or(&vec![]) - .iter() - .map(|v| v.as_u64().unwrap_or(0) as u8) - .collect::>() - .try_into() - .unwrap_or([0; 48]), - report_id: json_data["report_id"] - .as_array() - .unwrap_or(&vec![]) - .iter() - .map(|v| v.as_u64().unwrap_or(0) as u8) - .collect::>() - .try_into() - .unwrap_or([0; 32]), - report_id_ma: json_data["report_id_ma"] - .as_array() - .unwrap_or(&vec![]) - .iter() - .map(|v| v.as_u64().unwrap_or(0) as u8) - .collect::>() - .try_into() - .unwrap_or([0; 32]), - reported_tcb: TcbVersion { - bootloader: json_data["reported_tcb"]["bootloader"] - .as_u64() - .unwrap_or(0) as u8, - tee: json_data["reported_tcb"]["tee"].as_u64().unwrap_or(0) as u8, - snp: json_data["reported_tcb"]["snp"].as_u64().unwrap_or(0) as u8, - microcode: json_data["reported_tcb"]["microcode"].as_u64().unwrap_or(0) as u8, - _reserved: [0; 4], - }, - _reserved_1: [0; 24], - chip_id: json_data["chip_id"] - .as_array() - .unwrap_or(&vec![]) - .iter() - .map(|v| v.as_u64().unwrap_or(0) as u8) - .collect::>() - .try_into() - .unwrap_or([0; 64]), - committed_tcb: TcbVersion { - bootloader: json_data["committed_tcb"]["bootloader"] - .as_u64() - .unwrap_or(0) as u8, - tee: json_data["committed_tcb"]["tee"].as_u64().unwrap_or(0) as u8, - snp: json_data["committed_tcb"]["snp"].as_u64().unwrap_or(0) as u8, - microcode: json_data["committed_tcb"]["microcode"] - .as_u64() - .unwrap_or(0) as u8, - _reserved: [0; 4], - }, - current_build: json_data["current_build"].as_u64().unwrap_or(0) as u8, - current_minor: json_data["current_minor"].as_u64().unwrap_or(0) as u8, - 
current_major: json_data["current_major"].as_u64().unwrap_or(0) as u8, - _reserved_2: json_data["_reserved_2"].as_u64().unwrap_or(0) as u8, - committed_build: json_data["committed_build"].as_u64().unwrap_or(0) as u8, - committed_minor: json_data["committed_minor"].as_u64().unwrap_or(0) as u8, - committed_major: json_data["committed_major"].as_u64().unwrap_or(0) as u8, - _reserved_3: json_data["_reserved_3"].as_u64().unwrap_or(0) as u8, - launch_tcb: TcbVersion { - bootloader: json_data["launch_tcb"]["bootloader"].as_u64().unwrap_or(0) as u8, - tee: json_data["launch_tcb"]["tee"].as_u64().unwrap_or(0) as u8, - snp: json_data["launch_tcb"]["snp"].as_u64().unwrap_or(0) as u8, - microcode: json_data["launch_tcb"]["microcode"].as_u64().unwrap_or(0) as u8, - _reserved: [0; 4], - }, - _reserved_4: [0; 168], - signature: Signature { - r: json_data["signature"]["r"] - .as_array() - .unwrap_or(&vec![]) - .iter() - .map(|v| v.as_u64().unwrap_or(0) as u8) - .collect::>() - .try_into() - .unwrap_or([0; 72]), - s: json_data["signature"]["s"] - .as_array() - .unwrap_or(&vec![]) - .iter() - .map(|v| v.as_u64().unwrap_or(0) as u8) - .collect::>() - .try_into() - .unwrap_or([0; 72]), - _reserved: [0; 368], - }, - }; - - // Step 3: Extract the chip ID and TCB version. - let chip_id_array: [u8; 64] = attestation_report - .chip_id - .try_into() - .expect("chip_id must be 64 bytes"); - let tcb_version = attestation_report.current_tcb; - - // Step 4: Request the certificate chain and VCEK. - let ca = request_cert_chain("Milan").unwrap(); - let vcek = request_vcek(chip_id_array, tcb_version).unwrap(); - - // Step 5: Verify the certificate chain. 
- if let Err(e) = ca.verify() { - log_message( - "ERROR", - file!(), - line!(), - &format!("CA chain verification failed: {:?}", e), - ); - return Ok((atom::error(), format!("CA verification failed: {:?}", e)).encode(env)); - } - //log_message("INFO", file!(), line!(), "CA chain verification successful."); - - // Step 6: Verify the attestation report. - let cert_chain = Chain { ca, vek: vcek }; - if let Err(e) = (&cert_chain, &attestation_report).verify() { - log_message( - "ERROR", - file!(), - line!(), - &format!("Attestation report verification failed: {:?}", e), - ); - return Ok((atom::error(), format!("Report verification failed: {:?}", e)).encode(env)); - } - - //log_message("INFO", file!(), line!(), "Signature verification successful."); - Ok((ok(), true).encode(env)) -} diff --git a/native/snp_nif/snp_nif.c b/native/snp_nif/snp_nif.c new file mode 100644 index 000000000..bb93c3cb0 --- /dev/null +++ b/native/snp_nif/snp_nif.c @@ -0,0 +1,645 @@ +// Minimal NIF - only for ioctl to /dev/sev-guest +// Everything else can be done in Erlang + +#include "erl_nif.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Simple logging macro for NIF (similar to DRV_DEBUG in driver code) +#define NIF_DEBUG 1 // Set to 0 to disable debug logging +#define NIF_LOG(format, ...) 
\ + do { \ + if (NIF_DEBUG) { \ + fprintf(stderr, "[C-NIF @ %s:%d] " format "\n", __FILE__, __LINE__, ##__VA_ARGS__); \ + } \ + } while(0) + +// SEV ioctl definitions +#ifndef _UAPI_LINUX_SEV_GUEST_H_ +#define SEV_GUEST_IOC_TYPE 'S' +#define SEV_GUEST_IOC_NR_GET_REPORT 0 + +#define _IOC_NRBITS 8 +#define _IOC_TYPEBITS 8 +#define _IOC_SIZEBITS 14 +#define _IOC_DIRBITS 2 + +#define _IOC_NRSHIFT 0 +#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) +#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) +#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) + +#define _IOC_NONE 0U +#define _IOC_WRITE 1U +#define _IOC_READ 2U + +#define _IOC(dir,type,nr,size) \ + (((dir) << _IOC_DIRSHIFT) | \ + ((type) << _IOC_TYPESHIFT) | \ + ((nr) << _IOC_NRSHIFT) | \ + ((size) << _IOC_SIZESHIFT)) + +#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) + +struct sev_guest_request { + __u32 msg_version; + __u64 request_data; + __u64 response_data; + __u64 fw_err; +}; + +#define SEV_GUEST_IOC_GET_REPORT \ + _IOWR(SEV_GUEST_IOC_TYPE, SEV_GUEST_IOC_NR_GET_REPORT, \ + struct sev_guest_request) +#endif + +// Report request structure (96 bytes) +struct snp_report_req { + __u8 report_data[64]; + __u32 vmpl; + __u8 reserved[28]; +}; + +// Report response structure (4000 bytes) +struct snp_report_resp { + __u32 status; + __u32 report_size; + __u8 reserved[24]; + __u8 report[1184]; // AttestationReport size + __u8 padding[2784]; // Padding to 4000 bytes +}; + +// Error codes +typedef enum { + SNP_ERR_NONE = 0, + SNP_ERR_INVALID_INPUT, + SNP_ERR_IOCTL_FAILED, + SNP_ERR_FIRMWARE_ERROR, + SNP_ERR_MEMORY_ERROR +} snp_error_t; + +// Helper to create error tuple +static ERL_NIF_TERM make_error(ErlNifEnv *env, snp_error_t err_code, const char *msg) { + ERL_NIF_TERM error_code = enif_make_int(env, err_code); + ERL_NIF_TERM error_msg = enif_make_string(env, msg, ERL_NIF_LATIN1); + ERL_NIF_TERM error_tuple = enif_make_tuple2(env, error_code, error_msg); + return 
enif_make_tuple2(env, enif_make_atom(env, "error"), error_tuple); +} + +// NIF: check_snp_support +static ERL_NIF_TERM nif_check_snp_support(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { + int fd = open("/dev/sev-guest", O_RDONLY); + if (fd < 0) { + return enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_atom(env, "false")); + } + close(fd); + return enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_atom(env, "true")); +} + +// NIF: generate_attestation_report +// This is the ONLY function that needs C - everything else can be Erlang +static ERL_NIF_TERM nif_generate_attestation_report(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { + ErlNifBinary unique_data; + unsigned int vmpl; + + // Input validation + if (!enif_inspect_binary(env, argv[0], &unique_data) || unique_data.size != 64) { + return make_error(env, SNP_ERR_INVALID_INPUT, "Input binary must be exactly 64 bytes"); + } + + if (!enif_get_uint(env, argv[1], &vmpl)) { + return make_error(env, SNP_ERR_INVALID_INPUT, "Invalid VMPL value: must be an integer"); + } + + if (vmpl > 3) { + return make_error(env, SNP_ERR_INVALID_INPUT, "VMPL must be <= 3"); + } + + // Open SEV guest device + int fd = open("/dev/sev-guest", O_RDWR); + if (fd < 0) { + char err_msg[256]; + snprintf(err_msg, sizeof(err_msg), "Failed to open /dev/sev-guest: %s", strerror(errno)); + return make_error(env, SNP_ERR_IOCTL_FAILED, err_msg); + } + + // Prepare request structure + struct snp_report_req req; + memset(&req, 0, sizeof(req)); + memcpy(req.report_data, unique_data.data, 64); + req.vmpl = vmpl; + + // Prepare response structure + struct snp_report_resp resp; + memset(&resp, 0, sizeof(resp)); + + // Prepare guest request structure + struct sev_guest_request guest_req; + guest_req.msg_version = 1; + guest_req.request_data = (__u64)(unsigned long)&req; + guest_req.response_data = (__u64)(unsigned long)&resp; + guest_req.fw_err = 0; + + // Perform ioctl - THIS IS THE ONLY REASON WE NEED C + int ret = 
ioctl(fd, SEV_GUEST_IOC_GET_REPORT, &guest_req); + close(fd); + + if (ret < 0) { + char err_msg[256]; + snprintf(err_msg, sizeof(err_msg), "ioctl(SNP_GET_REPORT) failed: %s", strerror(errno)); + return make_error(env, SNP_ERR_IOCTL_FAILED, err_msg); + } + + if (resp.status != 0) { + char err_msg[256]; + snprintf(err_msg, sizeof(err_msg), "Firmware error (status=0x%x): SNP_GET_REPORT failed", resp.status); + return make_error(env, SNP_ERR_FIRMWARE_ERROR, err_msg); + } + + // Validate report size + if (resp.report_size != 1184) { + char err_msg[256]; + snprintf(err_msg, sizeof(err_msg), "Invalid report size: expected 1184, got %u", resp.report_size); + return make_error(env, SNP_ERR_INVALID_INPUT, err_msg); + } + + // Return binary report structure (1184 bytes) + // All parsing, verification, etc. happens in Erlang + ERL_NIF_TERM result; + unsigned char *bin = enif_make_new_binary(env, 1184, &result); + if (!bin) { + return make_error(env, SNP_ERR_MEMORY_ERROR, "Failed to allocate binary for report"); + } + memcpy(bin, resp.report, 1184); + + return enif_make_tuple2(env, enif_make_atom(env, "ok"), result); +} + +// Helper function to parse certificate chain (ARK + ASK) from DER +// Returns: STACK_OF(X509) containing ARK and ASK, or NULL on error +static STACK_OF(X509) *parse_cert_chain(const unsigned char *data, long len) { + STACK_OF(X509) *chain = sk_X509_new_null(); + if (!chain) { + return NULL; + } + + const unsigned char *p = data; + long remaining = len; + + // Parse certificates sequentially from the DER blob + // AMD KDS returns ASK first, then ARK (as per SEV spec) + // The DER blob contains concatenated DER-encoded certificates + while (remaining > 0) { + const unsigned char *cert_start = p; + X509 *cert = d2i_X509(NULL, &p, remaining); + if (!cert) { + // No more certificates or parse error + break; + } + + // Calculate how many bytes were consumed + long cert_len = p - cert_start; + remaining -= cert_len; + + if (!sk_X509_push(chain, cert)) { + 
X509_free(cert); + sk_X509_pop_free(chain, X509_free); + return NULL; + } + } + + if (sk_X509_num(chain) < 2) { + NIF_LOG("Certificate chain must contain at least 2 certificates (ARK + ASK), got %d", sk_X509_num(chain)); + sk_X509_pop_free(chain, X509_free); + return NULL; + } + + return chain; +} + +// Verify certificate chain: ARK -> ASK -> VCEK +// Returns 1 on success, 0 on failure +static int verify_cert_chain(STACK_OF(X509) *chain, X509 *vcek) { + if (!chain || !vcek || sk_X509_num(chain) < 2) { + NIF_LOG("Invalid certificate chain or VCEK"); + return 0; + } + + // Create X509_STORE and add ARK as trusted root + X509_STORE *store = X509_STORE_new(); + if (!store) { + NIF_LOG("Failed to create X509_STORE"); + return 0; + } + + // Certificate order in DER blob: ASK first, then ARK (as per SEV spec) + // ARK is the root (self-signed), ASK is signed by ARK + X509 *ask = sk_X509_value(chain, 0); // First cert is ASK + X509 *ark = sk_X509_value(chain, 1); // Second cert is ARK (root) + + // Set verification flags - allow self-signed root and enable chain building + unsigned long flags = X509_V_FLAG_ALLOW_PROXY_CERTS; + X509_STORE_set_flags(store, flags); + + // Add ARK to store as trusted root + // Note: We need to add it as a trusted cert, not just any cert + if (!X509_STORE_add_cert(store, ark)) { + NIF_LOG("Failed to add ARK to store"); + X509_STORE_free(store); + return 0; + } + + // Verify ARK is self-signed (it should be) + X509_NAME *ark_subject = X509_get_subject_name(ark); + X509_NAME *ark_issuer = X509_get_issuer_name(ark); + int is_self_signed = X509_NAME_cmp(ark_subject, ark_issuer) == 0; + NIF_LOG("ARK is self-signed: %d", is_self_signed); + + // Create verification context + X509_STORE_CTX *ctx = X509_STORE_CTX_new(); + if (!ctx) { + NIF_LOG("Failed to create X509_STORE_CTX"); + X509_STORE_free(store); + return 0; + } + + // Build untrusted chain: ARK -> ASK -> VCEK + // Include ARK in the chain so OpenSSL can find it as ASK's issuer + // ARK is also 
in the store as trusted, so OpenSSL will trust it + STACK_OF(X509) *untrusted_chain = sk_X509_new_null(); + if (!untrusted_chain) { + X509_STORE_CTX_free(ctx); + X509_STORE_free(store); + return 0; + } + + // Add ARK first (root), then ASK (intermediate), then VCEK (end entity) + // Order: root to end entity (OpenSSL builds chain backwards from target) + if (!sk_X509_push(untrusted_chain, X509_dup(ark)) || + !sk_X509_push(untrusted_chain, X509_dup(ask)) || + !sk_X509_push(untrusted_chain, X509_dup(vcek))) { + sk_X509_pop_free(untrusted_chain, X509_free); + X509_STORE_CTX_free(ctx); + X509_STORE_free(store); + return 0; + } + + // Initialize verification context with VCEK as target + // The untrusted chain contains ASK and VCEK + // OpenSSL will look for ARK (ASK's issuer) in the store + if (!X509_STORE_CTX_init(ctx, store, vcek, untrusted_chain)) { + NIF_LOG("Failed to initialize X509_STORE_CTX"); + sk_X509_pop_free(untrusted_chain, X509_free); + X509_STORE_CTX_free(ctx); + X509_STORE_free(store); + return 0; + } + + // Enable chain building - this helps OpenSSL find issuers + X509_VERIFY_PARAM *param = X509_STORE_CTX_get0_param(ctx); + if (param) { + X509_VERIFY_PARAM_set_flags(param, X509_V_FLAG_ALLOW_PROXY_CERTS); + } + + // Verify the chain + // OpenSSL will automatically handle RSASSA-PSS signatures + int verify_result = X509_verify_cert(ctx); + + if (verify_result == 1) { + NIF_LOG("Certificate chain verification: SUCCESS"); + } else { + int err = X509_STORE_CTX_get_error(ctx); + NIF_LOG("Certificate chain verification: FAILED (error %d: %s)", + err, X509_verify_cert_error_string(err)); + } + + // Cleanup + sk_X509_pop_free(untrusted_chain, X509_free); + X509_STORE_CTX_free(ctx); + X509_STORE_free(store); + + return verify_result == 1; +} + +// NIF: verify_report_signature +// Uses OpenSSL to verify ECDSA P-384 signature, matching Rust implementation +static ERL_NIF_TERM nif_verify_report_signature(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { + 
NIF_LOG("verify_report_signature called"); + ErlNifBinary report_binary; + ErlNifBinary vcek_der; + + // Input validation + if (!enif_inspect_binary(env, argv[0], &report_binary) || report_binary.size != 1184) { + NIF_LOG("Invalid report binary size: %zu (expected 1184)", report_binary.size); + return make_error(env, SNP_ERR_INVALID_INPUT, "Report binary must be exactly 1184 bytes"); + } + + if (!enif_inspect_binary(env, argv[1], &vcek_der)) { + NIF_LOG("Failed to inspect VCEK DER"); + return make_error(env, SNP_ERR_INVALID_INPUT, "VCEK DER must be a binary"); + } + + NIF_LOG("Report size: %zu, VCEK DER size: %zu", report_binary.size, vcek_der.size); + + // Extract measurable bytes (first 672 bytes = 0x2A0) + const unsigned char *measurable_bytes = report_binary.data; + size_t measurable_size = 672; + + // Compute SHA-384 hash + unsigned char hash[SHA384_DIGEST_LENGTH]; + EVP_MD_CTX *md_ctx = EVP_MD_CTX_new(); + if (!md_ctx) { + return make_error(env, SNP_ERR_MEMORY_ERROR, "Failed to create EVP_MD_CTX"); + } + + if (EVP_DigestInit_ex(md_ctx, EVP_sha384(), NULL) != 1 || + EVP_DigestUpdate(md_ctx, measurable_bytes, measurable_size) != 1 || + EVP_DigestFinal_ex(md_ctx, hash, NULL) != 1) { + EVP_MD_CTX_free(md_ctx); + return make_error(env, SNP_ERR_INVALID_INPUT, "Failed to compute SHA-384 hash"); + } + EVP_MD_CTX_free(md_ctx); + + // Parse VCEK certificate from DER + const unsigned char *vcek_data = vcek_der.data; + X509 *vcek = d2i_X509(NULL, &vcek_data, vcek_der.size); + if (!vcek) { + return make_error(env, SNP_ERR_INVALID_INPUT, "Failed to parse VCEK certificate"); + } + + // Extract public key from VCEK + EVP_PKEY *pubkey = X509_get_pubkey(vcek); + X509_free(vcek); + if (!pubkey) { + return make_error(env, SNP_ERR_INVALID_INPUT, "Failed to extract public key from VCEK"); + } + + // Verify it's an EC key on P-384 + if (EVP_PKEY_id(pubkey) != EVP_PKEY_EC) { + EVP_PKEY_free(pubkey); + return make_error(env, SNP_ERR_INVALID_INPUT, "VCEK public key is not an EC key"); 
+ } + + EC_KEY *ec_key = EVP_PKEY_get1_EC_KEY(pubkey); + if (!ec_key) { + EVP_PKEY_free(pubkey); + return make_error(env, SNP_ERR_INVALID_INPUT, "Failed to get EC key from public key"); + } + + const EC_GROUP *group = EC_KEY_get0_group(ec_key); + int nid = EC_GROUP_get_curve_name(group); + if (nid != NID_secp384r1) { + EC_KEY_free(ec_key); + EVP_PKEY_free(pubkey); + return make_error(env, SNP_ERR_INVALID_INPUT, "VCEK is not on P-384 curve"); + } + + // Extract signature R and S from report (72 bytes each, starting at offset 1016) + // For P-384, only the first 48 bytes of each are used (384 bits) + const unsigned char *sig_r_le = report_binary.data + 1016; + const unsigned char *sig_s_le = report_binary.data + 1016 + 72; + + // Convert from little-endian to big-endian (reverse first 48 bytes) + unsigned char sig_r_be[48]; + unsigned char sig_s_be[48]; + for (int i = 0; i < 48; i++) { + sig_r_be[i] = sig_r_le[47 - i]; + sig_s_be[i] = sig_s_le[47 - i]; + } + + // Create ECDSA signature from R and S + BIGNUM *r = BN_bin2bn(sig_r_be, 48, NULL); + BIGNUM *s = BN_bin2bn(sig_s_be, 48, NULL); + if (!r || !s) { + if (r) BN_free(r); + if (s) BN_free(s); + EC_KEY_free(ec_key); + EVP_PKEY_free(pubkey); + return make_error(env, SNP_ERR_MEMORY_ERROR, "Failed to create BIGNUM from signature"); + } + + ECDSA_SIG *sig = ECDSA_SIG_new(); + if (!sig) { + BN_free(r); + BN_free(s); + EC_KEY_free(ec_key); + EVP_PKEY_free(pubkey); + return make_error(env, SNP_ERR_MEMORY_ERROR, "Failed to create ECDSA_SIG"); + } + + ECDSA_SIG_set0(sig, r, s); + + // Verify signature + NIF_LOG("Calling ECDSA_do_verify..."); + int verify_result = ECDSA_do_verify(hash, SHA384_DIGEST_LENGTH, sig, ec_key); + NIF_LOG("ECDSA_do_verify result: %d (1=valid, 0=invalid, -1=error)", verify_result); + + // Cleanup + ECDSA_SIG_free(sig); + EC_KEY_free(ec_key); + EVP_PKEY_free(pubkey); + + if (verify_result == 1) { + NIF_LOG("Signature verification: SUCCESS"); + return enif_make_tuple2(env, enif_make_atom(env, "ok"), 
enif_make_atom(env, "true")); + } else if (verify_result == 0) { + NIF_LOG("Signature verification: FAILED (invalid signature)"); + return enif_make_tuple2(env, enif_make_atom(env, "error"), enif_make_atom(env, "report_signature_invalid")); + } else { + NIF_LOG("Signature verification: ERROR (OpenSSL error)"); + return make_error(env, SNP_ERR_INVALID_INPUT, "ECDSA verification error"); + } +} + +// NIF: verify_signature_nif +// Verifies both certificate chain (ARK -> ASK -> VCEK) and report signature +static ERL_NIF_TERM nif_verify_signature_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { + NIF_LOG("verify_signature_nif called"); + ErlNifBinary report_binary; + ErlNifBinary cert_chain_der; + ErlNifBinary vcek_der; + + // Input validation + if (!enif_inspect_binary(env, argv[0], &report_binary) || report_binary.size != 1184) { + NIF_LOG("Invalid report binary size: %zu (expected 1184)", report_binary.size); + return make_error(env, SNP_ERR_INVALID_INPUT, "Report binary must be exactly 1184 bytes"); + } + + if (!enif_inspect_binary(env, argv[1], &cert_chain_der)) { + NIF_LOG("Failed to inspect cert chain DER"); + return make_error(env, SNP_ERR_INVALID_INPUT, "Certificate chain DER must be a binary"); + } + + if (!enif_inspect_binary(env, argv[2], &vcek_der)) { + NIF_LOG("Failed to inspect VCEK DER"); + return make_error(env, SNP_ERR_INVALID_INPUT, "VCEK DER must be a binary"); + } + + NIF_LOG("Report size: %zu, Cert chain size: %zu, VCEK DER size: %zu", + report_binary.size, cert_chain_der.size, vcek_der.size); + + // Parse certificate chain (ARK + ASK) + const unsigned char *chain_data = cert_chain_der.data; + STACK_OF(X509) *chain = parse_cert_chain(chain_data, cert_chain_der.size); + if (!chain) { + return make_error(env, SNP_ERR_INVALID_INPUT, "Failed to parse certificate chain (expected ARK + ASK)"); + } + + // Parse VCEK certificate + const unsigned char *vcek_data = vcek_der.data; + X509 *vcek = d2i_X509(NULL, &vcek_data, vcek_der.size); + if 
(!vcek) { + sk_X509_pop_free(chain, X509_free); + return make_error(env, SNP_ERR_INVALID_INPUT, "Failed to parse VCEK certificate"); + } + + // Verify certificate chain: ARK -> ASK -> VCEK + if (!verify_cert_chain(chain, vcek)) { + X509_free(vcek); + sk_X509_pop_free(chain, X509_free); + return make_error(env, SNP_ERR_INVALID_INPUT, "Certificate chain verification failed"); + } + + // Now verify the report signature using VCEK + // Extract measurable bytes (first 672 bytes = 0x2A0) + const unsigned char *measurable_bytes = report_binary.data; + size_t measurable_size = 672; + + // Compute SHA-384 hash + unsigned char hash[SHA384_DIGEST_LENGTH]; + EVP_MD_CTX *md_ctx = EVP_MD_CTX_new(); + if (!md_ctx) { + X509_free(vcek); + sk_X509_pop_free(chain, X509_free); + return make_error(env, SNP_ERR_MEMORY_ERROR, "Failed to create EVP_MD_CTX"); + } + + if (EVP_DigestInit_ex(md_ctx, EVP_sha384(), NULL) != 1 || + EVP_DigestUpdate(md_ctx, measurable_bytes, measurable_size) != 1 || + EVP_DigestFinal_ex(md_ctx, hash, NULL) != 1) { + EVP_MD_CTX_free(md_ctx); + X509_free(vcek); + sk_X509_pop_free(chain, X509_free); + return make_error(env, SNP_ERR_INVALID_INPUT, "Failed to compute SHA-384 hash"); + } + EVP_MD_CTX_free(md_ctx); + + // Extract public key from VCEK + EVP_PKEY *pubkey = X509_get_pubkey(vcek); + if (!pubkey) { + X509_free(vcek); + sk_X509_pop_free(chain, X509_free); + return make_error(env, SNP_ERR_INVALID_INPUT, "Failed to extract public key from VCEK"); + } + + // Verify it's an EC key on P-384 + if (EVP_PKEY_id(pubkey) != EVP_PKEY_EC) { + EVP_PKEY_free(pubkey); + X509_free(vcek); + sk_X509_pop_free(chain, X509_free); + return make_error(env, SNP_ERR_INVALID_INPUT, "VCEK public key is not an EC key"); + } + + EC_KEY *ec_key = EVP_PKEY_get1_EC_KEY(pubkey); + if (!ec_key) { + EVP_PKEY_free(pubkey); + X509_free(vcek); + sk_X509_pop_free(chain, X509_free); + return make_error(env, SNP_ERR_INVALID_INPUT, "Failed to get EC key from public key"); + } + + const EC_GROUP 
*group = EC_KEY_get0_group(ec_key); + int nid = EC_GROUP_get_curve_name(group); + if (nid != NID_secp384r1) { + EC_KEY_free(ec_key); + EVP_PKEY_free(pubkey); + X509_free(vcek); + sk_X509_pop_free(chain, X509_free); + return make_error(env, SNP_ERR_INVALID_INPUT, "VCEK is not on P-384 curve"); + } + + // Extract signature R and S from report (72 bytes each, starting at offset 1016) + const unsigned char *sig_r_le = report_binary.data + 1016; + const unsigned char *sig_s_le = report_binary.data + 1016 + 72; + + // Convert from little-endian to big-endian (reverse first 48 bytes) + unsigned char sig_r_be[48]; + unsigned char sig_s_be[48]; + for (int i = 0; i < 48; i++) { + sig_r_be[i] = sig_r_le[47 - i]; + sig_s_be[i] = sig_s_le[47 - i]; + } + + // Create ECDSA signature from R and S + BIGNUM *r = BN_bin2bn(sig_r_be, 48, NULL); + BIGNUM *s = BN_bin2bn(sig_s_be, 48, NULL); + if (!r || !s) { + if (r) BN_free(r); + if (s) BN_free(s); + EC_KEY_free(ec_key); + EVP_PKEY_free(pubkey); + X509_free(vcek); + sk_X509_pop_free(chain, X509_free); + return make_error(env, SNP_ERR_MEMORY_ERROR, "Failed to create BIGNUM from signature"); + } + + ECDSA_SIG *sig = ECDSA_SIG_new(); + if (!sig) { + BN_free(r); + BN_free(s); + EC_KEY_free(ec_key); + EVP_PKEY_free(pubkey); + X509_free(vcek); + sk_X509_pop_free(chain, X509_free); + return make_error(env, SNP_ERR_MEMORY_ERROR, "Failed to create ECDSA_SIG"); + } + + ECDSA_SIG_set0(sig, r, s); + + // Verify signature + NIF_LOG("Calling ECDSA_do_verify..."); + int verify_result = ECDSA_do_verify(hash, SHA384_DIGEST_LENGTH, sig, ec_key); + NIF_LOG("ECDSA_do_verify result: %d (1=valid, 0=invalid, -1=error)", verify_result); + + // Cleanup + ECDSA_SIG_free(sig); + EC_KEY_free(ec_key); + EVP_PKEY_free(pubkey); + X509_free(vcek); + sk_X509_pop_free(chain, X509_free); + + if (verify_result == 1) { + NIF_LOG("Signature verification: SUCCESS"); + return enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_atom(env, "true")); + } else if 
(verify_result == 0) { + NIF_LOG("Signature verification: FAILED (invalid signature)"); + return enif_make_tuple2(env, enif_make_atom(env, "error"), enif_make_atom(env, "report_signature_invalid")); + } else { + NIF_LOG("Signature verification: ERROR (OpenSSL error)"); + return make_error(env, SNP_ERR_INVALID_INPUT, "ECDSA verification error"); + } +} + +// NIF function table +static ErlNifFunc nif_funcs[] = { + {"check_snp_support", 0, nif_check_snp_support}, + {"generate_attestation_report", 2, nif_generate_attestation_report}, + {"verify_report_signature", 2, nif_verify_report_signature}, + {"verify_signature_nif", 3, nif_verify_signature_nif} +}; + +ERL_NIF_INIT(snp_nif, nif_funcs, NULL, NULL, NULL, NULL) \ No newline at end of file diff --git a/rebar.config b/rebar.config index 56927139d..efb6b3f59 100644 --- a/rebar.config +++ b/rebar.config @@ -64,7 +64,6 @@ ]}. {cargo_opts, [ - {src_dir, "native/dev_snp_nif"}, {src_dir, "deps/elmdb/native/elmdb_nif"} ]}. @@ -81,9 +80,15 @@ {port_env, [ {"(linux|darwin|solaris)", "CFLAGS", - "$CFLAGS -I${REBAR_ROOT_DIR}/_build/wamr/core/iwasm/include -I/usr/local/lib/erlang/usr/include/"}, - {"(linux|darwin|solaris)", "LDFLAGS", "$LDFLAGS -L${REBAR_ROOT_DIR}/_build/wamr/lib -lvmlib -lei"}, - {"(linux|darwin|solaris)", "LDLIBS", "-lei"} + "$CFLAGS " + "-Wno-error=incompatible-pointer-types " + "-Wno-error=pointer-sign " + "-I${REBAR_ROOT_DIR}/_build/wamr/core/iwasm/include " + "-I/usr/local/lib/erlang/usr/include/"}, + {"(linux|darwin|solaris)", "LDFLAGS", "$LDFLAGS -L${REBAR_ROOT_DIR}/_build/wamr/lib -lvmlib -lei"}, + {"(linux|darwin|solaris)", "LDLIBS", "-lei"}, + {"linux", "CFLAGS", "$CFLAGS -I/usr/include/openssl"}, + {"linux", "LDFLAGS", "$LDFLAGS -lssl -lcrypto"} ]}. 
{post_hooks, [ @@ -91,19 +96,19 @@ {"(linux|darwin|solaris)", compile, "echo 'Post-compile hooks executed'"}, { compile, "rm -f native/hb_beamr/*.o native/hb_beamr/*.d"}, { compile, "rm -f native/hb_keccak/*.o native/hb_keccak/*.d"}, + { compile, "rm -f native/dev_snp_nif/*.o native/dev_snp_nif/*.d"}, + { compile, "rm -f native/snp_nif/*.o native/snp_nif/*.d"}, { compile, "mkdir -p priv/html"}, { compile, "cp -R src/html/* priv/html"}, + { compile, "mkdir -p priv/ovmf"}, + { compile, "cp OVMF-1.55.fd priv/ovmf/OVMF-1.55.fd"}, { compile, "cp _build/default/lib/elmdb/priv/crates/elmdb_nif/elmdb_nif.so _build/default/lib/elmdb/priv/elmdb_nif.so 2>/dev/null || true" } ]}. {provider_hooks, [ - {pre, [ - {compile, {cargo, build}} - ]}, {post, [ {compile, {pc, compile}}, - {clean, {pc, clean}}, - {clean, {cargo, clean}} + {clean, {pc, clean}} ]} ]}. @@ -118,6 +123,9 @@ {"./priv/hb_keccak.so", [ "./native/hb_keccak/hb_keccak.c", "./native/hb_keccak/hb_keccak_nif.c" + ]}, + {"./priv/snp_nif.so", [ + "./native/snp_nif/snp_nif.c" ]} ]}. @@ -132,7 +140,8 @@ {prometheus_httpd, "2.1.15"}, {prometheus, "6.0.3"}, {graphql, "0.17.1", {pkg, graphql_erl}}, - {luerl, "1.3.0"} + {luerl, "1.3.0"}, + {ssl_cert, "1.0.1"} ]}. {shell, [ @@ -144,7 +153,7 @@ ]}. {relx, [ - {release, {'hb', "0.0.1"}, [hb, b64fast, cowboy, gun, luerl, prometheus, prometheus_cowboy, elmdb]}, + {release, {'hb', "0.0.1"}, [hb, b64fast, cowboy, gun, luerl, prometheus, prometheus_cowboy, elmdb, ssl_cert]}, {include_erts, true}, {extended_start_script, true}, {overlay, [ @@ -155,7 +164,7 @@ ]}. {dialyzer, [ - {plt_extra_apps, [public_key, ranch, cowboy, prometheus, prometheus_cowboy, b64fast, eunit, gun]}, + {plt_extra_apps, [public_key, ranch, cowboy, prometheus, prometheus_cowboy, b64fast, eunit, gun, ssl_cert]}, incremental, {warnings, [no_improper_lists, no_unused]} ]}. 
diff --git a/rebar.lock b/rebar.lock index 6da23354c..785548277 100644 --- a/rebar.lock +++ b/rebar.lock @@ -17,7 +17,8 @@ {<<"prometheus">>,{pkg,<<"prometheus">>,<<"6.0.3">>},0}, {<<"prometheus_cowboy">>,{pkg,<<"prometheus_cowboy">>,<<"0.2.0">>},0}, {<<"prometheus_httpd">>,{pkg,<<"prometheus_httpd">>,<<"2.1.15">>},0}, - {<<"ranch">>,{pkg,<<"ranch">>,<<"2.2.0">>},0}]}. + {<<"ranch">>,{pkg,<<"ranch">>,<<"2.2.0">>},0}, + {<<"ssl_cert">>,{pkg,<<"ssl_cert">>,<<"1.0.1">>},0}]}. [ {pkg_hash,[ {<<"accept">>, <<"CD6E34A2D7E28CA38B2D3CB233734CA0C221EFBC1F171F91FEC5F162CC2D18DA">>}, @@ -30,7 +31,8 @@ {<<"prometheus">>, <<"95302236124C0F919163A7762BF7D2B171B919B6FF6148D26EB38A5D2DEF7B81">>}, {<<"prometheus_cowboy">>, <<"526F75D9850A9125496F78BCEECCA0F237BC7B403C976D44508543AE5967DAD9">>}, {<<"prometheus_httpd">>, <<"8F767D819A5D36275EAB9264AFF40D87279151646776069BF69FBDBBD562BD75">>}, - {<<"ranch">>, <<"25528F82BC8D7C6152C57666CA99EC716510FE0925CB188172F41CE93117B1B0">>}]}, + {<<"ranch">>, <<"25528F82BC8D7C6152C57666CA99EC716510FE0925CB188172F41CE93117B1B0">>}, + {<<"ssl_cert">>, <<"5E4133E7D524141836C045838C98E69964E188707DF12032CE5DA902BB40C9A3">>}]}, {pkg_hash_ext,[ {<<"accept">>, <<"CA69388943F5DAD2E7232A5478F16086E3C872F48E32B88B378E1885A59F5649">>}, {<<"cowboy">>, <<"EA99769574550FE8A83225C752E8A62780A586770EF408816B82B6FE6D46476B">>}, @@ -42,5 +44,6 @@ {<<"prometheus">>, <<"53554ECADAC0354066801D514D1A244DD026175E4EE3A9A30192B71D530C8268">>}, {<<"prometheus_cowboy">>, <<"2C7EB12F4B970D91E3B47BAAD0F138F6ADC34E53EEB0AE18068FF0AFAB441B24">>}, {<<"prometheus_httpd">>, <<"67736D000745184D5013C58A63E947821AB90CB9320BC2E6AE5D3061C6FFE039">>}, - {<<"ranch">>, <<"FA0B99A1780C80218A4197A59EA8D3BDAE32FBFF7E88527D7D8A4787EFF4F8E7">>}]} + {<<"ranch">>, <<"FA0B99A1780C80218A4197A59EA8D3BDAE32FBFF7E88527D7D8A4787EFF4F8E7">>}, + {<<"ssl_cert">>, <<"2E37259313514B854EE0BC5B0696250883568CD1A5FC9EC338D78E27C521E65D">>}]} ]. 
diff --git a/src/dev_green_zone.erl b/src/dev_green_zone.erl index b46b173a7..6c3d376c3 100644 --- a/src/dev_green_zone.erl +++ b/src/dev_green_zone.erl @@ -5,11 +5,83 @@ %%% and node identity cloning. All operations are protected by hardware %%% commitment and encryption. -module(dev_green_zone). + +%% Device API exports -export([info/1, info/3, join/3, init/3, become/3, key/3, is_trusted/3]). +%% Encryption helper functions +-export([encrypt_data/2, decrypt_data/3]). + -include("include/hb.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("public_key/include/public_key.hrl"). +%%% =================================================================== +%%% Type Specifications +%%% =================================================================== + +%% Device API function specs +-spec info(term()) -> #{exports := [atom()]}. +-spec info(term(), term(), map()) -> {ok, map()}. +-spec init(term(), term(), map()) -> {ok, binary()} | {error, binary()}. +-spec join(term(), term(), map()) -> {ok, map()} | {error, map() | binary()}. +-spec key(term(), term(), map()) -> {ok, map()} | {error, binary()}. +-spec become(term(), term(), map()) -> {ok, map()} | {error, binary()}. + +%% Helpers for init/3 +-spec setup_green_zone_config(map()) -> {ok, map()}. +-spec ensure_wallet(map()) -> term(). +-spec ensure_aes_key(map()) -> binary(). + +%% Helpers for join/3 +-spec extract_peer_info(map()) -> + {binary() | undefined, binary() | undefined, boolean()}. +-spec should_join_peer( + binary() | undefined, binary() | undefined, boolean() +) -> boolean(). + +%% Helpers for join_peer/5 +-spec join_peer(binary(), binary(), term(), term(), map()) -> + {ok, map()} | {error, map() | binary()}. +-spec prepare_join_request(map()) -> {ok, map()} | {error, term()}. +-spec verify_peer_response(map(), binary(), map()) -> boolean(). +-spec extract_and_decrypt_zone_key(map(), map()) -> + {ok, binary()} | {error, term()}. +-spec finalize_join_success(binary(), map()) -> {ok, map()}. 
+ +%% Helpers for validate_join/3 +-spec validate_join(term(), map(), map()) -> {ok, map()} | {error, binary()}. +-spec extract_join_request_data(map(), map()) -> + {ok, {binary(), term()}} | {error, term()}. +-spec process_successful_join(binary(), term(), map(), map()) -> {ok, map()}. +-spec validate_peer_opts(map(), map()) -> boolean(). +-spec add_trusted_node(binary(), map(), term(), map()) -> ok. + +%% Helpers for key/3 +-spec get_appropriate_wallet(map()) -> term(). +-spec build_key_response(binary(), binary()) -> {ok, map()}. + +%% Helpers for become/3 +-spec validate_become_params(map()) -> + {ok, {binary(), binary()}} | {error, atom()}. +-spec request_and_verify_peer_key(binary(), binary(), map()) -> + {ok, map()} | {error, atom()}. +-spec finalize_become(map(), binary(), binary(), map()) -> {ok, map()}. +-spec update_node_identity(term(), map()) -> ok. + +%% General/Shared helpers +-spec default_zone_required_opts(map()) -> map(). +-spec replace_self_values(map(), map()) -> map(). +-spec is_trusted(term(), map(), map()) -> {ok, binary()}. +-spec encrypt_payload(binary(), term()) -> binary(). +-spec decrypt_zone_key(binary(), map()) -> {ok, binary()} | {error, binary()}. +-spec try_mount_encrypted_volume(term(), map()) -> ok. + +%% Encryption helper specs +-spec encrypt_data(term(), map()) -> + {ok, {binary(), binary()}} | {error, term()}. +-spec decrypt_data(binary(), binary(), map()) -> + {ok, binary()} | {error, term()}. + %% @doc Controls which functions are exposed via the device API. %% %% This function defines the security boundary for the green zone device by @@ -18,16 +90,14 @@ %% @param _ Ignored parameter %% @returns A map with the `exports' key containing a list of allowed functions info(_) -> - #{ - exports => - [ - <<"info">>, - <<"init">>, - <<"join">>, - <<"become">>, - <<"key">>, - <<"is_trusted">> - ] + #{ + exports => [ + <<"info">>, + <<"init">>, + <<"join">>, + <<"become">>, + <<"key">> + ] }. 
%% @doc Provides information about the green zone device and its API. @@ -37,14 +107,17 @@ info(_) -> %% 2. Version information %% 3. Available API endpoints with their parameters and descriptions %% -%% @param _Base Ignored parameter -%% @param _Req Ignored parameter +%% @param _Msg1 Ignored parameter +%% @param _Msg2 Ignored parameter %% @param _Opts A map of configuration options %% @returns {ok, Map} containing the device information and documentation -info(_Base, _Req, _Opts) -> +info(_Msg1, _Msg2, _Opts) -> InfoBody = #{ <<"description">> => - <<"Green Zone secure communication and identity management for trusted nodes">>, + << + "Green Zone secure communication", + "and identity management for trusted nodes" + >>, <<"version">> => <<"1.0">>, <<"api">> => #{ <<"info">> => #{ @@ -53,109 +126,57 @@ info(_Base, _Req, _Opts) -> <<"init">> => #{ <<"description">> => <<"Initialize the green zone">>, <<"details">> => - <<"Sets up the node's cryptographic identity with wallet and AES key">> + << + "Sets up the node's cryptographic", + "identity with wallet and AES key" + >> }, <<"join">> => #{ <<"description">> => <<"Join an existing green zone">>, <<"required_node_opts">> => #{ - <<"green_zone_peer_location">> => <<"Target peer's address">>, - <<"green_zone_peer_id">> => <<"Target peer's unique identifier">> + <<"green_zone_peer_location">> => + <<"Target peer's address">>, + <<"green_zone_peer_id">> => + <<"Target peer's unique identifier">> } }, <<"key">> => #{ - <<"description">> => <<"Retrieve and encrypt the node's private key">>, + <<"description">> => + <<"Retrieve and encrypt the node's private key">>, <<"details">> => - <<"Returns the node's private key encrypted with the shared AES key">> + << + "Returns the node's private key encrypted", + "with the shared AES key" + >> }, <<"become">> => #{ <<"description">> => <<"Clone the identity of a target node">>, <<"required_node_opts">> => #{ - <<"green_zone_peer_location">> => <<"Target peer's address">>, - 
<<"green_zone_peer_id">> => <<"Target peer's unique identifier">> + <<"green_zone_peer_location">> => + <<"Target peer's address">>, + <<"green_zone_peer_id">> => + <<"Target peer's unique identifier">> } } } }, {ok, #{<<"status">> => 200, <<"body">> => InfoBody}}. -%% @doc Provides the default required options for a green zone. -%% -%% This function defines the baseline security requirements for nodes in a green zone: -%% 1. Restricts loading of remote devices and only allows trusted signers -%% 2. Limits to preloaded devices from the initiating machine -%% 3. Enforces specific store configuration -%% 4. Prevents route changes from the defaults -%% 5. Requires matching hooks across all peers -%% 6. Disables message scheduling to prevent conflicts -%% 7. Enforces a permanent state to prevent further configuration changes -%% -%% @param Opts A map of configuration options from which to derive defaults -%% @returns A map of required configuration options for the green zone --spec default_zone_required_opts(Opts :: map()) -> map(). -default_zone_required_opts(Opts) -> - #{ - % trusted_device_signers => hb_opts:get(trusted_device_signers, [], Opts), - % load_remote_devices => hb_opts:get(load_remote_devices, false, Opts), - % preload_devices => hb_opts:get(preload_devices, [], Opts), - % % store => hb_opts:get(store, [], Opts), - % routes => hb_opts:get(routes, [], Opts), - % on => hb_opts:get(on, undefined, Opts), - % scheduling_mode => disabled, - % initialized => permanent - }. - -%% @doc Replace values of <<"self">> in a configuration map with corresponding values from Opts. -%% -%% This function iterates through all key-value pairs in the configuration map. -%% If a value is <<"self">>, it replaces that value with the result of -%% hb_opts:get(Key, not_found, Opts) where Key is the corresponding key. 
-%% -%% @param Config The configuration map to process -%% @param Opts The options map to fetch replacement values from -%% @returns A new map with <<"self">> values replaced --spec replace_self_values(Config :: map(), Opts :: map()) -> map(). -replace_self_values(Config, Opts) -> - maps:map( - fun(Key, Value) -> - case Value of - <<"self">> -> - hb_opts:get(Key, not_found, Opts); - _ -> - Value - end - end, - Config - ). - -%% @doc Returns `true' if the request is signed by a trusted node. -is_trusted(_M1, Req, Opts) -> - Signers = hb_message:signers(Req, Opts), - {ok, - hb_util:bin( - lists:any( - fun(Signer) -> - lists:member( - Signer, - maps:keys(hb_opts:get(trusted_nodes, #{}, Opts)) - ) - end, - Signers - ) - ) - }. %% @doc Initialize the green zone for a node. %% %% This function performs the following operations: -%% 1. Validates the node's history to ensure this is a valid initialization -%% 2. Retrieves or creates a required configuration for the green zone +%% 1. Checks if the green zone is already initialized +%% 2. Sets up and processes the required configuration for the green zone %% 3. Ensures a wallet (keypair) exists or creates a new one %% 4. Generates a new 256-bit AES key for secure communication %% 5. Updates the node's configuration with these cryptographic identities +%% 6. Attempts to mount an encrypted volume using the AES key %% %% Config options in Opts map: %% - green_zone_required_config: (Optional) Custom configuration requirements -%% - priv_wallet: (Optional) Existing wallet to use instead of creating a new one +%% - priv_wallet: (Optional) Existing wallet to use instead of creating +%% a new one %% - priv_green_zone_aes: (Optional) Existing AES key, if already part of a zone %% %% @param _M1 Ignored parameter @@ -163,66 +184,46 @@ is_trusted(_M1, Req, Opts) -> %% @param Opts A map of configuration options %% @returns `{ok, Binary}' on success with confirmation message, or %% `{error, Binary}' on failure with error message. 
--spec init(M1 :: term(), M2 :: term(), Opts :: map()) -> {ok, binary()} | {error, binary()}. init(_M1, _M2, Opts) -> ?event(green_zone, {init, start}), - case hb_opts:get(green_zone_initialized, false, Opts) of + maybe + % Check if already initialized + false ?= hb_opts:get(green_zone_initialized, false, Opts), + % Setup configuration + {ok, ProcessedRequiredConfig} ?= setup_green_zone_config(Opts), + % Ensure wallet and AES key exist + NodeWallet = ensure_wallet(Opts), + GreenZoneAES = ensure_aes_key(Opts), + % Store configuration and finalize setup + NewOpts = Opts#{ + priv_wallet => NodeWallet, + priv_green_zone_aes => GreenZoneAES, + trusted_nodes => #{}, + green_zone_required_opts => ProcessedRequiredConfig, + green_zone_initialized => true + }, + hb_http_server:set_opts(NewOpts), + try_mount_encrypted_volume(GreenZoneAES, NewOpts), + ?event(green_zone, {init, complete}), + {ok, <<"Green zone initialized successfully.">>} + else true -> {error, <<"Green zone already initialized.">>}; - false -> - RequiredConfig = hb_opts:get( - <<"green_zone_required_config">>, - default_zone_required_opts(Opts), - Opts - ), - % Process RequiredConfig to replace <<"self">> values with actual values from Opts - ProcessedRequiredConfig = replace_self_values(RequiredConfig, Opts), - ?event(green_zone, {init, required_config, ProcessedRequiredConfig}), - % Check if a wallet exists; create one if absent. - NodeWallet = case hb_opts:get(priv_wallet, undefined, Opts) of - undefined -> - ?event(green_zone, {init, wallet, missing}), - hb:wallet(); - ExistingWallet -> - ?event(green_zone, {init, wallet, found}), - ExistingWallet - end, - % Generate a new 256-bit AES key if we have not already joined - % a green zone. 
- GreenZoneAES = - case hb_opts:get(priv_green_zone_aes, undefined, Opts) of - undefined -> - ?event(green_zone, {init, aes_key, generated}), - crypto:strong_rand_bytes(32); - ExistingAES -> - ?event(green_zone, {init, aes_key, found}), - ExistingAES - end, - % Store the wallet, AES key, and an empty trusted nodes map. - hb_http_server:set_opts(NewOpts =Opts#{ - priv_wallet => NodeWallet, - priv_green_zone_aes => GreenZoneAES, - trusted_nodes => #{}, - green_zone_required_opts => ProcessedRequiredConfig, - green_zone_initialized => true - }), - try_mount_encrypted_volume(GreenZoneAES, NewOpts), - ?event(green_zone, {init, complete}), - {ok, <<"Green zone initialized successfully.">>} + Error -> + ?event(green_zone, {init, error, Error}), + {error, <<"Failed to initialize green zone">>} end. %% @doc Initiates the join process for a node to enter an existing green zone. %% -%% This function performs the following operations depending on the state: -%% 1. Validates the node's history to ensure proper initialization -%% 2. Checks for target peer information (location and ID) -%% 3. If target peer is specified: -%% a. Generates a commitment report for the peer -%% b. Prepares and sends a POST request to the target peer -%% c. Verifies the response and decrypts the returned zone key -%% d. Updates local configuration with the shared AES key -%% 4. If no peer is specified, processes the join request locally +%% This function determines the appropriate join strategy and routes to the +%% correct handler: +%% 1. Extracts peer information from configuration options +%% 2. Determines whether to join a specific peer or validate a local request +%% 3. Routes to join_peer/5 if peer details are provided and node has +%% no identity +%% 4. 
Routes to validate_join/3 for local join request processing %% %% Config options in Opts map: %% - green_zone_peer_location: Target peer's address @@ -235,29 +236,30 @@ init(_M1, _M2, Opts) -> %% @param Opts A map of configuration options for join operations %% @returns `{ok, Map}' on success with join response details, or %% `{error, Binary}' on failure with error message. --spec join(M1 :: term(), M2 :: term(), Opts :: map()) -> - {ok, map()} | {error, binary()}. join(M1, M2, Opts) -> ?event(green_zone, {join, start}), - PeerLocation = hb_opts:get(<<"green_zone_peer_location">>, undefined, Opts), - PeerID = hb_opts:get(<<"green_zone_peer_id">>, undefined, Opts), - Identities = hb_opts:get(identities, #{}, Opts), - HasGreenZoneIdentity = maps:is_key(<<"green-zone">>, Identities), - ?event(green_zone, {join_peer, PeerLocation, PeerID, HasGreenZoneIdentity}), - if (not HasGreenZoneIdentity) andalso (PeerLocation =/= undefined) andalso (PeerID =/= undefined) -> - join_peer(PeerLocation, PeerID, M1, M2, Opts); - true -> - validate_join(M1, M2, hb_cache:ensure_all_loaded(Opts, Opts)) + maybe + % Extract peer information and determine join strategy + {PeerLocation, PeerID, HasGreenZoneIdentity} = extract_peer_info(Opts), + ?event(green_zone, + {join_peer, PeerLocation, PeerID, HasGreenZoneIdentity} + ), + % Route to appropriate join handler based on configuration + case should_join_peer(PeerLocation, PeerID, HasGreenZoneIdentity) of + true -> + join_peer(PeerLocation, PeerID, M1, M2, Opts); + false -> + validate_join(M1, M2, hb_cache:ensure_all_loaded(Opts, Opts)) + end end. %% @doc Encrypts and provides the node's private key for secure sharing. %% %% This function performs the following operations: -%% 1. Retrieves the shared AES key and the node's wallet -%% 2. Verifies that the node is part of a green zone (has a shared AES key) -%% 3. Generates a random initialization vector (IV) for encryption -%% 4. 
Encrypts the node's private key using AES-256-GCM with the shared key -%% 5. Returns the encrypted key and IV for secure transmission +%% 1. Determines the appropriate wallet to use (green-zone identity or default) +%% 2. Extracts the private key components from the wallet +%% 3. Encrypts the private key using the green zone AES key via helper function +%% 4. Builds and returns a standardized response with encrypted key and IV %% %% Required configuration in Opts map: %% - priv_green_zone_aes: The shared AES key for the green zone @@ -268,56 +270,36 @@ join(M1, M2, Opts) -> %% @param Opts A map of configuration options %% @returns `{ok, Map}' containing the encrypted key and IV on success, or %% `{error, Binary}' if the node is not part of a green zone --spec key(M1 :: term(), M2 :: term(), Opts :: map()) -> - {ok, map()} | {error, binary()}. key(_M1, _M2, Opts) -> ?event(green_zone, {get_key, start}), - % Retrieve the shared AES key and the node's wallet. - GreenZoneAES = hb_opts:get(priv_green_zone_aes, undefined, Opts), - Identities = hb_opts:get(identities, #{}, Opts), - Wallet = case maps:find(<<"green-zone">>, Identities) of - {ok, #{priv_wallet := GreenZoneWallet}} -> GreenZoneWallet; - _ -> hb_opts:get(priv_wallet, undefined, Opts) - end, - {{KeyType, Priv, Pub}, _PubKey} = Wallet, - ?event(green_zone, - {get_key, wallet, hb_util:human_id(ar_wallet:to_address(Pub))}), - case GreenZoneAES of - undefined -> - % Log error if no shared AES key is found. 
+ maybe + % Get appropriate wallet (green-zone identity or default) + Wallet = get_appropriate_wallet(Opts), + {{KeyType, Priv, Pub}, _PubKey} = Wallet, + ?event(green_zone, + {get_key, wallet, hb_util:human_id(ar_wallet:to_address(Pub))}), + % Encrypt the node's private key using the helper function + {ok, {EncryptedData, IV}} ?= encrypt_data({KeyType, Priv, Pub}, Opts), + ?event(green_zone, {get_key, encrypt, complete}), + build_key_response(EncryptedData, IV) + else + {error, no_green_zone_aes_key} -> ?event(green_zone, {get_key, error, <<"no aes key">>}), {error, <<"Node not part of a green zone.">>}; - _ -> - % Generate an IV and encrypt the node's private key using AES-256-GCM. - IV = crypto:strong_rand_bytes(16), - {EncryptedKey, Tag} = crypto:crypto_one_time_aead( - aes_256_gcm, - GreenZoneAES, - IV, - term_to_binary({KeyType, Priv, Pub}), - <<>>, - true - ), - - % Log successful encryption of the private key. - ?event(green_zone, {get_key, encrypt, complete}), - {ok, #{ - <<"status">> => 200, - <<"encrypted_key">> => - base64:encode(<>), - <<"iv">> => base64:encode(IV) - }} + {error, EncryptError} -> + ?event(green_zone, {get_key, encrypt_error, EncryptError}), + {error, <<"Encryption failed">>}; + Error -> + ?event(green_zone, {get_key, unexpected_error, Error}), + {error, <<"Failed to retrieve key">>} end. %% @doc Clones the identity of a target node in the green zone. %% %% This function performs the following operations: -%% 1. Retrieves target node location and ID from the configuration -%% 2. Verifies that the local node has a valid shared AES key -%% 3. Requests the target node's encrypted key via its key endpoint -%% 4. Verifies the response is from the expected peer -%% 5. Decrypts the target node's private key using the shared AES key -%% 6. Updates the local node's wallet with the target node's identity +%% 1. Validates required parameters and green zone membership +%% 2. Requests and verifies the target node's encrypted key +%% 3. 
Finalizes the identity adoption process through helper functions %% %% Required configuration in Opts map: %% - green_zone_peer_location: Target node's address @@ -330,102 +312,137 @@ key(_M1, _M2, Opts) -> %% @returns `{ok, Map}' on success with confirmation details, or %% `{error, Binary}' if the node is not part of a green zone or %% identity adoption fails. --spec become(M1 :: term(), M2 :: term(), Opts :: map()) -> - {ok, map()} | {error, binary()}. become(_M1, _M2, Opts) -> ?event(green_zone, {become, start}), - % 1. Retrieve the target node's address from the incoming message. - NodeLocation = hb_opts:get(<<"green_zone_peer_location">>, undefined, Opts), - NodeID = hb_opts:get(<<"green_zone_peer_id">>, undefined, Opts), - % 2. Check if the local node has a valid shared AES key. - GreenZoneAES = hb_opts:get(priv_green_zone_aes, undefined, Opts), - case GreenZoneAES of - undefined -> - % Shared AES key not found: node is not part of a green zone. + maybe + % Validate required parameters and green zone membership + {ok, {NodeLocation, NodeID}} ?= validate_become_params(Opts), + % Request and verify peer's encrypted key + {ok, KeyResp} ?= + request_and_verify_peer_key(NodeLocation, NodeID, Opts), + % Finalize identity adoption + finalize_become(KeyResp, NodeLocation, NodeID, Opts) + else + {error, no_green_zone_aes_key} -> ?event(green_zone, {become, error, <<"no aes key">>}), {error, <<"Node not part of a green zone.">>}; - _ -> - % 3. Request the target node's encrypted key from its key endpoint. - ?event(green_zone, {become, getting_key, NodeLocation, NodeID}), - {ok, KeyResp} = hb_http:get(NodeLocation, - <<"/~greenzone@1.0/key">>, Opts), - Signers = hb_message:signers(KeyResp, Opts), - case hb_message:verify(KeyResp, Signers, Opts) and - lists:member(NodeID, Signers) of - false -> - % The response is not from the expected peer. 
- {error, <<"Received incorrect response from peer!">>}; - true -> - finalize_become(KeyResp, NodeLocation, NodeID, - GreenZoneAES, Opts) - end + {error, missing_peer_location} -> + {error, <<"green_zone_peer_location required">>}; + {error, missing_peer_id} -> + {error, <<"green_zone_peer_id required">>}; + {error, invalid_peer_response} -> + {error, <<"Received incorrect response from peer!">>}; + Error -> + ?event(green_zone, {become, unexpected_error, Error}), + {error, <<"Failed to adopt target node identity">>} end. -finalize_become(KeyResp, NodeLocation, NodeID, GreenZoneAES, Opts) -> - % 4. Decode the response to obtain the encrypted key and IV. - Combined = - base64:decode( - hb_ao:get(<<"encrypted_key">>, KeyResp, Opts)), - IV = base64:decode(hb_ao:get(<<"iv">>, KeyResp, Opts)), - % 5. Separate the ciphertext and the authentication tag. - CipherLen = byte_size(Combined) - 16, - <> = Combined, - % 6. Decrypt the ciphertext using AES-256-GCM with the shared AES - % key and IV. - DecryptedBin = crypto:crypto_one_time_aead( - aes_256_gcm, - GreenZoneAES, - IV, - Ciphertext, - <<>>, - Tag, - false + +%%% =================================================================== +%%% Internal Helper Functions +%%% =================================================================== + +%%% ------------------------------------------------------------------- +%%% Helpers for init/3 +%%% ------------------------------------------------------------------- + +%% @doc Setup and process green zone configuration. +%% +%% This function retrieves the required configuration, processes any +%% "self" placeholder values, and returns the processed configuration. 
+%% +%% @param Opts Configuration options +%% @returns {ok, ProcessedConfig} with processed configuration +setup_green_zone_config(Opts) -> + RequiredConfig = hb_opts:get( + <<"green_zone_required_config">>, + default_zone_required_opts(Opts), + Opts ), - OldWallet = hb_opts:get(priv_wallet, undefined, Opts), - OldWalletAddr = hb_util:human_id(ar_wallet:to_address(OldWallet)), - ?event(green_zone, {become, old_wallet, OldWalletAddr}), - % Print the decrypted binary - ?event(green_zone, {become, decrypted_bin, DecryptedBin}), - % 7. Convert the decrypted binary into the target node's keypair. - {KeyType, Priv, Pub} = binary_to_term(DecryptedBin), - % Print the keypair - ?event(green_zone, {become, keypair, Pub}), - % 8. Add the target node's keypair to the local node's identities. - GreenZoneWallet = {{KeyType, Priv, Pub}, {KeyType, Pub}}, + ProcessedRequiredConfig = replace_self_values(RequiredConfig, Opts), + ?event(green_zone, {init, required_config, ProcessedRequiredConfig}), + {ok, ProcessedRequiredConfig}. + +%% @doc Ensure a wallet exists, creating one if necessary. +%% +%% This function checks if a wallet already exists in the configuration +%% and creates a new one if needed. +%% +%% @param Opts Configuration options +%% @returns Wallet (existing or newly created) +ensure_wallet(Opts) -> + case hb_opts:get(priv_wallet, undefined, Opts) of + undefined -> + ?event(green_zone, {init, wallet, missing}), + hb:wallet(); + ExistingWallet -> + ?event(green_zone, {init, wallet, found}), + ExistingWallet + end. + +%% @doc Ensure an AES key exists, generating one if necessary. +%% +%% This function checks if a green zone AES key already exists and +%% generates a new 256-bit key if needed. 
+%% +%% @param Opts Configuration options +%% @returns AES key (existing or newly generated) +ensure_aes_key(Opts) -> + case hb_opts:get(priv_green_zone_aes, undefined, Opts) of + undefined -> + ?event(green_zone, {init, aes_key, generated}), + crypto:strong_rand_bytes(32); + ExistingAES -> + ?event(green_zone, {init, aes_key, found}), + ExistingAES + end. + +%%% ------------------------------------------------------------------- +%%% Helpers for join/3 +%%% ------------------------------------------------------------------- + +%% @doc Extract peer information from configuration options. +%% +%% This function extracts the peer location, peer ID, and checks if the +%% node already has a green zone identity. +%% +%% @param Opts Configuration options +%% @returns {PeerLocation, PeerID, HasGreenZoneIdentity} tuple +extract_peer_info(Opts) -> + PeerLocation = hb_opts:get(<<"green_zone_peer_location">>, undefined, Opts), + PeerID = hb_opts:get(<<"green_zone_peer_id">>, undefined, Opts), Identities = hb_opts:get(identities, #{}, Opts), - UpdatedIdentities = Identities#{ - <<"green-zone">> => #{ - priv_wallet => GreenZoneWallet - } - }, - NewOpts = Opts#{ - identities => UpdatedIdentities - }, - ok = - hb_http_server:set_opts( - NewOpts - ), - try_mount_encrypted_volume(GreenZoneWallet, NewOpts), - ?event(green_zone, {become, update_wallet, complete}), - {ok, #{ - <<"body">> => #{ - <<"message">> => <<"Successfully adopted target node identity">>, - <<"peer-location">> => NodeLocation, - <<"peer-id">> => NodeID - } - }}. + HasGreenZoneIdentity = maps:is_key(<<"green-zone">>, Identities), + {PeerLocation, PeerID, HasGreenZoneIdentity}. + +%% @doc Determine whether to join a specific peer or validate locally. 
+%% +%% This function implements the decision logic for join strategy: +%% - Join peer if: no existing identity AND peer location AND peer ID provided +%% - Validate locally otherwise +%% +%% @param PeerLocation Target peer location (may be undefined) +%% @param PeerID Target peer ID (may be undefined) +%% @param HasGreenZoneIdentity Whether node already has green zone identity +%% @returns true if should join peer, false if should validate locally +should_join_peer(PeerLocation, PeerID, HasGreenZoneIdentity) -> + (not HasGreenZoneIdentity) andalso + (PeerLocation =/= undefined) andalso + (PeerID =/= undefined). + +%%% ------------------------------------------------------------------- +%%% Helpers for join_peer/5 +%%% ------------------------------------------------------------------- %% @doc Processes a join request to a specific peer node. %% %% This function handles the client-side join flow when connecting to a peer: %% 1. Verifies the node is not already in a green zone -%% 2. Optionally adopts configuration from the target peer -%% 3. Generates a hardware-backed commitment report -%% 4. Sends a POST request to the peer's join endpoint -%% 5. Verifies the response signature -%% 6. Decrypts the returned AES key -%% 7. Updates local configuration with the shared key -%% 8. Optionally mounts an encrypted volume using the shared key +%% 2. Prepares a join request with commitment report and public key +%% 3. Sends the join request to the target peer +%% 4. Verifies the response is from the expected peer +%% 5. Extracts and decrypts the zone key from the response +%% 6. 
Finalizes the join by updating configuration with the shared key %% %% @param PeerLocation The target peer's address %% @param PeerID The target peer's unique identifier @@ -434,173 +451,222 @@ finalize_become(KeyResp, NodeLocation, NodeID, GreenZoneAES, Opts) -> %% @param InitOpts A map of initial configuration options %% @returns `{ok, Map}' on success with confirmation message, or %% `{error, Map|Binary}' on failure with error details --spec join_peer( - PeerLocation :: binary(), - PeerID :: binary(), - M1 :: term(), - M2 :: term(), - Opts :: map()) -> {ok, map()} | {error, map() | binary()}. join_peer(PeerLocation, PeerID, _M1, _M2, InitOpts) -> - % Check here if the node is already part of a green zone. - GreenZoneAES = hb_opts:get(priv_green_zone_aes, undefined, InitOpts), - case GreenZoneAES == undefined of - true -> - Wallet = hb_opts:get(priv_wallet, undefined, InitOpts), - {ok, Report} = dev_snp:generate(#{}, #{}, InitOpts), - WalletPub = element(2, Wallet), - ?event(green_zone, {remove_uncommitted, Report}), - MergedReq = hb_ao:set( - Report, - <<"public_key">>, - base64:encode(term_to_binary(WalletPub)), - InitOpts - ), - % Create an committed join request using the wallet. - % hb_message:commit expects Opts map (which contains priv_wallet), not wallet tuple - Req = hb_cache:ensure_all_loaded( - hb_message:commit(MergedReq, InitOpts), + maybe + % Verify node is not already in a green zone + undefined ?= hb_opts:get(priv_green_zone_aes, undefined, InitOpts), + % Prepare join request + {ok, Req} ?= prepare_join_request(InitOpts), + % Send join request to peer + ?event(green_zone, + {join, sending_commitment, PeerLocation, PeerID, Req} + ), + {ok, Resp} ?= + hb_http:post( + PeerLocation, + <<"/~greenzone@1.0/join">>, + Req, InitOpts ), - ?event({join_req, {explicit, Req}}), - ?event({verify_res, hb_message:verify(Req)}), - % Log that the commitment report is being sent to the peer. 
- ?event(green_zone, {join, sending_commitment, PeerLocation, PeerID, Req}), - case hb_http:post(PeerLocation, <<"/~greenzone@1.0/join">>, Req, InitOpts) of - {ok, Resp} -> - % Log the response received from the peer. - ?event(green_zone, {join, join_response, PeerLocation, PeerID, Resp}), - % Ensure that the response is from the expected peer, avoiding - % the risk of a man-in-the-middle attack. - Signers = hb_message:signers(Resp, InitOpts), - ?event(green_zone, {join, signers, Signers}), - IsVerified = hb_message:verify(Resp, Signers, InitOpts), - ?event(green_zone, {join, verify, IsVerified}), - IsPeerSigner = lists:member(PeerID, Signers), - ?event(green_zone, {join, peer_is_signer, IsPeerSigner, PeerID}), - case IsPeerSigner andalso IsVerified of - false -> - % The response is not from the expected peer. - {error, <<"Received incorrect response from peer!">>}; - true -> - % Extract the encrypted shared AES key (zone-key) - % from the response. - ZoneKey = hb_ao:get(<<"zone-key">>, Resp, InitOpts), - % Decrypt the zone key using the local node's - % private key. - {ok, AESKey} = decrypt_zone_key(ZoneKey, InitOpts), - % Update local configuration with the retrieved - % shared AES key. 
- ?event(green_zone, {opts, {explicit, InitOpts}}), - NewOpts = InitOpts#{ - priv_green_zone_aes => AESKey - }, - hb_http_server:set_opts(NewOpts), - {ok, #{ - <<"body">> => - <<"Node joined green zone successfully.">>, - <<"status">> => 200 - }} - end; - {error, Reason} -> - {error, #{<<"status">> => 400, <<"reason">> => Reason}}; - {unavailable, Reason} -> - ?event(green_zone, { - join_error, - peer_unavailable, - PeerLocation, - PeerID, - Reason - }), - {error, #{ - <<"status">> => 503, - <<"body">> => <<"Peer node is unreachable.">> - }} - end; - false -> + % Verify response from expected peer + true ?= verify_peer_response(Resp, PeerID, InitOpts), + % Extract and decrypt zone key + {ok, AESKey} ?= extract_and_decrypt_zone_key(Resp, InitOpts), + % Update configuration with shared key + finalize_join_success(AESKey, InitOpts) + else + {error, already_joined} -> ?event(green_zone, {join, already_joined}), {error, <<"Node already part of green zone.">>}; {error, Reason} -> - % Log the error and return the initial options. - ?event(green_zone, {join, error, Reason}), - {error, Reason} + {error, #{<<"status">> => 400, <<"reason">> => Reason}}; + {unavailable, Reason} -> + ?event(green_zone, { + join_error, peer_unavailable, PeerLocation, PeerID, Reason + }), + {error, #{ + <<"status">> => 503, + <<"body">> => <<"Peer node is unreachable.">> + }}; + false -> + {error, <<"Received incorrect response from peer!">>}; + Error -> + ?event(green_zone, {join, error, Error}), + {error, Error} end. -%%%-------------------------------------------------------------------- -%%% Internal Functions -%%%-------------------------------------------------------------------- +%% @doc Prepare a join request with commitment report and public key. +%% +%% This function creates a hardware-backed commitment report and prepares +%% the join request message with the node's public key. 
+%% +%% @param InitOpts Initial configuration options +%% @returns {ok, Req} with prepared request, or {error, Reason} +prepare_join_request(InitOpts) -> + maybe + Wallet = hb_opts:get(priv_wallet, undefined, InitOpts), + {ok, Report} ?= dev_snp:generate(#{}, #{}, InitOpts), + WalletPub = element(2, Wallet), + ?event(green_zone, {remove_uncommitted, Report}), + MergedReq = hb_ao:set( + Report, + <<"public_key">>, + base64:encode(term_to_binary(WalletPub)), + InitOpts + ), + % Create committed join request using the wallet + Req = hb_cache:ensure_all_loaded( + hb_message:commit(MergedReq, InitOpts), + InitOpts + ), + ?event({join_req, {explicit, Req}}), + ?event({verify_res, hb_message:verify(Req)}), + {ok, Req} + end. + +%% @doc Verify that response is from expected peer. +%% +%% This function verifies the response signature and ensures it comes +%% from the expected peer to prevent man-in-the-middle attacks. +%% +%% @param Resp Response from peer +%% @param PeerID Expected peer identifier +%% @param InitOpts Configuration options +%% @returns true if verified, false otherwise +verify_peer_response(Resp, PeerID, InitOpts) -> + ?event(green_zone, {join, join_response, Resp}), + Signers = hb_message:signers(Resp, InitOpts), + ?event(green_zone, {join, signers, Signers}), + IsVerified = hb_message:verify(Resp, Signers, InitOpts), + ?event(green_zone, {join, verify, IsVerified}), + IsPeerSigner = lists:member(PeerID, Signers), + ?event(green_zone, {join, peer_is_signer, IsPeerSigner, PeerID}), + IsPeerSigner andalso IsVerified. + +%% @doc Extract and decrypt zone key from peer response. +%% +%% This function extracts the encrypted zone key from the peer's response +%% and decrypts it using the local node's private key. 
+%% +%% @param Resp Response containing encrypted zone key +%% @param InitOpts Configuration options +%% @returns {ok, AESKey} with decrypted key, or {error, Reason} +extract_and_decrypt_zone_key(Resp, InitOpts) -> + ZoneKey = hb_ao:get(<<"zone-key">>, Resp, InitOpts), + decrypt_zone_key(ZoneKey, InitOpts). + +%% @doc Finalize successful join by updating configuration. +%% +%% This function updates the node's configuration with the shared AES key +%% and returns a success response. +%% +%% @param AESKey Decrypted shared AES key +%% @param InitOpts Initial configuration options +%% @returns {ok, Map} with success response +finalize_join_success(AESKey, InitOpts) -> + ?event(green_zone, {opts, {explicit, InitOpts}}), + NewOpts = InitOpts#{priv_green_zone_aes => AESKey}, + hb_http_server:set_opts(NewOpts), + {ok, #{ + <<"body">> => <<"Node joined green zone successfully.">>, + <<"status">> => 200 + }}. + +%%% ------------------------------------------------------------------- +%%% Helpers for validate_join/3 +%%% ------------------------------------------------------------------- %% @doc Validates an incoming join request from another node. %% %% This function handles the server-side join flow when receiving a connection %% request: %% 1. Validates the peer's configuration meets required standards -%% 2. Extracts the commitment report and public key from the request +%% 2. Extracts join request data (node address and public key) %% 3. Verifies the hardware-backed commitment report -%% 4. Adds the joining node to the trusted nodes list -%% 5. Encrypts the shared AES key with the peer's public key -%% 6. Returns the encrypted key to the requesting node +%% 4. 
Processes the successful join through helper functions %% %% @param M1 Ignored parameter %% @param Req The join request containing commitment report and public key %% @param Opts A map of configuration options %% @returns `{ok, Map}' on success with encrypted AES key, or %% `{error, Binary}' on failure with error message --spec validate_join(M1 :: term(), Req :: map(), Opts :: map()) -> - {ok, map()} | {error, binary()}. validate_join(M1, Req, Opts) -> - case validate_peer_opts(Req, Opts) of - true -> do_nothing; - false -> throw(invalid_join_request) - end, - ?event(green_zone, {join, start}), - % Retrieve the commitment report and address from the join request. - Report = hb_ao:get(<<"report">>, Req, Opts), - NodeAddr = hb_ao:get(<<"address">>, Req, Opts), - ?event(green_zone, {join, extract, {node_addr, NodeAddr}}), - % Retrieve and decode the joining node's public key. - ?event(green_zone, {m1, {explicit, M1}}), - ?event(green_zone, {req, {explicit, Req}}), - EncodedPubKey = hb_ao:get(<<"public_key">>, Req, Opts), - ?event(green_zone, {encoded_pub_key, {explicit, EncodedPubKey}}), - RequesterPubKey = case EncodedPubKey of - not_found -> not_found; - Encoded -> binary_to_term(base64:decode(Encoded)) - end, - ?event(green_zone, {public_key, {explicit, RequesterPubKey}}), - % Verify the commitment report provided in the join request. - case dev_snp:verify(M1, Req, Opts) of - {ok, <<"true">>} -> - % Commitment verified. - ?event(green_zone, {join, commitment, verified}), - % Retrieve the shared AES key used for encryption. - GreenZoneAES = hb_opts:get(priv_green_zone_aes, undefined, Opts), - ?event(green_zone, {green_zone_aes, {explicit, GreenZoneAES}}), - % Retrieve the local node's wallet to extract its public key. - {WalletPubKey, _} = hb_opts:get(priv_wallet, undefined, Opts), - % Add the joining node's details to the trusted nodes list. - add_trusted_node(NodeAddr, Report, RequesterPubKey, Opts), - % Log the update of trusted nodes. 
- ?event(green_zone, {join, update, trusted_nodes, ok}), - % Encrypt the shared AES key with the joining node's public key. - EncryptedPayload = encrypt_payload(GreenZoneAES, RequesterPubKey), - % Log completion of AES key encryption. - ?event(green_zone, {join, encrypt, aes_key, complete}), - {ok, #{ - <<"body">> => <<"Node joined green zone successfully.">>, - <<"node-address">> => NodeAddr, - <<"zone-key">> => base64:encode(EncryptedPayload), - <<"public_key">> => WalletPubKey - }}; + maybe + ?event(green_zone, {join, start}), + % Validate peer configuration + true ?= validate_peer_opts(Req, Opts), + % Extract join request data + {ok, {NodeAddr, RequesterPubKey}} ?= + extract_join_request_data(Req, Opts), + % Verify commitment report + {ok, <<"true">>} ?= dev_snp:verify(M1, Req, Opts), + ?event(green_zone, {join, commitment, verified}), + % Process successful join + process_successful_join(NodeAddr, RequesterPubKey, Req, Opts) + else + false -> + throw(invalid_join_request); {ok, <<"false">>} -> - % Commitment failed. ?event(green_zone, {join, commitment, failed}), {error, <<"Received invalid commitment report.">>}; Error -> - % Error during commitment verification. ?event(green_zone, {join, commitment, error, Error}), Error end. +%% @doc Extract join request data including node address and public key. +%% +%% This function extracts and processes the essential data from a join request, +%% including the node address and decoded public key. 
+%% +%% @param Req Join request message +%% @param Opts Configuration options +%% @returns {ok, {NodeAddr, RequesterPubKey}} or {error, Reason} +extract_join_request_data(Req, Opts) -> + maybe + % Extract basic request data + NodeAddr = hb_ao:get(<<"address">>, Req, Opts), + ?event(green_zone, {join, extract, {node_addr, NodeAddr}}), + % Extract and decode public key + EncodedPubKey = hb_ao:get(<<"public_key">>, Req, Opts), + ?event(green_zone, {encoded_pub_key, {explicit, EncodedPubKey}}), + RequesterPubKey = case EncodedPubKey of + not_found -> not_found; + Encoded -> binary_to_term(base64:decode(Encoded)) + end, + ?event(green_zone, {public_key, {explicit, RequesterPubKey}}), + {ok, {NodeAddr, RequesterPubKey}} + end. + +%% @doc Process a successful join by adding node and encrypting zone key. +%% +%% This function handles the final steps of a successful join request, +%% including adding the node to trusted list and encrypting the zone key. +%% +%% @param NodeAddr Address of joining node +%% @param RequesterPubKey Public key of joining node +%% @param Req Original join request (for Report) +%% @param Opts Configuration options +%% @returns {ok, Map} with success response +process_successful_join(NodeAddr, RequesterPubKey, Req, Opts) -> + % Get required data + Report = hb_ao:get(<<"report">>, Req, Opts), + GreenZoneAES = hb_opts:get(priv_green_zone_aes, undefined, Opts), + ?event(green_zone, {green_zone_aes, {explicit, GreenZoneAES}}), + {WalletPubKey, _} = hb_opts:get(priv_wallet, undefined, Opts), + % Add joining node to trusted nodes + add_trusted_node(NodeAddr, Report, RequesterPubKey, Opts), + ?event(green_zone, {join, update, trusted_nodes, ok}), + % Encrypt shared AES key for the joining node + EncryptedPayload = encrypt_payload(GreenZoneAES, RequesterPubKey), + ?event(green_zone, {join, encrypt, aes_key, complete}), + {ok, #{ + <<"body">> => <<"Node joined green zone successfully.">>, + <<"node-address">> => NodeAddr, + <<"zone-key">> => 
base64:encode(EncryptedPayload), + <<"public_key">> => WalletPubKey + }}. + %% @doc Validates that a peer's configuration matches required options. %% %% This function ensures the peer node meets configuration requirements: @@ -613,7 +679,6 @@ validate_join(M1, Req, Opts) -> %% @param Req The request message containing the peer's configuration %% @param Opts A map of the local node's configuration options %% @returns true if the peer's configuration is valid, false otherwise --spec validate_peer_opts(Req :: map(), Opts :: map()) -> boolean(). validate_peer_opts(Req, Opts) -> ?event(green_zone, {validate_peer_opts, start, Req}), % Get the required config from the local node's configuration. @@ -627,7 +692,9 @@ validate_peer_opts(Req, Opts) -> Opts ) ), - ?event(green_zone, {validate_peer_opts, required_config, ConvertedRequiredConfig}), + ?event(green_zone, + {validate_peer_opts, required_config, ConvertedRequiredConfig} + ), PeerOpts = hb_ao:normalize_keys( hb_ao:get(<<"node-message">>, Req, undefined, Opts)), @@ -635,10 +702,18 @@ validate_peer_opts(Req, Opts) -> Result = try case hb_opts:ensure_node_history(PeerOpts, ConvertedRequiredConfig) of {ok, _} -> - ?event(green_zone, {validate_peer_opts, history_items_check, valid}), + ?event(green_zone, + {validate_peer_opts, history_items_check, valid} + ), true; {error, ErrorMsg} -> - ?event(green_zone, {validate_peer_opts, history_items_check, {invalid, ErrorMsg}}), + ?event(green_zone, + { + validate_peer_opts, + history_items_check, + {invalid, ErrorMsg} + } + ), false end catch @@ -662,10 +737,6 @@ validate_peer_opts(Req, Opts) -> %% @param RequesterPubKey The joining node's public key %% @param Opts A map of configuration options %% @returns ok --spec add_trusted_node( - NodeAddr :: binary(), - Report :: map(), - RequesterPubKey :: term(), Opts :: map()) -> ok. add_trusted_node(NodeAddr, Report, RequesterPubKey, Opts) -> % Retrieve the current trusted nodes map. 
TrustedNodes = hb_opts:get(trusted_nodes, #{}, Opts), @@ -679,6 +750,233 @@ add_trusted_node(NodeAddr, Report, RequesterPubKey, Opts) -> trusted_nodes => UpdatedTrustedNodes }). +%%% ------------------------------------------------------------------- +%%% Helpers for key/3 +%%% ------------------------------------------------------------------- + +%% @doc Get the appropriate wallet for the current context. +%% +%% This function determines which wallet to use based on whether the node +%% has a green-zone identity or should use the default wallet. +%% +%% @param Opts Configuration options containing identities and wallet info +%% @returns Wallet to use for encryption operations +get_appropriate_wallet(Opts) -> + Identities = hb_opts:get(identities, #{}, Opts), + case maps:find(<<"green-zone">>, Identities) of + {ok, #{priv_wallet := GreenZoneWallet}} -> GreenZoneWallet; + _ -> hb_opts:get(priv_wallet, undefined, Opts) + end. + +%% @doc Build successful key response with encrypted data. +%% +%% This function constructs the standard response format for successful +%% key encryption operations. +%% +%% @param EncryptedData Base64-encoded encrypted key data +%% @param IV Base64-encoded initialization vector +%% @returns {ok, Map} with standardized response format +build_key_response(EncryptedData, IV) -> + {ok, #{ + <<"status">> => 200, + <<"encrypted_key">> => base64:encode(EncryptedData), + <<"iv">> => base64:encode(IV) + }}. + +%%% ------------------------------------------------------------------- +%%% Helpers for become/3 +%%% ------------------------------------------------------------------- + +%% @doc Validate parameters required for become operation. +%% +%% This function validates that all required parameters are present for +%% the become operation and that the node is part of a green zone. 
+%% +%% @param Opts Configuration options +%% @returns {ok, {NodeLocation, NodeID}} if valid, or {error, Reason} +validate_become_params(Opts) -> + maybe + % Check if node is part of a green zone + GreenZoneAES = hb_opts:get(priv_green_zone_aes, undefined, Opts), + case GreenZoneAES of + undefined -> {error, no_green_zone_aes_key}; + _ -> ok + end, + % Extract and validate peer parameters + NodeLocation = + hb_opts:get(<<"green_zone_peer_location">>, undefined, Opts), + NodeID = hb_opts:get(<<"green_zone_peer_id">>, undefined, Opts), + case {NodeLocation, NodeID} of + {undefined, _} -> {error, missing_peer_location}; + {_, undefined} -> {error, missing_peer_id}; + {_, _} -> {ok, {NodeLocation, NodeID}} + end + end. + +%% @doc Request peer's key and verify the response. +%% +%% This function handles the HTTP request to get the peer's encrypted key +%% and verifies that the response is authentic and from the expected peer. +%% +%% @param NodeLocation Target node's address +%% @param NodeID Target node's identifier +%% @param Opts Configuration options +%% @returns {ok, KeyResp} if successful, or {error, Reason} +request_and_verify_peer_key(NodeLocation, NodeID, Opts) -> + maybe + ?event(green_zone, {become, getting_key, NodeLocation, NodeID}), + % Request encrypted key from target node + {ok, KeyResp} ?= + hb_http:get(NodeLocation, <<"/~greenzone@1.0/key">>, Opts), + % Verify response signature + Signers = hb_message:signers(KeyResp, Opts), + true ?= (hb_message:verify(KeyResp, Signers, Opts) and + lists:member(NodeID, Signers)), + {ok, KeyResp} + else + false -> + {error, invalid_peer_response}; + Error -> + Error + end. + +%% @doc Finalize the become process by decrypting and adopting target identity. +%% +%% This function completes the identity adoption process by: +%% 1. Extracting and decrypting the target node's encrypted key data +%% 2. Converting the decrypted data back into a keypair structure +%% 3. 
Creating a new green zone wallet with the target's identity +%% 4. Updating the node's identity configuration +%% 5. Mounting an encrypted volume with the new identity +%% 6. Returning confirmation of successful identity adoption +%% +%% @param KeyResp Response containing encrypted key data from target node +%% @param NodeLocation URL of the target node for logging +%% @param NodeID ID of the target node for logging +%% @param Opts Configuration options containing decryption keys +%% @returns {ok, Map} with success confirmation and peer details +finalize_become(KeyResp, NodeLocation, NodeID, Opts) -> + maybe + % Decode and decrypt the encrypted key + Combined = base64:decode(hb_ao:get(<<"encrypted_key">>, KeyResp, Opts)), + IV = base64:decode(hb_ao:get(<<"iv">>, KeyResp, Opts)), + {ok, DecryptedBin} ?= decrypt_data(Combined, IV, Opts), + % Log current wallet info + OldWallet = hb_opts:get(priv_wallet, undefined, Opts), + OldWalletAddr = hb_util:human_id(ar_wallet:to_address(OldWallet)), + ?event(green_zone, {become, old_wallet, OldWalletAddr}), + % Extract and process target node's keypair + {KeyType, Priv, Pub} = binary_to_term(DecryptedBin), + ?event(green_zone, {become, decrypted_bin, DecryptedBin}), + ?event(green_zone, {become, keypair, Pub}), + % Update node identity with target's keypair + GreenZoneWallet = {{KeyType, Priv, Pub}, {KeyType, Pub}}, + ok ?= update_node_identity(GreenZoneWallet, Opts), + % Mount encrypted volume and finalize + try_mount_encrypted_volume(GreenZoneWallet, Opts), + ?event(green_zone, {become, update_wallet, complete}), + {ok, #{ + <<"body">> => #{ + <<"message">> => + <<"Successfully adopted target node identity">>, + <<"peer-location">> => NodeLocation, + <<"peer-id">> => NodeID + } + }} + end. + +%% @doc Update node identity with new green zone wallet. +%% +%% This function updates the node's identity configuration to include +%% the new green zone wallet and commits the changes. 
+%%
+%% @param GreenZoneWallet New wallet to use for green zone identity
+%% @param Opts Current configuration options
+%% @returns ok if successful
+update_node_identity(GreenZoneWallet, Opts) ->
+    Identities = hb_opts:get(identities, #{}, Opts),
+    UpdatedIdentities = Identities#{
+        <<"green-zone">> => #{
+            priv_wallet => GreenZoneWallet
+        }
+    },
+    NewOpts = Opts#{identities => UpdatedIdentities},
+    hb_http_server:set_opts(NewOpts).
+
+%%% -------------------------------------------------------------------
+%%% General/Shared helpers
+%%% -------------------------------------------------------------------
+
+%% @doc Return the default required configuration options for a green zone.
+%%
+%% All candidate options are currently commented out, so this function
+%% presently returns an empty map until the required option set is
+%% finalized.
+%%
+%% @param _Opts Configuration options (currently unused)
+%% @returns Map of required green zone configuration options
+default_zone_required_opts(_Opts) ->
+    #{
+        % trusted_device_signers => hb_opts:get(trusted_device_signers, [], Opts),
+        % load_remote_devices => hb_opts:get(load_remote_devices, false, Opts),
+        % preload_devices => hb_opts:get(preload_devices, [], Opts),
+        % % store => hb_opts:get(store, [], Opts),
+        % routes => hb_opts:get(routes, [], Opts),
+        % on => hb_opts:get(on, undefined, Opts),
+        % scheduling_mode => disabled,
+        % initialized => permanent
+    }.
+
+%% @doc Replace values of <<"self">> in a configuration map with
+%% corresponding values from Opts.
+%%
+%% This function iterates through all key-value pairs in the configuration map.
+%% If a value is <<"self">>, it replaces that value with the result of
+%% hb_opts:get(Key, not_found, Opts) where Key is the corresponding key.
+%% +%% @param Config The configuration map to process +%% @param Opts The options map to fetch replacement values from +%% @returns A new map with <<"self">> values replaced +replace_self_values(Config, Opts) -> + maps:map( + fun(Key, Value) -> + case Value of + <<"self">> -> + hb_opts:get(Key, not_found, Opts); + _ -> + Value + end + end, + Config + ). + +%% @doc Returns `true' if the request is signed by a trusted node. +%% +%% This function verifies whether an incoming request is signed by a node +%% that is part of the trusted nodes list in the green zone. It extracts +%% all signers from the request and checks if any of them match the trusted +%% nodes configured for this green zone. +%% +%% @param _M1 Ignored parameter +%% @param Req The request message to verify +%% @param Opts Configuration options containing trusted_nodes map +%% @returns {ok, Binary} with "true" or "false" indicating trust status +is_trusted(_M1, Req, Opts) -> + Signers = hb_message:signers(Req, Opts), + {ok, + hb_util:bin( + lists:any( + fun(Signer) -> + lists:member( + Signer, + maps:keys(hb_opts:get(trusted_nodes, #{}, Opts)) + ) + end, + Signers + ) + ) + }. + + %% @doc Encrypts an AES key with a node's RSA public key. %% %% This function securely encrypts the shared key for transmission: @@ -689,7 +987,6 @@ add_trusted_node(NodeAddr, Report, RequesterPubKey, Opts) -> %% @param AESKey The shared AES key (256-bit binary) %% @param RequesterPubKey The node's public RSA key %% @returns The encrypted AES key --spec encrypt_payload(AESKey :: binary(), RequesterPubKey :: term()) -> binary(). 
encrypt_payload(AESKey, RequesterPubKey) -> ?event(green_zone, {encrypt_payload, start}), %% Expect RequesterPubKey in the form: { {rsa, E}, Pub } @@ -713,8 +1010,6 @@ encrypt_payload(AESKey, RequesterPubKey) -> %% @param EncZoneKey The encrypted zone AES key (Base64 encoded or binary) %% @param Opts A map of configuration options %% @returns {ok, DecryptedKey} on success with the decrypted AES key --spec decrypt_zone_key(EncZoneKey :: binary(), Opts :: map()) -> - {ok, binary()} | {error, binary()}. decrypt_zone_key(EncZoneKey, Opts) -> % Decode if necessary RawEncKey = case is_binary(EncZoneKey) of @@ -740,8 +1035,9 @@ decrypt_zone_key(EncZoneKey, Opts) -> %% delegating to the dev_volume module, which provides a unified interface %% for volume management. %% -%% The encryption key used for the volume is the same AES key used for green zone -%% communication, ensuring that only nodes in the green zone can access the data. +%% The encryption key used for the volume is the same AES key used for green +%% zone communication, ensuring that only nodes in the green zone can access +%% the data. %% %% @param Key The password for the encrypted volume. %% @param Opts A map of configuration options. @@ -763,6 +1059,99 @@ try_mount_encrypted_volume(Key, Opts) -> ok % Still return ok as this is an optional operation end. +%%% =================================================================== +%%% Encryption Helper Functions +%%% =================================================================== + +%% @doc Encrypt data using AES-256-GCM with the green zone shared key. +%% +%% This function provides a standardized way to encrypt data using the +%% green zone AES key from the node's configuration. It generates a random IV +%% and returns the encrypted data with authentication tag, ready for base64 +%% encoding and transmission. 
+%%
+%% @param Data The data to encrypt (will be converted to binary via
+%% term_to_binary)
+%% @param Opts Server configuration options containing priv_green_zone_aes
+%% @returns {ok, {EncryptedData, IV}} where EncryptedData includes the auth tag,
+%% or {error, Reason} if no AES key or encryption fails
+encrypt_data(Data, Opts) ->
+    case hb_opts:get(priv_green_zone_aes, undefined, Opts) of
+        undefined ->
+            {error, no_green_zone_aes_key};
+        AESKey ->
+            try
+                % Generate random IV
+                IV = crypto:strong_rand_bytes(16),
+                % Convert data to binary if needed
+                DataBin = case is_binary(Data) of
+                    true -> Data;
+                    false -> term_to_binary(Data)
+                end,
+                % Encrypt using AES-256-GCM
+                {EncryptedData, Tag} = crypto:crypto_one_time_aead(
+                    aes_256_gcm,
+                    AESKey,
+                    IV,
+                    DataBin,
+                    <<>>,
+                    true
+                ),
+                % Combine encrypted data and tag
+                Combined = <<EncryptedData/binary, Tag/binary>>,
+                {ok, {Combined, IV}}
+            catch
+                Error:Reason ->
+                    {error, {encryption_failed, Error, Reason}}
+            end
+    end.
+
+%% @doc Decrypt data using AES-256-GCM with the green zone shared key.
+%%
+%% This function provides a standardized way to decrypt data that was
+%% encrypted with encrypt_data/2. It expects the encrypted data to include
+%% the 16-byte authentication tag at the end.
+%%
+%% @param Combined The encrypted data with authentication tag appended
+%% @param IV The initialization vector used during encryption
+%% @param Opts Server configuration options containing priv_green_zone_aes
+%% @returns {ok, DecryptedData} or {error, Reason}
+decrypt_data(Combined, IV, Opts) ->
+    case hb_opts:get(priv_green_zone_aes, undefined, Opts) of
+        undefined ->
+            {error, no_green_zone_aes_key};
+        AESKey ->
+            try
+                % Separate ciphertext and authentication tag
+                CipherLen = byte_size(Combined) - 16,
+                case CipherLen >= 0 of
+                    false ->
+                        {error, invalid_encrypted_data_length};
+                    true ->
+                        <<Ciphertext:CipherLen/binary, Tag:16/binary>> =
+                            Combined,
+                        % Decrypt using AES-256-GCM
+                        DecryptedBin = crypto:crypto_one_time_aead(
+                            aes_256_gcm,
+                            AESKey,
+                            IV,
+                            Ciphertext,
+                            <<>>,
+                            Tag,
+                            false
+                        ),
+                        {ok, DecryptedBin}
+                end
+            catch
+                Error:Reason ->
+                    {error, {decryption_failed, Error, Reason}}
+            end
+    end.
+
+%%% ===================================================================
+%%% Test Functions
+%%% ===================================================================
+
 %% @doc Test RSA operations with the existing wallet structure.
 %%
 %% This test function verifies that encryption and decryption using the RSA keys
diff --git a/src/dev_snp.erl b/src/dev_snp.erl
index 48ce38a2b..af9203d7e 100644
--- a/src/dev_snp.erl
+++ b/src/dev_snp.erl
@@ -12,925 +12,17 @@
 -module(dev_snp).
 -export([generate/3, verify/3]).
 -include("include/hb.hrl").
--include_lib("eunit/include/eunit.hrl").
-
-%% Configuration constants
--define(COMMITTED_PARAMETERS, [vcpus, vcpu_type, vmm_type, guest_features,
-    firmware, kernel, initrd, append]).
-
-%% SNP-specific constants
--define(DEBUG_FLAG_BIT, 19).
--define(REPORT_DATA_VERSION, 1).
-
-%% Test configuration constants
--define(TEST_VCPUS_COUNT, 32).
--define(TEST_VCPU_TYPE, 5).
--define(TEST_VMM_TYPE, 1).
--define(TEST_GUEST_FEATURES, 1).
--define(TEST_FIRMWARE_HASH, <<"b8c5d4082d5738db6b0fb0294174992738645df70c44cdecf7fad3a62244b788e7e408c582ee48a74b289f3acec78510">>).
--define(TEST_KERNEL_HASH, <<"69d0cd7d13858e4fcef6bc7797aebd258730f215bc5642c4ad8e4b893cc67576">>). --define(TEST_INITRD_HASH, <<"544045560322dbcd2c454bdc50f35edf0147829ec440e6cb487b4a1503f923c1">>). --define(TEST_APPEND_HASH, <<"95a34faced5e487991f9cc2253a41cbd26b708bf00328f98dddbbf6b3ea2892e">>). %% @doc Verify an AMD SEV-SNP commitment report message. -%% -%% This function validates the identity of a remote node, its ephemeral private -%% address, and the integrity of the hardware-backed attestation report. -%% The verification process performs the following checks: -%% 1. Verify the address and the node message ID are the same as the ones -%% used to generate the nonce. -%% 2. Verify the address that signed the message is the same as the one used -%% to generate the nonce. -%% 3. Verify that the debug flag is disabled. -%% 4. Verify that the firmware, kernel, and OS (VMSAs) hashes, part of the -%% measurement, are trusted. -%% 5. Verify the measurement is valid. -%% 6. Verify the report's certificate chain to hardware root of trust. -%% -%% Required configuration in NodeOpts map: -%% - snp_trusted: List of trusted software configurations -%% - snp_enforced_keys: Keys to enforce during validation (optional) -%% -%% @param M1 The previous message in the verification chain -%% @param M2 The message containing the SNP commitment report -%% @param NodeOpts A map of configuration options for verification -%% @returns `{ok, Binary}' with "true" on successful verification, or -%% `{error, Reason}' on failure with specific error details +%% Delegates to snp_verification module. -spec verify(M1 :: term(), M2 :: term(), NodeOpts :: map()) -> - {ok, binary()} | {error, term()}. + {ok, boolean()} | {error, term()}. verify(M1, M2, NodeOpts) -> - ?event(snp_verify, verify_called), - maybe - % In pipeline flows (e.g., /~relay@1.0/call/verify~snp@1.0), the report - % comes from M1 (result of previous stage). For direct calls, it may be - % in M2. 
Try M1 first, then fall back to M2. - {ok, {Msg, Address, NodeMsgID, ReportJSON, MsgWithJSONReport}} - ?= case extract_and_normalize_message(M1, NodeOpts) of - {ok, Result} -> {ok, Result}; - {error, {report_not_found, _}} -> - ?event(snp_verify, {report_not_in_m1_trying_m2}), - extract_and_normalize_message(M2, NodeOpts); - {error, ExtractReason} -> {error, ExtractReason} - end, - % Perform all validation steps - {ok, NonceResult} ?= verify_nonce(Address, NodeMsgID, Msg, NodeOpts), - {ok, SigResult} ?= - verify_signature_and_address( - MsgWithJSONReport, - Address, - NodeOpts - ), - {ok, DebugResult} ?= verify_debug_disabled(Msg), - {ok, TrustedResult} ?= verify_trusted_software(M1, Msg, NodeOpts), - {ok, MeasurementResult} ?= verify_measurement(Msg, ReportJSON, NodeOpts), - {ok, ReportResult} ?= verify_report_integrity(ReportJSON), - Valid = lists:all( - fun(Bool) -> Bool end, - [ - NonceResult, - SigResult, - DebugResult, - TrustedResult, - MeasurementResult, - ReportResult - ] - ), - ?event({final_validation_result, Valid}), - {ok, hb_util:bin(Valid)} - else - {error, Reason} -> {error, Reason} - end. + snp_verification:verify(M1, M2, NodeOpts). %% @doc Generate an AMD SEV-SNP commitment report and emit it as a message. -%% -%% This function creates a hardware-backed attestation report containing all -%% necessary data to validate the node's identity and software configuration. -%% The generation process performs the following operations: -%% 1. Loads and validates the provided configuration options -%% 2. Retrieves or creates a cryptographic wallet for node identity -%% 3. Generates a unique nonce using the node's address and message ID -%% 4. Extracts trusted software configuration from local options -%% 5. Generates the hardware attestation report using the NIF interface -%% 6. 
Packages the report with all verification data into a message -%% -%% Required configuration in Opts map: -%% - priv_wallet: Node's cryptographic wallet (created if not provided) -%% - snp_trusted: List of trusted software configurations (represents the -%% configuration of the local node generating the report) -%% -%% @param _M1 Ignored parameter -%% @param _M2 Ignored parameter -%% @param Opts A map of configuration options for report generation -%% @returns `{ok, Map}' on success with the complete report message, or -%% `{error, Reason}' on failure with error details +%% Delegates to snp_generate module. -spec generate(M1 :: term(), M2 :: term(), Opts :: map()) -> {ok, map()} | {error, term()}. -generate(_M1, _M2, Opts) -> - maybe - LoadedOpts = hb_cache:ensure_all_loaded(Opts, Opts), - ?event({generate_opts, {explicit, LoadedOpts}}), - % Validate wallet availability - {ok, ValidWallet} ?= - case hb_opts:get(priv_wallet, no_viable_wallet, LoadedOpts) of - no_viable_wallet -> {error, no_wallet_available}; - Wallet -> {ok, Wallet} - end, - % Generate address and node message components - Address = hb_util:human_id(ar_wallet:to_address(ValidWallet)), - NodeMsg = hb_private:reset(LoadedOpts), - {ok, PublicNodeMsgID} ?= dev_message:id( - NodeMsg, - #{ <<"committers">> => <<"none">> }, - LoadedOpts - ), - RawPublicNodeMsgID = hb_util:native_id(PublicNodeMsgID), - ?event({snp_node_msg, NodeMsg}), - % Generate the commitment report components - ?event({snp_address, byte_size(Address)}), - ReportData = generate_nonce(Address, RawPublicNodeMsgID), - ?event({snp_report_data, byte_size(ReportData)}), - % Extract local hashes - {ok, ValidLocalHashes} ?= - case hb_opts:get(snp_trusted, [#{}], LoadedOpts) of - [] -> {error, no_trusted_configs}; - [FirstConfig | _] -> {ok, FirstConfig}; - _ -> {error, invalid_trusted_configs_format} - end, - ?event(snp_local_hashes, {explicit, ValidLocalHashes}), - % Generate the hardware attestation report - {ok, ReportJSON} ?= case 
get(mock_snp_nif_enabled) of - true -> - % Return mocked response for testing - MockResponse = get(mock_snp_nif_response), - {ok, MockResponse}; - _ -> - % Call actual NIF function - dev_snp_nif:generate_attestation_report( - ReportData, - ?REPORT_DATA_VERSION - ) - end, - ?event({snp_report_json, ReportJSON}), - ?event({snp_report_generated, {nonce, ReportData}, {report, ReportJSON}}), - % Package the complete report message - ReportMsg = #{ - <<"local-hashes">> => ValidLocalHashes, - <<"nonce">> => hb_util:encode(ReportData), - <<"address">> => Address, - <<"node-message">> => NodeMsg, - <<"report">> => ReportJSON - }, - ?event({snp_report_msg, ReportMsg}), - {ok, ReportMsg} - else - {error, Reason} -> {error, Reason}; - Error -> {error, Error} - end. - -%% @doc Extract and normalize the SNP commitment message from the input. -%% -%% This function processes the raw message and extracts all necessary components -%% for verification: -%% 1. Searches for a `body' key in the message, using it as the report source -%% 2. Applies message commitment and signing filters -%% 3. Extracts and decodes the JSON report -%% 4. Normalizes the message structure by merging report data -%% 5. Extracts the node address and message ID -%% -%% @param M2 The input message containing the SNP report -%% @param NodeOpts A map of configuration options -%% @returns `{ok, {Msg, Address, NodeMsgID, ReportJSON, MsgWithJSONReport}}' -%% on success with all extracted components, or `{error, Reason}' on failure --spec extract_and_normalize_message(M2 :: term(), NodeOpts :: map()) -> - {ok, {map(), binary(), binary(), binary(), map()}} | {error, term()}. -extract_and_normalize_message(M2, NodeOpts) -> - maybe - % Search for a `body' key in the message, and if found use it as the source - % of the report. If not found, use the message itself as the source. 
- ?event({node_opts, {explicit, NodeOpts}}), - RawMsg = hb_ao:get(<<"body">>, M2, M2, NodeOpts#{ hashpath => ignore }), - ?event({msg, {explicit, RawMsg}}), - MsgWithJSONReport = - hb_util:ok( - hb_message:with_only_committed( - hb_message:with_only_committers( - RawMsg, - hb_message:signers( - RawMsg, - NodeOpts - ), - NodeOpts - ), - NodeOpts - ) - ), - ?event({msg_with_json_report, {explicit, MsgWithJSONReport}}), - % Normalize the request message. First try to get the report from the - % committed message. If not found (e.g., message not signed), fall back - % to the raw message. - ReportJSON = case hb_ao:get(<<"report">>, MsgWithJSONReport, NodeOpts) of - not_found -> - ?event({report_not_in_committed, falling_back_to_raw}), - hb_ao:get(<<"report">>, RawMsg, NodeOpts); - Found -> Found - end, - {ok, ValidReportJSON} ?= case ReportJSON of - not_found -> - ?event({report_not_found, {m2, M2}, {raw_msg, RawMsg}}), - {error, {report_not_found, <<"No 'report' key found in message">>}}; - _ -> {ok, ReportJSON} - end, - Report = hb_json:decode(ValidReportJSON), - Msg = - maps:merge( - maps:without([<<"report">>], MsgWithJSONReport), - Report - ), - - % Extract address and node message ID - Address = hb_ao:get(<<"address">>, Msg, NodeOpts), - ?event({snp_address, Address}), - {ok, NodeMsgID} ?= extract_node_message_id(Msg, NodeOpts), - ?event({snp_node_msg_id, NodeMsgID}), - {ok, {Msg, Address, NodeMsgID, ReportJSON, MsgWithJSONReport}} - else - {error, Reason} -> {error, Reason}; - Error -> {error, Error} - end. - - -%% @doc Extract the node message ID from the SNP message. -%% -%% This function handles the extraction of the node message ID, which can be -%% provided either directly as a field or embedded within a node message that -%% needs to be processed to generate the ID. 
-%% -%% @param Msg The normalized SNP message -%% @param NodeOpts A map of configuration options -%% @returns `{ok, NodeMsgID}' on success with the extracted ID, or -%% `{error, missing_node_msg_id}' if no ID can be found --spec extract_node_message_id(Msg :: map(), NodeOpts :: map()) -> - {ok, binary()} | {error, missing_node_msg_id}. -extract_node_message_id(Msg, NodeOpts) -> - case {hb_ao:get(<<"node-message">>, Msg, NodeOpts#{ hashpath => ignore }), - hb_ao:get(<<"node-message-id">>, Msg, NodeOpts)} of - {undefined, undefined} -> - {error, missing_node_msg_id}; - {undefined, ID} -> - {ok, ID}; - {NodeMsg, _} -> - dev_message:id(NodeMsg, #{}, NodeOpts) - end. - -%% @doc Verify that the nonce in the report matches the expected value. -%% -%% This function validates that the nonce in the SNP report was generated -%% using the correct address and node message ID, ensuring the report -%% corresponds to the expected request. -%% -%% @param Address The node's address used in nonce generation -%% @param NodeMsgID The node message ID used in nonce generation -%% @param Msg The normalized SNP message containing the nonce -%% @param NodeOpts A map of configuration options -%% @returns `{ok, true}' if the nonce matches, or `{error, nonce_mismatch}' on failure --spec verify_nonce(Address :: binary(), NodeMsgID :: binary(), - Msg :: map(), NodeOpts :: map()) -> {ok, true} | {error, nonce_mismatch}. -verify_nonce(Address, NodeMsgID, Msg, NodeOpts) -> - Nonce = hb_util:decode(hb_ao:get(<<"nonce">>, Msg, NodeOpts)), - ?event({snp_nonce, Nonce}), - NonceMatches = report_data_matches(Address, NodeMsgID, Nonce), - ?event({nonce_matches, NonceMatches}), - case NonceMatches of - true -> {ok, true}; - false -> {error, nonce_mismatch} - end. - -%% @doc Verify that the message signature and signing address are valid. -%% -%% This function validates that: -%% 1. The message signature is cryptographically valid -%% 2. 
The address that signed the message matches the address in the report -%% -%% @param MsgWithJSONReport The message containing the JSON report and signatures -%% @param Address The expected signing address from the report -%% @param NodeOpts A map of configuration options -%% @returns `{ok, true}' if both signature and address are valid, or -%% `{error, signature_or_address_invalid}' on failure --spec verify_signature_and_address(MsgWithJSONReport :: map(), - Address :: binary(), NodeOpts :: map()) -> - {ok, true} | {error, signature_or_address_invalid}. -verify_signature_and_address(MsgWithJSONReport, Address, NodeOpts) -> - Signers = hb_message:signers(MsgWithJSONReport, NodeOpts), - ?event({snp_signers, {explicit, Signers}}), - SigIsValid = hb_message:verify(MsgWithJSONReport, Signers), - ?event({snp_sig_is_valid, SigIsValid}), - AddressIsValid = lists:member(Address, Signers), - ?event({address_is_valid, AddressIsValid, {signer, Signers}, {address, Address}}), - case SigIsValid andalso AddressIsValid of - true -> {ok, true}; - false -> {error, signature_or_address_invalid} - end. - -%% @doc Verify that the debug flag is disabled in the SNP policy. -%% -%% This function checks the SNP policy to ensure that debug mode is disabled, -%% which is required for production environments to maintain security guarantees. -%% -%% @param Msg The normalized SNP message containing the policy -%% @returns `{ok, true}' if debug is disabled, or `{error, debug_enabled}' if enabled --spec verify_debug_disabled(Msg :: map()) -> {ok, true} | {error, debug_enabled}. -verify_debug_disabled(Msg) -> - DebugDisabled = not is_debug(Msg), - ?event({debug_disabled, DebugDisabled}), - case DebugDisabled of - true -> {ok, true}; - false -> {error, debug_enabled} - end. - -%% @doc Verify that the software configuration is trusted. 
-%% -%% This function validates that the firmware, kernel, and other system -%% components match approved configurations by delegating to the -%% software trust validation system. -%% -%% @param M1 The previous message in the verification chain -%% @param Msg The normalized SNP message containing software hashes -%% @param NodeOpts A map of configuration options including trusted software list -%% @returns `{ok, true}' if the software is trusted, or `{error, untrusted_software}' -%% on failure --spec verify_trusted_software(M1 :: term(), Msg :: map(), NodeOpts :: map()) -> - {ok, true} | {error, untrusted_software}. -verify_trusted_software(M1, Msg, NodeOpts) -> - {ok, IsTrustedSoftware} = execute_is_trusted(M1, Msg, NodeOpts), - ?event({trusted_software, IsTrustedSoftware}), - case IsTrustedSoftware of - true -> {ok, true}; - false -> {error, untrusted_software} - end. - -%% @doc Verify that the measurement in the SNP report is valid. -%% -%% This function validates the SNP measurement by: -%% 1. Extracting committed parameters from the message -%% 2. Computing the expected launch digest using those parameters -%% 3. Comparing the computed digest with the measurement in the report -%% -%% @param Msg The normalized SNP message containing local hashes -%% @param ReportJSON The raw JSON report containing the measurement -%% @param NodeOpts A map of configuration options -%% @returns `{ok, true}' if the measurement is valid, or -%% `{error, measurement_invalid}' on failure --spec verify_measurement(Msg :: map(), ReportJSON :: binary(), - NodeOpts :: map()) -> {ok, true} | {error, measurement_invalid}. 
-verify_measurement(Msg, ReportJSON, NodeOpts) -> - Args = extract_measurement_args(Msg, NodeOpts), - ?event({args, { explicit, Args}}), - {ok, Expected} = dev_snp_nif:compute_launch_digest(Args), - ExpectedBin = list_to_binary(Expected), - ?event({expected_measurement, {explicit, Expected}}), - Measurement = hb_ao:get(<<"measurement">>, Msg, NodeOpts), - ?event({measurement, {explicit,Measurement}}), - {Status, MeasurementIsValid} = - dev_snp_nif:verify_measurement( - ReportJSON, - ExpectedBin - ), - ?event({status, Status}), - ?event({measurement_is_valid, MeasurementIsValid}), - case MeasurementIsValid of - true -> {ok, true}; - false -> {error, measurement_invalid} - end. - -%% @doc Extract measurement arguments from the SNP message. -%% -%% This function extracts and formats the committed parameters needed for -%% measurement computation from the local hashes in the message. -%% -%% @param Msg The normalized SNP message containing local hashes -%% @param NodeOpts A map of configuration options -%% @returns A map of measurement arguments with atom keys --spec extract_measurement_args(Msg :: map(), NodeOpts :: map()) -> map(). -extract_measurement_args(Msg, NodeOpts) -> - maps:from_list( - lists:map( - fun({Key, Val}) -> {binary_to_existing_atom(Key), Val} end, - maps:to_list( - maps:with( - lists:map(fun atom_to_binary/1, ?COMMITTED_PARAMETERS), - hb_cache:ensure_all_loaded( - hb_ao:get(<<"local-hashes">>, Msg, NodeOpts), - NodeOpts - ) - ) - ) - ) - ). - -%% @doc Verify the integrity of the SNP report's digital signature. -%% -%% This function validates the cryptographic signature of the SNP report -%% against the hardware root of trust to ensure the report has not been -%% tampered with and originates from genuine AMD SEV-SNP hardware. 
-%% -%% @param ReportJSON The raw JSON report to verify -%% @returns `{ok, true}' if the report signature is valid, or -%% `{error, report_signature_invalid}' on failure --spec verify_report_integrity(ReportJSON :: binary()) -> - {ok, true} | {error, report_signature_invalid}. -verify_report_integrity(ReportJSON) -> - {ok, ReportIsValid} = dev_snp_nif:verify_signature(ReportJSON), - ?event({report_is_valid, ReportIsValid}), - case ReportIsValid of - true -> {ok, true}; - false -> {error, report_signature_invalid} - end. - -%% @doc Check if the node's debug policy is enabled. -%% -%% This function examines the SNP policy field to determine if debug mode -%% is enabled by checking the debug flag bit in the policy bitmask. -%% -%% @param Report The SNP report containing the policy field -%% @returns `true' if debug mode is enabled, `false' otherwise --spec is_debug(Report :: map()) -> boolean(). -is_debug(Report) -> - (hb_ao:get(<<"policy">>, Report, #{}) band (1 bsl ?DEBUG_FLAG_BIT)) =/= 0. - - -%% @doc Validate that all software hashes match trusted configurations. -%% -%% This function ensures that the firmware, kernel, and other system components -%% in the SNP report match approved configurations. The validation process: -%% 1. Extracts local hashes from the message -%% 2. Filters hashes to only include enforced keys -%% 3. Compares filtered hashes against trusted software configurations -%% 4. 
Returns true only if the configuration matches a trusted entry -%% -%% Configuration options in NodeOpts map: -%% - snp_trusted: List of maps containing trusted software configurations -%% - snp_enforced_keys: Keys to enforce during validation (defaults to all -%% committed parameters) -%% -%% @param _M1 Ignored parameter -%% @param Msg The SNP message containing local software hashes -%% @param NodeOpts A map of configuration options including trusted software -%% @returns `{ok, true}' if software is trusted, `{ok, false}' otherwise --spec execute_is_trusted(M1 :: term(), Msg :: map(), NodeOpts :: map()) -> - {ok, boolean()}. -execute_is_trusted(_M1, Msg, NodeOpts) -> - FilteredLocalHashes = get_filtered_local_hashes(Msg, NodeOpts), - TrustedSoftware = hb_opts:get(snp_trusted, [#{}], NodeOpts), - ?event({trusted_software, {explicit, TrustedSoftware}}), - IsTrusted = - is_software_trusted( - FilteredLocalHashes, - TrustedSoftware, - NodeOpts - ), - ?event({is_all_software_trusted, IsTrusted}), - {ok, IsTrusted}. - -%% @doc Extract local hashes filtered to only include enforced keys. -%% -%% This function retrieves the local software hashes from the message and -%% filters them to only include the keys that are configured for enforcement. -%% -%% @param Msg The SNP message containing local hashes -%% @param NodeOpts A map of configuration options -%% @returns A map of filtered local hashes with only enforced keys --spec get_filtered_local_hashes(Msg :: map(), NodeOpts :: map()) -> map(). -get_filtered_local_hashes(Msg, NodeOpts) -> - LocalHashes = hb_ao:get(<<"local-hashes">>, Msg, NodeOpts), - EnforcedKeys = get_enforced_keys(NodeOpts), - ?event({enforced_keys, {explicit, EnforcedKeys}}), - FilteredLocalHashes = hb_cache:ensure_all_loaded( - maps:with(EnforcedKeys, LocalHashes), - NodeOpts - ), - ?event({filtered_local_hashes, {explicit, FilteredLocalHashes}}), - FilteredLocalHashes. - -%% @doc Get the list of enforced keys for software validation. 
-%% -%% This function retrieves the configuration specifying which software -%% component keys should be enforced during trust validation. -%% -%% @param NodeOpts A map of configuration options -%% @returns A list of binary keys that should be enforced --spec get_enforced_keys(NodeOpts :: map()) -> [binary()]. -get_enforced_keys(NodeOpts) -> - lists:map( - fun atom_to_binary/1, - hb_opts:get(snp_enforced_keys, ?COMMITTED_PARAMETERS, NodeOpts) - ). - -%% @doc Check if filtered local hashes match any trusted configurations. -%% -%% This function compares the filtered local hashes against a list of -%% trusted software configurations, returning true if any configuration -%% matches exactly. It handles three cases: -%% 1. Empty list of trusted configurations (returns false) -%% 2. Valid list of trusted configurations (performs matching) -%% 3. Invalid trusted software configuration (returns false) -%% -%% @param FilteredLocalHashes The software hashes to validate -%% @param TrustedSoftware List of trusted software configurations or invalid input -%% @param NodeOpts Configuration options for matching -%% @returns `true' if hashes match a trusted configuration, `false' otherwise --spec is_software_trusted(map(), [] | [map()] | term(), map()) -> boolean(). -is_software_trusted(_FilteredLocalHashes, [], _NodeOpts) -> - false; -is_software_trusted(FilteredLocalHashes, TrustedSoftware, NodeOpts) - when is_list(TrustedSoftware) -> - lists:any( - fun(TrustedMap) -> - Match = - hb_message:match( - FilteredLocalHashes, - TrustedMap, - primary, - NodeOpts - ), - ?event({match, {explicit, Match}}), - is_map(TrustedMap) andalso Match == true - end, - TrustedSoftware - ); -is_software_trusted(_FilteredLocalHashes, _TrustedSoftware, _NodeOpts) -> - false. - -%% @doc Validate that the report data matches the expected nonce. 
-%% -%% This function ensures that the nonce in the SNP report was generated -%% using the same address and node message ID that are expected for this -%% verification request. -%% -%% @param Address The node's address used in nonce generation -%% @param NodeMsgID The node message ID used in nonce generation -%% @param ReportData The actual nonce data from the SNP report -%% @returns `true' if the report data matches the expected nonce, `false' otherwise --spec report_data_matches(Address :: binary(), NodeMsgID :: binary(), - ReportData :: binary()) -> boolean(). -report_data_matches(Address, NodeMsgID, ReportData) -> - ?event({generated_nonce, {explicit, generate_nonce(Address, NodeMsgID)}}), - ?event({expected_nonce, {explicit, ReportData}}), - generate_nonce(Address, NodeMsgID) == ReportData. - -%% @doc Generate the nonce to use in the SNP commitment report. -%% -%% This function creates a unique nonce by concatenating the node's native -%% address and message ID. This nonce is embedded in the hardware attestation -%% report to bind it to a specific verification request. -%% -%% @param RawAddress The node's raw address identifier -%% @param RawNodeMsgID The raw node message identifier -%% @returns A binary nonce formed by concatenating the native address and message ID --spec generate_nonce(RawAddress :: binary(), RawNodeMsgID :: binary()) -> binary(). -generate_nonce(RawAddress, RawNodeMsgID) -> - Address = hb_util:native_id(RawAddress), - NodeMsgID = hb_util:native_id(RawNodeMsgID), - << Address/binary, NodeMsgID/binary >>. - -%% Test helper functions and data -get_test_hashes() -> - #{ - <<"vcpus">> => ?TEST_VCPUS_COUNT, - <<"vcpu_type">> => ?TEST_VCPU_TYPE, - <<"vmm_type">> => ?TEST_VMM_TYPE, - <<"guest_features">> => ?TEST_GUEST_FEATURES, - <<"firmware">> => ?TEST_FIRMWARE_HASH, - <<"kernel">> => ?TEST_KERNEL_HASH, - <<"initrd">> => ?TEST_INITRD_HASH, - <<"append">> => ?TEST_APPEND_HASH - }. 
- -%% Verification test helpers -setup_test_nodes() -> - ProxyWallet = hb:wallet(<<"test/admissible-report-wallet.json">>), - ProxyOpts = #{ - store => hb_opts:get(store), - priv_wallet => ProxyWallet - }, - _ReportNode = hb_http_server:start_node(ProxyOpts), - VerifyingNode = hb_http_server:start_node(#{ - priv_wallet => ar_wallet:new(), - store => hb_opts:get(store), - snp_trusted => [ - #{ - <<"vcpus">> => ?TEST_VCPUS_COUNT, - <<"vcpu_type">> => ?TEST_VCPU_TYPE, - <<"vmm_type">> => ?TEST_VMM_TYPE, - <<"guest_features">> => ?TEST_GUEST_FEATURES, - <<"firmware">> => ?TEST_FIRMWARE_HASH, - <<"kernel">> => ?TEST_KERNEL_HASH, - <<"initrd">> => ?TEST_INITRD_HASH, - <<"append">> => ?TEST_APPEND_HASH - } - ], - snp_enforced_keys => [ - vcpu_type, vmm_type, guest_features, - firmware, kernel, initrd, append - ] - }), - {ProxyOpts, VerifyingNode}. - - -%% @doc Load test SNP report data from file. -%% -%% This function loads a sample SNP attestation report from a test file. -%% The test will fail if the file doesn't exist, ensuring predictable test data. -%% -%% @returns Binary containing test SNP report JSON data -%% @throws {error, {file_not_found, Filename}} if test file doesn't exist --spec load_test_report_data() -> binary(). -load_test_report_data() -> - TestFile = <<"test/admissible-report.json">>, - case file:read_file(TestFile) of - {ok, Data} -> - Data; - {error, enoent} -> - throw({error, {file_not_found, TestFile}}); - {error, Reason} -> - throw({error, {file_read_error, TestFile, Reason}}) - end. 
- - -%% Individual test cases -execute_is_trusted_exact_match_should_fail_test() -> - % Test case: Exact match with trusted software should fail when vcpus differ - Msg = #{ - <<"local-hashes">> => (get_test_hashes())#{ - <<"vcpus">> => 16 - } - }, - NodeOpts = #{ - snp_trusted => [get_test_hashes()], - snp_enforced_keys => [ - vcpus, vcpu_type, vmm_type, guest_features, - firmware, kernel, initrd, append - ] - }, - {ok, Result} = execute_is_trusted(#{}, Msg, NodeOpts), - ?assertEqual(false, Result). - -execute_is_trusted_subset_match_should_pass_test() -> - % Test case: Match with subset of keys in trusted software should pass - Msg = #{ - <<"local-hashes">> => (get_test_hashes())#{ - <<"vcpus">> => 16 - } - }, - NodeOpts = #{ - snp_trusted => [get_test_hashes()], - snp_enforced_keys => [ - vcpu_type, vmm_type, guest_features, - firmware, kernel, initrd, append - ] - }, - {ok, Result} = execute_is_trusted(#{}, Msg, NodeOpts), - ?assertEqual(true, Result). - -verify_test() -> - % Note: If this test fails, it may be because the unsigned ID of the node - % message in `test/admissible-report.eterm` has changed. If the format ever - % changes, this value will need to be updated. Recalculate the unsigned ID - % of the `Request/node-message' field, decode `Request/address', concatenate - % the two, and encode. The result will be the new `Request/nonce' value. - {ProxyOpts, VerifyingNode} = setup_test_nodes(), - {ok, [Request]} = file:consult(<<"test/admissible-report.eterm">>), - {ok, Result} = hb_http:post( - VerifyingNode, - <<"/~snp@1.0/verify">>, - hb_message:commit(Request, ProxyOpts), - ProxyOpts - ), - ?event({verify_test_result, Result}), - ?assertEqual(true, hb_util:atom(Result)). - - -%% @doc Test successful report generation with valid configuration. 
-generate_success_test() -> - % Set up test configuration - TestWallet = ar_wallet:new(), - TestOpts = #{ - priv_wallet => TestWallet, - snp_trusted => [#{ - <<"vcpus">> => ?TEST_VCPUS_COUNT, - <<"vcpu_type">> => ?TEST_VCPU_TYPE, - <<"firmware">> => ?TEST_FIRMWARE_HASH, - <<"kernel">> => ?TEST_KERNEL_HASH - }] - }, - % Load test report data from file - TestReportJSON = load_test_report_data(), - % Mock the NIF function to return test data - ok = mock_snp_nif(TestReportJSON), - try - % Call generate function - {ok, Result} = generate(#{}, #{}, TestOpts), - % Verify the result structure - ?assert(is_map(Result)), - ?assert(maps:is_key(<<"local-hashes">>, Result)), - ?assert(maps:is_key(<<"nonce">>, Result)), - ?assert(maps:is_key(<<"address">>, Result)), - ?assert(maps:is_key(<<"node-message">>, Result)), - ?assert(maps:is_key(<<"report">>, Result)), - % Verify the report content - ?assertEqual(TestReportJSON, maps:get(<<"report">>, Result)), - % Verify local hashes match the first trusted config - ExpectedHashes = maps:get(<<"local-hashes">>, Result), - ?assertEqual(?TEST_VCPUS_COUNT, maps:get(<<"vcpus">>, ExpectedHashes)), - ?assertEqual(?TEST_VCPU_TYPE, maps:get(<<"vcpu_type">>, ExpectedHashes)), - % Verify nonce is properly encoded - Nonce = maps:get(<<"nonce">>, Result), - ?assert(is_binary(Nonce)), - ?assert(byte_size(Nonce) > 0), - % Verify address is present and properly formatted - Address = maps:get(<<"address">>, Result), - ?assert(is_binary(Address)), - ?assert(byte_size(Address) > 0) - after - % Clean up mock - unmock_snp_nif() - end. - -%% @doc Test error handling when wallet is missing. 
-generate_missing_wallet_test() -> - TestOpts = #{ - % No priv_wallet provided - snp_trusted => [#{ <<"firmware">> => ?TEST_FIRMWARE_HASH }] - }, - % Mock the NIF function (shouldn't be called) - ok = mock_snp_nif(<<"dummy_report">>), - try - % Call generate function - should fail - Result = generate(#{}, #{}, TestOpts), - ?assertMatch({error, no_wallet_available}, Result) - after - unmock_snp_nif() - end. - -%% @doc Test error handling when trusted configurations are missing. -generate_missing_trusted_configs_test() -> - TestWallet = ar_wallet:new(), - TestOpts = #{ - priv_wallet => TestWallet, - snp_trusted => [] % Empty trusted configs - }, - - % Mock the NIF function (shouldn't be called) - ok = mock_snp_nif(<<"dummy_report">>), - - try - % Call generate function - should fail - Result = generate(#{}, #{}, TestOpts), - ?assertMatch({error, no_trusted_configs}, Result) - after - unmock_snp_nif() - end. - -%% @doc Test successful round-trip: generate then verify with same configuration. -verify_mock_generate_success_test_() -> - { timeout, 30, fun verify_mock_generate_success/0 }. 
-verify_mock_generate_success() -> - % Set up test configuration - TestWallet = ar_wallet:new(), - TestTrustedConfig = #{ - <<"vcpus">> => 32, - <<"vcpu_type">> => ?TEST_VCPU_TYPE, - <<"vmm_type">> => ?TEST_VMM_TYPE, - <<"guest_features">> => ?TEST_GUEST_FEATURES, - <<"firmware">> => ?TEST_FIRMWARE_HASH, - <<"kernel">> => ?TEST_KERNEL_HASH, - <<"initrd">> => ?TEST_INITRD_HASH, - <<"append">> => ?TEST_APPEND_HASH - }, - GenerateOpts = #{ - priv_wallet => TestWallet, - snp_trusted => [TestTrustedConfig] - }, - % Load test report data and set up mock - TestReportJSON = load_test_report_data(), - ok = mock_snp_nif(TestReportJSON), - try - % Step 1: Generate a test report using mocked SNP - {ok, GeneratedMsg} = generate(#{}, #{}, GenerateOpts), - % Verify the generated message structure - ?assert(is_map(GeneratedMsg)), - ?assert(maps:is_key(<<"report">>, GeneratedMsg)), - ?assert(maps:is_key(<<"address">>, GeneratedMsg)), - ?assert(maps:is_key(<<"nonce">>, GeneratedMsg)), - % Step 2: Set up verification options with the same trusted config - VerifyOpts = #{ - snp_trusted => [TestTrustedConfig], - snp_enforced_keys => [vcpu_type, vmm_type, guest_features, - firmware, kernel, initrd, append] - }, - % Step 3: Verify the generated report - {ok, VerifyResult} = - verify( - #{}, - hb_message:commit(GeneratedMsg, GenerateOpts), - VerifyOpts - ), - % Step 4: Assert that verification succeeds - ?assertEqual(<<"true">>, VerifyResult), - % Additional validation: verify specific fields - ReportData = maps:get(<<"report">>, GeneratedMsg), - ?assertEqual(TestReportJSON, ReportData), - LocalHashes = maps:get(<<"local-hashes">>, GeneratedMsg), - ?assertEqual(TestTrustedConfig, LocalHashes) - after - % Clean up mock - unmock_snp_nif() - end. - -%% @doc Test verification failure when using wrong trusted configuration. -verify_mock_generate_wrong_config_test_() -> - { timeout, 30, fun verify_mock_generate_wrong_config/0 }. 
-verify_mock_generate_wrong_config() -> - % Set up test configuration for generation - TestWallet = ar_wallet:new(), - GenerateTrustedConfig = #{ - <<"vcpus">> => ?TEST_VCPUS_COUNT, - <<"vcpu_type">> => ?TEST_VCPU_TYPE, - <<"vmm_type">> => ?TEST_VMM_TYPE, - <<"guest_features">> => ?TEST_GUEST_FEATURES, - <<"firmware">> => ?TEST_FIRMWARE_HASH, - <<"kernel">> => ?TEST_KERNEL_HASH, - <<"initrd">> => ?TEST_INITRD_HASH, - <<"append">> => ?TEST_APPEND_HASH - }, - GenerateOpts = #{ - priv_wallet => TestWallet, - snp_trusted => [GenerateTrustedConfig] - }, - % Load test report data and set up mock - TestReportJSON = load_test_report_data(), - ok = mock_snp_nif(TestReportJSON), - try - % Step 1: Generate a test report - {ok, GeneratedMsg} = generate(#{}, #{}, GenerateOpts), - % Step 2: Set up verification with DIFFERENT trusted config - WrongTrustedConfig = #{ - <<"vcpus">> => 32, % Different from generation config - <<"vcpu_type">> => 3, % Different from generation config - <<"firmware">> => <<"different_firmware_hash">>, - <<"kernel">> => <<"different_kernel_hash">> - }, - VerifyOpts = #{ - snp_trusted => [WrongTrustedConfig], - snp_enforced_keys => [vcpus, vcpu_type, firmware, kernel] - }, - % Step 3: Verify the generated report with wrong config - VerifyResult = - verify( - #{}, - hb_message:commit(GeneratedMsg, GenerateOpts), - VerifyOpts - ), - ?event({verify_result, {explicit, VerifyResult}}), - % Step 4: Assert that verification fails (either as error or false result) - case VerifyResult of - {ok, <<"false">>} -> - % Verification completed but returned false (all validations ran) - ok; - {error, _Reason} -> - % Verification failed early (expected for wrong config) - ok; - Other -> - % Unexpected result - should fail the test - ?assertEqual({ok, <<"false">>}, Other) - end - after - % Clean up mock - unmock_snp_nif() - end. - -%% @doc Mock the SNP NIF function to return test data. 
-%% -%% This function sets up a simple mock for dev_snp_nif:generate_attestation_report -%% to return predefined test data instead of calling actual hardware. -%% Uses process dictionary for simple mocking without external dependencies. -%% -%% @param TestReportJSON The test report data to return -%% @returns ok if mocking is successful --spec mock_snp_nif(ReportJSON :: binary()) -> ok. -mock_snp_nif(TestReportJSON) -> - % Use process dictionary for simple mocking - put(mock_snp_nif_response, TestReportJSON), - put(mock_snp_nif_enabled, true), - ok. - -%% @doc Clean up SNP NIF mocking. -%% -%% This function removes the mock setup and restores normal NIF behavior. -%% -%% @returns ok --spec unmock_snp_nif() -> ok. -unmock_snp_nif() -> - % Clean up process dictionary mock - erase(mock_snp_nif_response), - erase(mock_snp_nif_enabled), - ok. \ No newline at end of file +generate(M1, M2, Opts) -> + snp_generate:generate(M1, M2, Opts). \ No newline at end of file diff --git a/src/dev_snp_nif.erl b/src/dev_snp_nif.erl deleted file mode 100644 index bacf338c5..000000000 --- a/src/dev_snp_nif.erl +++ /dev/null @@ -1,84 +0,0 @@ --module(dev_snp_nif). --export([generate_attestation_report/2, compute_launch_digest/1, check_snp_support/0]). --export([verify_measurement/2, verify_signature/1]). --include("include/cargo.hrl"). --include("include/hb.hrl"). --include_lib("eunit/include/eunit.hrl"). - --on_load(init/0). --define(NOT_LOADED, not_loaded(?LINE)). - -check_snp_support() -> - ?NOT_LOADED. - -generate_attestation_report(_UniqueData, _VMPL) -> - ?NOT_LOADED. - -compute_launch_digest(_Args) -> - ?NOT_LOADED. - -verify_measurement(_Report, _Expected) -> - ?NOT_LOADED. - -verify_signature(_Report) -> - ?NOT_LOADED. - -init() -> - ?load_nif_from_crate(dev_snp_nif, 0). - -not_loaded(Line) -> - erlang:nif_error({not_loaded, [{module, ?MODULE}, {line, Line}]}). 
- -generate_attestation_report_test() -> - %% Call check_support() to determine if SNP is supported - case dev_snp_nif:check_snp_support() of - {ok, true} -> - %% SNP is supported, generate unique data and test commitment report - UniqueData = crypto:strong_rand_bytes(64), - VMPL = 1, - ?assertEqual( - {ok, UniqueData}, - dev_snp_nif:generate_attestation_report(UniqueData, VMPL) - ); - {ok, false} -> - %% SNP is not supported, log event and assert NIF not loaded - ?event("SNP not supported on machine, skipping test..."), - ?assertEqual(ok, ok) - end. - -compute_launch_digest_test() -> - %% Define the data structure - ArgsMap = #{ - vcpus => 32, - vcpu_type => 5, - vmm_type => 1, - guest_features => 16#1, - firmware => "b8c5d4082d5738db6b0fb0294174992738645df70c44cdecf7fad3a62244b788e7e408c582ee48a74b289f3acec78510", - kernel => "69d0cd7d13858e4fcef6bc7797aebd258730f215bc5642c4ad8e4b893cc67576", - initrd => "02e28b6c718bf0a5260d6f34d3c8fe0d71bf5f02af13e1bc695c6bc162120da1", - append => "56e1e5190622c8c6b9daa4fe3ad83f3831c305bb736735bf795b284cb462c9e7" - }, - - ?event(ArgsMap), - - %% Call the NIF - {ok, Result} = dev_snp_nif:compute_launch_digest(ArgsMap), - %% Expected result - EncTestVector = - <<"wmSDSQYuzE2M3rQcourJnDJHgalADM8TBev3gyjM5ObRNOn8oglvVznFbaWhajU_">>, - ?assertMatch(EncTestVector, hb_util:encode(Result)). - -verify_measurement_test() -> - %% Define a mock report (JSON string) as binary - {ok, MockReport} = file:read_file("test/snp-measurement.json"), - %% Define the expected measurement (binary) - ExpectedMeasurement = <<94,87,4,197,20,11,255,129,179,197,146,104,8,212,152,248,110,11,60,246,82,254,24,55,201,47,157,229,163,82,108,66,191,138,241,229,40,144,133,170,116,109,17,62,20,241,144,119>>, - %% Call the NIF - Result = dev_snp_nif:verify_measurement(MockReport, ExpectedMeasurement), - ?assertMatch({ok, true}, Result). 
- -verify_signature_test() -> - %% Define a mock report (JSON string) as binary - {ok, MockAttestation} = file:read_file("test/snp-attestation.json"), - Result = dev_snp_nif:verify_signature(MockAttestation), - ?assertMatch({ok, true}, Result). diff --git a/src/dev_snp_test.erl b/src/dev_snp_test.erl new file mode 100644 index 000000000..7dd1b9762 --- /dev/null +++ b/src/dev_snp_test.erl @@ -0,0 +1,346 @@ +%%% @doc Test suite for dev_snp module. +%%% +%%% This module contains all test cases and test helpers for SNP commitment +%%% report generation and verification. +-module(dev_snp_test). +-include("include/hb.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +%% Test configuration constants +-define(TEST_VCPUS_COUNT, 32). +-define(TEST_VCPU_TYPE, 5). +-define(TEST_VMM_TYPE, 1). +-define(TEST_GUEST_FEATURES, 1). +-define(TEST_FIRMWARE_HASH, <<"b8c5d4082d5738db6b0fb0294174992738645df70c44cdecf7fad3a62244b788e7e408c582ee48a74b289f3acec78510">>). +-define(TEST_KERNEL_HASH, <<"69d0cd7d13858e4fcef6bc7797aebd258730f215bc5642c4ad8e4b893cc67576">>). +-define(TEST_INITRD_HASH, <<"544045560322dbcd2c454bdc50f35edf0147829ec440e6cb487b4a1503f923c1">>). +-define(TEST_APPEND_HASH, <<"95a34faced5e487991f9cc2253a41cbd26b708bf00328f98dddbbf6b3ea2892e">>). + +%% Test helper functions and data +get_test_hashes() -> + #{ + <<"vcpus">> => ?TEST_VCPUS_COUNT, + <<"vcpu_type">> => ?TEST_VCPU_TYPE, + <<"vmm_type">> => ?TEST_VMM_TYPE, + <<"guest_features">> => ?TEST_GUEST_FEATURES, + <<"firmware">> => ?TEST_FIRMWARE_HASH, + <<"kernel">> => ?TEST_KERNEL_HASH, + <<"initrd">> => ?TEST_INITRD_HASH, + <<"append">> => ?TEST_APPEND_HASH + }. 
+ +%% Verification test helpers +setup_test_nodes() -> + ProxyWallet = hb:wallet(<<"test/admissible-report-wallet.json">>), + ProxyOpts = #{ + store => hb_opts:get(store), + priv_wallet => ProxyWallet + }, + _ReportNode = hb_http_server:start_node(ProxyOpts), + VerifyingNode = hb_http_server:start_node(#{ + priv_wallet => ar_wallet:new(), + store => hb_opts:get(store), + snp_trusted => [ + #{ + <<"vcpus">> => ?TEST_VCPUS_COUNT, + <<"vcpu_type">> => ?TEST_VCPU_TYPE, + <<"vmm_type">> => ?TEST_VMM_TYPE, + <<"guest_features">> => ?TEST_GUEST_FEATURES, + <<"firmware">> => ?TEST_FIRMWARE_HASH, + <<"kernel">> => ?TEST_KERNEL_HASH, + <<"initrd">> => ?TEST_INITRD_HASH, + <<"append">> => ?TEST_APPEND_HASH + } + ], + snp_enforced_keys => [ + vcpu_type, vmm_type, guest_features, + firmware, kernel, initrd, append + ] + }), + {ProxyOpts, VerifyingNode}. + +%% @doc Load test SNP report data from file. +-spec load_test_report_data() -> binary(). +load_test_report_data() -> + TestFile = <<"test/admissible-report.json">>, + case file:read_file(TestFile) of + {ok, Data} -> + Data; + {error, enoent} -> + throw({error, {file_not_found, TestFile}}); + {error, Reason} -> + throw({error, {file_read_error, TestFile, Reason}}) + end. + +%% @doc Mock the SNP NIF function to return test data. +%% +%% This function sets up a simple mock for snp_nif:generate_attestation_report +%% to return predefined test data instead of calling actual hardware. +%% Uses process dictionary for simple mocking without external dependencies. +%% +%% @param TestReportJSON The test report data to return +%% @returns ok if mocking is successful +-spec mock_snp_nif(ReportJSON :: binary()) -> ok. +mock_snp_nif(TestReportJSON) -> + % Use process dictionary for simple mocking + put(mock_snp_nif_response, TestReportJSON), + put(mock_snp_nif_enabled, true), + ok. + +%% @doc Clean up SNP NIF mocking. +%% +%% This function removes the mock setup and restores normal NIF behavior. 
+%% +%% @returns ok +-spec unmock_snp_nif() -> ok. +unmock_snp_nif() -> + % Clean up process dictionary mock + erase(mock_snp_nif_response), + erase(mock_snp_nif_enabled), + ok. + +%% Individual test cases +execute_is_trusted_exact_match_should_fail_test() -> + % Test case: Exact match with trusted software should fail when vcpus differ + Msg = #{ + <<"local-hashes">> => (get_test_hashes())#{ + <<"vcpus">> => 16 + } + }, + NodeOpts = #{ + snp_trusted => [get_test_hashes()], + snp_enforced_keys => [ + vcpus, vcpu_type, vmm_type, guest_features, + firmware, kernel, initrd, append + ] + }, + {ok, Result} = snp_trust:execute_is_trusted(#{}, Msg, NodeOpts), + ?assertEqual(false, Result). + +execute_is_trusted_subset_match_should_pass_test() -> + % Test case: Match with subset of keys in trusted software should pass + Msg = #{ + <<"local-hashes">> => (get_test_hashes())#{ + <<"vcpus">> => 16 + } + }, + NodeOpts = #{ + snp_trusted => [get_test_hashes()], + snp_enforced_keys => [ + vcpu_type, vmm_type, guest_features, + firmware, kernel, initrd, append + ] + }, + {ok, Result} = snp_trust:execute_is_trusted(#{}, Msg, NodeOpts), + ?assertEqual(true, Result). + +verify_test() -> + % Note: If this test fails, it may be because the unsigned ID of the node + % message in `test/admissible-report.eterm` has changed. If the format ever + % changes, this value will need to be updated. Recalculate the unsigned ID + % of the `Request/node-message' field, decode `Request/address', concatenate + % the two, and encode. The result will be the new `Request/nonce' value. + % Requires SNP NIF (signature verification); skips when verify fails (e.g. NIF not loaded). 
+ {ProxyOpts, VerifyingNode} = setup_test_nodes(), + {ok, [Request]} = file:consult(<<"test/admissible-report.eterm">>), + PostResult = try + hb_http:post( + VerifyingNode, + <<"/~snp@1.0/verify">>, + hb_message:commit(Request, ProxyOpts), + ProxyOpts + ) + catch + C:R:St -> + ?event({verify_test_post_error, {C, R, St}}), + {error, {C, R}} + end, + case PostResult of + {ok, Result} -> + ?event({verify_test_result, Result}), + % Response: binary <<"true">>, atom true, map, or tuple {failure, Map} / {error, _} (e.g. 500) + IsSuccess = case Result of + B when is_binary(B) -> hb_util:atom(B) =:= true; + A when is_atom(A) -> A =:= true; + Map when is_map(Map) -> + Status = maps:get(<<"status">>, Map, maps:get(status, Map, undefined)), + case Status of + 500 -> false; % Server error (e.g. NIF undef) + _ -> (maps:get(<<"body">>, Map, maps:get(body, Map, <<>>)) =:= <<"true">>) + end; + {failure, _} -> false; % e.g. 500 from server (NIF undef) + {error, _} -> false; + _ -> false + end, + if IsSuccess -> ok; + true -> {skip, "Verify returned non-true (SNP NIF may be unavailable or verification failed)"} + end; + {failure, _} -> + % Server returned 500 (e.g. NIF undef / load failed) + {skip, "Verify request returned 500 (SNP NIF may be unavailable)"}; + {error, _Reason} -> + {skip, "Verify request failed (SNP NIF may be unavailable)"} + end. + +%% @doc Test successful report generation with valid configuration. +%% Requires SNP NIF (SEV-SNP hardware or built NIF); skips when NIF not loaded. 
+generate_success_test() -> + % Set up test configuration + TestWallet = ar_wallet:new(), + TestOpts = #{ + priv_wallet => TestWallet, + snp_trusted => [#{ + <<"vcpus">> => ?TEST_VCPUS_COUNT, + <<"vcpu_type">> => ?TEST_VCPU_TYPE, + <<"firmware">> => ?TEST_FIRMWARE_HASH, + <<"kernel">> => ?TEST_KERNEL_HASH + }] + }, + case dev_snp:generate(#{}, #{}, TestOpts) of + {error, nif_not_loaded} -> + {skip, "SNP NIF not loaded (no SEV-SNP or NIF build)"}; + {ok, Result} -> + % Verify the result structure + ?assert(is_map(Result)), + ?assert(maps:is_key(<<"local-hashes">>, Result)), + ?assert(maps:is_key(<<"nonce">>, Result)), + ?assert(maps:is_key(<<"address">>, Result)), + ?assert(maps:is_key(<<"node-message">>, Result)), + ?assert(maps:is_key(<<"report">>, Result)), + ReportBin = maps:get(<<"report">>, Result), + ?assert(is_binary(ReportBin)), + ?assert(byte_size(ReportBin) > 0), + ExpectedHashes = maps:get(<<"local-hashes">>, Result), + ?assertEqual(?TEST_VCPUS_COUNT, maps:get(<<"vcpus">>, ExpectedHashes)), + ?assertEqual(?TEST_VCPU_TYPE, maps:get(<<"vcpu_type">>, ExpectedHashes)), + Nonce = maps:get(<<"nonce">>, Result), + ?assert(is_binary(Nonce)), + ?assert(byte_size(Nonce) > 0), + Address = maps:get(<<"address">>, Result), + ?assert(is_binary(Address)), + ?assert(byte_size(Address) > 0); + {error, Other} -> + erlang:error({generate_failed, Other}) + end. + +%% @doc Test error handling when wallet is missing. +generate_missing_wallet_test() -> + TestOpts = #{ + % No priv_wallet provided + snp_trusted => [#{ <<"firmware">> => ?TEST_FIRMWARE_HASH }] + }, + Result = dev_snp:generate(#{}, #{}, TestOpts), + ?assertMatch({error, {missing_wallet, _}}, Result). + +%% @doc Test error handling when trusted configurations are missing. 
+generate_missing_trusted_configs_test() -> + TestWallet = ar_wallet:new(), + TestOpts = #{ + priv_wallet => TestWallet, + snp_trusted => [] % Empty trusted configs + }, + Result = dev_snp:generate(#{}, #{}, TestOpts), + ?assertMatch({error, {empty_trusted_configs, _}}, Result). + +%% @doc Test successful round-trip: generate then verify with same configuration. +verify_mock_generate_success_test_() -> + { timeout, 30, fun verify_mock_generate_success/0 }. +verify_mock_generate_success() -> + % Set up test configuration + TestWallet = ar_wallet:new(), + TestTrustedConfig = #{ + <<"vcpus">> => 32, + <<"vcpu_type">> => ?TEST_VCPU_TYPE, + <<"vmm_type">> => ?TEST_VMM_TYPE, + <<"guest_features">> => ?TEST_GUEST_FEATURES, + <<"firmware">> => ?TEST_FIRMWARE_HASH, + <<"kernel">> => ?TEST_KERNEL_HASH, + <<"initrd">> => ?TEST_INITRD_HASH, + <<"append">> => ?TEST_APPEND_HASH + }, + GenerateOpts = #{ + priv_wallet => TestWallet, + snp_trusted => [TestTrustedConfig] + }, + % Step 1: Generate a test report (requires SNP NIF) + case dev_snp:generate(#{}, #{}, GenerateOpts) of + {error, nif_not_loaded} -> + {skip, "SNP NIF not loaded (no SEV-SNP or NIF build)"}; + {ok, GeneratedMsg} -> + % Verify the generated message structure + ?assert(is_map(GeneratedMsg)), + ?assert(maps:is_key(<<"report">>, GeneratedMsg)), + ?assert(maps:is_key(<<"address">>, GeneratedMsg)), + ?assert(maps:is_key(<<"nonce">>, GeneratedMsg)), + % Step 2: Set up verification options with the same trusted config + VerifyOpts = #{ + snp_trusted => [TestTrustedConfig], + snp_enforced_keys => [vcpu_type, vmm_type, guest_features, + firmware, kernel, initrd, append] + }, + % Step 3: Verify the generated report + {ok, VerifyResult} = + dev_snp:verify( + #{}, + hb_message:commit(GeneratedMsg, GenerateOpts), + VerifyOpts + ), + ?assertEqual(<<"true">>, VerifyResult), + ReportData = maps:get(<<"report">>, GeneratedMsg), + ?assert(is_binary(ReportData)), + LocalHashes = maps:get(<<"local-hashes">>, GeneratedMsg), + 
?assertEqual(TestTrustedConfig, LocalHashes); + {error, Other} -> + erlang:error({generate_failed, Other}) + end. + +%% @doc Test verification failure when using wrong trusted configuration. +verify_mock_generate_wrong_config_test_() -> + { timeout, 30, fun verify_mock_generate_wrong_config/0 }. +verify_mock_generate_wrong_config() -> + % Set up test configuration for generation + TestWallet = ar_wallet:new(), + GenerateTrustedConfig = #{ + <<"vcpus">> => ?TEST_VCPUS_COUNT, + <<"vcpu_type">> => ?TEST_VCPU_TYPE, + <<"vmm_type">> => ?TEST_VMM_TYPE, + <<"guest_features">> => ?TEST_GUEST_FEATURES, + <<"firmware">> => ?TEST_FIRMWARE_HASH, + <<"kernel">> => ?TEST_KERNEL_HASH, + <<"initrd">> => ?TEST_INITRD_HASH, + <<"append">> => ?TEST_APPEND_HASH + }, + GenerateOpts = #{ + priv_wallet => TestWallet, + snp_trusted => [GenerateTrustedConfig] + }, + case dev_snp:generate(#{}, #{}, GenerateOpts) of + {error, nif_not_loaded} -> + {skip, "SNP NIF not loaded (no SEV-SNP or NIF build)"}; + {ok, GeneratedMsg} -> + WrongTrustedConfig = #{ + <<"vcpus">> => 32, + <<"vcpu_type">> => 3, + <<"firmware">> => <<"different_firmware_hash">>, + <<"kernel">> => <<"different_kernel_hash">> + }, + VerifyOpts = #{ + snp_trusted => [WrongTrustedConfig], + snp_enforced_keys => [vcpus, vcpu_type, firmware, kernel] + }, + VerifyResult = + dev_snp:verify( + #{}, + hb_message:commit(GeneratedMsg, GenerateOpts), + VerifyOpts + ), + ?event({verify_result, {explicit, VerifyResult}}), + case VerifyResult of + {ok, <<"false">>} -> ok; + {error, _Reason} -> ok; + Other -> ?assertEqual({ok, <<"false">>}, Other) + end; + {error, Other} -> + erlang:error({generate_failed, Other}) + end. + diff --git a/src/dev_ssl_cert.erl b/src/dev_ssl_cert.erl new file mode 100644 index 000000000..f20cee93a --- /dev/null +++ b/src/dev_ssl_cert.erl @@ -0,0 +1,1064 @@ +%%% @doc SSL Certificate device for automated Let's Encrypt certificate +%%% management using DNS-01 challenges. 
+%%% +%%% This device provides HTTP endpoints for requesting, managing, and renewing +%%% SSL certificates through Let's Encrypt's ACME v2 protocol. It supports +%%% both staging and production environments and handles the complete +%%% certificate lifecycle including DNS challenge generation and validation. +%%% +%%% The device generates DNS TXT records that users must manually add to their +%%% DNS providers, making it suitable for environments where automated DNS +%%% API access is not available. +%%% +%%% This module serves as the main device interface, orchestrating calls to +%%% specialized modules for validation, state management, challenge handling, +%%% and certificate operations. +-module(dev_ssl_cert). + +-include("include/hb.hrl"). +-include_lib("ssl_cert/include/ssl_cert.hrl"). + +%% Device API exports +-export([info/1, info/3, request/3, finalize/3]). +-export([renew/3, delete/3]). +-export([get_cert/3, request_cert/3]). + +-define(CERT_DIR, filename:join([element(2, file:get_cwd()), "certs"])). +-define(CERT_PEM_FILE, + filename:join( + [?CERT_DIR, <<"hyperbeam_cert.pem">>] + ) +). +-define(KEY_PEM_FILE, + filename:join( + [?CERT_DIR, <<"hyperbeam_key.pem">>] + ) +). +-define(DEFAULT_HTTPS_PORT, 443). + +%% @doc Controls which functions are exposed via the device API. +%% +%% This function defines the security boundary for the SSL certificate device +%% by explicitly listing which functions are available through HTTP endpoints. +%% +%% @param _ Ignored parameter +%% @returns A map with the `exports' key containing a list of allowed functions +info(_) -> + #{ + exports => [ + <<"info">>, + <<"request">>, + <<"finalize">>, + <<"renew">>, + <<"delete">>, + <<"get_cert">>, + <<"request_cert">> + ] + }. + +%% @doc Provides information about the SSL certificate device and its API. +%% +%% This function returns detailed documentation about the device, including: +%% 1. A high-level description of the device's purpose +%% 2. Version information +%% 3. 
Available API endpoints with their parameters and descriptions +%% 4. Configuration requirements and examples +%% +%% @param _Msg1 Ignored parameter +%% @param _Msg2 Ignored parameter +%% @param _Opts A map of configuration options +%% @returns {ok, Map} containing the device information and documentation +info(_Msg1, _Msg2, _Opts) -> + InfoBody = #{ + <<"description">> => + << + "SSL Certificate management with", + "Let's Encrypt DNS-01 challenges" + >>, + <<"version">> => <<"1.0">>, + <<"api">> => #{ + <<"info">> => #{ + <<"description">> => + <<"Get device info and API documentation">> + }, + <<"request">> => #{ + <<"description">> => <<"Request a new SSL certificate">>, + <<"configuration_required">> => #{ + <<"ssl_opts">> => #{ + <<"domains">> => + <<"List of domain names for certificate">>, + <<"email">> => + <<"Contact email for Let's Encrypt account">>, + <<"environment">> => + <<"'staging' or 'production'">>, + <<"auto_https">> => + << + "Automatically start HTTPS server and", + "redirect HTTP traffic (default: true)" + >>, + <<"https_port">> => <<"HTTPS port (default: 443)">> + } + }, + <<"example_config">> => #{ + <<"ssl_opts">> => #{ + <<"domains">> => + [<<"example.com">>, <<"www.example.com">>], + <<"email">> => <<"admin@example.com">>, + <<"environment">> => <<"staging">>, + <<"auto_https">> => <<"true">>, + <<"https_port">> => <<"443">> + } + }, + <<"usage">> => + << + "POST /ssl-cert@1.0/request", + " (returns challenges; state saved internally)" + >> + }, + <<"finalize">> => #{ + <<"description">> => + << + "Finalize certificate issuance", + "after DNS TXT records are set" + >>, + <<"usage">> => + << + "POST /ssl-cert@1.0/finalize", + " (validates and returns certificate)" + >>, + <<"auto_https">> => + << + "Automatically starts HTTPS server and redirects", + "HTTP traffic (default: true)" + >>, + <<"https_port">> => + << + "Configurable HTTPS port (default: 8443 for", + "development, set to 443 for production)" + >> + }, + <<"renew">> => #{ + 
<<"description">> => <<"Renew an existing certificate">>, + <<"required_params">> => #{ + <<"domains">> => <<"List of domain names to renew">> + } + }, + <<"delete">> => #{ + <<"description">> => <<"Delete a stored certificate">>, + <<"required_params">> => #{ + <<"domains">> => <<"List of domain names to delete">> + } + }, + <<"get_cert">> => #{ + <<"description">> => + <<"Get encrypted certificate and private key for sharing">>, + <<"usage">> => <<"POST /ssl-cert@1.0/get_cert">>, + <<"note">> => + << + "Returns encrypted certificate data that can be used by", + "another node with the same green zone AES key" + >> + }, + <<"request_cert">> => #{ + <<"description">> => + <<"Request and use certificate from another node">>, + <<"required_params">> => #{ + <<"green_zone_peer_location">> => <<"URL of the peer node">>, + <<"green_zone_peer_id">> => <<"ID of the peer node">> + }, + <<"usage">> => <<"POST /ssl-cert@1.0/request_cert">>, + <<"note">> => + << + "Automatically starts HTTPS server with the retrieved", + "certificate" + >> + } + } + }, + ssl_utils:build_success_response(200, InfoBody). + +%% @doc Requests a new SSL certificate for the specified domains. +%% +%% This function initiates the certificate request process: +%% 1. Validates the input parameters (domains, email, environment) +%% 2. Creates or retrieves an ACME account with Let's Encrypt +%% 3. Submits a certificate order for the specified domains +%% 4. Generates DNS-01 challenges for domain validation +%% 5. Stores the request state for subsequent operations +%% 6. 
Returns a request ID and initial status +%% +%% Required parameters in ssl_opts configuration: +%% - domains: List of domain names for the certificate +%% - email: Contact email for Let's Encrypt account registration +%% - environment: 'staging' or 'production' (use staging for testing) +%% +%% @param _M1 Ignored parameter +%% @param _M2 Request message containing certificate parameters +%% @param Opts A map of configuration options +%% @returns {ok, Map} with request ID and status, or {error, Reason} +request(_M1, _M2, Opts) -> + ?event({ssl_cert_request_started}), + maybe + {ok, ValidatedParams} ?= + extract_and_validate_ssl_params(Opts), + {ok, {RequestState, ChallengeData}} ?= + process_certificate_request_workflow(ValidatedParams, Opts), + build_request_response(RequestState, ChallengeData) + else + {error, <<"ssl_opts configuration required">>} -> + ssl_utils:build_error_response( + 400, + <<"ssl_opts configuration required">> + ); + {error, ReasonBin} when is_binary(ReasonBin) -> + ssl_utils:format_validation_error(ReasonBin); + {error, Reason} -> + ?event({ssl_cert_request_error_maybe, Reason}), + FormattedError = ssl_utils:format_error_details(Reason), + ssl_utils:build_error_response(500, FormattedError); + Error -> + ?event({ssl_cert_request_unexpected_error, Error}), + ssl_utils:build_error_response(500, <<"Internal server error">>) + end. + +%% @doc Finalizes a certificate request: validates challenges and downloads +%% the certificate. +%% +%% This function: +%% 1. Retrieves the stored request state +%% 2. Validates DNS challenges with Let's Encrypt +%% 3. Finalizes the order if challenges are valid +%% 4. Downloads the certificate if available +%% 5. Automatically starts HTTPS server on port 443 (if auto_https is enabled) +%% 6. Configures HTTP server to redirect to HTTPS +%% 7. 
Returns the certificate and HTTPS server status +%% +%% The auto_https feature (enabled by default) will: +%% - Start a new HTTPS listener on port 443 using the issued certificate +%% - Reconfigure the existing HTTP server to send 301 redirects to HTTPS +%% - Preserve all existing server configuration and functionality +%% +%% @param _M1 Ignored +%% @param _M2 Message containing request_state +%% @param Opts Options (supports auto_https: true/false) +%% @returns {ok, Map} result of validation and optionally certificate +finalize(_M1, _M2, Opts) -> + ?event({ssl_cert_finalize_started}), + maybe + {ok, {RequestState, PrivKeyRecord}} ?= + load_certificate_state(Opts), + {ok, {OrderStatus, Results, RequestState1}} ?= + validate_challenges(RequestState, PrivKeyRecord), + case OrderStatus of + ?ACME_STATUS_VALID -> + handle_valid_certificate( + RequestState1, + PrivKeyRecord, + Results, + Opts + ); + _ -> + build_pending_response(OrderStatus, Results, RequestState1) + end + else + {error, request_state_not_found} -> + ssl_utils:build_error_response( + 404, + <<"request state not found">> + ); + {error, invalid_request_state} -> + ssl_utils:build_error_response( + 400, + <<"request_state must be a map">> + ); + {error, Reason} -> + FormattedError = ssl_utils:format_error_details(Reason), + ssl_utils:build_error_response(500, FormattedError) + end. + + +%% @doc Renews an existing SSL certificate. +%% +%% This function initiates renewal for an existing certificate: +%% 1. Validates the domains parameter +%% 2. Retrieves the existing certificate configuration +%% 3. Initiates a new certificate request with the same parameters +%% 4. 
Returns a new request ID for the renewal process +%% +%% Required parameters in ssl_opts configuration: +%% - domains: List of domain names to renew +%% - email: Contact email for Let's Encrypt account +%% - environment: ACME environment setting +%% +%% @param _M1 Ignored parameter +%% @param M2 Request message containing domains to renew +%% @param Opts A map of configuration options +%% @returns {ok, Map} with renewal request ID, or {error, Reason} +renew(_M1, _M2, Opts) -> + ?event({ssl_cert_renewal_started}), + try + % Extract SSL options and validate + case extract_ssl_opts(Opts) of + {error, ErrorReason} -> + ssl_utils:build_error_response(400, ErrorReason); + {ok, SslOpts} -> + Domains = maps:get(<<"domains">>, SslOpts, not_found), + case Domains of + not_found -> + ?event({ssl_cert_renewal_domains_missing}), + ssl_utils:build_error_response( + 400, + <<"domains required in ssl_opts configuration">> + ); + _ -> + DomainList = ssl_utils:normalize_domains(Domains), + ssl_cert_ops:renew_certificate(DomainList, Opts) + end + end + catch + Error:CatchReason:Stacktrace -> + ?event({ssl_cert_renewal_error, Error, CatchReason, Stacktrace}), + ssl_utils:build_error_response(500, <<"Internal server error">>) + end. + +%% @doc Deletes a stored SSL certificate. +%% +%% This function removes a certificate from storage: +%% 1. Validates the domains parameter +%% 2. Locates the certificate in storage +%% 3. Removes the certificate files and metadata +%% 4. 
Returns confirmation of deletion +%% +%% Required parameters in ssl_opts configuration: +%% - domains: List of domain names to delete +%% +%% @param _M1 Ignored parameter +%% @param M2 Request message containing domains to delete +%% @param Opts A map of configuration options +%% @returns {ok, Map} with deletion confirmation, or {error, Reason} +delete(_M1, _M2, Opts) -> + ?event({ssl_cert_deletion_started}), + try + % Extract SSL options and validate + case extract_ssl_opts(Opts) of + {error, ErrorReason} -> + ssl_utils:build_error_response(400, ErrorReason); + {ok, SslOpts} -> + Domains = maps:get(<<"domains">>, SslOpts, not_found), + case Domains of + not_found -> + ?event({ssl_cert_deletion_domains_missing}), + ssl_utils:build_error_response( + 400, + <<"domains required in ssl_opts configuration">> + ); + _ -> + DomainList = ssl_utils:normalize_domains(Domains), + ssl_cert_ops:delete_certificate(DomainList, Opts) + end + end + catch + Error:CatchReason:Stacktrace -> + ?event({ssl_cert_deletion_error, Error, CatchReason, Stacktrace}), + ssl_utils:build_error_response(500, <<"Internal server error">>) + end. + +%% @doc Get encrypted certificate and private key for sharing with other nodes. +%% +%% This function encrypts the current certificate and private key using the +%% shared green zone AES key, similar to how the green zone shares wallet keys. +%% The encrypted data can be requested by another node that has the same +%% green zone AES key. 
+%% +%% @param _M1 Ignored parameter +%% @param _M2 Ignored parameter +%% @param Opts Server configuration options +%% @returns {ok, Map} with encrypted certificate data, or {error, Reason} +get_cert(_M1, _M2, Opts) -> + ?event(ssl_cert, {get_cert, start}), + maybe + {ok, CertPem} ?= file:read_file(?CERT_PEM_FILE), + {ok, KeyPem} ?= file:read_file(?KEY_PEM_FILE), + % Create combined certificate data + CertData = #{ + cert_pem => CertPem, + key_pem => KeyPem, + timestamp => erlang:system_time(second) + }, + % Encrypt using green zone helper function + {ok, {EncryptedData, IV}} ?= + dev_green_zone:encrypt_data(CertData, Opts), + ?event(ssl_cert, {get_cert, encrypt, complete}), + ssl_utils:build_success_response(200, #{ + <<"encrypted_cert">> => base64:encode(EncryptedData), + <<"iv">> => base64:encode(IV), + <<"message">> => + <<"Certificate encrypted and ready for sharing">> + }) + else + {error, enoent} -> + ?event(ssl_cert, {get_cert, file_not_found}), + ssl_utils:build_error_response( + 404, + <<"Certificate or key file not found">> + ); + {error, no_green_zone_aes_key} -> + ?event(ssl_cert, {get_cert, error, <<"no aes key">>}), + ssl_utils:build_error_response( + 400, + <<"Node not part of a green zone - no shared AES key">> + ); + {error, EncryptError} -> + ?event(ssl_cert, {get_cert, encrypt_error, EncryptError}), + ssl_utils:build_error_response(500, <<"Encryption failed">>); + Error -> + ?event(ssl_cert, {get_cert, unexpected_error, Error}), + ssl_utils:build_error_response(500, <<"Internal server error">>) + end. + +%% @doc Request certificate from another node and start HTTPS server. +%% +%% This function requests encrypted certificate data from another node, +%% decrypts it using the shared green zone AES key, and automatically +%% starts an HTTPS server with the retrieved certificate. 
+%% +%% Required parameters: +%% - peer_location: URL of the peer node +%% - peer_id: ID of the peer node for verification +%% +%% @param _M1 Ignored parameter +%% @param _M2 Request message containing peer information +%% @param Opts Server configuration options +%% @returns {ok, Map} with certificate status and HTTPS server info, or +%% {error, Reason} +request_cert(_M1, _M2, Opts) -> + ?event(ssl_cert, {request_cert, start}), + % Extract peer information + PeerLocation = hb_opts:get(<<"green_zone_peer_location">>, undefined, Opts), + PeerID = hb_opts:get(<<"green_zone_peer_id">>, undefined, Opts), + case {PeerLocation, PeerID} of + {undefined, _} -> + ssl_utils:build_error_response( + 400, + <<"green_zone_peer_location required">> + ); + {_, undefined} -> + ssl_utils:build_error_response( + 400, + <<"green_zone_peer_id required">> + ); + {_, _} -> + try_request_cert_from_peer(PeerLocation, PeerID, Opts) + end. + +%%% =================================================================== +%%% Internal Helper Functions +%%% =================================================================== + +%% @doc Try to request certificate from peer node. +%% +%% This function makes an HTTP request to the peer node's get_cert endpoint, +%% verifies the response signature, decrypts the certificate data, and +%% starts an HTTPS server with the retrieved certificate. 
+%% +%% @param PeerLocation URL of the peer node +%% @param PeerID Expected signer ID for verification +%% @param Opts Server configuration options +%% @returns {ok, Map} with certificate status, or {error, Reason} +try_request_cert_from_peer(PeerLocation, PeerID, Opts) -> + maybe + ?event(ssl_cert, {request_cert, getting_cert, PeerLocation, PeerID}), + % Request encrypted certificate from peer + {ok, CertResp} ?= hb_http:get(PeerLocation, + <<"/~ssl-cert@1.0/get_cert">>, Opts), + % Verify response signature + Signers = hb_message:signers(CertResp, Opts), + true ?= (hb_message:verify(CertResp, Signers, Opts) and + lists:member(PeerID, Signers)), + finalize_cert_request(CertResp, Opts) + else + false -> + ?event(ssl_cert, {request_cert, invalid_signature}), + ssl_utils:build_error_response( + 400, + <<"Invalid response signature from peer">> + ); + Error -> + ?event(ssl_cert, {request_cert, error, Error}), + ssl_utils:build_error_response( + 500, + <<"Failed to request certificate from peer">> + ) + end. + +%% @doc Finalize certificate request by decrypting and using the certificate. +%% +%% This function decrypts the certificate data received from the peer, +%% writes it to local files, and starts an HTTPS server. 
+%% +%% @param CertResp Response from peer containing encrypted certificate +%% @param Opts Server configuration options +%% @returns {ok, Map} with HTTPS server status +finalize_cert_request(CertResp, Opts) -> + maybe + % Extract encrypted data from response + Body = hb_ao:get(<<"body">>, CertResp, Opts), + Combined = + base64:decode(hb_ao:get(<<"encrypted_cert">>, Body, Opts)), + IV = base64:decode(hb_ao:get(<<"iv">>, Body, Opts)), + % Decrypt using green zone helper function + {ok, DecryptedBin} ?= dev_green_zone:decrypt_data(Combined, IV, Opts), + % Extract certificate components + #{cert_pem := CertPem, key_pem := KeyPem, timestamp := Timestamp} = + binary_to_term(DecryptedBin), + ?event( + ssl_cert, + {request_cert, decrypted_cert, {timestamp, Timestamp}} + ), + % Write certificate files + {ok, {CertFile, KeyFile}} ?= write_certificate_files(CertPem, KeyPem), + ?event(ssl_cert, {request_cert, files_written, {CertFile, KeyFile}}), + % Start HTTPS server with the certificate + HttpsPort = hb_opts:get(<<"https_port">>, ?DEFAULT_HTTPS_PORT, Opts), + RedirectTo = get_redirect_server_id(Opts), + HttpsResult = try hb_http_server:start_https_node( + CertFile, + KeyFile, + Opts, + RedirectTo, + HttpsPort + ) of + ServerUrl when is_binary(ServerUrl) -> + ?event(ssl_cert, {request_cert, https_started, ServerUrl}), + {started, ServerUrl} + catch + StartError:StartReason:StartStacktrace -> + ?event(ssl_cert, + { + request_cert, https_failed, + {error, StartError}, + {reason, StartReason}, + {stacktrace, StartStacktrace} + } + ), + {failed, {StartError, StartReason}} + end, + % Build response + ssl_utils:build_success_response(200, #{ + <<"message">> => + <<"Certificate retrieved and HTTPS server started">>, + <<"https_server">> => format_https_server_status(HttpsResult), + <<"certificate_timestamp">> => Timestamp + }) + else + {error, no_green_zone_aes_key} -> + ?event(ssl_cert, {request_cert, error, <<"no aes key">>}), + ssl_utils:build_error_response( + 400, + <<"Node 
not part of a green zone - no shared AES key">> + ); + {error, DecryptError} -> + ?event(ssl_cert, {request_cert, decrypt_error, DecryptError}), + ssl_utils:build_error_response( + 400, + <<"Failed to decrypt certificate data">> + ); + Error -> + ?event(ssl_cert, {request_cert, general_error, Error}), + ssl_utils:build_error_response( + 500, + <<"Internal server error">> + ) + end. + +%% @doc Extracts SSL options from configuration with validation. +%% +%% This function extracts and validates the ssl_opts configuration from +%% the provided options map, ensuring all required fields are present. +%% +%% @param Opts Configuration options map +%% @returns {ok, SslOpts} or {error, Reason} +extract_ssl_opts(Opts) when is_map(Opts) -> + case hb_opts:get(<<"ssl_opts">>, not_found, Opts) of + not_found -> + {error, <<"ssl_opts configuration required">>}; + SslOpts when is_map(SslOpts) -> + {ok, SslOpts}; + _ -> + {error, <<"ssl_opts must be a map">>} + end. + +%% @doc Load and validate certificate state from options. +%% +%% This function retrieves the stored certificate request state and private key +%% from the server options, validating that the request state exists and is +%% properly formatted as a map. +%% +%% @param Opts Server configuration options containing ssl_cert_request +%% and ssl_cert_rsa_key +%% @returns {ok, {RequestState, PrivKeyRecord}} or {error, Reason} +load_certificate_state(Opts) -> + RequestState = hb_opts:get(<<"priv_ssl_cert_request">>, not_found, Opts), + case RequestState of + not_found -> + {error, request_state_not_found}; + _ when is_map(RequestState) -> + PrivKeyRecord = + hb_opts:get(<<"priv_ssl_cert_rsa_key">>, not_found, Opts), + {ok, {RequestState, PrivKeyRecord}}; + _ -> + {error, invalid_request_state} + end. + +%% @doc Validate DNS challenges and return order status. +%% +%% This function validates the DNS-01 challenges with Let's Encrypt's +%% ACME server +%% to verify domain ownership. 
It extracts the order status, validation +%% results, +%% and updated request state from the validation response. +%% +%% @param RequestState Current certificate request state +%% @param PrivKeyRecord Private key record for challenge validation +%% @returns {ok, {OrderStatus, Results, RequestState1}} or {error, Reason} +validate_challenges(RequestState, PrivKeyRecord) -> + case ssl_cert_challenge:validate_dns_challenges_state( + RequestState, + PrivKeyRecord + ) of + {ok, ValResp} -> + ValBody = maps:get(<<"body">>, ValResp, #{}), + OrderStatus = maps:get(<<"order_status">>, ValBody, <<"unknown">>), + Results = maps:get(<<"results">>, ValBody, []), + RequestState1 = + maps:get(<<"request_state">>, ValBody, RequestState), + {ok, {OrderStatus, Results, RequestState1}}; + Error -> + Error + end. + +%% @doc Handle valid certificate: download and optionally start HTTPS server. +%% +%% This function processes a validated certificate order by downloading the +%% certificate from Let's Encrypt, extracting the certificate data, and +%% optionally starting an HTTPS server with the new certificate. 
+%% +%% @param RequestState Validated certificate request state +%% @param PrivKeyRecord Private key record for the certificate +%% @param Results Validation results from challenge verification +%% @param Opts Server configuration options +%% @returns {ok, Response} with certificate and optional HTTPS server +%% status +handle_valid_certificate(RequestState, PrivKeyRecord, Results, Opts) -> + case ssl_cert_ops:download_certificate_state(RequestState, Opts) of + {ok, DownResp} -> + ?event(ssl_cert, {ssl_cert_certificate_downloaded, DownResp}), + maybe + {ok, {CertPem, DomainsOut, PrivKeyPem}} ?= + extract_certificate_data(DownResp, PrivKeyRecord), + ?event( + ssl_cert, + { + ssl_cert_certificate_and_key_ready_for_nginx, + {domains, DomainsOut} + } + ), + HttpsResult = + maybe_start_https_server( + CertPem, + PrivKeyPem, + DomainsOut, + Opts + ), + build_success_response( + DomainsOut, + Results, + HttpsResult + ) + end; + {error, _} -> + build_processing_response(Results) + end. + +%% @doc Extract certificate data from download response. +%% +%% This function extracts the certificate PEM, domain list, and serialized +%% private key from the certificate download response. It handles the case +%% where no private key record is available. +%% +%% @param DownResp Certificate download response from Let's Encrypt +%% @param PrivKeyRecord Private key record (may be not_found) +%% @returns {ok, {CertPem, DomainsOut, PrivKeyPem}} +extract_certificate_data(DownResp, PrivKeyRecord) -> + DownBody = maps:get(<<"body">>, DownResp, #{}), + CertPem = maps:get(<<"certificate_pem">>, DownBody, <<>>), + DomainsOut = maps:get(<<"domains">>, DownBody, []), + PrivKeyPem = + case PrivKeyRecord of + not_found -> <<"">>; + Key -> ssl_cert_state:serialize_private_key(Key) + end, + {ok, {CertPem, DomainsOut, PrivKeyPem}}. + +%% @doc Optionally start HTTPS server with certificate. 
+%% +%% This function checks the auto_https configuration setting and conditionally +%% starts an HTTPS server with the provided certificate. If auto_https is +%% disabled, it skips the server startup. +%% +%% @param CertPem PEM-encoded certificate chain +%% @param PrivKeyPem PEM-encoded private key +%% @param DomainsOut List of domains for the certificate +%% @param Opts Server configuration options (checks auto_https setting) +%% @returns {started, ServerUrl} | {skipped, Reason} | {failed, Error} +maybe_start_https_server(CertPem, PrivKeyPem, DomainsOut, Opts) -> + {ok, SSLOpts} = extract_and_validate_ssl_params(Opts), + ?event(ssl_cert, {sslopts, {explicit, SSLOpts}}), + case hb_opts:get(<<"auto_https">>, true, SSLOpts) of + true -> + ?event( + ssl_cert, + { + starting_https_server_with_certificate, + {domains, DomainsOut} + } + ), + HttpsPort = hb_opts:get(<<"https_port">>, ?DEFAULT_HTTPS_PORT, SSLOpts), + start_https_server_with_certificate( + CertPem, + PrivKeyPem, + DomainsOut, + Opts, + HttpsPort + ); + false -> + ?event(ssl_cert, {auto_https_disabled, {domains, DomainsOut}}), + {skipped, auto_https_disabled} + end. + +%% @doc Start HTTPS server with certificate files. +%% +%% This function writes the certificate and key to temporary files, determines +%% the HTTP server to redirect from, and starts a new HTTPS server. It handles +%% all aspects of HTTPS server startup including redirect configuration. 
+%% +%% @param CertPem PEM-encoded certificate chain +%% @param PrivKeyPem PEM-encoded private key +%% @param DomainsOut List of domains for logging and tracking +%% @param Opts Server configuration options +%% @param HttpsPort HTTPS port number for the server +%% @returns {started, ServerUrl} or {failed, {Error, Reason}} +start_https_server_with_certificate( + CertPem,PrivKeyPem, DomainsOut, Opts, HttpsPort +) -> + maybe + {ok, {CertFile, KeyFile}} ?= + write_certificate_files(CertPem, PrivKeyPem), + RedirectTo = get_redirect_server_id(Opts), + ?event( + ssl_cert, + { + https_server_config, + {cert_file, CertFile}, + {key_file, KeyFile}, + {redirect_to, RedirectTo}, + {https_port, HttpsPort} + } + ), + try hb_http_server:start_https_node( + CertFile, + KeyFile, + Opts, + RedirectTo, + HttpsPort + ) of + ServerUrl when is_binary(ServerUrl) -> + ?event( + ssl_cert, + { + https_server_started_successfully, + {server_url, ServerUrl}, + {domains, DomainsOut} + } + ), + {started, ServerUrl} + catch + Error:Reason:Stacktrace -> + ?event(ssl_cert, + { + https_server_start_failed, + {error, Error}, + {reason, Reason}, + {stacktrace, Stacktrace}, + {domains, DomainsOut} + } + ), + {failed, {Error, Reason}} + end + end. + +%% @doc Write certificate and key to files. +%% +%% This function writes the PEM-encoded certificate and private key to +%% files that can be used by Cowboy for TLS configuration. It ensures +%% the target directory exists before writing files. +%% Both files must be written successfully for the operation to succeed. 
+%% +%% @param CertPem PEM-encoded certificate chain +%% @param PrivKeyPem PEM-encoded private key +%% @returns {ok, {CertFile, KeyFile}} or {error, Reason} +write_certificate_files(CertPem, PrivKeyPem) -> + CertFile = ?CERT_PEM_FILE, + KeyFile = ?KEY_PEM_FILE, + % Ensure the directory exists + case filelib:ensure_dir(filename:join(?CERT_DIR, "dummy")) of + ok -> + case { + file:write_file(CertFile, CertPem), + file:write_file(KeyFile, ssl_utils:bin(PrivKeyPem)) + } of + {ok, ok} -> {ok, {CertFile, KeyFile}}; + {Error, ok} -> Error; + {ok, Error} -> Error; + {Error1, _Error2} -> Error1 % Return first error if both fail + end; + {error, Reason} -> + {error, {failed_to_create_cert_directory, Reason}} + end. + +%% @doc Get the server ID for HTTP redirect setup. +%% +%% This function determines which HTTP server should be configured to +%% redirect +%% traffic to HTTPS. It first checks for an explicit http_server setting, +%% then falls back to using the current server's wallet address. +%% +%% @param Opts Server configuration options +%% @returns ServerID binary for the HTTP server to configure +get_redirect_server_id(Opts) -> + case hb_opts:get(http_server, no_server, Opts) of + no_server -> + % Fallback to current server wallet + hb_util:human_id( + ar_wallet:to_address( + hb_opts:get(priv_wallet, hb:wallet(), Opts) + ) + ); + ServerId -> + ServerId + end. + +%% @doc Build success response with certificate and HTTPS server info. +%% +%% This function constructs the final success response containing the +%% issued +%% certificate, private key, validation results, and HTTPS server status. +%% The response format is standardized for API consumers. 
+%% +%% @param DomainsOut List of domains the certificate covers +%% @param Results Validation results from challenge verification +%% @param HttpsResult HTTPS server startup result +%% @returns {ok, #{status => 200, body => ResponseMap}} +build_success_response(DomainsOut, Results, HttpsResult) -> + ResponseBody = #{ + <<"message">> => <<"Certificate issued successfully">>, + <<"domains">> => DomainsOut, + <<"results">> => Results, + <<"https_server">> => format_https_server_status(HttpsResult) + }, + ssl_utils:build_success_response(200, ResponseBody). + +%% @doc Format HTTPS server status for response. +%% +%% This function formats the HTTPS server startup result into a +%% standardized +%% response structure with status, URL, and descriptive message. It handles +%% success, failure, and skipped cases. +%% +%% @param HttpsResult Server startup result: {started, Url} | {failed, Error} +%% | {skipped, Reason} +%% @returns Map with status, server_url/error/reason, and message fields +format_https_server_status({started, ServerUrl}) -> + #{ + <<"status">> => <<"started">>, + <<"server_url">> => ServerUrl, + <<"message">> => iolist_to_binary([ + <<"HTTPS server started at ">>, + ServerUrl, + <<", HTTP traffic will be redirected">> + ]) + }; +format_https_server_status({failed, {Error, Reason}}) -> + #{ + <<"status">> => <<"failed">>, + <<"error">> => ssl_utils:bin(hb_format:term({Error, Reason})), + <<"message">> => + <<"Certificate issued but HTTPS server failed to start">> + }; +format_https_server_status({skipped, Reason}) -> + #{ + <<"status">> => <<"skipped">>, + <<"reason">> => ssl_utils:bin(Reason), + <<"message">> => + <<"Certificate issued, HTTPS server not started ", + "(auto_https disabled)">> + }. + +%% @doc Build response for pending certificate orders. +%% +%% This function creates a response for certificate orders that are not yet +%% valid, indicating that DNS challenge validation is still in progress or +%% incomplete. 
+%% +%% @param OrderStatus Current ACME order status (e.g., pending, +%% processing) +%% @param Results Validation results from challenge attempts +%% @param RequestState1 Updated request state for potential retry +%% @returns {ok, #{status => 200, body => ResponseMap}} +build_pending_response(OrderStatus, Results, RequestState1) -> + ResponseBody = #{ + <<"message">> => <<"Validation not complete">>, + <<"order_status">> => OrderStatus, + <<"results">> => Results, + <<"request_state">> => RequestState1 + }, + ssl_utils:build_success_response(200, ResponseBody). + +%% @doc Build response when certificate is still processing. +%% +%% This function creates a response for orders that have been finalized +%% but +%% where the certificate is not yet ready for download from Let's +%% Encrypt. +%% This typically happens when there's a delay in certificate issuance. +%% +%% @param Results Validation results from challenge verification +%% @returns {ok, #{status => 200, body => ResponseMap}} +build_processing_response(Results) -> + ResponseBody = #{ + <<"message">> => + <<"Order finalized; certificate not ready for download yet">>, + <<"order_status">> => ?ACME_STATUS_PROCESSING, + <<"results">> => Results + }, + ssl_utils:build_success_response(200, ResponseBody). + +%% @doc Extract and validate SSL parameters from options. +%% +%% This function loads server options, extracts SSL configuration, and +%% validates all required parameters using the ssl_cert_validation +%% module. +%% It leverages the library's comprehensive validation functions. 
+%% +%% @param Opts Server configuration options +%% @returns {ok, ValidatedParams} or {error, Reason} +extract_and_validate_ssl_params(Opts) -> + maybe + LoadedOpts = hb_cache:ensure_all_loaded(Opts, Opts), + StrippedOpts = + maps:without( + [<<"ssl_cert_rsa_key">>, <<"ssl_cert_opts">>], + LoadedOpts + ), + ?event({ssl_cert_request_started_with_opts, StrippedOpts}), + % Extract SSL options from configuration + {ok, SslOpts} ?= extract_ssl_opts(StrippedOpts), + % Extract parameters + Domains = maps:get(<<"domains">>, SslOpts, not_found), + Email = maps:get(<<"email">>, SslOpts, not_found), + Environment = maps:get(<<"environment">>, SslOpts, staging), + ?event({ + ssl_cert_request_params_from_config, + {domains, Domains}, + {email, Email}, + {environment, Environment} + }), + % Use library validation function - this does all the heavy lifting! + {ok, ValidatedParams} ?= + ssl_cert_validation:validate_request_params( + Domains, + Email, + Environment + ), + % Enhance with system defaults (library already includes key_size) + EnhancedParams = ValidatedParams#{ + storage_path => ?SSL_CERT_STORAGE_PATH + }, + {ok, EnhancedParams} + end. + +%% @doc Process the complete certificate request workflow. +%% +%% This function handles the ACME certificate request processing and +%% state persistence using the ssl_cert_ops module. It orchestrates +%% the request submission and state management. +%% +%% @param ValidatedParams Validated certificate request parameters +%% @param Opts Server configuration options +%% @returns {ok, {RequestState, ChallengeData}} or {error, Reason} +process_certificate_request_workflow(ValidatedParams, Opts) -> + maybe + % Process the certificate request using library function + Wallet = hb_opts:get(priv_wallet, hb:wallet(), Opts), + {ok, ProcResp} ?= + ssl_cert_ops:process_certificate_request(ValidatedParams, Wallet), + {ok, {RequestState, ChallengeData}} ?= + persist_request_state(ProcResp, Opts), + {ok, {RequestState, ChallengeData}} + end. 
+ +%% @doc Build the certificate request response. +%% +%% This function constructs the response for a successful certificate +%% request +%% using the ssl_utils response building functions. It includes DNS challenges +%% and instructions for the next step. +%% +%% @param RequestState Certificate request state data (unused but kept +%% for consistency) +%% @param FormattedChallenges Formatted DNS challenges for the response +%% @returns {ok, #{status => 200, body => ResponseMap}} +build_request_response(_RequestState, FormattedChallenges) -> + ResponseBody = #{ + <<"message">> => + << + "Create DNS TXT records for the following", + " challenges, then call finalize" + >>, + <<"challenges">> => FormattedChallenges, + <<"next_step">> => <<"finalize">> + }, + ssl_utils:build_success_response(200, ResponseBody). + +%% @doc Persist certificate request state in server options. +%% +%% This function extracts the request state and certificate key from +%% the +%% processing response and persists them in the server options for later +%% retrieval during finalization. It uses ssl_cert_challenge library +%% functions for formatting challenges. 
+%% +%% @param ProcResp Processing response from certificate request +%% @param Opts Server configuration options +%% @returns {ok, {RequestState, ChallengeData}} or {error, Reason} +persist_request_state(ProcResp, Opts) -> + maybe + NewOpts = hb_http_server:get_opts(Opts), + ProcBody = maps:get(<<"body">>, ProcResp, #{}), + RequestState0 = maps:get(<<"request_state">>, ProcBody, #{}), + CertificateKey = maps:get(<<"certificate_key">>, ProcBody, not_found), + ?event({ssl_cert_orchestration_created_request}), + % Persist request state in node opts (overwrites previous) + ok = hb_http_server:set_opts( + NewOpts#{ + <<"priv_ssl_cert_request">> => RequestState0, + <<"priv_ssl_cert_rsa_key">> => CertificateKey + } + ), + % Format challenges using library function + Challenges = maps:get(<<"challenges">>, RequestState0, []), + FormattedChallenges = + ssl_cert_challenge:format_challenges_for_response(Challenges), + {ok, {RequestState0, FormattedChallenges}} + end. + diff --git a/src/hb_http_client.erl b/src/hb_http_client.erl index 3611cc088..63e78bf7b 100644 --- a/src/hb_http_client.erl +++ b/src/hb_http_client.erl @@ -3,7 +3,7 @@ -module(hb_http_client). -behaviour(gen_server). -include("include/hb.hrl"). --export([start_link/1, request/2]). +-export([start_link/1, req/2]). -export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]). -record(state, { @@ -12,11 +12,6 @@ opts = #{} }). --define(DEFAULT_RETRIES, 0). --define(DEFAULT_RETRY_TIME, 1000). --define(DEFAULT_KEEPALIVE_TIMEOUT, 60_000). --define(DEFAULT_CONNECT_TIMEOUT, 60_000). - %%% ================================================================== %%% Public interface. %%% ================================================================== @@ -24,42 +19,17 @@ start_link(Opts) -> gen_server:start_link({local, ?MODULE}, ?MODULE, Opts, []). -request(Args, Opts) -> - request(Args, hb_opts:get(http_retry, ?DEFAULT_RETRIES, Opts), Opts). 
-request(Args, RemainingRetries, Opts) -> - case do_request(Args, Opts) of - {error, Details} -> maybe_retry(RemainingRetries, Args, Details, Opts); - {ok, Status, Headers, Body} -> {ok, Status, Headers, Body} - end. - -do_request(Args, Opts) -> +req(Args, Opts) -> req(Args, false, Opts). +req(Args, ReestablishedConnection, Opts) -> case hb_opts:get(http_client, gun, Opts) of - gun -> gun_req(Args, Opts); - httpc -> httpc_req(Args, Opts) + gun -> + MaxRedirects = hb_maps:get(gun_max_redirects, Opts, 5), + GunArgs = Args#{redirects_left => MaxRedirects}, + gun_req(GunArgs, ReestablishedConnection, Opts); + httpc -> httpc_req(Args, ReestablishedConnection, Opts) end. -maybe_retry(0, _, ErrDetails, _) -> {error, ErrDetails}; -maybe_retry(Remaining, Args, ErrDetails, Opts) -> - RetryBaseTime = hb_opts:get(http_retry_time, ?DEFAULT_RETRY_TIME, Opts), - RetryTime = - case hb_opts:get(http_retry_mode, backoff, Opts) of - constant -> RetryBaseTime; - backoff -> - BaseRetries = hb_opts:get(http_retry, ?DEFAULT_RETRIES, Opts), - RetryBaseTime * (1 + (BaseRetries - Remaining)) - end, - ?event( - warning, - {retrying_http_request, - {after_ms, RetryTime}, - {error, ErrDetails}, - {request, Args} - } - ), - timer:sleep(RetryTime), - request(Args, Remaining - 1, Opts). 
- -httpc_req(Args, Opts) -> +httpc_req(Args, _, Opts) -> #{ peer := Peer, path := Path, @@ -68,11 +38,13 @@ httpc_req(Args, Opts) -> body := Body } = Args, ?event({httpc_req, Args}), - {Host, Port} = parse_peer(Peer, Opts), - Scheme = case Port of - 443 -> "https"; - _ -> "http" + ParsedPeer = uri_string:parse(iolist_to_binary(Peer)), + #{ scheme := Scheme, host := Host } = ParsedPeer, + DefaultPort = case Scheme of + <<"https">> -> 443; + <<"http">> -> 80 end, + Port = maps:get(port, ParsedPeer, DefaultPort), ?event(http_client, {httpc_req, {explicit, Args}}), URL = binary_to_list(iolist_to_binary([Scheme, "://", Host, ":", integer_to_binary(Port), Path])), FilteredHeaders = hb_maps:without([<<"content-type">>, <<"cookie">>], Headers, Opts), @@ -111,9 +83,11 @@ httpc_req(Args, Opts) -> } end, ?event({http_client_outbound, Method, URL, Request}), + FollowRedirects = hb_maps:get(http_follow_redirects, Opts, true), + ReqOpts = [{autoredirect, FollowRedirects}], HTTPCOpts = [{full_result, true}, {body_format, binary}], StartTime = os:system_time(millisecond), - case httpc:request(Method, Request, [], HTTPCOpts) of + case httpc:request(Method, Request, ReqOpts, HTTPCOpts) of {ok, {{_, Status, _}, RawRespHeaders, RespBody}} -> EndTime = os:system_time(millisecond), RespHeaders = @@ -137,47 +111,58 @@ httpc_req(Args, Opts) -> {error, Reason} end. -gun_req(Args, Opts) -> - gun_req(Args, false, Opts). 
gun_req(Args, ReestablishedConnection, Opts) ->
-    StartTime = os:system_time(millisecond),
-    #{ peer := Peer, path := Path, method := Method } = Args,
-    Response =
+    StartTime = os:system_time(millisecond),
+    #{ peer := Peer, path := Path, method := Method, redirects_left := RedirectsLeft } = Args,
+    Response =
         case catch gen_server:call(?MODULE, {get_connection, Args, Opts}, infinity) of
             {ok, PID} ->
                 ar_rate_limiter:throttle(Peer, Path, Opts),
-                case do_gun_request(PID, Args, Opts) of
-                    {error, Error} when Error == {shutdown, normal};
-                            Error == noproc ->
+                case request(PID, Args, Opts) of
+                    {error, Error} when Error == {shutdown, normal}; Error == noproc ->
                         case ReestablishedConnection of
-                            true -> {error, client_error};
-                            false -> gun_req(Args, true, Opts)
+                            true ->
+                                {error, client_error};
+                            false ->
+                                req(Args, true, Opts)
                         end;
-                    Reply ->
-                        Reply
-                end;
+                    Reply ->
+                        FollowRedirects = hb_maps:get(http_follow_redirects, Opts, true),
+                        case Reply of
+                            {_Ok, StatusCode, RedirectRes, _} when FollowRedirects, RedirectsLeft > 0, ((StatusCode == 301) orelse (StatusCode == 302) orelse (StatusCode == 307) orelse (StatusCode == 308)) ->
+                                RedirectArgs = Args#{ redirects_left := RedirectsLeft - 1 },
+                                handle_redirect(
+                                    RedirectArgs,
+                                    ReestablishedConnection,
+                                    Opts,
+                                    RedirectRes,
+                                    Reply
+                                );
+                            _ -> Reply
+                        end
+                end;
             {'EXIT', _} ->
                 {error, client_error};
             Error ->
                 Error
-    end,
-    EndTime = os:system_time(millisecond),
-    %% Only log the metric for the top-level call to req/2 - not the recursive call
-    %% that happens when the connection is reestablished.
-    case ReestablishedConnection of
-        true ->
-            ok;
-        false ->
-            record_duration(#{
-                <<"request-method">> => method_to_bin(Method),
-                <<"request-path">> => hb_util:bin(Path),
-                <<"status-class">> => get_status_class(Response),
-                <<"duration">> => EndTime - StartTime
-            },
-            Opts
-        )
-    end,
-    Response.
+ end, + EndTime = os:system_time(millisecond), + %% Only log the metric for the top-level call to req/2 - not the recursive call + %% that happens when the connection is reestablished. + case ReestablishedConnection of + true -> + ok; + false -> + record_duration(#{ + <<"request-method">> => method_to_bin(Method), + <<"request-path">> => hb_util:bin(Path), + <<"status-class">> => get_status_class(Response), + <<"duration">> => EndTime - StartTime + }, + Opts + ) + end, + Response. %% @doc Record the duration of the request in an async process. We write the %% data to prometheus if the application is enabled, as well as invoking the @@ -198,6 +183,7 @@ record_duration(Details, Opts) -> GetFormat, [ <<"request-method">>, + <<"request-path">>, <<"status-class">> ] ), @@ -282,7 +268,7 @@ init_prometheus(Opts) -> application:ensure_all_started([prometheus, prometheus_cowboy]), prometheus_counter:new([ {name, gun_requests_total}, - {labels, [http_method, status_class]}, + {labels, [http_method, route, status_class]}, { help, "The total number of GUN requests." @@ -293,7 +279,7 @@ init_prometheus(Opts) -> prometheus_histogram:new([ {name, http_request_duration_seconds}, {buckets, [0.01, 0.1, 0.5, 1, 5, 10, 30, 60]}, - {labels, [http_method, status_class]}, + {labels, [http_method, route, status_class]}, { help, "The total duration of an hb_http_client:req call. This includes more than" @@ -312,11 +298,13 @@ init_prometheus(Opts) -> ]), prometheus_counter:new([ {name, http_client_downloaded_bytes_total}, - {help, "The total amount of bytes requested via HTTP, per remote endpoint"} + {help, "The total amount of bytes requested via HTTP, per remote endpoint"}, + {labels, [route]} ]), prometheus_counter:new([ {name, http_client_uploaded_bytes_total}, - {help, "The total amount of bytes posted via HTTP, per remote endpoint"} + {help, "The total amount of bytes posted via HTTP, per remote endpoint"}, + {labels, [route]} ]), ?event(started), {ok, #state{ opts = Opts }}. 
@@ -451,6 +439,7 @@ handle_info({gun_down, PID, Protocol, Reason, _KilledStreams, _UnprocessedStream
 handle_info({'DOWN', _Ref, process, PID, Reason},
         #state{ pid_by_peer = PIDByPeer, status_by_pid = StatusByPID } = State) ->
+    ?event(redirect, {down, {pid, PID}, {reason, Reason}}),
     case hb_maps:get(PID, StatusByPID, not_found) of
         not_found ->
             {noreply, State};
@@ -485,6 +474,37 @@ terminate(Reason, #state{ status_by_pid = StatusByPID }) ->
 %%% Private functions.
 %%% ==================================================================
 
+handle_redirect(Args, ReestablishedConnection, Opts, Res, Reply) ->
+    case lists:keyfind(<<"location">>, 1, Res) of
+        false ->
+            % There's no Location header, so we can't follow the redirect.
+            Reply;
+        {_LocationHeaderName, Location} ->
+            case uri_string:parse(Location) of
+                {error, _Reason, _Detail} ->
+                    % Server returned a Location header but the URI was malformed.
+                    Reply;
+                Parsed ->
+                    #{ scheme := NewScheme, host := NewHost, path := NewPath } = Parsed,
+                    Port = maps:get(port, Parsed, undefined),
+                    FormattedPort = case Port of
+                        undefined -> "";
+                        _ -> lists:flatten(io_lib:format(":~B", [Port]))
+                    end,
+                    NewPeer = lists:flatten(
+                        io_lib:format(
+                            "~s://~s~s~s",
+                            [NewScheme, NewHost, FormattedPort, NewPath]
+                        )
+                    ),
+                    NewArgs = Args#{
+                        peer := NewPeer,
+                        path := NewPath
+                    },
+                    gun_req(NewArgs, ReestablishedConnection, Opts)
+            end
+    end.
+
 %% @doc Safe wrapper for prometheus_gauge:inc/2.
 inc_prometheus_gauge(Name) ->
     case application:get_application(prometheus) of
@@ -511,7 +531,13 @@ inc_prometheus_counter(Name, Labels, Value) ->
     end.
open_connection(#{ peer := Peer }, Opts) -> - {Host, Port} = parse_peer(Peer, Opts), + ParsedPeer = uri_string:parse(iolist_to_binary(Peer)), + #{ scheme := Scheme, host := Host } = ParsedPeer, + DefaultPort = case Scheme of + <<"https">> -> 443; + <<"http">> -> 80 + end, + Port = maps:get(port, ParsedPeer, DefaultPort), ?event(http_outbound, {parsed_peer, {peer, Peer}, {host, Host}, {port, Port}}), BaseGunOpts = #{ @@ -520,7 +546,7 @@ open_connection(#{ peer := Peer }, Opts) -> keepalive => hb_opts:get( http_keepalive, - ?DEFAULT_KEEPALIVE_TIMEOUT, + no_keepalive_timeout, Opts ) }, @@ -528,14 +554,14 @@ open_connection(#{ peer := Peer }, Opts) -> connect_timeout => hb_opts:get( http_connect_timeout, - ?DEFAULT_CONNECT_TIMEOUT, + no_connect_timeout, Opts ) }, Transport = - case Port of - 443 -> tls; - _ -> tcp + case Scheme of + <<"https">> -> tls; + <<"http">> -> tcp end, DefaultProto = case hb_features:http3() of @@ -546,7 +572,13 @@ open_connection(#{ peer := Peer }, Opts) -> GunOpts = case Proto = hb_opts:get(protocol, DefaultProto, Opts) of http3 -> BaseGunOpts#{protocols => [http3], transport => quic}; - _ -> BaseGunOpts + _ -> BaseGunOpts#{ + transport => Transport, + tls_opts => [ + % {verify, verify_none}, % For development - disable peer verification + {cacerts, public_key:cacerts_get()} + ] + } end, ?event(http_outbound, {gun_open, @@ -556,22 +588,7 @@ open_connection(#{ peer := Peer }, Opts) -> {transport, Transport} } ), - gun:open(Host, Port, GunOpts). - -parse_peer(Peer, Opts) -> - Parsed = uri_string:parse(Peer), - case Parsed of - #{ host := Host, port := Port } -> - {hb_util:list(Host), Port}; - URI = #{ host := Host } -> - { - hb_util:list(Host), - case hb_maps:get(scheme, URI, undefined, Opts) of - <<"https">> -> 443; - _ -> hb_opts:get(port, 8734, Opts) - end - } - end. + gun:open(hb_util:list(Host), Port, GunOpts). 
reply_error([], _Reason) -> ok; @@ -579,14 +596,16 @@ reply_error([PendingRequest | PendingRequests], Reason) -> ReplyTo = element(1, PendingRequest), Args = element(2, PendingRequest), Method = hb_maps:get(method, Args), - record_response_status(Method, {error, Reason}), + Path = hb_maps:get(path, Args), + record_response_status(Method, Path, {error, Reason}), gen_server:reply(ReplyTo, {error, Reason}), reply_error(PendingRequests, Reason). -record_response_status(Method, Response) -> +record_response_status(Method, Path, Response) -> inc_prometheus_counter(gun_requests_total, [ hb_util:list(method_to_bin(Method)), + Path, hb_util:list(get_status_class(Response)) ], 1 @@ -613,7 +632,7 @@ method_to_bin(patch) -> method_to_bin(_) -> <<"unknown">>. -do_gun_request(PID, Args, Opts) -> +request(PID, Args, Opts) -> Timer = inet:start_timer( hb_opts:get(http_request_send_timeout, no_request_send_timeout, Opts) @@ -655,7 +674,7 @@ do_gun_request(PID, Args, Opts) -> is_peer_request => hb_maps:get(is_peer_request, Args, true, Opts) }, Response = await_response(hb_maps:merge(Args, ResponseArgs, Opts), Opts), - record_response_status(Method, Response), + record_response_status(Method, Path, Response), inet:stop_timer(Timer), Response. 
@@ -692,7 +711,7 @@ await_response(Args, Opts) -> end; {data, fin, Data} -> FinData = iolist_to_binary([Acc | Data]), - download_metric(FinData), + download_metric(FinData, Args), upload_metric(Args), {ok, hb_maps:get(status, Args, undefined, Opts), @@ -700,16 +719,16 @@ await_response(Args, Opts) -> FinData }; {error, timeout} = Response -> - record_response_status(Method, Response), + record_response_status(Method, Path, Response), gun:cancel(PID, Ref), log(warn, gun_await_process_down, Args, Response, Opts), Response; {error, Reason} = Response when is_tuple(Reason) -> - record_response_status(Method, Response), + record_response_status(Method, Path, Response), log(warn, gun_await_process_down, Args, Reason, Opts), Response; Response -> - record_response_status(Method, Response), + record_response_status(Method, Path, Response), log(warn, gun_await_unknown, Args, Response, Opts), Response end. @@ -729,17 +748,17 @@ log(Type, Event, #{method := Method, peer := Peer, path := Path}, Reason, Opts) ), ok. -download_metric(Data) -> +download_metric(Data, #{path := Path}) -> inc_prometheus_counter( http_client_downloaded_bytes_total, - [], + [Path], byte_size(Data) ). -upload_metric(#{method := post, body := Body}) -> +upload_metric(#{method := post, path := Path, body := Body}) -> inc_prometheus_counter( http_client_uploaded_bytes_total, - [], + [Path], byte_size(Body) ); upload_metric(_) -> diff --git a/src/hb_http_server.erl b/src/hb_http_server.erl index 456eda3f0..52daa1f6a 100644 --- a/src/hb_http_server.erl +++ b/src/hb_http_server.erl @@ -1,31 +1,116 @@ -%%% @doc A router that attaches a HTTP server to the AO-Core resolver. -%%% Because AO-Core is built to speak in HTTP semantics, this module -%%% only has to marshal the HTTP request into a message, and then -%%% pass it to the AO-Core resolver. -%%% -%%% `hb_http:reply/4' is used to respond to the client, handling the -%%% process of converting a message back into an HTTP response. 
-%%% -%%% The router uses an `Opts' message as its Cowboy initial state, -%%% such that changing it on start of the router server allows for -%%% the execution parameters of all downstream requests to be controlled. +%%% @doc HyperBEAM HTTP/HTTPS server with SSL certificate integration. +%%% +%%% This module provides a complete HTTP and HTTPS server implementation +%%% for HyperBEAM nodes, with automatic SSL certificate management and +%%% HTTP to HTTPS redirect capabilities. +%%% +%%% Key features: +%%% - HTTP server with AO-Core integration for message processing +%%% - HTTPS server with automatic SSL certificate deployment +%%% - HTTP to HTTPS redirect with 301 Moved Permanently responses +%%% - SSL certificate integration via dev_ssl_cert device +%%% - Configurable ports for development and production +%%% - Prometheus metrics integration (optional) +%%% - Complete application lifecycle management +%%% +%%% The module marshals HTTP requests into HyperBEAM message format, +%%% processes them through the AO-Core resolver, and converts responses +%%% back to HTTP format using `hb_http:reply/4'. +%%% +%%% Configuration is managed through an `Opts' message that serves as +%%% Cowboy's initial state, allowing dynamic control of execution +%%% parameters for all downstream requests. -module(hb_http_server). --export([start/0, start/1, allowed_methods/2, init/2]). --export([set_opts/1, set_opts/2, get_opts/0, get_opts/1]). --export([set_default_opts/1, set_proc_server_id/1]). --export([start_node/0, start_node/1]). + +%% Public API exports +-export([ + start/0, start/1, + start_node/0, start_node/1, + start_https_node/5 +]). + +%% Request handling exports +-export([ + init/2, + allowed_methods/2 +]). + +%% HTTPS and redirect exports +-export([ + redirect_to_https/3 +]). + +%% Configuration and state management exports +-export([ + set_opts/1, set_opts/2, + get_opts/0, get_opts/1, + set_default_opts/1, + set_proc_server_id/1 +]). 
+ +%% Type specifications +-type server_opts() :: map(). +-type server_id() :: binary(). +-type listener_ref() :: pid(). + +%% Function specifications +-spec start() -> {ok, listener_ref()}. +-spec start(server_opts()) -> {ok, listener_ref()}. +-spec start_node() -> binary(). +-spec start_node(server_opts()) -> binary(). +-spec start_https_node( + binary(), + binary(), + server_opts(), + server_id() | no_server, + integer() +) -> binary(). +-spec redirect_to_https(cowboy_req:req(), server_opts(), integer()) -> + {ok, cowboy_req:req(), server_opts()}. + -include_lib("eunit/include/eunit.hrl"). -include("include/hb.hrl"). -%% @doc Starts the HTTP server. Optionally accepts an `Opts' message, which -%% is used as the source for server configuration settings, as well as the -%% `Opts' argument to use for all AO-Core resolution requests downstream. +%% Default configuration constants +-define(DEFAULT_HTTP_PORT, 8734). +-define(DEFAULT_IDLE_TIMEOUT, 300000). +-define(DEFAULT_CONFIG_FILE, <<"config.flat">>). +-define(DEFAULT_PRIV_KEY_FILE, <<"hyperbeam-key.json">>). +-define(DEFAULT_DASHBOARD_PATH, <<"/~hyperbuddy@1.0/dashboard">>). +-define(RANDOM_PORT_MIN, 10000). +-define(RANDOM_PORT_RANGE, 50000). + +%% Test certificate paths +-define(TEST_CERT_FILE, "test/test-tls.pem"). +-define(TEST_KEY_FILE, "test/test-tls.key"). + +%% HTTP/3 timeouts +-define(HTTP3_STARTUP_TIMEOUT, 2000). + +%%% =================================================================== +%%% Public API & Main Entry Points +%%% =================================================================== + +%% @doc Starts the HTTP server with configuration loading and setup. +%% +%% This function performs the complete HTTP server initialization including: +%% 1. Loading configuration from files +%% 2. Setting up store and wallet configuration +%% 3. Displaying the startup greeter message +%% 4. 
Starting the HTTP server with merged configuration +%% +%% The function loads configuration from the configured location, merges it +%% with environment defaults, and starts all necessary services. +%% +%% @returns {ok, Listener} where Listener is the Cowboy listener PID start() -> ?event(http, {start_store, <<"cache-mainnet">>}), Loaded = - case hb_opts:load(Loc = hb_opts:get(hb_config_location, <<"config.flat">>)) of + case hb_opts:load( + Loc = hb_opts:get(hb_config_location, ?DEFAULT_CONFIG_FILE) + ) of {ok, Conf} -> - ?event(boot, {loaded_config, {path, Loc}, {config, Conf}}), + ?event(boot, {loaded_config, Loc, Conf}), Conf; {error, Reason} -> ?event(boot, {failed_to_load_config, Loc, Reason}), @@ -42,7 +127,7 @@ start() -> UpdatedStoreOpts = case StoreOpts of no_store -> no_store; - _ when is_list(StoreOpts) -> + _ when is_list(StoreOpts) -> hb_store_opts:apply(StoreOpts, StoreDefaults); _ -> StoreOpts end, @@ -51,173 +136,128 @@ start() -> hb:wallet( hb_opts:get( priv_key_location, - <<"hyperbeam-key.json">>, + ?DEFAULT_PRIV_KEY_FILE, Loaded ) ), - maybe_greeter(Loaded, PrivWallet), + print_greeter_if_not_test(MergedConfig, PrivWallet), start( Loaded#{ priv_wallet => PrivWallet, store => UpdatedStoreOpts, - port => hb_opts:get(port, 8734, Loaded), - cache_writers => [hb_util:human_id(ar_wallet:to_address(PrivWallet))] + port => hb_opts:get(port, ?DEFAULT_HTTP_PORT, Loaded), + cache_writers => + [hb_util:human_id(ar_wallet:to_address(PrivWallet))] } ). + +%% @doc Starts the HTTP server with provided options. +%% +%% This function starts the HTTP server using the provided configuration +%% options. It ensures all required applications are started, initializes +%% HyperBEAM, and creates the server with default option processing. 
+%% +%% @param Opts Configuration options map for the server +%% @returns {ok, Listener} where Listener is the Cowboy listener PID start(Opts) -> - application:ensure_all_started([ - kernel, - stdlib, - inets, - ssl, - ranch, - cowboy, - gun, - os_mon - ]), + start_required_applications(), hb:init(), BaseOpts = set_default_opts(Opts), {ok, Listener, _Port} = new_server(BaseOpts), {ok, Listener}. -%% @doc Print the greeter message to the console if we are not running tests. -maybe_greeter(MergedConfig, PrivWallet) -> - case hb_features:test() of - false -> - print_greeter(MergedConfig, PrivWallet); - true -> - ok - end. +%% @doc Start a test node with default configuration. +%% +%% This function starts a complete HyperBEAM node for testing purposes +%% using default configuration. It's a convenience wrapper around +%% start_node/1 with an empty options map. +%% +%% @returns Node URL binary for making HTTP requests +start_node() -> + start_node(#{}). -%% @doc Print the greeter message to the console. Includes the version, operator -%% address, URL to access the node, and the wider configuration (including the -%% keys inherited from the default configuration). -print_greeter(Config, PrivWallet) -> - FormattedConfig = hb_format:term(Config, Config, 2), - io:format("~n" - "===========================================================~n" - "== ██╗ ██╗██╗ ██╗██████╗ ███████╗██████╗ ==~n" - "== ██║ ██║╚██╗ ██╔╝██╔══██╗██╔════╝██╔══██╗ ==~n" - "== ███████║ ╚████╔╝ ██████╔╝█████╗ ██████╔╝ ==~n" - "== ██╔══██║ ╚██╔╝ ██╔═══╝ ██╔══╝ ██╔══██╗ ==~n" - "== ██║ ██║ ██║ ██║ ███████╗██║ ██║ ==~n" - "== ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝ ==~n" - "== ==~n" - "== ██████╗ ███████╗ █████╗ ███╗ ███╗ VERSION: ==~n" - "== ██╔══██╗██╔════╝██╔══██╗████╗ ████║ v~p. ==~n" - "== ██████╔╝█████╗ ███████║██╔████╔██║ ==~n" - "== ██╔══██╗██╔══╝ ██╔══██║██║╚██╔╝██║ EAT GLASS, ==~n" - "== ██████╔╝███████╗██║ ██║██║ ╚═╝ ██║ BUILD THE ==~n" - "== ╚═════╝ ╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝ FUTURE. 
==~n" - "===========================================================~n" - "== Node live at: ~s ==~n" - "== Operator: ~s ==~n" - "===========================================================~n" - "== Config: ==~n" - "===========================================================~n" - " ~s~n~n" - "===========================================================~n", - [ - ?HYPERBEAM_VERSION, - string:pad( - lists:flatten( - io_lib:format( - "http://~s:~p", - [ - hb_opts:get(host, <<"localhost">>, Config), - hb_opts:get(port, 8734, Config) - ] - ) - ), - 39, - leading, - $ % Note: Space after `$` is functional, not garbage. - ), - hb_util:human_id(ar_wallet:to_address(PrivWallet)), - FormattedConfig - ] - ). +%% @doc Start a complete HyperBEAM node with custom configuration. +%% +%% This function performs complete node startup including: +%% 1. Starting all required Erlang applications +%% 2. Initializing HyperBEAM core systems +%% 3. Starting the supervisor tree +%% 4. Creating and starting the HTTP server +%% 5. Returning the node URL for client connections +%% +%% @param Opts Configuration options map for the node +%% @returns Node URL binary like <<"http://localhost:8734/">> +start_node(Opts) -> + start_required_applications(), + hb:init(), + hb_sup:start_link(Opts), + ServerOpts = set_default_opts(Opts), + {ok, _Listener, Port} = new_server(ServerOpts), + <<"http://localhost:", (hb_util:bin(Port))/binary, "/">>. + +%% @doc Start an HTTPS node with the given certificate and key. +%% +%% This function follows the same pattern as start_node() but creates an HTTPS +%% server instead of HTTP. It does complete application startup, supervisor +%% initialization, and proper node configuration. 
+%% +%% @param CertFile Path to certificate PEM file +%% @param KeyFile Path to private key PEM file +%% @param Opts Server configuration options (supports https_port) +%% @param RedirectTo HTTP server ID to configure for redirect +%% @param HttpsPort HTTPS port number for the server +%% @returns HTTPS node URL binary like <<"https://localhost:8443/">> +start_https_node(CertFile, KeyFile, Opts, RedirectTo, HttpsPort) -> + ?event(https, {starting_https_node, {opts_keys, maps:keys(Opts)}}), + % Ensure all required applications are started + start_required_applications(), + % Initialize HyperBEAM + hb:init(), + % Start supervisor with HTTPS-specific options + StrippedOpts = maps:without([port], Opts), + HttpsOpts = StrippedOpts#{ + port => HttpsPort + }, + hb_sup:start_link(HttpsOpts), + % Set up server options for HTTPS + ServerOpts = set_default_opts(HttpsOpts), + % Create the HTTPS server using new_server with TLS transport + {ok, _Listener, Port} = + new_https_server(ServerOpts, CertFile, KeyFile, RedirectTo, HttpsPort), + % Return HTTPS URL + <<"https://localhost:", (integer_to_binary(Port))/binary, "/">>. -%% @doc Trigger the creation of a new HTTP server node. Accepts a `NodeMsg' -%% message, which is used to configure the server. This function executed the -%% `start' hook on the node, giving it the opportunity to modify the `NodeMsg' -%% before it is used to configure the server. The `start' hook expects gives and -%% expects the node message to be in the `body' key. +%%% =================================================================== +%%% Core Server Creation +%%% =================================================================== + +%% @doc Create a new HTTP server with full configuration processing. +%% +%% This function handles the complete HTTP server creation workflow: +%% 1. Merging provided options with environment defaults +%% 2. Processing startup hooks for configuration modification +%% 3. Generating unique server identifiers +%% 4. 
Setting up Cowboy dispatchers and protocol options +%% 5. Configuring optional Prometheus metrics +%% 6. Starting the appropriate protocol listener (HTTP/2 or HTTP/3) +%% +%% @param RawNodeMsg Raw node message configuration +%% @returns {ok, Listener, Port} or {error, Reason} new_server(RawNodeMsg) -> + % Prepare node message with defaults RawNodeMsgWithDefaults = hb_maps:merge( hb_opts:default_message_with_env(), RawNodeMsg#{ only => local } ), - HookMsg = #{ <<"body">> => RawNodeMsgWithDefaults }, - NodeMsg = - case dev_hook:on(<<"start">>, HookMsg, RawNodeMsgWithDefaults) of - {ok, #{ <<"body">> := NodeMsgAfterHook }} -> NodeMsgAfterHook; - Unexpected -> - ?event(http, - {failed_to_start_server, - {unexpected_hook_result, Unexpected} - } - ), - throw( - {failed_to_start_server, - {unexpected_hook_result, Unexpected} - } - ) - end, - % Put server ID into node message so it's possible to update current server + % Process startup hooks using shared utility + {ok, NodeMsg} = process_server_hooks(RawNodeMsgWithDefaults), + % Initialize HTTP and create server ID hb_http:start(), - ServerID = - hb_util:human_id( - ar_wallet:to_address( - hb_opts:get( - priv_wallet, - no_wallet, - NodeMsg - ) - ) - ), - % Put server ID into node message so it's possible to update current server - % params. - NodeMsgWithID = hb_maps:put(http_server, ServerID, NodeMsg), - Dispatcher = cowboy_router:compile([{'_', [{'_', ?MODULE, ServerID}]}]), - ProtoOpts = #{ - env => #{ dispatch => Dispatcher, node_msg => NodeMsgWithID }, - stream_handlers => [cowboy_stream_h], - max_connections => infinity, - idle_timeout => hb_opts:get(idle_timeout, 300000, NodeMsg) - }, - PrometheusOpts = - case hb_opts:get(prometheus, not hb_features:test(), NodeMsg) of - true -> - ?event(prometheus, - {starting_prometheus, {test_mode, hb_features:test()}} - ), - % Attempt to start the prometheus application, if possible. 
- try - application:ensure_all_started([prometheus, prometheus_cowboy]), - prometheus_registry:register_collector(hb_metrics_collector), - ProtoOpts#{ - metrics_callback => - fun prometheus_cowboy2_instrumenter:observe/1, - stream_handlers => [cowboy_metrics_h, cowboy_stream_h] - } - catch - Type:Reason -> - % If the prometheus application is not started, we can - % still start the HTTP server, but we won't have any - % metrics. - ?event(prometheus, - {prometheus_not_started, {type, Type}, {reason, Reason}} - ), - ProtoOpts - end; - false -> - ?event(prometheus, - {prometheus_not_started, {test_mode, hb_features:test()}} - ), - ProtoOpts - end, + ServerID = generate_server_id(NodeMsg), + % Create protocol options with Prometheus support + ProtoOpts = create_base_protocol_opts(ServerID, NodeMsg), + PrometheusOpts = add_prometheus_if_enabled(ProtoOpts, NodeMsg), DefaultProto = case hb_features:http3() of true -> http3; @@ -247,19 +287,85 @@ new_server(RawNodeMsg) -> ), {ok, Listener, Port}. +%% @doc Create a new HTTPS server with TLS configuration. +%% +%% This function creates an HTTPS server using the provided SSL certificate +%% files. It handles the complete HTTPS server setup including: +%% 1. Processing server startup hooks +%% 2. Creating unique HTTPS server identifiers +%% 3. Setting up dispatchers and protocol options +%% 4. Configuring Prometheus metrics if enabled +%% 5. Starting the TLS listener with certificates +%% 6. 
Setting up HTTP to HTTPS redirect if requested +%% +%% @param Opts Server configuration options +%% @param CertFile Path to SSL certificate PEM file +%% @param KeyFile Path to SSL private key PEM file +%% @param RedirectTo HTTP server ID to configure for redirect (or no_server) +%% @param HttpsPort HTTPS port number for the server +%% @returns {ok, Listener, Port} or {error, Reason} +new_https_server(Opts, CertFile, KeyFile, RedirectTo, HttpsPort) -> + ?event(https, {creating_new_https_server, {opts_keys, maps:keys(Opts)}}), + try + {ok, NodeMsg} = process_server_hooks(Opts), + {_ServerID, HttpsServerID} = create_https_server_id(NodeMsg), + {_Dispatcher, ProtoOpts} = + create_https_dispatcher(HttpsServerID, NodeMsg), + FinalProtoOpts = add_prometheus_if_enabled(ProtoOpts, NodeMsg), + {ok, Listener} = + start_tls_listener( + HttpsServerID, + HttpsPort, + CertFile, + KeyFile, + FinalProtoOpts + ), + setup_redirect_if_needed(RedirectTo, NodeMsg, HttpsPort), + {ok, Listener, HttpsPort} + catch + Error:Reason:Stacktrace -> + ?event( + https, + { + https_server_creation_failed, + {error, Error}, + {reason, Reason}, + {stacktrace, Stacktrace} + } + ), + {error, {Error, Reason}} + end. + +%%% =================================================================== +%%% Protocol-Specific Server Functions +%%% =================================================================== + +%% @doc Start HTTP/3 server using QUIC transport. +%% +%% This function starts an HTTP/3 server using the QUIC protocol for +%% enhanced performance. It handles: +%% 1. Starting the QUICER application for QUIC support +%% 2. Creating a Cowboy QUIC listener with test certificates +%% 3. Configuring Ranch server options for QUIC transport +%% 4. 
Setting up connection supervision +%% +%% @param ServerID Unique server identifier +%% @param ProtoOpts Protocol options for Cowboy +%% @param NodeMsg Node configuration message +%% @returns {ok, Port, ServerPID} or {error, Reason} start_http3(ServerID, ProtoOpts, NodeMsg) -> ?event(http, {start_http3, ServerID}), Parent = self(), ServerPID = spawn(fun() -> application:ensure_all_started(quicer), - {ok, Listener} = + {ok, _Listener} = cowboy:start_quic( ServerID, TransOpts = #{ socket_opts => [ - {certfile, "test/test-tls.pem"}, - {keyfile, "test/test-tls.key"}, + {certfile, ?TEST_CERT_FILE}, + {keyfile, ?TEST_KEY_FILE}, {port, hb_opts:get(port, 0, NodeMsg)} ] }, @@ -286,10 +392,17 @@ start_http3(ServerID, ProtoOpts, NodeMsg) -> receive stop -> stopped end end), receive {ok, Port} -> {ok, Port, ServerPID} - after 2000 -> + after ?HTTP3_STARTUP_TIMEOUT -> {error, {timeout, starting_http3_server, ServerID}} end. +%% @doc HTTP/3 connection supervisor loop. +%% +%% This function provides a minimal connection supervisor for HTTP/3 +%% servers. QUIC doesn't use traditional connection supervisors, so +%% this is a placeholder that ignores all messages. +%% +%% @returns never returns (infinite loop) http3_conn_sup_loop() -> receive _ -> @@ -297,6 +410,18 @@ http3_conn_sup_loop() -> http3_conn_sup_loop() end. +%% @doc Start HTTP/2 server using TCP transport. +%% +%% This function starts an HTTP/2 server with fallback to HTTP/1.1 +%% using TCP transport. It handles: +%% 1. Starting a Cowboy clear (non-TLS) listener +%% 2. Port configuration and binding +%% 3. 
Restart handling for already-started listeners +%% +%% @param ServerID Unique server identifier +%% @param ProtoOpts Protocol options for Cowboy +%% @param NodeMsg Node configuration message +%% @returns {ok, Port, Listener} or {error, Reason} start_http2(ServerID, ProtoOpts, NodeMsg) -> ?event(http, {start_http2, ServerID}), StartRes = @@ -331,9 +456,28 @@ start_http2(ServerID, ProtoOpts, NodeMsg) -> start_http2(ServerID, ProtoOpts, NodeMsg) end. -%% @doc Entrypoint for all HTTP requests. Receives the Cowboy request option and -%% the server ID, which can be used to lookup the node message. + +%%% =================================================================== +%%% Request Handling +%%% =================================================================== + +%% @doc Entrypoint for all HTTP requests. +%% +%% This function serves as the main entry point for all incoming HTTP +%% requests. It handles two types of requests: +%% 1. Redirect requests - configured to redirect HTTP to HTTPS +%% 2. Normal requests - standard HyperBEAM request processing +%% +%% The function routes requests based on the handler state type. +%% +%% @param Req Cowboy request object +%% @param State Either {redirect_https, Opts, HttpsPort} or ServerID +%% @returns {ok, UpdatedReq, State} +init(Req, {redirect_https, Opts, HttpsPort}) -> + % Handle HTTPS redirect + redirect_to_https(Req, Opts, HttpsPort); init(Req, ServerID) -> + % Handle normal requests case cowboy_req:method(Req) of <<"OPTIONS">> -> cors_reply(Req, ServerID); _ -> @@ -341,29 +485,20 @@ init(Req, ServerID) -> handle_request(Req, Body, ServerID) end. -%% @doc Helper to grab the full body of a HTTP request, even if it's chunked. -read_body(Req) -> read_body(Req, <<>>). -read_body(Req0, Acc) -> - case cowboy_req:read_body(Req0) of - {ok, Data, _Req} -> {ok, << Acc/binary, Data/binary >>}; - {more, Data, Req} -> read_body(Req, << Acc/binary, Data/binary >>) - end. - -%% @doc Reply to CORS preflight requests. 
-cors_reply(Req, _ServerID) -> - Req2 = cowboy_req:reply(204, #{ - <<"access-control-allow-origin">> => <<"*">>, - <<"access-control-allow-headers">> => <<"*">>, - <<"access-control-allow-methods">> => - <<"GET, POST, PUT, DELETE, OPTIONS, PATCH">> - }, Req), - ?event(http_debug, {cors_reply, {req, Req}, {req2, Req2}}), - {ok, Req2, no_state}. - -%% @doc Handle all non-CORS preflight requests as AO-Core requests. Execution -%% starts by parsing the HTTP request into HyerBEAM's message format, then -%% passing the message directly to `meta@1.0' which handles calling AO-Core in -%% the appropriate way. +%% @doc Handle all non-CORS preflight requests as AO-Core requests. +%% +%% This function processes normal HTTP requests through the AO-Core system: +%% 1. Adding request timing information +%% 2. Retrieving server configuration options +%% 3. Handling root path redirects to default dashboard +%% 4. Parsing HTTP requests into HyperBEAM message format +%% 5. Invoking the meta@1.0 device for request processing +%% 6. Converting responses back to HTTP format +%% +%% @param RawReq Raw Cowboy request object +%% @param Body HTTP request body as binary +%% @param ServerID Server identifier for configuration lookup +%% @returns {ok, UpdatedReq, State} handle_request(RawReq, Body, ServerID) -> % Insert the start time into the request so that it can be used by the % `hb_http' module to calculate the duration of the request. @@ -373,15 +508,15 @@ handle_request(RawReq, Body, ServerID) -> put(server_id, ServerID), case {cowboy_req:path(RawReq), cowboy_req:qs(RawReq)} of {<<"/">>, <<>>} -> - % If the request is for the root path, serve a redirect to the default - % request of the node. + % If the request is for the root path, serve a + % redirect to the default request of the node. 
Req2 = cowboy_req:reply( 302, #{ <<"location">> => hb_opts:get( default_request, - <<"/~hyperbuddy@1.0/index">>, + ?DEFAULT_DASHBOARD_PATH, NodeMsg ) }, @@ -397,51 +532,113 @@ handle_request(RawReq, Body, ServerID) -> {cowboy_req, {explicit, Req}, {body, {string, Body}}} } ), + % TracePID = hb_tracer:start_trace(), % Parse the HTTP request into HyerBEAM's message format. - try hb_http:req_to_tabm_singleton(Req, Body, NodeMsg) of - ReqSingleton -> - try - CommitmentCodec = - hb_http:accept_to_codec(ReqSingleton, NodeMsg), - ?event(http, - {parsed_singleton, - {req_singleton, ReqSingleton}, - {accept_codec, CommitmentCodec}}, - #{} - ), - % Invoke the meta@1.0 device to handle the request. - {ok, Res} = - dev_meta:handle( - NodeMsg#{ - commitment_device => CommitmentCodec - }, - ReqSingleton - ), - hb_http:reply(Req, ReqSingleton, Res, NodeMsg) - catch - Type:Details:Stacktrace -> - handle_error( - Req, - ReqSingleton, - Type, - Details, - Stacktrace, - NodeMsg - ) - end - catch ParseError:ParseDetails:ParseStacktrace -> - handle_error( - Req, - #{}, - ParseError, - ParseDetails, - ParseStacktrace, - NodeMsg - ) + ReqSingleton = + try hb_http:req_to_tabm_singleton(Req, Body, NodeMsg) + catch ParseError:ParseDetails:ParseStacktrace -> + {parse_error, ParseError, ParseDetails, ParseStacktrace} + end, + try + case ReqSingleton of + {parse_error, PType, PDetails, PStacktrace} -> + erlang:raise(PType, PDetails, PStacktrace); + _ -> + ok + end, + CommitmentCodec = + hb_http:accept_to_codec(ReqSingleton, NodeMsg), + ?event(http, + {parsed_singleton, + {req_singleton, ReqSingleton}, + {accept_codec, CommitmentCodec}} + % #{trace => TracePID} + ), + % hb_tracer:record_step(TracePID, request_parsing), + % Invoke the meta@1.0 device to handle the request. 
+ {ok, Res} = + dev_meta:handle( + NodeMsg#{ + commitment_device => CommitmentCodec + % trace => TracePID + }, + ReqSingleton + ), + hb_http:reply(Req, ReqSingleton, Res, NodeMsg) + catch + Type:Details:Stacktrace -> + handle_error( + Req, + ReqSingleton, + Type, + Details, + Stacktrace, + NodeMsg + ) end end. +%% @doc Read the complete body of an HTTP request. +%% +%% This function handles reading HTTP request bodies that may be sent +%% in chunks. It accumulates all chunks into a single binary for +%% processing by the request handler. +%% +%% @param Req Cowboy request object +%% @returns {ok, Body} where Body is the complete request body +read_body(Req) -> read_body(Req, <<>>). + +%% @doc Read HTTP request body with accumulator for chunked data. +%% +%% This is the internal implementation that handles chunked request +%% bodies by recursively reading chunks and accumulating them into +%% a single binary. +%% +%% @param Req0 Cowboy request object +%% @param Acc Accumulator binary for body chunks +%% @returns {ok, CompleteBody} +read_body(Req0, Acc) -> + case cowboy_req:read_body(Req0) of + {ok, Data, _Req} -> {ok, << Acc/binary, Data/binary >>}; + {more, Data, Req} -> read_body(Req, << Acc/binary, Data/binary >>) + end. + +%% @doc Reply to CORS preflight requests. +%% +%% This function handles HTTP OPTIONS requests for CORS (Cross-Origin +%% Resource Sharing) preflight checks. It returns appropriate CORS +%% headers allowing cross-origin requests from any domain with any +%% headers and standard HTTP methods. 
+%% +%% @param Req Cowboy request object +%% @param _ServerID Server identifier (unused) +%% @returns {ok, UpdatedReq, State} +cors_reply(Req, _ServerID) -> + Req2 = cowboy_req:reply(204, #{ + <<"access-control-allow-origin">> => <<"*">>, + <<"access-control-allow-headers">> => <<"*">>, + <<"access-control-allow-methods">> => + <<"GET, POST, PUT, DELETE, OPTIONS, PATCH">> + }, Req), + ?event(http_debug, {cors_reply, {req, Req}, {req2, Req2}}), + {ok, Req2, no_state}. + %% @doc Return a 500 error response to the client. +%% +%% This function handles internal server errors by: +%% 1. Formatting error details and stacktrace for logging +%% 2. Creating a structured error message +%% 3. Logging the error with appropriate formatting +%% 4. Removing noise from stacktrace and details +%% 5. Sending the error response to the client +%% +%% @param Req Cowboy request object +%% @param Singleton Request singleton for response formatting +%% @param Type Error type +%% @param Details Error details +%% @param Stacktrace Error stacktrace +%% @param NodeMsg Node configuration for formatting +%% @returns {ok, UpdatedReq, State} handle_error(Req, Singleton, Type, Details, Stacktrace, NodeMsg) -> DetailsStr = hb_util:bin(hb_format:message(Details, NodeMsg, 1)), StacktraceStr = hb_util:bin(hb_format:trace(Stacktrace)), @@ -462,29 +659,128 @@ handle_error(Req, Singleton, Type, Details, Stacktrace, NodeMsg) -> 1 ) } - }, - NodeMsg + } ), % Remove leading and trailing noise from the stacktrace and details. FormattedErrorMsg = ErrorMsg#{ - <<"stacktrace">> => hb_util:bin(hb_format:remove_noise(StacktraceStr)), - <<"details">> => hb_util:bin(hb_format:remove_noise(DetailsStr)) + <<"stacktrace">> => + hb_util:bin(hb_format:remove_noise(StacktraceStr)), + <<"details">> => + hb_util:bin(hb_format:remove_noise(DetailsStr)) }, hb_http:reply(Req, Singleton, FormattedErrorMsg, NodeMsg). -%% @doc Return the list of allowed methods for the HTTP server. 
+%% @doc Return the list of allowed HTTP methods for the server. +%% +%% This function specifies which HTTP methods are supported by the +%% HyperBEAM HTTP server. It's used by Cowboy for method validation +%% and CORS preflight responses. +%% +%% @param Req Cowboy request object +%% @param State Handler state +%% @returns {MethodList, Req, State} where MethodList contains allowed methods allowed_methods(Req, State) -> { - [<<"GET">>, <<"POST">>, <<"PUT">>, <<"DELETE">>, <<"OPTIONS">>, <<"PATCH">>], + [ + <<"GET">>, <<"POST">>, <<"PUT">>, + <<"DELETE">>, <<"OPTIONS">>, <<"PATCH">> + ], Req, State }. -%% @doc Merges the provided `Opts' with uncommitted values from `Request', -%% preserves the http_server value, and updates node_history by prepending -%% the `Request'. If a server reference exists, updates the Cowboy environment -%% variable 'node_msg' with the resulting options map. +%%% =================================================================== +%%% HTTPS & Redirect Functions +%%% =================================================================== + +%% @doc Set up HTTP to HTTPS redirect on the original server. +%% +%% This function modifies an existing HTTP server's dispatcher to redirect +%% all incoming traffic to the HTTPS equivalent. It: +%% 1. Creates a new Cowboy dispatcher with redirect handlers +%% 2. Updates the server's environment with the new dispatcher +%% 3. 
Logs the redirect configuration for debugging +%% +%% @param ServerID HTTP server identifier to configure for redirect +%% @param Opts Configuration options containing HTTPS port information +%% @param HttpsPort HTTPS port number for the server +%% @returns ok +setup_http_redirect(ServerID, Opts, HttpsPort) -> + ?event(https, {setting_up_http_redirect, {server_id, ServerID}}), + % Create a new dispatcher that redirects everything to HTTPS + % We use a special redirect handler that will be handled by init/2 + RedirectDispatcher = cowboy_router:compile([ + {'_', [ + {'_', ?MODULE, {redirect_https, Opts, HttpsPort}} + ]} + ]), + % Update the server's dispatcher + cowboy:set_env(ServerID, dispatch, RedirectDispatcher), + ?event(https, {http_redirect_configured, {server_id, ServerID}}). + +%% @doc HTTP to HTTPS redirect handler. +%% +%% This handler processes HTTP requests and sends 301 Moved Permanently +%% responses to redirect clients to HTTPS. It: +%% 1. Extracts host, path, and query string from the request +%% 2. Determines the appropriate HTTPS port from configuration +%% 3. Constructs the HTTPS URL preserving path and query parameters +%% 4. 
Sends a 301 redirect with CORS headers
+%%
+%% @param Req0 Cowboy request object
+%% @param State Handler state containing server options
+%% @param HttpsPort HTTPS port number for the server
+%% @returns {ok, UpdatedReq, State}
+redirect_to_https(Req0, State, HttpsPort) ->
+    Host = cowboy_req:host(Req0),
+    Path = cowboy_req:path(Req0),
+    Qs = cowboy_req:qs(Req0),
+    % Get HTTPS port from state, default to 443
+    % Build the HTTPS URL with port if not standard HTTPS port
+    BaseUrl = case HttpsPort of
+        443 -> <<"https://", Host/binary>>;
+        _ ->
+            PortBin = integer_to_binary(HttpsPort),
+            <<"https://", Host/binary, ":", PortBin/binary>>
+    end,
+    Location = case Qs of
+        <<>> ->
+            <<BaseUrl/binary, Path/binary>>;
+        _ ->
+            <<BaseUrl/binary, Path/binary, "?", Qs/binary>>
+    end,
+    ?event(
+        https,
+        {
+            redirecting_to_https,
+            {from, Path},
+            {to, Location},
+            {https_port, HttpsPort}
+        }
+    ),
+    % Send 301 redirect
+    Req = cowboy_req:reply(301, #{
+        <<"location">> => Location,
+        <<"access-control-allow-origin">> => <<"*">>,
+        <<"access-control-allow-headers">> => <<"*">>,
+        <<"access-control-allow-methods">> =>
+            <<"GET, POST, PUT, DELETE, OPTIONS, PATCH">>
+    }, Req0),
+    {ok, Req, State}.
+
+%%% ===================================================================
+%%% Configuration & State Management
+%%% ===================================================================
+
+%% @doc Set server options by updating Cowboy environment.
+%%
+%% This function updates the server's runtime configuration by setting
+%% the 'node_msg' environment variable in the Cowboy listener. It's used
+%% to dynamically update server behavior without restarting.
+%%
+%% @param Opts Options map containing http_server reference and new settings
 set_opts(Opts) ->
     case hb_opts:get(http_server, no_server_ref, Opts) of
         no_server_ref ->
@@ -492,6 +788,19 @@ set_opts(Opts) ->
         ServerRef ->
             ok = cowboy:set_env(ServerRef, node_msg, Opts)
     end.
+
+%% @doc Merge request with server options and update node history.
+%% +%% This function performs advanced options merging by: +%% 1. Preparing and normalizing both request and server options +%% 2. Merging uncommitted request values with server configuration +%% 3. Updating the node history with the new request +%% 4. Preserving the http_server reference for future updates +%% 5. Updating the live server configuration +%% +%% @param Request Request message with new configuration values +%% @param Opts Current server options +%% @returns {ok, MergedOpts} where MergedOpts contains the updated configuration set_opts(Request, Opts) -> PreparedOpts = hb_opts:mimic_default_types( @@ -513,30 +822,64 @@ set_opts(Request, Opts) -> ?event(set_opts, {merged_opts, {explicit, MergedOpts}}), History = hb_opts:get(node_history, [], Opts) - ++ [ hb_private:reset(maps:without([node_history], PreparedRequest)) ], + ++ [ + hb_private:reset( + maps:without([node_history], PreparedRequest) + ) + ], FinalOpts = MergedOpts#{ http_server => hb_opts:get(http_server, no_server, Opts), node_history => History }, {set_opts(FinalOpts), FinalOpts}. -%% @doc Get the node message for the current process. +%% @doc Get server options for the current process. +%% +%% This function retrieves the current server configuration for the +%% calling process by looking up the server ID from the process +%% dictionary and fetching the associated node message. +%% +%% @returns Server options map or no_node_msg if not found get_opts() -> get_opts(#{ http_server => get(server_id) }). +%% @doc Get server options for a specific server. +%% +%% This function retrieves the server configuration for a specific +%% server by extracting the server reference and fetching the +%% 'node_msg' environment variable from Cowboy. +%% +%% @param NodeMsg Node message containing server reference +%% @returns Server options map or no_node_msg if not found get_opts(NodeMsg) -> ServerRef = hb_opts:get(http_server, no_server_ref, NodeMsg), cowboy:get_env(ServerRef, node_msg, no_node_msg). 
%% @doc Initialize the server ID for the current process. +%% +%% This function stores the server identifier in the process dictionary +%% so that other functions can retrieve server-specific configuration +%% without explicitly passing the server ID. +%% +%% @param ServerID Server identifier to store +%% @returns ok set_proc_server_id(ServerID) -> put(server_id, ServerID). -%% @doc Apply the default node message to the given opts map. +%% @doc Apply default configuration to the provided options. +%% +%% This function enhances the provided options with system defaults: +%% 1. Generating a random port if none provided +%% 2. Creating a new wallet if none provided +%% 3. Setting up default store configuration +%% 4. Adding derived values like address and force_signed flag +%% +%% @param Opts Base options map to enhance with defaults +%% @returns Enhanced options map with all required defaults set_default_opts(Opts) -> % Create a temporary opts map that does not include the defaults. TempOpts = Opts#{ only => local }, % Get the port to use for the server. If no port is provided, we use port 0 - % will the operating system assign a free port. + % and let the operating system assign a free port. Port = hb_opts:get(port, 0, TempOpts), Wallet = case hb_opts:get(priv_wallet, no_viable_wallet, TempOpts) of @@ -564,10 +907,102 @@ set_default_opts(Opts) -> force_signed => true }. -%% @doc Test that we can start the server, send a message, and get a response. -start_node() -> - start_node(#{}). -start_node(Opts) -> +%%% =================================================================== +%%% UI & Display Functions +%%% =================================================================== + +%% @doc Conditionally print the startup greeter message. +%% +%% This function displays the HyperBEAM startup banner and configuration +%% information, but only when not running in test mode. It provides +%% visual feedback about successful server startup and configuration. 
+%% +%% @param MergedConfig Complete server configuration +%% @param PrivWallet Private wallet for operator address display +%% @returns ok +print_greeter_if_not_test(MergedConfig, PrivWallet) -> + case hb_features:test() of + false -> + print_greeter(MergedConfig, PrivWallet); + true -> + ok + end. + +%% @doc Print the HyperBEAM startup banner and configuration. +%% +%% This function displays a detailed startup message including: +%% 1. ASCII art HyperBEAM logo +%% 2. Version information +%% 3. Server URL for access +%% 4. Operator wallet address +%% 5. Complete configuration details +%% +%% The output provides comprehensive information about the running +%% server instance for debugging and verification. +%% +%% @param Config Server configuration map +%% @param PrivWallet Private wallet for operator identification +%% @returns ok +print_greeter(Config, PrivWallet) -> + FormattedConfig = hb_format:term(Config, Config, 2), + io:format("~n" + "===========================================================~n" + "== ██╗ ██╗██╗ ██╗██████╗ ███████╗██████╗ ==~n" + "== ██║ ██║╚██╗ ██╔╝██╔══██╗██╔════╝██╔══██╗ ==~n" + "== ███████║ ╚████╔╝ ██████╔╝█████╗ ██████╔╝ ==~n" + "== ██╔══██║ ╚██╔╝ ██╔═══╝ ██╔══╝ ██╔══██╗ ==~n" + "== ██║ ██║ ██║ ██║ ███████╗██║ ██║ ==~n" + "== ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝ ==~n" + "== ==~n" + "== ██████╗ ███████╗ █████╗ ███╗ ███╗ VERSION: ==~n" + "== ██╔══██╗██╔════╝██╔══██╗████╗ ████║ v~p. ==~n" + "== ██████╔╝█████╗ ███████║██╔████╔██║ ==~n" + "== ██╔══██╗██╔══╝ ██╔══██║██║╚██╔╝██║ EAT GLASS, ==~n" + "== ██████╔╝███████╗██║ ██║██║ ╚═╝ ██║ BUILD THE ==~n" + "== ╚═════╝ ╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝ FUTURE. 
==~n"
+        "===========================================================~n"
+        "== Node active at: ~s ==~n"
+        "== Operator: ~s ==~n"
+        "===========================================================~n"
+        "== Config: ==~n"
+        "===========================================================~n"
+        " ~s~n"
+        "===========================================================~n",
+        [
+            ?HYPERBEAM_VERSION,
+            string:pad(
+                lists:flatten(
+                    io_lib:format(
+                        "http://~s:~p",
+                        [
+                            hb_opts:get(host, <<"localhost">>, Config),
+                            hb_opts:get(port, ?DEFAULT_HTTP_PORT, Config)
+                        ]
+                    )
+                ),
+                37, leading, $ 
+            ),
+            hb_util:human_id(ar_wallet:to_address(PrivWallet)),
+            FormattedConfig
+        ]
+    ).
+
+%%% ===================================================================
+%%% Shared Server Utilities
+%%% ===================================================================
+
+%% @doc Start all required applications for HyperBEAM servers.
+%%
+%% This function ensures all necessary Erlang applications are started
+%% for both HTTP and HTTPS servers. The applications include:
+%% 1. Core Erlang applications (kernel, stdlib)
+%% 2. Network applications (inets, ssl)
+%% 3. HTTP server applications (ranch, cowboy)
+%% 4. HTTP client applications (gun)
+%% 5. System monitoring (os_mon)
+%%
+%% @returns ok or {error, Reason}
+start_required_applications() ->
     application:ensure_all_started([
         kernel,
         stdlib,
@@ -577,22 +1012,264 @@ start_node(Opts) ->
         cowboy,
         gun,
         os_mon
-    ]),
-    hb:init(),
-    hb_sup:start_link(Opts),
-    ServerOpts = set_default_opts(Opts),
-    {ok, _Listener, Port} = new_server(ServerOpts),
-    <<"http://localhost:", (hb_util:bin(Port))/binary, "/">>.
+    ]).
+
+%% @doc Generate unique server ID from wallet address.
+%%
+%% This function creates a unique server identifier by:
+%% 1. Extracting the private wallet from node configuration
+%% 2. Converting the wallet to an Arweave address
+%% 3. 
Creating a human-readable ID from the address +%% +%% The resulting ID is used for Cowboy listener registration and +%% server identification throughout the system. +%% +%% @param NodeMsg Node configuration containing wallet information +%% @returns ServerID binary for use as Cowboy listener name +generate_server_id(NodeMsg) -> + hb_util:human_id( + ar_wallet:to_address( + hb_opts:get(priv_wallet, no_wallet, NodeMsg) + ) + ). + +%% @doc Create base protocol options for Cowboy servers. +%% +%% This function creates the standard protocol options used by both +%% HTTP and HTTPS servers. It configures: +%% 1. Cowboy dispatcher with the server module and ID +%% 2. Environment variables including node message +%% 3. Stream handlers for request processing +%% 4. Connection limits and timeout settings +%% +%% @param ServerID Server identifier for the dispatcher +%% @param NodeMsg Node configuration message +%% @returns Protocol options map for Cowboy listener +create_base_protocol_opts(ServerID, NodeMsg) -> + NodeMsgWithID = hb_maps:put(http_server, ServerID, NodeMsg), + Dispatcher = cowboy_router:compile([{'_', [{'_', ?MODULE, ServerID}]}]), + #{ + env => #{ dispatch => Dispatcher, node_msg => NodeMsgWithID }, + stream_handlers => [cowboy_stream_h], + max_connections => infinity, + idle_timeout => hb_opts:get(idle_timeout, ?DEFAULT_IDLE_TIMEOUT, NodeMsg) + }. + +%% @doc Add Prometheus metrics to protocol options if enabled. +%% +%% This function conditionally enhances protocol options with Prometheus +%% metrics collection. It: +%% 1. Checks if Prometheus is enabled in configuration +%% 2. Starts Prometheus applications if needed +%% 3. Adds metrics callback and enhanced stream handlers +%% 4. 
Handles graceful fallback if Prometheus is unavailable +%% +%% @param ProtoOpts Base protocol options to enhance +%% @param NodeMsg Node configuration message +%% @returns Enhanced protocol options with optional Prometheus support +add_prometheus_if_enabled(ProtoOpts, NodeMsg) -> + case hb_opts:get(prometheus, not hb_features:test(), NodeMsg) of + true -> + ?event(prometheus, + {starting_prometheus, {test_mode, hb_features:test()}} + ), + try + application:ensure_all_started([prometheus, prometheus_cowboy]), + ProtoOpts#{ + metrics_callback => + fun prometheus_cowboy2_instrumenter:observe/1, + stream_handlers => [cowboy_metrics_h, cowboy_stream_h] + } + catch + Type:Reason -> + ?event(prometheus, + {prometheus_not_started, {type, Type}, {reason, Reason}} + ), + ProtoOpts + end; + false -> + ?event(prometheus, + {prometheus_not_started, {test_mode, hb_features:test()}} + ), + ProtoOpts + end. + +%% @doc Process server startup hooks for configuration modification. +%% +%% This function executes the startup hook system, allowing external +%% devices and modules to modify server configuration before startup. +%% It: +%% 1. Wraps options in the expected hook message format +%% 2. Calls the startup hook with the configuration +%% 3. Extracts the modified configuration from the hook response +%% 4. Handles hook execution errors with appropriate logging +%% +%% @param Opts Initial server options to process through hooks +%% @returns {ok, ModifiedNodeMsg} or throws {failed_to_start_server, Reason} +process_server_hooks(Opts) -> + HookMsg = #{ <<"body">> => Opts }, + case dev_hook:on(<<"start">>, HookMsg, Opts) of + {ok, #{ <<"body">> := NodeMsgAfterHook }} -> + {ok, NodeMsgAfterHook}; + Unexpected -> + ?event(server, + {failed_to_start_server, + {unexpected_hook_result, Unexpected} + } + ), + throw( + {failed_to_start_server, + {unexpected_hook_result, Unexpected} + } + ) + end. 
+ +%%% =================================================================== +%%% HTTPS Server Helper Functions +%%% =================================================================== + +%% @doc Create HTTPS server IDs from node configuration. +%% +%% This function generates unique server identifiers for HTTPS servers: +%% 1. Initializes the HTTP module for request handling +%% 2. Generates the base server ID using the shared utility +%% 3. Creates the HTTPS-specific server ID by appending '_https' +%% +%% The HTTPS server ID is used for Cowboy listener registration and +%% must be unique from the HTTP server ID. +%% +%% @param NodeMsg Node configuration message containing wallet +%% @returns {ServerID, HttpsServerID} tuple for server identification +create_https_server_id(NodeMsg) -> + % Initialize HTTP module + hb_http:start(), + % Create server ID using shared utility + ServerID = generate_server_id(NodeMsg), + HttpsServerID = <<ServerID/binary, "_https">>, + {ServerID, HttpsServerID}. + +%% @doc Create HTTPS dispatcher and protocol options. +%% +%% This function sets up the Cowboy dispatcher and protocol options +%% for HTTPS servers by leveraging the shared utility functions. +%% It: +%% 1. Creates base protocol options using the shared utility +%% 2. Extracts the dispatcher for return compatibility +%% 3. Ensures consistent configuration between HTTP and HTTPS +%% +%% @param HttpsServerID Unique HTTPS server identifier +%% @param NodeMsg Node configuration message +%% @returns {Dispatcher, ProtoOpts} tuple for Cowboy configuration +create_https_dispatcher(HttpsServerID, NodeMsg) -> + % Use shared utility for protocol options + ProtoOpts = create_base_protocol_opts(HttpsServerID, NodeMsg), + % Extract dispatcher for return (though not used in current flow) + #{env := #{dispatch := Dispatcher}} = ProtoOpts, + {Dispatcher, ProtoOpts}. + +%% @doc Start TLS listener for HTTPS server. 
+%% +%% This function starts the actual Cowboy TLS listener with the +%% provided certificate files and protocol options. It handles +%% the low-level server startup. +%% +%% @param HttpsServerID Unique HTTPS server identifier +%% @param HttpsPort Port number for HTTPS server +%% @param CertFile Path to certificate PEM file +%% @param KeyFile Path to private key PEM file +%% @param ProtoOpts Protocol options for Cowboy +%% @returns {ok, Listener} or {error, Reason} +start_tls_listener(HttpsServerID, HttpsPort, CertFile, KeyFile, ProtoOpts) -> + ?event( + https, + { + starting_tls_listener, + {server_id, HttpsServerID}, + {port, HttpsPort}, + {cert_file, CertFile}, + {key_file, KeyFile} + } + ), + case cowboy:start_tls( + HttpsServerID, + [ + {port, HttpsPort}, + {certfile, CertFile}, + {keyfile, KeyFile} + ], + ProtoOpts + ) of + {ok, Listener} -> + ?event( + https, + { + https_server_started, + {listener, Listener}, + {server_id, HttpsServerID}, + {port, HttpsPort} + } + ), + {ok, Listener}; + {error, Reason} -> + ?event(https, {tls_listener_start_failed, {reason, Reason}}), + {error, Reason} + end. +%% @doc Set up HTTP to HTTPS redirect if needed. +%% +%% This function conditionally configures an existing HTTP server +%% to redirect all traffic to HTTPS. It: +%% 1. Validates the redirect target server ID +%% 2. Configures HTTP server redirect if target is valid +%% 3. Logs redirect setup or skipping with reasons +%% 4. Handles invalid server IDs gracefully +%% +%% The redirect setup allows seamless HTTP to HTTPS migration. 
+%% +%% @param RedirectTo HTTP server ID to configure (or no_server to skip) +%% @param NodeMsg Node configuration message with HTTPS port +%% @param HttpsPort HTTPS port number for redirect URL construction +%% @returns ok +setup_redirect_if_needed(RedirectTo, NodeMsg, HttpsPort) -> + ?event( + https, + { + checking_for_http_server_to_redirect, + {original_server_id, RedirectTo} + } + ), + case RedirectTo of + no_server -> + ?event(https, {no_original_server_to_redirect}), + ok; + _ when is_binary(RedirectTo) -> + ?event( + https, + { + setting_up_redirect_from_http_to_https, + {http_server, RedirectTo}, + {https_port, HttpsPort} + } + ), + setup_http_redirect(RedirectTo, NodeMsg, HttpsPort); + _ -> + ?event(https, {invalid_redirect_server_id, RedirectTo}), + ok + end. + +%%% =================================================================== %%% Tests -%%% The following only covering the HTTP server initialization process. For tests -%%% of HTTP server requests/responses, see `hb_http.erl'. - -%% @doc Ensure that the `start' hook can be used to modify the node options. We -%% do this by creating a message with a device that has a `start' key. This -%% key takes the message's body (the anticipated node options) and returns a -%% modified version of that body, which will be used to configure the node. We -%% then check that the node options were modified as we expected. +%%% =================================================================== + +%% @doc Test server startup hook functionality. +%% +%% This test verifies that the startup hook system works correctly by: +%% 1. Creating a test device with a startup hook +%% 2. Starting a node with the hook configuration +%% 3. Verifying that the hook modified the server options +%% 4. 
Confirming the modified options are accessible via the API +%% +%% @returns ok (test assertion) set_node_opts_test() -> Node = start_node(#{ @@ -614,8 +1291,16 @@ set_node_opts_test() -> {ok, LiveOpts} = hb_http:get(Node, <<"/~meta@1.0/info">>, #{}), ?assert(hb_ao:get(<<"test-success">>, LiveOpts, false, #{})). -%% @doc Test the set_opts/2 function that merges request with options, -%% manages node history, and updates server state. +%% @doc Test the set_opts/2 function for options merging and history. +%% +%% This test validates the options merging functionality by: +%% 1. Starting a test node with a known wallet +%% 2. Testing empty node history initialization +%% 3. Testing single request option merging +%% 4. Testing multiple request history accumulation +%% 5. Verifying node history growth and option persistence +%% +%% @returns ok (test assertions) set_opts_test() -> DefaultOpts = hb_opts:default_message_with_env(), start_node(DefaultOpts#{ @@ -649,15 +1334,27 @@ set_opts_test() -> ?assert(length(NodeHistory2) == 2), ?assert(Key2 == <<"world2">>), % Test case 3: Non-empty node_history case - {ok, UpdatedOpts3} = set_opts(#{}, UpdatedOpts2#{ <<"hello3">> => <<"world3">> }), + {ok, UpdatedOpts3} = + set_opts(#{}, UpdatedOpts2#{ <<"hello3">> => <<"world3">> }), NodeHistory3 = hb_opts:get(node_history, not_found, UpdatedOpts3), Key3 = hb_opts:get(<<"hello3">>, not_found, UpdatedOpts3), ?event(debug_node_history, {node_history_length, length(NodeHistory3)}), ?assert(length(NodeHistory3) == 3), ?assert(Key3 == <<"world3">>). +%% @doc Test server restart functionality. +%% +%% This test verifies that servers can be restarted with updated +%% configuration by: +%% 1. Starting a server with initial configuration +%% 2. Starting a second server with the same wallet but different config +%% 3. Verifying that the second server has the updated configuration +%% 4. 
Confirming that server restart preserves functionality +%% +%% @returns ok (test assertion) restart_server_test() -> - % We force HTTP2, overriding the HTTP3 feature, because HTTP3 restarts don't work yet. + % We force HTTP2, overriding the HTTP3 feature, + % because HTTP3 restarts don't work yet. Wallet = ar_wallet:new(), BaseOpts = #{ <<"test-key">> => <<"server-1">>, @@ -668,5 +1365,5 @@ restart_server_test() -> N2 = start_node(BaseOpts#{ <<"test-key">> => <<"server-2">> }), ?assertEqual( {ok, <<"server-2">>}, - hb_http:get(N2, <<"/~meta@1.0/info/test-key">>, #{protocol => http2}) - ). + hb_http:get(N2, <<"/~meta@1.0/info/test-key">>, #{}) + ). \ No newline at end of file diff --git a/src/hb_opts.erl b/src/hb_opts.erl index 1000d8361..3ba09706f 100644 --- a/src/hb_opts.erl +++ b/src/hb_opts.erl @@ -115,6 +115,12 @@ default_message() -> %% What HTTP client should the node use? %% Options: gun, httpc http_client => gun, + %% Should the HTTP client automatically follow 3xx redirects? + http_follow_redirects => true, + %% For the gun HTTP client, to mitigate resource exhaustion attacks, what's + %% the maximum number of automatic 3xx redirects we'll allow when + %% http_follow_redirects = true? + gun_max_redirects => 5, %% Scheduling mode: Determines when the SU should inform the recipient %% that an assignment has been scheduled for a message. 
%% Options: aggressive(!), local_confirmation, remote_confirmation, @@ -190,6 +196,7 @@ default_message() -> #{<<"name">> => <<"tx@1.0">>, <<"module">> => dev_codec_tx}, #{<<"name">> => <<"volume@1.0">>, <<"module">> => dev_volume}, #{<<"name">> => <<"secret@1.0">>, <<"module">> => dev_secret}, + #{<<"name">> => <<"ssl-cert@1.0">>, <<"module">> => dev_ssl_cert}, #{<<"name">> => <<"wasi@1.0">>, <<"module">> => dev_wasi}, #{<<"name">> => <<"wasm-64@1.0">>, <<"module">> => dev_wasm}, #{<<"name">> => <<"whois@1.0">>, <<"module">> => dev_whois} diff --git a/src/include/snp_constants.hrl b/src/include/snp_constants.hrl new file mode 100644 index 000000000..a7a647515 --- /dev/null +++ b/src/include/snp_constants.hrl @@ -0,0 +1,207 @@ +%%% @doc Constants for SNP commitment reports. +%%% +%%% This file contains all numeric constants used across SNP modules to avoid +%%% magic numbers and improve maintainability. + +%% Report structure sizes +-define(REPORT_SIZE, 1184). % Total SNP report size in bytes +-define(REPORT_MAIN_PORTION_SIZE, 1016). % Size of main portion before signature +-define(REPORT_SIGNATURE_SIZE, 168). % Signature portion size (72 + 72 + 24) + +%% Page and memory sizes +-define(PAGE_SIZE, 4096). % Standard page size in bytes (4KB) +-define(LAUNCH_DIGEST_SIZE, 48). % Launch digest size in bytes (SHA-384) +-define(MAX_VCPUS, 512). % Max VCPUs for launch digest (DoS safeguard) +-define(LAUNCH_DIGEST_BITS, 384). % Launch digest size in bits (48 * 8) +-define(CHIP_ID_SIZE, 64). % Chip ID size in bytes + +%% Hash sizes +-define(SHA256_SIZE, 32). % SHA-256 hash size in bytes +-define(SHA384_SIZE, 48). % SHA-384 hash size in bytes +-define(HEX_STRING_48_BYTES, 96). % Hex string length for 48-byte hash + +%% Page info structure +-define(PAGE_INFO_LEN, 112). % Page info structure size (0x70 bytes) + +%% Memory addresses and masks +-define(FOUR_GB, 16#100000000). % 4GB address (0x100000000) +-define(PAGE_MASK, 16#FFF). 
% Page offset mask (4KB alignment) +-define(BSP_EIP, 16#FFFFFFFFF0). % BSP EIP value (0xffff_fff0) + +%% VMSA page structure offsets (in hex for clarity) +-define(VMSA_OFFSET_ES, 16#0). % ES segment register offset +-define(VMSA_OFFSET_CS, 16#10). % CS segment register offset +-define(VMSA_OFFSET_SS, 16#20). % SS segment register offset +-define(VMSA_OFFSET_DS, 16#30). % DS segment register offset +-define(VMSA_OFFSET_FS, 16#40). % FS segment register offset +-define(VMSA_OFFSET_GS, 16#50). % GS segment register offset +-define(VMSA_OFFSET_GDTR, 16#60). % GDTR segment register offset +-define(VMSA_OFFSET_LDTR, 16#70). % LDTR segment register offset +-define(VMSA_OFFSET_IDTR, 16#80). % IDTR segment register offset +-define(VMSA_OFFSET_TR, 16#90). % TR segment register offset +-define(VMSA_OFFSET_EFER, 16#D0). % EFER control register offset +-define(VMSA_OFFSET_CR4, 16#148). % CR4 control register offset +-define(VMSA_OFFSET_CR0, 16#158). % CR0 control register offset +-define(VMSA_OFFSET_DR7, 16#160). % DR7 control register offset +-define(VMSA_OFFSET_DR6, 16#168). % DR6 control register offset +-define(VMSA_OFFSET_RFLAGS, 16#170). % RFLAGS control register offset +-define(VMSA_OFFSET_RIP, 16#178). % RIP control register offset +-define(VMSA_OFFSET_G_PAT, 16#268). % G_PAT register offset +-define(VMSA_OFFSET_RDX, 16#310). % RDX register offset +-define(VMSA_OFFSET_SEV_FEATURES, 16#3B0). % SEV features register offset +-define(VMSA_OFFSET_XCR0, 16#3E8). % XCR0 register offset +-define(VMSA_OFFSET_MXCSR, 16#408). % MXCSR register offset +-define(VMSA_OFFSET_X87_FCW, 16#410). % X87 FCW register offset + +%% VMSA register values +-define(VMSA_EFER_VALUE, 16#1000). % EFER register value +-define(VMSA_CR4_VALUE, 16#40). % CR4 register value +-define(VMSA_CR0_VALUE, 16#10). % CR0 register value +-define(VMSA_DR7_VALUE, 16#400). % DR7 register value +-define(VMSA_DR6_VALUE, 16#FFFF0FF0). % DR6 register value +-define(VMSA_RFLAGS_VALUE, 16#2). 
% RFLAGS register value +-define(VMSA_G_PAT_VALUE, 16#7040600070406). % G_PAT register value +-define(VMSA_XCR0_VALUE, 16#1). % XCR0 register value +-define(VMSA_CS_SELECTOR, 16#F000). % CS selector value +-define(VMSA_SEGMENT_LIMIT, 16#FFFF). % Standard segment limit value +-define(VMSA_SEGMENT_ATTRIB_ES, 16#93). % ES segment attribute +-define(VMSA_SEGMENT_ATTRIB_DS, 16#93). % DS segment attribute +-define(VMSA_SEGMENT_ATTRIB_FS, 16#93). % FS segment attribute +-define(VMSA_SEGMENT_ATTRIB_GS, 16#93). % GS segment attribute +-define(VMSA_SEGMENT_ATTRIB_LDTR, 16#82). % LDTR segment attribute + +%% VMSA GPA +-define(VMSA_GPA, 16#FFFFFFFFF000). % VMSA page GPA + +%% Page type constants +-define(PAGE_TYPE_NORMAL, 1). % Normal page type +-define(PAGE_TYPE_VMSA, 2). % VMSA page type +-define(PAGE_TYPE_ZERO, 3). % Zero page type +-define(PAGE_TYPE_SVSM_CAA, 4). % SVSM CAA page type +-define(PAGE_TYPE_SECRETS, 5). % Secrets page type +-define(PAGE_TYPE_CPUID, 6). % CPUID page type + +%% SEV hash table constants +-define(SEV_HASH_TABLE_ENTRY_LENGTH, 50). % SEV hash table entry length +-define(SEV_HASH_TABLE_SIZE, 168). % SEV hash table total size +-define(SEV_HASH_TABLE_PADDING, 8). % SEV hash table padding size + +%% SPL value limits +-define(MAX_SPL_VALUE, 255). % Maximum SPL value (u8) + +%% Report data version +-define(REPORT_DATA_VERSION, 1). % Report data version + +%% Signature component sizes +-define(SIGNATURE_R_SIZE, 72). % Signature R component size in bytes +-define(SIGNATURE_S_SIZE, 72). % Signature S component size in bytes +-define(SIGNATURE_RESERVED_SIZE, 24). % Signature reserved area size in bytes +-define(SIGNATURE_RESERVED_BITS, 192). % Signature reserved area size in bits (24 * 8) +-define(RESERVED1_SIZE, 24). % Reserved1 field size in bytes +-define(RESERVED1_BITS, 192). % Reserved1 field size in bits (24 * 8) +-define(RESERVED4_BITS, 1344). 
% Reserved4 field size in bits (168 * 8) + +%% OVMF footer table constants +-define(OVMF_ENTRY_HEADER_SIZE, 18). % OVMF entry header size (2 bytes size + 16 bytes GUID) +-define(OVMF_DESCRIPTOR_SIZE, 12). % OVMF metadata section descriptor size +-define(OVMF_FOOTER_OFFSET, 32). % OVMF footer table offset from end of file + +%% Configuration constants +-define(COMMITTED_PARAMETERS, [vcpus, vcpu_type, vmm_type, guest_features, + firmware, kernel, initrd, append]). % Parameters committed in SNP reports +%% Guest policy DEBUG bit (AMD SEV-SNP): policy.DEBUG=1 => debug VM, 0 => production. +%% Use this bit only; do not infer debug from TCB/SVN. Report must be verified (signature + chain) first. +-define(DEBUG_FLAG_BIT, 19). % Bit position of DEBUG in SNP guest policy (u64) +-define(SNP_GUEST_POLICY_DEBUG, (1 bsl ?DEBUG_FLAG_BIT)). % Mask for C-style (report.policy & SNP_GUEST_POLICY_DEBUG) + +%% TCB structure offsets +-define(TCB_OFFSET_BOOTLOADER, 0). % Bootloader SPL offset in TCB structure +-define(TCB_OFFSET_TEE, 1). % TEE SPL offset in TCB structure +-define(TCB_OFFSET_SNP, 6). % SNP SPL offset in TCB structure (skips reserved bytes 2-5) +-define(TCB_OFFSET_MICROCODE, 7). % Microcode SPL offset in TCB structure +-define(TCB_RESERVED_BYTES, 4). % Reserved bytes in TCB structure (bytes 2-5) +-define(TCB_SIZE, 8). % Total TCB structure size in bytes + +%% Report field sizes +-define(FAMILY_ID_SIZE, 16). % Family ID size in bytes +-define(IMAGE_ID_SIZE, 16). % Image ID size in bytes +-define(HOST_DATA_SIZE, 32). % Host data size in bytes +-define(REPORT_ID_SIZE, 32). % Report ID size in bytes + +%% Signature reserved area +-define(SIGNATURE_RESERVED_TOTAL_SIZE, 368). % Total signature reserved area (includes padding after R+S) + +%% Signature verification constants +-define(SIGNATURE_PORTION_SIZE, 144). % Signature portion size (72 + 72 bytes) +-define(SIGNATURE_R_BITS, 576). % Signature R size in bits (72 * 8) +-define(SIGNATURE_S_BITS, 576). 
% Signature S size in bits (72 * 8) + +%% HTTP constants +-define(HTTP_PORT_HTTPS, 443). % HTTPS default port +-define(HTTP_PORT_HTTP, 80). % HTTP default port +-define(HTTP_STATUS_OK, 200). % HTTP success status code + +%% Certificate constants +-define(CERT_CHAIN_MIN_SIZE, 2). % Minimum certificates in chain (ASK + ARK) +-define(CERT_SINGLE, 1). % Single certificate + +%% OVMF parsing constants +-define(OVMF_MIN_FILE_SIZE, 50). % Minimum OVMF file size for parsing +-define(OVMF_GPA_EIP_SIZE, 4). % Size of GPA/EIP fields in bytes (u32) + +%% OVMF section type constants +-define(OVMF_SECTION_SNP_SEC_MEMORY, 1). % SnpSecMemory section type +-define(OVMF_SECTION_SNP_SECRETS, 2). % SnpSecrets section type +-define(OVMF_SECTION_CPUID, 3). % Cpuid section type +-define(OVMF_SECTION_SVSM_CAA, 4). % SvsmCaa section type +-define(OVMF_SECTION_SNP_KERNEL_HASHES, 16). % SnpKernelHashes section type (0x10) + +%% VMM type constants +-define(VMM_TYPE_QEMU, 1). % QEMU VMM type +-define(VMM_TYPE_EC2, 2). % EC2 VMM type + +%% VMM-specific VMSA flags (QEMU) +-define(VMM_QEMU_CS_FLAGS, 16#9B). % QEMU CS segment flags +-define(VMM_QEMU_SS_FLAGS, 16#93). % QEMU SS segment flags +-define(VMM_QEMU_TR_FLAGS, 16#8B). % QEMU TR segment flags +-define(VMM_QEMU_MXCSR, 16#1F80). % QEMU MXCSR value +-define(VMM_QEMU_FCW, 16#37F). % QEMU X87 FCW value + +%% VMM-specific VMSA flags (EC2) +-define(VMM_EC2_BSP_CS_FLAGS, 16#9A). % EC2 BSP CS segment flags +-define(VMM_EC2_BSP_SS_FLAGS, 16#92). % EC2 BSP SS segment flags +-define(VMM_EC2_BSP_TR_FLAGS, 16#83). % EC2 BSP TR segment flags +-define(VMM_EC2_AP_CS_FLAGS, 16#9B). % EC2 AP CS segment flags +-define(VMM_EC2_AP_SS_FLAGS, 16#92). % EC2 AP SS segment flags +-define(VMM_EC2_AP_TR_FLAGS, 16#83). % EC2 AP TR segment flags + +%% EIP bit masks +-define(EIP_LOWER_16_MASK, 16#FFFF). % Mask for lower 16 bits of EIP +-define(EIP_UPPER_16_MASK, 16#FFFF0000). 
% Mask for upper 16 bits of EIP (CS base) + +%% Hash size constants for SEV hashes +-define(SEV_HASH_BINARY_SIZE, 32). % SEV hash binary size (SHA-256) +-define(SEV_HASH_HEX_SIZE, 64). % SEV hash hex string size (32 bytes * 2) + +%% JSON preview size +-define(JSON_PREVIEW_SIZE, 1000). % Size for JSON preview in logging + +%% AMD KDS (Key Distribution Service) constants +-define(KDS_CERT_SITE, "https://kdsintf.amd.com"). % AMD KDS certificate site URL +-define(KDS_VCEK_PATH, "/vcek/v1"). % AMD KDS VCEK certificate path +-define(DEFAULT_SEV_PRODUCT, "Milan"). % Default SEV product name + +%% OVMF metadata constants +-define(OVMF_METADATA_VERSION, 1). % OVMF metadata version +-define(OVMF_METADATA_HEADER_SIZE, 16). % OVMF metadata header size (4 bytes signature + 4 bytes size + 4 bytes version + 4 bytes num_items) +-define(OVMF_METADATA_OFFSET_SIZE, 4). % OVMF metadata offset field size (u32) + +%% Default reset EIP +-define(DEFAULT_RESET_EIP, 0). % Default reset EIP value when OVMF parsing fails + +%% VMSA area sizes (for debugging/logging) +-define(VMSA_SEGMENT_REGS_AREA_SIZE, 160). % Segment registers area size (0x0-0x9F) +-define(VMSA_CONTROL_REGS_AREA_SIZE, 304). % Control registers area size (from EFER offset) +-define(VMSA_GENERAL_REGS_AREA_OFFSET, 16#300). % General registers area offset +-define(VMSA_GENERAL_REGS_AREA_SIZE, 256). % General registers area size (0x300-0x3FF) + diff --git a/src/include/snp_guids.hrl b/src/include/snp_guids.hrl new file mode 100644 index 000000000..9610b38b5 --- /dev/null +++ b/src/include/snp_guids.hrl @@ -0,0 +1,42 @@ +%%% @doc GUID definitions for SNP commitment reports. +%%% +%%% This file contains all GUID (Globally Unique Identifier) definitions used +%%% across SNP modules. GUIDs are defined in little-endian byte order to match +%%% the Rust implementation. 
+ +%% SEV Hash Table GUIDs (from Rust sev_hashes.rs) +%% SEV_HASH_TABLE_HEADER_GUID: 9438d606-4f22-4cc9-b479-a793d411fd21 +-define(SEV_HASH_TABLE_HEADER_GUID, <<6, 214, 56, 148, 34, 79, 201, 76, + 180, 121, 167, 147, 212, 17, 253, 33>>). + +%% SEV_CMDLINE_ENTRY_GUID: 97d02dd8-bd20-4c94-aa78-e7714d36ab2a +-define(SEV_CMDLINE_ENTRY_GUID, <<216, 45, 208, 151, 32, 189, 148, 76, + 170, 120, 231, 113, 77, 54, 171, 42>>). + +%% SEV_INITRD_ENTRY_GUID: 44baf731-3a2f-4bd7-9af1-41e29169781d +%% Note: Bytes 8-9 swapped to match Rust (9a f1) +-define(SEV_INITRD_ENTRY_GUID, <<49, 247, 186, 68, 47, 58, 215, 75, + 154, 241, 65, 226, 145, 105, 120, 29>>). + +%% SEV_KERNEL_ENTRY_GUID: 4de79437-abd2-427f-b835-d5b172d2045b +%% Note: Bytes 8-9 swapped to match Rust (b8 35) +-define(SEV_KERNEL_ENTRY_GUID, <<55, 148, 231, 77, 210, 171, 127, 66, + 184, 53, 213, 177, 114, 210, 4, 91>>). + +%% OVMF GUIDs +%% OVMF_TABLE_FOOTER_GUID: 96b582de-1fb2-45f7-baea-a366c55a082d +-define(OVMF_TABLE_FOOTER_GUID, <<222, 130, 181, 150, 178, 31, 247, 69, + 186, 234, 163, 102, 197, 90, 8, 45>>). + +%% OVMF_SEV_METADATA_GUID: dc886566-984a-4798-a75e-5585a7bf67cc +-define(OVMF_SEV_METADATA_GUID, <<102, 101, 136, 220, 74, 152, 152, 71, + 167, 94, 85, 133, 167, 191, 103, 204>>). + +%% SEV_HASH_TABLE_RV_GUID: 7237551f-3a3b-4b04-927b-1da6efa8d454 +-define(SEV_HASH_TABLE_RV_GUID, <<31, 55, 85, 114, 59, 58, 4, 75, + 146, 123, 29, 166, 239, 168, 212, 84>>). + +%% SEV_ES_RESET_BLOCK_GUID: 00f771de-1a7e-4fcb-890e-68c77e2fb44e +-define(SEV_ES_RESET_BLOCK_GUID, <<222, 113, 247, 0, 126, 26, 203, 79, + 137, 14, 104, 199, 126, 47, 180, 78>>). + diff --git a/src/include/snp_launch_digest.hrl b/src/include/snp_launch_digest.hrl new file mode 100644 index 000000000..16c6fc110 --- /dev/null +++ b/src/include/snp_launch_digest.hrl @@ -0,0 +1,11 @@ +%%% @doc Shared definitions for launch digest computation modules. 
+%%% +%%% This header file contains the gctx record definition and common helper +%%% functions used across launch digest sub-modules. + +%% Record for SEV-SNP launch digest context +-record(gctx, {ld = <<0:?LAUNCH_DIGEST_BITS>> :: binary()}). % ld = launch digest (?LAUNCH_DIGEST_SIZE bytes) + +%% Helper: Convert binary to hex string for logging +-define(BINARY_TO_HEX_STRING(Binary), hb_util:list(hb_util:to_hex(Binary))). + diff --git a/src/snp_certificates.erl b/src/snp_certificates.erl new file mode 100644 index 000000000..1019338e6 --- /dev/null +++ b/src/snp_certificates.erl @@ -0,0 +1,409 @@ +%%% @doc Certificate operations for SNP commitment reports. +%%% +%%% This module handles fetching certificates from AMD KDS (Key Distribution +%%% Service) and converting between PEM and DER certificate formats. + +%%% Certificates are not cached; each fetch goes to the network. +-module(snp_certificates). +-export([fetch_cert_chain/1, fetch_vcek/6, pem_to_der_chain/1, pem_cert_to_der/1, + fetch_verification_certificates/6]). +-include("include/hb.hrl"). +-include("include/snp_constants.hrl"). +-include("include/snp_guids.hrl"). + +%% @doc Fetches the AMD certificate chain (ASK + ARK) for the given SEV product name. +%% @param SevProdName SEV product name (e.g., "Milan"). Defaults to "Milan" if not provided. +%% @returns {ok, CertChainPEM} on success, {error, Reason} on failure +-spec fetch_cert_chain(SevProdName :: undefined | binary() | string()) -> + {ok, binary()} | {error, term()}. 
+fetch_cert_chain(SevProdName) -> + Product = normalize_sev_product(SevProdName), + Path = lists:flatten([?KDS_VCEK_PATH, "/", Product, "/cert_chain"]), + URL = ?KDS_CERT_SITE ++ Path, + ?event(snp, {fetch_cert_chain_http_request, #{ + url => URL, + product => Product + }}), + {TimeMicros, Result} = timer:tc(fun() -> do_http_get(URL) end), + TimeMs = TimeMicros / 1000, + case Result of + {ok, CertChainPEM} = SuccessResult -> + ?event(snp_short, {fetch_cert_chain_success, #{ + size => byte_size(CertChainPEM), + time_ms => TimeMs + }}), + SuccessResult; + Error -> + ?event(snp_error, {fetch_cert_chain_error, #{ + operation => <<"fetch_cert_chain">>, + error => Error, + url => URL, + product => Product, + time_ms => TimeMs, + suggestion => <<"Check network connectivity and AMD KDS availability. Verify product name is correct (e.g., 'Milan').">> + }}), + Error + end. + +%% @doc Fetches the VCEK certificate for the given chip ID and TCB version. +%% @param ChipId 64-byte binary chip ID +%% @param BootloaderSPL Bootloader SPL version (u8, 0-255) +%% @param TeeSPL TEE SPL version (u8, 0-255) +%% @param SnpSPL SNP SPL version (u8, 0-255) +%% @param UcodeSPL Microcode SPL version (u8, 0-255) +%% @param SevProdName Optional SEV product name. Defaults to "Milan". +%% @returns {ok, VcekDER} on success, {error, Reason} on failure +-spec fetch_vcek(ChipId :: binary(), BootloaderSPL :: integer(), + TeeSPL :: integer(), SnpSPL :: integer(), UcodeSPL :: integer(), + SevProdName :: undefined | binary() | string()) -> + {ok, binary()} | {error, term()}. 
+fetch_vcek(ChipId, BootloaderSPL, TeeSPL, SnpSPL, UcodeSPL, SevProdName) -> + case snp_validation:validate_chip_id(ChipId) of + {error, Reason} -> {error, {invalid_chip_id, Reason}}; + {ok, ValidChipId} -> + case snp_validation:validate_spl_values(BootloaderSPL, TeeSPL, SnpSPL, UcodeSPL) of + {error, Reason} -> {error, Reason}; + ok -> + Product = normalize_sev_product(SevProdName), + HwId = hb_util:list(hb_util:to_hex(ValidChipId)), + Path = lists:flatten([ + ?KDS_VCEK_PATH, "/", Product, "/", HwId, + "?blSPL=", hb_util:list(hb_util:bin(BootloaderSPL)), + "&teeSPL=", hb_util:list(hb_util:bin(TeeSPL)), + "&snpSPL=", hb_util:list(hb_util:bin(SnpSPL)), + "&ucodeSPL=", hb_util:list(hb_util:bin(UcodeSPL)) + ]), + URL = ?KDS_CERT_SITE ++ Path, + ?event(snp, {fetch_vcek_http_request, #{ + url => URL, + product => Product, + chip_id_hex => HwId, + spl_values => #{ + bootloader => BootloaderSPL, + tee => TeeSPL, + snp => SnpSPL, + ucode => UcodeSPL + } + }}), + {TimeMicros, Result} = timer:tc(fun() -> do_http_get(URL) end), + TimeMs = TimeMicros / 1000, + case Result of + {ok, VcekDER} = SuccessResult -> + ?event(snp_short, {fetch_vcek_success, #{ + size => byte_size(VcekDER), + time_ms => TimeMs + }}), + SuccessResult; + Error -> + ?event(snp_error, {fetch_vcek_error, #{ + operation => <<"fetch_vcek">>, + error => Error, + url => URL, + time_ms => TimeMs, + suggestion => <<"Check network connectivity and AMD KDS availability. Verify chip ID and SPL values are correct.">> + }}), + Error + end + end + end. + +%% @doc Convert PEM certificate chain to DER-encoded binary. +%% Parses PEM certificates and concatenates their DER encodings. +%% @param CertChainPEM Binary containing PEM-encoded certificates (ASK + ARK) +%% @returns Binary containing concatenated DER-encoded certificates (ASK DER + ARK DER) +-spec pem_to_der_chain(CertChainPEM :: binary()) -> binary() | {error, term()}. 
+pem_to_der_chain(CertChainPEM) -> + % Validate input is binary and appears to be PEM format + case is_binary(CertChainPEM) andalso byte_size(CertChainPEM) > 0 of + false -> + ActualType = snp_util:get_type_name(CertChainPEM), + ActualSize = case is_binary(CertChainPEM) of + true -> byte_size(CertChainPEM); + false -> 0 + end, + ?event(snp_error, {pem_to_der_chain_invalid_input, #{ + operation => <<"pem_to_der_chain">>, + actual_type => ActualType, + actual_size => ActualSize, + expected => <<"non-empty binary">> + }}), + {error, <<"Certificate chain validation failed: expected non-empty binary, got ", + ActualType/binary, " of size ", (hb_util:bin(integer_to_list(ActualSize)))/binary, + ". Ensure the certificate chain is a valid PEM-encoded binary.">>}; + true -> + % Basic PEM format validation (should start with -----BEGIN) + case snp_validation:validate_pem_binary(CertChainPEM) of + {error, Reason} -> + Preview = case byte_size(CertChainPEM) > 50 of + true -> <<(binary:part(CertChainPEM, 0, 50))/binary, <<"...">>/binary>>; + false -> CertChainPEM + end, + ?event(snp_error, {pem_to_der_chain_invalid_format, #{ + operation => <<"pem_to_der_chain">>, + actual_preview => Preview, + expected => <<"PEM format starting with '-----BEGIN'">> + }}), + {error, Reason}; + {ok, _} -> + {PemTimeMicros, PemResult} = timer:tc(fun() -> + try + % Parse PEM certificates using public_key + Certs = public_key:pem_decode(CertChainPEM), + case length(Certs) of + N when N >= ?CERT_CHAIN_MIN_SIZE -> + % Extract certificates and convert to DER format + % Order: ASK first, then ARK (as per SEV spec and PEM order) + DERBinaries = [public_key:der_encode('Certificate', public_key:pem_entry_decode(Cert)) || Cert <- Certs], + % Concatenate DER binaries + << <<DER/binary>> || DER <- DERBinaries >>; + ActualCount -> + ?event(snp_error, {pem_to_der_chain_insufficient_certs, #{ + operation => <<"pem_to_der_chain">>, + actual_count => ActualCount, + expected_min => ?CERT_CHAIN_MIN_SIZE, + expected_certs => 
<<"ASK + ARK">> + }}), + {error, <<"Certificate chain validation failed: expected at least ", + (hb_util:bin(integer_to_list(?CERT_CHAIN_MIN_SIZE)))/binary, + " certificates (ASK + ARK), got ", + (hb_util:bin(integer_to_list(ActualCount)))/binary, + ". Ensure the certificate chain contains both ASK and ARK certificates.">>} + end + catch + Error:Reason -> + ?event(snp_error, {pem_to_der_chain_parse_error, #{ + operation => <<"pem_to_der_chain">>, + error => Error, + reason => Reason, + suggestion => <<"Check that the PEM data is valid and properly formatted. Each certificate should be between '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers.">> + }}), + {error, {pem_parse_error, Error, Reason}} + end + end), + PemTimeMs = PemTimeMicros / 1000, + ?event(snp, {pem_to_der_chain_time_ms, PemTimeMs}), + PemResult + end + end. + +%% @doc Convert a single PEM certificate to DER. +%% @param CertPEM Binary containing PEM-encoded certificate +%% @returns Binary containing DER-encoded certificate +-spec pem_cert_to_der(CertPEM :: binary()) -> binary() | {error, term()}. +pem_cert_to_der(CertPEM) -> + % Validate input is binary and appears to be PEM format + case is_binary(CertPEM) andalso byte_size(CertPEM) > 0 of + false -> + ActualType = snp_util:get_type_name(CertPEM), + ActualSize = case is_binary(CertPEM) of + true -> byte_size(CertPEM); + false -> 0 + end, + ?event(snp_error, {pem_cert_to_der_invalid_input, #{ + operation => <<"pem_cert_to_der">>, + actual_type => ActualType, + actual_size => ActualSize, + expected => <<"non-empty binary">> + }}), + {error, <<"Certificate validation failed: expected non-empty binary, got ", + ActualType/binary, " of size ", (hb_util:bin(integer_to_list(ActualSize)))/binary, + ". 
Ensure the certificate is a valid PEM-encoded binary.">>}; + true -> + % Basic PEM format validation + case snp_validation:validate_pem_binary(CertPEM) of + {error, Reason} -> + Preview = case byte_size(CertPEM) > 50 of + true -> <<(binary:part(CertPEM, 0, 50))/binary, <<"...">>/binary>>; + false -> CertPEM + end, + ?event(snp_error, {pem_cert_to_der_invalid_format, #{ + operation => <<"pem_cert_to_der">>, + actual_preview => Preview, + expected => <<"PEM format starting with '-----BEGIN'">> + }}), + {error, Reason}; + {ok, _} -> + try + Certs = public_key:pem_decode(CertPEM), + case length(Certs) of + ?CERT_SINGLE -> + [Cert] = Certs, + CertDER = public_key:pem_entry_decode(Cert), + public_key:der_encode('Certificate', CertDER); + 0 -> + ?event(snp_error, {pem_cert_to_der_no_certs, #{ + operation => <<"pem_cert_to_der">>, + actual_count => 0, + expected => <<"exactly 1 certificate">> + }}), + {error, <<"Certificate parsing failed: PEM data contains no certificates. Ensure the PEM data includes a certificate between '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers.">>}; + ActualCount -> + ?event(snp_error, {pem_cert_to_der_multiple_certs, #{ + operation => <<"pem_cert_to_der">>, + actual_count => ActualCount, + expected => <<"exactly 1 certificate">>, + suggestion => <<"Use pem_to_der_chain/1 for multiple certificates">> + }}), + {error, <<"Certificate parsing failed: expected exactly 1 certificate, got ", + (hb_util:bin(integer_to_list(ActualCount)))/binary, + ". For multiple certificates, use pem_to_der_chain/1 instead.">>} + end + catch + Error:Reason -> + ?event(snp_error, {pem_cert_to_der_parse_error, #{ + operation => <<"pem_cert_to_der">>, + error => Error, + reason => Reason, + suggestion => <<"Check that the PEM data is valid and properly formatted. The certificate should be between '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers.">> + }}), + {error, {pem_parse_error, Error, Reason}} + end + end + end. 
+
+%% Helper to normalize SEV product name to list format.
+%% undefined and empty values fall back to the default product name.
+-spec normalize_sev_product(undefined | binary() | string()) -> string().
+normalize_sev_product(undefined) -> ?DEFAULT_SEV_PRODUCT;
+normalize_sev_product(<<>>) -> ?DEFAULT_SEV_PRODUCT;
+normalize_sev_product("") -> ?DEFAULT_SEV_PRODUCT;
+normalize_sev_product(P) when is_binary(P) -> hb_util:list(P);
+normalize_sev_product(P) when is_list(P) -> P.
+
+%% Internal helper to make HTTP GET requests.
+%% Uses hb_http_client for consistency with HyperBEAM HTTP infrastructure.
+%% @param URL The URL to fetch (binary or string; strings are converted)
+%% @returns {ok, Body} on HTTP 200, {error, Reason} otherwise
+-spec do_http_get(URL :: binary() | string()) -> {ok, binary()} | {error, term()}.
+do_http_get(URL) when is_list(URL) ->
+    do_http_get(hb_util:bin(URL));
+do_http_get(URL) when is_binary(URL) ->
+    % Validate URL is not empty
+    case byte_size(URL) > 0 of
+        false ->
+            ?event(snp_error, {do_http_get_empty_url, #{
+                operation => <<"do_http_get">>,
+                actual => <<"empty binary">>,
+                expected => <<"non-empty URL string or binary">>
+            }}),
+            {error, <<"HTTP request failed: URL cannot be empty. Provide a valid URL string or binary.">>};
+        true ->
+            case uri_string:parse(URL) of
+                #{scheme := Scheme, host := Host} = URI ->
+                    % URL is a binary here, so uri_string:parse/1 returns
+                    % binary components; only binary scheme values can occur.
+                    {Port, SchemePrefix} = case Scheme of
+                        <<"https">> -> {?HTTP_PORT_HTTPS, <<"https://">>};
+                        _ -> {?HTTP_PORT_HTTP, <<"http://">>}
+                    end,
+                    HostBin = hb_util:bin(Host),
+                    Peer = <<SchemePrefix/binary, HostBin/binary, ":", (hb_util:bin(Port))/binary>>,
+                    % An absent or empty path must be sent as "/".
+                    Path = case hb_maps:get(path, URI, <<"/">>, #{}) of
+                        <<>> -> <<"/">>;
+                        P -> P
+                    end,
+                    Query = hb_maps:get(query, URI, undefined, #{}),
+                    FullPath = case Query of
+                        undefined -> Path;
+                        <<>> -> Path;
+                        "" -> Path;
+                        Q when is_binary(Q) -> <<Path/binary, "?", Q/binary>>;
+                        Q when is_list(Q) -> <<Path/binary, "?", (hb_util:bin(Q))/binary>>
+                    end,
+                    Request = #{
+                        peer => Peer,
+                        method => <<"GET">>,
+                        path => FullPath,
+                        headers => #{},
+                        body => <<>>
+                    },
+                    ?event(snp, {do_http_get_request, #{
+                        url => URL,
+                        peer => Peer,
+                        path => FullPath
+                    }}),
+                    case hb_http_client:request(Request, #{}) of
+                        {ok, ?HTTP_STATUS_OK, _Headers, Body} ->
+                            ?event(snp_short, {do_http_get_success, byte_size(Body)}),
+                            {ok, Body};
+                        {ok, Status, _Headers, _Body} ->
+                            ?event(snp_error, {do_http_get_status_error, #{
+                                operation => <<"do_http_get">>,
+                                url => URL,
+                                actual_status => Status,
+                                expected_status => ?HTTP_STATUS_OK,
+                                suggestion => <<"Check if the URL is correct and the server is responding. Status codes: 404=not found, 500=server error, etc.">>
+                            }}),
+                            {error, {http_error, Status}};
+                        {error, Reason} ->
+                            ?event(snp_error, {do_http_get_request_error, #{
+                                operation => <<"do_http_get">>,
+                                url => URL,
+                                error => Reason,
+                                suggestion => <<"Check network connectivity, DNS resolution, and firewall settings. Verify the URL is accessible.">>
+                            }}),
+                            {error, Reason}
+                    end;
+                Error ->
+                    ?event(snp_error, {do_http_get_invalid_url, #{
+                        operation => <<"do_http_get">>,
+                        url => URL,
+                        parse_error => Error,
+                        expected => <<"valid URL with scheme and host (e.g., 'https://example.com/path')">>
+                    }}),
+                    {error, {invalid_url, Error}}
+            end
+    end;
+do_http_get(InvalidURL) ->
+    % Non-binary, non-list input: report the actual type for diagnostics.
+    ActualType = case is_binary(InvalidURL) of
+        true -> <<"binary">>;
+        false -> case is_list(InvalidURL) of
+            true -> <<"list">>;
+            false -> <<"other">>
+        end
+    end,
+    ?event(snp_error, {do_http_get_invalid_type, #{
+        operation => <<"do_http_get">>,
+        actual_type => ActualType,
+        expected => <<"binary or string (list)">>
+    }}),
+    {error, <<"HTTP request failed: URL must be a binary or string, got ",
+        ActualType/binary, ". Convert the URL to a binary or string before calling.">>}.
+
+%% @doc Fetch both certificate chain and VCEK for verification.
+%% This is a convenience function that fetches both certificates needed for
+%% report signature verification in a single call.
+%% @param ChipId The chip ID (64 bytes)
+%% @param BootloaderSPL Bootloader SPL value (0-255)
+%% @param TeeSPL TEE SPL value (0-255)
+%% @param SnpSPL SNP SPL value (0-255)
+%% @param UcodeSPL Microcode SPL value (0-255)
+%% @returns {ok, {CertChainPEM, VcekDER}} on success, {error, Reason} when a fetch fails
+-spec fetch_verification_certificates(ChipId :: binary(), BootloaderSPL :: integer(),
+    TeeSPL :: integer(), SnpSPL :: integer(), UcodeSPL :: integer(), NodeOpts :: map()) ->
+    {ok, {binary(), binary()}} | {error, term()}.
+fetch_verification_certificates(ChipId, BootloaderSPL, TeeSPL, SnpSPL, UcodeSPL, NodeOpts) ->
+    ?event(snp_short, {fetching_cert_chain_start}),
+    % The CPU family selects which KDS endpoints to use for both fetches.
+    Family = hb_opts:get(<<"cpu_family">>, undefined, NodeOpts),
+    case fetch_cert_chain(Family) of
+        {error, ChainReason} ->
+            ?event(snp_error, {fetch_verification_certificates_cert_chain_failed, #{reason => ChainReason}}),
+            {error, ChainReason};
+        {ok, CertChainPEM} ->
+            ?event(snp_short, {cert_chain_fetched, byte_size(CertChainPEM)}),
+            ?event(snp, {fetching_vcek_start, #{
+                chip_id => hb_util:to_hex(ChipId),
+                bootloader => BootloaderSPL,
+                tee => TeeSPL,
+                snp => SnpSPL,
+                microcode => UcodeSPL
+            }}),
+            case fetch_vcek(ChipId, BootloaderSPL, TeeSPL, SnpSPL, UcodeSPL, Family) of
+                {error, VcekReason} ->
+                    ?event(snp_error, {fetch_verification_certificates_vcek_failed, #{reason => VcekReason}}),
+                    {error, VcekReason};
+                {ok, VcekDER} ->
+                    ?event(snp_short, {vcek_fetched, byte_size(VcekDER)}),
+                    {ok, {CertChainPEM, VcekDER}}
+            end
+    end.
+
diff --git a/src/snp_generate.erl b/src/snp_generate.erl
new file mode 100644
index 000000000..e3b08ebee
--- /dev/null
+++ b/src/snp_generate.erl
@@ -0,0 +1,249 @@
+%%% @doc Generation functions for SNP commitment reports.
+%%%
+%%% This module handles the generation of SNP attestation reports, including
+%%% wallet validation, nonce generation, report creation, and message packaging.
+-module(snp_generate).
+-export([generate/3]).
+-include("include/hb.hrl").
+-include("include/snp_constants.hrl").
+
+%% Type definitions
+-type report_message() :: map(). % Report message map with keys: local-hashes, nonce, address, node-message, report
+
+%% Helper function to validate configuration options
+-spec validate_generate_config(Opts :: map()) -> {ok, map()} | {error, term()}.
+validate_generate_config(Opts) ->
+    maybe
+        % Both the wallet and the trusted-software list are mandatory.
+        {ok, _} ?= validate_wallet(Opts),
+        {ok, _} ?= validate_snp_trusted(Opts),
+        {ok, Opts}
+    else
+        {error, Reason} -> {error, Reason};
+        Error -> {error, {config_validation_error, Error}}
+    end.
+
+%% Helper function to validate wallet configuration.
+%% Wallets are tuples: {{KeyType, Priv, Pub}, {KeyType, Pub}}
+-spec validate_wallet(Opts :: map()) -> {ok, tuple()} | {error, term()}.
+validate_wallet(Opts) ->
+    case hb_opts:get(priv_wallet, no_viable_wallet, Opts) of
+        no_viable_wallet ->
+            ?event(snp_error, {config_validation_failed, #{
+                option => <<"priv_wallet">>,
+                reason => <<"no_viable_wallet">>,
+                expected => <<"A valid cryptographic wallet tuple">>,
+                suggestion => <<"Ensure priv_wallet is provided in the configuration options or can be created automatically.">>
+            }}),
+            {error, {missing_wallet, <<"priv_wallet is required but not available">>}};
+        Candidate when is_tuple(Candidate), tuple_size(Candidate) =:= 2 ->
+            % Probe the wallet by deriving its address; a failure here means
+            % the tuple is not a usable wallet.
+            try
+                _Address = ar_wallet:to_address(Candidate),
+                ?event(snp, {wallet_validated, #{is_tuple => true}}),
+                {ok, Candidate}
+            catch
+                _:_ ->
+                    ActualType = snp_util:get_type_name(Candidate),
+                    ?event(snp_error, {config_validation_failed, #{
+                        option => <<"priv_wallet">>,
+                        actual_type => ActualType,
+                        expected => <<"valid wallet tuple">>,
+                        suggestion => <<"priv_wallet must be a valid wallet tuple from ar_wallet:new() or ar_wallet:load_keyfile().">>
+                    }}),
+                    {error, {invalid_wallet_type, <<"priv_wallet must be a valid wallet tuple">>}}
+            end;
+        Invalid ->
+            ActualType = snp_util:get_type_name(Invalid),
+            ?event(snp_error, {config_validation_failed, #{
+                option => <<"priv_wallet">>,
+                actual_type => ActualType,
+                expected => <<"wallet tuple">>,
+                suggestion => <<"priv_wallet must be a wallet tuple (from ar_wallet:new() or ar_wallet:load_keyfile()).">>
+            }}),
+            {error, {invalid_wallet_type, <<"priv_wallet must be a wallet tuple">>}}
+    end.
+
+%% Helper function to validate snp_trusted configuration
+-spec validate_snp_trusted(Opts :: map()) -> {ok, [map()]} | {error, term()}.
+validate_snp_trusted(Opts) ->
+    case hb_opts:get(snp_trusted, [#{}], Opts) of
+        [] ->
+            ?event(snp_error, {config_validation_failed, #{
+                option => <<"snp_trusted">>,
+                reason => <<"empty_list">>,
+                expected => <<"Non-empty list of trusted software configuration maps">>,
+                suggestion => <<"snp_trusted must contain at least one trusted software configuration map.">>
+            }}),
+            {error, {empty_trusted_configs, <<"snp_trusted cannot be empty">>}};
+        Configs when is_list(Configs) ->
+            % Check every entry in the list individually.
+            validate_trusted_configs_list(Configs, 0);
+        Invalid ->
+            ActualType = snp_util:get_type_name(Invalid),
+            ?event(snp_error, {config_validation_failed, #{
+                option => <<"snp_trusted">>,
+                actual_type => ActualType,
+                expected => <<"list of maps">>,
+                suggestion => <<"snp_trusted must be a list of maps, each containing trusted software configuration.">>
+            }}),
+            {error, {invalid_trusted_type, <<"snp_trusted must be a list">>}}
+    end.
+
+%% Helper function to validate each trusted config in the list
+-spec validate_trusted_configs_list(TrustedList :: [map()], Index :: non_neg_integer()) ->
+    {ok, [map()]} | {error, term()}.
+validate_trusted_configs_list(TrustedList, StartIndex) ->
+    validate_trusted_configs_list(TrustedList, StartIndex, []).
+
+validate_trusted_configs_list([], _Index, Acc) ->
+    {ok, lists:reverse(Acc)};
+validate_trusted_configs_list([Config | Rest], Index, Acc) when is_map(Config) ->
+    % A config counts as non-empty when it has at least one binary- or
+    % atom-keyed entry. lists:any/2 short-circuits on the first such key,
+    % avoiding building intermediate key lists just to take their length.
+    case lists:any(fun(K) -> is_binary(K) orelse is_atom(K) end, maps:keys(Config)) of
+        true ->
+            % Accumulate the validated config and continue.
+            validate_trusted_configs_list(Rest, Index + 1, [Config | Acc]);
+        false ->
+            ?event(snp_error, {config_validation_failed, #{
+                option => <<"snp_trusted">>,
+                index => Index,
+                reason => <<"empty_config_map">>,
+                expected => <<"Map with at least one configuration key">>,
+                suggestion => <<"Each trusted software configuration must contain at least one key (e.g., firmware, kernel, vcpus, etc.).">>
+            }}),
+            {error, {empty_trusted_config, Index, <<"Trusted config at index ", (hb_util:bin(integer_to_list(Index)))/binary, " is empty">>}}
+    end;
+validate_trusted_configs_list([Config | _Rest], Index, _Acc) ->
+    % Non-map element: report its type and the offending index.
+    ActualType = snp_util:get_type_name(Config),
+    ?event(snp_error, {config_validation_failed, #{
+        option => <<"snp_trusted">>,
+        index => Index,
+        actual_type => ActualType,
+        expected => <<"map">>,
+        suggestion => <<"Each element in snp_trusted must be a map containing trusted software configuration.">>
+    }}),
+    {error, {invalid_trusted_config_type, Index, <<"Config at index ", (hb_util:bin(integer_to_list(Index)))/binary, " must be a map">>}}.
+
+%% Helper function to generate attestation report via NIF only (no mock fallback).
+%% If the NIF is not loaded, returns {error, nif_not_loaded} so production never
+%% uses process-dictionary or fake report data.
+-spec generate_attestation_report(ReportData :: binary()) -> {ok, binary()} | {error, term()}.
+generate_attestation_report(ReportData) ->
+    % Time the NIF call so report generation latency is observable.
+    {ElapsedUs, Outcome} = timer:tc(fun() ->
+        try
+            snp_nif:generate_attestation_report(
+                ReportData,
+                ?REPORT_DATA_VERSION
+            )
+        catch
+            error:{nif_error, _} ->
+                % The NIF reported that it could not service the call.
+                ?event(snp_short, {nif_not_loaded, #{operation => <<"generate_attestation_report">>}}),
+                {error, nif_not_loaded};
+            error:undef ->
+                % NIF not loaded: stubs raise undef when NIF module load failed
+                ?event(snp_short, {nif_not_loaded, #{operation => <<"generate_attestation_report">>}}),
+                {error, nif_not_loaded}
+        end
+    end),
+    ?event(snp_short, {report_generation_time_ms, ElapsedUs / 1000}),
+    Outcome.
+
+%% Helper function to convert report binary to JSON map.
+%% Accepts both tagged ({ok, Map} / {error, _}) and bare-map NIF returns.
+-spec convert_report_binary_to_json(ReportBinary :: binary()) -> {ok, map()} | {error, term()}.
+convert_report_binary_to_json(ReportBinary) ->
+    case snp_nif:report_binary_to_json(ReportBinary) of
+        {ok, Map} -> {ok, Map};
+        {error, ConvertReason} -> {error, {report_conversion_failed, ConvertReason}};
+        Map when is_map(Map) -> {ok, Map};
+        Unexpected -> {error, {unexpected_report_format, Unexpected}}
+    end.
+
+%% @doc Generate an AMD SEV-SNP commitment report and emit it as a message.
+%%
+%% Produces a hardware-backed attestation report containing everything needed
+%% to validate the node's identity and software configuration. Steps:
+%% 1. Load and validate the provided configuration options
+%% 2. Validate the node's cryptographic wallet
+%% 3. Derive a unique nonce from the node address and public node-message ID
+%% 4. Extract the trusted software configuration from the local options
+%% 5. Generate the hardware attestation report via the NIF interface
+%% 6. Package the report with all verification data into a message
+%%
+%% Required configuration in Opts map:
+%% - priv_wallet: Node's cryptographic wallet (created if not provided)
+%% - snp_trusted: List of trusted software configurations (represents the
+%% configuration of the local node generating the report)
+%%
+%% @param _M1 Ignored parameter (for compatibility with dev_message interface)
+%% @param _M2 Ignored parameter (for compatibility with dev_message interface)
+%% @param Opts A map of configuration options for report generation:
+%% - priv_wallet: map() - Node's cryptographic wallet (created if not provided)
+%% - snp_trusted: [map()] - List of trusted software configurations
+%% @returns `{ok, Map}' on success with the complete report message containing:
+%% - <<"local-hashes">>: map() - Trusted software hashes
+%% - <<"nonce">>: binary() - Encoded nonce
+%% - <<"address">>: binary() - Node address
+%% - <<"node-message">>: map() - Node message
+%% - <<"report">>: binary() - JSON-encoded SNP report
+%% or `{error, Reason}' on failure with error details
+-spec generate(M1 :: term(), M2 :: term(), Opts :: map()) ->
+    {ok, report_message()} | {error, term()}.
+generate(_M1, _M2, Opts) ->
+    maybe
+        Loaded = hb_cache:ensure_all_loaded(Opts, Opts),
+        % Validate configuration options up front.
+        {ok, _} ?= validate_generate_config(Loaded),
+        % Re-fetch the wallet to bind it for address derivation.
+        {ok, Wallet} ?= validate_wallet(Loaded),
+        % Derive the node address and the public node message.
+        Address = hb_util:human_id(ar_wallet:to_address(Wallet)),
+        NodeMsg = hb_private:reset(Loaded),
+        {ok, PublicNodeMsgID} ?= dev_message:id(
+            NodeMsg,
+            #{ <<"committers">> => <<"none">> },
+            Loaded
+        ),
+        RawPublicNodeMsgID = hb_util:native_id(PublicNodeMsgID),
+        % Build the nonce that binds this report to the node identity.
+        ?event(snp_short, {snp_address, byte_size(Address)}),
+        ReportData = snp_nonce:generate_nonce(Address, RawPublicNodeMsgID),
+        ?event(snp_short, {snp_report_data, byte_size(ReportData)}),
+        % Extract the local hashes (already validated by validate_generate_config).
+        {ok, TrustedConfigs} ?= validate_snp_trusted(Loaded),
+        {ok, LocalHashes} ?=
+            case TrustedConfigs of
+                [Primary | _] -> {ok, Primary};
+                _ -> {error, invalid_trusted_configs_format}
+            end,
+        % Generate the hardware attestation report.
+        {ok, ReportBinary} ?= generate_attestation_report(ReportData),
+        % Convert the binary report to JSON for storage/transmission.
+        {ok, ReportMap} ?= convert_report_binary_to_json(ReportBinary),
+        ReportJSON = hb_json:encode(ReportMap),
+        ?event(snp_short, {snp_report_generated, #{report_size => byte_size(ReportJSON)}}),
+        % Package the complete report message.
+        {ok, #{
+            <<"local-hashes">> => LocalHashes,
+            <<"nonce">> => hb_util:encode(ReportData),
+            <<"address">> => Address,
+            <<"node-message">> => NodeMsg,
+            <<"report">> => ReportJSON
+        }}
+    else
+        {error, GenerateError} -> {error, GenerateError};
+        GenerateError -> {error, GenerateError}
+    end.
+
diff --git a/src/snp_launch_digest.erl b/src/snp_launch_digest.erl
new file mode 100644
index 000000000..ff2d82dda
--- /dev/null
+++ b/src/snp_launch_digest.erl
@@ -0,0 +1,247 @@
+%%% @doc Launch digest computation for SNP commitment reports.
+%%%
+%%% This module orchestrates the computation of launch digests for AMD SEV-SNP
+%%% attestation reports, delegating to specialized sub-modules for OVMF parsing,
+%%% VMSA page creation, and launch digest calculation.
+-module(snp_launch_digest).
+-export([compute_launch_digest/1]).
+-include("include/hb.hrl").
+-include("include/snp_constants.hrl").
+-include("include/snp_launch_digest.hrl").
+-include("include/snp_guids.hrl").
+
+%% Type definitions
+-type gctx() :: #gctx{}.
+-type vmm_type() :: ?VMM_TYPE_QEMU | ?VMM_TYPE_EC2.
+-type vcpu_type() :: integer(). % VCPU type identifier (0=Epyc, 1=EpycV1, etc.)
+-type guest_features() :: non_neg_integer(). % Guest features flags
+-type launch_digest_args() :: #{
+    vcpus => integer(),
+    vcpu_type => integer(),
+    vmm_type => ?VMM_TYPE_QEMU | ?VMM_TYPE_EC2,
+    guest_features => non_neg_integer(),
+    firmware => undefined | binary() | list(),
+    kernel => undefined | binary(),
+    initrd => undefined | binary(),
+    append => undefined | binary(),
+    sev_hashes_gpa => non_neg_integer()
+}.
+
+%% @doc Compute launch digest - pure Erlang implementation.
+%% @param Args Map containing: vcpus, vcpu_type, vmm_type, guest_features, firmware, kernel, initrd, append, sev_hashes_gpa
+%% @returns {ok, Digest} where Digest is ?LAUNCH_DIGEST_SIZE-byte binary, or {error, invalid_args} if Args is not a map
+-spec compute_launch_digest(Args :: map() | term()) ->
+    {ok, binary()} | {error, invalid_args}.
+compute_launch_digest(Args) when is_map(Args) ->
+    % Only maps are accepted; everything else is rejected before any work.
+    compute_launch_digest_erlang(Args);
+compute_launch_digest(_Other) ->
+    {error, invalid_args}.
+
+%% @doc Compute launch digest - pure Erlang implementation.
+%% @param Args Map containing launch digest parameters:
+%%   - vcpus: non_neg_integer() - Number of VCPUs
+%%   - vcpu_type: integer() - VCPU type identifier
+%%   - vmm_type: integer() - VMM type (1=QEMU, 2=EC2)
+%%   - guest_features: non_neg_integer() - Guest features flags
+%%   - firmware: undefined | binary() | list() - Firmware hash (optional)
+%%   - kernel: undefined | binary() - Kernel hash (optional)
+%%   - initrd: undefined | binary() - Initrd hash (optional)
+%%   - append: undefined | binary() - Append hash (optional)
+%%   - sev_hashes_gpa: non_neg_integer() - SEV hashes table GPA (optional, defaults to 0)
+%% @returns {ok, Digest} where Digest is ?LAUNCH_DIGEST_SIZE-byte binary, or
+%%   {error, {computation_failed, Error, Reason}} on failure
+-spec compute_launch_digest_erlang(Args :: map()) ->
+    {ok, binary()} | {error, {computation_failed, term(), term()}}.
+compute_launch_digest_erlang(Args) ->
+    ?event(snp_short, {compute_launch_digest_erlang_start, Args}),
+    % Run the full computation under timer:tc so its duration is logged; any
+    % crash inside the steps is converted into a tagged error tuple.
+    {Micros, Outcome} = timer:tc(fun() ->
+        try
+            compute_launch_digest_steps(Args)
+        catch
+            Error:Reason ->
+                ?event(snp_error, {compute_launch_digest_erlang_error, #{error => Error, reason => Reason}}),
+                {error, {computation_failed, Error, Reason}}
+        end
+    end),
+    ?event(snp_short, {compute_launch_digest_time_ms, Micros / 1000}),
+    Outcome.
+
+%% Helper function to execute launch digest computation steps
+-spec compute_launch_digest_steps(Args :: map()) -> {ok, binary()} | {error, term()}.
+compute_launch_digest_steps(Args) ->
+    % Pull all parameters out of the argument map in one step.
+    {VCPUs, VCPUType, VMMType, GuestFeatures, FirmwareHash, KernelHash, InitrdHash, AppendHash, SevHashesGPA} =
+        extract_launch_digest_params(Args),
+    % Reject invalid vcpus to prevent huge list allocation / DoS (finding #7).
+    VcpusOk = is_integer(VCPUs) andalso VCPUs >= 1 andalso VCPUs =< ?MAX_VCPUS,
+    VcpusOk orelse erlang:error({invalid_vcpus, VCPUs}, [VCPUs]),
+    % Seed the GCTX from the OVMF firmware hash (or zeros when absent).
+    GCTX0 = initialize_gctx_from_firmware(FirmwareHash),
+
+    % Fold the OVMF metadata into the digest and obtain the reset EIP.
+    {GCTX1, ResetEIP} = process_ovmf_metadata(GCTX0, VMMType, KernelHash, InitrdHash, AppendHash, SevHashesGPA),
+
+    % Fold the VMSA pages (one BSP page, VCPUs-1 AP pages) into the digest.
+    GCTX2 = create_and_update_vmsa_pages(GCTX1, VCPUs, VCPUType, VMMType, GuestFeatures, ResetEIP),
+
+    % Emit the final digest.
+    FinalLDHex = snp_util:binary_to_hex_string(GCTX2#gctx.ld),
+    ?event(snp_short, {compute_launch_digest_erlang_success, #{
+        digest_size => byte_size(GCTX2#gctx.ld),
+        digest_hex => FinalLDHex
+    }}),
+    {ok, GCTX2#gctx.ld}.
+
+%% Helper function to process OVMF metadata
+-spec process_ovmf_metadata(GCTX :: #gctx{}, VMMType :: integer(),
+    KernelHash :: undefined | binary(), InitrdHash :: undefined | binary(),
+    AppendHash :: undefined | binary(), SevHashesGPA :: non_neg_integer()) ->
+    {#gctx{}, ResetEIP :: non_neg_integer()}.
+process_ovmf_metadata(GCTX, VMMType, KernelHash, InitrdHash, AppendHash, SevHashesGPA) ->
+    ?event(snp_short, {parsing_ovmf_metadata, #{vmm_type => VMMType, sev_hashes_gpa => SevHashesGPA}}),
+    % Delegate OVMF parsing and digest updates to the OVMF sub-module.
+    {UpdatedGCTX, ResetEIP} = snp_launch_digest_ovmf:parse_and_update_ovmf_metadata_erlang(
+        GCTX, VMMType, KernelHash, InitrdHash, AppendHash, SevHashesGPA),
+    ?event(snp_short, {ovmf_metadata_parsed, #{
+        ld_size => byte_size(UpdatedGCTX#gctx.ld),
+        ld_hex => snp_util:binary_to_hex_string(UpdatedGCTX#gctx.ld),
+        reset_eip => ResetEIP
+    }}),
+    {UpdatedGCTX, ResetEIP}.
+
+%% Helper function to create VMSA pages and update GCTX
+-spec create_and_update_vmsa_pages(GCTX :: gctx(), VCPUs :: integer(), VCPUType :: vcpu_type(),
+    VMMType :: vmm_type(), GuestFeatures :: guest_features(), ResetEIP :: non_neg_integer()) -> gctx().
+create_and_update_vmsa_pages(GCTX, VCPUs, VCPUType, VMMType, GuestFeatures, ResetEIP) ->
+    % Build the BSP and AP VMSA pages using the reset EIP taken from OVMF
+    % (matching the Rust reference implementation).
+    ?event(snp_short, {creating_vmsa_pages, #{vcpu_type => VCPUType, vmm_type => VMMType, guest_features => GuestFeatures, reset_eip => ResetEIP}}),
+    {BSPVMSA, APVMSA} = snp_launch_digest_vmsa:create_vmsa_pages_erlang(
+        ResetEIP, VCPUType, VMMType, GuestFeatures),
+    ?event(snp_short, {vmsa_pages_created, #{bsp_size => byte_size(BSPVMSA), ap_size => byte_size(APVMSA)}}),
+
+    % Fold the VMSA pages into the running launch digest, one per VCPU.
+    ?event(snp_short, {updating_with_vmsa_pages, #{vcpus => VCPUs}}),
+    UpdatedGCTX = snp_launch_digest_gctx:update_with_vmsa_pages(GCTX, VCPUs, BSPVMSA, APVMSA),
+    ?event(snp_short, {vmsa_pages_updated, #{
+        ld_size => byte_size(UpdatedGCTX#gctx.ld),
+        ld_hex => snp_util:binary_to_hex_string(UpdatedGCTX#gctx.ld)
+    }}),
+    UpdatedGCTX.
+
+%% Helper function to extract launch digest parameters from Args map.
+%% Args may have binary or atom keys; we use binary keys as canonical.
+-spec extract_launch_digest_params(Args :: launch_digest_args()) ->
+    {integer(), vcpu_type(), vmm_type(), guest_features(), undefined | binary() | list(),
+     undefined | binary(), undefined | binary(), undefined | binary(), non_neg_integer()}.
+extract_launch_digest_params(Args) when is_map(Args) ->
+    VCPUs = arg_get(Args, <<"vcpus">>, undefined),
+    VCPUType = arg_get(Args, <<"vcpu_type">>, undefined),
+    VMMType = arg_get(Args, <<"vmm_type">>, undefined),
+    GuestFeatures = arg_get(Args, <<"guest_features">>, 0),
+    FirmwareHash = arg_get(Args, <<"firmware">>, undefined),
+    KernelHash = arg_get(Args, <<"kernel">>, undefined),
+    InitrdHash = arg_get(Args, <<"initrd">>, undefined),
+    AppendHash = arg_get(Args, <<"append">>, undefined),
+    SevHashesGPA = arg_get(Args, <<"sev_hashes_gpa">>, 0),
+    ?event(snp, {extracted_params, #{vcpus => VCPUs, vcpu_type => VCPUType, vmm_type => VMMType, guest_features => GuestFeatures}}),
+    % Log hash arguments by size only (via describe_hash/1) to keep events small.
+    ?event(snp_short, {extracted_hashes, #{
+        firmware => describe_hash(FirmwareHash),
+        kernel => describe_hash(KernelHash),
+        initrd => describe_hash(InitrdHash),
+        append => describe_hash(AppendHash),
+        sev_hashes_gpa => SevHashesGPA
+    }}),
+    {VCPUs, VCPUType, VMMType, GuestFeatures, FirmwareHash, KernelHash, InitrdHash, AppendHash, SevHashesGPA}.
+
+%% Summarize a hash argument for logging: undefined passes through, binaries
+%% are reduced to {size, Bytes}, any other term is echoed unchanged.
+describe_hash(undefined) -> undefined;
+describe_hash(Hash) when is_binary(Hash) -> {size, byte_size(Hash)};
+describe_hash(Other) -> Other.
+
+%% Get Arg by binary key, fallback to atom key (for callers that pass atom-key maps).
+%% binary_to_existing_atom is used deliberately so untrusted keys can never
+%% grow the atom table; a missing atom simply yields the default.
+arg_get(Args, BinKey, Default) when is_map(Args) ->
+    case maps:find(BinKey, Args) of
+        {ok, V} -> V;
+        error ->
+            try maps:get(binary_to_existing_atom(BinKey, utf8), Args)
+            catch _:_ -> Default
+            end
+    end.
+
+%% Helper function to initialize GCTX from firmware hash.
+%% Accepts the hash as a binary (raw 48-byte digest or 96-char hex string),
+%% a list (converted to binary first), or undefined (zero-initialized).
+-spec initialize_gctx_from_firmware(FirmwareHash :: undefined | binary() | list()) -> gctx().
+initialize_gctx_from_firmware(FirmwareHash) ->
+    % Log only the size of a binary hash, never its full contents.
+    FirmwareHashInfo = case FirmwareHash of
+        undefined -> undefined;
+        FH when is_binary(FH) -> {size, byte_size(FH)};
+        _ -> FirmwareHash
+    end,
+    ?event(snp_short, {initializing_gctx, #{firmware_hash => FirmwareHashInfo}}),
+    GCTX = case FirmwareHash of
+        undefined ->
+            ?event(snp_short, gctx_init_with_zeros),
+            % When firmware hash is not provided, initialize with zeros.
+            % Then we'll update with full OVMF data in parse_ovmf_and_update
+            % (matching Rust: gctx.update_page(PageType::Normal, ovmf.gpa(), Some(ovmf.data()), None)?)
+            snp_launch_digest_gctx:init_gctx();
+        Hash when is_binary(Hash) ->
+            HashSize = byte_size(Hash),
+            ?event(snp_short, {gctx_init_with_binary, #{size => HashSize}}),
+            case HashSize of
+                ?HEX_STRING_48_BYTES ->
+                    % 96 hex characters: decode to the raw 48-byte digest.
+                    ?event(snp_short, gctx_init_from_hex_96),
+                    case snp_util:hex_to_binary(Hash) of
+                        {ok, B} -> snp_launch_digest_gctx:init_gctx_with_seed(B);
+                        {error, invalid_hex} -> erlang:error(invalid_hex)
+                    end;
+                ?LAUNCH_DIGEST_SIZE ->
+                    ?event(snp_short, gctx_init_from_binary_48),
+                    snp_launch_digest_gctx:init_gctx_with_seed(Hash);
+                _ ->
+                    % Unrecognized size: fall back to a zero-initialized context.
+                    ?event(snp_short, {gctx_init_fallback_to_zeros, #{size => HashSize}}),
+                    snp_launch_digest_gctx:init_gctx()
+            end;
+        Hash when is_list(Hash) ->
+            HashBin = hb_util:bin(Hash),
+            HashSize = byte_size(HashBin),
+            ?event(snp_short, {gctx_init_with_list, #{size => HashSize}}),
+            case HashSize of
+                ?HEX_STRING_48_BYTES ->
+                    ?event(snp, gctx_init_from_hex_96_list),
+                    case snp_util:hex_to_binary(HashBin) of
+                        {ok, B} -> snp_launch_digest_gctx:init_gctx_with_seed(B);
+                        {error, invalid_hex} -> erlang:error(invalid_hex)
+                    end;
+                ?LAUNCH_DIGEST_SIZE ->
+                    ?event(snp, gctx_init_from_binary_48_list),
+                    snp_launch_digest_gctx:init_gctx_with_seed(HashBin);
+                _ ->
+                    ?event(snp, {gctx_init_fallback_to_zeros_list, #{size => HashSize}}),
+                    snp_launch_digest_gctx:init_gctx()
+            end
+    end,
+    InitialLDHex = snp_util:binary_to_hex_string(GCTX#gctx.ld),
+    ?event(snp, {gctx_initialized, #{
+        ld_size => byte_size(GCTX#gctx.ld),
+        ld_hex => InitialLDHex
+    }}),
+    GCTX.
+
diff --git a/src/snp_launch_digest_gctx.erl b/src/snp_launch_digest_gctx.erl
new file mode 100644
index 000000000..74a02c325
--- /dev/null
+++ b/src/snp_launch_digest_gctx.erl
@@ -0,0 +1,251 @@
+%%% @doc GCTX (Launch Digest Context) management for SNP commitment reports.
+%%%
+%%% This module handles the initialization and updating of the launch digest
+%%% context (GCTX), which tracks the current state of the launch digest
+%%% computation.
+-module(snp_launch_digest_gctx).
+-export([init_gctx/0, init_gctx_with_seed/1, gctx_update_page/4, build_page_info/9, update_with_vmsa_pages/4]).
+-include("include/hb.hrl").
+-include("include/snp_constants.hrl").
+-include("include/snp_launch_digest.hrl").
+-include("include/snp_guids.hrl").
+
+%% Type definitions
+-type gctx() :: #gctx{}.
+-type page_type() :: ?PAGE_TYPE_NORMAL | ?PAGE_TYPE_VMSA | ?PAGE_TYPE_ZERO |
+    ?PAGE_TYPE_SVSM_CAA | ?PAGE_TYPE_SECRETS | ?PAGE_TYPE_CPUID.
+-type gpa() :: non_neg_integer(). % Guest Physical Address
+
+%% Helper function to normalize a binary to an exact size: pad with zero bytes
+%% when too short, truncate when too long, and return an all-zero binary for
+%% any non-binary input.
+-spec normalize_binary_to_size(Binary :: binary() | term(), TargetSize :: non_neg_integer()) -> binary().
+normalize_binary_to_size(Binary, TargetSize) when is_binary(Binary) ->
+    case byte_size(Binary) of
+        TargetSize -> Binary;
+        Size when Size > TargetSize -> binary:part(Binary, 0, TargetSize);
+        Size when Size < TargetSize ->
+            % Right-pad with zero bytes up to the target size.
+            PaddingSize = TargetSize - Size,
+            <<Binary/binary, 0:(PaddingSize * 8)>>
+    end;
+normalize_binary_to_size(_, TargetSize) ->
+    <<0:(TargetSize * 8)>>.
+
+%% @doc Initialize GCTX with zeros
+%% @returns #gctx{} record with launch digest initialized to zeros
+-spec init_gctx() -> gctx().
+init_gctx() ->
+    ?event(snp_short, init_gctx_called),
+    % Fresh context: launch digest starts as all-zero bytes.
+    GCTX = #gctx{ld = <<0:?LAUNCH_DIGEST_BITS>>}, % ?LAUNCH_DIGEST_SIZE bytes of zeros
+    ?event(snp_short, {init_gctx_result, #{ld_size => byte_size(GCTX#gctx.ld)}}),
+    GCTX.
+
+%% @doc Initialize GCTX with seed (OVMF hash)
+%% @param Seed ?LAUNCH_DIGEST_SIZE-byte binary seed value
+%% @returns #gctx{} record with launch digest initialized to seed
+%% Note: the guard is an assertive match — calling with a seed of any other
+%% size crashes with function_clause (intentional; callers pre-validate size).
+-spec init_gctx_with_seed(Seed :: binary()) -> gctx().
+init_gctx_with_seed(Seed) when byte_size(Seed) =:= ?LAUNCH_DIGEST_SIZE ->
+    ?event(snp_short, {init_gctx_with_seed, #{seed_size => byte_size(Seed)}}),
+    GCTX = #gctx{ld = Seed},
+    ?event(snp_short, {init_gctx_with_seed_result, #{ld_size => byte_size(GCTX#gctx.ld)}}),
+    GCTX.
+
+%% @doc Update launch digest with page data
+%% @param GCTX #gctx{} record with current launch digest
+%% @param PageType integer() - Page type (1=Normal, 2=VMSA, 3=Zero, etc.)
+%% @param GPA non_neg_integer() - Guest physical address
+%% @param Contents undefined | binary() - Page contents (undefined for zero pages)
+%% @returns #gctx{} record with updated launch digest
+-spec gctx_update_page(GCTX :: gctx(), PageType :: page_type(), GPA :: gpa(), Contents :: undefined | binary()) ->
+    gctx().
+gctx_update_page(GCTX, PageType, GPA, Contents) ->
+    CurrentLD = GCTX#gctx.ld,
+    CurrentLDHex = snp_util:binary_to_hex_string(CurrentLD),
+    ?event(snp_short, {gctx_update_page_start, #{
+        page_type => PageType,
+        gpa => GPA,
+        contents_size => case Contents of undefined -> undefined; Cont when is_binary(Cont) -> byte_size(Cont); _ -> Contents end,
+        current_ld_size => byte_size(CurrentLD),
+        current_ld_hex => CurrentLDHex
+    }}),
+    PageInfoLen = ?PAGE_INFO_LEN,
+    IsIMI = 0,
+    VMPL3Perms = 0,
+    VMPL2Perms = 0,
+    VMPL1Perms = 0,
+
+    % Build page_info structure
+    PageInfo = build_page_info(
+        CurrentLD, PageType, GPA, Contents,
+        IsIMI, VMPL3Perms, VMPL2Perms, VMPL1Perms, PageInfoLen),
+    PageInfoHex = snp_util:binary_to_hex_string(PageInfo),
+    ?event(snp_short, {page_info_built, #{
+        page_info_size => byte_size(PageInfo),
+        page_info_hex => PageInfoHex
+    }}),
+
+    % Hash page_info to get new launch digest
+    NewLD = crypto:hash(sha384, PageInfo),
+    ?event(snp_short, {gctx_update_page_complete, #{
+        page_type => PageType,
+        gpa => GPA,
+        new_ld_size => byte_size(NewLD)
+    }}),
+
+    GCTX#gctx{ld = NewLD}.
+
+%% @doc Build page_info structure
+%% @param CurrentLD binary() - Current launch digest (?LAUNCH_DIGEST_SIZE bytes)
+%% @param PageType integer() - Page type (1=Normal, 2=VMSA, 3=Zero, etc.)
+%% @param GPA non_neg_integer() - Guest physical address
+%% @param Contents undefined | binary() - Page contents (undefined for zero pages)
+%% @param IsIMI integer() - IMI flag (0 or 1)
+%% @param VMPL3 integer() - VMPL3 permissions
+%% @param VMPL2 integer() - VMPL2 permissions
+%% @param VMPL1 integer() - VMPL1 permissions
+%% @param PageInfoLen integer() - Page info structure length (?PAGE_INFO_LEN)
+%% @returns binary() - Page info structure (?PAGE_INFO_LEN bytes)
+-spec build_page_info(CurrentLD :: binary(), PageType :: integer(), GPA :: non_neg_integer(),
+    Contents :: undefined | binary(), IsIMI :: integer(), VMPL3 :: integer(),
+    VMPL2 :: integer(), VMPL1 :: integer(), PageInfoLen :: integer()) -> binary().
+build_page_info(CurrentLD, PageType, GPA, Contents, IsIMI, VMPL3, VMPL2, VMPL1, PageInfoLen) ->
+    CurrentLDSizeInfo = case CurrentLD of CLD when is_binary(CLD) -> byte_size(CLD); _ -> undefined end,
+    ContentsSizeInfo = case Contents of undefined -> undefined; Cont when is_binary(Cont) -> byte_size(Cont); _ -> Contents end,
+    ?event(snp_short, {build_page_info_start, #{
+        current_ld_size => CurrentLDSizeInfo,
+        page_type => PageType,
+        gpa => GPA,
+        contents_size => ContentsSizeInfo
+    }}),
+    % Ensure CurrentLD is exactly ?LAUNCH_DIGEST_SIZE bytes
+    CurrentLDOriginalSize = case is_binary(CurrentLD) of true -> byte_size(CurrentLD); false -> undefined end,
+    CurrentLD48 = normalize_binary_to_size(CurrentLD, ?LAUNCH_DIGEST_SIZE),
+    case CurrentLDOriginalSize of
+        undefined ->
+            ?event(snp_short, current_ld_not_binary_using_zeros);
+        Size when Size > ?LAUNCH_DIGEST_SIZE ->
+            ?event(snp_short, {current_ld_truncated, #{from => Size, to => ?LAUNCH_DIGEST_SIZE}});
+        Size when Size < ?LAUNCH_DIGEST_SIZE ->
+            ?event(snp_short, {current_ld_padded, #{from => Size, to => ?LAUNCH_DIGEST_SIZE}});
+        _ -> ok
+    end,
+
+    % Copy current launch digest (?LAUNCH_DIGEST_SIZE bytes)
+    % Copy page contents or hash
+    % For zero pages, secrets, and CPUID pages, Rust uses ZEROS = [0; ?LAUNCH_DIGEST_SIZE] (?LAUNCH_DIGEST_SIZE bytes of zeros)
+    % This matches the Rust implementation: const ZEROS: [u8; LD_BYTES] = [0; LD_BYTES];
+    PageContentsHash = case {PageType, Contents} of
+        {?PAGE_TYPE_ZERO, _} ->
+            ?event(snp_short, page_contents_zero_page),
+            <<0:?LAUNCH_DIGEST_BITS>>; % PAGE_TYPE_ZERO - ?LAUNCH_DIGEST_SIZE bytes of zeros (matching Rust ZEROS)
+        {?PAGE_TYPE_SECRETS, _} ->
+            ?event(snp_short, page_contents_secrets),
+            <<0:?LAUNCH_DIGEST_BITS>>; % PAGE_TYPE_SECRETS - ?LAUNCH_DIGEST_SIZE bytes of zeros (matching Rust ZEROS)
+        {?PAGE_TYPE_CPUID, _} ->
+            ?event(snp, page_contents_cpuid),
+            <<0:?LAUNCH_DIGEST_BITS>>; % PAGE_TYPE_CPUID - ?LAUNCH_DIGEST_SIZE bytes of zeros (matching Rust ZEROS)
+        {?PAGE_TYPE_NORMAL, C} when is_binary(C), byte_size(C) =:= ?PAGE_SIZE ->
+            ?event(snp_short, {page_contents_normal_hashing, #{size => byte_size(C)}}),
+            crypto:hash(sha384, C); % PAGE_TYPE_NORMAL
+        {?PAGE_TYPE_VMSA, C} when is_binary(C), byte_size(C) =:= ?PAGE_SIZE ->
+            ?event(snp_short, {page_contents_vmsa_hashing, #{size => byte_size(C)}}),
+            crypto:hash(sha384, C); % PAGE_TYPE_VMSA
+        {_, C} when is_binary(C), byte_size(C) =:= ?LAUNCH_DIGEST_SIZE ->
+            ?event(snp_short, {page_contents_already_hash, #{size => byte_size(C)}}),
+            C; % Already a ?LAUNCH_DIGEST_SIZE-byte hash
+        {_, _} ->
+            % NOTE(review): a Normal page whose contents are neither exactly
+            % ?PAGE_SIZE nor ?LAUNCH_DIGEST_SIZE bytes (e.g. a full OVMF image)
+            % falls through to zeros here — callers that claim per-page chunking
+            % rely on this function, so confirm this is intended.
+            ?event(snp, {page_contents_default_zeros, #{page_type => PageType}}),
+            <<0:?LAUNCH_DIGEST_BITS>> % Default to ?LAUNCH_DIGEST_SIZE bytes of zeros
+    end,
+
+    % Ensure PageContentsHash is exactly ?LAUNCH_DIGEST_SIZE bytes
+    PageContentsHashOriginalSize = byte_size(PageContentsHash),
+    PageContentsHash48 = normalize_binary_to_size(PageContentsHash, ?LAUNCH_DIGEST_SIZE),
+    if PageContentsHashOriginalSize > ?LAUNCH_DIGEST_SIZE ->
+        ?event(snp, {page_contents_hash_truncated, #{from => PageContentsHashOriginalSize, to => ?LAUNCH_DIGEST_SIZE}});
+    PageContentsHashOriginalSize < ?LAUNCH_DIGEST_SIZE ->
+        ?event(snp, {page_contents_hash_padded, #{from => PageContentsHashOriginalSize, to => ?LAUNCH_DIGEST_SIZE}});
+    true -> ok
+    end,
+
+    % Build complete page_info (?PAGE_INFO_LEN bytes)
+    % NOTE(review): this construction was corrupted in the patch ("<>");
+    % reconstructed per the AMD SEV-SNP ABI PAGE_INFO layout (112 bytes):
+    % DIGEST_CUR(48) | CONTENTS(48) | LENGTH(u16 LE) | PAGE_TYPE(u8) |
+    % IMI_PAGE(u8) | VMPL3/VMPL2/VMPL1 perms(u8 each) | reserved(u8) |
+    % GPA(u64 LE). Verify field order (esp. VMPL perms vs reserved byte)
+    % against the spec / the Rust reference before relying on the digest.
+    PageInfo = <<CurrentLD48/binary,
+                 PageContentsHash48/binary,
+                 PageInfoLen:16/little,
+                 PageType:8,
+                 IsIMI:8,
+                 VMPL3:8,
+                 VMPL2:8,
+                 VMPL1:8,
+                 0:8,
+                 GPA:64/little>>,
+    CurrentLDHex = snp_util:binary_to_hex_string(CurrentLD48),
+    PageContentsHashHex = snp_util:binary_to_hex_string(PageContentsHash48),
+    ?event(snp, {build_page_info_complete, #{
+        page_info_size => byte_size(PageInfo),
+        current_ld_hex => CurrentLDHex,
+        page_contents_hash_hex => PageContentsHashHex,
+        page_info_len => PageInfoLen,
+        page_type => PageType,
+        gpa => GPA,
+        gpa_hex => integer_to_list(GPA, 16)
+    }}),
+    PageInfo.
+
+%% @doc Update GCTX with VMSA pages
+%% @param GCTX #gctx{} record with current launch digest
+%% @param VCPUs non_neg_integer() - Number of VCPUs
+%% @param BSPVMSA binary() - BSP VMSA page (?PAGE_SIZE bytes)
+%% @param APVMSA binary() - AP VMSA page (?PAGE_SIZE bytes)
+%% @returns #gctx{} record with updated launch digest
+-spec update_with_vmsa_pages(GCTX :: #gctx{}, VCPUs :: non_neg_integer(), BSPVMSA :: binary(), APVMSA :: binary()) ->
+    #gctx{}.
+update_with_vmsa_pages(GCTX, VCPUs, BSPVMSA, APVMSA) ->
+    % DoS safeguard: reject out-of-range VCPUs before building lists:seq(0, VCPUs - 1)
+    case is_integer(VCPUs) andalso VCPUs >= 1 andalso VCPUs =< ?MAX_VCPUS of
+        true -> ok;
+        false -> erlang:error({invalid_vcpus, VCPUs}, [GCTX, VCPUs, BSPVMSA, APVMSA])
+    end,
+    ?event(snp, {update_with_vmsa_pages_start, #{
+        vcpus => VCPUs,
+        bsp_vmsa_size => byte_size(BSPVMSA),
+        ap_vmsa_size => byte_size(APVMSA),
+        current_ld_size => byte_size(GCTX#gctx.ld)
+    }}),
+    % NOTE(review): all vCPUs are measured at the same ?VMSA_GPA — presumably
+    % matching the reference implementation; confirm against it.
+    VMSAGPA = ?VMSA_GPA,
+    % Fold over vCPU indices: index 0 uses the BSP VMSA, all others the AP VMSA.
+    Result = lists:foldl(
+        fun(I, AccGCTX) ->
+            VMSAToUse = case I of
+                0 ->
+                    ?event(snp, {updating_vmsa_for_vcpu, #{vcpu => I, type => bsp}}),
+                    BSPVMSA;
+                _ ->
+                    ?event(snp, {updating_vmsa_for_vcpu, #{vcpu => I, type => ap}}),
+                    APVMSA
+            end,
+            % Hash computed here only for the trace event below; the digest
+            % update itself re-hashes inside gctx_update_page.
+            VMSAHash = crypto:hash(sha384, VMSAToUse),
+            ?event(snp, {vmsa_before_update, #{
+                vcpu => I,
+                vmsa_type => case I of 0 -> bsp; _ -> ap end,
+                vmsa_hash_hex => snp_util:binary_to_hex_string(VMSAHash),
+                current_ld_hex => snp_util:binary_to_hex_string(AccGCTX#gctx.ld),
+                vmsa_gpa => VMSAGPA
+            }}),
+            NewGCTX = gctx_update_page(AccGCTX, ?PAGE_TYPE_VMSA, VMSAGPA, VMSAToUse),
+            ?event(snp, {vmsa_updated_for_vcpu, #{
+                vcpu => I,
+                vmsa_type => case I of 0 -> bsp; _ -> ap end,
+                new_ld_size => byte_size(NewGCTX#gctx.ld),
+                new_ld_hex => snp_util:binary_to_hex_string(NewGCTX#gctx.ld),
+                old_ld_hex => snp_util:binary_to_hex_string(AccGCTX#gctx.ld)
+            }}),
+            NewGCTX
+        end,
+        GCTX,
+        lists:seq(0, VCPUs - 1)
+    ),
+    ?event(snp_short, {update_with_vmsa_pages_complete, #{
+        vcpus => VCPUs,
+        final_ld_size => byte_size(Result#gctx.ld)
+    }}),
+    Result.
+
diff --git a/src/snp_launch_digest_ovmf.erl b/src/snp_launch_digest_ovmf.erl
new file mode 100644
index 000000000..723dfe8d2
--- /dev/null
+++ b/src/snp_launch_digest_ovmf.erl
@@ -0,0 +1,516 @@
+%%% @doc OVMF parsing and metadata processing for SNP commitment reports.
+%%%
+%%% This module handles parsing of OVMF firmware files, extracting metadata
+%%% sections, and updating the launch digest context with OVMF-related data.
+-module(snp_launch_digest_ovmf).
+-export([parse_and_update_ovmf_metadata_erlang/6]).
+-include("include/hb.hrl").
+-include("include/snp_constants.hrl").
+-include("include/snp_launch_digest.hrl").
+-include("include/snp_guids.hrl").
+
+%% @doc Parse and update OVMF metadata
+%% @param GCTX #gctx{} record with current launch digest
+%% @param VMMType integer() - VMM type (1=QEMU, 2=EC2)
+%% @param KernelHash undefined | binary() - Kernel hash (optional)
+%% @param InitrdHash undefined | binary() - Initrd hash (optional)
+%% @param AppendHash undefined | binary() - Append hash (optional)
+%% @param SevHashesGPA non_neg_integer() - SEV hashes table GPA
+%% @returns {#gctx{}, ResetEIP} where ResetEIP is the reset EIP value from OVMF
+-spec parse_and_update_ovmf_metadata_erlang(GCTX :: #gctx{}, VMMType :: integer(),
+    KernelHash :: undefined | binary(), InitrdHash :: undefined | binary(),
+    AppendHash :: undefined | binary(), SevHashesGPA :: non_neg_integer()) ->
+    {#gctx{}, non_neg_integer()}.
+parse_and_update_ovmf_metadata_erlang(GCTX, VMMType, KernelHash, InitrdHash, AppendHash, SevHashesGPA) ->
+    ?event(snp, {parse_and_update_ovmf_metadata_start, #{
+        vmm_type => VMMType,
+        sev_hashes_gpa => SevHashesGPA,
+        has_kernel => is_binary(KernelHash),
+        has_initrd => is_binary(InitrdHash),
+        has_append => is_binary(AppendHash)
+    }}),
+    % OVMF is copied to priv/ovmf/ at build time (rebar pre_hook); single fixed path.
+    % NOTE(review): code:priv_dir(hb) returns {error, bad_name} if the app is
+    % not loaded, which would make filename:join/1 crash — confirm the app is
+    % always loaded before this is called.
+    OvmfPath = filename:join([code:priv_dir(hb), "ovmf", "OVMF-1.55.fd"]),
+    ?event(snp, {ovmf_path, OvmfPath}),
+    case file:read_file_info(OvmfPath) of
+        {ok, _FileInfo} ->
+            ?event(snp_short, {ovmf_file_found, #{path => OvmfPath}}),
+            parse_ovmf_and_update(GCTX, OvmfPath, VMMType, KernelHash, InitrdHash, AppendHash, SevHashesGPA);
+        {error, Reason} ->
+            ?event(snp_error, {ovmf_file_not_found, #{path => OvmfPath, reason => Reason}}),
+            % Fallback: use default reset EIP (0x0) if OVMF not found, matching Rust
+            DefaultResetEIP = ?DEFAULT_RESET_EIP,
+            ?event(snp, {using_default_reset_eip, #{reset_eip => DefaultResetEIP}}),
+            % If OVMF parsing failed but we have SEV hashes GPA, try to update just the hashes table
+            GCTX1 = case {KernelHash, InitrdHash, AppendHash, SevHashesGPA} of
+                {K, I, A, GPA} when is_binary(K), is_binary(I), is_binary(A), GPA =/= 0 ->
+                    ?event(snp, {updating_sev_hashes_table_fallback, #{gpa => GPA}}),
+                    case snp_launch_digest_sev_hashes:update_sev_hashes_table(GCTX, K, I, A, GPA) of
+                        {ok, G} -> G;
+                        {error, invalid_hex} -> erlang:error(invalid_hex)
+                    end;
+                _ ->
+                    ?event(snp, no_sev_hashes_update_possible),
+                    GCTX
+            end,
+            {GCTX1, DefaultResetEIP}
+    end.
+
+%% Parse OVMF and update GCTX with all metadata sections
+%% Returns {GCTX, ResetEIP} where ResetEIP is read from OVMF footer table (matching Rust)
+-spec parse_ovmf_and_update(GCTX :: #gctx{}, OvmfPath :: string(), VMMType :: integer(),
+    KernelHash :: undefined | binary(), InitrdHash :: undefined | binary(),
+    AppendHash :: undefined | binary(), SevHashesGPA :: non_neg_integer()) ->
+    {#gctx{}, non_neg_integer()}.
+parse_ovmf_and_update(GCTX, OvmfPath, VMMType, KernelHash, InitrdHash, AppendHash, SevHashesGPA) ->
+    ?event(snp, {parse_ovmf_and_update_start, #{
+        path => OvmfPath,
+        vmm_type => VMMType,
+        has_kernel => is_binary(KernelHash),
+        has_initrd => is_binary(InitrdHash),
+        has_append => is_binary(AppendHash),
+        sev_hashes_gpa_arg => SevHashesGPA
+    }}),
+    % Get SEV hashes table GPA from footer table if not provided (matches Rust ovmf.sev_hashes_table_gpa())
+    FinalSevHashesGPA = case SevHashesGPA of
+        0 ->
+            case snp_ovmf:parse_ovmf_sev_hashes_gpa(OvmfPath) of
+                {ok, GPA} ->
+                    ?event(snp_short, {sev_hashes_gpa_from_footer_table, #{gpa => GPA}}),
+                    GPA;
+                _ ->
+                    ?event(snp, sev_hashes_gpa_not_found_in_footer_table),
+                    0
+            end;
+        _ -> SevHashesGPA
+    end,
+    case file:read_file(OvmfPath) of
+        {ok, OvmfData} ->
+            % If GCTX was initialized with zeros (no firmware hash provided),
+            % update it with full OVMF data first (matching Rust behavior)
+            % Rust: gctx.update_page(PageType::Normal, ovmf.gpa(), Some(ovmf.data()), None)?
+            OvmfSize = byte_size(OvmfData),
+            OvmfGPA = ?FOUR_GB - OvmfSize,
+            GCTX1 = case GCTX#gctx.ld of
+                <<0:?LAUNCH_DIGEST_BITS>> -> % If LD is all zeros, we need to update with OVMF data
+                    ?event(snp, {updating_gctx_with_ovmf_data, #{
+                        ovmf_size => OvmfSize,
+                        ovmf_gpa => OvmfGPA,
+                        ovmf_gpa_hex => integer_to_list(OvmfGPA, 16),
+                        ld_before_hex => snp_util:binary_to_hex_string(GCTX#gctx.ld)
+                    }}),
+                    % Update GCTX with full OVMF data as Normal page
+                    % NOTE(review): gctx_update_page as written does NOT chunk
+                    % its contents into ?PAGE_SIZE pages — verify this call
+                    % measures the firmware the same way the Rust reference does.
+                    UpdatedGCTX = snp_launch_digest_gctx:gctx_update_page(GCTX, ?PAGE_TYPE_NORMAL, OvmfGPA, OvmfData),
+                    ?event(snp, {ovmf_data_update_complete, #{
+                        ld_after_hex => snp_util:binary_to_hex_string(UpdatedGCTX#gctx.ld)
+                    }}),
+                    UpdatedGCTX;
+                _ ->
+                    ?event(snp, {gctx_already_initialized_with_hash, #{
+                        ld_hex => snp_util:binary_to_hex_string(GCTX#gctx.ld)
+                    }}),
+                    GCTX % Already initialized with firmware hash, skip OVMF update
+            end,
+            ?event(snp, {after_ovmf_data_update, #{
+                ld_hex => snp_util:binary_to_hex_string(GCTX1#gctx.ld)
+            }}),
+            % Read reset EIP from OVMF footer table (matching Rust ovmf.sev_es_reset_eip())
+            ResetEIP = case snp_ovmf:parse_ovmf_reset_eip(OvmfPath) of
+                {ok, EIP} ->
+                    ?event(snp_short, {reset_eip_from_ovmf, #{eip => EIP}}),
+                    EIP;
+                {error, Reason} ->
+                    ?event(snp, {reset_eip_not_found_using_default, #{default => ?DEFAULT_RESET_EIP, reason => Reason}}),
+                    ?DEFAULT_RESET_EIP % Default to 0 if not found (Rust would error, but we continue)
+            end,
+            case parse_ovmf_metadata_sections(OvmfData) of
+                {ok, Sections} ->
+                    ?event(snp_short, {ovmf_metadata_sections_parsed, #{count => length(Sections)}}),
+                    % Process all sections, starting from GCTX1 which may have
+                    % been updated with OVMF data.
+                    % BUGFIX: the fold previously started from GCTX, silently
+                    % discarding the whole-OVMF-data digest update above.
+                    GCTX2 = lists:foldl(
+                        fun(Section, AccGCTX) ->
+                            SectionNum = hb_maps:get(<<"section_type">>, Section, undefined, #{}),
+                            SectionGPA = hb_maps:get(<<"gpa">>, Section, undefined, #{}),
+                            SectionSize = hb_maps:get(<<"size">>, Section, undefined, #{}),
+                            LD_BeforeSection = snp_util:binary_to_hex_string(AccGCTX#gctx.ld),
+                            ?event(snp, {metadata_section_before, #{
+                                section_type => SectionNum,
+                                section_gpa => SectionGPA,
+                                section_size => SectionSize,
+                                ld_before_hex => LD_BeforeSection
+                            }}),
+                            ?event(snp, {processing_metadata_section, #{
+                                section_type => SectionNum,
+                                gpa => SectionGPA,
+                                size => SectionSize,
+                                ld_before_hex => LD_BeforeSection
+                            }}),
+                            NewGCTX = process_ovmf_section(AccGCTX, Section, VMMType, KernelHash, InitrdHash, AppendHash, OvmfData, FinalSevHashesGPA),
+                            LD_AfterSection = snp_util:binary_to_hex_string(NewGCTX#gctx.ld),
+                            ?event(snp, {metadata_section_after, #{
+                                section_type => SectionNum,
+                                section_gpa => SectionGPA,
+                                section_size => SectionSize,
+                                ld_before_hex => LD_BeforeSection,
+                                ld_after_hex => LD_AfterSection
+                            }}),
+                            ?event(snp, {metadata_section_processed, #{
+                                section_type => SectionNum,
+                                section_gpa => SectionGPA,
+                                section_size => SectionSize,
+                                ld_before_hex => LD_BeforeSection,
+                                ld_after_hex => LD_AfterSection
+                            }}),
+                            NewGCTX
+                        end,
+                        GCTX1,
+                        Sections
+                    ),
+                    % Special handling for EC2 VMM type: process CPUID sections again
+                    GCTX3 = case VMMType of
+                        ?VMM_TYPE_EC2 -> % EC2
+                            ?event(snp, {processing_cpuid_sections_for_ec2, #{
+                                ld_before_hex => snp_util:binary_to_hex_string(GCTX2#gctx.ld)
+                            }}),
+                            Result = lists:foldl(
+                                fun(Section, AccGCTX) ->
+                                    case Section of
+                                        #{<<"section_type">> := ?OVMF_SECTION_CPUID} -> % Cpuid
+                                            SectionGPA = hb_maps:get(<<"gpa">>, Section, undefined, #{}),
+                                            ?event(snp, {processing_cpuid_section_ec2, #{
+                                                gpa => SectionGPA,
+                                                ld_before_hex => snp_util:binary_to_hex_string(AccGCTX#gctx.ld)
+                                            }}),
+                                            NewGCTX = snp_launch_digest_gctx:gctx_update_page(AccGCTX, ?PAGE_TYPE_CPUID, SectionGPA, undefined),
+                                            ?event(snp, {cpuid_section_ec2_processed, #{
+                                                gpa => SectionGPA,
+                                                ld_after_hex => snp_util:binary_to_hex_string(NewGCTX#gctx.ld)
+                                            }}),
+                                            NewGCTX;
+                                        _ -> AccGCTX
+                                    end
+                                end,
+                                GCTX2,
+                                Sections
+                            ),
+                            ?event(snp_short, {cpuid_sections_ec2_complete, #{
+                                ld_hex => snp_util:binary_to_hex_string(Result#gctx.ld)
+                            }}),
+                            Result;
+                        _ -> GCTX2
+                    end,
+                    % Verify SEV hashes section exists if we have hashes
+                    case {KernelHash, InitrdHash, AppendHash} of
+                        {K, I, A} when is_binary(K), is_binary(I), is_binary(A) ->
+                            HasSevHashes = lists:any(
+                                fun(S) -> hb_maps:get(<<"section_type">>, S, undefined, #{}) =:= ?OVMF_SECTION_SNP_KERNEL_HASHES end, % SnpKernelHashes = 0x10
+                                Sections
+                            ),
+                            case HasSevHashes of
+                                true -> {GCTX3, ResetEIP};
+                                false ->
+                                    ?event(snp, missing_snp_kernel_hashes_section),
+                                    {GCTX3, ResetEIP} % Continue anyway, but log the issue
+                            end;
+                        _ -> {GCTX3, ResetEIP}
+                    end;
+                {error, MetadataReason} ->
+                    ?event(snp_error, {ovmf_metadata_parse_failed, #{reason => MetadataReason}}),
+                    % Fallback: try to use SEV hashes GPA if available.
+                    % BUGFIX: this branch previously re-bound GCTX1 (already
+                    % bound above), which is a match assertion in Erlang and
+                    % crashed with badmatch whenever the fallback changed the
+                    % digest. Use a fresh variable, based on GCTX1 so the
+                    % whole-OVMF-data update above is preserved.
+                    FallbackGCTX = case {KernelHash, InitrdHash, AppendHash} of
+                        {K, I, A} when is_binary(K), is_binary(I), is_binary(A) ->
+                            case snp_ovmf:parse_ovmf_sev_hashes_gpa(OvmfPath) of
+                                {ok, FallbackGPA} ->
+                                    ?event(snp, {fallback_to_sev_hashes_gpa, #{gpa => FallbackGPA}}),
+                                    case snp_launch_digest_sev_hashes:update_sev_hashes_table(GCTX1, K, I, A, FallbackGPA) of
+                                        {ok, G} -> G;
+                                        {error, invalid_hex} -> erlang:error(invalid_hex)
+                                    end;
+                                _ -> GCTX1
+                            end;
+                        _ -> GCTX1
+                    end,
+                    {FallbackGCTX, ResetEIP}
+            end;
+        {error, ReadReason} ->
+            ?event(snp_error, {ovmf_file_read_failed, #{reason => ReadReason}}),
+            {GCTX, ?DEFAULT_RESET_EIP} % Return default reset EIP if file read fails
+    end.
+
+%% Parse OVMF metadata sections from OVMF data
+-spec parse_ovmf_metadata_sections(binary()) -> {ok, [map()]} | {error, term()}.
+parse_ovmf_metadata_sections(OvmfData) ->
+    % OVMF_SEV_METADATA_GUID: dc886566-984a-4798-a75e-5585a7bf67cc
+    % UUID to_bytes_le() converts to: 666588dc4a989847a75e5585a7bf67cc
+    % Which is: [102, 101, 136, 220, 74, 152, 152, 71, 167, 94, 85, 133, 167, 191, 103, 204]
+    OvmfSevMetadataGuid = <<102, 101, 136, 220, 74, 152, 152, 71, 167, 94, 85, 133, 167, 191, 103, 204>>,
+
+    % First, parse footer table to find the metadata GUID entry
+    case parse_ovmf_footer_table_for_guid(OvmfData, OvmfSevMetadataGuid) of
+        {ok, MetadataEntry} ->
+            % Metadata entry contains offset_from_end (i32, little-endian).
+            % NOTE(review): this match was corrupted in the patch ("<>");
+            % reconstructed as a signed 32-bit little-endian read, per the
+            % comment above.
+            <<OffsetFromEnd:32/little-signed>> = binary:part(MetadataEntry, 0, 4),
+            DataSize = byte_size(OvmfData),
+            HeaderStart = DataSize - OffsetFromEnd,
+
+            % Parse metadata header: signature (4 bytes) + size (u32) + version (u32) + num_items (u32)
+            % Signature should be "ASEV"
+            ExpectedSignature = <<"ASEV">>,
+            case binary:part(OvmfData, HeaderStart, 4) of
+                ExpectedSignature ->
+                    <<_:4/binary, HeaderSize:32/little, Version:32/little, NumItems:32/little>> =
+                        binary:part(OvmfData, HeaderStart, 16),
+
+                    if
+                        Version =/= ?OVMF_METADATA_VERSION -> {error, {invalid_metadata_version, Version}};
+                        HeaderSize < ?OVMF_METADATA_HEADER_SIZE -> {error, {invalid_header_size, HeaderSize}};
+                        true ->
+                            % Parse section descriptors
+                            ItemsStart = HeaderStart + ?OVMF_METADATA_HEADER_SIZE,
+                            ItemsSize = HeaderSize - ?OVMF_METADATA_HEADER_SIZE,
+                            parse_metadata_section_descriptors(OvmfData, ItemsStart, ItemsSize, NumItems, [])
+                    end;
+                OtherSignature ->
+                    {error, {invalid_signature, OtherSignature}}
+            end;
+        {error, Reason} ->
+            {error, {metadata_guid_not_found, Reason}}
+    end.
+
+%% Parse metadata section descriptors
+-spec parse_metadata_section_descriptors(binary(), integer(), integer(), integer(), [map()]) ->
+    {ok, [map()]} | {error, term()}.
+parse_metadata_section_descriptors(_OvmfData, _ItemsStart, _ItemsSize, 0, Acc) ->
+    {ok, lists:reverse(Acc)};
+parse_metadata_section_descriptors(OvmfData, ItemsStart, ItemsSize, NumItems, Acc) when NumItems > 0 ->
+    % OvmfSevMetadataSectionDesc: GPA (u32, 4 bytes) + Size (u32, 4 bytes) + SectionType (u8, 1 byte) + padding (3 bytes) = 12 bytes
+    % With #[repr(C)], the struct is padded to 12 bytes for alignment
+    DescriptorSize = ?OVMF_DESCRIPTOR_SIZE,
+    Index = length(Acc),
+    Offset = ItemsStart + (Index * DescriptorSize),
+
+    if
+        Offset + DescriptorSize > byte_size(OvmfData) ->
+            {error, {descriptor_out_of_bounds, Index}};
+        true ->
+            % NOTE(review): this match was corrupted in the patch ("<>");
+            % reconstructed per the 12-byte layout documented above
+            % (all fields little-endian, 3 trailing padding bytes skipped).
+            <<GPA:32/little, Size:32/little, SectionType:8, _Padding:3/binary>> =
+                binary:part(OvmfData, Offset, DescriptorSize),
+
+            Section = #{
+                <<"gpa">> => GPA,
+                <<"size">> => Size,
+                <<"section_type">> => SectionType
+            },
+            ?event(snp, {parsed_metadata_section, Section}),
+            parse_metadata_section_descriptors(OvmfData, ItemsStart, ItemsSize, NumItems - 1, [Section | Acc])
+    end.
+
+%% Process a single OVMF metadata section
+-spec process_ovmf_section(GCTX :: #gctx{}, Section :: map(), VMMType :: integer(),
+    KernelHash :: undefined | binary(), InitrdHash :: undefined | binary(),
+    AppendHash :: undefined | binary(), OvmfData :: binary(), SevHashesTableGPA :: non_neg_integer()) ->
+    #gctx{}.
+process_ovmf_section(GCTX, Section, VMMType, KernelHash, InitrdHash, AppendHash, _OvmfData, SevHashesTableGPA) ->
+    SectionType = hb_maps:get(<<"section_type">>, Section, undefined, #{}),
+    GPA = hb_maps:get(<<"gpa">>, Section, undefined, #{}),
+    Size = hb_maps:get(<<"size">>, Section, undefined, #{}),
+
+    LD_Before = snp_util:binary_to_hex_string(GCTX#gctx.ld),
+    ?event(snp, {processing_section_start, #{
+        section_type => SectionType,
+        gpa => GPA,
+        size => Size,
+        ld_before_hex => LD_Before
+    }}),
+
+    % Dispatch on section type; unknown types are logged and leave the
+    % digest unchanged.
+    Result = case SectionType of
+        ?OVMF_SECTION_SNP_SEC_MEMORY -> % SnpSecMemory
+            ?event(snp, {processing_section_snp_sec_memory, #{gpa => GPA, size => Size}}),
+            % Process as zero pages (multiple ?PAGE_SIZE pages)
+            process_zero_pages(GCTX, GPA, Size);
+        ?OVMF_SECTION_SNP_SECRETS -> % SnpSecrets
+            ?event(snp, {processing_section_snp_secrets, #{gpa => GPA}}),
+            snp_launch_digest_gctx:gctx_update_page(GCTX, ?PAGE_TYPE_SECRETS, GPA, undefined);
+        ?OVMF_SECTION_CPUID -> % Cpuid
+            % CPUID sections are deferred for EC2 (handled in a second pass
+            % by the caller) and measured immediately for other VMM types.
+            if
+                VMMType =/= ?VMM_TYPE_EC2 -> % Not EC2
+                    ?event(snp, {processing_section_cpuid, #{gpa => GPA}}),
+                    snp_launch_digest_gctx:gctx_update_page(GCTX, ?PAGE_TYPE_CPUID, GPA, undefined);
+                true ->
+                    ?event(snp, {skipping_cpuid_section_for_ec2, #{gpa => GPA}}),
+                    GCTX
+            end;
+        ?OVMF_SECTION_SNP_KERNEL_HASHES -> % SnpKernelHashes (0x10)
+            case {KernelHash, InitrdHash, AppendHash} of
+                {K, I, A} when is_binary(K), is_binary(I), is_binary(A) ->
+                    ?event(snp_short, {processing_section_snp_kernel_hashes, #{
+                        section_gpa => GPA,
+                        size => Size,
+                        sev_hashes_table_gpa => SevHashesTableGPA
+                    }}),
+                    % Use footer table GPA for page offset (matches Rust: sev_hashes_table_gpa & _PAGE_MASK)
+                    % But use section GPA directly for update_page call (matches Rust: gpa parameter)
+                    PageOffset = case SevHashesTableGPA of
+                        0 -> GPA band ?PAGE_MASK; % Fallback to section GPA if footer table GPA not available
+                        _ -> SevHashesTableGPA band ?PAGE_MASK
+                    end,
+                    % Use section GPA directly (not page-aligned) to match Rust implementation
+                    ?event(snp, {sev_hashes_page_offset_calc, #{
+                        page_offset => PageOffset,
+                        section_gpa => GPA,
+                        using_footer_table_gpa => SevHashesTableGPA =/= 0
+                    }}),
+                    case snp_launch_digest_sev_hashes:construct_sev_hashes_page_erlang(K, I, A, PageOffset) of
+                        {ok, SevHashesPage} ->
+                            SevHashesPageHex = snp_util:binary_to_hex_string(SevHashesPage),
+                            SevHashesPageHash = crypto:hash(sha384, SevHashesPage),
+                            SevHashesPageHashHex = snp_util:binary_to_hex_string(SevHashesPageHash),
+                            ?event(snp, {sev_hashes_page_ready, #{
+                                page_offset => PageOffset,
+                                page_size => byte_size(SevHashesPage),
+                                page_hex => SevHashesPageHex,
+                                page_sha384 => SevHashesPageHashHex
+                            }}),
+                            snp_launch_digest_gctx:gctx_update_page(GCTX, ?PAGE_TYPE_NORMAL, GPA, SevHashesPage); % use GPA directly
+                        {error, invalid_hex} ->
+                            erlang:error(invalid_hex)
+                    end;
+                _ ->
+                    ?event(snp, {skipping_snp_kernel_hashes_no_hashes, #{gpa => GPA}}),
+                    % Process as zero pages if no hashes provided
+                    process_zero_pages(GCTX, GPA, Size)
+            end;
+        ?OVMF_SECTION_SVSM_CAA -> % SvsmCaa
+            ?event(snp, {processing_section_svsm_caa, #{gpa => GPA, size => Size}}),
+            process_zero_pages(GCTX, GPA, Size);
+        _ ->
+            ?event(snp_error, {unknown_section_type, #{type => SectionType, gpa => GPA}}),
+            GCTX
+    end,
+    LD_After = snp_util:binary_to_hex_string(Result#gctx.ld),
+    ?event(snp, {processing_section_complete, #{
+        section_type => SectionType,
+        gpa => GPA,
+        ld_before_hex => LD_Before,
+        ld_after_hex => LD_After
+    }}),
+    Result.
+
+%% Process zero pages (multiple 4KB pages)
+-spec process_zero_pages(GCTX :: #gctx{}, GPA :: non_neg_integer(), Size :: non_neg_integer()) -> #gctx{}.
+process_zero_pages(GCTX, _GPA, Size) when Size =< 0 ->
+    GCTX;
+process_zero_pages(GCTX, GPA, Size) ->
+    % Process in ?PAGE_SIZE chunks. A trailing partial page (Size not a
+    % multiple of ?PAGE_SIZE) is ignored by the integer division below.
+    Pages = Size div ?PAGE_SIZE,
+    ?event(snp, {process_zero_pages_start, #{
+        gpa => GPA,
+        size => Size,
+        pages => Pages
+    }}),
+    Result = lists:foldl(
+        fun(PageNum, AccGCTX) ->
+            PageGPA = GPA + (PageNum * ?PAGE_SIZE),
+            ?event(snp, {processing_zero_page, #{
+                page_num => PageNum,
+                page_gpa => PageGPA,
+                total_pages => Pages
+            }}),
+            snp_launch_digest_gctx:gctx_update_page(AccGCTX, ?PAGE_TYPE_ZERO, PageGPA, undefined)
+        end,
+        GCTX,
+        lists:seq(0, Pages - 1)
+    ),
+    ?event(snp, {process_zero_pages_complete, #{
+        pages_processed => Pages,
+        final_ld_hex => snp_util:binary_to_hex_string(Result#gctx.ld)
+    }}),
+    Result.
+
+%% Parse OVMF footer table to find a specific GUID entry
+-spec parse_ovmf_footer_table_for_guid(binary(), binary()) -> {ok, binary()} | {error, term()}.
+parse_ovmf_footer_table_for_guid(OvmfData, TargetGuid) ->
+    DataSize = byte_size(OvmfData),
+    ENTRY_HEADER_SIZE = ?OVMF_ENTRY_HEADER_SIZE,
+    % Footer table ends ?OVMF_FOOTER_OFFSET bytes before end, last entry is at start_of_footer_table
+    StartOfFooterTable = DataSize - ?OVMF_FOOTER_OFFSET - ENTRY_HEADER_SIZE,
+    ?event(snp, {parsing_footer_table, #{
+        data_size => DataSize,
+        start_of_footer_table => StartOfFooterTable
+    }}),
+
+    % Read the footer entry.
+    % NOTE(review): this match was corrupted in the patch ("<>");
+    % reconstructed as size (u16 LE) followed by the 16-byte GUID, matching
+    % the OVMF GUIDed-table entry header layout — confirm against the
+    % reference parser.
+    FooterEntry = binary:part(OvmfData, StartOfFooterTable, ENTRY_HEADER_SIZE),
+    <<FooterSize:16/little, FooterGuid:16/binary>> = FooterEntry,
+
+    % OVMF_TABLE_FOOTER_GUID (from snp_guids.hrl)
+    ExpectedFooterGuid = ?OVMF_TABLE_FOOTER_GUID,
+
+    FooterGuidHex = hb_util:to_hex(FooterGuid),
+    ExpectedGuidHex = hb_util:to_hex(ExpectedFooterGuid),
+    ?event(snp, {footer_entry_read, #{
+        footer_size => FooterSize,
+        footer_guid_hex => FooterGuidHex,
+        expected_guid_hex => ExpectedGuidHex,
+        match => FooterGuid =:= ExpectedFooterGuid
+    }}),
+
+    if
+        FooterGuid =/= ExpectedFooterGuid ->
+            ?event(snp_error, {footer_guid_mismatch, #{
+                read => FooterGuidHex,
+                expected => ExpectedGuidHex
+            }}),
+            {error, invalid_footer_guid};
+        FooterSize < ENTRY_HEADER_SIZE -> {error, invalid_footer_size};
+        true ->
+            % Calculate table size and start
+            TableSize = FooterSize - ENTRY_HEADER_SIZE,
+            TableStart = StartOfFooterTable - TableSize,
+            ?event(snp, {footer_table_calculated, #{
+                table_size => TableSize,
+                table_start => TableStart
+            }}),
+
+            if
+                TableStart < 0 -> {error, invalid_table_offset};
+                true ->
+                    % Read the table and search backwards for the target GUID
+                    TableData = binary:part(OvmfData, TableStart, TableSize),
+                    TargetGuidHex = hb_util:to_hex(TargetGuid),
+                    ?event(snp, {searching_for_guid_in_table, #{
+                        target_guid_hex => TargetGuidHex,
+                        table_size => TableSize
+                    }}),
+                    find_guid_in_table(TableData, TargetGuid, TableSize)
+            end
+    end.
+
+%% Find a GUID entry in the footer table (searching backwards)
+-spec find_guid_in_table(binary(), binary(), integer()) -> {ok, binary()} | {error, term()}.
+find_guid_in_table(_TableData, _TargetGuid, Offset) when Offset < ?OVMF_ENTRY_HEADER_SIZE ->
+    {error, guid_not_found};
+find_guid_in_table(TableData, TargetGuid, Offset) ->
+    ENTRY_HEADER_SIZE = ?OVMF_ENTRY_HEADER_SIZE,
+    EntryHeaderOffset = Offset - ENTRY_HEADER_SIZE,
+    % NOTE(review): this match was corrupted in the patch ("<>");
+    % reconstructed with the same size-then-GUID layout as the footer entry.
+    <<EntrySize:16/little, EntryGuid:16/binary>> =
+        binary:part(TableData, EntryHeaderOffset, ENTRY_HEADER_SIZE),
+
+    if
+        EntrySize < ENTRY_HEADER_SIZE -> {error, invalid_entry_size};
+        Offset < EntrySize -> {error, invalid_entry_offset};
+        EntryGuid =:= TargetGuid ->
+            % Found it! Entry data is before the header
+            DataOffset = Offset - EntrySize,
+            if
+                DataOffset + ?OVMF_METADATA_OFFSET_SIZE > byte_size(TableData) -> {error, invalid_data_offset};
+                true ->
+                    % Return the entry data (first 4 bytes are the offset/data we need)
+                    EntryData = binary:part(TableData, DataOffset, EntrySize - ENTRY_HEADER_SIZE),
+                    {ok, EntryData}
+            end;
+        true ->
+            find_guid_in_table(TableData, TargetGuid, Offset - EntrySize)
+    end.
+
diff --git a/src/snp_launch_digest_sev_hashes.erl b/src/snp_launch_digest_sev_hashes.erl
new file mode 100644
index 000000000..f8cacd5ac
--- /dev/null
+++ b/src/snp_launch_digest_sev_hashes.erl
@@ -0,0 +1,144 @@
+%%% @doc SEV hashes table construction for SNP commitment reports.
+%%%
+%%% This module handles the construction of SEV hashes pages, which contain
+%%% kernel, initrd, and append hashes in a structured format.
+-module(snp_launch_digest_sev_hashes).
+-export([construct_sev_hashes_page_erlang/4, update_sev_hashes_table/5]).
+-include("include/hb.hrl").
+-include("include/snp_constants.hrl").
+-include("include/snp_launch_digest.hrl").
+-include("include/snp_guids.hrl").
+
+%% @doc Construct SEV hashes page
+%% @param KernelHash binary() - Kernel hash (SHA-256, ?SEV_HASH_BINARY_SIZE bytes or hex string)
+%% @param InitrdHash binary() - Initrd hash (SHA-256, ?SEV_HASH_BINARY_SIZE bytes or hex string)
+%% @param AppendHash binary() - Append hash (SHA-256, ?SEV_HASH_BINARY_SIZE bytes or hex string)
+%% @param PageOffset non_neg_integer() - Page offset for hash table placement
+%% @returns {ok, binary()} - Complete SEV hashes page (?PAGE_SIZE bytes), or {error, invalid_hex}
+-spec construct_sev_hashes_page_erlang(KernelHash :: binary(), InitrdHash :: binary(),
+    AppendHash :: binary(), PageOffset :: non_neg_integer()) ->
+    {ok, binary()} | {error, invalid_hex}.
+construct_sev_hashes_page_erlang(KernelHash, InitrdHash, AppendHash, PageOffset) ->
+    ?event(snp, {construct_sev_hashes_page_start, #{
+        page_offset => PageOffset,
+        kernel_size => byte_size(KernelHash),
+        initrd_size => byte_size(InitrdHash),
+        append_size => byte_size(AppendHash)
+    }}),
+    % Convert hex strings to binary if needed (hashes come in as hex strings, need ?SEV_HASH_BINARY_SIZE-byte binaries)
+    % Nested cases short-circuit on the first hex-decoding failure.
+    case hash_to_binary(KernelHash) of
+        {error, invalid_hex} -> {error, invalid_hex};
+        {ok, KernelHashBin} ->
+            case hash_to_binary(InitrdHash) of
+                {error, invalid_hex} -> {error, invalid_hex};
+                {ok, InitrdHashBin} ->
+                    case hash_to_binary(AppendHash) of
+                        {error, invalid_hex} -> {error, invalid_hex};
+                        {ok, AppendHashBin} ->
+                            build_sev_hashes_page(KernelHashBin, InitrdHashBin, AppendHashBin, PageOffset)
+                    end
+            end
+    end.
+
+%% @doc Convert hash (binary or hex string) to ?SEV_HASH_BINARY_SIZE binary.
+%% NOTE(review): the final clause passes binaries of ANY other size through
+%% unchanged, which would produce a malformed hash-table entry downstream —
+%% confirm whether callers guarantee the size, or whether this should error.
+-spec hash_to_binary(binary()) -> {ok, binary()} | {error, invalid_hex}.
+hash_to_binary(Hash) when byte_size(Hash) =:= ?SEV_HASH_BINARY_SIZE ->
+    {ok, Hash};
+hash_to_binary(Hash) when byte_size(Hash) =:= ?SEV_HASH_HEX_SIZE ->
+    snp_util:hex_to_binary(Hash);
+hash_to_binary(Hash) ->
+    {ok, Hash}.
+
+-spec build_sev_hashes_page(binary(), binary(), binary(), non_neg_integer()) -> {ok, binary()}.
build_sev_hashes_page(KernelHashBin, InitrdHashBin, AppendHashBin, PageOffset) ->
    ?event(snp, {hashes_converted, #{
        kernel_size => byte_size(KernelHashBin),
        initrd_size => byte_size(InitrdHashBin),
        append_size => byte_size(AppendHashBin)
    }}),

    % SEV Hash Table GUIDs (from snp_guids.hrl)
    SevHashTableHeaderGuid = ?SEV_HASH_TABLE_HEADER_GUID,
    SevCmdlineEntryGuid = ?SEV_CMDLINE_ENTRY_GUID,
    SevInitrdEntryGuid = ?SEV_INITRD_ENTRY_GUID,
    SevKernelEntryGuid = ?SEV_KERNEL_ENTRY_GUID,

    % Each entry is: GUID (16 bytes) + Length (2 bytes LE) + Hash (SHA-256,
    % ?SEV_HASH_BINARY_SIZE bytes). The length field carries the TOTAL entry
    % size (?SEV_HASH_TABLE_ENTRY_LENGTH), matching the Rust SevHashTableEntry.
    % NOTE(review): the binary constructions below were reconstructed from the
    % surrounding layout comments after the original expressions were lost to
    % text mangling -- verify against the Rust reference implementation.
    EntryLength = ?SEV_HASH_TABLE_ENTRY_LENGTH,
    AppendEntry = <<SevCmdlineEntryGuid/binary, EntryLength:16/little, AppendHashBin/binary>>,
    InitrdEntry = <<SevInitrdEntryGuid/binary, EntryLength:16/little, InitrdHashBin/binary>>,
    KernelEntry = <<SevKernelEntryGuid/binary, EntryLength:16/little, KernelHashBin/binary>>,

    % Table header: GUID (16) + total table length (2, LE) = 18 bytes.
    % Table length = 18 + 3 * ?SEV_HASH_TABLE_ENTRY_LENGTH = ?SEV_HASH_TABLE_SIZE.
    TableLength = ?SEV_HASH_TABLE_SIZE,
    Header = <<SevHashTableHeaderGuid/binary, TableLength:16/little>>,

    % Complete table, entry order matching Rust: cmdline, initrd, kernel.
    HashTable = <<Header/binary, AppendEntry/binary, InitrdEntry/binary, KernelEntry/binary>>,

    % Rust's PaddedSevHashTable pads the table up to a 16-byte boundary:
    % padding = ((size + 15) & !15) - size = ?SEV_HASH_TABLE_PADDING.
    PaddingSize = ?SEV_HASH_TABLE_PADDING,
    Padding = <<0:(PaddingSize * 8)>>,
    PaddedHashTable = <<HashTable/binary, Padding/binary>>,

    ?event(snp, {hash_table_built, #{
        header_size => byte_size(Header),
        table_length => TableLength,
        hash_table_size => byte_size(HashTable),
        padded_size => byte_size(PaddedHashTable)
    }}),

    % Build the page: zeros up to the offset, then the table, then zero-fill.
    PagePrefix = <<0:(PageOffset * 8)>>,
    HashTableSize = byte_size(PaddedHashTable),
    PageSuffixSize = ?PAGE_SIZE - PageOffset - HashTableSize,
    PageSuffix =
        case PageSuffixSize > 0 of
            true -> <<0:(PageSuffixSize * 8)>>;
            false -> <<>>
        end,
    Result = <<PagePrefix/binary, PaddedHashTable/binary, PageSuffix/binary>>,
    ?event(snp_short, {construct_sev_hashes_page_complete, #{
        result_size => byte_size(Result),
        page_offset => PageOffset,
        hash_table_size => HashTableSize
    }}),
    {ok, Result}.

%% @doc Update the SEV hashes table in the guest context (GCTX).
%% Builds the hashes page at the page offset derived from SevHashesGPA and
%% measures it into the launch digest as a normal page.
%% @param GCTX #gctx{} record with the current launch digest
%% @param KernelHash binary() - Kernel hash (raw or hex)
%% @param InitrdHash binary() - Initrd hash (raw or hex)
%% @param AppendHash binary() - Append hash (raw or hex)
%% @param SevHashesGPA non_neg_integer() - Guest physical address of the table
%% @returns {ok, #gctx{}} with updated launch digest, or {error, invalid_hex}
-spec update_sev_hashes_table(GCTX :: #gctx{}, KernelHash :: binary(), InitrdHash :: binary(),
        AppendHash :: binary(), SevHashesGPA :: non_neg_integer()) ->
    {ok, #gctx{}} | {error, invalid_hex}.
update_sev_hashes_table(GCTX, KernelHash, InitrdHash, AppendHash, SevHashesGPA) ->
    ?event(snp, {update_sev_hashes_table_start, #{
        sev_hashes_gpa => SevHashesGPA,
        kernel_size => byte_size(KernelHash),
        initrd_size => byte_size(InitrdHash),
        append_size => byte_size(AppendHash)
    }}),
    % Split the GPA into the in-page offset and the page-aligned base address.
    PageOffset = SevHashesGPA band ?PAGE_MASK,
    PageAlignedGPA = SevHashesGPA band (bnot ?PAGE_MASK),
    ?event(snp, {sev_hashes_page_calc, #{page_offset => PageOffset, page_aligned_gpa => PageAlignedGPA}}),
    maybe
        {ok, SevHashesPage} ?=
            construct_sev_hashes_page_erlang(KernelHash, InitrdHash, AppendHash, PageOffset),
        ?event(snp_short, {sev_hashes_page_constructed, #{page_size => byte_size(SevHashesPage)}}),
        {ok, snp_launch_digest_gctx:gctx_update_page(
            GCTX, ?PAGE_TYPE_NORMAL, PageAlignedGPA, SevHashesPage)}
    else
        {error, invalid_hex} -> {error, invalid_hex}
    end.

diff --git a/src/snp_launch_digest_vmsa.erl b/src/snp_launch_digest_vmsa.erl
new file mode 100644
index 000000000..dd5b41bf0
--- /dev/null
+++ b/src/snp_launch_digest_vmsa.erl
@@ -0,0 +1,415 @@
%%% @doc VMSA (Virtual Machine Save Area) page creation for SNP commitment reports.
%%%
%%% Builds the VMSA pages for the BSP and AP VCPUs: segment registers,
%%% control registers, and the other CPU-state fields the firmware measures.
-module(snp_launch_digest_vmsa).
-export([create_vmsa_pages_erlang/4]).
-include("include/hb.hrl").
-include("include/snp_constants.hrl").
-include("include/snp_guids.hrl").
%% @doc Create the VMSA pages for the BSP and the AP.
%% The BSP always starts at the fixed reset vector ?BSP_EIP (Rust:
%% `const BSP_EIP: u64 = 0xffff_fff0`); the AP starts at ResetEIP, which the
%% caller reads from the OVMF footer table (Rust: `ovmf.sev_es_reset_eip()`).
%% @param ResetEIP non_neg_integer() - Reset EIP value from OVMF (AP entry point)
%% @param VCPUType integer() - VCPU type identifier
%% @param VMMType integer() - VMM type (1=QEMU, 2=EC2)
%% @param GuestFeatures non_neg_integer() - Guest features flags
%% @returns {BSPVMSA, APVMSA}, each a ?PAGE_SIZE-byte binary
-spec create_vmsa_pages_erlang(ResetEIP :: non_neg_integer(), VCPUType :: integer(),
        VMMType :: integer(), GuestFeatures :: non_neg_integer()) ->
    {binary(), binary()}.
create_vmsa_pages_erlang(ResetEIP, VCPUType, VMMType, GuestFeatures) ->
    ?event(snp, {create_vmsa_pages_start, #{reset_eip => ResetEIP, vcpu_type => VCPUType, vmm_type => VMMType, guest_features => GuestFeatures}}),
    BspEip = ?BSP_EIP,
    BspVmsa = create_vmsa_page_erlang(BspEip, VCPUType, VMMType, GuestFeatures),
    ?event(snp_short, {bsp_vmsa_created, #{size => byte_size(BspVmsa), eip => BspEip}}),
    ApVmsa = create_vmsa_page_erlang(ResetEIP, VCPUType, VMMType, GuestFeatures),
    ?event(snp_short, {ap_vmsa_created, #{size => byte_size(ApVmsa), eip => ResetEIP}}),
    ?event(snp_short, {vmsa_pages_created, #{bsp_size => byte_size(BspVmsa), ap_size => byte_size(ApVmsa)}}),
    {BspVmsa, ApVmsa}.
%% Create a single VMSA page, matching Rust build_save_area() exactly.
%%
%% Rust sets the following fields (all others remain at default/zero):
%% - Segment registers (all VmcbSeg: selector, attrib, limit, base):
%%   - es:   (0,      0x93,     0xffff, 0)
%%   - cs:   (0xf000, cs_flags, 0xffff, eip & 0xffff0000)
%%   - ss:   (0,      ss_flags, 0xffff, 0)
%%   - ds:   (0,      0x93,     0xffff, 0)
%%   - fs:   (0,      0x93,     0xffff, 0)
%%   - gs:   (0,      0x93,     0xffff, 0)
%%   - gdtr: (0,      0,        0xffff, 0)
%%   - idtr: (0,      0,        0xffff, 0)
%%   - ldtr: (0,      0x82,     0xffff, 0)
%%   - tr:   (0,      tr_flags, 0xffff, 0)
%% - Control registers:
%%   - efer: 0x1000, cr4: 0x40, cr0: 0x10, dr7: 0x400, dr6: 0xffff0ff0
%%   - rflags: 0x2, rip: eip & 0xffff
%% - Other fields:
%%   - g_pat: 0x7040600070406, rdx (from vcpu_type.sig() or 0),
%%   - sev_features: guest_features.0, xcr0: 0x1,
%%   - mxcsr and x87_fcw (both from vmm_type)
%%
%% Note: all other fields keep their default (zero) values; the Rust struct is
%% initialized with SevEsSaveArea::default(), which zeros everything.
-spec create_vmsa_page_erlang(EIP :: non_neg_integer(), VCPUType :: integer(),
        VMMType :: integer(), GuestFeatures :: non_neg_integer()) -> binary().
create_vmsa_page_erlang(EIP, VCPUType, VMMType, GuestFeatures) ->
    % BSP vs AP is distinguished solely by the entry point (computed once and
    % reused below -- the original recomputed this before the final event).
    VMSAType = if EIP =:= ?BSP_EIP -> <<"BSP">>; true -> <<"AP">> end,
    ?event(snp, {create_vmsa_page_start, #{eip => EIP, vmsa_type => VMSAType, guest_features => GuestFeatures, vcpu_type => VCPUType, vmm_type => VMMType}}),
    % Start from an all-zero ?PAGE_SIZE page (SevEsSaveArea::default()).
    VMSA = <<0:(?PAGE_SIZE * 8)>>,
    ?event(snp, {vmsa_initialized, #{size => byte_size(VMSA)}}),

    % VMM-dependent flags and values (matching Rust).
    {CSFlags, SSFlags, TRFlags, RDXValue, MXCSRValue, FCWValue} =
        determine_vmm_flags(EIP, VCPUType, VMMType),

    % Log every field value we are about to set (matching Rust build_save_area).
    ?event(snp, {vmsa_field_values_set, {explicit, #{
        es => #{selector => 0, attrib => ?VMSA_SEGMENT_ATTRIB_ES, limit => ?VMSA_SEGMENT_LIMIT, base => 0},
        cs => #{selector => ?VMSA_CS_SELECTOR, attrib => CSFlags, limit => ?VMSA_SEGMENT_LIMIT, base => (EIP band ?EIP_UPPER_16_MASK)},
        ss => #{selector => 0, attrib => SSFlags, limit => ?VMSA_SEGMENT_LIMIT, base => 0},
        ds => #{selector => 0, attrib => ?VMSA_SEGMENT_ATTRIB_DS, limit => ?VMSA_SEGMENT_LIMIT, base => 0},
        fs => #{selector => 0, attrib => ?VMSA_SEGMENT_ATTRIB_FS, limit => ?VMSA_SEGMENT_LIMIT, base => 0},
        gs => #{selector => 0, attrib => ?VMSA_SEGMENT_ATTRIB_GS, limit => ?VMSA_SEGMENT_LIMIT, base => 0},
        gdtr => #{selector => 0, attrib => 0, limit => ?VMSA_SEGMENT_LIMIT, base => 0},
        idtr => #{selector => 0, attrib => 0, limit => ?VMSA_SEGMENT_LIMIT, base => 0},
        ldtr => #{selector => 0, attrib => ?VMSA_SEGMENT_ATTRIB_LDTR, limit => ?VMSA_SEGMENT_LIMIT, base => 0},
        tr => #{selector => 0, attrib => TRFlags, limit => ?VMSA_SEGMENT_LIMIT, base => 0},
        efer => ?VMSA_EFER_VALUE,
        cr4 => ?VMSA_CR4_VALUE,
        cr0 => ?VMSA_CR0_VALUE,
        dr7 => ?VMSA_DR7_VALUE,
        dr6 => ?VMSA_DR6_VALUE,
        rflags => ?VMSA_RFLAGS_VALUE,
        rip => (EIP band ?EIP_LOWER_16_MASK),
        g_pat => ?VMSA_G_PAT_VALUE,
        rdx => RDXValue,
        sev_features => GuestFeatures,
        xcr0 => ?VMSA_XCR0_VALUE,
        mxcsr => MXCSRValue,
        x87_fcw => FCWValue
    }}}),

    % Match Rust: area.rip = eip & 0xffff; area.cs.base = eip & 0xffff0000.
    RIPValue = EIP band ?EIP_LOWER_16_MASK,
    CSBaseValue = EIP band ?EIP_UPPER_16_MASK,

    % Apply the three field groups in sequence.
    VMSASegs = set_all_vmsa_segments(VMSA, EIP, CSFlags, SSFlags, TRFlags),
    VMSACtrl = set_all_vmsa_control_registers(VMSASegs, RIPValue),
    VMSAFinal = set_all_vmsa_other_fields(VMSACtrl, RDXValue, GuestFeatures, MXCSRValue, FCWValue),

    % Read back every critical field to verify the writes landed correctly.
    <<_BeforeRIP:(?VMSA_OFFSET_RIP)/binary, RIPReadBack:64/little, _AfterRIP/binary>> = VMSAFinal,
    <<_BeforeRDX:(?VMSA_OFFSET_RDX)/binary, RDXReadBack:64/little, _AfterRDX/binary>> = VMSAFinal,
    <<_BeforeSEV:(?VMSA_OFFSET_SEV_FEATURES)/binary, SEVReadBack:64/little, _AfterSEV/binary>> = VMSAFinal,
    <<_BeforeXCR0:(?VMSA_OFFSET_XCR0)/binary, XCR0ReadBack:64/little, _AfterXCR0/binary>> = VMSAFinal,
    <<_BeforeMXCSR:(?VMSA_OFFSET_MXCSR)/binary, MXCSRReadBack:32/little, _AfterMXCSR/binary>> = VMSAFinal,
    <<_BeforeFCW:(?VMSA_OFFSET_X87_FCW)/binary, FCWReadBack:16/little, _AfterFCW/binary>> = VMSAFinal,
    <<_BeforeG_PAT:(?VMSA_OFFSET_G_PAT)/binary, G_PATReadBack:64/little, _AfterG_PAT/binary>> = VMSAFinal,
    <<_BeforeEFER:(?VMSA_OFFSET_EFER)/binary, EFERReadBack:64/little, _AfterEFER/binary>> = VMSAFinal,
    <<_BeforeCR4:(?VMSA_OFFSET_CR4)/binary, CR4ReadBack:64/little, _AfterCR4/binary>> = VMSAFinal,
    <<_BeforeCR0:(?VMSA_OFFSET_CR0)/binary, CR0ReadBack:64/little, _AfterCR0/binary>> = VMSAFinal,
    <<_BeforeRFLAGS:(?VMSA_OFFSET_RFLAGS)/binary, RFLAGSReadBack:64/little, _AfterRFLAGS/binary>> = VMSAFinal,

    % Read back the CS segment entry to verify the CS base.
    <<_BeforeCS:(?VMSA_OFFSET_CS)/binary, CSSelector:16/little, CSAttrib:16/little, CSLimit:32/little, CSBase:64/little, _AfterCS/binary>> = VMSAFinal,

    ?event(snp, {vmsa_field_verification, {explicit, #{
        cs_selector => CSSelector,
        cs_attrib => CSAttrib,
        cs_limit => CSLimit,
        cs_base_expected => CSBaseValue,
        cs_base_read_back => CSBase,
        cs_base_match => CSBaseValue =:= CSBase,
        efer_expected => ?VMSA_EFER_VALUE,
        efer_read_back => EFERReadBack,
        efer_match => ?VMSA_EFER_VALUE =:= EFERReadBack,
        cr4_expected => ?VMSA_CR4_VALUE,
        cr4_read_back => CR4ReadBack,
        cr4_match => ?VMSA_CR4_VALUE =:= CR4ReadBack,
        cr0_expected => ?VMSA_CR0_VALUE,
        cr0_read_back => CR0ReadBack,
        cr0_match => ?VMSA_CR0_VALUE =:= CR0ReadBack,
        rflags_expected => ?VMSA_RFLAGS_VALUE,
        rflags_read_back => RFLAGSReadBack,
        rflags_match => ?VMSA_RFLAGS_VALUE =:= RFLAGSReadBack,
        eip_expected => EIP,
        rip_expected => RIPValue,
        rip_read_back => RIPReadBack,
        rip_match => RIPValue =:= RIPReadBack,
        g_pat_expected => ?VMSA_G_PAT_VALUE,
        g_pat_read_back => G_PATReadBack,
        g_pat_match => ?VMSA_G_PAT_VALUE =:= G_PATReadBack,
        rdx_expected => RDXValue,
        rdx_read_back => RDXReadBack,
        rdx_match => RDXValue =:= RDXReadBack,
        sev_features_expected => GuestFeatures,
        sev_features_read_back => SEVReadBack,
        sev_features_match => GuestFeatures =:= SEVReadBack,
        xcr0_expected => ?VMSA_XCR0_VALUE,
        xcr0_read_back => XCR0ReadBack,
        xcr0_match => ?VMSA_XCR0_VALUE =:= XCR0ReadBack,
        mxcsr_expected => MXCSRValue,
        mxcsr_read_back => MXCSRReadBack,
        mxcsr_match => MXCSRValue =:= MXCSRReadBack,
        x87_fcw_expected => FCWValue,
        x87_fcw_read_back => FCWReadBack,
        x87_fcw_match => FCWValue =:= FCWReadBack
    }}}),

    % Extract key byte ranges for comparison with Rust.
    % CS base is at offset 8 within the 16-byte CS VmcbSeg entry.
    CSBaseOffset = ?VMSA_OFFSET_CS + 8,
    <<_BeforeCSBase:CSBaseOffset/binary, CSBaseBytes:8/binary, _AfterCSBase/binary>> = VMSAFinal,
    <<_BeforeEFERBytes:(?VMSA_OFFSET_EFER)/binary, EFERBytes:8/binary, _AfterEFERBytes/binary>> = VMSAFinal,
    <<_BeforeCR4Bytes:(?VMSA_OFFSET_CR4)/binary, CR4Bytes:8/binary, _AfterCR4Bytes/binary>> = VMSAFinal,
    <<_BeforeRIPBytes:(?VMSA_OFFSET_RIP)/binary, RIPBytes:8/binary, _AfterRIPBytes/binary>> = VMSAFinal,
    <<_BeforeRDXBytes:(?VMSA_OFFSET_RDX)/binary, RDXBytes:8/binary, _AfterRDXBytes/binary>> = VMSAFinal,
    <<_BeforeSEVBytes:(?VMSA_OFFSET_SEV_FEATURES)/binary, SEVBytes:8/binary, _AfterSEVBytes/binary>> = VMSAFinal,
    <<_BeforeMXCSRBytes:(?VMSA_OFFSET_MXCSR)/binary, MXCSRBytes:4/binary, _AfterMXCSRBytes/binary>> = VMSAFinal,
    <<_BeforeFCWBytes:(?VMSA_OFFSET_X87_FCW)/binary, FCWBytes:2/binary, _AfterFCWBytes/binary>> = VMSAFinal,

    % Hash of the whole VMSA page for verification (avoids logging full dumps).
    VMSAHash = crypto:hash(sha384, VMSAFinal),

    % Digest over the key field slices. crypto:hash/2 accepts iodata, so the
    % slices are fed as a list. NOTE(review): the original binary expression
    % here was lost to text mangling and the exact field order could not be
    % recovered -- confirm against the Rust side before comparing digests.
    KeyFieldsHash = crypto:hash(sha256, [CSBaseBytes, EFERBytes, CR4Bytes, RIPBytes,
                                         RDXBytes, SEVBytes, MXCSRBytes, FCWBytes]),
    ?event(snp, {vmsa_key_fields_summary, #{
        key_fields_hash => snp_util:binary_to_hex_string(KeyFieldsHash),
        eip => EIP
    }}),

    ?event(snp_short, {create_vmsa_page_complete, #{
        vmsa_type => VMSAType,
        size => byte_size(VMSAFinal),
        vmsa_hash_hex => snp_util:binary_to_hex_string(VMSAHash),
        eip => EIP,
        field_values => #{
            cs_flags => CSFlags,
            ss_flags => SSFlags,
            tr_flags => TRFlags,
            rdx_value => RDXValue,
            mxcsr_value => MXCSRValue,
            fcw_value => FCWValue,
            rip_value => RIPValue,
            cs_base_value => CSBaseValue,
            guest_features => GuestFeatures
        },
        % NOTE(review): this dumps the entire page as hex on every call, which
        % contradicts the "don't log full binary dumps" intent above. Kept for
        % byte-by-byte comparison with Rust; confirm it should remain.
        full_vmsa_page_hex => snp_util:binary_to_hex_string(VMSAFinal)
    }}),
    VMSAFinal.

%% Determine VMM-dependent segment flags and register values.
%% Returns {CSFlags, SSFlags, TRFlags, RDX, MXCSR, FCW}.
-spec determine_vmm_flags(EIP :: non_neg_integer(), VCPUType :: integer(), VMMType :: integer()) ->
    {integer(), integer(), integer(), integer(), integer(), integer()}.
determine_vmm_flags(EIP, VCPUType, VMMType) ->
    case VMMType of
        ?VMM_TYPE_QEMU -> % VMMType::QEMU
            % QEMU: RDX carries the VCPU CPUID signature.
            VCPUSig = get_vcpu_sig(VCPUType),
            {?VMM_QEMU_CS_FLAGS, ?VMM_QEMU_SS_FLAGS, ?VMM_QEMU_TR_FLAGS, VCPUSig, ?VMM_QEMU_MXCSR, ?VMM_QEMU_FCW};
        ?VMM_TYPE_EC2 -> % VMMType::EC2
            % EC2: flags differ between BSP and AP entry points.
            if EIP =:= ?BSP_EIP ->
                {?VMM_EC2_BSP_CS_FLAGS, ?VMM_EC2_BSP_SS_FLAGS, ?VMM_EC2_BSP_TR_FLAGS, 0, 0, 0};
            true ->
                {?VMM_EC2_AP_CS_FLAGS, ?VMM_EC2_AP_SS_FLAGS, ?VMM_EC2_AP_TR_FLAGS, 0, 0, 0}
            end;
        _ -> % Default/other: QEMU flags with a zero RDX.
            {?VMM_QEMU_CS_FLAGS, ?VMM_QEMU_SS_FLAGS, ?VMM_QEMU_TR_FLAGS, 0, ?VMM_QEMU_MXCSR, ?VMM_QEMU_FCW}
    end.

%% Write all ten VmcbSeg segment entries into the VMSA page.
-spec set_all_vmsa_segments(VMSA :: binary(), EIP :: non_neg_integer(), CSFlags :: integer(),
        SSFlags :: integer(), TRFlags :: integer()) -> binary().
set_all_vmsa_segments(VMSA, EIP, CSFlags, SSFlags, TRFlags) ->
    % Only CS carries a non-zero selector and base (upper 16 bits of EIP);
    % every segment shares the same ?VMSA_SEGMENT_LIMIT.
    CSBase = EIP band ?EIP_UPPER_16_MASK,
    % {Offset, Selector, Attrib, Base} per segment, applied in the original order.
    Segments = [
        {?VMSA_OFFSET_ES,   0,                 ?VMSA_SEGMENT_ATTRIB_ES,   0},
        {?VMSA_OFFSET_CS,   ?VMSA_CS_SELECTOR, CSFlags,                   CSBase},
        {?VMSA_OFFSET_SS,   0,                 SSFlags,                   0},
        {?VMSA_OFFSET_DS,   0,                 ?VMSA_SEGMENT_ATTRIB_DS,   0},
        {?VMSA_OFFSET_FS,   0,                 ?VMSA_SEGMENT_ATTRIB_FS,   0},
        {?VMSA_OFFSET_GS,   0,                 ?VMSA_SEGMENT_ATTRIB_GS,   0},
        {?VMSA_OFFSET_GDTR, 0,                 0,                         0},
        {?VMSA_OFFSET_LDTR, 0,                 ?VMSA_SEGMENT_ATTRIB_LDTR, 0},
        {?VMSA_OFFSET_IDTR, 0,                 0,                         0},
        {?VMSA_OFFSET_TR,   0,                 TRFlags,                   0}
    ],
    lists:foldl(
        fun({Offset, Selector, Attrib, Base}, Acc) ->
            set_vmsa_segment(Acc, Offset, Selector, Attrib, ?VMSA_SEGMENT_LIMIT, Base)
        end,
        VMSA,
        Segments
    ).

%% Write all 8-byte control registers (and RIP) into the VMSA page.
-spec set_all_vmsa_control_registers(VMSA :: binary(), RIPValue :: non_neg_integer()) -> binary().
set_all_vmsa_control_registers(VMSA, RIPValue) ->
    Fields = [
        {?VMSA_OFFSET_EFER,   ?VMSA_EFER_VALUE},
        {?VMSA_OFFSET_CR4,    ?VMSA_CR4_VALUE},
        {?VMSA_OFFSET_CR0,    ?VMSA_CR0_VALUE},
        {?VMSA_OFFSET_DR7,    ?VMSA_DR7_VALUE},
        {?VMSA_OFFSET_DR6,    ?VMSA_DR6_VALUE},
        {?VMSA_OFFSET_RFLAGS, ?VMSA_RFLAGS_VALUE},
        {?VMSA_OFFSET_RIP,    RIPValue}
    ],
    lists:foldl(
        fun({Offset, Value}, Acc) -> set_vmsa_field(Acc, Offset, Value, 8) end,
        VMSA,
        Fields
    ).

%% Write the remaining measured fields (g_pat, rdx, sev_features, xcr0, mxcsr, fcw).
-spec set_all_vmsa_other_fields(VMSA :: binary(), RDXValue :: integer(), GuestFeatures :: integer(),
        MXCSRValue :: integer(), FCWValue :: integer()) -> binary().
set_all_vmsa_other_fields(VMSA, RDXValue, GuestFeatures, MXCSRValue, FCWValue) ->
    VMSA1 = set_vmsa_field(VMSA, ?VMSA_OFFSET_G_PAT, ?VMSA_G_PAT_VALUE, 8),
    VMSA2 = set_vmsa_field(VMSA1, ?VMSA_OFFSET_RDX, RDXValue, 8),
    VMSA3 = set_vmsa_field(VMSA2, ?VMSA_OFFSET_SEV_FEATURES, GuestFeatures, 8),
    VMSA4 = set_vmsa_field(VMSA3, ?VMSA_OFFSET_XCR0, ?VMSA_XCR0_VALUE, 8),
    VMSA5 = set_vmsa_field(VMSA4, ?VMSA_OFFSET_MXCSR, MXCSRValue, 4),
    set_vmsa_field(VMSA5, ?VMSA_OFFSET_X87_FCW, FCWValue, 2).

%% Set a VmcbSeg segment entry (16 bytes: selector:u16, attrib:u16, limit:u32, base:u64),
%% all fields little-endian, splicing it into the page at Offset.
%% NOTE(review): the splice/construct binary expressions below were
%% reconstructed from the surrounding comments after the originals were lost
%% to text mangling; the read-back verification event confirms correctness.
-spec set_vmsa_segment(VMSA :: binary(), Offset :: non_neg_integer(), Selector :: integer(),
        Attrib :: integer(), Limit :: integer(), Base :: non_neg_integer()) -> binary().
set_vmsa_segment(VMSA, Offset, Selector, Attrib, Limit, Base) ->
    % Split the page around the 16-byte segment slot at Offset.
    <<Before:Offset/binary, _Old:16/binary, After/binary>> = VMSA,
    Segment = <<Selector:16/little, Attrib:16/little, Limit:32/little, Base:64/little>>,
    % Log the segment bytes we're creating for debugging
    ?event(snp, {set_vmsa_segment_bytes, #{
        offset => Offset,
        selector => Selector,
        attrib => Attrib,
        limit => Limit,
        base => Base,
        segment_bytes_hex => snp_util:binary_to_hex_string(Segment)
    }}),
    Result = <<Before/binary, Segment/binary, After/binary>>,
    % Verify the segment was set correctly by reading it back
    <<_BeforeRead:Offset/binary, ReadSelector:16/little, ReadAttrib:16/little, ReadLimit:32/little, ReadBase:64/little, _AfterRead/binary>> = Result,
    ?event(snp, {set_vmsa_segment_verification, #{
        offset => Offset,
        selector_match => Selector =:= ReadSelector,
        attrib_match => Attrib =:= ReadAttrib,
        limit_match => Limit =:= ReadLimit,
        base_match => Base =:= ReadBase
    }}),
    Result.
%% Get the CPUID signature for a VCPU type (matching Rust cpu_sig()).
%% Rust: cpu_sig(family, model, stepping) =
%%   if family > 0xf: family_low = 0xf, family_high = (family - 0x0f) & 0xff
%%   else:            family_low = family, family_high = 0
%%   model_low = model & 0xf, model_high = (model >> 4) & 0xf
%%   stepping_low = stepping & 0xf
%%   result = (family_high << 20) | (model_high << 16) | (family_low << 8)
%%          | (model_low << 4) | stepping_low
%%
%% The 14 per-variant clauses collapse into four CPU generations:
%%   0-5   Epyc (Naples) variants     -> cpu_sig(23, 1, 2)
%%   6-9   EpycRome variants          -> cpu_sig(23, 49, 0)
%%   10-12 EpycMilan variants         -> cpu_sig(25, 1, 1)
%%   13-14 EpycGenoa variants         -> cpu_sig(25, 17, 0)
%% Anything else defaults to the Epyc (Naples) signature, as before.
-spec get_vcpu_sig(VCPUType :: integer()) -> integer().
get_vcpu_sig(VCPUType) when VCPUType >= 6, VCPUType =< 9 ->
    cpu_sig(23, 49, 0);   % EpycRome / V1 / V2 / V3
get_vcpu_sig(VCPUType) when VCPUType >= 10, VCPUType =< 12 ->
    cpu_sig(25, 1, 1);    % EpycMilan / V1 / V2
get_vcpu_sig(VCPUType) when VCPUType =:= 13; VCPUType =:= 14 ->
    cpu_sig(25, 17, 0);   % EpycGenoa / V1
get_vcpu_sig(_VCPUType) ->
    cpu_sig(23, 1, 2).    % Epyc (Naples) variants and default

%% Calculate a CPUID signature from family/model/stepping (matching Rust cpu_sig()).
-spec cpu_sig(Family :: integer(), Model :: integer(), Stepping :: integer()) -> integer().
cpu_sig(Family, Model, Stepping) ->
    % Families above 0xF use the extended-family encoding: the base family
    % field saturates at 0xF and the remainder goes into the extended field.
    {FamilyLow, FamilyHigh} =
        if
            Family > 16#F ->
                {16#F, (Family - 16#F) band 16#FF};
            true ->
                {Family, 0}
        end,
    ModelLow = Model band 16#F,
    ModelHigh = (Model bsr 4) band 16#F,
    SteppingLow = Stepping band 16#F,
    (FamilyHigh bsl 20) bor (ModelHigh bsl 16) bor (FamilyLow bsl 8)
        bor (ModelLow bsl 4) bor SteppingLow.

%% Set a little-endian field of Size bytes at Offset in the VMSA page.
%% Invalid offset/size combinations leave the page untouched (logged as error).
%% NOTE(review): the splice/construct binary expressions were reconstructed
%% from context after the originals were lost to text mangling; the read-back
%% verification event confirms correctness.
-spec set_vmsa_field(VMSA :: binary(), Offset :: non_neg_integer(), Value :: integer(),
        Size :: non_neg_integer()) -> binary().
set_vmsa_field(VMSA, Offset, Value, Size) when
        is_binary(VMSA),
        Offset >= 0,
        Size > 0,
        Offset + Size =< byte_size(VMSA) ->
    ?event(snp, {set_vmsa_field_valid, #{offset => Offset, size => Size, value => Value, vmsa_size => byte_size(VMSA)}}),
    % Split the page around the Size-byte slot at Offset and splice in the value.
    <<Before:Offset/binary, _Old:Size/binary, After/binary>> = VMSA,
    Result = <<Before/binary, Value:(Size * 8)/little, After/binary>>,
    % Verify the value was set correctly by reading it back
    <<_BeforeRead:Offset/binary, ReadValue:(Size * 8)/little, _AfterRead/binary>> = Result,
    ?event(snp, {set_vmsa_field_complete, #{
        result_size => byte_size(Result),
        value_written => Value,
        value_read_back => ReadValue,
        match => Value =:= ReadValue
    }}),
    Result;
set_vmsa_field(VMSA, Offset, Value, Size) ->
    % Return the original VMSA unchanged if offset/size is out of range.
    ?event(snp_error, {set_vmsa_field_invalid, #{
        offset => Offset,
        size => Size,
        value => Value,
        vmsa_size => case is_binary(VMSA) of true -> byte_size(VMSA); false -> undefined end
    }}),
    VMSA.

diff --git a/src/snp_message.erl b/src/snp_message.erl
new file mode 100644
index 000000000..9d5f9d7e2
--- /dev/null
+++ b/src/snp_message.erl
@@ -0,0 +1,188 @@
%%% @doc Message extraction and normalization for SNP commitment reports.
%%%
%%% This module handles the extraction and normalization of SNP commitment
%%% messages from the input, including extracting the report, address, and
%%% node message ID.
-module(snp_message).
-export([extract_and_normalize_message/2, extract_node_message_id/2, validate_message_structure/1]).
-include("include/hb.hrl").

%% @doc Extract and normalize the SNP commitment message from the input.
%%
%% This function processes the raw message and extracts all necessary components
%% for verification:
%% 1. Searches for a `body' key in the message, using it as the report source
%% 2. Applies message commitment and signing filters
%% 3. Extracts and decodes the JSON report
%% 4. Normalizes the message structure (the report is NOT merged into Msg)
%% 5. Extracts the node address and message ID
%%
%% @param M2 The input message containing the SNP report
%% @param NodeOpts A map of configuration options
%% @returns `{ok, {Msg, Address, NodeMsgID, ReportJSON, MsgWithJSONReport, Report}}'
%% on success with all extracted components, or `{error, Reason}' on failure.
%% Msg is the message without the report; report-derived fields (e.g. policy) must
%% be read from Report, not from Msg, so trust/debug/measurement use only
%% message or signed-report data.
-spec extract_and_normalize_message(M2 :: term(), NodeOpts :: map()) ->
    {ok, {map(), binary(), binary(), binary(), map(), map()}} | {error, term()}.
extract_and_normalize_message(M2, NodeOpts) ->
    maybe
        % Validate message structure early. A failure flows through `?=' to the
        % `else' clauses as {error, {validation_failed, _}}. (The previous code
        % used throw/1 here, which `maybe'/`else' does NOT catch -- the error
        % escaped the function as an uncaught throw instead of a return value.)
        ok ?=
            case validate_message_structure(M2) of
                ok -> ok;
                {error, ValidationErrors} ->
                    ?event(snp_error, {message_structure_validation_failed, #{
                        operation => <<"extract_and_normalize_message">>,
                        validation_errors => ValidationErrors,
                        suggestion => <<"Ensure the message contains all required fields: 'report' (JSON string), 'address' (binary), and optionally 'node-message' or 'node-message-id'.">>
                    }}),
                    {error, {validation_failed, ValidationErrors}}
            end,
        % Search for a `body' key in the message, and if found use it as the
        % source of the report. If not found, use the message itself.
        RawMsg = hb_ao:get(<<"body">>, M2, M2, NodeOpts#{ hashpath => ignore }),
        MsgWithJSONReport =
            hb_util:ok(
                hb_message:with_only_committed(
                    hb_message:with_only_committers(
                        RawMsg,
                        hb_message:signers(
                            RawMsg,
                            NodeOpts
                        ),
                        NodeOpts
                    ),
                    NodeOpts
                )
            ),
        % Normalize the request message: do NOT merge report JSON into Msg.
        % Report may contain attacker-controlled keys; merging would let them
        % override local-hashes, address, policy, etc. used for trust/debug/
        % measurement checks before the report signature is verified.
        ReportJSON = hb_ao:get(<<"report">>, MsgWithJSONReport, NodeOpts),
        % `?=' (rather than `=') turns a JSON decode failure into an
        % {error, _} return instead of a badmatch crash.
        {ok, Report} ?= snp_util:safe_json_decode(ReportJSON),
        Msg = maps:without([<<"report">>], MsgWithJSONReport),
        ?event(snp_short, {snp_message_normalized, #{msg_keys => maps:keys(Msg), report_not_merged => true}}),

        % Extract address and node message ID from the message (not from Report)
        Address = hb_ao:get(<<"address">>, Msg, NodeOpts),
        {ok, NodeMsgID} ?= extract_node_message_id(Msg, NodeOpts),
        {ok, {Msg, Address, NodeMsgID, ReportJSON, MsgWithJSONReport, Report}}
    else
        {error, Reason} -> {error, Reason};
        Error -> {error, Error}
    end.

%% @doc Extract the node message ID from the SNP message.
%%
%% The ID may be provided directly as `node-message-id', or derived from an
%% embedded `node-message' via dev_message:id/3 (the embedded message takes
%% precedence when both are present).
%%
%% @param Msg The normalized SNP message
%% @param NodeOpts A map of configuration options
%% @returns `{ok, NodeMsgID}' on success with the extracted ID, or
%% `{error, missing_node_msg_id}' if no ID can be found
-spec extract_node_message_id(Msg :: map(), NodeOpts :: map()) ->
    {ok, binary()} | {error, missing_node_msg_id}.
extract_node_message_id(Msg, NodeOpts) ->
    % Fetch both candidate sources up front (same evaluation order as before),
    % then dispatch: an embedded node-message wins over a bare ID.
    NodeMsg = hb_ao:get(<<"node-message">>, Msg, NodeOpts#{ hashpath => ignore }),
    NodeMsgID = hb_ao:get(<<"node-message-id">>, Msg, NodeOpts),
    case {NodeMsg, NodeMsgID} of
        {undefined, undefined} ->
            {error, missing_node_msg_id};
        {undefined, ID} ->
            {ok, ID};
        {EmbeddedMsg, _} ->
            dev_message:id(EmbeddedMsg, #{}, NodeOpts)
    end.

%% @doc Validate message structure for required fields and types.
%% Checks the `report' and `address' fields; non-map input is allowed through
%% (with a logged note) since it may be normalized later during extraction.
%% @param Message The message to validate (can be a map or any term)
%% @returns ok if valid, {error, [ValidationErrors]} if invalid
-spec validate_message_structure(Message :: term()) -> ok | {error, [binary()]}.
validate_message_structure(Message) when is_map(Message) ->
    % Accumulate errors through both validators (report first, then address).
    case validate_address_field(Message, validate_report_field(Message, [])) of
        [] -> ok;
        Errors -> {error, Errors}
    end;
validate_message_structure(Message) ->
    % Non-map messages cannot be validated here; they may be processed later,
    % so allow them through but log a warning.
    ?event(snp, {message_structure_validation_skipped, #{
        message_type =>
            if
                is_binary(Message) -> <<"binary">>;
                is_list(Message) -> <<"list">>;
                true -> <<"other">>
            end,
        reason => <<"Message is not a map, validation will be performed during extraction">>
    }}),
    ok.

%% Validate the `report' field: must exist (directly or via `body') and, when
%% a binary, must decode as JSON. Prepends an error message on failure.
-spec validate_report_field(Message :: map(), Errors :: [binary()]) -> [binary()].
validate_report_field(Message, Errors) ->
    case hb_ao:get(<<"report">>, Message, undefined, #{}) of
        undefined ->
            % No direct report; a `body' field may carry it instead.
            case hb_ao:get(<<"body">>, Message, undefined, #{}) of
                undefined ->
                    [<<"Missing required field 'report': The message must contain a 'report' field with the SNP report JSON, or a 'body' field containing the report.">> | Errors];
                _Body ->
                    % Body exists; validation happens during extraction.
                    Errors
            end;
        Report when is_binary(Report) ->
            % A binary report must be valid JSON.
            case snp_util:safe_json_decode(Report) of
                {ok, _Decoded} ->
                    Errors;
                {error, _Reason} ->
                    [<<"Invalid 'report' field type: expected valid JSON string that decodes to a map, got invalid JSON.">> | Errors]
            end;
        Report when is_map(Report) ->
            % Already decoded -- acceptable.
            Errors;
        Invalid ->
            TypeName =
                if
                    is_list(Invalid) -> "list";
                    is_integer(Invalid) -> "integer";
                    true -> "other"
                end,
            ErrorMsg = <<"Invalid 'report' field type: expected binary (JSON string) or map, got ",
                (hb_util:bin(TypeName))/binary, ".">>,
            [ErrorMsg | Errors]
    end.

%% Validate the `address' field: when present it must be a non-empty binary.
%% A missing address is tolerated (it may come from NodeOpts during extraction).
-spec validate_address_field(Message :: map(), Errors :: [binary()]) -> [binary()].
validate_address_field(Message, Errors) ->
    case hb_ao:get(<<"address">>, Message, undefined, #{}) of
        undefined ->
            Errors;
        <<>> ->
            [<<"Invalid 'address' field: address cannot be empty.">> | Errors];
        Address when is_binary(Address) ->
            Errors;
        Invalid ->
            TypeName =
                if
                    is_map(Invalid) -> "map";
                    is_list(Invalid) -> "list";
                    is_integer(Invalid) -> "integer";
                    true -> "other"
                end,
            ErrorMsg = <<"Invalid 'address' field type: expected binary, got ",
                (hb_util:bin(TypeName))/binary, ".">>,
            [ErrorMsg | Errors]
    end.

diff --git a/src/snp_nif.erl b/src/snp_nif.erl
new file mode 100644
index 000000000..8d4e3ee60
--- /dev/null
+++ b/src/snp_nif.erl
@@ -0,0 +1,128 @@
%%% @doc Main NIF interface layer for SNP commitment reports.
%%%
%%% This module provides the main interface for SNP operations, delegating
%%% to specialized modules for different aspects of SNP functionality.
%%% It maintains backward compatibility with the original dev_snp_nif API.
%%% Functions documented as "replaced by the C NIF" are Erlang stubs that
%%% raise not_loaded until init/0 installs the native implementations.
-module(snp_nif).
-export([generate_attestation_report/2, compute_launch_digest/1, check_snp_support/0]).
-export([verify_measurement/2, verify_signature/3]).
-export([fetch_cert_chain/1, fetch_vcek/6]).
-export([report_binary_to_json/1, report_json_to_binary/1]).
-export([pem_to_der_chain/1, pem_cert_to_der/1]).
-export([parse_ovmf_sev_hashes_gpa/1]).
-export([verify_signature_nif/3, verify_report_signature/2]).

-include("include/hb.hrl").
-include_lib("public_key/include/public_key.hrl").

-on_load(init/0).

%% @doc Check if SEV-SNP is supported on the current system.
%% This function will be replaced by the C NIF when loaded.
-spec check_snp_support() -> {ok, boolean()} | {error, term()}.
check_snp_support() ->
    erlang:nif_error(not_loaded).

%% @doc Generate an attestation report from the SEV-SNP hardware.
%% This function will be replaced by the C NIF when loaded.
-spec generate_attestation_report(UniqueData :: binary(), VMPL :: 0..3) ->
    {ok, binary()} | {error, {integer(), binary()}}.
generate_attestation_report(_UniqueData, _VMPL) ->
    erlang:nif_error(not_loaded).

%% @doc Compute launch digest.
%% Delegates to snp_launch_digest module.
-spec compute_launch_digest(Args :: map()) -> {ok, binary()} | {error, term()}.
compute_launch_digest(Args) ->
    snp_launch_digest:compute_launch_digest(Args).

%% @doc Verify that the measurement in the report matches the expected measurement.
%% Delegates to snp_verification module.
-spec verify_measurement(ReportJSON :: binary(), ExpectedMeasurement :: binary()) ->
    {ok, true} | {ok, false} | {error, binary()}.
verify_measurement(ReportJSON, ExpectedMeasurement) ->
    snp_verification:verify_measurement(ReportJSON, ExpectedMeasurement).

%% @doc Verify the signature of an attestation report.
%% Delegates to snp_verification module.
-spec verify_signature(ReportBinary :: binary(), CertChainPEM :: binary(), VcekDER :: binary()) ->
    {ok, true} | {error, binary() | {term(), binary()}}.
verify_signature(ReportBinary, CertChainPEM, VcekDER) ->
    snp_verification:verify_signature(ReportBinary, CertChainPEM, VcekDER).

%% @doc Fetches the AMD certificate chain (ASK + ARK) for the given SEV product name.
%% Delegates to snp_certificates module.
-spec fetch_cert_chain(SevProdName :: undefined | binary() | string()) ->
    {ok, binary()} | {error, term()}.
fetch_cert_chain(SevProdName) ->
    snp_certificates:fetch_cert_chain(SevProdName).

%% @doc Fetches the VCEK certificate for the given chip ID and TCB version.
%% Delegates to snp_certificates module.
-spec fetch_vcek(ChipId :: binary(), BootloaderSPL :: integer(), TeeSPL :: integer(),
    SnpSPL :: integer(), UcodeSPL :: integer(), SevProdName :: undefined | binary() | string()) ->
    {ok, binary()} | {error, term()}.
fetch_vcek(ChipId, BootloaderSPL, TeeSPL, SnpSPL, UcodeSPL, SevProdName) ->
    snp_certificates:fetch_vcek(ChipId, BootloaderSPL, TeeSPL, SnpSPL, UcodeSPL, SevProdName).

%% @doc Convert binary report structure (1184 bytes) to JSON map.
%% Delegates to snp_report_format module.
-spec report_binary_to_json(ReportBinary :: binary()) -> map() | {error, binary()}.
report_binary_to_json(ReportBinary) ->
    snp_report_format:report_binary_to_json(ReportBinary).

%% @doc Convert JSON report map to binary report structure (1184 bytes).
%% Delegates to snp_report_format module.
-spec report_json_to_binary(ReportJSON :: binary() | map()) -> binary() | {error, term()}.
report_json_to_binary(ReportJSON) ->
    snp_report_format:report_json_to_binary(ReportJSON).

%% @doc Convert PEM certificate chain to DER-encoded binary.
%% Delegates to snp_certificates module.
-spec pem_to_der_chain(CertChainPEM :: binary()) -> binary() | {error, term()}.
pem_to_der_chain(CertChainPEM) ->
    snp_certificates:pem_to_der_chain(CertChainPEM).

%% @doc Convert a single PEM certificate to DER.
%% Delegates to snp_certificates module.
-spec pem_cert_to_der(CertPEM :: binary()) -> binary() | {error, term()}.
pem_cert_to_der(CertPEM) ->
    snp_certificates:pem_cert_to_der(CertPEM).

%% @doc Parse OVMF file to extract SEV hashes table GPA.
%% Delegates to snp_ovmf module.
-spec parse_ovmf_sev_hashes_gpa(OvmfPath :: string() | binary()) ->
    {ok, non_neg_integer()} | {error, term()}.
parse_ovmf_sev_hashes_gpa(OvmfPath) ->
    snp_ovmf:parse_ovmf_sev_hashes_gpa(OvmfPath).

%% @doc Verify signature - calls C NIF for actual verification.
%% This function verifies both the certificate chain (ARK -> ASK -> VCEK) and
%% the report signature. The C NIF uses OpenSSL to perform full cryptographic
%% chain verification, including RSASSA-PSS signature support.
%%
%% The certificate chain verification ensures:
%% 1. VCEK is signed by ASK
%% 2. ASK is signed by ARK (root of trust)
%% 3. Report signature is valid using VCEK's public key
%%
%% This provides full cryptographic verification of the attestation report's
%% authenticity, rather than relying solely on fetching certificates from AMD's KDS.
-spec verify_signature_nif(ReportBinary :: binary(), CertChainDER :: binary(), VcekDER :: binary()) ->
    {ok, true} | {error, term()}.
verify_signature_nif(_ReportBinary, _CertChainDER, _VcekDER) ->
    % C NIF handles both certificate chain verification and report signature verification
    % This will be replaced by the C NIF when loaded
    erlang:nif_error(not_loaded).

%% @doc Verify report signature - calls C NIF for actual verification.
%% This function will be replaced by the C NIF when loaded.
-spec verify_report_signature(ReportBinary :: binary(), VcekDER :: binary()) ->
    {ok, true} | {error, term()}.
verify_report_signature(_ReportBinary, _VcekDER) ->
    erlang:nif_error(not_loaded).

%% @doc Install the native implementations over the stubs above (via -on_load).
%% Fix: code:priv_dir/1 can return {error, bad_name} when the `hb' application
%% is not on the code path; the previous version passed that tuple straight to
%% filename:join/1, crashing the on_load hook with an opaque badarg. We now
%% fail the load with a reason that names the actual problem.
init() ->
    case code:priv_dir(hb) of
        {error, bad_name} ->
            {error, {load_failed, {priv_dir_not_found, hb}}};
        PrivDir ->
            erlang:load_nif(filename:join(PrivDir, "snp_nif"), 0)
    end.

diff --git a/src/snp_nonce.erl b/src/snp_nonce.erl
new file mode 100644
index 000000000..1586416df
--- /dev/null
+++ b/src/snp_nonce.erl
@@ -0,0 +1,63 @@
%%% @doc Nonce generation and validation for SNP commitment reports.
%%%
%%% This module handles the generation and validation of nonces used in
%%% AMD SEV-SNP attestation reports. Nonces bind reports to specific
%%% verification requests by combining the node's address and message ID.
-module(snp_nonce).
-export([generate_nonce/2, report_data_matches/3]).
-include("include/hb.hrl").

%% Type definitions
-type nonce() :: binary(). % Nonce is a binary formed by concatenating address and node message ID

%% @doc Generate the nonce to use in the SNP commitment report.
%%
%% This function creates a unique nonce by concatenating the node's native
%% address and message ID. This nonce is embedded in the hardware attestation
%% report to bind it to a specific verification request.
%%
%% @param RawAddress The node's raw address identifier
%% @param RawNodeMsgID The raw node message identifier
%% @returns A binary nonce formed by concatenating the native address and message ID
-spec generate_nonce(RawAddress :: binary(), RawNodeMsgID :: binary()) -> nonce().
generate_nonce(RawAddress, RawNodeMsgID) ->
    Address = hb_util:native_id(RawAddress),
    NodeMsgID = hb_util:native_id(RawNodeMsgID),
    << Address/binary, NodeMsgID/binary >>.

%% @doc Validate that the report data matches the expected nonce.
%%
%% This function ensures that the nonce in the SNP report was generated
%% using the same address and node message ID that are expected for this
%% verification request. Uses constant-time comparison to avoid timing
%% leaks of the nonce content.
%%
%% @param Address The node's address used in nonce generation
%% @param NodeMsgID The node message ID used in nonce generation
%% @param ReportData The actual nonce data from the SNP report
%% @returns `true' if the report data matches the expected nonce, `false' otherwise
-spec report_data_matches(Address :: binary(), NodeMsgID :: binary(),
    ReportData :: binary()) -> boolean().
report_data_matches(Address, NodeMsgID, ReportData) ->
    ExpectedNonce = generate_nonce(Address, NodeMsgID),
    Match = constant_time_eq(ExpectedNonce, ReportData),
    % Log hashes (not the raw nonce) so the event stream does not leak
    % the nonce content itself.
    NonceHash = crypto:hash(sha256, ExpectedNonce),
    ReportDataHash = crypto:hash(sha256, ReportData),
    ?event(snp_short, {nonce_validation, #{
        expected_nonce_size => byte_size(ExpectedNonce),
        expected_nonce_hash => snp_util:binary_to_hex_string(NonceHash),
        report_data_size => byte_size(ReportData),
        report_data_hash => snp_util:binary_to_hex_string(ReportDataHash),
        match => Match
    }}),
    Match.

%% @doc Constant-time equality for two binaries (avoids timing leaks).
%% Returns true only if same size and all bytes equal. XORs the inputs and
%% ORs every byte of the result so the work done is independent of where the
%% first mismatch occurs.
-spec constant_time_eq(binary(), binary()) -> boolean().
constant_time_eq(A, B) when is_binary(A), is_binary(B), byte_size(A) =:= byte_size(B) ->
    Xored = crypto:exor(A, B),
    0 =:= lists:foldl(fun(Byte, Acc) -> Byte bor Acc end, 0, binary_to_list(Xored));
constant_time_eq(_, _) ->
    false.

diff --git a/src/snp_ovmf.erl b/src/snp_ovmf.erl
new file mode 100644
index 000000000..1898ed9a5
--- /dev/null
+++ b/src/snp_ovmf.erl
@@ -0,0 +1,239 @@
%%% @doc OVMF file parsing for SNP commitment reports.
%%%
%%% This module handles parsing of OVMF (Open Virtual Machine Firmware) files
%%% to extract SEV-related metadata, including SEV hashes table GPA and reset EIP.
-module(snp_ovmf).
-export([read_ovmf_gpa/0, parse_ovmf_sev_hashes_gpa/1, parse_ovmf_reset_eip/1]).
-include("include/hb.hrl").
-include("include/snp_constants.hrl").
-include("include/snp_guids.hrl").

%% @doc Read OVMF file and extract SEV hashes table GPA.
%% OVMF is copied to priv/ovmf/ at build time (rebar); same layout as snp_launch_digest_ovmf.
%% @returns {ok, GPA} or {error, Reason}
-spec read_ovmf_gpa() -> {ok, non_neg_integer()} | {error, term()}.
read_ovmf_gpa() ->
    {ok, Cwd} = file:get_cwd(),
    OvmfPaths = [
        % Canonical path: priv/ovmf/ (build-time copy)
        filename:join([code:priv_dir(hb), "ovmf", "OVMF-1.55.fd"]),
        % Fallback: repo root (dev, before compile)
        filename:join([Cwd, "OVMF-1.55.fd"])
    ],
    ?event(snp, {ovmf_search_paths, OvmfPaths}),
    read_ovmf_gpa(OvmfPaths).

%% Internal helper to try multiple OVMF paths, first success wins.
%% @param Paths [string()] - List of paths to try
%% @returns {ok, non_neg_integer()} or {error, ovmf_file_not_found}
-spec read_ovmf_gpa(Paths :: [string()]) -> {ok, non_neg_integer()} | {error, ovmf_file_not_found}.
read_ovmf_gpa([]) ->
    {error, ovmf_file_not_found};
read_ovmf_gpa([Path | Rest]) ->
    case parse_ovmf_sev_hashes_gpa(Path) of
        {ok, Gpa} -> {ok, Gpa};
        {error, _Reason} -> read_ovmf_gpa(Rest)
    end.

%% @doc Parse OVMF file to extract SEV hashes table GPA.
%% This reads the OVMF footer table and finds the SEV_HASH_TABLE_RV_GUID entry.
%% @param OvmfPath Path to the OVMF file (e.g. priv/ovmf/OVMF-1.55.fd)
%% @returns {ok, GPA} where GPA is a 64-bit integer, or {error, Reason} on failure
-spec parse_ovmf_sev_hashes_gpa(OvmfPath :: string() | binary()) -> {ok, non_neg_integer()} | {error, term()}.
parse_ovmf_sev_hashes_gpa(OvmfPath) when is_binary(OvmfPath) ->
    parse_ovmf_sev_hashes_gpa(hb_util:list(OvmfPath));
parse_ovmf_sev_hashes_gpa(OvmfPath) when is_list(OvmfPath) ->
    % Log current working directory for debugging relative paths
    {ok, Cwd} = file:get_cwd(),
    ?event(snp, {parse_ovmf_sev_hashes_gpa_start, #{cwd => Cwd, ovmf_path => OvmfPath}}),
    case file:read_file(OvmfPath) of
        {ok, OvmfData} ->
            parse_ovmf_footer_table(OvmfData);
        {error, Reason} ->
            {error, {file_read_error, Reason}}
    end;
parse_ovmf_sev_hashes_gpa(_) ->
    {error, invalid_path}.

%% Locate the OVMF GUIDed footer table inside the firmware image.
%% Shared by the SEV-hashes-GPA and reset-EIP paths (previously duplicated).
%% The footer header sits ?OVMF_FOOTER_OFFSET bytes from the end of the file
%% and consists of a 2-byte little-endian total size followed by a 16-byte
%% GUID (?OVMF_ENTRY_HEADER_SIZE bytes in total).
%% NOTE(review): the binary match patterns in this file were corrupted in the
%% source ("<>"); reconstructed as <<Size:16/little, Guid:16/binary>> per the
%% OVMF GUID table layout used by sev-snp-measure — confirm against a known
%% firmware image.
%% @returns {ok, TableData} with the table entries (footer excluded) or a
%% tagged error atom identical to the original per-step failures.
-spec locate_footer_table(OvmfData :: binary()) -> {ok, binary()} | {error, term()}.
locate_footer_table(OvmfData) when byte_size(OvmfData) < ?OVMF_MIN_FILE_SIZE ->
    {error, file_too_small};
locate_footer_table(OvmfData) ->
    Size = byte_size(OvmfData),
    FooterEntryOffset = Size - ?OVMF_FOOTER_OFFSET - ?OVMF_ENTRY_HEADER_SIZE,
    if
        FooterEntryOffset < 0 -> {error, invalid_file_format};
        true ->
            FooterEntry = binary:part(OvmfData, FooterEntryOffset, ?OVMF_ENTRY_HEADER_SIZE),
            <<FooterSize:16/little-unsigned-integer, FooterGuid:16/binary>> = FooterEntry,
            % OVMF_TABLE_FOOTER_GUID: 96b582de-1fb2-45f7-baea-a366c55a082d
            % (little-endian bytes_le form, as produced by Python's uuid):
            % de 82 b5 96 b2 1f f7 45 ba ea a3 66 c5 5a 08 2d
            ExpectedGuid = <<222, 130, 181, 150, 178, 31, 247, 69, 186, 234, 163, 102, 197, 90, 8, 45>>,
            if
                FooterGuid =/= ExpectedGuid -> {error, invalid_footer_guid};
                FooterSize < ?OVMF_ENTRY_HEADER_SIZE -> {error, invalid_footer_size};
                true ->
                    % FooterSize covers the whole table including the footer
                    % header itself; the entries precede the footer.
                    TableSize = FooterSize - ?OVMF_ENTRY_HEADER_SIZE,
                    TableStart = FooterEntryOffset - TableSize,
                    if
                        TableStart < 0 -> {error, invalid_table_offset};
                        true -> {ok, binary:part(OvmfData, TableStart, TableSize)}
                    end
            end
    end.

%% Internal helper: find the SEV hashes table GPA in the footer table.
%% @param OvmfData binary() - OVMF file contents
%% @returns {ok, non_neg_integer()} or {error, term()}
-spec parse_ovmf_footer_table(OvmfData :: binary()) -> {ok, non_neg_integer()} | {error, term()}.
parse_ovmf_footer_table(OvmfData) ->
    case locate_footer_table(OvmfData) of
        {ok, TableData} ->
            % Scan entries backwards for SEV_HASH_TABLE_RV_GUID (snp_guids.hrl)
            find_sev_hashes_gpa(TableData, ?SEV_HASH_TABLE_RV_GUID, byte_size(TableData));
        {error, Reason} ->
            {error, Reason}
    end.

%% Find SEV hashes table GPA in the table data (backwards scan from the end).
find_sev_hashes_gpa(TableData, TargetGuid, TableSize) ->
    find_sev_hashes_gpa(TableData, TargetGuid, TableSize, TableSize).

find_sev_hashes_gpa(_TableData, _TargetGuid, _TableSize, Offset) when Offset < ?OVMF_ENTRY_HEADER_SIZE ->
    {error, guid_not_found};
find_sev_hashes_gpa(TableData, TargetGuid, TableSize, Offset) ->
    EntryHeaderOffset = Offset - ?OVMF_ENTRY_HEADER_SIZE,
    % Entry header layout mirrors the footer: 2-byte LE size, 16-byte GUID.
    <<EntrySize:16/little-unsigned-integer, EntryGuid:16/binary>> =
        binary:part(TableData, EntryHeaderOffset, ?OVMF_ENTRY_HEADER_SIZE),
    % Debug: log the GUID we're checking (first iteration only to avoid spam)
    case Offset =:= TableSize of
        true ->
            EntryGuidHex = hb_util:to_hex(EntryGuid),
            TargetGuidHex = hb_util:to_hex(TargetGuid),
            ?event(snp, {searching_sev_hashes_guid, {explicit, #{
                target_guid_hex => TargetGuidHex,
                first_entry_guid_hex => EntryGuidHex,
                entry_size => EntrySize,
                table_size => TableSize
            }}});
        false -> ok
    end,
    if
        EntrySize < ?OVMF_ENTRY_HEADER_SIZE -> {error, invalid_entry_size};
        Offset < EntrySize -> {error, invalid_entry_offset};
        EntryGuid =:= TargetGuid ->
            % Found it! Entry data precedes the header.
            DataOffset = Offset - EntrySize,
            if
                DataOffset + ?OVMF_METADATA_OFFSET_SIZE > TableSize -> {error, invalid_data_offset};
                true ->
                    % First ?OVMF_GPA_EIP_SIZE bytes are the GPA (little-endian u32)
                    <<GpaU32:32/little-unsigned-integer>> =
                        binary:part(TableData, DataOffset, ?OVMF_GPA_EIP_SIZE),
                    ?event(snp_short, {sev_hashes_gpa_found, #{gpa => GpaU32}}),
                    {ok, GpaU32}
            end;
        true ->
            % Continue searching backwards
            find_sev_hashes_gpa(TableData, TargetGuid, TableSize, Offset - EntrySize)
    end.

%% @doc Parse reset EIP from the OVMF footer table (matching Rust ovmf.sev_es_reset_eip()).
%% SEV_ES_RESET_BLOCK_GUID: 00f771de-1a7e-4fcb-890e-68c77e2fb44e.
%% Note: a read failure here returns the bare posix reason (original behavior),
%% unlike parse_ovmf_sev_hashes_gpa/1 which wraps it in {file_read_error, _}.
-spec parse_ovmf_reset_eip(OvmfPath :: string() | binary()) -> {ok, non_neg_integer()} | {error, term()}.
parse_ovmf_reset_eip(OvmfPath) when is_binary(OvmfPath) ->
    parse_ovmf_reset_eip(hb_util:list(OvmfPath));
parse_ovmf_reset_eip(OvmfPath) when is_list(OvmfPath) ->
    case file:read_file(OvmfPath) of
        {ok, OvmfData} ->
            case locate_footer_table(OvmfData) of
                {ok, TableData} ->
                    find_reset_eip(TableData, ?SEV_ES_RESET_BLOCK_GUID, byte_size(TableData));
                {error, TableReason} ->
                    {error, TableReason}
            end;
        {error, Reason} -> {error, Reason}
    end;
parse_ovmf_reset_eip(_) ->
    {error, invalid_path}.

%% Find reset EIP in the footer table (backwards scan, same entry layout as above).
find_reset_eip(TableData, TargetGuid, TableSize) ->
    find_reset_eip(TableData, TargetGuid, TableSize, TableSize).

find_reset_eip(_TableData, _TargetGuid, _TableSize, Offset) when Offset < ?OVMF_ENTRY_HEADER_SIZE ->
    {error, guid_not_found};
find_reset_eip(TableData, TargetGuid, TableSize, Offset) ->
    EntryHeaderOffset = Offset - ?OVMF_ENTRY_HEADER_SIZE,
    <<EntrySize:16/little-unsigned-integer, EntryGuid:16/binary>> =
        binary:part(TableData, EntryHeaderOffset, ?OVMF_ENTRY_HEADER_SIZE),
    % Debug: log the GUID we're checking (first iteration only to avoid spam)
    case Offset =:= TableSize of
        true ->
            EntryGuidHex = hb_util:to_hex(EntryGuid),
            TargetGuidHex = hb_util:to_hex(TargetGuid),
            ?event(snp, {searching_reset_eip_guid, {explicit, #{
                target_guid_hex => TargetGuidHex,
                first_entry_guid_hex => EntryGuidHex,
                entry_size => EntrySize,
                table_size => TableSize
            }}});
        false -> ok
    end,
    if
        EntrySize < ?OVMF_ENTRY_HEADER_SIZE -> {error, invalid_entry_size};
        Offset < EntrySize -> {error, invalid_entry_offset};
        EntryGuid =:= TargetGuid ->
            % Found it! Entry data precedes the header.
            DataOffset = Offset - EntrySize,
            if
                DataOffset + ?OVMF_METADATA_OFFSET_SIZE > TableSize -> {error, invalid_data_offset};
                true ->
                    % First ?OVMF_GPA_EIP_SIZE bytes are the EIP (little-endian u32)
                    <<EIP:32/little-unsigned-integer>> =
                        binary:part(TableData, DataOffset, ?OVMF_GPA_EIP_SIZE),
                    ?event(snp_short, {reset_eip_found, #{eip => EIP}}),
                    {ok, EIP}
            end;
        true ->
            % Continue searching backwards
            find_reset_eip(TableData, TargetGuid, TableSize, Offset - EntrySize)
    end.

diff --git a/src/snp_report_format.erl b/src/snp_report_format.erl
new file mode 100644
index 000000000..74e738153
--- /dev/null
+++ b/src/snp_report_format.erl
@@ -0,0 +1,824 @@
%%% @doc Report format conversion for SNP commitment reports.
%%%
%%% This module handles conversion between binary (1184-byte) and JSON formats
%%% for AMD SEV-SNP attestation reports.
-module(snp_report_format).
-export([report_binary_to_json/1, report_json_to_binary/1, validate_report_schema/1]).
-include("include/hb.hrl").
-include("include/snp_constants.hrl").
-include("include/snp_guids.hrl").

%% Type definitions
-type report_binary() :: binary(). % Exactly ?REPORT_SIZE bytes
-type report_json() :: binary() | map(). % JSON string or decoded map

%% Helper function to construct the 8-byte TcbVersion binary from a map.
%% Layout: bootloader(0), tee(1), reserved(2-5), snp(6), microcode(7).
%% Missing keys default to 0.
-spec build_tcb_binary(TCBMap :: map()) -> binary().
build_tcb_binary(TCBMap) ->
    <<(maps:get(<<"bootloader">>, TCBMap, 0)):8,
      (maps:get(<<"tee">>, TCBMap, 0)):8,
      0:(?TCB_RESERVED_BYTES * 8), % ?TCB_RESERVED_BYTES reserved bytes (bytes 2-5)
      (maps:get(<<"snp">>, TCBMap, 0)):8,
      (maps:get(<<"microcode">>, TCBMap, 0)):8>>.

%% Helper function to normalize binary to exact size (pad or truncate)
%% Optimized to avoid multiple pattern matches
-spec normalize_binary_size(Binary :: binary(), TargetSize :: non_neg_integer()) -> binary().
normalize_binary_size(Binary, TargetSize) when is_binary(Binary) ->
    case byte_size(Binary) of
        TargetSize ->
            Binary;
        Size when Size > TargetSize ->
            binary:part(Binary, 0, TargetSize);
        Size when Size < TargetSize ->
            PaddingSize = TargetSize - Size,
            % NOTE(review): reconstructed (corrupted in source) — right-pad
            % with zero bytes up to TargetSize.
            <<Binary/binary, 0:(PaddingSize * 8)>>
    end;
normalize_binary_size(_, TargetSize) ->
    % Non-binary input: produce an all-zero field of the target width.
    <<0:(TargetSize * 8)>>.

%% @doc Convert binary report structure (?REPORT_SIZE bytes) to JSON map.
%% This replaces the C JSON serialization for better error handling.
%% Spec fix: the success branch returns the bare map (not {ok, Map}), matching
%% snp_nif:report_binary_to_json/1's delegating spec; the previous
%% {ok, map()} spec contradicted the implementation.
%% @param ReportBinary ?REPORT_SIZE-byte binary containing the raw report structure
%% @returns Map containing the report fields as Erlang terms, or {error, Msg}
-spec report_binary_to_json(ReportBinary :: report_binary()) -> map() | {error, binary()}.
report_binary_to_json(ReportBinary) when byte_size(ReportBinary) =:= ?REPORT_SIZE ->
    % NOTE(review): the original destructuring pattern was corrupted in the
    % source; reconstructed from the field order/widths used when building
    % MainPortion in report_json_to_binary_validated/1, with the signature at
    % offset ?REPORT_MAIN_PORTION_SIZE (as asserted by the verification code
    % there). Confirm against snp_constants.hrl and the AMD SEV-SNP ABI spec.
    <<MainPortion:?REPORT_MAIN_PORTION_SIZE/binary,
      SignatureR:?SIGNATURE_R_SIZE/binary,
      SignatureS:?SIGNATURE_S_SIZE/binary,
      _SigReserved/binary>> = ReportBinary,
    <<Version:32/little-unsigned-integer,
      GuestSvn:32/little-unsigned-integer,
      Policy:64/little-unsigned-integer,
      FamilyId:?FAMILY_ID_SIZE/binary,
      ImageId:?IMAGE_ID_SIZE/binary,
      Vmpl:32/little-unsigned-integer,
      SigAlgo:32/little-unsigned-integer,
      CurrentTcb:?TCB_SIZE/binary,
      PlatInfo:64/little-unsigned-integer,
      AuthorKeyEn:32/little-unsigned-integer,
      Reserved0:32/little-unsigned-integer,
      ReportData:?CHIP_ID_SIZE/binary,
      Measurement:?LAUNCH_DIGEST_SIZE/binary,
      HostData:?HOST_DATA_SIZE/binary,
      IdKeyDigest:?LAUNCH_DIGEST_SIZE/binary,
      AuthorKeyDigest:?LAUNCH_DIGEST_SIZE/binary,
      ReportId:?REPORT_ID_SIZE/binary,
      ReportIdMa:?REPORT_ID_SIZE/binary,
      ReportedTcb:?TCB_SIZE/binary,
      _Reserved1:?RESERVED1_BITS,
      ChipId:?CHIP_ID_SIZE/binary,
      CommittedTcb:?TCB_SIZE/binary,
      CurrentBuild:8,
      CurrentMinor:8,
      CurrentMajor:8,
      Reserved2:8,
      CommittedBuild:8,
      CommittedMinor:8,
      CommittedMajor:8,
      Reserved3:8,
      LaunchTcb:?TCB_SIZE/binary,
      _/binary>> = MainPortion,
    #{
        <<"version">> => Version,
        <<"guest_svn">> => GuestSvn,
        <<"policy">> => Policy,
        <<"family_id">> => hb_util:list(FamilyId),
        <<"image_id">> => hb_util:list(ImageId),
        <<"vmpl">> => Vmpl,
        <<"sig_algo">> => SigAlgo,
        <<"current_tcb">> => begin
            % TcbVersion structure: bootloader(?TCB_OFFSET_BOOTLOADER), tee(?TCB_OFFSET_TEE), _reserved(2-5), snp(?TCB_OFFSET_SNP), microcode(?TCB_OFFSET_MICROCODE)
            Bootloader = binary:at(CurrentTcb, ?TCB_OFFSET_BOOTLOADER),
            Tee = binary:at(CurrentTcb, ?TCB_OFFSET_TEE),
            Snp = binary:at(CurrentTcb, ?TCB_OFFSET_SNP), % Skip ?TCB_RESERVED_BYTES reserved bytes (2-5)
            Microcode = binary:at(CurrentTcb, ?TCB_OFFSET_MICROCODE),
            ?event(snp, {binary_to_json_current_tcb, #{
                raw_binary_hex => snp_util:binary_to_hex_string(CurrentTcb),
                bootloader => Bootloader,
                tee => Tee,
                snp => Snp,
                microcode => Microcode
            }}),
            #{
                <<"bootloader">> => Bootloader,
                <<"tee">> => Tee,
                <<"snp">> => Snp,
                <<"microcode">> => Microcode
            }
        end,
        <<"plat_info">> => PlatInfo,
        <<"_author_key_en">> => AuthorKeyEn,
        <<"_reserved_0">> => Reserved0,
        <<"report_data">> => hb_util:list(ReportData),
        <<"measurement">> => hb_util:list(Measurement),
        <<"host_data">> => hb_util:list(HostData),
        <<"id_key_digest">> => hb_util:list(IdKeyDigest),
        <<"author_key_digest">> => hb_util:list(AuthorKeyDigest),
        <<"report_id">> => hb_util:list(ReportId),
        <<"report_id_ma">> => hb_util:list(ReportIdMa),
        <<"reported_tcb">> => #{
            <<"bootloader">> => binary:at(ReportedTcb, ?TCB_OFFSET_BOOTLOADER),
            <<"tee">> => binary:at(ReportedTcb, ?TCB_OFFSET_TEE),
            <<"snp">> => binary:at(ReportedTcb, ?TCB_OFFSET_SNP), % Skip ?TCB_RESERVED_BYTES reserved bytes (2-5)
            <<"microcode">> => binary:at(ReportedTcb, ?TCB_OFFSET_MICROCODE)
        },
        <<"chip_id">> => hb_util:list(ChipId),
        <<"committed_tcb">> => #{
            <<"bootloader">> => binary:at(CommittedTcb, ?TCB_OFFSET_BOOTLOADER),
            <<"tee">> => binary:at(CommittedTcb, ?TCB_OFFSET_TEE),
            <<"snp">> => binary:at(CommittedTcb, ?TCB_OFFSET_SNP), % Skip ?TCB_RESERVED_BYTES reserved bytes (2-5)
            <<"microcode">> => binary:at(CommittedTcb, ?TCB_OFFSET_MICROCODE)
        },
        <<"current_build">> => CurrentBuild,
        <<"current_minor">> => CurrentMinor,
        <<"current_major">> => CurrentMajor,
        <<"_reserved_2">> => Reserved2,
        <<"committed_build">> => CommittedBuild,
        <<"committed_minor">> => CommittedMinor,
        <<"committed_major">> => CommittedMajor,
        <<"_reserved_3">> => Reserved3,
        <<"launch_tcb">> => #{
            <<"bootloader">> => binary:at(LaunchTcb, ?TCB_OFFSET_BOOTLOADER),
            <<"tee">> => binary:at(LaunchTcb, ?TCB_OFFSET_TEE),
            <<"snp">> => binary:at(LaunchTcb, ?TCB_OFFSET_SNP), % Skip ?TCB_RESERVED_BYTES reserved bytes (2-5)
            <<"microcode">> => binary:at(LaunchTcb, ?TCB_OFFSET_MICROCODE)
        },
        <<"signature">> => #{
            <<"r">> => hb_util:list(SignatureR),
            <<"s">> => hb_util:list(SignatureS)
        }
    };
report_binary_to_json(InvalidBinary) ->
    ActualSize = case is_binary(InvalidBinary) of
        true -> byte_size(InvalidBinary);
        false -> <<"not_a_binary">>
    end,
    ?event(snp_error, {report_binary_to_json_invalid_size, #{
        operation => <<"report_binary_to_json">>,
        actual_size => ActualSize,
        expected_size => ?REPORT_SIZE,
        actual_type => case is_binary(InvalidBinary) of true -> <<"binary">>; false -> <<"not_binary">> end,
        suggestion => <<"Ensure the report binary is exactly ", (hb_util:bin(integer_to_list(?REPORT_SIZE)))/binary, " bytes as specified in the SNP report format.">>
    }}),
    {error, <<"Report binary validation failed: expected exactly ",
        (hb_util:bin(integer_to_list(?REPORT_SIZE)))/binary,
        " bytes, got ",
        (hb_util:bin(case is_binary(InvalidBinary) of true -> integer_to_list(byte_size(InvalidBinary)); false -> "not a binary" end))/binary,
        ". Ensure the report is a complete ", (hb_util:bin(integer_to_list(?REPORT_SIZE)))/binary, "-byte binary.">>}.

%% @doc Convert JSON report map to binary report structure (?REPORT_SIZE bytes).
%% This reconstructs the binary structure from parsed JSON for signature verification.
%% @param ReportJSON Binary containing JSON report OR map
%% @returns ?REPORT_SIZE-byte binary containing the raw report structure
-spec report_json_to_binary(ReportJSON :: report_json()) -> report_binary() | {error, term()}.
report_json_to_binary(ReportJSON) when is_binary(ReportJSON) ->
    ?event(snp_short, {json_input_size, byte_size(ReportJSON)}),
    case snp_util:safe_json_decode(ReportJSON) of
        {ok, ReportMap} ->
            ?event(snp_short, {json_decoded_to_map, #{
                has_current_tcb => maps:is_key(<<"current_tcb">>, ReportMap),
                map_size => map_size(ReportMap)
            }}),
            report_json_to_binary(ReportMap);
        {error, {conversion_failed, _, _, {invalid_format, TypeMsg}}} ->
            ?event(snp_error, {report_json_to_binary_invalid_json, #{
                operation => <<"report_json_to_binary">>,
                actual_type => TypeMsg,
                expected => <<"valid JSON that decodes to a map">>,
                suggestion => <<"Ensure the input is valid JSON that decodes to a map/object containing all required SNP report fields.">>
            }}),
            {error, <<"JSON format validation failed: expected valid JSON that decodes to a map, got invalid format. Ensure the JSON is properly formatted and contains all required fields.">>};
        {error, {conversion_failed, _, _, {Error, Reason}}} ->
            ?event(snp_error, {report_json_to_binary_decode_error, #{
                operation => <<"report_json_to_binary">>,
                error => Error,
                reason => Reason,
                suggestion => <<"JSON decode failed. Ensure the input is valid JSON format.">>
            }}),
            {error, <<"JSON decode failed: ", (hb_util:bin(io_lib:format("~p", [Reason])))/binary, ". Ensure the input is valid JSON format.">>};
        {error, Reason} ->
            ?event(snp_error, {report_json_to_binary_error, #{
                operation => <<"report_json_to_binary">>,
                reason => Reason,
                suggestion => <<"JSON processing failed. Check the input format.">>
            }}),
            {error, Reason}
    end;
report_json_to_binary(ReportMap) when is_map(ReportMap) ->
    % Validate report schema and field values before conversion
    case validate_report_schema(ReportMap) of
        ok ->
            report_json_to_binary_validated(ReportMap);
        {error, ValidationErrors} ->
            ?event(snp_error, {report_json_to_binary_validation_failed, #{
                operation => <<"report_json_to_binary">>,
                validation_errors => ValidationErrors,
                error_count => length(ValidationErrors),
                suggestion => <<"Fix the validation errors before converting the report. Check that all fields are present, have correct types, and values are within valid ranges.">>
            }}),
            {error, {validation_failed, ValidationErrors}}
    end;
report_json_to_binary(InvalidInput) ->
    ActualType = case InvalidInput of
        B when is_binary(B) -> <<"binary">>;
        M when is_map(M) -> <<"map">>;
        L when is_list(L) -> <<"list">>;
        _ -> <<"other">>
    end,
    ?event(snp_error, {report_json_to_binary_invalid_input, #{
        operation => <<"report_json_to_binary">>,
        actual_type => ActualType,
        expected => <<"binary (JSON string) or map">>,
        suggestion => <<"Provide either a JSON-encoded binary string or a map containing the SNP report fields.">>
    }}),
    {error, <<"Report format validation failed: expected binary (JSON) or map, got ",
        ActualType/binary, ". Provide a valid JSON string or map containing the SNP report data.">>}.

%% Internal function to perform conversion after validation.
%% Builds the main (pre-signature) portion field by field, pads it to
%% ?REPORT_MAIN_PORTION_SIZE, then appends the signature portion.
-spec report_json_to_binary_validated(ReportMap :: map()) -> binary() | {error, term()}.
report_json_to_binary_validated(ReportMap) ->
    try
        Version = maps:get(<<"version">>, ReportMap),
        GuestSvn = maps:get(<<"guest_svn">>, ReportMap),
        Policy = maps:get(<<"policy">>, ReportMap),
        FamilyId = hb_util:bin(maps:get(<<"family_id">>, ReportMap)),
        ImageId = hb_util:bin(maps:get(<<"image_id">>, ReportMap)),
        Vmpl = maps:get(<<"vmpl">>, ReportMap),
        SigAlgo = maps:get(<<"sig_algo">>, ReportMap),
        CurrentTcbMap = maps:get(<<"current_tcb">>, ReportMap),
        ?event(snp, {current_tcb_map_raw, #{
            map_keys => maps:keys(CurrentTcbMap),
            map_size => maps:size(CurrentTcbMap),
            bootloader_value => maps:get(<<"bootloader">>, CurrentTcbMap, not_found),
            tee_value => maps:get(<<"tee">>, CurrentTcbMap, not_found),
            snp_value => maps:get(<<"snp">>, CurrentTcbMap, not_found),
            microcode_value => maps:get(<<"microcode">>, CurrentTcbMap, not_found),
            all_entries => maps:to_list(CurrentTcbMap)
        }}),
        CurrentTcbBootloader = maps:get(<<"bootloader">>, CurrentTcbMap, 0),
        CurrentTcbTee = maps:get(<<"tee">>, CurrentTcbMap, 0),
        CurrentTcbSnp = maps:get(<<"snp">>, CurrentTcbMap, 0),
        CurrentTcbMicrocode = maps:get(<<"microcode">>, CurrentTcbMap, 0),
        ?event(snp, {current_tcb_values, #{
            bootloader => CurrentTcbBootloader,
            tee => CurrentTcbTee,
            snp => CurrentTcbSnp,
            microcode => CurrentTcbMicrocode
        }}),
        % TcbVersion structure: bootloader(?TCB_OFFSET_BOOTLOADER), tee(?TCB_OFFSET_TEE), _reserved(2-5), snp(?TCB_OFFSET_SNP), microcode(?TCB_OFFSET_MICROCODE)
        CurrentTcb = build_tcb_binary(CurrentTcbMap),
        PlatInfo = maps:get(<<"plat_info">>, ReportMap),
        AuthorKeyEn = maps:get(<<"_author_key_en">>, ReportMap, 0),
        Reserved0 = maps:get(<<"_reserved_0">>, ReportMap, 0),
        ReportData = hb_util:bin(maps:get(<<"report_data">>, ReportMap)),
        Measurement = hb_util:bin(maps:get(<<"measurement">>, ReportMap)),
        HostData = hb_util:bin(maps:get(<<"host_data">>, ReportMap)),
        IdKeyDigest = hb_util:bin(maps:get(<<"id_key_digest">>, ReportMap)),
        AuthorKeyDigest = hb_util:bin(maps:get(<<"author_key_digest">>, ReportMap)),
        ReportId = hb_util:bin(maps:get(<<"report_id">>, ReportMap)),
        ReportIdMa = hb_util:bin(maps:get(<<"report_id_ma">>, ReportMap)),
        ReportedTcbMap = maps:get(<<"reported_tcb">>, ReportMap),
        ReportedTcb = build_tcb_binary(ReportedTcbMap),
        ChipId = hb_util:bin(maps:get(<<"chip_id">>, ReportMap)),
        CommittedTcbMap = maps:get(<<"committed_tcb">>, ReportMap),
        CommittedTcb = build_tcb_binary(CommittedTcbMap),
        CurrentBuild = maps:get(<<"current_build">>, ReportMap, 0),
        CurrentMinor = maps:get(<<"current_minor">>, ReportMap, 0),
        CurrentMajor = maps:get(<<"current_major">>, ReportMap, 0),
        Reserved2 = maps:get(<<"_reserved_2">>, ReportMap, 0),
        CommittedBuild = maps:get(<<"committed_build">>, ReportMap, 0),
        CommittedMinor = maps:get(<<"committed_minor">>, ReportMap, 0),
        CommittedMajor = maps:get(<<"committed_major">>, ReportMap, 0),
        Reserved3 = maps:get(<<"_reserved_3">>, ReportMap, 0),
        LaunchTcbMap = maps:get(<<"launch_tcb">>, ReportMap),
        LaunchTcb = build_tcb_binary(LaunchTcbMap),
        SignatureMap = maps:get(<<"signature">>, ReportMap),
        SignatureRList = maps:get(<<"r">>, SignatureMap),
        SignatureSList = maps:get(<<"s">>, SignatureMap),
        ?event(snp, {signature_from_json, #{
            r_list_length => length(SignatureRList),
            s_list_length => length(SignatureSList),
            r_first_8 => lists:sublist(SignatureRList, 1, min(8, length(SignatureRList))),
            s_first_8 => lists:sublist(SignatureSList, 1, min(8, length(SignatureSList)))
        }}),
        SignatureR = hb_util:bin(SignatureRList),
        SignatureS = hb_util:bin(SignatureSList),
        ?event(snp, {signature_converted_to_binary, #{
            r_size => byte_size(SignatureR),
            s_size => byte_size(SignatureS),
            r_first_8_bytes_hex => snp_util:binary_to_hex_string(binary:part(SignatureR, 0, min(8, byte_size(SignatureR)))),
            s_first_8_bytes_hex => snp_util:binary_to_hex_string(binary:part(SignatureS, 0, min(8, byte_size(SignatureS))))
        }}),
        % Reconstruct binary report structure
        ?event(snp, {before_binary_construction, #{
            signature_r_size => byte_size(SignatureR),
            signature_s_size => byte_size(SignatureS),
            signature_r_first_8_hex => snp_util:binary_to_hex_string(binary:part(SignatureR, 0, min(8, byte_size(SignatureR)))),
            signature_s_first_8_hex => snp_util:binary_to_hex_string(binary:part(SignatureS, 0, min(8, byte_size(SignatureS))))
        }}),
        % Construct the main portion (everything before the signature). The
        % listed fields sum to fewer bytes than ?REPORT_MAIN_PORTION_SIZE, so
        % the result is zero-padded below to place the signature at the
        % correct offset.
        ?event(snp, {before_main_portion_construction, #{
            expected_main_portion_size => ?REPORT_MAIN_PORTION_SIZE,
            calculated_field_sizes => 672,
            current_tcb_binary_hex => snp_util:binary_to_hex_string(CurrentTcb)
        }}),
        MainPortion = <<
            Version:32/little-unsigned-integer,
            GuestSvn:32/little-unsigned-integer,
            Policy:64/little-unsigned-integer,
            FamilyId:?FAMILY_ID_SIZE/binary,
            ImageId:?IMAGE_ID_SIZE/binary,
            Vmpl:32/little-unsigned-integer,
            SigAlgo:32/little-unsigned-integer,
            CurrentTcb:?TCB_SIZE/binary,
            PlatInfo:64/little-unsigned-integer,
            AuthorKeyEn:32/little-unsigned-integer,
            Reserved0:32/little-unsigned-integer,
            ReportData:?CHIP_ID_SIZE/binary,
            Measurement:?LAUNCH_DIGEST_SIZE/binary,
            HostData:?HOST_DATA_SIZE/binary,
            IdKeyDigest:?LAUNCH_DIGEST_SIZE/binary,
            AuthorKeyDigest:?LAUNCH_DIGEST_SIZE/binary,
            ReportId:?REPORT_ID_SIZE/binary,
            ReportIdMa:?REPORT_ID_SIZE/binary,
            ReportedTcb:?TCB_SIZE/binary,
            0:?RESERVED1_BITS, % Reserved1 (?RESERVED1_SIZE bytes)
            ChipId:?CHIP_ID_SIZE/binary,
            CommittedTcb:?TCB_SIZE/binary,
            CurrentBuild:8,
            CurrentMinor:8,
            CurrentMajor:8,
            Reserved2:8,
            CommittedBuild:8,
            CommittedMinor:8,
            CommittedMajor:8,
            Reserved3:8,
            LaunchTcb:?TCB_SIZE/binary,
            0:?RESERVED4_BITS % Reserved4
        >>,
        MainPortionSize = byte_size(MainPortion),
        ?event(snp, {main_portion_constructed, #{
            main_portion_size => MainPortionSize,
            expected_size => ?REPORT_MAIN_PORTION_SIZE,
            padding_needed => ?REPORT_MAIN_PORTION_SIZE - MainPortionSize
        }}),
        % Pad MainPortion to exactly ?REPORT_MAIN_PORTION_SIZE bytes so the
        % signature lands at the offset the binary format requires.
        MainPortionPadded = normalize_binary_size(MainPortion, ?REPORT_MAIN_PORTION_SIZE),
        ?event(snp, {main_portion_padded, #{
            padded_size => byte_size(MainPortionPadded),
            expected_size => ?REPORT_MAIN_PORTION_SIZE
        }}),
        % Construct the signature portion separately to ensure correct insertion.
        % NOTE(review): reconstructed (corrupted in source) — R, then S, then
        % ?SIGNATURE_RESERVED_SIZE zero bytes (?SIGNATURE_RESERVED_BITS bits).
        SignaturePortion = <<SignatureR:?SIGNATURE_R_SIZE/binary,
            SignatureS:?SIGNATURE_S_SIZE/binary,
            0:?SIGNATURE_RESERVED_BITS>>,
        % Verify signature portion before concatenation
        ?event(snp, {signature_portion_constructed, #{
            sig_portion_size => byte_size(SignaturePortion),
            expected_size => ?SIGNATURE_R_SIZE + ?SIGNATURE_S_SIZE + ?SIGNATURE_RESERVED_SIZE,
            sig_r_first_8_hex => snp_util:binary_to_hex_string(binary:part(SignatureR, 0, min(8, byte_size(SignatureR)))),
            sig_s_first_8_hex => snp_util:binary_to_hex_string(binary:part(SignatureS, 0, min(8, byte_size(SignatureS)))),
            portion_r_first_8_hex => snp_util:binary_to_hex_string(binary:part(SignaturePortion, 0, min(8, byte_size(SignaturePortion)))),
            portion_r_at_offset_1016 => case byte_size(SignaturePortion) >= 8 of
                true -> snp_util:binary_to_hex_string(binary:part(SignaturePortion, 0, 8));
                false -> <<"too_small">>
            end
        }}),
        % Concatenate the main portion with the signature portion
        ReportBinary = <<MainPortionPadded/binary, SignaturePortion/binary>>,
        % Verify signature was correctly placed in the constructed binary
        ?event(snp, {after_binary_construction, #{
            report_binary_size => byte_size(ReportBinary),
            expected_size => ?REPORT_SIZE
        }}),
        case byte_size(ReportBinary) >= ?REPORT_MAIN_PORTION_SIZE + ?SIGNATURE_PORTION_SIZE of
            true ->
                <<_:(?REPORT_MAIN_PORTION_SIZE)/binary, SigRFromBinary:?SIGNATURE_R_SIZE/binary, SigSFromBinary:?SIGNATURE_S_SIZE/binary, _/binary>> = ReportBinary,
                ?event(snp, {signature_in_constructed_binary, #{
                    r_first_8_hex => snp_util:binary_to_hex_string(binary:part(SigRFromBinary, 0, min(8, byte_size(SigRFromBinary)))),
                    s_first_8_hex => snp_util:binary_to_hex_string(binary:part(SigSFromBinary, 0, min(8, byte_size(SigSFromBinary)))),
                    r_all_zeros => (SigRFromBinary =:= <<0:?SIGNATURE_R_BITS>>),
                    s_all_zeros => (SigSFromBinary =:= <<0:?SIGNATURE_S_BITS>>)
                }});
            false ->
                ?event(snp, {binary_too_small_for_signature, #{
                    actual_size => byte_size(ReportBinary),
                    required_size => ?REPORT_MAIN_PORTION_SIZE + ?SIGNATURE_PORTION_SIZE
                }})
        end,
        ReportBinary
    catch
        Error:Reason ->
            ?event(snp_error, {report_json_to_binary_conversion_error, #{
                operation => <<"report_json_to_binary">>,
                error => Error,
                reason => Reason,
                suggestion => <<"Check that all required fields are present and have the correct types. Required fields include: version, guest_svn, policy, current_tcb, chip_id, measurement, and signature components.">>
            }}),
            {error, {conversion_error, Error, Reason}}
    end.


%% @doc Validate report schema and field values
%% @param ReportMap map() - Report map to validate
%% @returns ok | {error, ValidationErrors} where ValidationErrors is a list of detailed error messages
-spec validate_report_schema(ReportMap :: map()) -> ok | {error, [binary()]}.
+validate_report_schema(ReportMap) when is_map(ReportMap) -> + ValidationErrors = [], + ValidationErrors1 = validate_required_fields(ReportMap, ValidationErrors), + ValidationErrors2 = validate_version(ReportMap, ValidationErrors1), + ValidationErrors3 = validate_guest_svn(ReportMap, ValidationErrors2), + ValidationErrors4 = validate_policy(ReportMap, ValidationErrors3), + ValidationErrors5 = validate_vmpl(ReportMap, ValidationErrors4), + ValidationErrors6 = validate_sig_algo(ReportMap, ValidationErrors5), + ValidationErrors7 = validate_tcb_fields(ReportMap, ValidationErrors6), + ValidationErrors8 = validate_version_numbers(ReportMap, ValidationErrors7), + ValidationErrors9 = validate_binary_fields(ReportMap, ValidationErrors8), + ValidationErrors10 = validate_signature(ReportMap, ValidationErrors9), + case ValidationErrors10 of + [] -> ok; + Errors -> {error, Errors} + end; +validate_report_schema(InvalidInput) -> + {error, [<<"Report schema validation failed: expected map, got ", + (hb_util:bin(case InvalidInput of + B when is_binary(B) -> "binary"; + L when is_list(L) -> "list"; + _ -> "other" + end))/binary, ".">>]}. + +%% Validate required fields are present +-spec validate_required_fields(ReportMap :: map(), Errors :: [binary()]) -> [binary()]. 
+validate_required_fields(ReportMap, Errors) -> + RequiredFields = [ + <<"version">>, <<"guest_svn">>, <<"policy">>, <<"family_id">>, <<"image_id">>, + <<"vmpl">>, <<"sig_algo">>, <<"current_tcb">>, <<"plat_info">>, + <<"report_data">>, <<"measurement">>, <<"host_data">>, <<"id_key_digest">>, + <<"author_key_digest">>, <<"report_id">>, <<"report_id_ma">>, <<"reported_tcb">>, + <<"chip_id">>, <<"committed_tcb">>, <<"launch_tcb">>, <<"signature">> + ], + MissingFields = lists:filter(fun(Field) -> not maps:is_key(Field, ReportMap) end, RequiredFields), + case MissingFields of + [] -> Errors; + _ -> + MissingFieldsStr = string:join([hb_util:list(F) || F <- MissingFields], ", "), + ErrorMsg = <<"Missing required fields: ", (hb_util:bin(MissingFieldsStr))/binary, + ". All SNP report fields must be present.">>, + [ErrorMsg | Errors] + end. + +%% Validate version field +-spec validate_version(ReportMap :: map(), Errors :: [binary()]) -> [binary()]. +validate_version(ReportMap, Errors) -> + case maps:get(<<"version">>, ReportMap, undefined) of + undefined -> Errors; + Version when is_integer(Version), Version >= 0, Version =< 16#FFFFFFFF -> + Errors; + Version when is_integer(Version) -> + ErrorMsg = <<"Invalid version: expected unsigned 32-bit integer (0-4294967295), got ", + (hb_util:bin(integer_to_list(Version)))/binary, ".">>, + [ErrorMsg | Errors]; + Invalid -> + ErrorMsg = <<"Invalid version type: expected integer, got ", + (hb_util:bin(case Invalid of + B when is_binary(B) -> "binary"; + L when is_list(L) -> "list"; + _ -> "other" + end))/binary, ".">>, + [ErrorMsg | Errors] + end. + +%% Validate guest_svn field +-spec validate_guest_svn(ReportMap :: map(), Errors :: [binary()]) -> [binary()]. 
+validate_guest_svn(ReportMap, Errors) -> + case maps:get(<<"guest_svn">>, ReportMap, undefined) of + undefined -> Errors; + GuestSvn when is_integer(GuestSvn), GuestSvn >= 0, GuestSvn =< 16#FFFFFFFF -> + Errors; + GuestSvn when is_integer(GuestSvn) -> + ErrorMsg = <<"Invalid guest_svn: expected unsigned 32-bit integer (0-4294967295), got ", + (hb_util:bin(integer_to_list(GuestSvn)))/binary, ".">>, + [ErrorMsg | Errors]; + Invalid -> + ErrorMsg = <<"Invalid guest_svn type: expected integer, got ", + (hb_util:bin(case Invalid of + B when is_binary(B) -> "binary"; + L when is_list(L) -> "list"; + _ -> "other" + end))/binary, ".">>, + [ErrorMsg | Errors] + end. + +%% Validate policy field +-spec validate_policy(ReportMap :: map(), Errors :: [binary()]) -> [binary()]. +validate_policy(ReportMap, Errors) -> + case maps:get(<<"policy">>, ReportMap, undefined) of + undefined -> Errors; + Policy when is_integer(Policy), Policy >= 0, Policy =< 16#FFFFFFFFFFFFFFFF -> + Errors; + Policy when is_integer(Policy) -> + ErrorMsg = <<"Invalid policy: expected unsigned 64-bit integer (0-18446744073709551615), got ", + (hb_util:bin(integer_to_list(Policy)))/binary, ".">>, + [ErrorMsg | Errors]; + Invalid -> + ErrorMsg = <<"Invalid policy type: expected integer, got ", + (hb_util:bin(case Invalid of + B when is_binary(B) -> "binary"; + L when is_list(L) -> "list"; + _ -> "other" + end))/binary, ".">>, + [ErrorMsg | Errors] + end. + +%% Validate VMPL field (0-3) +-spec validate_vmpl(ReportMap :: map(), Errors :: [binary()]) -> [binary()]. +validate_vmpl(ReportMap, Errors) -> + case maps:get(<<"vmpl">>, ReportMap, undefined) of + undefined -> Errors; + Vmpl when is_integer(Vmpl), Vmpl >= 0, Vmpl =< 3 -> + Errors; + Vmpl when is_integer(Vmpl) -> + ErrorMsg = <<"Invalid vmpl: expected integer in range 0-3, got ", + (hb_util:bin(integer_to_list(Vmpl)))/binary, + ". 
VMPL (Virtual Machine Privilege Level) must be between 0 and 3.">>, + [ErrorMsg | Errors]; + Invalid -> + ErrorMsg = <<"Invalid vmpl type: expected integer, got ", + (hb_util:bin(case Invalid of + B when is_binary(B) -> "binary"; + L when is_list(L) -> "list"; + _ -> "other" + end))/binary, ".">>, + [ErrorMsg | Errors] + end. + +%% Validate signature algorithm field +-spec validate_sig_algo(ReportMap :: map(), Errors :: [binary()]) -> [binary()]. +validate_sig_algo(ReportMap, Errors) -> + case maps:get(<<"sig_algo">>, ReportMap, undefined) of + undefined -> Errors; + SigAlgo when is_integer(SigAlgo), SigAlgo =:= 1 -> + Errors; % ECDSA-P384_SHA384 = 1 + SigAlgo when is_integer(SigAlgo) -> + ErrorMsg = <<"Invalid sig_algo: expected 1 (ECDSA-P384_SHA384), got ", + (hb_util:bin(integer_to_list(SigAlgo)))/binary, ".">>, + [ErrorMsg | Errors]; + Invalid -> + ErrorMsg = <<"Invalid sig_algo type: expected integer, got ", + (hb_util:bin(case Invalid of + B when is_binary(B) -> "binary"; + L when is_list(L) -> "list"; + _ -> "other" + end))/binary, ".">>, + [ErrorMsg | Errors] + end. + +%% Validate TCB fields (SPL values must be 0-255) +-spec validate_tcb_fields(ReportMap :: map(), Errors :: [binary()]) -> [binary()]. +validate_tcb_fields(ReportMap, Errors) -> + TcbFields = [ + {<<"current_tcb">>, <<"current_tcb">>}, + {<<"reported_tcb">>, <<"reported_tcb">>}, + {<<"committed_tcb">>, <<"committed_tcb">>}, + {<<"launch_tcb">>, <<"launch_tcb">>} + ], + lists:foldl( + fun({FieldName, FieldLabel}, AccErrors) -> + case maps:get(FieldName, ReportMap, undefined) of + undefined -> AccErrors; + TcbMap when is_map(TcbMap) -> + validate_tcb_map(TcbMap, FieldLabel, AccErrors); + Invalid -> + ErrorMsg = <<"Invalid ", FieldLabel/binary, " type: expected map, got ", + (hb_util:bin(case Invalid of + B when is_binary(B) -> "binary"; + L when is_list(L) -> "list"; + _ -> "other" + end))/binary, ".">>, + [ErrorMsg | AccErrors] + end + end, + Errors, + TcbFields + ). 
+ +%% Validate a single TCB map +-spec validate_tcb_map(TCBMap :: map(), FieldLabel :: binary(), Errors :: [binary()]) -> [binary()]. +validate_tcb_map(TCBMap, FieldLabel, Errors) -> + SPLFields = [ + {<<"bootloader">>, <<"bootloader">>}, + {<<"tee">>, <<"tee">>}, + {<<"snp">>, <<"snp">>}, + {<<"microcode">>, <<"microcode">>} + ], + lists:foldl( + fun({FieldName, SPLName}, AccErrors) -> + case maps:get(FieldName, TCBMap, undefined) of + undefined -> + ErrorMsg = <<"Missing ", FieldLabel/binary, ".", SPLName/binary, + ": required SPL field must be present.">>, + [ErrorMsg | AccErrors]; + SPLValue when is_integer(SPLValue), SPLValue >= 0, SPLValue =< ?MAX_SPL_VALUE -> + AccErrors; + SPLValue when is_integer(SPLValue) -> + ErrorMsg = <<"Invalid ", FieldLabel/binary, ".", SPLName/binary, + ": expected integer in range 0-", (hb_util:bin(integer_to_list(?MAX_SPL_VALUE)))/binary, + ", got ", (hb_util:bin(integer_to_list(SPLValue)))/binary, ".">>, + [ErrorMsg | AccErrors]; + Invalid -> + ErrorMsg = <<"Invalid ", FieldLabel/binary, ".", SPLName/binary, + " type: expected integer, got ", + (hb_util:bin(case Invalid of + B when is_binary(B) -> "binary"; + L when is_list(L) -> "list"; + _ -> "other" + end))/binary, ".">>, + [ErrorMsg | AccErrors] + end + end, + Errors, + SPLFields + ). + +%% Validate version numbers (current/committed build/minor/major) +-spec validate_version_numbers(ReportMap :: map(), Errors :: [binary()]) -> [binary()]. 
+validate_version_numbers(ReportMap, Errors) -> + VersionFields = [ + {<<"current_build">>, <<"current_build">>}, + {<<"current_minor">>, <<"current_minor">>}, + {<<"current_major">>, <<"current_major">>}, + {<<"committed_build">>, <<"committed_build">>}, + {<<"committed_minor">>, <<"committed_minor">>}, + {<<"committed_major">>, <<"committed_major">>} + ], + lists:foldl( + fun({FieldName, FieldLabel}, AccErrors) -> + case maps:get(FieldName, ReportMap, undefined) of + undefined -> AccErrors; + Version when is_integer(Version), Version >= 0, Version =< 255 -> + AccErrors; + Version when is_integer(Version) -> + ErrorMsg = <<"Invalid ", FieldLabel/binary, + ": expected unsigned 8-bit integer (0-255), got ", + (hb_util:bin(integer_to_list(Version)))/binary, ".">>, + [ErrorMsg | AccErrors]; + Invalid -> + ErrorMsg = <<"Invalid ", FieldLabel/binary, " type: expected integer, got ", + (hb_util:bin(case Invalid of + B when is_binary(B) -> "binary"; + L when is_list(L) -> "list"; + _ -> "other" + end))/binary, ".">>, + [ErrorMsg | AccErrors] + end + end, + Errors, + VersionFields + ). + +%% Validate binary field sizes +-spec validate_binary_fields(ReportMap :: map(), Errors :: [binary()]) -> [binary()]. 
+validate_binary_fields(ReportMap, Errors) -> + BinaryFields = [ + {<<"family_id">>, ?FAMILY_ID_SIZE, <<"family_id">>}, + {<<"image_id">>, ?IMAGE_ID_SIZE, <<"image_id">>}, + {<<"report_data">>, ?CHIP_ID_SIZE, <<"report_data">>}, + {<<"measurement">>, ?LAUNCH_DIGEST_SIZE, <<"measurement">>}, + {<<"host_data">>, ?HOST_DATA_SIZE, <<"host_data">>}, + {<<"id_key_digest">>, ?LAUNCH_DIGEST_SIZE, <<"id_key_digest">>}, + {<<"author_key_digest">>, ?LAUNCH_DIGEST_SIZE, <<"author_key_digest">>}, + {<<"report_id">>, ?REPORT_ID_SIZE, <<"report_id">>}, + {<<"report_id_ma">>, ?REPORT_ID_SIZE, <<"report_id_ma">>}, + {<<"chip_id">>, ?CHIP_ID_SIZE, <<"chip_id">>} + ], + lists:foldl( + fun({FieldName, ExpectedSize, FieldLabel}, AccErrors) -> + case maps:get(FieldName, ReportMap, undefined) of + undefined -> AccErrors; + FieldValue when is_binary(FieldValue) -> + FieldSize = byte_size(FieldValue), + if + FieldSize =:= ExpectedSize -> AccErrors; + true -> + ErrorMsg = <<"Invalid ", FieldLabel/binary, " size: expected ", + (hb_util:bin(integer_to_list(ExpectedSize)))/binary, + " bytes, got ", (hb_util:bin(integer_to_list(FieldSize)))/binary, ".">>, + [ErrorMsg | AccErrors] + end; + FieldValue when is_list(FieldValue) -> + % Convert list to binary to check size + FieldBinary = hb_util:bin(FieldValue), + FieldBinarySize = byte_size(FieldBinary), + if + FieldBinarySize =:= ExpectedSize -> AccErrors; + true -> + ErrorMsg = <<"Invalid ", FieldLabel/binary, " size: expected ", + (hb_util:bin(integer_to_list(ExpectedSize)))/binary, + " bytes, got ", (hb_util:bin(integer_to_list(FieldBinarySize)))/binary, + " (after converting from list).">>, + [ErrorMsg | AccErrors] + end; + Invalid -> + InvalidType = case Invalid of + I when is_integer(I) -> "integer"; + M when is_map(M) -> "map"; + _ -> "other" + end, + ErrorMsg = <<"Invalid ", FieldLabel/binary, " type: expected binary or list, got ", + (hb_util:bin(InvalidType))/binary, ".">>, + [ErrorMsg | AccErrors] + end + end, + Errors, + BinaryFields + 
). + +%% Validate signature field +-spec validate_signature(ReportMap :: map(), Errors :: [binary()]) -> [binary()]. +validate_signature(ReportMap, Errors) -> + case maps:get(<<"signature">>, ReportMap, undefined) of + undefined -> Errors; + SignatureMap when is_map(SignatureMap) -> + Errors1 = case maps:get(<<"r">>, SignatureMap, undefined) of + undefined -> + [<<"Missing signature.r: required signature component must be present.">> | Errors]; + SignatureR when is_binary(SignatureR) -> + case byte_size(SignatureR) of + ?SIGNATURE_R_SIZE -> Errors; + ActualSize -> + ErrorMsg = <<"Invalid signature.r size: expected ", + (hb_util:bin(integer_to_list(?SIGNATURE_R_SIZE)))/binary, + " bytes, got ", (hb_util:bin(integer_to_list(ActualSize)))/binary, ".">>, + [ErrorMsg | Errors] + end; + SignatureR when is_list(SignatureR) -> + SignatureRBin = hb_util:bin(SignatureR), + case byte_size(SignatureRBin) of + ?SIGNATURE_R_SIZE -> Errors; + ActualSize -> + ErrorMsg = <<"Invalid signature.r size: expected ", + (hb_util:bin(integer_to_list(?SIGNATURE_R_SIZE)))/binary, + " bytes, got ", (hb_util:bin(integer_to_list(ActualSize)))/binary, + " (after converting from list).">>, + [ErrorMsg | Errors] + end; + Invalid -> + ErrorMsg = <<"Invalid signature.r type: expected binary or list, got ", + (hb_util:bin(case Invalid of + I when is_integer(I) -> "integer"; + M when is_map(M) -> "map"; + _ -> "other" + end))/binary, ".">>, + [ErrorMsg | Errors] + end, + Errors2 = case maps:get(<<"s">>, SignatureMap, undefined) of + undefined -> + [<<"Missing signature.s: required signature component must be present.">> | Errors1]; + SignatureS when is_binary(SignatureS) -> + case byte_size(SignatureS) of + ?SIGNATURE_S_SIZE -> Errors1; + ActualSizeS -> + ErrorMsgS = <<"Invalid signature.s size: expected ", + (hb_util:bin(integer_to_list(?SIGNATURE_S_SIZE)))/binary, + " bytes, got ", (hb_util:bin(integer_to_list(ActualSizeS)))/binary, ".">>, + [ErrorMsgS | Errors1] + end; + SignatureS when 
is_list(SignatureS) -> + SignatureSBin = hb_util:bin(SignatureS), + case byte_size(SignatureSBin) of + ?SIGNATURE_S_SIZE -> Errors1; + ActualSizeSList -> + ErrorMsgSList = <<"Invalid signature.s size: expected ", + (hb_util:bin(integer_to_list(?SIGNATURE_S_SIZE)))/binary, + " bytes, got ", (hb_util:bin(integer_to_list(ActualSizeSList)))/binary, + " (after converting from list).">>, + [ErrorMsgSList | Errors1] + end; + InvalidS -> + ErrorMsgS = <<"Invalid signature.s type: expected binary or list, got ", + (hb_util:bin(case InvalidS of + IS when is_integer(IS) -> "integer"; + MS when is_map(MS) -> "map"; + _ -> "other" + end))/binary, ".">>, + [ErrorMsgS | Errors1] + end, + Errors2; + Invalid -> + ErrorMsg = <<"Invalid signature type: expected map, got ", + (hb_util:bin(case Invalid of + B when is_binary(B) -> "binary"; + L when is_list(L) -> "list"; + I when is_integer(I) -> "integer"; + _ -> "other" + end))/binary, ".">>, + [ErrorMsg | Errors] + end. + diff --git a/src/snp_trust.erl b/src/snp_trust.erl new file mode 100644 index 000000000..8829f9b1f --- /dev/null +++ b/src/snp_trust.erl @@ -0,0 +1,132 @@ +%%% @doc Software trust validation for SNP commitment reports. +%%% +%%% This module handles the validation of software configurations against +%%% trusted software lists, including filtering by enforced keys and matching +%%% against trusted configurations. +-module(snp_trust). +-export([execute_is_trusted/3, get_filtered_local_hashes/2, + get_enforced_keys/1, is_software_trusted/3]). +-include("include/hb.hrl"). +-include("include/snp_constants.hrl"). + +%% @doc Validate that all software hashes match trusted configurations. +%% +%% This function ensures that the firmware, kernel, and other system components +%% in the SNP report match approved configurations. The validation process: +%% 1. Extracts local hashes from the message +%% 2. Filters hashes to only include enforced keys +%% 3. Compares filtered hashes against trusted software configurations +%% 4. 
Returns true only if the configuration matches a trusted entry +%% +%% Configuration options in NodeOpts map: +%% - snp_trusted: List of maps containing trusted software configurations +%% - snp_enforced_keys: Keys to enforce during validation (defaults to all +%% committed parameters) +%% +%% @param _M1 Ignored parameter +%% @param Msg The SNP message containing local software hashes +%% @param NodeOpts A map of configuration options including trusted software +%% @returns `{ok, true}' if software is trusted, `{ok, false}' otherwise +-spec execute_is_trusted(M1 :: term(), Msg :: map(), NodeOpts :: map()) -> + {ok, boolean()}. +execute_is_trusted(_M1, Msg, NodeOpts) -> + FilteredLocalHashes = get_filtered_local_hashes(Msg, NodeOpts), + TrustedSoftware = hb_opts:get(snp_trusted, [#{}], NodeOpts), + IsTrusted = + is_software_trusted( + FilteredLocalHashes, + TrustedSoftware, + NodeOpts + ), + ?event(snp_short, {is_all_software_trusted, IsTrusted}), + {ok, IsTrusted}. + +%% @doc Extract local hashes filtered to only include enforced keys. +%% +%% This function retrieves the local software hashes from the message and +%% filters them to only include the keys that are configured for enforcement. +%% Local-hashes keys are normalized to binary so that atom-key and binary-key +%% maps are both handled correctly (avoids empty filter when key types differ). +%% +%% @param Msg The SNP message containing local hashes +%% @param NodeOpts A map of configuration options +%% @returns A map of filtered local hashes with only enforced keys (binary keys) +-spec get_filtered_local_hashes(Msg :: map(), NodeOpts :: map()) -> map(). +get_filtered_local_hashes(Msg, NodeOpts) -> + LocalHashesRaw = hb_ao:get(<<"local-hashes">>, Msg, NodeOpts), + LocalHashes = normalize_map_keys_to_binary(LocalHashesRaw), + EnforcedKeys = get_enforced_keys(NodeOpts), + FilteredLocalHashes = hb_cache:ensure_all_loaded( + maps:with(EnforcedKeys, LocalHashes), + NodeOpts + ), + FilteredLocalHashes. 
+ +%% @doc Normalize a map so all keys are binaries (for consistent filtering with EnforcedKeys). +%% Non-map input is treated as empty map. +-spec normalize_map_keys_to_binary(term()) -> map(). +normalize_map_keys_to_binary(M) when is_map(M) -> + maps:fold( + fun(K, V, Acc) -> + maps:put(ensure_binary_key(K), V, Acc) + end, + #{}, + M + ); +normalize_map_keys_to_binary(_) -> + #{}. + +-spec ensure_binary_key(atom() | binary() | term()) -> binary(). +ensure_binary_key(K) when is_binary(K) -> K; +ensure_binary_key(K) when is_atom(K) -> atom_to_binary(K, utf8); +ensure_binary_key(K) -> hb_util:bin(K). + +%% @doc Get the list of enforced keys for software validation. +%% +%% This function retrieves the configuration specifying which software +%% component keys should be enforced during trust validation. +%% +%% @param NodeOpts A map of configuration options +%% @returns A list of binary keys that should be enforced +-spec get_enforced_keys(NodeOpts :: map()) -> [binary()]. +get_enforced_keys(NodeOpts) -> + lists:map( + fun atom_to_binary/1, + hb_opts:get(snp_enforced_keys, ?COMMITTED_PARAMETERS, NodeOpts) + ). + +%% @doc Check if filtered local hashes match any trusted configurations. +%% +%% This function compares the filtered local hashes against a list of +%% trusted software configurations, returning true if any configuration +%% matches exactly. It handles three cases: +%% 1. Empty list of trusted configurations (returns false) +%% 2. Valid list of trusted configurations (performs matching) +%% 3. Invalid trusted software configuration (returns false) +%% +%% @param FilteredLocalHashes The software hashes to validate +%% @param TrustedSoftware List of trusted software configurations or invalid input +%% @param NodeOpts Configuration options for matching +%% @returns `true' if hashes match a trusted configuration, `false' otherwise +-spec is_software_trusted(map(), [] | [map()] | term(), map()) -> boolean(). 
+is_software_trusted(_FilteredLocalHashes, [], _NodeOpts) -> + false; +is_software_trusted(FilteredLocalHashes, TrustedSoftware, NodeOpts) + when is_list(TrustedSoftware) -> + lists:any( + fun(TrustedMap) -> + TrustedNormalized = normalize_map_keys_to_binary(TrustedMap), + Match = + hb_message:match( + FilteredLocalHashes, + TrustedNormalized, + primary, + NodeOpts + ), + is_map(TrustedMap) andalso Match == true + end, + TrustedSoftware + ); +is_software_trusted(_FilteredLocalHashes, _TrustedSoftware, _NodeOpts) -> + false. + diff --git a/src/snp_util.erl b/src/snp_util.erl new file mode 100644 index 000000000..641753ce3 --- /dev/null +++ b/src/snp_util.erl @@ -0,0 +1,196 @@ +%%% @doc Shared utility functions for SNP commitment reports. +%%% +%%% This module provides common utility functions used across SNP modules to +%%% eliminate code duplication and ensure consistent behavior. +-module(snp_util). +-export([hex_to_binary/1, binary_to_hex_string/1, hex_char_to_int/1]). +-export([get_type_name/1]). +-export([build_validation_error/4, build_field_error/3]). +-export([is_pem_binary/1, is_json_binary/1]). +-export([safe_bin/1, safe_json_decode/1]). +-export([wrap_error/3, wrap_error/4]). +-include("include/hb.hrl"). +-include("include/snp_constants.hrl"). + +%% Standard error reason types +-type error_reason() :: + {validation_failed, FieldName :: binary(), Reason :: term(), Context :: map()} | + {conversion_failed, From :: term(), To :: binary(), Reason :: term()} | + {missing_field, FieldName :: binary()} | + {invalid_type, FieldName :: binary(), Expected :: binary(), Actual :: term()} | + {network_error, Operation :: binary(), Reason :: term()} | + {system_error, Operation :: binary(), Reason :: term()} | + {operation_failed, Step :: atom(), Reason :: term(), Context :: map()}. + +%% Common result types +-type result(T) :: {ok, T} | {error, error_reason()}. +-type maybe_result(T) :: T | {error, error_reason()}. + +%% @doc Convert hex string to binary. 
+%% @param Hex binary() - Hex string (must have even number of bytes, valid hex chars) +%% @returns {ok, binary()} on success, {error, invalid_hex} on invalid or odd-length input +%% @example +%% hex_to_binary(<<"48656c6c6f">>) =:= {ok, <<"Hello">>} +-spec hex_to_binary(Hex :: binary()) -> {ok, binary()} | {error, invalid_hex}. +hex_to_binary(Hex) when is_binary(Hex), byte_size(Hex) rem 2 =:= 0 -> + ?event(snp, {hex_to_binary_start, #{hex_size => byte_size(Hex)}}), + try + Result = << <<(hex_char_to_int(H) bsl 4 + hex_char_to_int(L))>> || <> <= Hex >>, + ?event(snp, {hex_to_binary_success, #{result_size => byte_size(Result)}}), + {ok, Result} + catch + _:_ -> + ?event(snp_error, {hex_to_binary_error, #{hex_size => byte_size(Hex)}}), + {error, invalid_hex} + end; +hex_to_binary(Hex) -> + ?event(snp_error, {hex_to_binary_invalid_input, #{hex_size => case is_binary(Hex) of true -> byte_size(Hex); false -> undefined end}}), + {error, invalid_hex}. + +%% @doc Convert binary to hex string for logging. +%% @param Binary binary() - Binary to convert +%% @returns string() - Hex string representation +%% @example +%% binary_to_hex_string(<<"Hello">>) =:= "48656c6c6f" % true +-spec binary_to_hex_string(Binary :: binary()) -> string(). +binary_to_hex_string(Binary) -> + hb_util:list(hb_util:to_hex(Binary)). + +%% @doc Convert hex character to integer. +%% @param Char char() - Hex character ('0'-'9', 'a'-'f', 'A'-'F') +%% @returns 0..15 - Integer value of hex character +%% @example +%% hex_char_to_int($A) =:= 10 % true +-spec hex_char_to_int(Char :: char()) -> 0..15. 
+hex_char_to_int($0) -> 0; +hex_char_to_int($1) -> 1; +hex_char_to_int($2) -> 2; +hex_char_to_int($3) -> 3; +hex_char_to_int($4) -> 4; +hex_char_to_int($5) -> 5; +hex_char_to_int($6) -> 6; +hex_char_to_int($7) -> 7; +hex_char_to_int($8) -> 8; +hex_char_to_int($9) -> 9; +hex_char_to_int($a) -> 10; +hex_char_to_int($A) -> 10; +hex_char_to_int($b) -> 11; +hex_char_to_int($B) -> 11; +hex_char_to_int($c) -> 12; +hex_char_to_int($C) -> 12; +hex_char_to_int($d) -> 13; +hex_char_to_int($D) -> 13; +hex_char_to_int($e) -> 14; +hex_char_to_int($E) -> 14; +hex_char_to_int($f) -> 15; +hex_char_to_int($F) -> 15. + +%% @doc Get type name of a term for error messages. +%% @param T term() - Term to get type name for +%% @returns binary() - Type name as binary +%% @example +%% get_type_name(<<"test">>) =:= <<"binary">> % true +%% get_type_name([1,2,3]) =:= <<"list">> % true +-spec get_type_name(term()) -> binary(). +get_type_name(T) when is_binary(T) -> <<"binary">>; +get_type_name(T) when is_list(T) -> <<"list">>; +get_type_name(T) when is_map(T) -> <<"map">>; +get_type_name(T) when is_integer(T) -> <<"integer">>; +get_type_name(T) when is_atom(T) -> <<"atom">>; +get_type_name(_) -> <<"other">>. + +%% @doc Build a validation error message. +%% @param FieldName binary() - Name of the field being validated +%% @param ExpectedType binary() - Expected type description +%% @param ActualValue term() - Actual value that failed validation +%% @param Suggestion binary() - Suggestion for fixing the error +%% @returns binary() - Formatted error message +-spec build_validation_error(FieldName :: binary(), ExpectedType :: binary(), + ActualValue :: term(), Suggestion :: binary()) -> binary(). +build_validation_error(FieldName, ExpectedType, ActualValue, Suggestion) -> + <>. + +%% @doc Build a field error message. 
+%% @param FieldName binary() - Name of the field +%% @param ExpectedType binary() - Expected type description +%% @param ActualValue term() - Actual value that failed validation +%% @returns binary() - Formatted error message +-spec build_field_error(FieldName :: binary(), ExpectedType :: binary(), + ActualValue :: term()) -> binary(). +build_field_error(FieldName, ExpectedType, ActualValue) -> + <<"Invalid ", FieldName/binary, " type: expected ", ExpectedType/binary, + ", got ", (get_type_name(ActualValue))/binary, ".">>. + +%% @doc Check if binary is PEM format. +%% @param PemBinary binary() - Binary to check +%% @returns boolean() - true if binary appears to be PEM format +-spec is_pem_binary(binary()) -> boolean(). +is_pem_binary(<<"-----BEGIN", _/binary>>) -> true; +is_pem_binary(_) -> false. + +%% @doc Check if binary is JSON format (basic check). +%% @param JsonBinary binary() - Binary to check +%% @returns boolean() - true if binary appears to be JSON format +-spec is_json_binary(binary()) -> boolean(). +is_json_binary(<<"{", _/binary>>) -> true; +is_json_binary(<<"[", _/binary>>) -> true; +is_json_binary(_) -> false. + +%% @doc Safely convert a value to binary, handling errors. +%% @param Value term() - Value to convert +%% @returns {ok, binary()} | {error, error_reason()} +-spec safe_bin(term()) -> {ok, binary()} | {error, error_reason()}. +safe_bin(Value) -> + try + Binary = hb_util:bin(Value), + case is_binary(Binary) of + true -> {ok, Binary}; + false -> {error, {conversion_failed, Value, <<"binary">>, <<"hb_util:bin returned non-binary">>}} + end + catch + Error:Reason -> + {error, {conversion_failed, Value, <<"binary">>, {Error, Reason}}} + end. + +%% @doc Safely decode JSON, handling errors. +%% @param JsonBinary binary() - JSON string to decode +%% @returns {ok, map()} | {error, error_reason()} +-spec safe_json_decode(binary()) -> {ok, map()} | {error, error_reason()}. 
+safe_json_decode(JsonBinary) when is_binary(JsonBinary) -> + try + Decoded = hb_json:decode(JsonBinary), + case Decoded of + Map when is_map(Map) -> {ok, Map}; + Other -> + {error, {conversion_failed, JsonBinary, <<"map">>, + {invalid_format, <<"JSON decoded to ", (get_type_name(Other))/binary>>}}} + end + catch + Error:Reason -> + {error, {conversion_failed, JsonBinary, <<"map">>, {Error, Reason}}} + end; +safe_json_decode(Invalid) -> + {error, {invalid_type, <<"json">>, <<"binary">>, Invalid}}. + +%% @doc Wrap an error with operation context. +%% @param Step atom() - The step/operation that failed +%% @param Reason term() - The original error reason +%% @param Context map() - Additional context about the operation +%% @returns {error, error_reason()} +-spec wrap_error(Step :: atom(), Reason :: term(), Context :: map()) -> + {error, error_reason()}. +wrap_error(Step, Reason, Context) -> + {error, {operation_failed, Step, Reason, Context}}. + +%% @doc Wrap an error with operation context and field name. +%% @param Step atom() - The step/operation that failed +%% @param FieldName binary() - The field that caused the error +%% @param Reason term() - The original error reason +%% @param Context map() - Additional context about the operation +%% @returns {error, error_reason()} +-spec wrap_error(Step :: atom(), FieldName :: binary(), Reason :: term(), Context :: map()) -> + {error, error_reason()}. +wrap_error(Step, FieldName, Reason, Context) -> + {error, {validation_failed, FieldName, Reason, Context#{step => Step}}}. + diff --git a/src/snp_validation.erl b/src/snp_validation.erl new file mode 100644 index 000000000..a324734cf --- /dev/null +++ b/src/snp_validation.erl @@ -0,0 +1,379 @@ +%%% @doc Centralized input validation for SNP commitment reports. +%%% +%%% This module provides consistent validation functions for common input types +%%% used across SNP modules, including ChipId, SPL values, report binaries, +%%% and PEM certificates. +-module(snp_validation). 
+-export([validate_chip_id/1, validate_spl_value/2, validate_spl_values/4, + validate_report_binary/1, validate_pem_binary/1]). +-export([validate_size/3, validate_type/3, validate_range/4]). +-include("include/hb.hrl"). +-include("include/snp_constants.hrl"). +-include("include/snp_guids.hrl"). + +%% Type definitions for validation results +-type validation_result(T) :: {ok, T} | {error, binary()}. +-type spl_name() :: atom() | binary(). +-type spl_value() :: 0..255. + +%% @doc Validate ChipId is exactly 64 bytes. +%% @param ChipId The chip ID to validate (can be binary or list) +%% @returns {ok, ChipIdBinary} if valid, {error, Reason} if invalid +-spec validate_chip_id(ChipId :: binary() | list()) -> + validation_result(binary()). +validate_chip_id(ChipId) when is_binary(ChipId) -> + case byte_size(ChipId) of + ?CHIP_ID_SIZE -> + {ok, ChipId}; + ActualSize -> + ErrorMsg = <<"ChipId validation failed: expected exactly ", + (hb_util:bin(integer_to_list(?CHIP_ID_SIZE)))/binary, + " bytes, got ", (hb_util:bin(integer_to_list(ActualSize)))/binary, + ". Ensure ChipId is a ", (hb_util:bin(integer_to_list(?CHIP_ID_SIZE)))/binary, + "-byte binary from the SNP report.">>, + ?event(snp_error, {validate_chip_id_failed, #{ + operation => <<"validate_chip_id">>, + expected_size => ?CHIP_ID_SIZE, + actual_size => ActualSize, + chip_id_type => <<"binary">>, + suggestion => <<"Ensure ChipId is exactly ", + (hb_util:bin(integer_to_list(?CHIP_ID_SIZE)))/binary, + " bytes. Extract it from the 'chip_id' field in the SNP report.">> + }}), + {error, ErrorMsg} + end; +validate_chip_id(ChipId) when is_list(ChipId) -> + case length(ChipId) of + ?CHIP_ID_SIZE -> + ChipIdBinary = hb_util:bin(ChipId), + {ok, ChipIdBinary}; + ActualLength -> + ErrorMsg = <<"ChipId validation failed: expected list of exactly ", + (hb_util:bin(integer_to_list(?CHIP_ID_SIZE)))/binary, + " bytes, got ", (hb_util:bin(integer_to_list(ActualLength)))/binary, + ". 
Ensure ChipId is a list containing ", + (hb_util:bin(integer_to_list(?CHIP_ID_SIZE)))/binary, + " bytes from the SNP report.">>, + ?event(snp_error, {validate_chip_id_failed, #{ + operation => <<"validate_chip_id">>, + expected_size => ?CHIP_ID_SIZE, + actual_size => ActualLength, + chip_id_type => <<"list">>, + suggestion => <<"Ensure ChipId is a list containing exactly ", + (hb_util:bin(integer_to_list(?CHIP_ID_SIZE)))/binary, + " bytes. Extract it from the 'chip_id' field in the SNP report.">> + }}), + {error, ErrorMsg} + end; +validate_chip_id(Invalid) -> + TypeName = snp_util:get_type_name(Invalid), + ErrorMsg = <<"ChipId validation failed: expected binary or list, got ", TypeName/binary, ".">>, + ?event(snp_error, {validate_chip_id_failed, #{ + operation => <<"validate_chip_id">>, + expected_type => <<"binary or list">>, + actual_type => TypeName, + suggestion => <<"ChipId must be a binary or list containing exactly ", + (hb_util:bin(integer_to_list(?CHIP_ID_SIZE)))/binary, + " bytes. Extract it from the 'chip_id' field in the SNP report.">> + }}), + {error, ErrorMsg}. + +%% @doc Validate a single SPL value is in valid range (0-255). +%% @param SPLValue The SPL value to validate +%% @param SPLName The name of the SPL field (for error messages) +%% @returns {ok, SPLValue} if valid, {error, Reason} if invalid +-spec validate_spl_value(SPLValue :: term(), SPLName :: spl_name()) -> + validation_result(spl_value()). 
+%% Valid value: an integer within 0..?MAX_SPL_VALUE.
+validate_spl_value(SPLValue, _SPLName) when is_integer(SPLValue),
+                                            SPLValue >= 0,
+                                            SPLValue =< ?MAX_SPL_VALUE ->
+    {ok, SPLValue};
+%% Integer but out of range: report the offending value.
+validate_spl_value(SPLValue, SPLName) when is_integer(SPLValue) ->
+    SPLNameBin = spl_name_to_binary(SPLName),
+    ErrorMsg = <<"SPL validation failed: ", SPLNameBin/binary,
+        " expected integer in range 0-", (hb_util:bin(integer_to_list(?MAX_SPL_VALUE)))/binary,
+        ", got ", (hb_util:bin(integer_to_list(SPLValue)))/binary, ".">>,
+    ?event(snp_error, {validate_spl_value_failed, #{
+        operation => <<"validate_spl_value">>,
+        spl_name => SPLNameBin,
+        expected_range => <<"0-", (hb_util:bin(integer_to_list(?MAX_SPL_VALUE)))/binary>>,
+        actual_value => SPLValue,
+        suggestion => <<"Ensure ", SPLNameBin/binary,
+            " is an integer in the range 0-", (hb_util:bin(integer_to_list(?MAX_SPL_VALUE)))/binary,
+            ". Check the TCB structure in the SNP report.">>
+    }}),
+    {error, ErrorMsg};
+%% Not an integer at all: report the actual type.
+validate_spl_value(Invalid, SPLName) ->
+    SPLNameBin = spl_name_to_binary(SPLName),
+    TypeName = snp_util:get_type_name(Invalid),
+    ErrorMsg = <<"SPL validation failed: ", SPLNameBin/binary,
+        " expected integer, got ", (hb_util:bin(TypeName))/binary, ".">>,
+    ?event(snp_error, {validate_spl_value_failed, #{
+        operation => <<"validate_spl_value">>,
+        spl_name => SPLNameBin,
+        expected_type => <<"integer">>,
+        actual_type => TypeName,
+        suggestion => <<"Ensure ", SPLNameBin/binary,
+            " is an integer in the range 0-", (hb_util:bin(integer_to_list(?MAX_SPL_VALUE)))/binary,
+            ". Check the TCB structure in the SNP report.">>
+    }}),
+    {error, ErrorMsg}.
+
+%% Coerce an SPL field name (atom or binary) to a binary for error messages.
+%% Any other term falls back to the generic name <<"spl">>. This hoists the
+%% coercion that was previously duplicated verbatim in two clauses above.
+-spec spl_name_to_binary(term()) -> binary().
+spl_name_to_binary(Name) when is_atom(Name) -> hb_util:bin(atom_to_list(Name));
+spl_name_to_binary(Name) when is_binary(Name) -> Name;
+spl_name_to_binary(_) -> <<"spl">>.
+
+%% @doc Validate all four SPL values are in valid range (0-255).
+%% @param BootloaderSPL Bootloader SPL value +%% @param TeeSPL TEE SPL value +%% @param SnpSPL SNP SPL value +%% @param UcodeSPL Microcode SPL value +%% @returns ok if all valid, {error, Reason} if any invalid +-spec validate_spl_values(BootloaderSPL :: integer(), TeeSPL :: integer(), + SnpSPL :: integer(), UcodeSPL :: integer()) -> + ok | {error, binary()}. +validate_spl_values(BootloaderSPL, TeeSPL, SnpSPL, UcodeSPL) -> + SPLValues = [ + {bootloader, BootloaderSPL}, + {tee, TeeSPL}, + {snp, SnpSPL}, + {ucode, UcodeSPL} + ], + ValidationResults = lists:map( + fun({Name, Value}) -> + {Name, validate_spl_value(Value, Name)} + end, + SPLValues + ), + InvalidResults = lists:filter( + fun({_Name, Result}) -> + case Result of + {error, _} -> true; + _ -> false + end + end, + ValidationResults + ), + case InvalidResults of + [] -> + ok; + _ -> + InvalidDetails = lists:map( + fun({Name, {error, ErrorMsg}}) -> + <<(hb_util:bin(atom_to_list(Name)))/binary, ": ", ErrorMsg/binary>> + end, + InvalidResults + ), + ErrorMsg = <<"SPL validation failed: ", + (hb_util:bin(string:join([hb_util:list(D) || D <- InvalidDetails], "; ")))/binary, + ". All SPL values must be integers in range 0-", + (hb_util:bin(integer_to_list(?MAX_SPL_VALUE)))/binary, ".">>, + ?event(snp_error, {validate_spl_values_failed, #{ + operation => <<"validate_spl_values">>, + invalid_count => length(InvalidResults), + invalid_values => InvalidResults, + expected_range => <<"0-", (hb_util:bin(integer_to_list(?MAX_SPL_VALUE)))/binary>>, + suggestion => <<"Ensure all SPL values (bootloader, tee, snp, ucode) are integers in the range 0-", + (hb_util:bin(integer_to_list(?MAX_SPL_VALUE)))/binary, + ". Check the TCB structure in the SNP report.">> + }}), + {error, ErrorMsg} + end. + +%% @doc Validate report binary is exactly 1184 bytes. 
+%% @param ReportBinary The report binary to validate +%% @returns {ok, ReportBinary} if valid, {error, Reason} if invalid +-spec validate_report_binary(ReportBinary :: binary()) -> + validation_result(binary()). +validate_report_binary(ReportBinary) when is_binary(ReportBinary) -> + case byte_size(ReportBinary) of + ?REPORT_SIZE -> + {ok, ReportBinary}; + ActualSize -> + ErrorMsg = <<"Report binary validation failed: expected exactly ", + (hb_util:bin(integer_to_list(?REPORT_SIZE)))/binary, + " bytes, got ", (hb_util:bin(integer_to_list(ActualSize)))/binary, + ". Ensure the report is a complete ", + (hb_util:bin(integer_to_list(?REPORT_SIZE)))/binary, + "-byte binary as specified in the SNP report format.">>, + ?event(snp_error, {validate_report_binary_failed, #{ + operation => <<"validate_report_binary">>, + expected_size => ?REPORT_SIZE, + actual_size => ActualSize, + suggestion => <<"Ensure the report binary is exactly ", + (hb_util:bin(integer_to_list(?REPORT_SIZE)))/binary, + " bytes. The SNP report format requires a fixed-size binary structure.">> + }}), + {error, ErrorMsg} + end; +validate_report_binary(Invalid) -> + TypeName = snp_util:get_type_name(Invalid), + ErrorMsg = <<"Report binary validation failed: expected binary, got ", (hb_util:bin(TypeName))/binary, ".">>, + ?event(snp_error, {validate_report_binary_failed, #{ + operation => <<"validate_report_binary">>, + expected_type => <<"binary">>, + actual_type => TypeName, + suggestion => <<"Ensure the report is a binary containing exactly ", + (hb_util:bin(integer_to_list(?REPORT_SIZE)))/binary, + " bytes. Convert JSON to binary using snp_report_format:report_json_to_binary/1 if needed.">> + }}), + {error, ErrorMsg}. + +%% @doc Validate binary is PEM format. +%% @param PemBinary The binary to validate +%% @returns {ok, PemBinary} if valid, {error, Reason} if invalid +-spec validate_pem_binary(PemBinary :: binary()) -> + validation_result(binary()). 
+validate_pem_binary(PemBinary) when is_binary(PemBinary) -> + case snp_util:is_pem_binary(PemBinary) of + true -> + {ok, PemBinary}; + false -> + ErrorMsg = <<"PEM validation failed: binary does not appear to be in PEM format. ", + "PEM format should start with '-----BEGIN'.">>, + ?event(snp_error, {validate_pem_binary_failed, #{ + operation => <<"validate_pem_binary">>, + binary_preview => binary:part(PemBinary, 0, min(50, byte_size(PemBinary))), + suggestion => <<"Ensure the certificate is in PEM format (text-based, starts with '-----BEGIN'). ", + "If you have DER format, convert it to PEM first.">> + }}), + {error, ErrorMsg} + end; +validate_pem_binary(Invalid) -> + TypeName = snp_util:get_type_name(Invalid), + ErrorMsg = <<"PEM validation failed: expected binary, got ", (hb_util:bin(TypeName))/binary, ".">>, + ?event(snp_error, {validate_pem_binary_failed, #{ + operation => <<"validate_pem_binary">>, + expected_type => <<"binary">>, + actual_type => TypeName, + suggestion => <<"Ensure the certificate is a binary in PEM format (text-based, starts with '-----BEGIN').">> + }}), + {error, ErrorMsg}. + +%% @doc Generic validation helper: validate size of binary or list. +%% @param Value binary() | list() - Value to validate +%% @param ExpectedSize non_neg_integer() - Expected size +%% @param FieldName binary() - Field name for error messages +%% @returns {ok, binary()} if valid, {error, binary()} if invalid +-spec validate_size(Value :: binary() | list(), ExpectedSize :: non_neg_integer(), + FieldName :: binary()) -> validation_result(binary()). 
+%% Binary input: compare byte size against the expected size.
+validate_size(Value, ExpectedSize, FieldName) when is_binary(Value) ->
+    ActualSize = byte_size(Value),
+    case ActualSize =:= ExpectedSize of
+        true -> {ok, Value};
+        false ->
+            % NOTE(review): the original error binary was garbled to `<>`;
+            % reconstructed here in the module's established message style.
+            ErrorMsg = <<FieldName/binary, " validation failed: expected exactly ",
+                (hb_util:bin(integer_to_list(ExpectedSize)))/binary, " bytes, got ",
+                (hb_util:bin(integer_to_list(ActualSize)))/binary, ".">>,
+            ?event(snp_error, {validate_size_failed, #{
+                operation => <<"validate_size">>,
+                field_name => FieldName,
+                expected_size => ExpectedSize,
+                actual_size => ActualSize,
+                suggestion => <<"Ensure ", FieldName/binary, " is exactly ",
+                    (hb_util:bin(integer_to_list(ExpectedSize)))/binary, " bytes.">>
+            }}),
+            {error, ErrorMsg}
+    end;
+%% List input: compare element count; normalize to a binary on success.
+validate_size(Value, ExpectedSize, FieldName) when is_list(Value) ->
+    ActualSize = length(Value),
+    case ActualSize =:= ExpectedSize of
+        true ->
+            ValueBinary = hb_util:bin(Value),
+            {ok, ValueBinary};
+        false ->
+            ErrorMsg = <<FieldName/binary, " validation failed: expected list of exactly ",
+                (hb_util:bin(integer_to_list(ExpectedSize)))/binary, " bytes, got ",
+                (hb_util:bin(integer_to_list(ActualSize)))/binary, ".">>,
+            ?event(snp_error, {validate_size_failed, #{
+                operation => <<"validate_size">>,
+                field_name => FieldName,
+                expected_size => ExpectedSize,
+                actual_size => ActualSize,
+                suggestion => <<"Ensure ", FieldName/binary, " is a list containing exactly ",
+                    (hb_util:bin(integer_to_list(ExpectedSize)))/binary, " bytes.">>
+            }}),
+            {error, ErrorMsg}
+    end;
+%% Anything else is the wrong type entirely.
+validate_size(Invalid, _ExpectedSize, FieldName) ->
+    TypeName = snp_util:get_type_name(Invalid),
+    ErrorMsg = <<FieldName/binary, " validation failed: expected binary or list, got ",
+        TypeName/binary, ".">>,
+    ?event(snp_error, {validate_size_failed, #{
+        operation => <<"validate_size">>,
+        field_name => FieldName,
+        expected_type => <<"binary or list">>,
+        actual_type => TypeName,
+        suggestion => <<"Ensure ", FieldName/binary, " is a binary or list.">>
+    }}),
+    {error, ErrorMsg}.
+
+%% @doc Generic validation helper: validate type of a value.
+%% @param Value term() - Value to validate
+%% @param TypeCheck fun((term()) -> boolean()) - Function to check if value is correct type
+%% @param FieldName binary() - Field name for error messages
+%% @returns ok if valid, {error, binary()} if invalid
+-spec validate_type(Value :: term(), TypeCheck :: fun((term()) -> boolean()),
+    FieldName :: binary()) -> ok | {error, binary()}.
+%% Apply the caller-supplied predicate; only arity-1 funs are accepted.
+validate_type(Value, TypeCheck, FieldName) when is_function(TypeCheck, 1) ->
+    case TypeCheck(Value) of
+        true -> ok;
+        false ->
+            TypeName = snp_util:get_type_name(Value),
+            % NOTE(review): the original error binary was garbled to `<>`;
+            % reconstructed here in the module's established message style.
+            ErrorMsg = <<FieldName/binary, " validation failed: value has incorrect type ",
+                TypeName/binary, ".">>,
+            ?event(snp_error, {validate_type_failed, #{
+                operation => <<"validate_type">>,
+                field_name => FieldName,
+                actual_type => TypeName,
+                suggestion => <<"Ensure ", FieldName/binary, " has the correct type.">>
+            }}),
+            {error, ErrorMsg}
+    end.
+
+%% @doc Generic validation helper: validate integer is in valid range.
+%% @param Value integer() - Value to validate
+%% @param Min integer() - Minimum allowed value (inclusive)
+%% @param Max integer() - Maximum allowed value (inclusive)
+%% @param FieldName binary() - Field name for error messages
+%% @returns {ok, integer()} if valid, {error, binary()} if invalid
+-spec validate_range(Value :: integer(), Min :: integer(), Max :: integer(),
+    FieldName :: binary()) -> validation_result(integer()).
+%% Integer input: inclusive bounds check.
+validate_range(Value, Min, Max, FieldName) when is_integer(Value) ->
+    case Value >= Min andalso Value =< Max of
+        true -> {ok, Value};
+        false ->
+            ErrorMsg = <<FieldName/binary, " validation failed: expected integer in range ",
+                (hb_util:bin(integer_to_list(Min)))/binary, "-",
+                (hb_util:bin(integer_to_list(Max)))/binary, ", got ",
+                (hb_util:bin(integer_to_list(Value)))/binary, ".">>,
+            ?event(snp_error, {validate_range_failed, #{
+                operation => <<"validate_range">>,
+                field_name => FieldName,
+                expected_range => <<(hb_util:bin(integer_to_list(Min)))/binary, "-",
+                    (hb_util:bin(integer_to_list(Max)))/binary>>,
+                actual_value => Value,
+                suggestion => <<"Ensure ", FieldName/binary,
+                    " is an integer in the range ",
+                    (hb_util:bin(integer_to_list(Min)))/binary, "-",
+                    (hb_util:bin(integer_to_list(Max)))/binary, ".">>
+            }}),
+            {error, ErrorMsg}
+    end;
+%% Non-integer input: report the actual type.
+validate_range(Invalid, _Min, _Max, FieldName) ->
+    TypeName = snp_util:get_type_name(Invalid),
+    ErrorMsg = <<FieldName/binary, " validation failed: expected integer, got ",
+        TypeName/binary, ".">>,
+    ?event(snp_error, {validate_range_failed, #{
+        operation => <<"validate_range">>,
+        field_name => FieldName,
+        expected_type => <<"integer">>,
+        actual_type => TypeName,
+        suggestion => <<"Ensure ", FieldName/binary, " is an integer.">>
+    }}),
+    {error, ErrorMsg}.
+ diff --git a/src/snp_verification.erl b/src/snp_verification.erl new file mode 100644 index 000000000..2ea710e2e --- /dev/null +++ b/src/snp_verification.erl @@ -0,0 +1,929 @@ +%%% @doc Verification functions for SNP commitment reports. +%%% +%%% This module handles verification of SNP attestation reports, including +%%% measurement verification, signature verification, and higher-level +%%% verification pipelines. +-module(snp_verification). +-export([verify_measurement/2, verify_signature/3, verify_signature_and_address/3, + verify_debug_disabled/1, verify_measurement/3, verify_report_integrity/2, + verify_nonce/4, verify_trusted_software/3, is_verification_failure/1, + verify/3]). +-include("include/hb.hrl"). +-include("include/snp_constants.hrl"). +-include("include/snp_guids.hrl"). + +%% Type definitions +-type verification_result() :: {ok, true} | {ok, false} | {error, term()}. +-type trusted_software_config() :: map(). % Map containing trusted software hashes/config +-type trusted_software_list() :: [trusted_software_config()]. + +%% Helper function to validate verification configuration options +-spec validate_verify_config(NodeOpts :: map()) -> {ok, map()} | {error, term()}. +validate_verify_config(NodeOpts) -> + maybe + % Validate snp_trusted (required) + {ok, _} ?= validate_snp_trusted_for_verify(NodeOpts), + % Validate snp_enforced_keys (optional, but if present must be valid) + {ok, _} ?= validate_snp_enforced_keys(NodeOpts), + {ok, NodeOpts} + else + {error, Reason} -> {error, Reason}; + Error -> {error, {config_validation_error, Error}} + end. + +%% Helper function to validate snp_trusted for verification +-spec validate_snp_trusted_for_verify(NodeOpts :: map()) -> {ok, trusted_software_list()} | {error, term()}. 
+validate_snp_trusted_for_verify(NodeOpts) -> + case hb_opts:get(snp_trusted, [#{}], NodeOpts) of + [] -> + ?event(snp_error, {config_validation_failed, #{ + option => <<"snp_trusted">>, + operation => <<"verify">>, + reason => <<"empty_list">>, + expected => <<"Non-empty list of trusted software configuration maps">>, + suggestion => <<"snp_trusted must contain at least one trusted software configuration map for verification.">> + }}), + {error, {empty_trusted_configs, <<"snp_trusted cannot be empty for verification">>}}; + TrustedList when is_list(TrustedList) -> + % Validate each trusted config in the list + validate_trusted_configs_list_for_verify(TrustedList, 0); + InvalidTrusted -> + ?event(snp_error, {config_validation_failed, #{ + option => <<"snp_trusted">>, + operation => <<"verify">>, + actual_type => snp_util:get_type_name(InvalidTrusted), + expected => <<"list of maps">>, + suggestion => <<"snp_trusted must be a list of maps, each containing trusted software configuration.">> + }}), + {error, {invalid_trusted_type, <<"snp_trusted must be a list">>}} + end. + +%% Helper function to validate each trusted config in the list for verification +-spec validate_trusted_configs_list_for_verify(TrustedList :: [map()], Index :: non_neg_integer()) -> + {ok, trusted_software_list()} | {error, term()}. 
+validate_trusted_configs_list_for_verify([], _Index) -> + {ok, []}; +validate_trusted_configs_list_for_verify([Config | Rest], Index) -> + case is_map(Config) of + true -> + % Validate that config contains at least some expected keys + ConfigKeys = maps:keys(Config), + BinaryKeys = [K || K <- ConfigKeys, is_binary(K)], + AtomKeys = [K || K <- ConfigKeys, is_atom(K)], + AllKeys = BinaryKeys ++ AtomKeys, + case length(AllKeys) > 0 of + true -> + validate_trusted_configs_list_for_verify(Rest, Index + 1); + false -> + ?event(snp_error, {config_validation_failed, #{ + option => <<"snp_trusted">>, + operation => <<"verify">>, + index => Index, + reason => <<"empty_config_map">>, + expected => <<"Map with at least one configuration key">>, + suggestion => <<"Each trusted software configuration must contain at least one key (e.g., firmware, kernel, vcpus, etc.).">> + }}), + {error, {empty_trusted_config, Index, <<"Trusted config at index ", (hb_util:bin(integer_to_list(Index)))/binary, " is empty">>}} + end; + false -> + ?event(snp_error, {config_validation_failed, #{ + option => <<"snp_trusted">>, + operation => <<"verify">>, + index => Index, + actual_type => snp_util:get_type_name(Config), + expected => <<"map">>, + suggestion => <<"Each element in snp_trusted must be a map containing trusted software configuration.">> + }}), + {error, {invalid_trusted_config_type, Index, <<"Config at index ", (hb_util:bin(integer_to_list(Index)))/binary, " must be a map">>}} + end. + +%% Helper function to validate snp_enforced_keys (optional) +-spec validate_snp_enforced_keys(NodeOpts :: map()) -> {ok, [atom()]} | {error, term()}. 
+validate_snp_enforced_keys(NodeOpts) -> + case hb_opts:get(snp_enforced_keys, undefined, NodeOpts) of + undefined -> + % Optional, use default + {ok, ?COMMITTED_PARAMETERS}; + [] -> + % Empty list means use default + {ok, ?COMMITTED_PARAMETERS}; + EnforcedKeys when is_list(EnforcedKeys), length(EnforcedKeys) > 0 -> + % Validate that all keys are atoms and are valid committed parameters + validate_enforced_keys_list(EnforcedKeys); + InvalidEnforced -> + ?event(snp_error, {config_validation_failed, #{ + option => <<"snp_enforced_keys">>, + operation => <<"verify">>, + actual_type => snp_util:get_type_name(InvalidEnforced), + expected => <<"list of atoms">>, + suggestion => <<"snp_enforced_keys must be a list of atoms representing committed parameters (e.g., [vcpus, vcpu_type, firmware, kernel]).">> + }}), + {error, {invalid_enforced_keys_type, <<"snp_enforced_keys must be a list of atoms">>}} + end. + +%% Helper function to validate enforced keys list +%% Note: Empty lists are handled by validate_snp_enforced_keys before calling this function. +%% However, this function is called recursively, so it will eventually be called with [] +%% when all keys have been validated. In that case, return {ok, []} to indicate success. +-spec validate_enforced_keys_list(EnforcedKeys :: [term()]) -> {ok, [atom()]} | {error, term()}. +validate_enforced_keys_list(EnforcedKeys) -> + validate_enforced_keys_list(EnforcedKeys, []). + +%% Internal helper that accumulates validated keys +-spec validate_enforced_keys_list(EnforcedKeys :: [term()], Acc :: [atom()]) -> {ok, [atom()]} | {error, term()}. 
+validate_enforced_keys_list([], Acc) ->
+    % Base case: all keys have been validated successfully; restore original order.
+    {ok, lists:reverse(Acc)};
+validate_enforced_keys_list([Key | Rest], Acc) ->
+    case is_atom(Key) of
+        true ->
+            % Check if key is a valid committed parameter
+            case lists:member(Key, ?COMMITTED_PARAMETERS) of
+                true ->
+                    validate_enforced_keys_list(Rest, [Key | Acc]);
+                false ->
+                    ?event(snp_error, {config_validation_failed, #{
+                        option => <<"snp_enforced_keys">>,
+                        operation => <<"verify">>,
+                        invalid_key => Key,
+                        valid_keys => ?COMMITTED_PARAMETERS,
+                        suggestion => <<"snp_enforced_keys must only contain valid committed parameters: ", (hb_util:bin(io_lib:format("~p", [?COMMITTED_PARAMETERS])))/binary>>
+                    }}),
+                    {error, {invalid_enforced_key, Key, <<"Key must be one of: ", (hb_util:bin(io_lib:format("~p", [?COMMITTED_PARAMETERS])))/binary>>}}
+            end;
+        false ->
+            ?event(snp_error, {config_validation_failed, #{
+                option => <<"snp_enforced_keys">>,
+                operation => <<"verify">>,
+                invalid_key => Key,
+                actual_type => case Key of
+                    L when is_list(L) -> <<"list">>;
+                    B when is_binary(B) -> <<"binary">>;
+                    M when is_map(M) -> <<"map">>;
+                    _ -> <<"other">>
+                end,
+                expected => <<"atom">>,
+                suggestion => <<"All keys in snp_enforced_keys must be atoms (e.g., vcpus, firmware, kernel).">>
+            }}),
+            {error, {invalid_enforced_key_type, Key, <<"All keys must be atoms">>}}
+    end.
+% The former catch-all clause `validate_enforced_keys_list(_, _Acc) -> {ok, []}.`
+% has been removed: it was unreachable for list inputs (the two clauses above
+% cover [] and [H|T]), and for non-list inputs it silently returned success
+% {ok, []}, masking a programming error. The caller already guards with
+% is_list/1, so a non-list here should crash (let it crash).
+
+%% @doc Verify that the measurement in the report matches the expected measurement.
+%% This is a simple byte comparison, so it's done in Erlang.
+%% @param ReportJSON Binary containing the JSON attestation report +%% @param ExpectedMeasurement Binary containing the expected measurement (?LAUNCH_DIGEST_SIZE bytes) +%% @returns {ok, true} if measurements match, {ok, false} if they don't match, +%% {error, Reason} if JSON parsing fails or measurement field is missing +-spec verify_measurement(ReportJSON :: binary(), ExpectedMeasurement :: binary()) -> + verification_result(). +verify_measurement(ReportJSON, ExpectedMeasurement) -> + case snp_util:safe_json_decode(ReportJSON) of + {ok, ReportMap} -> + case maps:find(<<"measurement">>, ReportMap) of + {ok, ActualMeasurement} when is_list(ActualMeasurement) -> + ActualBin = hb_util:bin(ActualMeasurement), + case ActualBin =:= ExpectedMeasurement of + true -> + ?event(snp_short, {verify_measurement_match, true}), + {ok, true}; + false -> + {ok, false} % Measurement mismatch, not an error + end; + {ok, ActualMeasurement} when is_binary(ActualMeasurement) -> + case ActualMeasurement =:= ExpectedMeasurement of + true -> + ?event(snp_short, {verify_measurement_match, true}), + {ok, true}; + false -> + {ok, false} % Measurement mismatch, not an error + end; + error -> + ?event(snp_error, {verify_measurement_missing_field, #{ + operation => <<"verify_measurement">>, + report_keys => maps:keys(ReportMap), + expected_field => <<"measurement">>, + suggestion => <<"Ensure the report JSON contains a 'measurement' field with the launch digest value.">> + }}), + {error, <<"Measurement verification failed: 'measurement' field not found in report. Expected a field named 'measurement' containing the launch digest (", + (hb_util:bin(integer_to_list(?LAUNCH_DIGEST_SIZE)))/binary, " bytes).">>} + end; + {error, Reason} -> + ?event(snp_error, {verify_measurement_decode_error, #{ + operation => <<"verify_measurement">>, + reason => Reason, + suggestion => <<"JSON decode failed. Ensure the input is valid JSON format.">> + }}), + {error, Reason} + end. 
+ +%% @doc Verify the signature of an attestation report. +%% Accepts binary report structure and DER-encoded certificates for better performance. +%% @param ReportBinary Binary containing the raw report structure (?REPORT_SIZE bytes) OR JSON binary +%% @param CertChainPEM Binary containing the PEM-encoded certificate chain (ARK + ASK) OR DER binary +%% @param VcekDER Binary containing the DER-encoded VCEK certificate +%% @returns {ok, true} if signature is valid, {error, {ErrorCode, ErrorMsg}} if verification fails +-spec verify_signature(ReportBinary :: binary(), CertChainPEM :: binary(), VcekDER :: binary()) -> + {ok, true} | {error, binary() | {term(), binary()}}. +verify_signature(ReportBinary, CertChainPEM, VcekDER) -> + % Convert JSON to binary if needed + ReportBin = case snp_util:is_json_binary(ReportBinary) of + true -> + ?event(snp, {verify_signature_converting_json}), + case snp_report_format:report_json_to_binary(ReportBinary) of + {error, Reason1} -> + ?event(snp_error, {verify_signature_json_conversion_error, #{ + operation => <<"verify_signature">>, + error => Reason1, + suggestion => <<"Ensure the report JSON is valid and contains all required fields.">> + }}), + {error, Reason1}; + Bin -> {ok, Bin} + end; + false -> + case is_binary(ReportBinary) andalso byte_size(ReportBinary) =:= ?REPORT_SIZE of + true -> {ok, ReportBinary}; + false -> + ReportSize = case is_binary(ReportBinary) of + true -> byte_size(ReportBinary); + false -> <<"not_a_binary">> + end, + ReportType = case is_binary(ReportBinary) of + true -> <<"binary">>; + false -> <<"not_binary">> + end, + ?event(snp_error, {verify_signature_invalid_report, #{ + operation => <<"verify_signature">>, + actual_size => ReportSize, + expected_size => ?REPORT_SIZE, + actual_type => ReportType, + suggestion => <<"Ensure the report is either a ", (hb_util:bin(integer_to_list(?REPORT_SIZE)))/binary, "-byte binary or valid JSON format.">> + }}), + SizeStr = case is_binary(ReportBinary) of + true -> 
integer_to_list(byte_size(ReportBinary)); + false -> "not a binary" + end, + {error, <<"Report validation failed: expected ", + (hb_util:bin(integer_to_list(?REPORT_SIZE)))/binary, + "-byte binary or valid JSON, got ", + (hb_util:bin(SizeStr))/binary, + " bytes.">>} + end + end, + % Convert PEM to DER if needed + CertChainDER = case snp_util:is_pem_binary(CertChainPEM) of + true -> + ?event(snp, {verify_signature_converting_pem}), + case snp_certificates:pem_to_der_chain(CertChainPEM) of + {error, Reason2} -> + ?event(snp_error, {verify_signature_pem_conversion_error, #{ + operation => <<"verify_signature">>, + error => Reason2, + suggestion => <<"Ensure the certificate chain is valid PEM format containing ASK and ARK certificates.">> + }}), + {error, Reason2}; + DER -> {ok, DER} + end; + false -> + case is_binary(CertChainPEM) of + true -> {ok, CertChainPEM}; + false -> + ?event(snp_error, {verify_signature_invalid_cert_chain, #{ + operation => <<"verify_signature">>, + actual_type => case is_binary(CertChainPEM) of true -> <<"binary">>; false -> <<"not_binary">> end, + expected => <<"PEM or DER binary">>, + suggestion => <<"Ensure the certificate chain is a valid PEM or DER-encoded binary.">> + }}), + {error, <<"Certificate chain validation failed: expected PEM or DER binary, got ", + (hb_util:bin(case is_binary(CertChainPEM) of true -> <<"binary">>; false -> <<"not_binary">> end))/binary, + ". 
Provide a valid certificate chain in PEM or DER format.">>} + end + end, + % Validate VCEK DER + VcekDERValid = case is_binary(VcekDER) andalso byte_size(VcekDER) > 0 of + true -> {ok, VcekDER}; + false -> + ActualSize = case is_binary(VcekDER) of + true -> byte_size(VcekDER); + false -> 0 + end, + ?event(snp_error, {verify_signature_invalid_vcek, #{ + operation => <<"verify_signature">>, + actual_size => ActualSize, + actual_type => snp_util:get_type_name(VcekDER), + expected => <<"non-empty DER-encoded binary">>, + suggestion => <<"Ensure VCEK is a valid DER-encoded certificate binary fetched from AMD KDS.">> + }}), + {error, <<"VCEK validation failed: expected non-empty DER-encoded binary, got ", + (hb_util:bin(case is_binary(VcekDER) of true -> integer_to_list(byte_size(VcekDER)); false -> hb_util:list(snp_util:get_type_name(VcekDER)) end))/binary, + " bytes. Ensure VCEK is fetched from AMD KDS and is in DER format.">>} + end, + case {ReportBin, CertChainDER, VcekDERValid} of + {{ok, RB}, {ok, CCD}, {ok, VD}} -> + ?event(snp_short, {verify_signature_start, #{ + report_size => byte_size(RB), + cert_chain_size => byte_size(CCD), + vcek_size => byte_size(VD) + }}), + % All NIF calls go through snp_nif.erl + {NifTimeMicros, NifResult} = timer:tc(fun() -> snp_nif:verify_signature_nif(RB, CCD, VD) end), + NifTimeMs = NifTimeMicros / 1000, + Result = NifResult, + case Result of + {ok, true} -> + ?event(snp_short, {verify_signature_success, #{ + time_ms => NifTimeMs + }}); + {ok, false} -> + ?event(snp_error, {verify_signature_failed, #{ + operation => <<"verify_signature">>, + time_ms => NifTimeMs, + suggestion => <<"The report signature is invalid. This may indicate a compromised or tampered report. 
Verify the report source and certificates.">> + }}); + Error -> + ?event(snp_error, {verify_signature_error, #{ + operation => <<"verify_signature">>, + error => Error, + time_ms => NifTimeMs + }}) + end, + Result; + {{error, Error1}, _, _} -> {error, Error1}; + {_, {error, Error2}, _} -> {error, Error2}; + {_, _, {error, Error3}} -> {error, Error3} + end. + + +%% @doc Verify message signature and address. +%% @param MsgWithJSONReport The message containing the JSON report +%% @param Address The expected address +%% @param NodeOpts Node options +%% @returns {ok, true} if signature and address are valid, {error, signature_or_address_invalid} otherwise +-spec verify_signature_and_address(term(), binary(), map()) -> + {ok, true} | {error, signature_or_address_invalid}. +verify_signature_and_address(MsgWithJSONReport, Address, NodeOpts) -> + Signers = hb_message:signers(MsgWithJSONReport, NodeOpts), + SigIsValid = hb_message:verify(MsgWithJSONReport, Signers), + AddressIsValid = lists:member(Address, Signers), + case SigIsValid andalso AddressIsValid of + true -> + ?event(snp_short, {verify_signature_and_address_success, true}), + {ok, true}; + false -> + ?event(snp_error, {verify_signature_and_address_failed, #{ + operation => <<"verify_signature_and_address">>, + signature_valid => SigIsValid, + address_valid => AddressIsValid, + suggestion => case {SigIsValid, AddressIsValid} of + {false, _} -> <<"Message signature is invalid. Verify the message was signed correctly.">>; + {true, false} -> <<"Address mismatch: expected address not found in signers. Verify the message was signed by the expected address.">> + end + }}), + {error, signature_or_address_invalid} + end. + +%% @doc Verify that the debug flag is disabled in the SNP policy. +%% +%% This function checks the SNP guest policy DEBUG bit: if set, the report is from +%% a debug-enabled guest; if clear, non-debug/production. We use policy only (not +%% TCB/SVN). 
The report is verified (signature + VCEK/ASK/ARK chain) in the same +%% pipeline, so policy.DEBUG is cryptographically bound to the attestation. +%% +%% @param ReportMap The decoded SNP report map (from report JSON) +%% @returns `{ok, true}' if debug is disabled, or `{error, debug_enabled}' if enabled +-spec verify_debug_disabled(ReportMap :: map()) -> {ok, true} | {error, debug_enabled}. +verify_debug_disabled(ReportMap) -> + PolicyRaw = hb_ao:get(<<"policy">>, ReportMap, undefined, #{}), + % Missing policy: treat as debug enabled (fail verification) + DebugDisabled = case PolicyRaw of + undefined -> false; + _ -> (policy_to_integer(PolicyRaw) band ?SNP_GUEST_POLICY_DEBUG) =:= 0 + end, + PolicyInt = policy_to_integer(PolicyRaw), + DebugBitMask = ?SNP_GUEST_POLICY_DEBUG, + DebugBitSet = (PolicyInt band DebugBitMask) =/= 0, + ?event(snp_short, {verify_debug_disabled_check, #{ + policy_raw => PolicyRaw, + policy_int => PolicyInt, + debug_bit => ?DEBUG_FLAG_BIT, + debug_disabled => DebugDisabled + }}), + ?event(snp_short, {snp_debug_policy_check, #{ + policy_int => PolicyInt, + debug_bit => ?DEBUG_FLAG_BIT, + debug_bit_mask => DebugBitMask, + debug_bit_set => DebugBitSet, + debug_disabled => DebugDisabled, + note => <<"If debug_bit_set is false, report has debug bit clear (policy from attestation)">> + }}), + case DebugDisabled of + true -> + ?event(snp_short, {verify_debug_disabled_success, true}), + {ok, true}; + false -> + ?event(snp_error, {verify_debug_disabled_failed, #{ + operation => <<"verify_debug_disabled">>, + policy_raw => PolicyRaw, + policy_int => PolicyInt, + suggestion => <<"Debug mode is enabled in the SNP policy. This is not allowed in production. Disable debug mode by clearing bit ", + (hb_util:bin(integer_to_list(?DEBUG_FLAG_BIT)))/binary, " in the policy field.">> + }}), + {error, debug_enabled} + end. + +%% Helper to check if debug is enabled in the report. 
+%% Policy is read directly from the report map (decoded JSON); we coerce to
+%% integer so that decoders that return floats (e.g. 720896.0) still work.
+-spec is_debug(Report :: map()) -> boolean().
+is_debug(Report) ->
+    PolicyInt = policy_to_integer(hb_ao:get(<<"policy">>, Report, undefined, #{})),
+    (PolicyInt band ?SNP_GUEST_POLICY_DEBUG) =/= 0.
+
+%% Coerce report policy value to integer for bit test (handles JSON int/float).
+%% Any unparsable or negative value coerces to 0.
+-spec policy_to_integer(term()) -> non_neg_integer().
+policy_to_integer(P) when is_integer(P), P >= 0 -> P;
+policy_to_integer(P) when is_float(P), P >= 0 -> round(P);
+policy_to_integer(P) when is_binary(P) ->
+    % The `of' section of a try expression is NOT protected by the catch
+    % clauses: without the `_ -> 0` fallback below, a binary that parses to a
+    % negative integer (e.g. <<"-1">>) raised an uncaught try_clause error
+    % instead of coercing to 0.
+    try binary_to_integer(P) of
+        N when N >= 0 -> N;
+        _ -> 0
+    catch
+        _:_ -> 0
+    end;
+policy_to_integer(_) -> 0.
+
+%% @doc Verify that the measurement in the SNP report is valid.
+%%
+%% This function validates the SNP measurement by:
+%% 1. Extracting committed parameters from the message
+%% 2. Computing the expected launch digest using those parameters
+%% 3. Comparing the computed digest with the measurement in the report
+%%
+%% @param Msg The normalized SNP message containing local hashes
+%% @param ReportJSON The raw JSON report containing the measurement
+%% @param NodeOpts A map of configuration options
+%% @returns `{ok, true}' if the measurement is valid, or
+%% `{error, measurement_invalid}' on failure
+-spec verify_measurement(Msg :: map(), ReportJSON :: binary(),
+    NodeOpts :: map()) -> {ok, true} | {error, measurement_invalid | {measurement_verification_failed, term()}}.
+verify_measurement(Msg, ReportJSON, NodeOpts) -> + Args = extract_measurement_args(Msg, NodeOpts), + % Try to read OVMF file and extract SEV hashes table GPA + ArgsWithGpa = case snp_ovmf:read_ovmf_gpa() of + {ok, Gpa} -> + ?event(snp_short, {ovmf_gpa_found, Gpa}), + Args#{<<"sev_hashes_gpa">> => Gpa}; + {error, GpaReason} -> + ?event(snp, {ovmf_gpa_not_found, GpaReason}), + Args % Continue without GPA if file not found + end, + {ok, ExpectedBin} = snp_launch_digest:compute_launch_digest(ArgsWithGpa), + % verify_measurement is now implemented in Erlang + % Returns {ok, true} on match, {ok, false} on mismatch, {error, Reason} on parse errors + case verify_measurement(ReportJSON, ExpectedBin) of + {ok, true} -> + ?event(snp_short, {verify_measurement_success, true}), + {ok, true}; + {ok, false} -> + ?event(snp_error, {verify_measurement_mismatch, #{ + operation => <<"verify_measurement">>, + suggestion => <<"Measurement mismatch indicates the launch digest does not match. Verify that all committed parameters (vcpus, vcpu_type, vmm_type, guest_features, firmware, kernel, initrd, append) match the expected values.">> + }}), + {error, measurement_invalid}; + {error, Reason} -> + % JSON parsing or other errors - distinguish from measurement mismatch + ?event(snp_error, {measurement_verification_error, #{ + operation => <<"verify_measurement">>, + error => Reason, + suggestion => <<"Failed to parse or extract measurement from report. Ensure the report JSON is valid and contains a 'measurement' field.">> + }}), + {error, {measurement_verification_failed, Reason}} + end. + +%% @doc Extract measurement arguments from the SNP message. +%% +%% This function extracts and formats the committed parameters needed for +%% measurement computation from the local hashes in the message. 
+%% +%% @param Msg The normalized SNP message containing local hashes +%% @param NodeOpts A map of configuration options +%% @returns A map of measurement arguments with binary keys (for launch digest Args) +-spec extract_measurement_args(Msg :: map(), NodeOpts :: map()) -> map(). +extract_measurement_args(Msg, NodeOpts) -> + EnforcedKeys = lists:map(fun atom_to_binary/1, ?COMMITTED_PARAMETERS), + LocalHashes = hb_cache:ensure_all_loaded( + hb_ao:get(<<"local-hashes">>, Msg, NodeOpts), + NodeOpts + ), + maps:with(EnforcedKeys, LocalHashes). + +%% Helper function to parse and validate report JSON +-spec parse_and_validate_report_json(ReportJSON :: binary()) -> map(). +parse_and_validate_report_json(ReportJSON) -> + Report = hb_json:decode(ReportJSON), + case Report of + ReportMap when is_map(ReportMap) -> + ?event(snp, {report_map_valid, map_size(ReportMap)}), + ReportMap; + Other -> + ReportTypeStr = case Other of + R2 when is_map(R2) -> <<"map">>; + L2 when is_list(L2) -> <<"list">>; + B2 when is_binary(B2) -> <<"binary">>; + _ -> <<"other">> + end, + ?event(snp_error, {report_map_invalid, #{ + operation => <<"verify_report_integrity">>, + report_type => ReportTypeStr, + expected => <<"map">>, + suggestion => <<"The report JSON must decode to a map/object. Ensure the JSON is valid and properly formatted.">> + }}), + throw({error, invalid_report_format}) + end. + +%% Helper function to extract and validate chip_id from report +-spec extract_and_validate_chip_id(ReportMap :: map()) -> binary(). +extract_and_validate_chip_id(ReportMap) -> + ChipIdRaw = hb_ao:get(<<"chip_id">>, ReportMap, undefined, #{}), + % Use centralized ChipId validation + ChipId = case ChipIdRaw of + undefined -> + ?event(snp_error, {missing_chip_id, #{ + operation => <<"verify_report_integrity">>, + expected_field => <<"chip_id">>, + suggestion => <<"The report must contain a 'chip_id' field. 
Ensure the SNP report is complete and properly formatted.">> + }}), + throw({error, missing_chip_id}); + ChipIdRawValue -> + case snp_validation:validate_chip_id(ChipIdRawValue) of + {ok, ValidChipId} -> + ?event(snp_short, {chip_id_valid, byte_size(ValidChipId)}), + ValidChipId; + {error, Reason} -> + ?event(snp_error, {invalid_chip_id_format, #{ + operation => <<"verify_report_integrity">>, + error => Reason, + suggestion => <<"The 'chip_id' field must be a list or binary containing exactly ", + (hb_util:bin(integer_to_list(?CHIP_ID_SIZE)))/binary, " bytes.">> + }}), + throw({error, {invalid_chip_id, Reason}}) + end + end, + ?event(snp_short, {chip_id_extracted, byte_size(ChipId)}), + ChipId. + +%% Helper function to extract and validate current_tcb map +-spec extract_and_validate_tcb(ReportMap :: map()) -> map(). +extract_and_validate_tcb(ReportMap) -> + CurrentTcbRaw = hb_ao:get(<<"current_tcb">>, ReportMap, undefined, #{}), + ?event(snp, {current_tcb_raw, is_map(CurrentTcbRaw)}), + case CurrentTcbRaw of + undefined -> + ?event(snp_error, {missing_current_tcb, #{ + operation => <<"verify_report_integrity">>, + expected_field => <<"current_tcb">>, + suggestion => <<"The report must contain a 'current_tcb' field. Ensure the SNP report is complete and properly formatted.">> + }}), + throw({error, missing_current_tcb}); + TcbMap when is_map(TcbMap) -> + ?event(snp_short, {current_tcb_valid, map_size(TcbMap)}), + TcbMap; + InvalidTcb -> + ?event(snp_error, {invalid_current_tcb_format, #{ + operation => <<"verify_report_integrity">>, + actual_type => case InvalidTcb of + TcbList when is_list(TcbList) -> <<"list">>; + TcbBin when is_binary(TcbBin) -> <<"binary">>; + _ -> <<"other">> + end, + expected => <<"map">>, + suggestion => <<"The 'current_tcb' field must be a map/object containing bootloader, tee, snp, and microcode SPL values.">> + }}), + throw({error, invalid_current_tcb_format}) + end. 
+ +%% Helper function to extract SPL field from TCB map +-spec extract_spl_field(TCBMap :: map(), FieldName :: binary(), FieldLabel :: binary()) -> integer(). +extract_spl_field(TCBMap, FieldName, FieldLabel) -> + FieldRaw = hb_ao:get(FieldName, TCBMap, undefined, #{}), + ?event(snp, {FieldLabel, is_integer(FieldRaw)}), + case FieldRaw of + undefined -> + ?event(snp_error, {missing_spl_field, #{ + operation => <<"verify_report_integrity">>, + expected_field => <<"current_tcb.", FieldName/binary>>, + suggestion => <<"The 'current_tcb' map must contain a '", FieldName/binary, "' field with an integer SPL value (0-255).">> + }}), + throw({error, {missing_spl_field, FieldName}}); + Val when is_integer(Val) -> + ?event(snp_short, {spl_field_valid, #{field => FieldLabel, value => Val}}), + Val; + Invalid -> + ?event(snp_error, {invalid_spl_field, #{ + operation => <<"verify_report_integrity">>, + field => FieldLabel, + actual_value => Invalid, + actual_type => case Invalid of + I when is_integer(I) -> <<"integer">>; + B when is_binary(B) -> <<"binary">>; + L when is_list(L) -> <<"list">>; + _ -> <<"other">> + end, + expected => <<"integer in range 0-255">>, + suggestion => <<"The '", FieldName/binary, "' SPL value must be an integer in the range 0-255.">> + }}), + throw({error, {invalid_spl_field, FieldName}}) + end. + +%% Helper function to convert report to binary and verify signature +-spec convert_and_verify_signature(ReportJSON :: binary(), CertChainPEM :: binary(), + VcekDER :: binary()) -> boolean(). 
+convert_and_verify_signature(ReportJSON, CertChainPEM, VcekDER) -> + ?event(snp, {converting_report_json_to_binary}), % Verbose: conversion step + ReportBinary = case snp_report_format:report_json_to_binary(ReportJSON) of + {error, Reason} = E -> + ?event(snp_error, {report_json_to_binary_error, #{ + operation => <<"verify_report_integrity">>, + error => Reason, + suggestion => <<"Ensure the report JSON contains all required fields and is properly formatted.">> + }}), + throw(E); + Bin -> + ?event(snp_short, {report_json_to_binary_success, byte_size(Bin)}), + Bin + end, + + ?event(snp_short, {verifying_signature_start, #{ + report_binary_size => byte_size(ReportBinary), + cert_chain_size => byte_size(CertChainPEM), + vcek_size => byte_size(VcekDER) + }}), + {VerifyTimeMicros, VerifyResult} = timer:tc(fun() -> + verify_signature(ReportBinary, CertChainPEM, VcekDER) + end), + VerifyTimeMs = VerifyTimeMicros / 1000, + {ok, ReportIsValid} = VerifyResult, + ?event(snp_short, {signature_verification_complete, #{ + is_valid => ReportIsValid, + time_ms => VerifyTimeMs + }}), + ReportIsValid. + +%% @doc Verify the integrity of the SNP report's digital signature. +%% +%% This function validates the cryptographic signature of the SNP report +%% against the hardware root of trust to ensure the report has not been +%% tampered with and originates from genuine AMD SEV-SNP hardware. +%% +%% The function: +%% 1. Parses the JSON report to extract chip ID and TCB version +%% 2. Fetches the certificate chain (ARK + ASK) from AMD KDS +%% 3. Fetches the VCEK certificate from AMD KDS +%% 4. Verifies the signature using the Rust NIF +%% +%% @param ReportJSON The raw JSON report to verify +%% @returns `{ok, true}' if the report signature is valid, or +%% `{error, report_signature_invalid}' on failure +-spec verify_report_integrity(ReportJSON :: binary(), NodeOpts :: map()) -> + {ok, true} | {error, report_signature_invalid | term()}. 
+verify_report_integrity(ReportJSON, NodeOpts) -> + ?event(snp_short, {verify_report_integrity_start, byte_size(ReportJSON)}), + {IntegrityTimeMicros, Result} = timer:tc(fun() -> + maybe + % Parse and validate report JSON + ReportMap = parse_and_validate_report_json(ReportJSON), + + % Extract and validate chip_id + ChipId = extract_and_validate_chip_id(ReportMap), + + % Extract and validate TCB map + CurrentTcb = extract_and_validate_tcb(ReportMap), + + % Extract all SPL fields + BootloaderSPL = extract_spl_field(CurrentTcb, <<"bootloader">>, <<"bootloader_spl_raw">>), + TeeSPL = extract_spl_field(CurrentTcb, <<"tee">>, <<"tee_spl_raw">>), + SnpSPL = extract_spl_field(CurrentTcb, <<"snp">>, <<"snp_spl_raw">>), + UcodeSPL = extract_spl_field(CurrentTcb, <<"microcode">>, <<"ucode_spl_raw">>), + ?event(snp_short, {all_tcb_fields_extracted, #{ + bootloader => BootloaderSPL, + tee => TeeSPL, + snp => SnpSPL, + microcode => UcodeSPL + }}), + + % Fetch certificates (KDS fetch failure returns {error, Reason}) + case snp_certificates:fetch_verification_certificates( + ChipId, BootloaderSPL, TeeSPL, SnpSPL, UcodeSPL, NodeOpts) of + {ok, {CertChainPEM, VcekDER}} -> + % Convert and verify signature + ReportIsValid = convert_and_verify_signature(ReportJSON, CertChainPEM, VcekDER), + case ReportIsValid of + true -> + ?event(snp_short, {verify_report_integrity_success}), + {ok, true}; + false -> + ?event(snp_error, {signature_invalid, #{ + operation => <<"verify_report_integrity">>, + suggestion => <<"The report signature is invalid. This may indicate a compromised or tampered report. 
Verify the report source and certificates.">> + }}), + {error, report_signature_invalid} + end; + {error, Reason} -> + {error, Reason} + end + else + {error, ErrorReason} -> + ?event(snp_error, {report_verification_error, #{ + operation => <<"verify_report_integrity">>, + error => ErrorReason, + suggestion => <<"Check the error details above for specific validation failures.">> + }}), + {error, ErrorReason} + end + end), + IntegrityTimeMs = IntegrityTimeMicros / 1000, + ?event(snp_short, {verify_report_integrity_time_ms, IntegrityTimeMs}), + Result. + +%% @doc Verify that the nonce in the report matches the expected value. +%% +%% This function validates that the nonce in the SNP report was generated +%% using the correct address and node message ID, ensuring the report +%% corresponds to the expected request. +%% +%% @param Address The node's address used in nonce generation +%% @param NodeMsgID The node message ID used in nonce generation +%% @param Msg The normalized SNP message containing the nonce +%% @param NodeOpts A map of configuration options +%% @returns `{ok, true}' if the nonce matches, or `{error, nonce_mismatch}' on failure +-spec verify_nonce(Address :: binary(), NodeMsgID :: binary(), + Msg :: map(), NodeOpts :: map()) -> {ok, true} | {error, nonce_mismatch}. +verify_nonce(Address, NodeMsgID, Msg, NodeOpts) -> + Nonce = hb_util:decode(hb_ao:get(<<"nonce">>, Msg, NodeOpts)), + NonceMatches = snp_nonce:report_data_matches(Address, NodeMsgID, Nonce), + case NonceMatches of + true -> + ?event(snp_short, {verify_nonce_success, true}), + {ok, true}; + false -> + ?event(snp_error, {verify_nonce_mismatch, #{ + operation => <<"verify_nonce">>, + suggestion => <<"Nonce mismatch indicates the report was not generated for this specific address and message ID. Verify the report corresponds to the expected request.">> + }}), + {error, nonce_mismatch} + end. + +%% @doc Verify that the software configuration is trusted. 
+%% +%% This function validates that the firmware, kernel, and other system +%% components match approved configurations by delegating to the +%% software trust validation system. +%% +%% @param M1 The previous message in the verification chain +%% @param Msg The normalized SNP message containing software hashes +%% @param NodeOpts A map of configuration options including trusted software list +%% @returns `{ok, true}' if the software is trusted, or `{error, untrusted_software}' +%% on failure +-spec verify_trusted_software(M1 :: term(), Msg :: map(), NodeOpts :: map()) -> + verification_result(). +verify_trusted_software(M1, Msg, NodeOpts) -> + {ok, IsTrustedSoftware} = snp_trust:execute_is_trusted(M1, Msg, NodeOpts), + ?event(snp_short, {trusted_software, IsTrustedSoftware}), + case IsTrustedSoftware of + true -> + ?event(snp_short, {verify_trusted_software_success, true}), + {ok, true}; + false -> + ?event(snp_error, {verify_trusted_software_failed, #{ + operation => <<"verify_trusted_software">>, + suggestion => <<"The software configuration (firmware, kernel, etc.) does not match the trusted software list. Ensure all software components are approved and match the expected hashes.">> + }}), + {error, untrusted_software} + end. + +%% @doc Determine if an error is a verification failure (report is invalid) +%% vs a system error (missing config, network failure, etc.) +%% Verification failures should return {ok, false}, system errors should propagate +-spec is_verification_failure(Reason :: term()) -> boolean(). +is_verification_failure(Reason) -> + case Reason of + nonce_mismatch -> true; + signature_or_address_invalid -> true; + debug_enabled -> true; + untrusted_software -> true; + measurement_invalid -> true; + report_signature_invalid -> true; + {measurement_verification_failed, _} -> true; % Measurement parse error treated as verification failure + _ -> false % All other errors are system errors + end. 
+ +%% @doc Verify an AMD SEV-SNP commitment report message. +%% +%% This function validates the identity of a remote node, its ephemeral private +%% address, and the integrity of the hardware-backed attestation report. +%% The verification process performs the following checks: +%% 1. Verify the address and the node message ID are the same as the ones +%% used to generate the nonce. +%% 2. Verify the address that signed the message is the same as the one used +%% to generate the nonce. +%% 3. Verify that the debug flag is disabled. +%% 4. Verify that the firmware, kernel, and OS (VMSAs) hashes, part of the +%% measurement, are trusted. +%% 5. Verify the measurement is valid. +%% 6. Verify the report's certificate chain to hardware root of trust. +%% +%% Required configuration in NodeOpts map: +%% - snp_trusted: List of trusted software configurations +%% - snp_enforced_keys: Keys to enforce during validation (optional) +%% +%% @param M1 The previous message in the verification chain +%% @param M2 The message containing the SNP commitment report +%% @param NodeOpts A map of configuration options for verification +%% @returns `{ok, true}' on successful verification, `{ok, false}' on verification +%% failure (report is invalid), or `{error, Reason}' on system errors +%% (missing config, network failures, etc.) +-spec verify(M1 :: term(), M2 :: term(), NodeOpts :: map()) -> + {ok, boolean()} | {error, term()}. 
+verify(M1, M2, NodeOpts) -> + ?event(snp_short, {verify_called}), + {VerifyTimeMicros, Result} = timer:tc(fun() -> + maybe + % Validate configuration options + {ok, _} ?= validate_verify_config(NodeOpts), + {ok, {Msg, Address, NodeMsgID, ReportJSON, MsgWithJSONReport, Report}} + ?= snp_message:extract_and_normalize_message(M2, NodeOpts), + ?event(snp_short, {snp_verify_step, extract_ok, #{report_keys => maps:keys(Report)}}), + % Perform all validation steps (policy from Report, not Msg) + {ok, NonceResult} ?= verify_nonce(Address, NodeMsgID, Msg, NodeOpts), + ?event(snp_short, {snp_verify_step, nonce, NonceResult}), + {ok, SigResult} ?= + verify_signature_and_address( + MsgWithJSONReport, + Address, + NodeOpts + ), + ?event(snp_short, {snp_verify_step, signature, SigResult}), + {ok, DebugResult} ?= verify_debug_disabled(Report), + ?event(snp_short, {snp_verify_step, debug_disabled, DebugResult}), + {ok, TrustedResult} ?= verify_trusted_software(M1, Msg, NodeOpts), + ?event(snp_short, {snp_verify_step, trusted_software, TrustedResult}), + {ok, MeasurementResult} ?= verify_measurement(Msg, ReportJSON, NodeOpts), + ?event(snp_short, {snp_verify_step, measurement, MeasurementResult}), + {ok, ReportResult} ?= verify_report_integrity(ReportJSON, NodeOpts), + ?event(snp_short, {snp_verify_step, report_integrity, ReportResult}), + Valid = lists:all( + fun(Bool) -> Bool end, + [ + NonceResult, + SigResult, + DebugResult, + TrustedResult, + MeasurementResult, + ReportResult + ] + ), + ?event(snp_short, {final_validation_result, Valid}), + ?event(snp_short, {snp_verify_done, #{valid => Valid}}), + % Return boolean value (not binary) for consistency with dev_message:verify expectations + % dev_message:verify_commitment expects {ok, boolean()}, so we must return {ok, false} + % for verification failures, not {error, ...} + {ok, Valid} + else + % Distinguish between verification failures and system errors + % Verification failures (report is invalid) should return {ok, false} + 
% System errors (missing config, network failures, etc.) should return {error, Reason} + % even if it crashes dev_message:verify_commitment, because these indicate + % exceptional conditions that need to be handled differently + {error, Reason} = ErrorTuple -> + case is_verification_failure(Reason) of + true -> + % Verification failure: report is invalid + ?event(snp_error, {snp_verification_failed, #{ + operation => <<"verify">>, + reason => Reason, + suggestion => <<"The SNP report failed verification. Check individual validation steps above for details.">> + }}), + {ok, false}; + false -> + % System error: propagate to caller + ?event(snp_error, {snp_system_error, #{ + operation => <<"verify">>, + reason => Reason, + suggestion => <<"System error during verification. Check network connectivity, configuration, and system resources.">> + }}), + ErrorTuple + end; + Error -> + % Unexpected error (exception, etc.) - treat as system error + ?event(snp_error, {snp_system_error, #{ + operation => <<"verify">>, + error => Error, + suggestion => <<"Unexpected error during verification. Check system logs for details.">> + }}), + {error, Error} + end + end), + VerifyTimeMs = VerifyTimeMicros / 1000, + ?event(snp_short, {verify_total_time_ms, VerifyTimeMs}), + Result. +