From 482fd5d0857d29cbe9d5a937d3e7da3ca4e0f527 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 02:45:55 -0700 Subject: [PATCH 01/50] Add Arti system TCP transport --- Cargo.lock | 4292 ++++++++++++++++++++++++++++++--- burrow/Cargo.toml | 32 +- burrow/build.rs | 2 +- burrow/src/daemon/instance.rs | 374 ++- burrow/src/daemon/mod.rs | 9 +- burrow/src/database.rs | 2 +- burrow/src/lib.rs | 12 +- burrow/src/main.rs | 2 + burrow/src/tor/config.rs | 125 + burrow/src/tor/mod.rs | 6 + burrow/src/tor/runtime.rs | 116 + burrow/src/tor/system.rs | 856 +++++++ docs/TOR.md | 41 + proto/burrow.proto | 1 + tun/Cargo.toml | 8 +- 15 files changed, 5427 insertions(+), 451 deletions(-) create mode 100644 burrow/src/tor/config.rs create mode 100644 burrow/src/tor/mod.rs create mode 100644 burrow/src/tor/runtime.rs create mode 100644 burrow/src/tor/system.rs create mode 100644 docs/TOR.md diff --git a/Cargo.lock b/Cargo.lock index 22a3bf3..a7833c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,15 +2,6 @@ # It is not intended for manual editing. 
version = 4 -[[package]] -name = "addr2line" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" -dependencies = [ - "gimli", -] - [[package]] name = "adler2" version = "2.0.1" @@ -36,18 +27,7 @@ dependencies = [ "cfg-if", "cipher", "cpufeatures", -] - -[[package]] -name = "ahash" -version = "0.8.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" -dependencies = [ - "cfg-if", - "once_cell", - "version_check", - "zerocopy", + "zeroize", ] [[package]] @@ -59,6 +39,76 @@ dependencies = [ "memchr", ] +[[package]] +name = "alloca" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5a7d05ea6aea7e9e64d25b9156ba2fee3fdd659e34e41063cd2fc7cd020d7f4" +dependencies = [ + "cc", +] + +[[package]] +name = "amplify" +version = "4.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f7fb4ac7c881e54a8e7015e399b6112a2a5bc958b6c89ac510840ff20273b31" +dependencies = [ + "amplify_derive", + "amplify_num", + "ascii", + "getrandom 0.2.16", + "getrandom 0.3.3", + "wasm-bindgen", +] + +[[package]] +name = "amplify_derive" +version = "4.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a6309e6b8d89b36b9f959b7a8fa093583b94922a0f6438a24fb08936de4d428" +dependencies = [ + "amplify_syn", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "amplify_num" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99bcb75a2982047f733547042fc3968c0f460dfcf7d90b90dea3b2744580e9ad" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "amplify_syn" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7736fb8d473c0d83098b5bac44df6a561e20470375cd8bcae30516dc889fd62a" 
+dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "anstream" version = "0.6.20" @@ -115,6 +165,111 @@ version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "arti-client" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89842cae6e3bda0fd128a5c66eb3392ed412065dc698c77d9fcc4b77e4159f2" +dependencies = [ + "async-trait", + "cfg-if", + "derive-deftly", + "derive_builder_fork_arti", + "derive_more", + "educe", + "fs-mistrust", + "futures", + "hostname-validator", + "humantime", + "humantime-serde", + "libc", + "once_cell", + "postage", + "rand 0.9.2", + "safelog", + "serde", + "thiserror 2.0.16", + "time", + "tor-async-utils", + "tor-basic-utils", + "tor-chanmgr", + "tor-circmgr", + "tor-config", + "tor-config-path", + "tor-dircommon", + "tor-dirmgr", + "tor-error", + "tor-guardmgr", + "tor-keymgr", + "tor-linkspec", + "tor-llcrypto", + "tor-memquota", + "tor-netdir", + "tor-netdoc", + "tor-persist", + "tor-proto", + "tor-protover", + "tor-rtcompat", + "tracing", + "void", +] + +[[package]] +name = "ascii" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16" + +[[package]] +name = "asn1-rs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom 7.1.3", + "num-traits", + "rusticata-macros", + "thiserror 2.0.16", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "assert_matches" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" + [[package]] name = "async-channel" version = "2.5.0" @@ -127,6 +282,31 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-compression" +version = "0.4.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93c1f86859c1af3d514fa19e8323147ff10ea98684e6c7b307912509f50e67b2" +dependencies = [ + "compression-codecs", + "compression-core", + "futures-core", + "futures-io", + "pin-project-lite", +] + +[[package]] +name = "async-native-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9343dc5acf07e79ff82d0c37899f079db3534d99f189a1837c8e549c99405bec" +dependencies = [ + "futures-util", + "native-tls", + "thiserror 1.0.69", + "url", +] + [[package]] name = "async-stream" version = "0.2.1" @@ -181,6 +361,49 @@ dependencies = 
[ "syn 2.0.106", ] +[[package]] +name = "async_executors" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a982d2f86de6137cc05c9db9a915a19886c97911f9790d04f174cede74be01a5" +dependencies = [ + "blanket", + "futures-core", + "futures-task", + "futures-util", + "pin-project", + "rustc_version", + "tokio", +] + +[[package]] +name = "asynchronous-codec" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" +dependencies = [ + "bytes", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite", +] + +[[package]] +name = "atomic" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" + +[[package]] +name = "atomic" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a89cbf775b137e9b968e67227ef7f775587cde3fd31b0d8599dbd0f598a48340" +dependencies = [ + "bytemuck", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -193,6 +416,28 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "aws-lc-rs" +version = "1.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94bffc006df10ac2a68c83692d734a465f8ee6c5b384d8545a636f81d858f4bf" +dependencies = [ + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.38.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4321e568ed89bb5a7d291a7f37997c2c0df89809d7b6d12062c81ddb54aa782e" +dependencies = [ + "cc", + "cmake", + "dunce", + "fs_extra", +] + [[package]] name = "axum" version = "0.6.20" @@ -208,7 +453,7 @@ dependencies = [ "http-body 0.4.6", "hyper 0.14.32", "itoa", - "matchit", + "matchit 
0.7.3", "memchr", "mime", "percent-encoding", @@ -223,33 +468,32 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.9" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8" dependencies = [ - "async-trait", - "axum-core 0.4.5", + "axum-core 0.5.6", "bytes", + "form_urlencoded", "futures-util", "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.7.0", + "hyper 1.8.1", "hyper-util", "itoa", - "matchit", + "matchit 0.8.4", "memchr", "mime", "percent-encoding", "pin-project-lite", - "rustversion", - "serde", + "serde_core", "serde_json", "serde_path_to_error", "serde_urlencoded", "sync_wrapper 1.0.2", "tokio", - "tower 0.5.2", + "tower 0.5.3", "tower-layer", "tower-service", "tracing", @@ -274,19 +518,17 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" dependencies = [ - "async-trait", "bytes", - "futures-util", + "futures-core", "http 1.3.1", "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", - "rustversion", "sync_wrapper 1.0.2", "tower-layer", "tower-service", @@ -294,19 +536,10 @@ dependencies = [ ] [[package]] -name = "backtrace" -version = "0.3.75" +name = "base16ct" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-targets 0.52.6", -] +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" [[package]] name = "base64" @@ -326,6 +559,16 @@ 
version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" +[[package]] +name = "bincode" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36eaf5d7b090263e8150820482d5d93cd964a81e4019913c972f4edcc6edb740" +dependencies = [ + "serde", + "unty", +] + [[package]] name = "bindgen" version = "0.64.0" @@ -383,6 +626,18 @@ version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + [[package]] name = "blake2" version = "0.10.6" @@ -392,6 +647,17 @@ dependencies = [ "digest", ] +[[package]] +name = "blanket" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0b121a9fe0df916e362fb3271088d071159cdf11db0e4182d02152850756eff" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "block-buffer" version = "0.10.4" @@ -401,6 +667,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "bstr" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63044e1ae8e69f3b5a92c736ca6269b8d12fa7efe39bf34ddb06d102cf0e2cab" +dependencies = [ + "memchr", + "regex-automata", + "serde", +] + [[package]] name = "bumpalo" version = "3.19.0" @@ -413,9 +690,10 @@ version = "0.1.0" dependencies = [ "aead", "anyhow", + "arti-client", "async-channel", "async-stream 0.2.1", - "axum 0.7.9", + "axum 0.8.8", "base64 0.21.7", "blake2", "caps", @@ -431,28 +709,31 @@ dependencies = [ "insta", "ip_network", "ip_network_table", + "libc", "libsystemd", "log", "nix 0.27.1", 
"once_cell", "parking_lot", - "prost 0.13.5", - "prost-types 0.13.5", + "prost 0.14.3", + "prost-types 0.14.3", "rand 0.8.5", "rand_core 0.6.4", - "reqwest 0.12.23", + "reqwest", "ring", "rusqlite", "rust-ini", - "schemars", + "schemars 0.8.22", "serde", "serde_json", "tokio", "tokio-stream", - "toml", - "tonic 0.12.3", - "tonic-build", - "tower 0.4.13", + "tokio-util", + "toml 0.8.23", + "tonic 0.14.5", + "tonic-prost", + "tonic-prost-build", + "tower 0.5.3", "tracing", "tracing-journald", "tracing-log 0.1.4", @@ -462,6 +743,18 @@ dependencies = [ "x25519-dalek", ] +[[package]] +name = "by_address" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64fa3c856b712db6612c019f14756e64e4bcea13337a6b33b696333a9eaa2d06" + +[[package]] +name = "bytemuck" +version = "1.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8efb64bd706a16a1bdde310ae86b351e4d21550d98d056f22f8a7f7a2183fec" + [[package]] name = "byteorder" version = "1.5.0" @@ -505,10 +798,22 @@ dependencies = [ ] [[package]] -name = "cc" -version = "1.2.38" +name = "caret" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80f41ae168f955c12fb8960b057d70d0ca153fb83182b57d86380443527be7e9" +checksum = "beae2cb9f60bc3f21effaaf9c64e51f6627edd54eedc9199ba07f519ef2a2101" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cc" +version = "1.2.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423" dependencies = [ "find-msvc-tools", "jobserver", @@ -516,6 +821,12 @@ dependencies = [ "shlex", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cexpr" version = "0.6.0" @@ -561,6 +872,45 @@ dependencies = [ "zeroize", ] +[[package]] +name = "chrono" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" +dependencies = [ + "iana-time-zone", + "num-traits", + "serde", + "windows-link 0.2.0", +] + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "cipher" version = "0.4.4" @@ -602,7 +952,7 @@ dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim", + "strsim 0.11.1", ] [[package]] @@ -623,12 +973,61 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" +[[package]] +name = "cmake" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" +dependencies = [ + "cc", +] + +[[package]] +name = "coarsetime" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e58eb270476aa4fc7843849f8a35063e8743b4dbcdf6dd0f8ea0886980c204c2" +dependencies = [ + "libc", + "wasix", + "wasm-bindgen", +] + 
[[package]] name = "colorchoice" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + +[[package]] +name = "compression-codecs" +version = "0.4.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "680dc087785c5230f8e8843e2e57ac7c1c90488b6a91b88caa265410568f441b" +dependencies = [ + "compression-core", + "flate2", + "liblzma", + "zstd 0.13.3", + "zstd-safe 7.2.4", +] + +[[package]] +name = "compression-core" +version = "0.4.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75984efb6ed102a0d42db99afb6c1948f0380d1d91808d5529916e6c08b49d8d" + [[package]] name = "concurrent-queue" version = "2.5.0" @@ -688,6 +1087,12 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + [[package]] name = "const-random" version = "0.1.18" @@ -714,6 +1119,24 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "convert_case" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "cookie-factory" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9885fa71e26b8ab7855e2ec7cae6e9b380edff76cd052e07c683a0319d51b3a2" +dependencies = [ + "futures", +] + 
[[package]] name = "core-foundation" version = "0.9.4" @@ -724,6 +1147,16 @@ dependencies = [ "libc", ] +[[package]] +name = "core-foundation" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -748,6 +1181,51 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "950046b2aa2492f9a536f5f4f9a3de7b9e2476e575e05bd6c333371add4d98f3" +dependencies = [ + "alloca", + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "itertools 0.13.0", + "num-traits", + "oorandom", + "page_size", + "plotters", + "rayon", + "regex", + "serde", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-cycles-per-byte" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5396de42a52e9e5d8f67ef0702dae30451f310a9ba1c3094dcf228f0be0e54bc" +dependencies = [ + "cfg-if", + "criterion", +] + +[[package]] +name = "criterion-plot" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8d80a2f4f5b554395e47b5d8305bc3d27813bacb73493eb1001e8f76dae29ea" +dependencies = [ + "cast", + "itertools 0.13.0", +] + [[package]] name = "crossbeam-channel" version = "0.5.15" @@ -757,6 +1235,34 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-queue" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.21" @@ -769,6 +1275,18 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -780,6 +1298,15 @@ dependencies = [ "typenum", ] +[[package]] +name = "ctr" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher", +] + [[package]] name = "curve25519-dalek" version = "4.1.3" @@ -789,6 +1316,7 @@ dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", + "digest", "fiat-crypto", "rustc_version", "subtle", @@ -806,6 +1334,140 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "darling" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +dependencies = [ + "darling_core 0.14.4", + "darling_macro 0.14.4", +] + +[[package]] +name = "darling" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core 0.21.3", + 
"darling_macro 0.21.3", +] + +[[package]] +name = "darling" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d" +dependencies = [ + "darling_core 0.23.0", + "darling_macro 0.23.0", +] + +[[package]] +name = "darling_core" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn 1.0.109", +] + +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "darling_core" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0" +dependencies = [ + "ident_case", + "proc-macro2", + "quote", + "strsim 0.11.1", + "syn 2.0.106", +] + +[[package]] +name = "darling_macro" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +dependencies = [ + "darling_core 0.14.4", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core 0.21.3", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "darling_macro" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" +dependencies = [ + 
"darling_core 0.23.0", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "data-encoding" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" + +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "der-parser" +version = "10.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6" +dependencies = [ + "asn1-rs", + "cookie-factory", + "displaydoc", + "nom 7.1.3", + "num-traits", + "rusticata-macros", +] + [[package]] name = "deranged" version = "0.5.3" @@ -813,6 +1475,89 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d630bccd429a5bb5a64b5e94f693bfc48c9f8566418fda4c494cc94f911f87cc" dependencies = [ "powerfmt", + "serde", +] + +[[package]] +name = "derive-deftly" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "284db66a66f03c3dafbe17360d959eb76b83f77cfe191677e2a7899c0da291f3" +dependencies = [ + "derive-deftly-macros", + "heck", +] + +[[package]] +name = "derive-deftly-macros" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caef6056a5788d05d173cdc3c562ac28ae093828f851f69378b74e4e3d578e41" +dependencies = [ + "heck", + "indexmap 2.11.4", + "itertools 0.14.0", + "proc-macro-crate", + "proc-macro2", + "quote", + "sha3", + "strum", + "syn 2.0.106", + "void", +] + +[[package]] +name = "derive_builder_core_fork_arti" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24c1b715c79be6328caa9a5e1a387a196ea503740f0722ec3dd8f67a9e72314d" +dependencies = [ + "darling 0.14.4", 
+ "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_builder_fork_arti" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3eae24d595f4d0ecc90a9a5a6d11c2bd8dafe2375ec4a1ec63250e5ade7d228" +dependencies = [ + "derive_builder_macro_fork_arti", +] + +[[package]] +name = "derive_builder_macro_fork_arti" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69887769a2489cd946bf782eb2b1bb2cb7bc88551440c94a765d4f040c08ebf3" +dependencies = [ + "derive_builder_core_fork_arti", + "syn 1.0.109", +] + +[[package]] +name = "derive_more" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "rustc_version", + "syn 2.0.106", + "unicode-xid", ] [[package]] @@ -822,10 +1567,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", + "const-oid", "crypto-common", "subtle", ] +[[package]] +name = "directories" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16f5094c54661b38d03bd7e50df373292118db60b585c08a411c6d840017fe7d" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.61.0", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -852,18 +1628,101 @@ version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f" +[[package]] +name = "downcast-rs" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "117240f60069e65410b3ae1bb213295bd828f707b5bec6596a1afc8793ce0cbc" + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "dyn-clone" version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest", + "elliptic-curve", + "rfc6979", + "signature", + "spki", +] + +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8", + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" +dependencies = [ + "curve25519-dalek", + "ed25519", + "merlin", + "rand_core 0.6.4", + "serde", + "sha2", + "subtle", + "zeroize", +] + +[[package]] +name = "educe" +version = "0.4.23" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f0042ff8246a363dbe77d2ceedb073339e85a804b9a47636c6e016a9a32c05f" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "either" version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest", + "ff", + "generic-array", + "group", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "encode_unicode" version = "1.0.0" @@ -879,6 +1738,52 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "enum-ordinalize" +version = "3.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bf1fa3f06bbff1ea5b1a9c7b14aa992a39657db60a2759457328d7e058f49ee" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "enum_dispatch" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" +dependencies = [ + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "enumset" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25b07a8dfbbbfc0064c0a6bdf9edcf966de6b1c33ce344bdeca3b41615452634" +dependencies = [ + "enumset_derive", +] + +[[package]] +name = "enumset_derive" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f43e744e4ea338060faee68ed933e46e722fb7f3617e722a5772d7e856d8b3ce" +dependencies = [ + "darling 0.21.3", + "proc-macro2", + "quote", + "syn 
2.0.106", +] + [[package]] name = "equivalent" version = "1.0.2" @@ -954,6 +1859,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ff" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "fiat-crypto" version = "0.2.9" @@ -961,10 +1876,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] -name = "find-msvc-tools" -version = "0.1.2" +name = "figment" +version = "0.10.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ced73b1dacfc750a6db6c0a0c3a3853c8b41997e2e2c563dc90804ae6867959" +checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" +dependencies = [ + "atomic 0.6.1", + "serde", + "toml 0.8.23", + "uncased", + "version_check", +] + +[[package]] +name = "filetime" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" +dependencies = [ + "cfg-if", + "libc", + "libredox", +] + +[[package]] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" [[package]] name = "fixedbitset" @@ -982,12 +1921,30 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "fluid-let" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "749cff877dc1af878a0b31a41dd221a753634401ea0ef2f87b62d3171522485a" + [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foldhash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" + [[package]] name = "foreign-types" version = "0.3.2" @@ -1012,6 +1969,43 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fs-mistrust" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "189ebb6d350de8d03181999fa9ebe8a021c5ab041004388f29e4dd2c52dc88a2" +dependencies = [ + "derive_builder_fork_arti", + "dirs", + "libc", + "pwd-grp", + "serde", + "thiserror 2.0.16", + "walkdir", +] + +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + +[[package]] +name = "fslock" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04412b8935272e3a9bae6f48c7bfff74c2911f60525404edfdd28e49884c3bfb" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + [[package]] name = "futures" version = "0.3.31" @@ -1109,6 +2103,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] @@ -1133,16 +2128,35 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "r-efi", + "r-efi 5.3.0", "wasi 0.14.7+wasi-0.2.4", "wasm-bindgen", ] [[package]] -name = "gimli" -version = "0.31.1" +name = "getrandom" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" +dependencies = [ + "cfg-if", + "libc", + "r-efi 6.0.0", + "wasip2", + "wasip3", +] + +[[package]] +name = "getset" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.106", +] [[package]] name = "glob" @@ -1150,6 +2164,23 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" +[[package]] +name = "glob-match" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9985c9503b412198aa4197559e9a318524ebc4519c229bfa05a535828c950b9d" + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "h2" version = "0.3.27" @@ -1188,6 +2219,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" +dependencies = [ + "cfg-if", + "crunchy", + "zerocopy", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -1199,23 +2241,32 @@ name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" 
dependencies = [ - "ahash", + "foldhash 0.1.5", ] [[package]] name = "hashbrown" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +dependencies = [ + "foldhash 0.2.0", +] [[package]] name = "hashlink" -version = "0.9.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +checksum = "ea0b22561a9c04a7cb1a302c013e0259cd3b4bb619f145b32f72b8b4bcbed230" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.16.1", ] [[package]] @@ -1243,6 +2294,15 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hkdf" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" +dependencies = [ + "hmac", +] + [[package]] name = "hmac" version = "0.12.1" @@ -1261,6 +2321,12 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "hostname-validator" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f558a64ac9af88b5ba400d99b579451af0d39c6d360980045b91aac966d705e2" + [[package]] name = "http" version = "0.2.12" @@ -1335,6 +2401,16 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" +[[package]] +name = "humantime-serde" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" +dependencies = [ + "humantime", + "serde", +] + [[package]] name = "hyper" version = 
"0.14.32" @@ -1361,9 +2437,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" dependencies = [ "atomic-waker", "bytes", @@ -1389,14 +2465,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ "http 1.3.1", - "hyper 1.7.0", + "hyper 1.8.1", "hyper-util", "rustls", "rustls-pki-types", "tokio", "tokio-rustls", "tower-service", - "webpki-roots", ] [[package]] @@ -1417,48 +2492,60 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper 1.7.0", + "hyper 1.8.1", "hyper-util", "pin-project-lite", "tokio", "tower-service", ] -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper 0.14.32", - "native-tls", - "tokio", - "tokio-native-tls", -] - [[package]] name = "hyper-util" -version = "0.1.17" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" dependencies = [ "base64 0.22.1", "bytes", "futures-channel", - "futures-core", "futures-util", "http 1.3.1", "http-body 1.0.1", - "hyper 1.7.0", + "hyper 1.8.1", "ipnet", "libc", "percent-encoding", "pin-project-lite", - "socket2 0.6.0", + "socket2 0.6.3", + "system-configuration", "tokio", "tower-service", "tracing", + "windows-registry", +] + +[[package]] +name = 
"iana-time-zone" +version = "0.1.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core 0.62.1", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", ] [[package]] @@ -1547,6 +2634,18 @@ dependencies = [ "zerovec", ] +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "1.1.0" @@ -1576,6 +2675,7 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", + "serde", ] [[package]] @@ -1585,7 +2685,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", - "hashbrown 0.16.0", + "hashbrown 0.16.1", + "serde", + "serde_core", +] + +[[package]] +name = "inotify" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd5b3eaf1a28b758ac0faa5a4254e8ab2705605496f1b1f3fbbc3988ad73d199" +dependencies = [ + "bitflags 2.9.4", + "inotify-sys", + "libc", +] + +[[package]] +name = "inotify-sys" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb" +dependencies = [ + "libc", ] [[package]] @@ -1610,14 +2732,12 @@ dependencies = [ ] [[package]] -name = "io-uring" -version = "0.7.10" +name = "inventory" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b" +checksum = "009ae045c87e7082cb72dab0ccd01ae075dd00141ddc108f43a0ea150a9e7227" dependencies = [ - "bitflags 2.9.4", - "cfg-if", - "libc", + "rustversion", ] [[package]] @@ -1673,6 +2793,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.14.0" @@ -1688,6 +2817,28 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "jobserver" version = "0.1.34" @@ -1700,19 +2851,51 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.80" +version = "0.3.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852f13bec5eba4ba9afbeb93fd7c13fe56147f055939ae21c43a29a0ecb2702e" +checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c" dependencies 
= [ "once_cell", "wasm-bindgen", ] +[[package]] +name = "keccak" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "kqueue" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac30106d7dce88daf4a3fcb4879ea939476d5074a9b7ddd0fb97fa4bed5596a" +dependencies = [ + "kqueue-sys", + "libc", +] + +[[package]] +name = "kqueue-sys" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b" +dependencies = [ + "bitflags 1.3.2", + "libc", +] + [[package]] name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin", +] [[package]] name = "lazycell" @@ -1720,6 +2903,12 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + [[package]] name = "libc" version = "0.2.176" @@ -1747,10 +2936,48 @@ dependencies = [ ] [[package]] -name = "libsqlite3-sys" -version = "0.28.0" +name = "liblzma" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" +checksum = "b6033b77c21d1f56deeae8014eb9fbe7bdf1765185a6c508b5ca82eeaed7f899" +dependencies = [ + "liblzma-sys", +] + +[[package]] +name = "liblzma-sys" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9f2db66f3268487b5033077f266da6777d057949b8f93c8ad82e441df25e6186" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + +[[package]] +name = "libm" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" + +[[package]] +name = "libredox" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a" +dependencies = [ + "bitflags 2.9.4", + "libc", + "plain", + "redox_syscall 0.7.3", +] + +[[package]] +name = "libsqlite3-sys" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b4103cffefa72eb8428cb6b47d6627161e51c2739fc5e3b734584157bc642a" dependencies = [ "cc", "pkg-config", @@ -1830,12 +3057,27 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +[[package]] +name = "matchit" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + [[package]] name = "memchr" version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" +[[package]] +name = "memmap2" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "714098028fe011992e1c3962653c96b2d578c4b4bce9036e15ff220319b1e0e3" +dependencies = [ + "libc", +] + [[package]] name = "memoffset" version = "0.7.1" @@ -1854,6 +3096,18 @@ dependencies = [ "autocfg", ] +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 
0.6.4", + "zeroize", +] + [[package]] name = "miette" version = "5.10.0" @@ -1905,6 +3159,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", + "log", "wasi 0.11.1+wasi-snapshot-preview1", "windows-sys 0.59.0", ] @@ -1924,10 +3179,10 @@ dependencies = [ "libc", "log", "openssl", - "openssl-probe", + "openssl-probe 0.1.6", "openssl-sys", "schannel", - "security-framework", + "security-framework 2.11.1", "security-framework-sys", "tempfile", ] @@ -1988,6 +3243,47 @@ dependencies = [ "memchr", ] +[[package]] +name = "nonany" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6b8866ec53810a9a4b3d434a29801e78c707430a9ae11c2db4b8b62bb9675a0" + +[[package]] +name = "notify" +version = "8.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d3d07927151ff8575b7087f245456e549fea62edf0ec4e565a5ee50c8402bc3" +dependencies = [ + "bitflags 2.9.4", + "inotify", + "kqueue", + "libc", + "log", + "mio", + "notify-types", + "walkdir", + "windows-sys 0.60.2", +] + +[[package]] +name = "notify-types" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42b8cfee0e339a0337359f3c88165702ac6e600dc01c0cc9579a92d62b08477a" +dependencies = [ + "bitflags 2.9.4", +] + +[[package]] +name = "ntapi" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3b335231dfd352ffb0f8017f3b6027a4917f7df785ea2143d8af2adc66980ae" +dependencies = [ + "winapi", +] + [[package]] name = "nu-ansi-term" version = "0.50.1" @@ -1998,10 +3294,56 @@ dependencies = [ ] [[package]] -name = "num-conv" -version = "0.1.0" +name = "num-bigint" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = 
"a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-bigint-dig" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e661dda6640fad38e827a6d4a310ff4763082116fe217f279885c97f511bb0b7" +dependencies = [ + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand 0.8.5", + "smallvec", + "zeroize", +] + +[[package]] +name = "num-conv" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] [[package]] name = "num-traits" @@ -2010,15 +3352,48 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", + "libm", ] [[package]] -name = "object" -version = "0.36.7" +name = "num_enum" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "5d0bca838442ec211fa11de3a8b0e0e8f3a4522575b5c4c06ed722e005036f26" dependencies = [ - "memchr", + "num_enum_derive", + "rustversion", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "680998035259dcfcafe653688bf2aa6d3e2dc05e98be6ab46afb089dc84f1df8" 
+dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "objc2-core-foundation" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a180dd8642fa45cdb7dd721cd4c11b1cadd4929ce112ebd8b9f5803cc79d536" +dependencies = [ + "bitflags 2.9.4", +] + +[[package]] +name = "objc2-io-kit" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33fafba39597d6dc1fb709123dfa8289d39406734be322956a69f0931c73bb15" +dependencies = [ + "libc", + "objc2-core-foundation", ] [[package]] @@ -2033,6 +3408,21 @@ version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +[[package]] +name = "oneshot-fused-workaround" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17b52d0e4a06a4c7eb8d2943c0015fa628cf4ccc409429cebc0f5bed6d33a82" +dependencies = [ + "futures", +] + +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + [[package]] name = "opaque-debug" version = "0.3.1" @@ -2071,6 +3461,12 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-probe" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" + [[package]] name = "openssl-sys" version = "0.9.109" @@ -2083,6 +3479,21 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] 
+name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + [[package]] name = "ordered-multimap" version = "0.7.3" @@ -2093,6 +3504,63 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "os_str_bytes" +version = "6.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" +dependencies = [ + "memchr", +] + +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2", +] + +[[package]] +name = "p384" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe42f1670a52a47d448f14b6a5c61dd78fce51856e68edaa38f7ae3a46b8d6b6" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2", +] + +[[package]] +name = "p521" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc9e2161f1f215afdfce23677034ae137bbd45016a880c2eb3ba8eb95f085b2" +dependencies = [ + "base16ct", + "ecdsa", + "elliptic-curve", + "primeorder", + "rand_core 0.6.4", + "sha2", +] + +[[package]] +name = "page_size" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "parking" version = "2.2.1" @@ -2117,7 +3585,7 @@ checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.17", "smallvec", "windows-targets 0.52.6", ] @@ -2133,6 +3601,12 @@ dependencies = [ "subtle", ] 
+[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + [[package]] name = "pbkdf2" version = "0.11.0" @@ -2151,6 +3625,15 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.2" @@ -2159,14 +3642,58 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "petgraph" -version = "0.7.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" +checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455" dependencies = [ "fixedbitset", + "hashbrown 0.15.5", "indexmap 2.11.4", ] +[[package]] +name = "phf" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf" +dependencies = [ + "phf_macros", + "phf_shared", + "serde", +] + +[[package]] +name = "phf_generator" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135ace3a761e564ec88c03a77317a7c6b80bb7f7135ef2544dbe054243b89737" +dependencies = [ + "fastrand", + "phf_shared", +] + +[[package]] +name = "phf_macros" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812f032b54b1e759ccd5f8b6677695d5268c588701effba24601f6932f8269ef" +dependencies = [ + "phf_generator", + "phf_shared", + "proc-macro2", + "quote", + "syn 2.0.106", +] 
+ +[[package]] +name = "phf_shared" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57fef6bc5981e38c2ce2d63bfa546861309f875b8a75f092d1d54ae2d64f266" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.1.10" @@ -2199,12 +3726,67 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der", + "pkcs8", + "spki", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + [[package]] name = "pkg-config" version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + [[package]] name = "poly1305" version = "0.8.0" @@ -2216,6 +3798,21 @@ dependencies = [ "universal-hash", ] +[[package]] +name = "postage" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af3fb618632874fb76937c2361a7f22afd393c982a2165595407edc75b06d3c1" +dependencies = [ + "atomic 0.5.3", + "crossbeam-queue", + "futures", + "parking_lot", + "pin-project", + "static_assertions", + "thiserror 1.0.69", +] + [[package]] name = "potential_utf" version = "0.1.3" @@ -2250,6 +3847,57 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve", +] + +[[package]] +name = "priority-queue" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93980406f12d9f8140ed5abe7155acb10bb1e69ea55c88960b9c2f117445ef96" +dependencies = [ + "equivalent", + "indexmap 2.11.4", + "serde", +] + +[[package]] +name = "proc-macro-crate" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e67ba7e9b2b56446f1d419b1d807906278ffa1a658a8a5d8a39dcb1f5a78614f" +dependencies = [ + "toml_edit 0.25.4+spec-1.1.0", +] + +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + 
"syn 2.0.106", +] + [[package]] name = "proc-macro2" version = "1.0.101" @@ -2271,29 +3919,30 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.5" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" dependencies = [ "bytes", - "prost-derive 0.13.5", + "prost-derive 0.14.3", ] [[package]] name = "prost-build" -version = "0.13.5" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" +checksum = "343d3bd7056eda839b03204e68deff7d1b13aba7af2b2fd16890697274262ee7" dependencies = [ "heck", "itertools 0.14.0", "log", "multimap", - "once_cell", "petgraph", "prettyplease", - "prost 0.13.5", - "prost-types 0.13.5", + "prost 0.14.3", + "prost-types 0.14.3", + "pulldown-cmark", + "pulldown-cmark-to-cmark", "regex", "syn 2.0.106", "tempfile", @@ -2314,9 +3963,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.5" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", "itertools 0.14.0", @@ -2336,11 +3985,43 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.13.5" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +checksum = "8991c4cbdb8bc5b11f0b074ffe286c30e523de90fee5ba8132f1399f23cb3dd7" dependencies = [ - "prost 0.13.5", + "prost 0.14.3", +] + +[[package]] +name = "pulldown-cmark" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"83c41efbf8f90ac44de7f3a868f0867851d261b56291732d0cbf7cceaaeb55a6" +dependencies = [ + "bitflags 2.9.4", + "memchr", + "unicase", +] + +[[package]] +name = "pulldown-cmark-to-cmark" +version = "22.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50793def1b900256624a709439404384204a5dc3a6ec580281bfaac35e882e90" +dependencies = [ + "pulldown-cmark", +] + +[[package]] +name = "pwd-grp" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e2023f41b5fcb7c30eb5300a5733edfaa9e0e0d502d51b586f65633fd39e40c" +dependencies = [ + "derive-deftly", + "libc", + "paste", + "thiserror 2.0.16", ] [[package]] @@ -2356,7 +4037,7 @@ dependencies = [ "quinn-udp", "rustc-hash 2.1.1", "rustls", - "socket2 0.6.0", + "socket2 0.6.3", "thiserror 2.0.16", "tokio", "tracing", @@ -2369,6 +4050,7 @@ version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" dependencies = [ + "aws-lc-rs", "bytes", "getrandom 0.3.3", "lru-slab", @@ -2393,7 +4075,7 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.6.0", + "socket2 0.6.3", "tracing", "windows-sys 0.60.2", ] @@ -2413,6 +4095,18 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + [[package]] name = "rand" version = "0.8.5" @@ -2472,6 +4166,46 @@ dependencies = [ "getrandom 0.3.3", ] +[[package]] +name = "rand_jitter" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b16df48f071248e67b8fc5e866d9448d45c08ad8b672baaaf796e2f15e606ff0" +dependencies = [ + "libc", + "rand_core 0.9.3", + "winapi", +] + +[[package]] +name = "rayon" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "rdrand" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d92195228612ac8eed47adbc2ed0f04e513a4ccb98175b6f2bd04d963b533655" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "redox_syscall" version = "0.5.17" @@ -2481,6 +4215,46 @@ dependencies = [ "bitflags 2.9.4", ] +[[package]] +name = "redox_syscall" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce70a74e890531977d37e532c34d45e9055d2409ed08ddba14529471ed0be16" +dependencies = [ + "bitflags 2.9.4", +] + +[[package]] +name = "redox_users" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" +dependencies = [ + "getrandom 0.2.16", + "libredox", + "thiserror 2.0.16", +] + +[[package]] +name = "ref-cast" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "regex" version = "1.11.2" @@ -2512,80 +4286,61 @@ checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" [[package]] name = "reqwest" -version = "0.11.27" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - "base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.3.27", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", - "hyper-tls", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls-pemfile", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration", - "tokio", - "tokio-native-tls", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - -[[package]] -name = "reqwest" -version = "0.12.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb" +checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801" dependencies = [ "base64 0.22.1", "bytes", + "encoding_rs", "futures-core", + "h2 0.4.12", "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.7.0", + "hyper 1.8.1", "hyper-rustls", "hyper-util", "js-sys", "log", + "mime", "percent-encoding", "pin-project-lite", "quinn", "rustls", "rustls-pki-types", + "rustls-platform-verifier", "serde", "serde_json", - "serde_urlencoded", "sync_wrapper 1.0.2", "tokio", "tokio-rustls", - "tower 0.5.2", + "tower 0.5.3", "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots", +] + +[[package]] +name = "retry-error" +version = "0.11.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c322ea521636c5a3f13685a6266055b2dda7e54e2be35214d7c2a5d0672a5db" +dependencies = [ + "humantime", +] + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", ] [[package]] @@ -2603,10 +4358,41 @@ dependencies = [ ] [[package]] -name = "rusqlite" -version = "0.31.0" +name = "rsa" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" +checksum = "b8573f03f5883dcaebdfcf4725caa1ecb9c15b2ef50c43a07b816e06799bb12d" +dependencies = [ + "const-oid", + "digest", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core 0.6.4", + "sha2", + "signature", + "spki", + "subtle", + "zeroize", +] + +[[package]] +name = "rsqlite-vfs" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a1f2315036ef6b1fbacd1972e8ee7688030b0a2121edfc2a6550febd41574d" +dependencies = [ + "hashbrown 0.16.1", + "thiserror 2.0.16", +] + +[[package]] +name = "rusqlite" +version = "0.38.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1c93dd1c9683b438c392c492109cb702b8090b2bfc8fed6f6e4eb4523f17af3" dependencies = [ "bitflags 2.9.4", "fallible-iterator", @@ -2614,6 +4400,8 @@ dependencies = [ "hashlink", "libsqlite3-sys", "smallvec", + "sqlite-wasm-rs", + "time", ] [[package]] @@ -2626,12 +4414,6 @@ dependencies = [ "ordered-multimap", ] -[[package]] -name = "rustc-demangle" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" - [[package]] name = "rustc-hash" version = "1.1.0" @@ -2653,6 +4435,15 @@ dependencies = [ "semver", ] 
+[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom 7.1.3", +] + [[package]] name = "rustix" version = "0.38.44" @@ -2685,8 +4476,8 @@ version = "0.23.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" dependencies = [ + "aws-lc-rs", "once_cell", - "ring", "rustls-pki-types", "rustls-webpki", "subtle", @@ -2694,12 +4485,15 @@ dependencies = [ ] [[package]] -name = "rustls-pemfile" -version = "1.0.4" +name = "rustls-native-certs" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "base64 0.21.7", + "openssl-probe 0.2.1", + "rustls-pki-types", + "schannel", + "security-framework 3.5.1", ] [[package]] @@ -2712,12 +4506,40 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls-platform-verifier" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" +dependencies = [ + "core-foundation 0.10.1", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework 3.5.1", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.61.0", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + [[package]] name = "rustls-webpki" version = "0.103.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8572f3c2cb9934231157b45499fc41e1f58c589fdfb81a844ba873265e80f8eb" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -2735,6 +4557,43 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +[[package]] +name = "safelog" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8949ab2810bf603caef654634e5b4cedcbc05c120342a177cf8aaa122ef4bb76" +dependencies = [ + "derive_more", + "educe", + "either", + "fluid-let", + "thiserror 2.0.16", +] + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "sanitize-filename" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc984f4f9ceb736a7bb755c3e3bd17dc56370af2600c9780dcc48c66453da34d" +dependencies = [ + "regex", +] + +[[package]] +name = "saturating-time" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b63583a1dd0647d1484228529ab4ecaa874048d2956f117362aa5f5826456230" + [[package]] name = "schannel" version = "0.1.28" @@ -2756,6 +4615,30 @@ dependencies = [ "serde_json", ] +[[package]] +name = "schemars" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "schemars" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2b42f36aa1cd011945615b92222f6bf73c599a102a300334cd7f8dbeec726cc" +dependencies = [ + "dyn-clone", + "ref-cast", + 
"serde", + "serde_json", +] + [[package]] name = "schemars_derive" version = "0.8.22" @@ -2774,6 +4657,20 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "subtle", + "zeroize", +] + [[package]] name = "security-framework" version = "2.11.1" @@ -2781,7 +4678,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.9.4", - "core-foundation", + "core-foundation 0.9.4", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework" +version = "3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" +dependencies = [ + "bitflags 2.9.4", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -2805,28 +4715,38 @@ checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" [[package]] name = "serde" -version = "1.0.226" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dca6411025b24b60bfa7ec1fe1f8e710ac09782dca409ee8237ba74b51295fd" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ "serde_core", "serde_derive", ] [[package]] -name = "serde_core" -version = "1.0.226" +name = "serde-value" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4" +checksum = 
"f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.226" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", @@ -2844,6 +4764,16 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "serde_ignored" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115dffd5f3853e06e746965a20dcbae6ee747ae30b543d91b0e089668bb07798" +dependencies = [ + "serde", + "serde_core", +] + [[package]] name = "serde_json" version = "1.0.145" @@ -2877,6 +4807,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" +dependencies = [ + "serde_core", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -2889,6 +4828,37 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_with" +version = "3.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5414fad8e6907dbdd5bc441a50ae8d6e26151a03b1de04d89a5576de61d01f" +dependencies = [ + "base64 0.22.1", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.11.4", + "schemars 0.9.0", + "schemars 1.2.1", + "serde_core", + "serde_json", + "serde_with_macros", + "time", +] + +[[package]] +name = "serde_with_macros" +version = "3.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "d3db8978e608f1fe7357e211969fd9abdcae80bac1ba7a3369bb7eb6b404eb65" +dependencies = [ + "darling 0.23.0", + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "sha-1" version = "0.10.1" @@ -2922,6 +4892,16 @@ dependencies = [ "digest", ] +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest", + "keccak", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -2931,6 +4911,17 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shellexpand" +version = "3.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32824fab5e16e6c4d86dc1ba84489390419a39f97699852b66480bb87d297ed8" +dependencies = [ + "bstr", + "dirs", + "os_str_bytes", +] + [[package]] name = "shlex" version = "1.3.0" @@ -2946,18 +4937,57 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest", + "rand_core 0.6.4", +] + [[package]] name = "similar" version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" +[[package]] +name = "siphasher" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" + [[package]] name = "slab" version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" +[[package]] +name = "slotmap" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdd58c3c93c3d278ca835519292445cb4b0d4dc59ccfdf7ceadaab3f8aeb4038" +dependencies 
= [ + "serde", + "version_check", +] + +[[package]] +name = "slotmap-careful" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed92816c1fbb29891a525b92d5fa95757c9dee47044f76c8e06ceb1e052a8d64" +dependencies = [ + "paste", + "serde", + "slotmap", + "thiserror 2.0.16", + "void", +] + [[package]] name = "smallvec" version = "1.15.1" @@ -2976,12 +5006,82 @@ dependencies = [ [[package]] name = "socket2" -version = "0.6.0" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.0", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "sqlite-wasm-rs" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f4206ed3a67690b9c29b77d728f6acc3ce78f16bf846d83c94f76400320181b" +dependencies = [ + "cc", + "js-sys", + "rsqlite-vfs", + "wasm-bindgen", +] + +[[package]] +name = "ssh-cipher" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caac132742f0d33c3af65bfcde7f6aa8f62f0e991d80db99149eb9d44708784f" +dependencies = [ + "cipher", + "ssh-encoding", +] + +[[package]] +name = "ssh-encoding" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb9242b9ef4108a78e8cd1a2c98e193ef372437f8c22be363075233321dd4a15" +dependencies = [ + "base64ct", + 
"pem-rfc7468", + "sha2", +] + +[[package]] +name = "ssh-key" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b86f5297f0f04d08cabaa0f6bff7cb6aec4d9c3b49d87990d63da9d9156a8c3" +dependencies = [ + "num-bigint-dig", + "p256", + "p384", + "p521", + "rand_core 0.6.4", + "rsa", + "sec1", + "sha2", + "signature", + "ssh-cipher", + "ssh-encoding", + "subtle", + "zeroize", ] [[package]] @@ -3006,12 +5106,45 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + [[package]] name = "strsim" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "strum" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "subtle" version = "2.6.1" @@ -3067,26 +5200,46 @@ dependencies = [ ] [[package]] -name = "system-configuration" -version = "0.5.1" +name = "sysinfo" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +checksum = "252800745060e7b9ffb7b2badbd8b31cfa4aa2e61af879d0a3bf2a317c20217d" dependencies = [ - "bitflags 1.3.2", - "core-foundation", + "libc", + "memchr", + "ntapi", + "objc2-core-foundation", + "objc2-io-kit", + "windows 0.61.3", +] + +[[package]] +name = "system-configuration" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" +dependencies = [ + "bitflags 2.9.4", + "core-foundation 0.9.4", "system-configuration-sys", ] [[package]] name = "system-configuration-sys" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" dependencies = [ "core-foundation-sys", "libc", ] +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + [[package]] name = "tempfile" version = "3.23.0" @@ -3151,22 +5304,35 @@ dependencies = [ [[package]] name = "time" -version = "0.3.44" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", + "itoa", + "js-sys", "num-conv", "powerfmt", - "serde", + "serde_core", "time-core", + "time-macros", ] [[package]] name = "time-core" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" + +[[package]] +name = 
"time-macros" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" +dependencies = [ + "num-conv", + "time-core", +] [[package]] name = "tiny-keccak" @@ -3187,6 +5353,16 @@ dependencies = [ "zerovec", ] +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "1.10.0" @@ -3204,22 +5380,19 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.47.1" +version = "1.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" +checksum = "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d" dependencies = [ - "backtrace", "bytes", - "io-uring", "libc", "mio", "pin-project-lite", "signal-hook-registry", - "slab", - "socket2 0.6.0", + "socket2 0.6.3", "tokio-macros", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.61.0", ] [[package]] @@ -3234,25 +5407,15 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c" dependencies = [ "proc-macro2", "quote", "syn 2.0.106", ] -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.26.3" @@ -3265,9 +5428,9 @@ dependencies = [ 
[[package]] name = "tokio-stream" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" dependencies = [ "futures-core", "pin-project-lite", @@ -3276,12 +5439,13 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.16" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" dependencies = [ "bytes", "futures-core", + "futures-io", "futures-sink", "pin-project-lite", "tokio", @@ -3294,9 +5458,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", - "serde_spanned", - "toml_datetime", - "toml_edit", + "serde_spanned 0.6.9", + "toml_datetime 0.6.11", + "toml_edit 0.22.27", +] + +[[package]] +name = "toml" +version = "0.9.12+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" +dependencies = [ + "indexmap 2.11.4", + "serde_core", + "serde_spanned 1.0.4", + "toml_datetime 0.7.5+spec-1.1.0", + "toml_parser", + "toml_writer", + "winnow", ] [[package]] @@ -3308,6 +5487,24 @@ dependencies = [ "serde", ] +[[package]] +name = "toml_datetime" +version = "0.7.5+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_datetime" +version = "1.0.0+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"32c2555c699578a4f59f0cc68e5116c8d7cabbd45e1409b989d4be085b53f13e" +dependencies = [ + "serde_core", +] + [[package]] name = "toml_edit" version = "0.22.27" @@ -3316,18 +5513,45 @@ checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ "indexmap 2.11.4", "serde", - "serde_spanned", - "toml_datetime", + "serde_spanned 0.6.9", + "toml_datetime 0.6.11", "toml_write", "winnow", ] +[[package]] +name = "toml_edit" +version = "0.25.4+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7193cbd0ce53dc966037f54351dbbcf0d5a642c7f0038c382ef9e677ce8c13f2" +dependencies = [ + "indexmap 2.11.4", + "toml_datetime 1.0.0+spec-1.1.0", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.9+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" +dependencies = [ + "winnow", +] + [[package]] name = "toml_write" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" +[[package]] +name = "toml_writer" +version = "1.0.6+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" + [[package]] name = "tonic" version = "0.10.2" @@ -3357,29 +5581,28 @@ dependencies = [ [[package]] name = "tonic" -version = "0.12.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +checksum = "fec7c61a0695dc1887c1b53952990f3ad2e3a31453e1f49f10e75424943a93ec" dependencies = [ - "async-stream 0.3.6", "async-trait", - "axum 0.7.9", + "axum 0.8.8", "base64 0.22.1", "bytes", "h2 0.4.12", "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.7.0", + "hyper 1.8.1", "hyper-timeout 0.5.2", 
"hyper-util", "percent-encoding", "pin-project", - "prost 0.13.5", - "socket2 0.5.10", + "socket2 0.6.3", + "sync_wrapper 1.0.2", "tokio", "tokio-stream", - "tower 0.4.13", + "tower 0.5.3", "tower-layer", "tower-service", "tracing", @@ -3387,16 +5610,994 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.12.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" +checksum = "1882ac3bf5ef12877d7ed57aad87e75154c11931c2ba7e6cde5e22d63522c734" +dependencies = [ + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "tonic-prost" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a55376a0bbaa4975a3f10d009ad763d8f4108f067c7c2e74f3001fb49778d309" +dependencies = [ + "bytes", + "prost 0.14.3", + "tonic 0.14.5", +] + +[[package]] +name = "tonic-prost-build" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3144df636917574672e93d0f56d7edec49f90305749c668df5101751bb8f95a" dependencies = [ "prettyplease", "proc-macro2", "prost-build", - "prost-types 0.13.5", + "prost-types 0.14.3", "quote", "syn 2.0.106", + "tempfile", + "tonic-build", +] + +[[package]] +name = "tor-async-utils" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "895c61a46909134501c6815eceeb66c9c80fc494ee89429821b0f05ccf34b4f5" +dependencies = [ + "derive-deftly", + "educe", + "futures", + "oneshot-fused-workaround", + "pin-project", + "postage", + "thiserror 2.0.16", + "void", +] + +[[package]] +name = "tor-basic-utils" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fac6e4d7e131b7d69bc85558383cd4ac61e46b4dd0d4ed51632f28fac98cac0c" +dependencies = [ + "derive_more", + "hex", + "itertools 0.14.0", + "libc", + "paste", + "rand 0.9.2", + "rand_chacha 0.9.0", + 
"serde", + "slab", + "smallvec", + "thiserror 2.0.16", + "weak-table", +] + +[[package]] +name = "tor-bytes" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64454947258e49f238a5f06a06250a0c54598a1c7409898b5c79505e6a99e7af" +dependencies = [ + "bytes", + "derive-deftly", + "digest", + "educe", + "getrandom 0.4.2", + "safelog", + "thiserror 2.0.16", + "tor-error", + "tor-llcrypto", + "zeroize", +] + +[[package]] +name = "tor-cell" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ab0c79bc92a957e85959cf397a2d8f9c8294c35fa4f65247a9393b20ac95551" +dependencies = [ + "amplify", + "bitflags 2.9.4", + "bytes", + "caret", + "derive-deftly", + "derive_more", + "educe", + "itertools 0.14.0", + "paste", + "rand 0.9.2", + "smallvec", + "thiserror 2.0.16", + "tor-basic-utils", + "tor-bytes", + "tor-cert", + "tor-error", + "tor-linkspec", + "tor-llcrypto", + "tor-memquota", + "tor-protover", + "tor-units", + "void", +] + +[[package]] +name = "tor-cert" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "debc911738298ee801fce4577c36a50c55295b0bb9c5519461b83cc486a1f86e" +dependencies = [ + "caret", + "derive_builder_fork_arti", + "derive_more", + "digest", + "thiserror 2.0.16", + "tor-bytes", + "tor-checkable", + "tor-error", + "tor-llcrypto", +] + +[[package]] +name = "tor-chanmgr" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7af5b7c2f1e16d1304b5185fcbc91ca5c8df991c21be00702f925f055573eea1" +dependencies = [ + "async-trait", + "caret", + "cfg-if", + "derive-deftly", + "derive_more", + "educe", + "futures", + "oneshot-fused-workaround", + "percent-encoding", + "postage", + "rand 0.9.2", + "safelog", + "serde", + "serde_with", + "thiserror 2.0.16", + "tor-async-utils", + "tor-basic-utils", + "tor-cell", + "tor-config", + "tor-error", + "tor-keymgr", + "tor-linkspec", + "tor-llcrypto", 
+ "tor-memquota", + "tor-netdir", + "tor-proto", + "tor-rtcompat", + "tor-socksproto", + "tor-units", + "tracing", + "url", + "void", +] + +[[package]] +name = "tor-checkable" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15b13a5b50bb55037f2e81b25dde42f420d57c75154216b8ef989006cea3ebee" +dependencies = [ + "humantime", + "signature", + "thiserror 2.0.16", + "tor-llcrypto", +] + +[[package]] +name = "tor-circmgr" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b878f3f7c6be0c7f130d90b347ada2e7c46519dfbdde8273eae2e5d1792caa87" +dependencies = [ + "amplify", + "async-trait", + "cfg-if", + "derive-deftly", + "derive_builder_fork_arti", + "derive_more", + "downcast-rs", + "dyn-clone", + "educe", + "futures", + "humantime-serde", + "itertools 0.14.0", + "once_cell", + "oneshot-fused-workaround", + "pin-project", + "rand 0.9.2", + "retry-error", + "safelog", + "serde", + "thiserror 2.0.16", + "tor-async-utils", + "tor-basic-utils", + "tor-cell", + "tor-chanmgr", + "tor-config", + "tor-dircommon", + "tor-error", + "tor-guardmgr", + "tor-linkspec", + "tor-memquota", + "tor-netdir", + "tor-netdoc", + "tor-persist", + "tor-proto", + "tor-protover", + "tor-relay-selection", + "tor-rtcompat", + "tor-units", + "tracing", + "void", + "weak-table", +] + +[[package]] +name = "tor-config" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cbc74a00ab15bb986e3747c6969e40a58a63065d6f99077e7ee2f4657bb8b03" +dependencies = [ + "amplify", + "cfg-if", + "derive-deftly", + "derive_builder_fork_arti", + "educe", + "either", + "figment", + "fs-mistrust", + "futures", + "humantime-serde", + "itertools 0.14.0", + "notify", + "paste", + "postage", + "regex", + "serde", + "serde-value", + "serde_ignored", + "strum", + "thiserror 2.0.16", + "toml 0.9.12+spec-1.1.0", + "tor-basic-utils", + "tor-error", + "tor-rtcompat", + "tracing", + "void", +] + 
+[[package]] +name = "tor-config-path" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3005ab7b9a26a7271e5adf3dfb4ae18c09a943e32aeccc4f6d1c53a60de74b8d" +dependencies = [ + "directories", + "serde", + "shellexpand", + "thiserror 2.0.16", + "tor-error", + "tor-general-addr", +] + +[[package]] +name = "tor-consdiff" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bfa2b7b71c72830f61c48da4bb3e13191e0c0e1404b9c5168c795e4f5feb4a8" +dependencies = [ + "digest", + "hex", + "thiserror 2.0.16", + "tor-llcrypto", +] + +[[package]] +name = "tor-dirclient" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccd6fac844ac77c33ccdfcb56bf23ff40ebbb821ea708be79a481ec30e8c39c" +dependencies = [ + "async-compression", + "base64ct", + "derive_more", + "futures", + "hex", + "http 1.3.1", + "httparse", + "httpdate", + "itertools 0.14.0", + "memchr", + "thiserror 2.0.16", + "tor-circmgr", + "tor-error", + "tor-linkspec", + "tor-llcrypto", + "tor-netdoc", + "tor-proto", + "tor-rtcompat", + "tracing", +] + +[[package]] +name = "tor-dircommon" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd0cf39a3c30321d145a4d60753ae7ef5bb58a66a00ac9e2bfc30bd823faf2a4" +dependencies = [ + "base64ct", + "derive-deftly", + "getset", + "humantime", + "humantime-serde", + "serde", + "tor-basic-utils", + "tor-checkable", + "tor-config", + "tor-linkspec", + "tor-llcrypto", + "tor-netdoc", + "tracing", +] + +[[package]] +name = "tor-dirmgr" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b52919aa9dbb82a354c5b904bef82e91beb702b9f8ad14e6eac4237d6128bf67" +dependencies = [ + "async-trait", + "base64ct", + "derive_builder_fork_arti", + "derive_more", + "digest", + "educe", + "event-listener", + "fs-mistrust", + "fslock", + "futures", + "hex", + "humantime", + 
"humantime-serde", + "itertools 0.14.0", + "memmap2", + "oneshot-fused-workaround", + "paste", + "postage", + "rand 0.9.2", + "rusqlite", + "safelog", + "scopeguard", + "serde", + "serde_json", + "signature", + "static_assertions", + "strum", + "thiserror 2.0.16", + "time", + "tor-async-utils", + "tor-basic-utils", + "tor-checkable", + "tor-circmgr", + "tor-config", + "tor-consdiff", + "tor-dirclient", + "tor-dircommon", + "tor-error", + "tor-guardmgr", + "tor-llcrypto", + "tor-netdir", + "tor-netdoc", + "tor-persist", + "tor-proto", + "tor-protover", + "tor-rtcompat", + "tracing", +] + +[[package]] +name = "tor-error" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "595b005e6f571ac3890a34a00f361200aab781fd0218f2c528c86fc7af088df5" +dependencies = [ + "derive_more", + "futures", + "paste", + "retry-error", + "static_assertions", + "strum", + "thiserror 2.0.16", + "tracing", + "void", +] + +[[package]] +name = "tor-general-addr" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "727b8c8bc01c1587486055edab5c2cd0d5c960f5bb3fac796fc9911872b8b397" +dependencies = [ + "derive_more", + "thiserror 2.0.16", + "void", +] + +[[package]] +name = "tor-guardmgr" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d337f465a477c0fb3b2faafa4654d70ff9df3590e57d22707591dddb4e4450c1" +dependencies = [ + "amplify", + "base64ct", + "derive-deftly", + "derive_builder_fork_arti", + "derive_more", + "dyn-clone", + "educe", + "futures", + "humantime", + "humantime-serde", + "itertools 0.14.0", + "num_enum", + "oneshot-fused-workaround", + "pin-project", + "postage", + "rand 0.9.2", + "safelog", + "serde", + "strum", + "thiserror 2.0.16", + "tor-async-utils", + "tor-basic-utils", + "tor-config", + "tor-dircommon", + "tor-error", + "tor-linkspec", + "tor-llcrypto", + "tor-netdir", + "tor-netdoc", + "tor-persist", + "tor-proto", + 
"tor-relay-selection", + "tor-rtcompat", + "tor-units", + "tracing", +] + +[[package]] +name = "tor-hscrypto" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3693cd43f05cd01ac0aaa060dae5c5e53c4364f89e0d769e33cd629a2fd3118" +dependencies = [ + "data-encoding", + "derive-deftly", + "derive_more", + "digest", + "hex", + "humantime", + "itertools 0.14.0", + "paste", + "rand 0.9.2", + "safelog", + "serde", + "signature", + "subtle", + "thiserror 2.0.16", + "tor-basic-utils", + "tor-bytes", + "tor-error", + "tor-key-forge", + "tor-llcrypto", + "tor-units", + "void", +] + +[[package]] +name = "tor-key-forge" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ade9065ae49cfe2ab020ca9ca9f2b3c5c9b5fc0d8980fa681d8b3a0668e042f" +dependencies = [ + "derive-deftly", + "derive_more", + "downcast-rs", + "paste", + "rand 0.9.2", + "rsa", + "signature", + "ssh-key", + "thiserror 2.0.16", + "tor-bytes", + "tor-cert", + "tor-checkable", + "tor-error", + "tor-llcrypto", +] + +[[package]] +name = "tor-keymgr" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "243c3163d376c4723cd67271fcd6e5d6b498a6865c6b98299640e1be01c38826" +dependencies = [ + "amplify", + "arrayvec", + "cfg-if", + "derive-deftly", + "derive_builder_fork_arti", + "derive_more", + "downcast-rs", + "dyn-clone", + "fs-mistrust", + "glob-match", + "humantime", + "inventory", + "itertools 0.14.0", + "rand 0.9.2", + "safelog", + "serde", + "signature", + "ssh-key", + "thiserror 2.0.16", + "tor-basic-utils", + "tor-bytes", + "tor-config", + "tor-config-path", + "tor-error", + "tor-hscrypto", + "tor-key-forge", + "tor-llcrypto", + "tor-persist", + "tracing", + "visibility", + "walkdir", + "zeroize", +] + +[[package]] +name = "tor-linkspec" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"05f1ea8786900d6fbe4c9f775d341b1ba01bbd1f750d89bd63be78b6b01e1836" +dependencies = [ + "base64ct", + "by_address", + "caret", + "derive-deftly", + "derive_builder_fork_arti", + "derive_more", + "hex", + "itertools 0.14.0", + "safelog", + "serde", + "serde_with", + "strum", + "thiserror 2.0.16", + "tor-basic-utils", + "tor-bytes", + "tor-config", + "tor-llcrypto", + "tor-memquota", + "tor-protover", +] + +[[package]] +name = "tor-llcrypto" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c6989a1c6d06ffd6835e2917edaae4aeef544f8e5fdd68b54cc365f2af523de" +dependencies = [ + "aes", + "base64ct", + "ctr", + "curve25519-dalek", + "der-parser", + "derive-deftly", + "derive_more", + "digest", + "ed25519-dalek", + "educe", + "getrandom 0.4.2", + "hex", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_core 0.6.4", + "rand_core 0.9.3", + "rand_jitter", + "rdrand", + "rsa", + "safelog", + "serde", + "sha1", + "sha2", + "sha3", + "signature", + "subtle", + "thiserror 2.0.16", + "tor-error", + "tor-memquota-cost", + "visibility", + "x25519-dalek", + "zeroize", +] + +[[package]] +name = "tor-log-ratelim" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f1cd642180923d12e3fab5996b4aa2189718da7f465df6eb196ce2b9c70e293" +dependencies = [ + "futures", + "humantime", + "thiserror 2.0.16", + "tor-error", + "tor-rtcompat", + "tracing", + "weak-table", +] + +[[package]] +name = "tor-memquota" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "599daea60fd3272eb72a795d1c593b45bbe15343cbc702340a81db124c06eed5" +dependencies = [ + "cfg-if", + "derive-deftly", + "derive_more", + "dyn-clone", + "educe", + "futures", + "itertools 0.14.0", + "paste", + "pin-project", + "serde", + "slotmap-careful", + "static_assertions", + "sysinfo", + "thiserror 2.0.16", + "tor-async-utils", + "tor-basic-utils", + "tor-config", + "tor-error", + "tor-log-ratelim", + 
"tor-memquota-cost", + "tor-rtcompat", + "tracing", + "void", +] + +[[package]] +name = "tor-memquota-cost" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd92b07c0fc24e6d8166a5ff45e5b8654e68d89743c46d01889a16ab74c0b578" +dependencies = [ + "derive-deftly", + "itertools 0.14.0", + "paste", + "void", +] + +[[package]] +name = "tor-netdir" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41be8f47f521fc95206d2ba5facac8fb1a5b5b82169bd41ebeecdf46d1e77246" +dependencies = [ + "async-trait", + "bitflags 2.9.4", + "derive_more", + "futures", + "humantime", + "itertools 0.14.0", + "num_enum", + "rand 0.9.2", + "serde", + "strum", + "thiserror 2.0.16", + "tor-basic-utils", + "tor-error", + "tor-linkspec", + "tor-llcrypto", + "tor-netdoc", + "tor-protover", + "tor-units", + "tracing", + "typed-index-collections", +] + +[[package]] +name = "tor-netdoc" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea8bce73d2c78bd78a2a927336ca639cf6bd5d8ad092ebcd0b3fdeaa47dcc77e" +dependencies = [ + "amplify", + "base64ct", + "cipher", + "derive-deftly", + "derive_builder_fork_arti", + "derive_more", + "digest", + "educe", + "enumset", + "hex", + "humantime", + "itertools 0.14.0", + "memchr", + "paste", + "phf", + "saturating-time", + "serde", + "serde_with", + "signature", + "smallvec", + "strum", + "subtle", + "thiserror 2.0.16", + "time", + "tinystr", + "tor-basic-utils", + "tor-bytes", + "tor-cell", + "tor-cert", + "tor-checkable", + "tor-error", + "tor-llcrypto", + "tor-protover", + "void", + "zeroize", +] + +[[package]] +name = "tor-persist" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507ab4b6a3d59ed0df5804eeed66dcacde75e3be13d3694216cdfdb666bce625" +dependencies = [ + "derive-deftly", + "derive_more", + "filetime", + "fs-mistrust", + "fslock", + "futures", + "itertools 0.14.0", + 
"oneshot-fused-workaround", + "paste", + "sanitize-filename", + "serde", + "serde_json", + "thiserror 2.0.16", + "time", + "tor-async-utils", + "tor-basic-utils", + "tor-error", + "tracing", + "void", +] + +[[package]] +name = "tor-proto" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbfc552d535d36539d5782bb02028590bc472d219e49da51a96810725e80ff56" +dependencies = [ + "amplify", + "async-trait", + "asynchronous-codec", + "bitvec", + "bytes", + "caret", + "cfg-if", + "cipher", + "coarsetime", + "criterion-cycles-per-byte", + "derive-deftly", + "derive_builder_fork_arti", + "derive_more", + "digest", + "educe", + "enum_dispatch", + "futures", + "futures-util", + "hkdf", + "hmac", + "itertools 0.14.0", + "nonany", + "oneshot-fused-workaround", + "pin-project", + "postage", + "rand 0.9.2", + "rand_core 0.9.3", + "safelog", + "slotmap-careful", + "smallvec", + "static_assertions", + "subtle", + "sync_wrapper 1.0.2", + "thiserror 2.0.16", + "tokio", + "tokio-util", + "tor-async-utils", + "tor-basic-utils", + "tor-bytes", + "tor-cell", + "tor-cert", + "tor-checkable", + "tor-config", + "tor-error", + "tor-linkspec", + "tor-llcrypto", + "tor-log-ratelim", + "tor-memquota", + "tor-protover", + "tor-relay-crypto", + "tor-rtcompat", + "tor-rtmock", + "tor-units", + "tracing", + "typenum", + "visibility", + "void", + "zeroize", +] + +[[package]] +name = "tor-protover" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aed88527d070c4b7ea4e55a36d2d56d0500e30ca66298b5264f047f7f2f89cfa" +dependencies = [ + "caret", + "paste", + "serde_with", + "thiserror 2.0.16", + "tor-basic-utils", + "tor-bytes", +] + +[[package]] +name = "tor-relay-crypto" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7e57e9f71b22ae1df63dbccc8e428cb07feec0abd654735109fa563c10bbb90" +dependencies = [ + "derive-deftly", + "derive_more", + "humantime", + 
"tor-cert", + "tor-checkable", + "tor-error", + "tor-key-forge", + "tor-keymgr", + "tor-llcrypto", + "tor-persist", +] + +[[package]] +name = "tor-relay-selection" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a372072ac9dea7d17e49693cc3f3ae77b3abf8125630516c9f2d622239b1920a" +dependencies = [ + "rand 0.9.2", + "serde", + "tor-basic-utils", + "tor-linkspec", + "tor-netdir", + "tor-netdoc", +] + +[[package]] +name = "tor-rtcompat" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14428b930e59003e801c0c32697c0aeb9b0495ad33ecbe8c6753bdb596233270" +dependencies = [ + "async-native-tls", + "async-trait", + "async_executors", + "asynchronous-codec", + "cfg-if", + "coarsetime", + "derive_more", + "dyn-clone", + "educe", + "futures", + "hex", + "libc", + "native-tls", + "paste", + "pin-project", + "socket2 0.6.3", + "thiserror 2.0.16", + "tokio", + "tokio-util", + "tor-error", + "tor-general-addr", + "tracing", + "void", + "zeroize", +] + +[[package]] +name = "tor-rtmock" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2da91a432cdaee8a93e0bb21b02f3e9c7667832ccbb4b54e00d9c1214638e70" +dependencies = [ + "amplify", + "assert_matches", + "async-trait", + "derive-deftly", + "derive_more", + "educe", + "futures", + "humantime", + "itertools 0.14.0", + "oneshot-fused-workaround", + "pin-project", + "priority-queue", + "slotmap-careful", + "strum", + "thiserror 2.0.16", + "tor-error", + "tor-general-addr", + "tor-rtcompat", + "tracing", + "tracing-test", + "void", +] + +[[package]] +name = "tor-socksproto" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adbc9115a2f506d9bb86ae4446f0ca70eb523dc2f5e900a33582e7c39decc23a" +dependencies = [ + "amplify", + "caret", + "derive-deftly", + "educe", + "safelog", + "subtle", + "thiserror 2.0.16", + "tor-bytes", + "tor-error", +] + 
+[[package]] +name = "tor-units" +version = "0.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da90e93b4b4aa4ec356ecbe9e19aced36fdd655e94ca459d1915120d873363f0" +dependencies = [ + "derive-deftly", + "derive_more", + "serde", + "thiserror 2.0.16", + "tor-memquota", ] [[package]] @@ -3421,15 +6622,18 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", + "indexmap 2.11.4", "pin-project-lite", + "slab", "sync_wrapper 1.0.2", "tokio", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -3437,9 +6641,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ "bitflags 2.9.4", "bytes", @@ -3448,7 +6652,7 @@ dependencies = [ "http-body 1.0.1", "iri-string", "pin-project-lite", - "tower 0.5.2", + "tower 0.5.3", "tower-layer", "tower-service", ] @@ -3564,6 +6768,27 @@ dependencies = [ "tracing-log 0.2.0", ] +[[package]] +name = "tracing-test" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19a4c448db514d4f24c5ddb9f73f2ee71bfb24c526cf0c570ba142d1119e0051" +dependencies = [ + "tracing-core", + "tracing-subscriber", + "tracing-test-macro", +] + +[[package]] +name = "tracing-test-macro" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad06847b7afb65c7866a36664b75c40b895e318cea4f71299f013fb22965329d" +dependencies = [ + "quote", + "syn 2.0.106", +] + [[package]] name = 
"try-lock" version = "0.2.5" @@ -3584,8 +6809,8 @@ dependencies = [ "libloading 0.7.4", "log", "nix 0.26.4", - "reqwest 0.11.27", - "schemars", + "reqwest", + "schemars 0.8.22", "serde", "socket2 0.5.10", "ssri", @@ -3593,22 +6818,53 @@ dependencies = [ "tokio", "tracing", "widestring", - "windows", + "windows 0.48.0", "zip", ] +[[package]] +name = "typed-index-collections" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "898160f1dfd383b4e92e17f0512a7d62f3c51c44937b23b6ffc3a1614a8eaccd" +dependencies = [ + "bincode", + "serde", +] + [[package]] name = "typenum" version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +[[package]] +name = "uncased" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicase" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" + [[package]] name = "unicode-ident" version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + [[package]] name = "unicode-width" version = "0.1.14" @@ -3621,6 +6877,12 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + [[package]] name = "universal-hash" version = "0.5.1" @@ -3638,10 +6900,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] -name = "url" -version = "2.5.7" +name = "unty" +version = "0.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +checksum = "6d49784317cd0d1ee7ec5c716dd598ec5b4483ea832a2dced265471cc0f690ae" + +[[package]] +name = "url" +version = "2.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", @@ -3690,6 +6958,33 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "visibility" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" version = "0.3.1" @@ -3720,14 +7015,32 @@ version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ - "wit-bindgen", + 
"wit-bindgen 0.46.0", +] + +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen 0.51.0", +] + +[[package]] +name = "wasix" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1757e0d1f8456693c7e5c6c629bdb54884e032aa0bb53c155f6a39f94440d332" +dependencies = [ + "wasi 0.11.1+wasi-snapshot-preview1", ] [[package]] name = "wasm-bindgen" -version = "0.2.103" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab10a69fbd0a177f5f649ad4d8d3305499c42bab9aef2f7ff592d0ec8f833819" +checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e" dependencies = [ "cfg-if", "once_cell", @@ -3736,27 +7049,14 @@ dependencies = [ "wasm-bindgen-shared", ] -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.103" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb702423545a6007bbc368fde243ba47ca275e549c8a28617f56f6ba53b1d1c" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.106", - "wasm-bindgen-shared", -] - [[package]] name = "wasm-bindgen-futures" -version = "0.4.53" +version = "0.4.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0b221ff421256839509adbb55998214a70d829d3a28c69b4a6672e9d2a42f67" +checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8" dependencies = [ "cfg-if", + "futures-util", "js-sys", "once_cell", "wasm-bindgen", @@ -3765,9 +7065,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.103" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc65f4f411d91494355917b605e1480033152658d71f722a90647f56a70c88a0" +checksum = 
"18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3775,31 +7075,71 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.103" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc003a991398a8ee604a401e194b6b3a39677b3173d6e74495eb51b82e99a32" +checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3" dependencies = [ + "bumpalo", "proc-macro2", "quote", "syn 2.0.106", - "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.103" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "293c37f4efa430ca14db3721dfbe48d8c33308096bd44d80ebaa775ab71ba1cf" +checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16" dependencies = [ "unicode-ident", ] [[package]] -name = "web-sys" -version = "0.3.80" +name = "wasm-encoder" +version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbe734895e869dc429d78c4b433f8d17d95f8d05317440b4fad5ab2d33e596dc" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap 2.11.4", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags 2.9.4", + "hashbrown 0.15.5", + "indexmap 2.11.4", + "semver", +] + +[[package]] +name = "weak-table" +version = "0.3.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "323f4da9523e9a669e1eaf9c6e763892769b1d38c623913647bfdc1532fe4549" + +[[package]] +name = "web-sys" +version = "0.3.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9" dependencies = [ "js-sys", "wasm-bindgen", @@ -3816,10 +7156,10 @@ dependencies = [ ] [[package]] -name = "webpki-roots" -version = "1.0.2" +name = "webpki-root-certs" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" +checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca" dependencies = [ "rustls-pki-types", ] @@ -3858,6 +7198,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.0", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" @@ -3873,6 +7222,87 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows" +version = "0.61.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" +dependencies = [ + "windows-collections", + "windows-core 0.61.2", + "windows-future", + "windows-link 0.1.3", + "windows-numerics", +] + +[[package]] +name = "windows-collections" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" +dependencies = [ + "windows-core 0.61.2", +] + +[[package]] +name = "windows-core" +version = "0.61.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link 0.1.3", + "windows-result 0.3.4", + "windows-strings 0.4.2", +] + +[[package]] +name = "windows-core" +version = "0.62.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6844ee5416b285084d3d3fffd743b925a6c9385455f64f6d4fa3031c4c2749a9" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link 0.2.0", + "windows-result 0.4.0", + "windows-strings 0.5.0", +] + +[[package]] +name = "windows-future" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" +dependencies = [ + "windows-core 0.61.2", + "windows-link 0.1.3", + "windows-threading", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "windows-link" version = "0.1.3" @@ -3886,12 +7316,69 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" [[package]] -name = "windows-sys" -version = "0.48.0" +name = "windows-numerics" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +checksum = 
"9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" dependencies = [ - "windows-targets 0.48.5", + "windows-core 0.61.2", + "windows-link 0.1.3", +] + +[[package]] +name = "windows-registry" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f91f87ce112ffb7275000ea98eb1940912c21c1567c9312fde20261f3eadd29" +dependencies = [ + "windows-link 0.2.0", + "windows-result 0.4.0", + "windows-strings 0.5.0", +] + +[[package]] +name = "windows-result" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +dependencies = [ + "windows-link 0.1.3", +] + +[[package]] +name = "windows-result" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7084dcc306f89883455a206237404d3eaf961e5bd7e0f312f7c91f57eb44167f" +dependencies = [ + "windows-link 0.2.0", +] + +[[package]] +name = "windows-strings" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +dependencies = [ + "windows-link 0.1.3", +] + +[[package]] +name = "windows-strings" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7218c655a553b0bed4426cf54b20d7ba363ef543b52d515b3e48d7fd55318dda" +dependencies = [ + "windows-link 0.2.0", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", ] [[package]] @@ -3930,6 +7417,21 @@ dependencies = [ "windows-link 0.2.0", ] +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" 
+dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -3978,6 +7480,21 @@ dependencies = [ "windows_x86_64_msvc 0.53.0", ] +[[package]] +name = "windows-threading" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" +dependencies = [ + "windows-link 0.1.3", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -3996,6 +7513,12 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -4014,6 +7537,12 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -4044,6 +7573,12 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" 
+[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -4062,6 +7597,12 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -4080,6 +7621,12 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -4098,6 +7645,12 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -4125,28 +7678,115 @@ dependencies = [ "memchr", ] -[[package]] -name = "winreg" -version = "0.50.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "wit-bindgen" version = "0.46.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" +[[package]] +name = "wit-bindgen" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap 2.11.4", + "prettyplease", + "syn 2.0.106", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.106", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags 2.9.4", + "indexmap 2.11.4", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ 
+ "anyhow", + "id-arena", + "indexmap 2.11.4", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] + [[package]] name = "writeable" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + [[package]] name = "x25519-dalek" version = "2.0.1" @@ -4300,7 +7940,7 @@ dependencies = [ "pbkdf2", "sha1", "time", - "zstd", + "zstd 0.11.2+zstd.1.5.2", ] [[package]] @@ -4309,7 +7949,16 @@ version = "0.11.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" dependencies = [ - "zstd-safe", + "zstd-safe 5.0.2+zstd.1.5.2", +] + +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe 7.2.4", ] [[package]] @@ -4322,6 +7971,15 @@ dependencies = [ "zstd-sys", ] +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + [[package]] name = "zstd-sys" version = "2.0.16+zstd.1.5.7" diff --git a/burrow/Cargo.toml b/burrow/Cargo.toml index d5e56c1..3bae2ae 100644 --- a/burrow/Cargo.toml +++ b/burrow/Cargo.toml @@ -10,7 +10,7 @@ crate-type = ["lib", "staticlib"] [dependencies] anyhow = "1.0" -tokio = { version = "1.37", features = [ +tokio = { version = "1.50.0", features = [ "rt", "macros", "sync", @@ -50,22 +50,25 @@ async-channel = "2.1" schemars = "0.8" futures = "0.3.28" 
once_cell = "1.19" +arti-client = "0.40.0" +tokio-util = { version = "0.7.18", features = ["compat"] } console-subscriber = { version = "0.2.0", optional = true } console = "0.15.8" -axum = "0.7.4" -reqwest = { version = "0.12", default-features = false, features = [ +axum = "0.8.8" +reqwest = { version = "0.13.2", default-features = false, features = [ "json", - "rustls-tls", + "rustls", ] } -rusqlite = { version = "0.31.0", features = ["blob"] } +rusqlite = { version = "0.38.0", features = ["blob"] } dotenv = "0.15.0" -tonic = "0.12.0" -prost = "0.13.1" -prost-types = "0.13.1" -tokio-stream = "0.1" +tonic = "0.14.5" +tonic-prost = "0.14.5" +prost = "0.14.3" +prost-types = "0.14.3" +tokio-stream = "0.1.18" async-stream = "0.2" -tower = "0.4.13" -hyper-util = "0.1.6" +tower = "0.5.3" +hyper-util = "0.1.20" toml = "0.8.15" rust-ini = "0.21.0" @@ -73,10 +76,11 @@ rust-ini = "0.21.0" caps = "0.5" libsystemd = "0.7" tracing-journald = "0.3" +libc = "0.2" [target.'cfg(target_vendor = "apple")'.dependencies] -nix = { version = "0.27" } -rusqlite = { version = "0.31.0", features = ["bundled", "blob"] } +nix = { version = "0.27", features = ["ioctl"] } +rusqlite = { version = "0.38.0", features = ["bundled", "blob"] } [dev-dependencies] insta = { version = "1.32", features = ["yaml"] } @@ -96,4 +100,4 @@ bundled = ["rusqlite/bundled"] [build-dependencies] -tonic-build = "0.12.0" +tonic-prost-build = "0.14.5" diff --git a/burrow/build.rs b/burrow/build.rs index 8eea5dc..9ecd9a8 100644 --- a/burrow/build.rs +++ b/burrow/build.rs @@ -1,4 +1,4 @@ fn main() -> Result<(), Box> { - tonic_build::compile_protos("../proto/burrow.proto")?; + tonic_prost_build::compile_protos("../proto/burrow.proto")?; Ok(()) } diff --git a/burrow/src/daemon/instance.rs b/burrow/src/daemon/instance.rs index ce96fa5..f21678e 100644 --- a/burrow/src/daemon/instance.rs +++ b/burrow/src/daemon/instance.rs @@ -1,61 +1,186 @@ use std::{ - ops::Deref, path::{Path, PathBuf}, sync::Arc, - time::Duration, }; 
-use anyhow::Result; +use anyhow::{anyhow, Context, Result}; use rusqlite::Connection; -use tokio::sync::{mpsc, watch, Notify, RwLock}; +use tokio::{ + sync::{mpsc, watch, RwLock}, + task::JoinHandle, +}; use tokio_stream::wrappers::ReceiverStream; use tonic::{Request, Response, Status as RspStatus}; -use tracing::{debug, info, warn}; +use tracing::warn; use tun::{tokio::TunInterface, TunOptions}; -use super::rpc::grpc_defs::{ - networks_server::Networks, - tunnel_server::Tunnel, - Empty, - Network, - NetworkDeleteRequest, - NetworkListResponse, - NetworkReorderRequest, - State as RPCTunnelState, - TunnelConfigurationResponse, - TunnelStatusResponse, +use super::rpc::{ + grpc_defs::{ + networks_server::Networks, tunnel_server::Tunnel, Empty, Network, NetworkDeleteRequest, + NetworkListResponse, NetworkReorderRequest, NetworkType, State as RPCTunnelState, + TunnelConfigurationResponse, TunnelStatusResponse, + }, + ServerConfig, }; use crate::{ - daemon::rpc::{ - DaemonCommand, - DaemonNotification, - DaemonResponse, - DaemonResponseData, - ServerConfig, - ServerInfo, - }, - database::{ - add_network, - delete_network, - get_connection, - list_networks, - load_interface, - reorder_network, - }, - wireguard::{Config, Interface}, + database::{add_network, delete_network, get_connection, list_networks, reorder_network}, + tor::{self, Config as TorConfig, TorHandle}, + wireguard::{Config as WireGuardConfig, Interface as WireGuardInterface}, }; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] enum RunState { Running, Idle, } impl RunState { - pub fn to_rpc(&self) -> RPCTunnelState { + fn to_rpc(&self) -> RPCTunnelState { match self { - RunState::Running => RPCTunnelState::Running, - RunState::Idle => RPCTunnelState::Stopped, + Self::Running => RPCTunnelState::Running, + Self::Idle => RPCTunnelState::Stopped, + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq)] +enum RuntimeIdentity { + DefaultWireGuard, + Network { id: i32, network_type: NetworkType }, +} 
+ +#[derive(Clone, Debug)] +enum ResolvedTunnel { + WireGuard { + identity: RuntimeIdentity, + config: WireGuardConfig, + }, + Tor { + identity: RuntimeIdentity, + config: TorConfig, + }, +} + +impl ResolvedTunnel { + fn from_networks(networks: &[Network], fallback: &WireGuardConfig) -> Result { + let Some(network) = networks.first() else { + return Ok(Self::WireGuard { + identity: RuntimeIdentity::DefaultWireGuard, + config: fallback.clone(), + }); + }; + + let identity = RuntimeIdentity::Network { + id: network.id, + network_type: network.r#type(), + }; + + match network.r#type() { + NetworkType::WireGuard => { + let payload = String::from_utf8(network.payload.clone()) + .context("wireguard payload must be valid UTF-8")?; + let config = WireGuardConfig::from_content_fmt(&payload, "ini")?; + Ok(Self::WireGuard { identity, config }) + } + NetworkType::Tor => { + let config = TorConfig::from_payload(&network.payload)?; + Ok(Self::Tor { identity, config }) + } + NetworkType::HackClub => { + Err(anyhow!("HackClub runtime is not available on this branch")) + } + } + } + + fn identity(&self) -> &RuntimeIdentity { + match self { + Self::WireGuard { identity, .. } | Self::Tor { identity, .. } => identity, + } + } + + fn server_config(&self) -> Result { + match self { + Self::WireGuard { config, .. } => ServerConfig::try_from(config), + Self::Tor { config, .. 
} => Ok(ServerConfig { + address: config.address.clone(), + name: config.tun_name.clone(), + mtu: config.mtu.map(|mtu| mtu as i32), + }), + } + } + + async fn start(self, tun_interface: Arc>>) -> Result { + match self { + Self::WireGuard { identity, config } => { + let tun = TunOptions::new() + .address(config.interface.address.clone()) + .open()?; + tun_interface.write().await.replace(tun); + + let mut interface: WireGuardInterface = config.try_into()?; + interface.set_tun_ref(tun_interface.clone()).await; + let interface = Arc::new(RwLock::new(interface)); + let run_interface = interface.clone(); + let task = tokio::spawn(async move { + let guard = run_interface.read().await; + guard.run().await + }); + + Ok(ActiveTunnel::WireGuard { identity, interface, task }) + } + Self::Tor { identity, config } => { + let mut tun_options = TunOptions::new().address(config.address.clone()); + if let Some(name) = config.tun_name.as_deref() { + tun_options = tun_options.name(name); + } + let tun = tun_options.open()?; + tun_interface.write().await.replace(tun); + + match tor::spawn(config).await { + Ok(handle) => Ok(ActiveTunnel::Tor { identity, handle }), + Err(err) => { + tun_interface.write().await.take(); + Err(err) + } + } + } + } + } +} + +enum ActiveTunnel { + WireGuard { + identity: RuntimeIdentity, + interface: Arc>, + task: JoinHandle>, + }, + Tor { + identity: RuntimeIdentity, + handle: TorHandle, + }, +} + +impl ActiveTunnel { + fn identity(&self) -> &RuntimeIdentity { + match self { + Self::WireGuard { identity, .. } | Self::Tor { identity, .. } => identity, + } + } + + async fn shutdown(self, tun_interface: &Arc>>) -> Result<()> { + match self { + Self::WireGuard { interface, task, .. } => { + interface.read().await.remove_tun().await; + let task_result = task.await; + tun_interface.write().await.take(); + task_result??; + Ok(()) + } + Self::Tor { handle, .. 
} => { + let result = handle.shutdown().await; + tun_interface.write().await.take(); + result + } } } } @@ -63,30 +188,26 @@ impl RunState { #[derive(Clone)] pub struct DaemonRPCServer { tun_interface: Arc>>, - wg_interface: Arc>, - config: Arc>, + default_config: Arc>, db_path: Option, wg_state_chan: (watch::Sender, watch::Receiver), network_update_chan: (watch::Sender<()>, watch::Receiver<()>), + active_tunnel: Arc>>, } impl DaemonRPCServer { - pub fn new( - wg_interface: Arc>, - config: Arc>, - db_path: Option<&Path>, - ) -> Result { + pub fn new(config: Arc>, db_path: Option<&Path>) -> Result { Ok(Self { tun_interface: Arc::new(RwLock::new(None)), - wg_interface, - config, - db_path: db_path.map(|p| p.to_owned()), + default_config: config, + db_path: db_path.map(Path::to_owned), wg_state_chan: watch::channel(RunState::Idle), network_update_chan: watch::channel(()), + active_tunnel: Arc::new(RwLock::new(None)), }) } - pub fn get_connection(&self) -> Result { + fn get_connection(&self) -> Result { get_connection(self.db_path.as_deref()).map_err(proc_err) } @@ -94,13 +215,70 @@ impl DaemonRPCServer { self.wg_state_chan.0.send(state).map_err(proc_err) } - async fn get_wg_state(&self) -> RunState { - self.wg_state_chan.1.borrow().to_owned() - } - async fn notify_network_update(&self) -> Result<(), RspStatus> { self.network_update_chan.0.send(()).map_err(proc_err) } + + async fn resolve_tunnel(&self) -> Result { + let conn = self.get_connection()?; + let networks = list_networks(&conn).map_err(proc_err)?; + let fallback = self.default_config.read().await.clone(); + ResolvedTunnel::from_networks(&networks, &fallback).map_err(proc_err) + } + + async fn current_tunnel_configuration(&self) -> Result { + let config = self + .resolve_tunnel() + .await? 
+ .server_config() + .map_err(proc_err)?; + Ok(TunnelConfigurationResponse { + addresses: config.address, + mtu: config.mtu.unwrap_or(1500), + }) + } + + async fn stop_active_tunnel(&self) -> Result { + let current = { self.active_tunnel.write().await.take() }; + let Some(current) = current else { + return Ok(false); + }; + + current + .shutdown(&self.tun_interface) + .await + .map_err(proc_err)?; + self.set_wg_state(RunState::Idle).await?; + Ok(true) + } + + async fn replace_active_tunnel(&self, desired: ResolvedTunnel) -> Result<(), RspStatus> { + let _ = self.stop_active_tunnel().await?; + let active = desired + .start(self.tun_interface.clone()) + .await + .map_err(proc_err)?; + self.active_tunnel.write().await.replace(active); + self.set_wg_state(RunState::Running).await?; + Ok(()) + } + + async fn reconcile_runtime(&self) -> Result<(), RspStatus> { + let desired = self.resolve_tunnel().await?; + let needs_restart = { + let guard = self.active_tunnel.read().await; + guard + .as_ref() + .map(|active| active.identity() != desired.identity()) + .unwrap_or(false) + }; + + if needs_restart { + self.replace_active_tunnel(desired).await?; + } + + Ok(()) + } } #[tonic::async_trait] @@ -113,55 +291,46 @@ impl Tunnel for DaemonRPCServer { _request: Request, ) -> Result, RspStatus> { let (tx, rx) = mpsc::channel(10); + let server = self.clone(); + let mut sub = self.network_update_chan.1.clone(); + tokio::spawn(async move { - let serv_config = ServerConfig::default(); - tx.send(Ok(TunnelConfigurationResponse { - mtu: serv_config.mtu.unwrap_or(1000), - addresses: serv_config.address, - })) - .await + loop { + let response = server.current_tunnel_configuration().await; + if tx.send(response).await.is_err() { + break; + } + if sub.changed().await.is_err() { + break; + } + } }); + Ok(Response::new(ReceiverStream::new(rx))) } async fn tunnel_start(&self, _request: Request) -> Result, RspStatus> { - let wg_state = self.get_wg_state().await; - match wg_state { - RunState::Idle 
=> { - let tun_if = TunOptions::new().open()?; - debug!("Setting tun on wg_interface"); - self.tun_interface.write().await.replace(tun_if); - self.wg_interface - .write() - .await - .set_tun_ref(self.tun_interface.clone()) - .await; - debug!("tun set on wg_interface"); + let desired = self.resolve_tunnel().await?; + let already_running = { + let guard = self.active_tunnel.read().await; + guard + .as_ref() + .map(|active| active.identity() == desired.identity()) + .unwrap_or(false) + }; - debug!("Setting tun_interface"); - debug!("tun_interface set: {:?}", self.tun_interface); - - debug!("Cloning wg_interface"); - let tmp_wg = self.wg_interface.clone(); - let run_task = tokio::spawn(async move { - let twlock = tmp_wg.read().await; - twlock.run().await - }); - self.set_wg_state(RunState::Running).await?; - } - - RunState::Running => { - warn!("Got start, but tun interface already up."); - } + if already_running { + warn!("Got start, but active tunnel already matches desired network."); + return Ok(Response::new(Empty {})); } - return Ok(Response::new(Empty {})); + self.replace_active_tunnel(desired).await?; + Ok(Response::new(Empty {})) } async fn tunnel_stop(&self, _request: Request) -> Result, RspStatus> { - self.wg_interface.write().await.remove_tun().await; - self.set_wg_state(RunState::Idle).await?; - return Ok(Response::new(Empty {})); + let _ = self.stop_active_tunnel().await?; + Ok(Response::new(Empty {})) } async fn tunnel_status( @@ -172,13 +341,16 @@ impl Tunnel for DaemonRPCServer { let mut state_rx = self.wg_state_chan.1.clone(); tokio::spawn(async move { let cur = state_rx.borrow_and_update().to_owned(); - tx.send(Ok(status_rsp(cur))).await; + if tx.send(Ok(status_rsp(cur))).await.is_err() { + return; + } + loop { - state_rx.changed().await.unwrap(); + if state_rx.changed().await.is_err() { + break; + } let cur = state_rx.borrow().to_owned(); - let res = tx.send(Ok(status_rsp(cur))).await; - if res.is_err() { - eprintln!("Tunnel status channel closed"); 
+ if tx.send(Ok(status_rsp(cur))).await.is_err() { break; } } @@ -196,6 +368,7 @@ impl Networks for DaemonRPCServer { let network = request.into_inner(); add_network(&conn, &network).map_err(proc_err)?; self.notify_network_update().await?; + self.reconcile_runtime().await?; Ok(Response::new(Empty {})) } @@ -203,7 +376,6 @@ impl Networks for DaemonRPCServer { &self, _request: Request, ) -> Result, RspStatus> { - debug!("Mock network_list called"); let (tx, rx) = mpsc::channel(10); let conn = self.get_connection()?; let mut sub = self.network_update_chan.1.clone(); @@ -212,12 +384,12 @@ impl Networks for DaemonRPCServer { let networks = list_networks(&conn) .map(|res| NetworkListResponse { network: res }) .map_err(proc_err); - let res = tx.send(networks).await; - if res.is_err() { - eprintln!("Network list channel closed"); + if tx.send(networks).await.is_err() { + break; + } + if sub.changed().await.is_err() { break; } - sub.changed().await.unwrap(); } }); Ok(Response::new(ReceiverStream::new(rx))) @@ -230,6 +402,7 @@ impl Networks for DaemonRPCServer { let conn = self.get_connection()?; reorder_network(&conn, request.into_inner()).map_err(proc_err)?; self.notify_network_update().await?; + self.reconcile_runtime().await?; Ok(Response::new(Empty {})) } @@ -240,6 +413,7 @@ impl Networks for DaemonRPCServer { let conn = self.get_connection()?; delete_network(&conn, request.into_inner()).map_err(proc_err)?; self.notify_network_update().await?; + self.reconcile_runtime().await?; Ok(Response::new(Empty {})) } } @@ -251,6 +425,6 @@ fn proc_err(err: impl ToString) -> RspStatus { fn status_rsp(state: RunState) -> TunnelStatusResponse { TunnelStatusResponse { state: state.to_rpc().into(), - start: None, // TODO: Add timestamp + start: None, } } diff --git a/burrow/src/daemon/mod.rs b/burrow/src/daemon/mod.rs index f6b973f..8ec0ce2 100644 --- a/burrow/src/daemon/mod.rs +++ b/burrow/src/daemon/mod.rs @@ -15,12 +15,11 @@ use tokio::{ }; use 
tokio_stream::wrappers::UnixListenerStream; use tonic::transport::Server; -use tracing::{error, info}; +use tracing::info; use crate::{ daemon::rpc::grpc_defs::{networks_server::NetworksServer, tunnel_server::TunnelServer}, database::{get_connection, load_interface}, - wireguard::Interface, }; pub async fn daemon_main( @@ -33,11 +32,7 @@ pub async fn daemon_main( } let conn = get_connection(db_path)?; let config = load_interface(&conn, "1")?; - let burrow_server = DaemonRPCServer::new( - Arc::new(RwLock::new(config.clone().try_into()?)), - Arc::new(RwLock::new(config)), - db_path.clone(), - )?; + let burrow_server = DaemonRPCServer::new(Arc::new(RwLock::new(config)), db_path.clone())?; let spp = socket_path.clone(); let tmp = get_socket_path(); let sock_path = spp.unwrap_or(Path::new(tmp.as_str())); diff --git a/burrow/src/database.rs b/burrow/src/database.rs index 9a9aac3..c650d55 100644 --- a/burrow/src/database.rs +++ b/burrow/src/database.rs @@ -56,7 +56,7 @@ END; pub fn initialize_tables(conn: &Connection) -> Result<()> { conn.execute(CREATE_WG_INTERFACE_TABLE, [])?; conn.execute(CREATE_WG_PEER_TABLE, [])?; - conn.execute(CREATE_NETWORK_TABLE, [])?; + conn.execute_batch(CREATE_NETWORK_TABLE)?; Ok(()) } diff --git a/burrow/src/lib.rs b/burrow/src/lib.rs index 6aae1fb..b77ce36 100644 --- a/burrow/src/lib.rs +++ b/burrow/src/lib.rs @@ -1,22 +1,20 @@ #[cfg(any(target_os = "linux", target_vendor = "apple"))] +pub mod tor; +#[cfg(any(target_os = "linux", target_vendor = "apple"))] pub mod wireguard; +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +mod auth; #[cfg(any(target_os = "linux", target_vendor = "apple"))] mod daemon; #[cfg(any(target_os = "linux", target_vendor = "apple"))] pub mod database; -#[cfg(any(target_os = "linux", target_vendor = "apple"))] -mod auth; pub(crate) mod tracing; #[cfg(target_vendor = "apple")] pub use daemon::apple::spawn_in_process; #[cfg(any(target_os = "linux", target_vendor = "apple"))] pub use daemon::{ - 
rpc::DaemonResponse, - rpc::ServerInfo, - DaemonClient, - DaemonCommand, - DaemonResponseData, + rpc::DaemonResponse, rpc::ServerInfo, DaemonClient, DaemonCommand, DaemonResponseData, DaemonStartOptions, }; diff --git a/burrow/src/main.rs b/burrow/src/main.rs index e87b4c9..db62a7b 100644 --- a/burrow/src/main.rs +++ b/burrow/src/main.rs @@ -3,6 +3,8 @@ use clap::{Args, Parser, Subcommand}; #[cfg(any(target_os = "linux", target_vendor = "apple"))] mod daemon; +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +mod tor; pub(crate) mod tracing; #[cfg(any(target_os = "linux", target_vendor = "apple"))] mod wireguard; diff --git a/burrow/src/tor/config.rs b/burrow/src/tor/config.rs new file mode 100644 index 0000000..c2e0bc2 --- /dev/null +++ b/burrow/src/tor/config.rs @@ -0,0 +1,125 @@ +use std::{net::SocketAddr, str}; + +use anyhow::{Context, Result}; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Config { + #[serde(default)] + pub address: Vec, + #[serde(default)] + pub dns: Vec, + #[serde(default)] + pub mtu: Option, + #[serde(default)] + pub tun_name: Option, + #[serde(default)] + pub arti: ArtiConfig, + #[serde(default)] + pub tcp_stack: TcpStackConfig, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ArtiConfig { + pub state_dir: String, + pub cache_dir: String, +} + +impl Default for ArtiConfig { + fn default() -> Self { + Self { + state_dir: "/var/lib/burrow/arti/state".to_string(), + cache_dir: "/var/cache/burrow/arti".to_string(), + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "kind", rename_all = "snake_case")] +pub enum TcpStackConfig { + System(SystemTcpStackConfig), +} + +impl Default for TcpStackConfig { + fn default() -> Self { + Self::System(SystemTcpStackConfig::default()) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct SystemTcpStackConfig { + #[serde(default = 
"default_system_listen")] + pub listen: String, +} + +impl Default for SystemTcpStackConfig { + fn default() -> Self { + Self { + listen: default_system_listen(), + } + } +} + +impl Config { + pub fn from_payload(payload: &[u8]) -> Result { + if let Ok(config) = serde_json::from_slice(payload) { + return Ok(config); + } + + let payload = str::from_utf8(payload).context("tor payload must be valid UTF-8")?; + toml::from_str(payload).context("failed to parse tor payload as JSON or TOML") + } + + pub fn listen_addr(&self) -> Result { + match &self.tcp_stack { + TcpStackConfig::System(config) => config + .listen + .parse() + .with_context(|| format!("invalid system tcp listen address '{}'", config.listen)), + } + } +} + +fn default_system_listen() -> String { + "127.0.0.1:9040".to_string() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parses_json_payload() { + let payload = br#"{ + "address":["100.64.0.2/32"], + "mtu":1400, + "arti":{"state_dir":"/tmp/state","cache_dir":"/tmp/cache"}, + "tcp_stack":{"kind":"system","listen":"127.0.0.1:9150"} + }"#; + + let config = Config::from_payload(payload).unwrap(); + assert_eq!(config.address, vec!["100.64.0.2/32"]); + assert_eq!(config.listen_addr().unwrap().to_string(), "127.0.0.1:9150"); + } + + #[test] + fn parses_toml_payload() { + let payload = r#" +address = ["100.64.0.3/32"] +mtu = 1280 +tun_name = "burrow-tor" + +[arti] +state_dir = "/tmp/state" +cache_dir = "/tmp/cache" + +[tcp_stack] +kind = "system" +listen = "127.0.0.1:9140" +"#; + + let config = Config::from_payload(payload.as_bytes()).unwrap(); + assert_eq!(config.tun_name.as_deref(), Some("burrow-tor")); + assert_eq!(config.listen_addr().unwrap().to_string(), "127.0.0.1:9140"); + } +} diff --git a/burrow/src/tor/mod.rs b/burrow/src/tor/mod.rs new file mode 100644 index 0000000..d275c7e --- /dev/null +++ b/burrow/src/tor/mod.rs @@ -0,0 +1,6 @@ +mod config; +mod runtime; +mod system; + +pub use config::{ArtiConfig, Config, SystemTcpStackConfig, 
TcpStackConfig}; +pub use runtime::{spawn, TorHandle}; diff --git a/burrow/src/tor/runtime.rs b/burrow/src/tor/runtime.rs new file mode 100644 index 0000000..a7deb3c --- /dev/null +++ b/burrow/src/tor/runtime.rs @@ -0,0 +1,116 @@ +use std::{sync::Arc, time::Duration}; + +use anyhow::{Context, Result}; +use arti_client::{config::TorClientConfigBuilder, TorClient}; +use tokio::{ + sync::watch, + task::{JoinError, JoinSet}, +}; +use tokio_util::compat::FuturesAsyncReadCompatExt; +use tracing::{debug, error, info, warn}; + +use super::{system::SystemTcpStackRuntime, Config, TcpStackConfig}; + +#[derive(Debug)] +pub struct TorHandle { + shutdown: watch::Sender, + task: tokio::task::JoinHandle<()>, +} + +impl TorHandle { + pub async fn shutdown(self) -> Result<()> { + let _ = self.shutdown.send(true); + match self.task.await { + Ok(()) => Ok(()), + Err(err) if err.is_cancelled() => Ok(()), + Err(err) => Err(join_error(err)), + } + } +} + +pub async fn spawn(config: Config) -> Result { + let builder = + TorClientConfigBuilder::from_directories(&config.arti.state_dir, &config.arti.cache_dir); + let tor_config = builder.build().context("failed to build arti config")?; + let tor_client = Arc::new( + TorClient::create_bootstrapped(tor_config) + .await + .context("failed to bootstrap arti client")?, + ); + + let (shutdown_tx, mut shutdown_rx) = watch::channel(false); + let task = match config.tcp_stack.clone() { + TcpStackConfig::System(system_config) => tokio::spawn(async move { + let stack = match SystemTcpStackRuntime::bind(&system_config).await { + Ok(stack) => stack, + Err(err) => { + error!(?err, "failed to bind system tcp stack listener"); + return; + } + }; + info!( + listen = %stack.local_addr(), + "system tcp stack listener bound for tor transparent proxy" + ); + + let mut connections = JoinSet::new(); + loop { + tokio::select! 
{ + changed = shutdown_rx.changed() => { + match changed { + Ok(()) if *shutdown_rx.borrow() => break, + Ok(()) => continue, + Err(_) => break, + } + } + Some(res) = connections.join_next(), if !connections.is_empty() => { + match res { + Ok(Ok(())) => {} + Ok(Err(err)) => warn!(?err, "transparent proxy task failed"), + Err(err) => warn!(?err, "transparent proxy task panicked"), + } + } + accepted = stack.accept() => { + let (mut inbound, original_dst) = match accepted { + Ok(pair) => pair, + Err(err) => { + warn!(?err, "failed to accept transparent tcp connection"); + tokio::time::sleep(Duration::from_millis(50)).await; + continue; + } + }; + + let tor_client = tor_client.clone(); + connections.spawn(async move { + debug!(%original_dst, "accepted transparent tcp connection"); + let tor_stream = tor_client + .connect((original_dst.ip().to_string(), original_dst.port())) + .await + .with_context(|| format!("failed to connect to {original_dst} over tor"))?; + let mut tor_stream = tor_stream.compat(); + tokio::io::copy_bidirectional(&mut inbound, &mut tor_stream) + .await + .with_context(|| format!("failed to bridge tor stream for {original_dst}"))?; + Result::<()>::Ok(()) + }); + } + } + } + + connections.abort_all(); + while let Some(res) = connections.join_next().await { + match res { + Ok(Ok(())) => {} + Ok(Err(err)) => debug!(?err, "transparent proxy task failed during shutdown"), + Err(err) => debug!(?err, "transparent proxy task exited during shutdown"), + } + } + }), + }; + + Ok(TorHandle { shutdown: shutdown_tx, task }) +} + +fn join_error(err: JoinError) -> anyhow::Error { + anyhow::anyhow!("tor runtime task failed: {err}") +} diff --git a/burrow/src/tor/system.rs b/burrow/src/tor/system.rs new file mode 100644 index 0000000..c049835 --- /dev/null +++ b/burrow/src/tor/system.rs @@ -0,0 +1,856 @@ +use std::net::SocketAddr; + +use anyhow::{Context, Result}; +use tokio::net::{TcpListener, TcpStream}; + +use super::SystemTcpStackConfig; + +pub struct 
SystemTcpStackRuntime { + listener: TcpListener, + #[cfg(target_vendor = "apple")] + flow_tracker: AppleFlowTracker, +} + +impl SystemTcpStackRuntime { + pub async fn bind(config: &SystemTcpStackConfig) -> Result { + let listener = TcpListener::bind(&config.listen) + .await + .with_context(|| format!("failed to bind transparent listener on {}", config.listen))?; + #[cfg(target_vendor = "apple")] + let flow_tracker = AppleFlowTracker::new( + listener + .local_addr() + .expect("listener should always have a local address"), + ) + .context("failed to open /dev/pf for transparent destination lookups")?; + Ok(Self { + listener, + #[cfg(target_vendor = "apple")] + flow_tracker, + }) + } + + pub fn local_addr(&self) -> SocketAddr { + self.listener + .local_addr() + .expect("listener should always have a local address") + } + + pub async fn accept(&self) -> Result<(TcpStream, SocketAddr)> { + let (stream, _) = self + .listener + .accept() + .await + .context("failed to accept transparent listener connection")?; + #[cfg(target_vendor = "apple")] + let original_dst = self.flow_tracker.resolve(&stream)?; + #[cfg(not(target_vendor = "apple"))] + let original_dst = original_destination(&stream)?; + Ok((stream, original_dst)) + } +} + +#[cfg(target_os = "linux")] +fn original_destination(stream: &TcpStream) -> Result { + use std::{ + mem::{size_of, MaybeUninit}, + os::fd::AsRawFd, + }; + + let level = if stream.local_addr()?.is_ipv6() { + libc::SOL_IPV6 + } else { + libc::SOL_IP + }; + + let mut addr = MaybeUninit::::zeroed(); + let mut len = size_of::() as libc::socklen_t; + let rc = unsafe { + libc::getsockopt( + stream.as_raw_fd(), + level, + 80, + addr.as_mut_ptr().cast(), + &mut len, + ) + }; + if rc != 0 { + return Err(std::io::Error::last_os_error()).context("SO_ORIGINAL_DST lookup failed"); + } + + socket_addr_from_storage(unsafe { &addr.assume_init() }, len as usize) +} + +#[cfg(all(not(target_os = "linux"), not(target_vendor = "apple")))] +fn 
original_destination(_stream: &TcpStream) -> Result { + anyhow::bail!("system tcp stack transparent destination lookup is only implemented on linux") +} + +#[cfg(target_vendor = "apple")] +mod apple_pf { + use std::{ + collections::HashMap, + fs::File, + io, + mem::zeroed, + io::Read, + net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, + os::fd::{AsRawFd, RawFd}, + time::{Duration, Instant}, + }; + + use anyhow::{anyhow, bail, Context, Result}; + use nix::{ioctl_readwrite, libc}; + use parking_lot::Mutex; + use tokio::net::TcpStream; + + ioctl_readwrite!(pf_natlook, b'D', 23, PfiocNatlook); + + const FLOW_CACHE_LIMIT: usize = 4096; + const FLOW_CACHE_TTL: Duration = Duration::from_secs(30); + const PF_OUT: u8 = 2; + const PFLOG_RULESET_NAME_SIZE: usize = 16; + const PFLOG_DEVICE: &str = "pflog0"; + const OBSERVER_WAIT_STEPS: usize = 20; + const OBSERVER_WAIT_INTERVAL: Duration = Duration::from_millis(10); + + pub(super) struct AppleFlowTracker { + pf: File, + listener_addr: SocketAddr, + state: Mutex, + } + + impl AppleFlowTracker { + pub(super) fn new(listener_addr: SocketAddr) -> io::Result { + Ok(Self { + pf: File::options().read(true).write(true).open("/dev/pf")?, + listener_addr, + state: Mutex::new(FlowState { + cache: HashMap::new(), + observer: PacketObserver::new(listener_addr).ok(), + }), + }) + } + + pub(super) fn resolve(&self, stream: &TcpStream) -> Result { + let key = FlowKey::from_stream(stream)?; + if let Some(original_dst) = self.cached_destination(key) { + return Ok(original_dst); + } + + match lookup_pf_original_destination(self.pf.as_raw_fd(), key.peer, key.local) { + Ok(original_dst) => { + self.remember(key, original_dst); + Ok(original_dst) + } + Err(err) + if matches!( + err.raw_os_error(), + Some(code) if code == libc::EPERM || code == libc::ENOENT + ) => + { + if let Some(original_dst) = self.wait_for_observer(key) { + return Ok(original_dst); + } + match err.raw_os_error() { + Some(code) if code == libc::EPERM => 
Err(anyhow!( + "PF NAT lookups are denied on this macOS build and no logged redirect flow was observed for {} -> {}", + key.peer, + key.local + )), + Some(code) if code == libc::ENOENT => Err(anyhow!( + "PF did not have a redirect state for {} -> {} and no logged redirect flow was observed; ensure outbound TCP is redirected and logged before Burrow accepts it", + key.peer, + key.local + )), + _ => unreachable!(), + } + } + Err(err) => Err(err).context("DIOCNATLOOK failed"), + } + } + + fn cached_destination(&self, key: FlowKey) -> Option { + let mut state = self.state.lock(); + state.prune(); + state.drain_observer(self.listener_addr); + state.cache.get(&key).map(|entry| entry.original_dst) + } + + fn remember(&self, key: FlowKey, original_dst: SocketAddr) { + let mut state = self.state.lock(); + state.prune(); + remember_flow(&mut state.cache, key, original_dst, Instant::now()); + } + + fn wait_for_observer(&self, key: FlowKey) -> Option { + for _ in 0..OBSERVER_WAIT_STEPS { + if let Some(original_dst) = self.cached_destination(key) { + return Some(original_dst); + } + std::thread::sleep(OBSERVER_WAIT_INTERVAL); + } + None + } + } + + struct FlowState { + cache: HashMap, + observer: Option, + } + + impl FlowState { + fn prune(&mut self) { + let now = Instant::now(); + self.cache.retain(|_, entry| entry.expires_at > now); + } + + fn drain_observer(&mut self, listener_addr: SocketAddr) { + let Some(mut observer) = self.observer.take() else { + return; + }; + if observer.drain(listener_addr, &mut self.cache).is_ok() { + self.observer = Some(observer); + } + } + } + + #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] + struct FlowKey { + peer: SocketAddr, + local: SocketAddr, + } + + impl FlowKey { + fn from_stream(stream: &TcpStream) -> Result { + let peer = stream.peer_addr().context("failed to read transparent peer address")?; + let local = stream + .local_addr() + .context("failed to read transparent listener address")?; + match (peer, local) { + 
(SocketAddr::V4(_), SocketAddr::V4(_)) | (SocketAddr::V6(_), SocketAddr::V6(_)) => { + Ok(Self { peer, local }) + } + _ => bail!("transparent socket had mismatched source/destination address families"), + } + } + } + + #[derive(Clone, Copy, Debug)] + struct FlowEntry { + original_dst: SocketAddr, + expires_at: Instant, + } + + fn remember_flow( + cache: &mut HashMap, + key: FlowKey, + original_dst: SocketAddr, + now: Instant, + ) { + cache.retain(|_, entry| entry.expires_at > now); + if cache.len() >= FLOW_CACHE_LIMIT { + if let Some(oldest) = cache + .iter() + .min_by_key(|(_, entry)| entry.expires_at) + .map(|(flow_key, _)| *flow_key) + { + cache.remove(&oldest); + } + } + cache.insert( + key, + FlowEntry { + original_dst, + expires_at: now + FLOW_CACHE_TTL, + }, + ); + } + + fn lookup_pf_original_destination( + fd: RawFd, + peer: SocketAddr, + local: SocketAddr, + ) -> io::Result { + let mut request = PfiocNatlook::for_flow(peer, local) + .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?; + let ioctl_result = unsafe { pf_natlook(fd, &mut request) }; + if let Err(errno) = ioctl_result { + return Err(io::Error::from(errno)); + } + request + .original_destination() + .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err)) + } + + struct PacketObserver { + file: File, + buffer: Vec, + } + + impl PacketObserver { + fn new(listener_addr: SocketAddr) -> io::Result { + if listener_addr.ip().is_unspecified() { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "packet observer requires an explicit listener address", + )); + } + + let file = open_bpf_device()?; + bind_bpf_to_interface(file.as_raw_fd(), PFLOG_DEVICE)?; + set_bpf_flag(file.as_raw_fd(), libc::BIOCIMMEDIATE, 1)?; + set_bpf_flag(file.as_raw_fd(), libc::BIOCSSEESENT, 1)?; + set_nonblocking(file.as_raw_fd())?; + + let mut buffer_len: libc::c_uint = 0; + ioctl_value(file.as_raw_fd(), libc::BIOCGBLEN, &mut buffer_len)?; + Ok(Self { + file, + buffer: vec![0; buffer_len as usize], 
+ }) + } + + fn drain( + &mut self, + listener_addr: SocketAddr, + cache: &mut HashMap, + ) -> io::Result<()> { + loop { + match self.file.read(&mut self.buffer) { + Ok(0) => break, + Ok(read) => self.consume(&self.buffer[..read], listener_addr, cache), + Err(err) if err.kind() == io::ErrorKind::WouldBlock => break, + Err(err) => return Err(err), + } + } + Ok(()) + } + + fn consume( + &self, + buffer: &[u8], + listener_addr: SocketAddr, + cache: &mut HashMap, + ) { + let mut offset = 0usize; + let now = Instant::now(); + while offset + std::mem::size_of::() <= buffer.len() { + let header = unsafe { + std::ptr::read_unaligned(buffer[offset..].as_ptr() as *const libc::bpf_hdr) + }; + let header_len = header.bh_hdrlen as usize; + let captured_len = header.bh_caplen as usize; + let packet_start = offset + header_len; + let packet_end = packet_start + captured_len; + let next_record = offset + bpf_wordalign(header_len + captured_len); + if packet_end > buffer.len() || next_record > buffer.len() { + break; + } + + if let Some((peer, original_dst)) = + parse_logged_syn(&buffer[packet_start..packet_end], listener_addr) + { + remember_flow( + cache, + FlowKey { + peer, + local: listener_addr, + }, + original_dst, + now, + ); + } + + offset = next_record; + } + } + } + + fn open_bpf_device() -> io::Result { + for index in 0..=255 { + match File::options() + .read(true) + .open(format!("/dev/bpf{index}")) + { + Ok(file) => return Ok(file), + Err(err) if err.raw_os_error() == Some(libc::EBUSY) => continue, + Err(err) => return Err(err), + } + } + Err(io::Error::new( + io::ErrorKind::NotFound, + "no free /dev/bpf devices were available", + )) + } + + fn bind_bpf_to_interface(fd: RawFd, ifname: &str) -> io::Result<()> { + let mut ifreq = unsafe { zeroed::() }; + let bytes = ifname.as_bytes(); + let max = std::cmp::min(bytes.len(), libc::IFNAMSIZ.saturating_sub(1)); + for (index, byte) in bytes.iter().take(max).enumerate() { + ifreq.ifr_name[index] = *byte as libc::c_char; + } + 
ioctl_value(fd, libc::BIOCSETIF, &mut ifreq) + } + + fn set_bpf_flag(fd: RawFd, request: libc::c_ulong, value: libc::c_uint) -> io::Result<()> { + let mut flag = value; + ioctl_value(fd, request, &mut flag) + } + + fn set_nonblocking(fd: RawFd) -> io::Result<()> { + let current = unsafe { libc::fcntl(fd, libc::F_GETFL) }; + if current < 0 { + return Err(io::Error::last_os_error()); + } + if unsafe { libc::fcntl(fd, libc::F_SETFL, current | libc::O_NONBLOCK) } != 0 { + return Err(io::Error::last_os_error()); + } + Ok(()) + } + + fn ioctl_value(fd: RawFd, request: libc::c_ulong, value: &mut T) -> io::Result<()> { + if unsafe { libc::ioctl(fd, request, value) } != 0 { + return Err(io::Error::last_os_error()); + } + Ok(()) + } + + fn parse_logged_syn( + record: &[u8], + listener_addr: SocketAddr, + ) -> Option<(SocketAddr, SocketAddr)> { + let header = read_pflog_header(record)?; + if header.dir != PF_OUT { + return None; + } + let packet = record.get(header.length as usize..)?; + match header.af as i32 { + libc::AF_INET => parse_ipv4_syn(packet, listener_addr), + libc::AF_INET6 => parse_ipv6_syn(packet, listener_addr), + _ => None, + } + } + + fn parse_ipv4_syn(packet: &[u8], listener_addr: SocketAddr) -> Option<(SocketAddr, SocketAddr)> { + if !matches!(listener_addr, SocketAddr::V4(_)) || packet.len() < 20 || packet[0] >> 4 != 4 { + return None; + } + let header_len = usize::from(packet[0] & 0x0f) * 4; + if header_len < 20 || packet.len() < header_len + 20 || packet[9] != libc::IPPROTO_TCP as u8 { + return None; + } + let tcp = &packet[header_len..]; + let flags = tcp[13]; + if flags & 0x02 == 0 || flags & 0x10 != 0 { + return None; + } + let source_ip = Ipv4Addr::new(packet[12], packet[13], packet[14], packet[15]); + let dest_ip = Ipv4Addr::new(packet[16], packet[17], packet[18], packet[19]); + let source_port = u16::from_be_bytes([tcp[0], tcp[1]]); + let dest_port = u16::from_be_bytes([tcp[2], tcp[3]]); + Some(( + SocketAddr::V4(SocketAddrV4::new(source_ip, 
source_port)), + SocketAddr::V4(SocketAddrV4::new(dest_ip, dest_port)), + )) + } + + fn parse_ipv6_syn(packet: &[u8], listener_addr: SocketAddr) -> Option<(SocketAddr, SocketAddr)> { + if !matches!(listener_addr, SocketAddr::V6(_)) || packet.len() < 40 || packet[0] >> 4 != 6 { + return None; + } + if packet[6] != libc::IPPROTO_TCP as u8 || packet.len() < 60 { + return None; + } + let tcp = &packet[40..]; + let flags = tcp[13]; + if flags & 0x02 == 0 || flags & 0x10 != 0 { + return None; + } + let source_ip = Ipv6Addr::from(<[u8; 16]>::try_from(&packet[8..24]).ok()?); + let dest_ip = Ipv6Addr::from(<[u8; 16]>::try_from(&packet[24..40]).ok()?); + let source_port = u16::from_be_bytes([tcp[0], tcp[1]]); + let dest_port = u16::from_be_bytes([tcp[2], tcp[3]]); + Some(( + SocketAddr::V6(SocketAddrV6::new(source_ip, source_port, 0, 0)), + SocketAddr::V6(SocketAddrV6::new(dest_ip, dest_port, 0, 0)), + )) + } + + fn read_pflog_header(record: &[u8]) -> Option { + if record.len() < std::mem::size_of::() { + return None; + } + let header = + unsafe { std::ptr::read_unaligned(record.as_ptr() as *const PflogHdr) }; + if header.length as usize > record.len() || (header.length as usize) < PFLOG_REAL_HDRLEN { + return None; + } + Some(header) + } + + const fn bpf_wordalign(len: usize) -> usize { + let alignment = std::mem::size_of::(); + (len + (alignment - 1)) & !(alignment - 1) + } + + #[repr(C)] + #[derive(Clone, Copy)] + struct PfiocNatlook { + saddr: PfAddr, + daddr: PfAddr, + rsaddr: PfAddr, + rdaddr: PfAddr, + sxport: PfStateXport, + dxport: PfStateXport, + rsxport: PfStateXport, + rdxport: PfStateXport, + af: libc::sa_family_t, + proto: u8, + proto_variant: u8, + direction: u8, + } + + impl PfiocNatlook { + fn for_flow(peer: SocketAddr, local: SocketAddr) -> Result { + let (saddr, sxport, source_af) = pf_endpoint(peer); + let (daddr, dxport, destination_af) = pf_endpoint(local); + if source_af != destination_af { + bail!("transparent flow key changed address family across 
redirect"); + } + Ok(Self { + saddr, + daddr, + rsaddr: PfAddr::default(), + rdaddr: PfAddr::default(), + sxport, + dxport, + rsxport: PfStateXport::default(), + rdxport: PfStateXport::default(), + af: source_af, + proto: libc::IPPROTO_TCP as u8, + proto_variant: 0, + direction: PF_OUT, + }) + } + + fn original_destination(&self) -> Result { + socket_addr_from_pf(self.af, self.rdaddr, self.rdxport) + } + } + + fn pf_endpoint(addr: SocketAddr) -> (PfAddr, PfStateXport, libc::sa_family_t) { + let port = PfStateXport { + port: u16::to_be(addr.port()), + }; + match addr { + SocketAddr::V4(addr) => ( + PfAddr::from_ipv4(*addr.ip()), + port, + libc::AF_INET as libc::sa_family_t, + ), + SocketAddr::V6(addr) => ( + PfAddr::from_ipv6(*addr.ip()), + port, + libc::AF_INET6 as libc::sa_family_t, + ), + } + } + + fn socket_addr_from_pf( + af: libc::sa_family_t, + addr: PfAddr, + port: PfStateXport, + ) -> Result { + match af as i32 { + libc::AF_INET => Ok(SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::from(addr.v4_octets()), + u16::from_be(unsafe { port.port }), + ))), + libc::AF_INET6 => Ok(SocketAddr::V6(SocketAddrV6::new( + Ipv6Addr::from(addr.v6_octets()), + u16::from_be(unsafe { port.port }), + 0, + 0, + ))), + family => bail!("unsupported PF address family {family}"), + } + } + + #[repr(C)] + #[derive(Clone, Copy)] + union PfAddrRepr { + v4addr: libc::in_addr, + v6addr: libc::in6_addr, + addr8: [u8; 16], + addr16: [u16; 8], + addr32: [u32; 4], + } + + #[repr(C)] + #[derive(Clone, Copy)] + struct PfAddr { + pfa: PfAddrRepr, + } + + impl Default for PfAddr { + fn default() -> Self { + Self { + pfa: PfAddrRepr { addr32: [0; 4] }, + } + } + } + + impl PfAddr { + fn from_ipv4(ip: Ipv4Addr) -> Self { + let mut bytes = [0u8; 16]; + bytes[..4].copy_from_slice(&ip.octets()); + Self { + pfa: PfAddrRepr { addr8: bytes }, + } + } + + fn from_ipv6(ip: Ipv6Addr) -> Self { + Self { + pfa: PfAddrRepr { + addr8: ip.octets(), + }, + } + } + + fn v4_octets(self) -> [u8; 4] { + let bytes = 
unsafe { self.pfa.addr8 }; + [bytes[0], bytes[1], bytes[2], bytes[3]] + } + + fn v6_octets(self) -> [u8; 16] { + unsafe { self.pfa.addr8 } + } + } + + #[repr(C)] + #[derive(Clone, Copy)] + union PfStateXport { + port: u16, + call_id: u16, + spi: u32, + } + + #[repr(C)] + #[derive(Clone, Copy)] + struct PflogHdr { + length: u8, + af: libc::sa_family_t, + action: u8, + reason: u8, + ifname: [libc::c_char; libc::IFNAMSIZ], + ruleset: [libc::c_char; PFLOG_RULESET_NAME_SIZE], + rulenr: u32, + subrulenr: u32, + uid: libc::uid_t, + pid: libc::pid_t, + rule_uid: libc::uid_t, + rule_pid: libc::pid_t, + dir: u8, + pad: [u8; 3], + } + + const PFLOG_REAL_HDRLEN: usize = std::mem::offset_of!(PflogHdr, pad); + + impl Default for PfStateXport { + fn default() -> Self { + unsafe { zeroed() } + } + } + + #[cfg(test)] + mod tests { + use super::*; + + #[test] + fn builds_natlook_request_from_redirected_flow() { + let request = PfiocNatlook::for_flow( + "192.0.2.10:41000".parse().unwrap(), + "127.0.0.1:9040".parse().unwrap(), + ) + .unwrap(); + assert_eq!(request.af as i32, libc::AF_INET); + assert_eq!(request.proto, libc::IPPROTO_TCP as u8); + assert_eq!(request.direction, PF_OUT); + assert_eq!(request.saddr.v4_octets(), [192, 0, 2, 10]); + assert_eq!(request.daddr.v4_octets(), [127, 0, 0, 1]); + assert_eq!(u16::from_be(unsafe { request.sxport.port }), 41000); + assert_eq!(u16::from_be(unsafe { request.dxport.port }), 9040); + } + + #[test] + fn decodes_original_ipv6_destination() { + let mut request = + PfiocNatlook::for_flow("[::1]:41000".parse().unwrap(), "[::1]:9040".parse().unwrap()) + .unwrap(); + request.rdaddr = PfAddr::from_ipv6("2001:db8::42".parse().unwrap()); + request.rdxport = PfStateXport { + port: u16::to_be(443), + }; + + assert_eq!( + request.original_destination().unwrap(), + "[2001:db8::42]:443".parse::().unwrap() + ); + } + + #[test] + fn parses_logged_ipv4_syn() { + let mut record = Vec::new(); + record.extend_from_slice(&[ + PFLOG_REAL_HDRLEN as u8, + 
libc::AF_INET as u8, + 0, + 0, + ]); + record.extend_from_slice(&[0; libc::IFNAMSIZ]); + record.extend_from_slice(&[0; PFLOG_RULESET_NAME_SIZE]); + record.extend_from_slice(&0u32.to_ne_bytes()); + record.extend_from_slice(&0u32.to_ne_bytes()); + record.extend_from_slice(&0u32.to_ne_bytes()); + record.extend_from_slice(&0u32.to_ne_bytes()); + record.extend_from_slice(&0u32.to_ne_bytes()); + record.extend_from_slice(&0u32.to_ne_bytes()); + record.push(PF_OUT); + + record.extend_from_slice(&[ + 0x45, 0, 0, 40, 0, 0, 0, 0, 64, libc::IPPROTO_TCP as u8, 0, 0, 192, 0, 2, 10, + 198, 51, 100, 42, + ]); + record.extend_from_slice(&[ + 0x9c, 0x28, 0x01, 0xbb, 0, 0, 0, 0, 0, 0, 0, 0, 0x50, 0x02, 0x20, 0, 0, 0, 0, + 0, + ]); + + assert_eq!( + parse_logged_syn(&record, "127.0.0.1:9040".parse().unwrap()), + Some(( + "192.0.2.10:39976".parse().unwrap(), + "198.51.100.42:443".parse().unwrap(), + )) + ); + } + + #[test] + fn parses_logged_ipv6_syn() { + let mut record = Vec::new(); + record.extend_from_slice(&[ + PFLOG_REAL_HDRLEN as u8, + libc::AF_INET6 as u8, + 0, + 0, + ]); + record.extend_from_slice(&[0; libc::IFNAMSIZ]); + record.extend_from_slice(&[0; PFLOG_RULESET_NAME_SIZE]); + record.extend_from_slice(&0u32.to_ne_bytes()); + record.extend_from_slice(&0u32.to_ne_bytes()); + record.extend_from_slice(&0u32.to_ne_bytes()); + record.extend_from_slice(&0u32.to_ne_bytes()); + record.extend_from_slice(&0u32.to_ne_bytes()); + record.extend_from_slice(&0u32.to_ne_bytes()); + record.push(PF_OUT); + + let source_ip = Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0x10).octets(); + let dest_ip = Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0x42).octets(); + record.extend_from_slice(&[ + 0x60, 0, 0, 0, 0, 20, libc::IPPROTO_TCP as u8, 64, + ]); + record.extend_from_slice(&source_ip); + record.extend_from_slice(&dest_ip); + record.extend_from_slice(&[ + 0x9c, 0x28, 0x01, 0xbb, 0, 0, 0, 0, 0, 0, 0, 0, 0x50, 0x02, 0x20, 0, 0, 0, 0, + 0, + ]); + + assert_eq!( + parse_logged_syn(&record, 
"[::1]:9040".parse().unwrap()), + Some(( + "[2001:db8::10]:39976".parse().unwrap(), + "[2001:db8::42]:443".parse().unwrap(), + )) + ); + } + } +} + +#[cfg(target_vendor = "apple")] +use apple_pf::AppleFlowTracker; + +#[cfg(target_os = "linux")] +fn socket_addr_from_storage(addr: &libc::sockaddr_storage, len: usize) -> Result { + use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}; + + if len < std::mem::size_of::() { + anyhow::bail!("socket address buffer was too short"); + } + + match addr.ss_family as i32 { + libc::AF_INET => { + let addr_in = unsafe { *(addr as *const _ as *const libc::sockaddr_in) }; + let ip = Ipv4Addr::from(u32::from_be(addr_in.sin_addr.s_addr)); + let port = u16::from_be(addr_in.sin_port); + Ok(SocketAddr::V4(SocketAddrV4::new(ip, port))) + } + libc::AF_INET6 => { + let addr_in = unsafe { *(addr as *const _ as *const libc::sockaddr_in6) }; + let ip = Ipv6Addr::from(addr_in.sin6_addr.s6_addr); + let port = u16::from_be(addr_in.sin6_port); + Ok(SocketAddr::V6(SocketAddrV6::new( + ip, + port, + addr_in.sin6_flowinfo, + addr_in.sin6_scope_id, + ))) + } + family => anyhow::bail!("unsupported socket address family {family}"), + } +} + +#[cfg(all(test, target_os = "linux"))] +mod tests { + use super::*; + use std::{ + mem::size_of, + net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}, + }; + + #[test] + fn parses_ipv4_socket_addr() { + let mut storage = unsafe { std::mem::zeroed::() }; + let addr_in = unsafe { &mut *(&mut storage as *mut _ as *mut libc::sockaddr_in) }; + addr_in.sin_family = libc::AF_INET as libc::sa_family_t; + addr_in.sin_port = u16::to_be(9040); + addr_in.sin_addr = libc::in_addr { + s_addr: u32::to_be(u32::from(Ipv4Addr::new(127, 0, 0, 1))), + }; + + let parsed = socket_addr_from_storage(&storage, size_of::()).unwrap(); + assert_eq!( + parsed, + SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 9040)) + ); + } + + #[test] + fn parses_ipv6_socket_addr() { + let mut storage = unsafe { 
std::mem::zeroed::() }; + let addr_in = unsafe { &mut *(&mut storage as *mut _ as *mut libc::sockaddr_in6) }; + addr_in.sin6_family = libc::AF_INET6 as libc::sa_family_t; + addr_in.sin6_port = u16::to_be(9150); + addr_in.sin6_addr = libc::in6_addr { + s6_addr: Ipv6Addr::LOCALHOST.octets(), + }; + + let parsed = socket_addr_from_storage(&storage, size_of::()).unwrap(); + assert_eq!( + parsed, + SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 9150, 0, 0)) + ); + } +} diff --git a/docs/TOR.md b/docs/TOR.md new file mode 100644 index 0000000..81b8a1a --- /dev/null +++ b/docs/TOR.md @@ -0,0 +1,41 @@ +# Tor Transport + +Burrow now has a `Tor` network type that boots an in-process [Arti](https://gitlab.torproject.org/tpo/core/arti) client and exposes a transparent TCP listener for outbound stream forwarding. + +The first implementation is intentionally narrow: + +- `tcp_stack.kind = "system"` is the only supported TCP stack backend. +- transparent destination recovery uses Linux `SO_ORIGINAL_DST` and macOS PF lookups. +- on macOS, Burrow first tries PF `DIOCNATLOOK`, then falls back to a `pflog0` observer backed by an in-memory flow cache keyed by the redirected socket tuple. +- Burrow does not yet install firewall redirect rules for you. +- traffic reaches Arti only if the host already redirects outbound TCP flows to Burrow's local listener. +- the macOS observer fallback only works when the redirect rule is logged to `pflog0` and Burrow listens on an explicit local address such as `127.0.0.1:9040`. +- destination handling is IP-and-port based, so this does not yet capture DNS or `.onion` names before local resolution. +- Burrow still does not install loop-avoidance rules for Arti's own relay connections, so redirect rules must exempt those flows externally for now. + +## Payload format + +`Network.payload` can be JSON or TOML. 
+ +```json +{ + "address": ["100.64.0.2/32"], + "tun_name": "burrow-tor", + "mtu": 1400, + "arti": { + "state_dir": "/var/lib/burrow/arti/state", + "cache_dir": "/var/cache/burrow/arti" + }, + "tcp_stack": { + "kind": "system", + "listen": "127.0.0.1:9040" + } +} +``` + +## Next steps + +- teach Burrow to program and tear down redirect rules safely. +- add loop-avoidance for Arti's own relay connections before enabling automatic redirect. +- add DNS capture or hostname-aware forwarding for `.onion` and other unresolved destinations. +- add alternate pure-Rust TCP stack backends behind the same `tcp_stack` enum. diff --git a/proto/burrow.proto b/proto/burrow.proto index 2355b8d..efbb064 100644 --- a/proto/burrow.proto +++ b/proto/burrow.proto @@ -46,6 +46,7 @@ message Network { enum NetworkType { WireGuard = 0; HackClub = 1; + Tor = 2; } message NetworkListResponse { diff --git a/tun/Cargo.toml b/tun/Cargo.toml index 1b07833..019439d 100644 --- a/tun/Cargo.toml +++ b/tun/Cargo.toml @@ -8,7 +8,7 @@ libc = "0.2" fehler = "1.0" nix = { version = "0.26", features = ["ioctl"] } socket2 = "0.5" -tokio = { version = "1.37", default-features = false, optional = true } +tokio = { version = "1.50.0", default-features = false, optional = true } byteorder = "1.4" tracing = "0.1" log = "0.4" @@ -19,7 +19,7 @@ futures = { version = "0.3.28", optional = true } [features] serde = ["dep:serde", "dep:schemars"] -tokio = ["tokio/net", "dep:tokio", "dep:futures"] +tokio = ["tokio/macros", "tokio/net", "tokio/rt", "dep:tokio", "dep:futures"] [target.'cfg(windows)'.dependencies] lazy_static = "1.4" @@ -34,7 +34,7 @@ windows = { version = "0.48", features = [ [target.'cfg(windows)'.build-dependencies] anyhow = "1.0" bindgen = "0.65" -reqwest = { version = "0.11" } +reqwest = { version = "0.13.2" } ssri = { version = "9.0", default-features = false } -tokio = { version = "1.28", features = ["rt", "macros"] } +tokio = { version = "1.50.0", features = ["rt", "macros"] } zip = { version = 
"0.6", features = ["deflate"] } From 865b676c990ffeb4cf147129ae82ed04e3f1bb90 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 02:49:55 -0700 Subject: [PATCH 02/50] Add Forgejo namespace workflow stack --- .cargo/config.toml | 3 - .forgejo/workflows/build-apple.yml | 97 +++ .forgejo/workflows/build-rust.yml | 31 + .forgejo/workflows/build-site.yml | 31 + .gitignore | 3 +- CONSTITUTION.md | 38 + Makefile | 14 +- README.md | 11 +- Scripts/_burrow-flake.sh | 95 +++ Scripts/bootstrap-forge-intake.sh | 113 +++ Scripts/check-forge-host.sh | 143 ++++ Scripts/cloudflare-upsert-a-record.sh | 165 ++++ Scripts/forge-deploy.sh | 100 +++ Scripts/hcloud-upload-nixos-image.sh | 327 ++++++++ Scripts/hetzner-forge.sh | 284 +++++++ Scripts/nsc-build-and-upload-image.sh | 542 ++++++++++++++ Scripts/provision-forgejo-nsc.sh | 237 ++++++ Scripts/sync-forgejo-nsc-config.sh | 132 ++++ Tools/forwardemail-custom-s3.sh | 171 +++++ Tools/forwardemail-hetzner-storage.py | 261 +++++++ docs/FORWARDEMAIL.md | 101 +++ docs/PROTOCOL_ROADMAP.md | 31 + docs/WIREGUARD_LINEAGE.md | 30 + evolution/README.md | 60 ++ evolution/proposals/0000-template.md | 57 ++ ...BEP-0001-sovereign-forge-and-governance.md | 61 ++ ...-control-plane-bootstrap-and-local-auth.md | 60 ++ ...0003-connect-ip-and-negotiation-roadmap.md | 61 ++ .../BEP-0004-hosted-mail-and-saas-identity.md | 68 ++ flake.lock | 86 +++ flake.nix | 190 +++++ nixos/README.md | 53 ++ nixos/hetzner-cloud-config.yaml | 10 + nixos/hosts/burrow-forge/default.nix | 46 ++ nixos/hosts/burrow-forge/disko-config.nix | 36 + .../burrow-forge/hardware-configuration.nix | 11 + nixos/keys/agent_at_burrow_net.pub | 1 + nixos/keys/contact_at_burrow_net.pub | 1 + nixos/modules/burrow-forge-runner.nix | 213 ++++++ nixos/modules/burrow-forge.nix | 247 ++++++ nixos/modules/burrow-forgejo-nsc.nix | 234 ++++++ rust-toolchain.toml | 4 + services/forgejo-nsc/README.md | 183 +++++ services/forgejo-nsc/autoscaler.example.yaml | 34 + 
.../cmd/forgejo-nsc-autoscaler/main.go | 46 ++ .../cmd/forgejo-nsc-dispatcher/main.go | 90 +++ services/forgejo-nsc/config.example.yaml | 27 + services/forgejo-nsc/deploy/autoscaler.yaml | 35 + services/forgejo-nsc/deploy/dispatcher.yaml | 37 + services/forgejo-nsc/go.mod | 65 ++ services/forgejo-nsc/go.sum | 575 ++++++++++++++ services/forgejo-nsc/internal/app/service.go | 253 +++++++ .../forgejo-nsc/internal/app/service_test.go | 160 ++++ .../forgejo-nsc/internal/autoscaler/config.go | 108 +++ .../internal/autoscaler/service.go | 385 ++++++++++ .../forgejo-nsc/internal/config/config.go | 185 +++++ .../internal/config/config_test.go | 41 + .../forgejo-nsc/internal/forgejo/client.go | 454 +++++++++++ .../forgejo-nsc/internal/nsc/dispatcher.go | 460 ++++++++++++ services/forgejo-nsc/internal/nsc/macos.go | 708 ++++++++++++++++++ .../forgejo-nsc/internal/nsc/macos_nsc.go | 373 +++++++++ services/forgejo-nsc/internal/nsc/windows.go | 59 ++ .../forgejo-nsc/internal/nsc/windows_test.go | 98 +++ .../forgejo-nsc/internal/nsc/windows_winrm.go | 499 ++++++++++++ .../nsc/windows_winrm_integration_test.go | 59 ++ .../internal/nsc/windows_winrm_test.go | 65 ++ .../forgejo-nsc/internal/server/server.go | 151 ++++ .../internal/server/server_test.go | 111 +++ 68 files changed, 9709 insertions(+), 11 deletions(-) create mode 100644 .forgejo/workflows/build-apple.yml create mode 100644 .forgejo/workflows/build-rust.yml create mode 100644 .forgejo/workflows/build-site.yml create mode 100644 CONSTITUTION.md create mode 100755 Scripts/_burrow-flake.sh create mode 100644 Scripts/bootstrap-forge-intake.sh create mode 100755 Scripts/check-forge-host.sh create mode 100755 Scripts/cloudflare-upsert-a-record.sh create mode 100755 Scripts/forge-deploy.sh create mode 100755 Scripts/hcloud-upload-nixos-image.sh create mode 100755 Scripts/hetzner-forge.sh create mode 100755 Scripts/nsc-build-and-upload-image.sh create mode 100755 Scripts/provision-forgejo-nsc.sh create mode 100755 
Scripts/sync-forgejo-nsc-config.sh create mode 100755 Tools/forwardemail-custom-s3.sh create mode 100755 Tools/forwardemail-hetzner-storage.py create mode 100644 docs/FORWARDEMAIL.md create mode 100644 docs/PROTOCOL_ROADMAP.md create mode 100644 docs/WIREGUARD_LINEAGE.md create mode 100644 evolution/README.md create mode 100644 evolution/proposals/0000-template.md create mode 100644 evolution/proposals/BEP-0001-sovereign-forge-and-governance.md create mode 100644 evolution/proposals/BEP-0002-control-plane-bootstrap-and-local-auth.md create mode 100644 evolution/proposals/BEP-0003-connect-ip-and-negotiation-roadmap.md create mode 100644 evolution/proposals/BEP-0004-hosted-mail-and-saas-identity.md create mode 100644 flake.lock create mode 100644 flake.nix create mode 100644 nixos/README.md create mode 100644 nixos/hetzner-cloud-config.yaml create mode 100644 nixos/hosts/burrow-forge/default.nix create mode 100644 nixos/hosts/burrow-forge/disko-config.nix create mode 100644 nixos/hosts/burrow-forge/hardware-configuration.nix create mode 100644 nixos/keys/agent_at_burrow_net.pub create mode 100644 nixos/keys/contact_at_burrow_net.pub create mode 100644 nixos/modules/burrow-forge-runner.nix create mode 100644 nixos/modules/burrow-forge.nix create mode 100644 nixos/modules/burrow-forgejo-nsc.nix create mode 100644 rust-toolchain.toml create mode 100644 services/forgejo-nsc/README.md create mode 100644 services/forgejo-nsc/autoscaler.example.yaml create mode 100644 services/forgejo-nsc/cmd/forgejo-nsc-autoscaler/main.go create mode 100644 services/forgejo-nsc/cmd/forgejo-nsc-dispatcher/main.go create mode 100644 services/forgejo-nsc/config.example.yaml create mode 100644 services/forgejo-nsc/deploy/autoscaler.yaml create mode 100644 services/forgejo-nsc/deploy/dispatcher.yaml create mode 100644 services/forgejo-nsc/go.mod create mode 100644 services/forgejo-nsc/go.sum create mode 100644 services/forgejo-nsc/internal/app/service.go create mode 100644 
services/forgejo-nsc/internal/app/service_test.go create mode 100644 services/forgejo-nsc/internal/autoscaler/config.go create mode 100644 services/forgejo-nsc/internal/autoscaler/service.go create mode 100644 services/forgejo-nsc/internal/config/config.go create mode 100644 services/forgejo-nsc/internal/config/config_test.go create mode 100644 services/forgejo-nsc/internal/forgejo/client.go create mode 100644 services/forgejo-nsc/internal/nsc/dispatcher.go create mode 100644 services/forgejo-nsc/internal/nsc/macos.go create mode 100644 services/forgejo-nsc/internal/nsc/macos_nsc.go create mode 100644 services/forgejo-nsc/internal/nsc/windows.go create mode 100644 services/forgejo-nsc/internal/nsc/windows_test.go create mode 100644 services/forgejo-nsc/internal/nsc/windows_winrm.go create mode 100644 services/forgejo-nsc/internal/nsc/windows_winrm_integration_test.go create mode 100644 services/forgejo-nsc/internal/nsc/windows_winrm_test.go create mode 100644 services/forgejo-nsc/internal/server/server.go create mode 100644 services/forgejo-nsc/internal/server/server_test.go diff --git a/.cargo/config.toml b/.cargo/config.toml index 302ce48..767d03a 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,6 +1,3 @@ -[target.'cfg(unix)'] -runner = "sudo -E" - [alias] # command aliases rr = "run --release" bb = "build --release" diff --git a/.forgejo/workflows/build-apple.yml b/.forgejo/workflows/build-apple.yml new file mode 100644 index 0000000..32c6903 --- /dev/null +++ b/.forgejo/workflows/build-apple.yml @@ -0,0 +1,97 @@ +name: Build Apple + +on: + pull_request: + branches: + - "**" + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} + cancel-in-progress: true + +jobs: + build: + name: Build App (${{ matrix.platform }}) + runs-on: namespace-profile-macos-large + strategy: + fail-fast: false + matrix: + include: + - platform: macOS + destination: platform=macOS + rust-targets: 
x86_64-apple-darwin,aarch64-apple-darwin + - platform: iOS Simulator + destination: platform=iOS Simulator,name=iPhone 17 Pro + rust-targets: aarch64-apple-ios-sim,x86_64-apple-ios + env: + CARGO_INCREMENTAL: 0 + RUST_BACKTRACE: short + steps: + - name: Checkout + uses: https://code.forgejo.org/actions/checkout@v4 + with: + token: ${{ github.token }} + fetch-depth: 0 + submodules: recursive + + - name: Select Xcode + shell: bash + run: | + set -euo pipefail + candidates=( + "/Applications/Xcode_26.1.app/Contents/Developer" + "/Applications/Xcode_26_1.app/Contents/Developer" + "/Applications/Xcode.app/Contents/Developer" + "/Applications/Xcode/Xcode.app/Contents/Developer" + ) + selected="" + for candidate in "${candidates[@]}"; do + if [[ -d "$candidate" ]]; then + selected="$candidate" + break + fi + done + if [[ -z "$selected" ]] && command -v xcode-select >/dev/null 2>&1; then + selected="$(xcode-select -p)" + fi + if [[ -z "$selected" ]]; then + echo "::error ::Unable to locate an Xcode toolchain" >&2 + exit 1 + fi + echo "DEVELOPER_DIR=$selected" >> "$GITHUB_ENV" + DEVELOPER_DIR="$selected" /usr/bin/xcodebuild -version || true + + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: 1.85.0 + targets: ${{ matrix.rust-targets }} + + - name: Install Protobuf + shell: bash + run: | + set -euo pipefail + if ! 
command -v protoc >/dev/null 2>&1; then + brew install protobuf + fi + + - name: Build + shell: bash + working-directory: Apple + run: | + set -euo pipefail + xcodebuild build \ + -project Burrow.xcodeproj \ + -scheme App \ + -destination '${{ matrix.destination }}' \ + -skipPackagePluginValidation \ + -skipMacroValidation \ + -onlyUsePackageVersionsFromResolvedFile \ + -clonedSourcePackagesDirPath SourcePackages \ + -packageCachePath "$PWD/PackageCache" \ + -derivedDataPath "$PWD/DerivedData" \ + CODE_SIGNING_ALLOWED=NO \ + CODE_SIGNING_REQUIRED=NO \ + CODE_SIGN_IDENTITY="" \ + DEVELOPMENT_TEAM="" diff --git a/.forgejo/workflows/build-rust.yml b/.forgejo/workflows/build-rust.yml new file mode 100644 index 0000000..2df1ad3 --- /dev/null +++ b/.forgejo/workflows/build-rust.yml @@ -0,0 +1,31 @@ +name: Build Rust + +on: + push: + branches: + - main + pull_request: + branches: + - "**" + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + rust: + name: Cargo Test + runs-on: [self-hosted, linux, x86_64, burrow-forge] + steps: + - name: Checkout + uses: https://code.forgejo.org/actions/checkout@v4 + with: + token: ${{ github.token }} + fetch-depth: 0 + + - name: Test + shell: bash + run: | + set -euo pipefail + nix develop .#ci -c cargo test --workspace --all-features diff --git a/.forgejo/workflows/build-site.yml b/.forgejo/workflows/build-site.yml new file mode 100644 index 0000000..6f7c5e2 --- /dev/null +++ b/.forgejo/workflows/build-site.yml @@ -0,0 +1,31 @@ +name: Build Site + +on: + push: + branches: + - main + pull_request: + branches: + - "**" + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + site: + name: Next.js Build + runs-on: [self-hosted, linux, x86_64, burrow-forge] + steps: + - name: Checkout + uses: https://code.forgejo.org/actions/checkout@v4 + with: + token: ${{ github.token }} + fetch-depth: 0 + + - name: 
Build + shell: bash + run: | + set -euo pipefail + nix develop .#ci -c bash -lc 'cd site && npm install && npm run build' diff --git a/.gitignore b/.gitignore index 1b300b4..3c80ef9 100644 --- a/.gitignore +++ b/.gitignore @@ -14,4 +14,5 @@ target/ tmp/ *.db -*.sock \ No newline at end of file +*.sqlite3 +*.sock diff --git a/CONSTITUTION.md b/CONSTITUTION.md new file mode 100644 index 0000000..f97e683 --- /dev/null +++ b/CONSTITUTION.md @@ -0,0 +1,38 @@ +# Burrow Constitution + +1. Mission + +Burrow exists to build a proper VPN: fast, inspectable, deployable on infrastructure the project controls, and legible enough that future contributors can extend it without guesswork. + +2. Commitments + +- Protocol work must favor correctness over novelty. Burrow does not claim support for a transport or control-plane feature until the wire format, state handling, and recovery behavior are implemented and tested. +- Security is a design constraint, not a cleanup phase. Key material, bootstrap credentials, control-plane tokens, and routing policy must have explicit storage and rotation paths. +- Performance matters. Burrow should avoid needless copies, hidden blocking, and ad hoc process graphs that make packet forwarding or control-plane convergence harder to reason about. +- Source, infrastructure, and release logic live in the repository. If the forge cannot be rebuilt from the tree, the work is incomplete. +- Non-trivial changes require a Burrow Evolution Proposal. Durable rationale belongs in the repository, not only in chat. + +3. Infrastructure + +Burrow controls its own forge, runners, deployment automation, and edge configuration for `burrow.net` and `burrow.rs`. + +- Dedicated compute is preferred over SaaS dependencies when the dependency would hold release, source, or identity authority. +- Secrets may be bootstrapped from local intake for initial bring-up, but long-lived operation must converge on encrypted, versioned secret handling. 
+- Production access must be attributable. Automation identities, SSH keys, and service accounts must be named and documented. + +4. Contributors + +- Read this constitution before drafting product, protocol, or infrastructure changes. +- Capture intent, testing expectations, and rollback procedures in proposals. +- Prefer reversible migrations. If a change is destructive, document the preconditions and teardown plan first. +- Security-sensitive work requires explicit reviewer attention, even when the implementation is performed by an agent. + +5. Governance + +- Burrow Evolution Proposals (BEPs) are the primary design record for architectural, protocol, forge, and deployment changes. +- Accepted proposals are authoritative until superseded. +- Constitutional changes require a dedicated proposal that quotes the affected text and records the decision. + +6. Origin + +Burrow started as a firewall-burrowing client and now carries its own transport, daemon, mesh, and control-plane work. This constitution exists so the project can finish that evolution coherently. 
diff --git a/Makefile b/Makefile index 6563ab1..f927f5f 100644 --- a/Makefile +++ b/Makefile @@ -1,21 +1,23 @@ tun := $(shell ifconfig -l | sed 's/ /\n/g' | grep utun | tail -n 1) -cargo_console := RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features -cargo_norm := RUST_BACKTRACE=1 RUST_LOG=debug cargo run +cargo_console := env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features -- +cargo_norm := env RUST_BACKTRACE=1 RUST_LOG=debug cargo run -- +sudo_cargo_console := sudo -E env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features -- +sudo_cargo_norm := sudo -E env RUST_BACKTRACE=1 RUST_LOG=debug cargo run -- check: @cargo check build: - @cargo run build + @cargo build daemon-console: - @$(cargo_console) daemon + @$(sudo_cargo_console) daemon daemon: - @$(cargo_norm) daemon + @$(sudo_cargo_norm) daemon start: - @$(cargo_norm) start + @$(sudo_cargo_norm) start stop: @$(cargo_norm) stop diff --git a/README.md b/README.md index 89914d0..b8684c3 100644 --- a/README.md +++ b/README.md @@ -5,10 +5,19 @@ Burrow is an open source tool for burrowing through firewalls, built by teenagers at [Hack Club](https://hackclub.com/). `burrow` provides a simple command-line tool to open virtual interfaces and direct traffic through them. +Routine verification now runs unprivileged with `cargo test --workspace --all-features`; only tunnel startup needs elevation. + +The repository now carries its own design and deployment record: + +- [Constitution](./CONSTITUTION.md) +- [Burrow Evolution](./evolution/README.md) +- [WireGuard Rust Lineage](./docs/WIREGUARD_LINEAGE.md) +- [Protocol Roadmap](./docs/PROTOCOL_ROADMAP.md) +- [Forward Email Runbook](./docs/FORWARDEMAIL.md) ## Contributing -Burrow is fully open source, you can fork the repo and start contributing easily. 
For more information and in-depth discussions, visit the `#burrow` channel on the [Hack Club Slack](https://hackclub.com/slack/), here you can ask for help and talk with other people interested in burrow! Checkout [GETTING_STARTED.md](./docs/GETTING_STARTED.md) for build instructions and [GTK_APP.md](./docs/GTK_APP.md) for the Linux app. +Burrow is fully open source, you can fork the repo and start contributing easily. For more information and in-depth discussions, visit the `#burrow` channel on the [Hack Club Slack](https://hackclub.com/slack/), here you can ask for help and talk with other people interested in burrow. Checkout [GETTING_STARTED.md](./docs/GETTING_STARTED.md) for build instructions and [GTK_APP.md](./docs/GTK_APP.md) for the Linux app. Forge and deployment scaffolding live in [`flake.nix`](./flake.nix), [`nixos/`](./nixos), and [`.forgejo/workflows/`](./.forgejo/workflows/). Hosted mail backup operations live in [`docs/FORWARDEMAIL.md`](./docs/FORWARDEMAIL.md) and [`Tools/forwardemail-custom-s3.sh`](./Tools/forwardemail-custom-s3.sh). The project structure is divided in the following folders: diff --git a/Scripts/_burrow-flake.sh b/Scripts/_burrow-flake.sh new file mode 100755 index 0000000..ba4e372 --- /dev/null +++ b/Scripts/_burrow-flake.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env bash + +burrow_require_cmd() { + if ! 
command -v "$1" >/dev/null 2>&1; then + echo "missing required command: $1" >&2 + exit 1 + fi +} + +burrow_cleanup_flake_tmpdirs() { + if [[ "${#BURROW_FLAKE_TMPDIRS[@]}" -eq 0 ]]; then + return + fi + rm -rf "${BURROW_FLAKE_TMPDIRS[@]}" +} + +burrow_prepare_flake_ref() { + local input="${1:-.}" + + case "${input}" in + path:*|git+*|github:*|tarball+*|http://*|https://*) + printf '%s\n' "${input}" + return 0 + ;; + esac + + local resolved + resolved="$(cd "${input}" && pwd)" + + local cache_root="${HOME}/.cache/burrow" + mkdir -p "${cache_root}" + + local copy_root + copy_root="$(mktemp -d "${cache_root}/flake-XXXXXX")" + mkdir -p "${copy_root}/repo" + + rsync -a \ + --delete \ + --exclude '.git' \ + --exclude '.direnv' \ + --exclude 'result' \ + --exclude 'burrow.sock' \ + --exclude 'node_modules' \ + --exclude 'target' \ + --exclude 'build' \ + "${resolved}/" "${copy_root}/repo/" + + BURROW_FLAKE_TMPDIRS+=("${copy_root}") + printf 'path:%s/repo\n' "${copy_root}" +} + +burrow_resolve_image_artifact() { + local store_path="$1" + + if [[ -f "${store_path}" ]]; then + printf '%s\n' "${store_path}" + return 0 + fi + + if [[ -d "${store_path}" ]]; then + local candidate + candidate="$( + find "${store_path}" -type f \ + \( -name '*.raw' -o -name '*.raw.*' -o -name '*.img' -o -name '*.img.*' \) \ + | sort \ + | head -n1 + )" + if [[ -n "${candidate}" ]]; then + printf '%s\n' "${candidate}" + return 0 + fi + fi + + echo "unable to locate disk image artifact under ${store_path}" >&2 + exit 1 +} + +burrow_detect_compression() { + local artifact="$1" + + case "${artifact}" in + *.bz2) + printf 'bz2\n' + ;; + *.xz) + printf 'xz\n' + ;; + *.zst|*.zstd) + printf 'zstd\n' + ;; + *) + printf '\n' + ;; + esac +} diff --git a/Scripts/bootstrap-forge-intake.sh b/Scripts/bootstrap-forge-intake.sh new file mode 100644 index 0000000..0cc1d91 --- /dev/null +++ b/Scripts/bootstrap-forge-intake.sh @@ -0,0 +1,113 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname 
"$0")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" + +usage() { + cat <<'EOF' +Usage: Scripts/bootstrap-forge-intake.sh [options] + +Copy the minimum Burrow forge bootstrap secrets onto the target host under +/var/lib/burrow/intake with the ownership expected by the NixOS services. + +Options: + --host SSH target (default: root@git.burrow.net) + --ssh-key SSH private key used to reach the host + (default: intake/agent_at_burrow_net_ed25519) + --password-file Forgejo admin bootstrap password file + (default: intake/forgejo_pass_contact_at_burrow_net.txt) + --agent-key-file Agent SSH private key copied for runner bootstrap + (default: intake/agent_at_burrow_net_ed25519) + --no-verify Skip remote ls/stat verification after install + -h, --help Show this help text +EOF +} + +HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" +SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" +PASSWORD_FILE="${BURROW_FORGE_PASSWORD_FILE:-${REPO_ROOT}/intake/forgejo_pass_contact_at_burrow_net.txt}" +AGENT_KEY_FILE="${BURROW_FORGE_AGENT_KEY_FILE:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" +KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" +VERIFY=1 + +while [[ $# -gt 0 ]]; do + case "$1" in + --host) + HOST="${2:?missing value for --host}" + shift 2 + ;; + --ssh-key) + SSH_KEY="${2:?missing value for --ssh-key}" + shift 2 + ;; + --password-file) + PASSWORD_FILE="${2:?missing value for --password-file}" + shift 2 + ;; + --agent-key-file) + AGENT_KEY_FILE="${2:?missing value for --agent-key-file}" + shift 2 + ;; + --no-verify) + VERIFY=0 + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "unknown option: $1" >&2 + usage >&2 + exit 64 + ;; + esac +done + +mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")" + +for path in "${SSH_KEY}" "${PASSWORD_FILE}" "${AGENT_KEY_FILE}"; do + if [[ ! 
-s "${path}" ]]; then + echo "required file missing or empty: ${path}" >&2 + exit 1 + fi +done + +ssh_opts=( + -i "${SSH_KEY}" + -o IdentitiesOnly=yes + -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" + -o StrictHostKeyChecking=accept-new +) + +remote_tmp="$(ssh "${ssh_opts[@]}" "${HOST}" "mktemp -d")" +cleanup() { + if [[ -n "${remote_tmp:-}" ]]; then + ssh "${ssh_opts[@]}" "${HOST}" "rm -rf '${remote_tmp}'" >/dev/null 2>&1 || true + fi +} +trap cleanup EXIT + +scp "${ssh_opts[@]}" \ + "${PASSWORD_FILE}" \ + "${AGENT_KEY_FILE}" \ + "${HOST}:${remote_tmp}/" + +ssh "${ssh_opts[@]}" "${HOST}" " + set -euo pipefail + install -d -m 0755 /var/lib/burrow/intake + install -m 0400 -o forgejo -g forgejo '${remote_tmp}/$(basename "${PASSWORD_FILE}")' /var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt + install -m 0400 -o root -g root '${remote_tmp}/$(basename "${AGENT_KEY_FILE}")' /var/lib/burrow/intake/agent_at_burrow_net_ed25519 +" + +if [[ "${VERIFY}" -eq 1 ]]; then + ssh "${ssh_opts[@]}" "${HOST}" " + set -euo pipefail + ls -l \ + /var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt \ + /var/lib/burrow/intake/agent_at_burrow_net_ed25519 + " +fi + +echo "Burrow forge bootstrap intake sync complete (host=${HOST})." diff --git a/Scripts/check-forge-host.sh b/Scripts/check-forge-host.sh new file mode 100755 index 0000000..ddfb83a --- /dev/null +++ b/Scripts/check-forge-host.sh @@ -0,0 +1,143 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" + +usage() { + cat <<'EOF' +Usage: Scripts/check-forge-host.sh [options] + +Run a post-boot verification pass against the Burrow forge host. 
+ +Options: + --host SSH target (default: root@git.burrow.net) + --ssh-key SSH private key (default: intake/agent_at_burrow_net_ed25519) + --expect-nsc Fail if forgejo-nsc services are not active + -h, --help Show this help text +EOF +} + +HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" +SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" +KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" +EXPECT_NSC=0 + +while [[ $# -gt 0 ]]; do + case "$1" in + --host) + HOST="${2:?missing value for --host}" + shift 2 + ;; + --ssh-key) + SSH_KEY="${2:?missing value for --ssh-key}" + shift 2 + ;; + --expect-nsc) + EXPECT_NSC=1 + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "unknown option: $1" >&2 + usage >&2 + exit 64 + ;; + esac +done + +mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")" + +if [[ ! -f "${SSH_KEY}" ]]; then + echo "forge SSH key not found: ${SSH_KEY}" >&2 + exit 1 +fi + +ssh \ + -i "${SSH_KEY}" \ + -o IdentitiesOnly=yes \ + -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \ + -o StrictHostKeyChecking=accept-new \ + "${HOST}" \ + EXPECT_NSC="${EXPECT_NSC}" \ + 'bash -s' <<'EOF' +set -euo pipefail + +base_services=( + forgejo.service + caddy.service + burrow-forgejo-bootstrap.service + burrow-forgejo-runner-bootstrap.service + burrow-forgejo-runner.service +) + +nsc_services=( + forgejo-nsc-dispatcher.service + forgejo-nsc-autoscaler.service +) + +show_service() { + local service="$1" + systemctl show \ + --no-pager \ + --property Id \ + --property LoadState \ + --property UnitFileState \ + --property ActiveState \ + --property SubState \ + --property Result \ + "${service}" +} + +service_is_healthy() { + local service="$1" + local active_state + local result + local unit_type + + active_state="$(systemctl show --property ActiveState --value "${service}")" + result="$(systemctl show --property Result --value "${service}")" + unit_type="$(systemctl show --property Type --value "${service}")" 
+ + if [[ "${active_state}" == "active" ]]; then + return 0 + fi + + if [[ "${unit_type}" == "oneshot" && "${active_state}" == "inactive" && "${result}" == "success" ]]; then + return 0 + fi + + return 1 +} + +for service in "${base_services[@]}"; do + echo "== ${service} ==" + show_service "${service}" + if ! service_is_healthy "${service}"; then + echo "required service is not active: ${service}" >&2 + exit 1 + fi +done + +for service in "${nsc_services[@]}"; do + echo "== ${service} ==" + show_service "${service}" || true + if [[ "${EXPECT_NSC}" == "1" && "$(systemctl is-active "${service}" 2>/dev/null || true)" != "active" ]]; then + echo "required NSC service is not active: ${service}" >&2 + exit 1 + fi +done + +echo "== intake ==" +ls -l /var/lib/burrow/intake || true + +if command -v curl >/dev/null 2>&1; then + echo "== http-local ==" + curl -fsS -o /dev/null -w 'forgejo_login %{http_code}\n' http://127.0.0.1:3000/user/login + curl -fsS -o /dev/null -H 'Host: burrow.net' -w 'burrow_root %{http_code}\n' http://127.0.0.1/ + curl -fsS -o /dev/null -H 'Host: git.burrow.net' -w 'git_login %{http_code}\n' http://127.0.0.1/user/login +fi +EOF diff --git a/Scripts/cloudflare-upsert-a-record.sh b/Scripts/cloudflare-upsert-a-record.sh new file mode 100755 index 0000000..88745af --- /dev/null +++ b/Scripts/cloudflare-upsert-a-record.sh @@ -0,0 +1,165 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat <<'EOF' +Usage: Scripts/cloudflare-upsert-a-record.sh --zone --name --ipv4
[options] + +Upsert a DNS-only or proxied Cloudflare A record without putting the API token on +the process list. + +Options: + --zone Cloudflare zone name, for example burrow.net + --name Fully-qualified DNS record name + --ipv4
IPv4 address for the A record + --token-file Cloudflare API token file + default: intake/cloudflare-token.txt + --ttl Record TTL, or auto + default: auto + --proxied Whether to proxy through Cloudflare + default: false + -h, --help Show this help +EOF +} + +ZONE_NAME="" +RECORD_NAME="" +IPV4="" +TOKEN_FILE="intake/cloudflare-token.txt" +TTL_VALUE="auto" +PROXIED="false" + +while [[ $# -gt 0 ]]; do + case "$1" in + --zone) + ZONE_NAME="${2:?missing value for --zone}" + shift 2 + ;; + --name) + RECORD_NAME="${2:?missing value for --name}" + shift 2 + ;; + --ipv4) + IPV4="${2:?missing value for --ipv4}" + shift 2 + ;; + --token-file) + TOKEN_FILE="${2:?missing value for --token-file}" + shift 2 + ;; + --ttl) + TTL_VALUE="${2:?missing value for --ttl}" + shift 2 + ;; + --proxied) + PROXIED="${2:?missing value for --proxied}" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "Unknown argument: $1" >&2 + usage >&2 + exit 2 + ;; + esac +done + +if [[ -z "${ZONE_NAME}" || -z "${RECORD_NAME}" || -z "${IPV4}" ]]; then + usage >&2 + exit 2 +fi + +if [[ ! -f "${TOKEN_FILE}" ]]; then + echo "Cloudflare token file not found: ${TOKEN_FILE}" >&2 + exit 1 +fi + +if [[ ! 
"${IPV4}" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then + echo "Invalid IPv4 address: ${IPV4}" >&2 + exit 1 +fi + +case "${PROXIED}" in + true|false) + ;; + *) + echo "--proxied must be true or false" >&2 + exit 1 + ;; +esac + +case "${TTL_VALUE}" in + auto) + TTL_JSON=1 + ;; + ''|*[!0-9]*) + echo "--ttl must be a number of seconds or auto" >&2 + exit 1 + ;; + *) + TTL_JSON="${TTL_VALUE}" + ;; +esac + +TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")" +if [[ -z "${TOKEN}" ]]; then + echo "Cloudflare token file is empty: ${TOKEN_FILE}" >&2 + exit 1 +fi + +cf_api() { + local method="$1" + local path="$2" + local body="${3-}" + if [[ -n "${body}" ]]; then + curl -fsS -X "${method}" \ + -H "Authorization: Bearer ${TOKEN}" \ + -H "Content-Type: application/json" \ + --data "${body}" \ + "https://api.cloudflare.com/client/v4${path}" + else + curl -fsS -X "${method}" \ + -H "Authorization: Bearer ${TOKEN}" \ + -H "Content-Type: application/json" \ + "https://api.cloudflare.com/client/v4${path}" + fi +} + +zone_lookup="$(cf_api GET "/zones?name=${ZONE_NAME}&status=active")" +zone_id="$(jq -r '.result[0].id // empty' <<<"${zone_lookup}")" + +if [[ -z "${zone_id}" ]]; then + echo "Active Cloudflare zone not found: ${ZONE_NAME}" >&2 + exit 1 +fi + +payload="$(jq -cn \ + --arg type "A" \ + --arg name "${RECORD_NAME}" \ + --arg content "${IPV4}" \ + --argjson proxied "${PROXIED}" \ + --argjson ttl "${TTL_JSON}" \ + '{type: $type, name: $name, content: $content, proxied: $proxied, ttl: $ttl}')" + +record_lookup="$(cf_api GET "/zones/${zone_id}/dns_records?type=A&name=${RECORD_NAME}")" +record_id="$(jq -r '.result[0].id // empty' <<<"${record_lookup}")" + +if [[ -n "${record_id}" ]]; then + result="$(cf_api PUT "/zones/${zone_id}/dns_records/${record_id}" "${payload}")" + action="updated" +else + result="$(cf_api POST "/zones/${zone_id}/dns_records" "${payload}")" + action="created" +fi + +jq -r --arg action "${action}" ' + if .success != true then + .errors | tostring | halt_error(1) + else 
+ "Cloudflare DNS " + $action + ": " + .result.name + " -> " + .result.content + + " (proxied=" + (.result.proxied | tostring) + ", ttl=" + (.result.ttl | tostring) + ")" + end +' <<<"${result}" diff --git a/Scripts/forge-deploy.sh b/Scripts/forge-deploy.sh new file mode 100755 index 0000000..5c4b959 --- /dev/null +++ b/Scripts/forge-deploy.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" + +# shellcheck source=Scripts/_burrow-flake.sh +source "${SCRIPT_DIR}/_burrow-flake.sh" + +usage() { + cat <<'EOF' +Usage: Scripts/forge-deploy.sh [--test|--switch] [--flake-attr ] [--allow-dirty] + +Standardized remote deploy path for the Burrow forge host. + +Defaults: + --switch + --flake-attr burrow-forge + +Environment: + BURROW_FORGE_HOST root@git.burrow.net + BURROW_FORGE_SSH_KEY intake/agent_at_burrow_net_ed25519 +EOF +} + +MODE="switch" +FLAKE_ATTR="burrow-forge" +ALLOW_DIRTY=0 +BURROW_FLAKE_TMPDIRS=() + +cleanup() { + burrow_cleanup_flake_tmpdirs +} +trap cleanup EXIT + +while [[ $# -gt 0 ]]; do + case "$1" in + --test) + MODE="test" + shift + ;; + --switch) + MODE="switch" + shift + ;; + --flake-attr) + FLAKE_ATTR="${2:?missing value for --flake-attr}" + shift 2 + ;; + --allow-dirty) + ALLOW_DIRTY=1 + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "Unknown argument: $1" >&2 + usage >&2 + exit 2 + ;; + esac +done + +REPO_ROOT="$(git rev-parse --show-toplevel)" +cd "${REPO_ROOT}" + +if [[ ${ALLOW_DIRTY} -ne 1 ]] && [[ -n "$(git status --short)" ]]; then + echo "Refusing to deploy from a dirty checkout. Commit first, or pass --allow-dirty for incident-only work." 
>&2 + exit 1 +fi + +FORGE_HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" +FORGE_SSH_KEY="${BURROW_FORGE_SSH_KEY:-}" + +if [[ -z "${FORGE_SSH_KEY}" ]]; then + if [[ -f "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" ]]; then + FORGE_SSH_KEY="${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" + else + FORGE_SSH_KEY="${HOME}/.ssh/agent_at_burrow_net_ed25519" + fi +fi + +if [[ ! -f "${FORGE_SSH_KEY}" ]]; then + echo "Forge SSH key not found at ${FORGE_SSH_KEY}." >&2 + echo "Set BURROW_FORGE_SSH_KEY or place the agent key in intake/." >&2 + exit 1 +fi + +FORGE_KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" +mkdir -p "$(dirname "${FORGE_KNOWN_HOSTS_FILE}")" + +export NIX_SSHOPTS="-i ${FORGE_SSH_KEY} -o IdentitiesOnly=yes -o UserKnownHostsFile=${FORGE_KNOWN_HOSTS_FILE} -o StrictHostKeyChecking=accept-new" +flake_ref="$(burrow_prepare_flake_ref "${REPO_ROOT}")" + +nix --extra-experimental-features "nix-command flakes" shell nixpkgs#nixos-rebuild -c \ + nixos-rebuild "${MODE}" \ + --flake "${flake_ref}#${FLAKE_ATTR}" \ + --build-host "${FORGE_HOST}" \ + --target-host "${FORGE_HOST}" diff --git a/Scripts/hcloud-upload-nixos-image.sh b/Scripts/hcloud-upload-nixos-image.sh new file mode 100755 index 0000000..2590519 --- /dev/null +++ b/Scripts/hcloud-upload-nixos-image.sh @@ -0,0 +1,327 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" + +# shellcheck source=Scripts/_burrow-flake.sh +source "${SCRIPT_DIR}/_burrow-flake.sh" + +DEFAULT_CONFIG="burrow-forge" +DEFAULT_FLAKE="." 
+DEFAULT_LOCATION="hel1" +DEFAULT_ARCHITECTURE="x86" +DEFAULT_TOKEN_FILE="${REPO_ROOT}/intake/hetzner-api-token.txt" + +CONFIG="${HCLOUD_IMAGE_CONFIG:-${DEFAULT_CONFIG}}" +FLAKE="${HCLOUD_IMAGE_FLAKE:-${DEFAULT_FLAKE}}" +LOCATION="${HCLOUD_IMAGE_LOCATION:-${DEFAULT_LOCATION}}" +ARCHITECTURE="${HCLOUD_IMAGE_ARCHITECTURE:-${DEFAULT_ARCHITECTURE}}" +TOKEN_FILE="${HCLOUD_TOKEN_FILE:-${DEFAULT_TOKEN_FILE}}" +DESCRIPTION="${HCLOUD_IMAGE_DESCRIPTION:-}" +UPLOAD_SERVER_TYPE="${HCLOUD_IMAGE_UPLOAD_SERVER_TYPE:-}" +UPLOAD_VERBOSE="${HCLOUD_IMAGE_UPLOAD_VERBOSE:-0}" +ARTIFACT_PATH_INPUT="" +OUTPUT_HASH="" +NO_UPDATE=0 +BUILDER_SPEC="${HCLOUD_IMAGE_BUILDER_SPEC:-}" +EXTRA_LABELS=() +NIX_BUILD_FLAGS=() +BURROW_FLAKE_TMPDIRS=() +LOCAL_STORE_DIR="" + +usage() { + cat <<'EOF' +Usage: Scripts/hcloud-upload-nixos-image.sh [options] + +Build a raw Burrow NixOS image and upload it into Hetzner Cloud as a snapshot. + +Options: + --config images.-raw output to build (default: burrow-forge) + --flake Flake path to build from (default: .) 
+ --location Hetzner location for the temporary upload server (default: hel1) + --architecture CPU architecture of the image (default: x86) + --server-type Hetzner server type for the temporary upload server + --token-file Hetzner API token file (default: intake/hetzner-api-token.txt) + --artifact-path Prebuilt raw image artifact to upload directly + --output-hash Stable hash label for --artifact-path uploads + --builder-spec Complete builders string passed to nix build + --description Description for the resulting snapshot + --upload-verbose Pass -v N times to hcloud-upload-image + --label key=value Extra Hetzner image label (repeatable) + --nix-flag Extra argument passed to nix build (repeatable) + --no-update Reuse an existing snapshot with the same config/output hash + -h, --help Show this help text +EOF +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --config) + CONFIG="${2:?missing value for --config}" + shift 2 + ;; + --flake) + FLAKE="${2:?missing value for --flake}" + shift 2 + ;; + --location) + LOCATION="${2:?missing value for --location}" + shift 2 + ;; + --architecture) + ARCHITECTURE="${2:?missing value for --architecture}" + shift 2 + ;; + --server-type) + UPLOAD_SERVER_TYPE="${2:?missing value for --server-type}" + shift 2 + ;; + --token-file) + TOKEN_FILE="${2:?missing value for --token-file}" + shift 2 + ;; + --artifact-path) + ARTIFACT_PATH_INPUT="${2:?missing value for --artifact-path}" + shift 2 + ;; + --output-hash) + OUTPUT_HASH="${2:?missing value for --output-hash}" + shift 2 + ;; + --builder-spec) + BUILDER_SPEC="${2:?missing value for --builder-spec}" + shift 2 + ;; + --description) + DESCRIPTION="${2:?missing value for --description}" + shift 2 + ;; + --upload-verbose) + UPLOAD_VERBOSE="${2:?missing value for --upload-verbose}" + shift 2 + ;; + --label) + EXTRA_LABELS+=("${2:?missing value for --label}") + shift 2 + ;; + --nix-flag) + NIX_BUILD_FLAGS+=("${2:?missing value for --nix-flag}") + shift 2 + ;; + --no-update) + NO_UPDATE=1 + 
shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "unknown option: $1" >&2 + usage >&2 + exit 64 + ;; + esac +done + +cleanup() { + burrow_cleanup_flake_tmpdirs + if [[ -n "${LOCAL_STORE_DIR}" && -d "${LOCAL_STORE_DIR}" ]]; then + rm -rf "${LOCAL_STORE_DIR}" >/dev/null 2>&1 || true + fi +} +trap cleanup EXIT + +burrow_require_cmd nix +burrow_require_cmd curl +burrow_require_cmd python3 +burrow_require_cmd rsync + +if [[ ! -f "${TOKEN_FILE}" ]]; then + echo "Hetzner API token file not found: ${TOKEN_FILE}" >&2 + exit 1 +fi + +HCLOUD_TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")" +if [[ -z "${HCLOUD_TOKEN}" ]]; then + echo "Hetzner API token file is empty: ${TOKEN_FILE}" >&2 + exit 1 +fi + +flake_ref="$(burrow_prepare_flake_ref "${FLAKE}")" + +if [[ -z "${DESCRIPTION}" ]]; then + DESCRIPTION="Burrow ${CONFIG} $(date -u +%Y-%m-%dT%H:%M:%SZ)" +fi + +printf 'Building raw image for %s from %s\n' "${CONFIG}" "${flake_ref}" >&2 + +if [[ -z "${ARTIFACT_PATH_INPUT}" && -n "${BUILDER_SPEC}" && -z "${NIX_BUILD_STORE:-}" ]]; then + mkdir -p "${HOME}/.cache/burrow" + LOCAL_STORE_DIR="$(mktemp -d "${HOME}/.cache/burrow/local-store-XXXXXX")" +fi + +artifact_path="" +compression="" +output_hash="${OUTPUT_HASH}" +if [[ -n "${ARTIFACT_PATH_INPUT}" ]]; then + artifact_path="${ARTIFACT_PATH_INPUT}" + if [[ ! 
-f "${artifact_path}" ]]; then + echo "artifact path does not exist: ${artifact_path}" >&2 + exit 1 + fi + compression="$(burrow_detect_compression "${artifact_path}")" + if [[ -z "${output_hash}" ]]; then + if command -v sha256sum >/dev/null 2>&1; then + output_hash="$(sha256sum "${artifact_path}" | awk '{print $1}')" + else + output_hash="$(shasum -a 256 "${artifact_path}" | awk '{print $1}')" + fi + fi +else + nix_build_cmd=( + nix + --extra-experimental-features + "nix-command flakes" + build + "${flake_ref}#images.${CONFIG}-raw" + --no-link + --print-out-paths + ) + + if [[ -n "${BUILDER_SPEC}" ]]; then + nix_build_cmd+=(--builders "${BUILDER_SPEC}") + fi + if [[ -n "${NIX_BUILD_STORE:-}" ]]; then + nix_build_cmd+=(--store "${NIX_BUILD_STORE}") + elif [[ -n "${LOCAL_STORE_DIR}" ]]; then + nix_build_cmd+=(--store "${LOCAL_STORE_DIR}") + fi + + if [[ "${#NIX_BUILD_FLAGS[@]}" -gt 0 ]]; then + nix_build_cmd+=("${NIX_BUILD_FLAGS[@]}") + fi + + build_output="" + if ! build_output="$("${nix_build_cmd[@]}" 2>&1)"; then + printf '%s\n' "${build_output}" >&2 + exit 1 + fi + + store_path="$(printf '%s\n' "${build_output}" | tail -n1)" + if [[ -z "${store_path}" ]]; then + echo "nix build did not return a store path" >&2 + printf '%s\n' "${build_output}" >&2 + exit 1 + fi + + artifact_path="$(burrow_resolve_image_artifact "${store_path}")" + compression="$(burrow_detect_compression "${artifact_path}")" + output_hash="$(basename "${store_path}")" + output_hash="${output_hash%%-*}" +fi + +label_args=( + "burrow.nixos-config=${CONFIG}" + "burrow.nixos-output-hash=${output_hash}" +) +if [[ "${#EXTRA_LABELS[@]}" -gt 0 ]]; then + label_args+=("${EXTRA_LABELS[@]}") +fi +label_csv="$(IFS=,; printf '%s' "${label_args[*]}")" + +find_existing_image() { + HCLOUD_TOKEN="${HCLOUD_TOKEN}" \ + BURROW_LABEL_SELECTOR="burrow.nixos-config=${CONFIG},burrow.nixos-output-hash=${output_hash}" \ + python3 - <<'PY' +import json +import os +import sys +import urllib.parse +import urllib.request + 
+selector = urllib.parse.quote(os.environ["BURROW_LABEL_SELECTOR"], safe=",=") +req = urllib.request.Request( + f"https://api.hetzner.cloud/v1/images?type=snapshot&label_selector={selector}", + headers={"Authorization": f"Bearer {os.environ['HCLOUD_TOKEN']}"}, +) +with urllib.request.urlopen(req, timeout=30) as resp: + data = json.load(resp) + +images = sorted(data.get("images", []), key=lambda item: item.get("created") or "") +if images: + print(images[-1]["id"]) +PY +} + +if [[ "${NO_UPDATE}" -eq 1 ]]; then + existing_id="$(find_existing_image || true)" + if [[ -n "${existing_id}" ]]; then + printf 'Reusing existing Hetzner snapshot %s for %s\n' "${existing_id}" "${CONFIG}" >&2 + printf '%s\n' "${existing_id}" + exit 0 + fi +fi + +uploader_bin="${HCLOUD_UPLOAD_IMAGE_BIN:-}" +if [[ -z "${uploader_bin}" ]]; then + uploader_build_output="$( + nix --extra-experimental-features "nix-command flakes" build \ + "${flake_ref}#hcloud-upload-image" \ + --no-link \ + --print-out-paths 2>&1 + )" || { + printf '%s\n' "${uploader_build_output}" >&2 + exit 1 + } + uploader_bin="$(printf '%s\n' "${uploader_build_output}" | tail -n1)/bin/hcloud-upload-image" +fi + +if [[ ! 
-x "${uploader_bin}" ]]; then + echo "unable to resolve an executable hcloud-upload-image binary; set HCLOUD_UPLOAD_IMAGE_BIN explicitly" >&2 + exit 1 +fi + +upload_cmd=( + "${uploader_bin}" +) +if [[ "${UPLOAD_VERBOSE}" =~ ^[0-9]+$ ]] && [[ "${UPLOAD_VERBOSE}" -gt 0 ]]; then + for _ in $(seq 1 "${UPLOAD_VERBOSE}"); do + upload_cmd+=(-v) + done +fi +upload_cmd+=( + upload + --image-path "${artifact_path}" + --location "${LOCATION}" + --description "${DESCRIPTION}" + --labels "${label_csv}" +) +if [[ -n "${UPLOAD_SERVER_TYPE}" ]]; then + upload_cmd+=(--server-type "${UPLOAD_SERVER_TYPE}") +else + upload_cmd+=(--architecture "${ARCHITECTURE}") +fi +if [[ -n "${compression}" ]]; then + upload_cmd+=(--compression "${compression}") +fi + +printf 'Uploading %s to Hetzner Cloud via %s\n' "${artifact_path}" "${uploader_bin}" >&2 +HCLOUD_TOKEN="${HCLOUD_TOKEN}" "${upload_cmd[@]}" >&2 + +image_id="" +for _ in $(seq 1 24); do + image_id="$(find_existing_image || true)" + if [[ -n "${image_id}" ]]; then + break + fi + sleep 5 +done + +if [[ -z "${image_id}" ]]; then + echo "failed to locate uploaded Hetzner snapshot after upload completed" >&2 + exit 1 +fi + +printf '%s\n' "${image_id}" diff --git a/Scripts/hetzner-forge.sh b/Scripts/hetzner-forge.sh new file mode 100755 index 0000000..cfce7eb --- /dev/null +++ b/Scripts/hetzner-forge.sh @@ -0,0 +1,284 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" + +usage() { + cat <<'EOF' +Usage: Scripts/hetzner-forge.sh [show|create|delete|recreate|build-image|create-from-image|recreate-from-image] [options] + +Manage the Burrow forge server and its Hetzner snapshot lifecycle. + +Defaults: + action: show + server-name: burrow-forge + server-type: ccx23 + location: hel1 + image: ubuntu-24.04 + ssh keys: contact@burrow.net,agent@burrow.net + +Options: + --server-name Server name to manage. + --server-type Hetzner server type. + --location Hetzner location. + --image Image used at create time. 
+ --config Burrow image config name for snapshot lookup/build (default: burrow-forge). + --ssh-key SSH key name to attach. Repeatable. + --token-file Hetzner API token file. + --flake Flake path used by image-build actions (default: .) + --upload-location Hetzner location used for image upload (default: same as --location) + --yes Required for delete and recreate. + -h, --help Show this help text. + +Environment: + HCLOUD_TOKEN_FILE Defaults to intake/hetzner-api-token.txt +EOF +} + +ACTION="show" +SERVER_NAME="burrow-forge" +SERVER_TYPE="ccx23" +LOCATION="hel1" +IMAGE="ubuntu-24.04" +CONFIG="burrow-forge" +FLAKE="." +UPLOAD_LOCATION="" +TOKEN_FILE="${HCLOUD_TOKEN_FILE:-intake/hetzner-api-token.txt}" +YES=0 +SSH_KEYS=("contact@burrow.net" "agent@burrow.net") + +if [[ $# -gt 0 ]]; then + case "$1" in + show|create|delete|recreate|build-image|create-from-image|recreate-from-image) + ACTION="$1" + shift + ;; + esac +fi + +while [[ $# -gt 0 ]]; do + case "$1" in + --server-name) + SERVER_NAME="${2:?missing value for --server-name}" + shift 2 + ;; + --server-type) + SERVER_TYPE="${2:?missing value for --server-type}" + shift 2 + ;; + --location) + LOCATION="${2:?missing value for --location}" + shift 2 + ;; + --image) + IMAGE="${2:?missing value for --image}" + shift 2 + ;; + --config) + CONFIG="${2:?missing value for --config}" + shift 2 + ;; + --ssh-key) + SSH_KEYS+=("${2:?missing value for --ssh-key}") + shift 2 + ;; + --token-file) + TOKEN_FILE="${2:?missing value for --token-file}" + shift 2 + ;; + --flake) + FLAKE="${2:?missing value for --flake}" + shift 2 + ;; + --upload-location) + UPLOAD_LOCATION="${2:?missing value for --upload-location}" + shift 2 + ;; + --yes) + YES=1 + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "Unknown argument: $1" >&2 + usage >&2 + exit 2 + ;; + esac +done + +if [[ ! 
-f "${TOKEN_FILE}" ]]; then + echo "Hetzner API token file not found: ${TOKEN_FILE}" >&2 + exit 1 +fi + +if [[ -z "${UPLOAD_LOCATION}" ]]; then + UPLOAD_LOCATION="${LOCATION}" +fi + +if [[ "${ACTION}" == "delete" || "${ACTION}" == "recreate" || "${ACTION}" == "recreate-from-image" ]] && [[ ${YES} -ne 1 ]]; then + echo "--yes is required for ${ACTION}" >&2 + exit 1 +fi + +latest_snapshot_id() { + HCLOUD_TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")" \ + BURROW_CONFIG="${CONFIG}" \ + python3 - <<'PY' +import json +import os +import urllib.parse +import urllib.request + +selector = urllib.parse.quote(f"burrow.nixos-config={os.environ['BURROW_CONFIG']}", safe=",=") +req = urllib.request.Request( + f"https://api.hetzner.cloud/v1/images?type=snapshot&label_selector={selector}", + headers={"Authorization": f"Bearer {os.environ['HCLOUD_TOKEN']}"}, +) +with urllib.request.urlopen(req, timeout=30) as resp: + data = json.load(resp) +images = sorted(data.get("images", []), key=lambda item: item.get("created") or "") +if images: + print(images[-1]["id"]) +PY +} + +if [[ "${ACTION}" == "build-image" ]]; then + exec "${SCRIPT_DIR}/nsc-build-and-upload-image.sh" \ + --config "${CONFIG}" \ + --flake "${FLAKE}" \ + --location "${UPLOAD_LOCATION}" \ + --upload-server-type "${SERVER_TYPE}" \ + --token-file "${TOKEN_FILE}" +fi + +if [[ "${ACTION}" == "create-from-image" || "${ACTION}" == "recreate-from-image" ]]; then + if [[ "${IMAGE}" == "ubuntu-24.04" ]]; then + IMAGE="$(latest_snapshot_id)" + fi + if [[ -z "${IMAGE}" ]]; then + echo "No Burrow snapshot found for config ${CONFIG}. Run build-image first." 
>&2 + exit 1 + fi + if [[ "${ACTION}" == "create-from-image" ]]; then + ACTION="create" + else + ACTION="recreate" + fi +fi + +ssh_keys_csv="" +for key in "${SSH_KEYS[@]}"; do + if [[ -n "${ssh_keys_csv}" ]]; then + ssh_keys_csv+="," + fi + ssh_keys_csv+="${key}" +done + +export BURROW_HCLOUD_ACTION="${ACTION}" +export BURROW_HCLOUD_SERVER_NAME="${SERVER_NAME}" +export BURROW_HCLOUD_SERVER_TYPE="${SERVER_TYPE}" +export BURROW_HCLOUD_LOCATION="${LOCATION}" +export BURROW_HCLOUD_IMAGE="${IMAGE}" +export BURROW_HCLOUD_TOKEN_FILE="${TOKEN_FILE}" +export BURROW_HCLOUD_SSH_KEYS="${ssh_keys_csv}" + +python3 - <<'PY' +import json +import os +import sys +from pathlib import Path + +import requests + +base = "https://api.hetzner.cloud/v1" +action = os.environ["BURROW_HCLOUD_ACTION"] +server_name = os.environ["BURROW_HCLOUD_SERVER_NAME"] +server_type = os.environ["BURROW_HCLOUD_SERVER_TYPE"] +location = os.environ["BURROW_HCLOUD_LOCATION"] +image = os.environ["BURROW_HCLOUD_IMAGE"] +token = Path(os.environ["BURROW_HCLOUD_TOKEN_FILE"]).read_text(encoding="utf-8").strip() +ssh_keys = [key for key in os.environ["BURROW_HCLOUD_SSH_KEYS"].split(",") if key] + +session = requests.Session() +session.headers.update({"Authorization": f"Bearer {token}", "Content-Type": "application/json"}) + + +def request(method: str, path: str, **kwargs) -> requests.Response: + response = session.request(method, f"{base}{path}", timeout=30, **kwargs) + response.raise_for_status() + return response + + +def find_server(): + response = request("GET", "/servers", params={"name": server_name}) + data = response.json() + for server in data.get("servers", []): + if server.get("name") == server_name: + return server + return None + + +def summarize(server): + ipv4 = (((server.get("public_net") or {}).get("ipv4")) or {}).get("ip") + image_name = ((server.get("image") or {}).get("name")) or "" + summary = { + "id": server.get("id"), + "name": server.get("name"), + "status": server.get("status"), + 
"server_type": ((server.get("server_type") or {}).get("name")), + "location": ((server.get("location") or {}).get("name")), + "image": image_name, + "ipv4": ipv4, + "created": server.get("created"), + } + print(json.dumps(summary, indent=2)) + + +server = find_server() + +if action == "show": + if server is None: + print(json.dumps({"name": server_name, "present": False}, indent=2)) + else: + summarize(server) + sys.exit(0) + +if action == "delete": + if server is None: + print(json.dumps({"name": server_name, "deleted": False, "reason": "not found"}, indent=2)) + sys.exit(0) + request("DELETE", f"/servers/{server['id']}") + print(json.dumps({"name": server_name, "deleted": True, "id": server["id"]}, indent=2)) + sys.exit(0) + +if action == "recreate" and server is not None: + request("DELETE", f"/servers/{server['id']}") + server = None + +if action in {"create", "recreate"}: + if server is not None: + summarize(server) + sys.exit(0) + + payload = { + "name": server_name, + "server_type": server_type, + "location": location, + "image": image, + "ssh_keys": ssh_keys, + "labels": { + "project": "burrow", + "role": "forge", + }, + } + response = request("POST", "/servers", json=payload) + created = response.json()["server"] + summarize(created) + sys.exit(0) + +raise SystemExit(f"unsupported action: {action}") +PY diff --git a/Scripts/nsc-build-and-upload-image.sh b/Scripts/nsc-build-and-upload-image.sh new file mode 100755 index 0000000..6fb99a9 --- /dev/null +++ b/Scripts/nsc-build-and-upload-image.sh @@ -0,0 +1,542 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." 
&& pwd)" + +# shellcheck source=Scripts/_burrow-flake.sh +source "${SCRIPT_DIR}/_burrow-flake.sh" + +CONFIG="${HCLOUD_IMAGE_CONFIG:-burrow-forge}" +FLAKE="${HCLOUD_IMAGE_FLAKE:-.}" +LOCATION="${HCLOUD_IMAGE_LOCATION:-hel1}" +TOKEN_FILE="${HCLOUD_TOKEN_FILE:-${REPO_ROOT}/intake/hetzner-api-token.txt}" +NSC_SSH_HOST="${NSC_SSH_HOST:-ssh.ord2.namespace.so}" +NSC_MACHINE_TYPE="${NSC_MACHINE_TYPE:-linux/amd64:32x64}" +NSC_BUILDER_DURATION="${NSC_BUILDER_DURATION:-4h}" +NSC_BUILDER_JOBS="${NSC_BUILDER_JOBS:-32}" +NSC_BUILDER_FEATURES="${NSC_BUILDER_FEATURES:-kvm,big-parallel}" +NSC_BIN="${NSC_BIN:-}" +REMOTE_COMPRESSION="${HCLOUD_IMAGE_REMOTE_COMPRESSION:-auto}" +UPLOAD_SERVER_TYPE="${HCLOUD_IMAGE_UPLOAD_SERVER_TYPE:-}" +KEEP_TMPDIR="${HCLOUD_IMAGE_KEEP_TMPDIR:-0}" +NO_UPDATE=0 +NIX_BUILD_FLAGS=() +EXTRA_LABELS=() +BURROW_FLAKE_TMPDIRS=() +BUILDER_ID="" + +usage() { + cat <<'EOF' +Usage: Scripts/nsc-build-and-upload-image.sh [options] + +Create a temporary Namespace Linux builder, build the Burrow raw image on it, +and upload the resulting artifact to Hetzner Cloud. + +Options: + --config images.-raw output to build (default: burrow-forge) + --flake Flake path to build from (default: .) + --location Hetzner upload location (default: hel1) + --token-file Hetzner API token file (default: intake/hetzner-api-token.txt) + --machine-type Namespace machine type (default: linux/amd64:32x64) + --ssh-host Namespace SSH endpoint (default: ssh.ord2.namespace.so) + --duration Namespace builder lifetime (default: 4h) + --builder-jobs Nix builder job count advertised to the local client + --builder-features Comma-separated Nix system features (default: "kvm,big-parallel") + --remote-compression + Compress raw/image artifacts on the Namespace builder + before copy-back. 
Modes: auto, none, xz, zstd + (default: auto) + --upload-server-type + Hetzner server type for the temporary upload host + --label key=value Extra Hetzner snapshot label (repeatable) + --nix-flag Extra argument passed to nix build (repeatable) + --no-update Reuse an existing snapshot with the same config/output hash + -h, --help Show this help text +EOF +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --config) + CONFIG="${2:?missing value for --config}" + shift 2 + ;; + --flake) + FLAKE="${2:?missing value for --flake}" + shift 2 + ;; + --location) + LOCATION="${2:?missing value for --location}" + shift 2 + ;; + --token-file) + TOKEN_FILE="${2:?missing value for --token-file}" + shift 2 + ;; + --machine-type) + NSC_MACHINE_TYPE="${2:?missing value for --machine-type}" + shift 2 + ;; + --ssh-host) + NSC_SSH_HOST="${2:?missing value for --ssh-host}" + shift 2 + ;; + --duration) + NSC_BUILDER_DURATION="${2:?missing value for --duration}" + shift 2 + ;; + --builder-jobs) + NSC_BUILDER_JOBS="${2:?missing value for --builder-jobs}" + shift 2 + ;; + --builder-features) + NSC_BUILDER_FEATURES="${2:?missing value for --builder-features}" + shift 2 + ;; + --remote-compression) + REMOTE_COMPRESSION="${2:?missing value for --remote-compression}" + shift 2 + ;; + --upload-server-type) + UPLOAD_SERVER_TYPE="${2:?missing value for --upload-server-type}" + shift 2 + ;; + --label) + EXTRA_LABELS+=("${2:?missing value for --label}") + shift 2 + ;; + --nix-flag) + NIX_BUILD_FLAGS+=("${2:?missing value for --nix-flag}") + shift 2 + ;; + --no-update) + NO_UPDATE=1 + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "unknown option: $1" >&2 + usage >&2 + exit 64 + ;; + esac +done + +cleanup() { + if [[ -n "${BUILDER_ID}" && -n "${NSC_BIN}" ]]; then + "${NSC_BIN}" destroy "${BUILDER_ID}" --force >/dev/null 2>&1 || true + fi + burrow_cleanup_flake_tmpdirs + if [[ "${KEEP_TMPDIR}" != "1" && -n "${TMPDIR_BURROW_NSC:-}" && -d "${TMPDIR_BURROW_NSC}" ]]; then + rm -rf 
"${TMPDIR_BURROW_NSC}" + fi +} +trap cleanup EXIT + +burrow_require_cmd nix +burrow_require_cmd curl +burrow_require_cmd python3 +burrow_require_cmd ssh +burrow_require_cmd ssh-keygen +burrow_require_cmd ssh-keyscan +burrow_require_cmd tar + +flake_ref="$(burrow_prepare_flake_ref "${FLAKE}")" + +if [[ -z "${NSC_BIN}" ]]; then + nsc_build_output="$( + nix --extra-experimental-features "nix-command flakes" build \ + "${flake_ref}#nsc" \ + --no-link \ + --print-out-paths 2>&1 + )" || { + printf '%s\n' "${nsc_build_output}" >&2 + exit 1 + } + NSC_BIN="$(printf '%s\n' "${nsc_build_output}" | tail -n1)/bin/nsc" +fi + +if [[ ! -x "${NSC_BIN}" ]]; then + echo "unable to resolve an executable nsc binary; set NSC_BIN explicitly" >&2 + exit 1 +fi + +if [[ -n "${NSC_SESSION:-}" && ! -f "${HOME}/.ns/session" ]]; then + mkdir -p "${HOME}/.ns" + printf '%s\n' "${NSC_SESSION}" > "${HOME}/.ns/session" + chmod 600 "${HOME}/.ns/session" +fi + +"${NSC_BIN}" auth check-login --duration 20m >/dev/null +"${NSC_BIN}" version >/dev/null || true + +TMPDIR_BURROW_NSC="$(mktemp -d "${HOME}/.cache/burrow/nsc-XXXXXX")" +ssh_key="${TMPDIR_BURROW_NSC}/builder" +known_hosts="${TMPDIR_BURROW_NSC}/known_hosts" +id_file="${TMPDIR_BURROW_NSC}/builder.id" + +ssh-keygen -q -t ed25519 -N "" -f "${ssh_key}" +ssh-keyscan -H "${NSC_SSH_HOST}" > "${known_hosts}" + +ssh_base=( + ssh + -i "${ssh_key}" + -o UserKnownHostsFile="${known_hosts}" + -o StrictHostKeyChecking=yes +) + +wait_for_ssh() { + local instance_id="$1" + for _ in $(seq 1 30); do + if "${ssh_base[@]}" -q "${instance_id}@${NSC_SSH_HOST}" true >/dev/null 2>&1; then + return 0 + fi + sleep 5 + done + return 1 +} + +configure_builder() { + local instance_id="$1" + "${ssh_base[@]}" "${instance_id}@${NSC_SSH_HOST}" <<'EOF' +set -euo pipefail + +if ! 
command -v nix >/dev/null 2>&1; then + curl -fsSL https://install.determinate.systems/nix | sh -s -- install linux --determinate --init none --no-confirm +fi + +if [ -e /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]; then + . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh +fi + +mkdir -p /etc/nix +cat </etc/nix/nix.conf +build-users-group = +trusted-users = root $USER +auto-optimise-store = true +substituters = https://cache.nixos.org +builders-use-substitutes = true +CFG + +mkdir -p /nix/var/nix/daemon-socket + +if ! pgrep -x nix-daemon >/dev/null 2>&1; then + nohup nix-daemon >/dev/null 2>&1 /dev/null 2>&1; then + nohup nix-daemon >/dev/null 2>&1 &2 +exit 1 +EOF +} + +printf 'Creating temporary Namespace builder (%s)\n' "${NSC_MACHINE_TYPE}" >&2 +"${NSC_BIN}" create \ + --bare \ + --machine_type "${NSC_MACHINE_TYPE}" \ + --ssh_key "${ssh_key}.pub" \ + --duration "${NSC_BUILDER_DURATION}" \ + --label "burrow=true" \ + --label "purpose=hetzner-image-build" \ + --output_to "${id_file}" \ + >/dev/null + +BUILDER_ID="$(tr -d '\r\n' < "${id_file}")" +if [[ -z "${BUILDER_ID}" ]]; then + echo "nsc create did not return a builder id" >&2 + exit 1 +fi + +printf 'Waiting for Namespace builder %s\n' "${BUILDER_ID}" >&2 +wait_for_ssh "${BUILDER_ID}" +configure_builder "${BUILDER_ID}" >&2 + +remote_root="burrow-image-build-${BUILDER_ID}" +remote_flake_path="./${remote_root}" +local_flake_dir="${flake_ref#path:}" +remote_build_stdout="/tmp/burrow-image-build-${BUILDER_ID}.stdout" +remote_build_stderr="/tmp/burrow-image-build-${BUILDER_ID}.stderr" + +printf 'Syncing flake to Namespace builder %s\n' "${BUILDER_ID}" >&2 +tar -C "${local_flake_dir}" -cf - . 
\ + | "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" "rm -rf '${remote_root}' && mkdir -p '${remote_root}' && tar -C '${remote_root}' -xf -" + +run_remote_build() { + local remote_cmd=( + env + "CONFIG=${CONFIG}" + "REMOTE_FLAKE_PATH=${remote_flake_path}" + "REMOTE_BUILD_STDOUT=${remote_build_stdout}" + "REMOTE_BUILD_STDERR=${remote_build_stderr}" + bash + -s + -- + ) + if [[ "${#NIX_BUILD_FLAGS[@]}" -gt 0 ]]; then + remote_cmd+=("${NIX_BUILD_FLAGS[@]}") + fi + + "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" "${remote_cmd[@]}" <<'EOF' +set -euo pipefail + +config="${CONFIG}" +remote_flake_path="${REMOTE_FLAKE_PATH}" +remote_build_stdout="${REMOTE_BUILD_STDOUT}" +remote_build_stderr="${REMOTE_BUILD_STDERR}" +nix_build_cmd=( + nix + --extra-experimental-features + "nix-command flakes" + build + "path:${remote_flake_path}#images.${config}-raw" + --no-link + --print-out-paths +) +if [[ "$#" -gt 0 ]]; then + nix_build_cmd+=("$@") +fi + +rm -f "${remote_build_stdout}" "${remote_build_stderr}" +if ! "${nix_build_cmd[@]}" >"${remote_build_stdout}" 2>"${remote_build_stderr}"; then + cat "${remote_build_stderr}" >&2 + exit 1 +fi +EOF +} + +resolve_remote_store_path() { + "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \ + env "REMOTE_BUILD_STDOUT=${remote_build_stdout}" "REMOTE_BUILD_STDERR=${remote_build_stderr}" bash -s <<'EOF' +set -euo pipefail + +remote_build_stdout="${REMOTE_BUILD_STDOUT}" +remote_build_stderr="${REMOTE_BUILD_STDERR}" + +if [[ ! 
-s "${remote_build_stdout}" ]]; then + echo "remote build stdout file is missing or empty: ${remote_build_stdout}" >&2 + if [[ -s "${remote_build_stderr}" ]]; then + cat "${remote_build_stderr}" >&2 + fi + exit 1 +fi + +tail -n1 "${remote_build_stdout}" +EOF +} + +resolve_remote_artifact_path() { + local store_path="$1" + "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \ + env "REMOTE_STORE_PATH=${store_path}" bash -s <<'EOF' +set -euo pipefail + +store_path="${REMOTE_STORE_PATH}" +artifact_path="${store_path}" +if [[ -d "${artifact_path}" ]]; then + artifact_path="$(find "${artifact_path}" -type f \( -name '*.raw' -o -name '*.raw.*' -o -name '*.img' -o -name '*.img.*' \) | sort | head -n1)" +fi +if [[ -z "${artifact_path}" || ! -f "${artifact_path}" ]]; then + echo "unable to locate image artifact under ${store_path}" >&2 + exit 1 +fi + +printf '%s\n' "${artifact_path}" +EOF +} + +plan_remote_artifact_transfer() { + local artifact_path="$1" + local compression_mode="$2" + + "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \ + env "REMOTE_ARTIFACT_PATH=${artifact_path}" "REMOTE_COMPRESSION=${compression_mode}" bash -s <<'EOF' +set -euo pipefail + +artifact_path="${REMOTE_ARTIFACT_PATH}" +compression_mode="${REMOTE_COMPRESSION}" + +case "${artifact_path}" in + *.bz2) + printf '%s\tbz2\n' "$(basename "${artifact_path}")" + exit 0 + ;; + *.xz) + printf '%s\txz\n' "$(basename "${artifact_path}")" + exit 0 + ;; + *.zst|*.zstd) + printf '%s\tzstd\n' "$(basename "${artifact_path}")" + exit 0 + ;; +esac + +select_compression() { + case "${compression_mode}" in + auto) + if command -v zstd >/dev/null 2>&1; then + printf 'zstd\n' + return 0 + fi + if command -v xz >/dev/null 2>&1; then + printf 'xz\n' + return 0 + fi + printf 'none\n' + ;; + none|xz|zstd) + printf '%s\n' "${compression_mode}" + ;; + *) + echo "unsupported remote compression mode: ${compression_mode}" >&2 + exit 1 + ;; + esac +} + +mode="$(select_compression)" +case "${mode}" in + none) + printf '%s\tnone\n' 
"$(basename "${artifact_path}")" + ;; + zstd) + printf '%s.zst\tzstd\n' "$(basename "${artifact_path}")" + ;; + xz) + printf '%s.xz\txz\n' "$(basename "${artifact_path}")" + ;; +esac +EOF +} + +stream_remote_artifact() { + local artifact_path="$1" + local compression_mode="$2" + local destination="$3" + + "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \ + env "REMOTE_ARTIFACT_PATH=${artifact_path}" "REMOTE_COMPRESSION=${compression_mode}" bash -s <<'EOF' > "${destination}" +set -euo pipefail + +artifact_path="${REMOTE_ARTIFACT_PATH}" +compression_mode="${REMOTE_COMPRESSION}" + +case "${artifact_path}" in + *.bz2|*.xz|*.zst|*.zstd) + cat "${artifact_path}" + exit 0 + ;; +esac + +select_compression() { + case "${compression_mode}" in + auto) + if command -v zstd >/dev/null 2>&1; then + printf 'zstd\n' + return 0 + fi + if command -v xz >/dev/null 2>&1; then + printf 'xz\n' + return 0 + fi + printf 'none\n' + ;; + none|xz|zstd) + printf '%s\n' "${compression_mode}" + ;; + *) + echo "unsupported remote compression mode: ${compression_mode}" >&2 + exit 1 + ;; + esac +} + +mode="$(select_compression)" +case "${mode}" in + none) + cat "${artifact_path}" + ;; + zstd) + if ! command -v zstd >/dev/null 2>&1; then + echo "zstd requested but not available on Namespace builder" >&2 + exit 1 + fi + zstd -T0 -19 -c "${artifact_path}" + ;; + xz) + if ! 
command -v xz >/dev/null 2>&1; then + echo "xz requested but not available on Namespace builder" >&2 + exit 1 + fi + xz -T0 -c "${artifact_path}" + ;; +esac +EOF +} + +printf 'Building raw image on Namespace builder %s\n' "${BUILDER_ID}" >&2 +run_remote_build + +remote_store_path="$(resolve_remote_store_path)" +if [[ -z "${remote_store_path}" ]]; then + echo "remote build did not return a store path" >&2 + exit 1 +fi + +remote_artifact_path="$(resolve_remote_artifact_path "${remote_store_path}")" +if [[ -z "${remote_artifact_path}" ]]; then + echo "remote build did not return an artifact path" >&2 + exit 1 +fi + +transfer_plan="$(plan_remote_artifact_transfer "${remote_artifact_path}" "${REMOTE_COMPRESSION}")" +local_artifact_name="$(printf '%s\n' "${transfer_plan}" | cut -f1)" +transfer_compression="$(printf '%s\n' "${transfer_plan}" | cut -f2)" +if [[ -z "${local_artifact_name}" || -z "${transfer_compression}" ]]; then + echo "unable to determine artifact transfer plan for ${remote_artifact_path}" >&2 + exit 1 +fi + +output_hash="$(basename "${remote_store_path}")" +output_hash="${output_hash%%-*}" +local_artifact="${TMPDIR_BURROW_NSC}/${local_artifact_name}" + +printf 'Streaming built artifact back from Namespace builder %s (%s)\n' "${BUILDER_ID}" "${transfer_compression}" >&2 +stream_remote_artifact "${remote_artifact_path}" "${REMOTE_COMPRESSION}" "${local_artifact}" + +cmd=( + "${SCRIPT_DIR}/hcloud-upload-nixos-image.sh" + --config "${CONFIG}" + --flake "${FLAKE}" + --location "${LOCATION}" + --token-file "${TOKEN_FILE}" + --artifact-path "${local_artifact}" + --output-hash "${output_hash}" +) + +if [[ -n "${UPLOAD_SERVER_TYPE}" ]]; then + cmd+=(--server-type "${UPLOAD_SERVER_TYPE}") +fi + +if [[ "${NO_UPDATE}" -eq 1 ]]; then + cmd+=(--no-update) +fi +if [[ "${#EXTRA_LABELS[@]}" -gt 0 ]]; then + for label in "${EXTRA_LABELS[@]}"; do + cmd+=(--label "${label}") + done +fi + +"${cmd[@]}" diff --git a/Scripts/provision-forgejo-nsc.sh 
b/Scripts/provision-forgejo-nsc.sh new file mode 100755 index 0000000..890d9a2 --- /dev/null +++ b/Scripts/provision-forgejo-nsc.sh @@ -0,0 +1,237 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" + +# shellcheck source=Scripts/_burrow-flake.sh +source "${SCRIPT_DIR}/_burrow-flake.sh" + +usage() { + cat <<'EOF' +Usage: Scripts/provision-forgejo-nsc.sh [options] + +Generate Burrow forgejo-nsc runtime inputs in intake/ and optionally refresh the +Namespace token from the currently logged-in namespace account. + +Options: + --host SSH target used to mint the Forgejo PAT. + Default: root@git.burrow.net + --ssh-key SSH private key for the forge host. + Default: intake/agent_at_burrow_net_ed25519 + --nsc-bin Override the nsc binary. + --no-refresh-token Reuse intake/forgejo_nsc_token.txt if it already exists. + --token-name Forgejo PAT name prefix (default: forgejo-nsc) + --contact-user Forgejo username used for PAT creation (default: contact) + --scope-owner Forgejo org/user owner for the default NSC scope (default: hackclub) + --scope-name Forgejo repository name for the default NSC scope (default: burrow) + -h, --help Show this help text. 
+EOF +} + +HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" +SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" +NSC_BIN="${NSC_BIN:-}" +KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" +REFRESH_TOKEN=1 +TOKEN_NAME_PREFIX="${FORGEJO_PAT_NAME:-forgejo-nsc}" +CONTACT_USER="${FORGEJO_CONTACT_USER:-contact}" +SCOPE_OWNER="${FORGEJO_SCOPE_OWNER:-hackclub}" +SCOPE_NAME="${FORGEJO_SCOPE_NAME:-burrow}" +BURROW_FLAKE_TMPDIRS=() + +cleanup() { + burrow_cleanup_flake_tmpdirs +} +trap cleanup EXIT + +while [[ $# -gt 0 ]]; do + case "$1" in + --host) + HOST="${2:?missing value for --host}" + shift 2 + ;; + --ssh-key) + SSH_KEY="${2:?missing value for --ssh-key}" + shift 2 + ;; + --nsc-bin) + NSC_BIN="${2:?missing value for --nsc-bin}" + shift 2 + ;; + --no-refresh-token) + REFRESH_TOKEN=0 + shift + ;; + --token-name) + TOKEN_NAME_PREFIX="${2:?missing value for --token-name}" + shift 2 + ;; + --contact-user) + CONTACT_USER="${2:?missing value for --contact-user}" + shift 2 + ;; + --scope-owner) + SCOPE_OWNER="${2:?missing value for --scope-owner}" + shift 2 + ;; + --scope-name) + SCOPE_NAME="${2:?missing value for --scope-name}" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "unknown option: $1" >&2 + usage >&2 + exit 64 + ;; + esac +done + +mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")" + +burrow_require_cmd nix +burrow_require_cmd ssh +burrow_require_cmd python3 + +if [[ ! 
-f "${SSH_KEY}" ]]; then + echo "forge SSH key not found: ${SSH_KEY}" >&2 + exit 1 +fi + +mkdir -p "${REPO_ROOT}/intake" +chmod 700 "${REPO_ROOT}/intake" + +flake_ref="$(burrow_prepare_flake_ref "${REPO_ROOT}")" +if [[ -z "${NSC_BIN}" ]]; then + if command -v nsc >/dev/null 2>&1; then + NSC_BIN="$(command -v nsc)" + else + nsc_build_output="$( + nix --extra-experimental-features "nix-command flakes" build \ + "${flake_ref}#nsc" \ + --no-link \ + --print-out-paths 2>&1 + )" || { + printf '%s\n' "${nsc_build_output}" >&2 + exit 1 + } + NSC_BIN="$(printf '%s\n' "${nsc_build_output}" | tail -n1)/bin/nsc" + fi +fi + +if [[ ! -x "${NSC_BIN}" ]]; then + echo "unable to resolve an executable nsc binary; set NSC_BIN explicitly" >&2 + exit 1 +fi + +token_file="${REPO_ROOT}/intake/forgejo_nsc_token.txt" +dispatcher_out="${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml" +autoscaler_out="${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml" +dispatcher_src="${REPO_ROOT}/services/forgejo-nsc/deploy/dispatcher.yaml" +autoscaler_src="${REPO_ROOT}/services/forgejo-nsc/deploy/autoscaler.yaml" + +if [[ "${REFRESH_TOKEN}" -eq 1 || ! 
-s "${token_file}" ]]; then + "${NSC_BIN}" auth check-login --duration 20m >/dev/null + "${NSC_BIN}" auth generate-dev-token --output_to "${token_file}" >/dev/null + chmod 600 "${token_file}" +fi + +webhook_secret="$(python3 - <<'PY' +import secrets +print(secrets.token_hex(32)) +PY +)" + +token_name="${TOKEN_NAME_PREFIX}-$(date -u +%Y%m%dT%H%M%SZ)" +forgejo_pat="$( + ssh \ + -i "${SSH_KEY}" \ + -o IdentitiesOnly=yes \ + -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \ + -o StrictHostKeyChecking=accept-new \ + "${HOST}" \ + "set -euo pipefail; forgejo_bin=\$(systemctl show -p ExecStart forgejo.service --value | sed -E 's/^\\{ path=([^ ;]+).*/\\1/'); sudo -u forgejo \"\${forgejo_bin}\" --config /var/lib/forgejo/custom/conf/app.ini --custom-path /var/lib/forgejo/custom --work-path /var/lib/forgejo admin user generate-access-token --username '${CONTACT_USER}' --scopes all --raw --token-name '${token_name}'" \ + | tr -d '\r\n' +)" + +if [[ -z "${forgejo_pat}" ]]; then + echo "failed to mint Forgejo PAT on ${HOST}" >&2 + exit 1 +fi + +ssh \ + -i "${SSH_KEY}" \ + -o IdentitiesOnly=yes \ + -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \ + -o StrictHostKeyChecking=accept-new \ + "${HOST}" \ + 'bash -s' </tmp/forgejo-provision-org.json <&2 + cat /tmp/forgejo-provision-response.json >&2 + exit 1 + fi +fi + +repo_code="\$(api "\${base_url}/api/v1/repos/\${scope_owner}/\${scope_name}")" +if [[ "\${repo_code}" == "404" ]]; then + cat >/tmp/forgejo-provision-repo.json <&2 + cat /tmp/forgejo-provision-response.json >&2 + exit 1 + fi +fi +EOF + +FORGEJO_PAT="${forgejo_pat}" \ +WEBHOOK_SECRET="${webhook_secret}" \ +DISPATCHER_SRC="${dispatcher_src}" \ +AUTOSCALER_SRC="${autoscaler_src}" \ +DISPATCHER_OUT="${dispatcher_out}" \ +AUTOSCALER_OUT="${autoscaler_out}" \ +python3 - <<'PY' +import os +from pathlib import Path + +def render(src: str, dst: str) -> None: + text = Path(src).read_text(encoding="utf-8") + text = text.replace("PENDING-FORGEJO-PAT", os.environ["FORGEJO_PAT"]) + text = 
text.replace("PENDING-WEBHOOK-SECRET", os.environ["WEBHOOK_SECRET"]) + Path(dst).write_text(text, encoding="utf-8") + +render(os.environ["DISPATCHER_SRC"], os.environ["DISPATCHER_OUT"]) +render(os.environ["AUTOSCALER_SRC"], os.environ["AUTOSCALER_OUT"]) +PY + +chmod 600 "${dispatcher_out}" "${autoscaler_out}" + +echo "Rendered intake/forgejo_nsc_token.txt, intake/forgejo_nsc_dispatcher.yaml, and intake/forgejo_nsc_autoscaler.yaml." +echo "Minted Forgejo PAT ${token_name} for ${CONTACT_USER} on ${HOST}." diff --git a/Scripts/sync-forgejo-nsc-config.sh b/Scripts/sync-forgejo-nsc-config.sh new file mode 100755 index 0000000..77581f8 --- /dev/null +++ b/Scripts/sync-forgejo-nsc-config.sh @@ -0,0 +1,132 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat <<'EOF' +Usage: Scripts/sync-forgejo-nsc-config.sh [options] + +Copy Burrow forgejo-nsc runtime inputs from intake/ onto the forge host and +restart the dispatcher/autoscaler units. + +Options: + --host SSH target (default: root@git.burrow.net) + --ssh-key SSH private key (default: intake/agent_at_burrow_net_ed25519) + --rotate-pat Re-render the intake files before syncing. + --no-restart Copy files only. + -h, --help Show this help text. +EOF +} + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." 
&& pwd)" + +HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" +SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" +KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" +ROTATE_PAT=0 +NO_RESTART=0 + +while [[ $# -gt 0 ]]; do + case "$1" in + --host) + HOST="${2:?missing value for --host}" + shift 2 + ;; + --ssh-key) + SSH_KEY="${2:?missing value for --ssh-key}" + shift 2 + ;; + --rotate-pat) + ROTATE_PAT=1 + shift + ;; + --no-restart) + NO_RESTART=1 + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "unknown option: $1" >&2 + usage >&2 + exit 64 + ;; + esac +done + +mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")" + +burrow_require_cmd() { + if ! command -v "$1" >/dev/null 2>&1; then + echo "missing required command: $1" >&2 + exit 1 + fi +} + +burrow_require_cmd ssh +burrow_require_cmd scp + +if [[ ! -f "${SSH_KEY}" ]]; then + echo "forge SSH key not found: ${SSH_KEY}" >&2 + exit 1 +fi + +if [[ "${ROTATE_PAT}" -eq 1 ]]; then + "${SCRIPT_DIR}/provision-forgejo-nsc.sh" --host "${HOST}" --ssh-key "${SSH_KEY}" +fi + +token_file="${REPO_ROOT}/intake/forgejo_nsc_token.txt" +dispatcher_file="${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml" +autoscaler_file="${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml" + +for path in "${token_file}" "${dispatcher_file}" "${autoscaler_file}"; do + if [[ ! 
-s "${path}" ]]; then + echo "required runtime input missing or empty: ${path}" >&2 + exit 1 + fi +done + +ssh_opts=( + -i "${SSH_KEY}" + -o IdentitiesOnly=yes + -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" + -o StrictHostKeyChecking=accept-new +) + +remote_tmp="$(ssh "${ssh_opts[@]}" "${HOST}" "mktemp -d")" +cleanup() { + if [[ -n "${remote_tmp:-}" ]]; then + ssh "${ssh_opts[@]}" "${HOST}" "rm -rf '${remote_tmp}'" >/dev/null 2>&1 || true + fi +} +trap cleanup EXIT + +scp "${ssh_opts[@]}" \ + "${token_file}" \ + "${dispatcher_file}" \ + "${autoscaler_file}" \ + "${HOST}:${remote_tmp}/" + +ssh "${ssh_opts[@]}" "${HOST}" " + set -euo pipefail + install -d -m 0755 /var/lib/burrow/intake + install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${token_file}")' /var/lib/burrow/intake/forgejo_nsc_token.txt + install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${dispatcher_file}")' /var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml + install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${autoscaler_file}")' /var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml +" + +if [[ "${NO_RESTART}" -eq 0 ]]; then + ssh "${ssh_opts[@]}" "${HOST}" " + set -euo pipefail + systemctl restart forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service + systemctl is-active forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service + ls -l \ + /var/lib/burrow/intake/forgejo_nsc_token.txt \ + /var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml \ + /var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml + " +fi + +echo "forgejo-nsc runtime sync complete (host=${HOST}, restarted=$((1 - NO_RESTART)))." 
diff --git a/Tools/forwardemail-custom-s3.sh b/Tools/forwardemail-custom-s3.sh new file mode 100755 index 0000000..5f39ddd --- /dev/null +++ b/Tools/forwardemail-custom-s3.sh @@ -0,0 +1,171 @@ +#!/usr/bin/env bash + +set -euo pipefail +umask 077 + +usage() { + cat <<'EOF' +Usage: + Tools/forwardemail-custom-s3.sh \ + --domain burrow.net \ + --api-token-file intake/forwardemail_api_token.txt \ + --s3-endpoint https:// \ + --s3-region \ + --s3-bucket \ + --s3-access-key-file intake/hetzner-s3-user.txt \ + --s3-secret-key-file intake/hetzner-s3-secret.txt + +Options: + --domain Forward Email domain to update. + --api-token-file File containing the Forward Email API token. + --s3-endpoint S3-compatible endpoint URL. + --s3-region S3 region string expected by Forward Email. + --s3-bucket Bucket used for alias backup uploads. + --s3-access-key-file File containing the S3 access key id. + --s3-secret-key-file File containing the S3 secret access key. + --test-only Skip the update call and only test the saved connection. + --help Show this help text. + +Notes: + - Secrets are passed to curl through a temporary config file to avoid putting + them in the process list. + - By default the script updates the domain settings and then calls + /test-s3-connection. + - For Hetzner Object Storage, use the regional S3 endpoint such as + https://hel1.your-objectstorage.com, not an account alias endpoint. 
+EOF +} + +fail() { + printf 'error: %s\n' "$*" >&2 + exit 1 +} + +require_file() { + local path="$1" + [[ -f "$path" ]] || fail "missing file: $path" +} + +read_secret() { + local path="$1" + local value + value="$(tr -d '\r\n' < "$path")" + [[ -n "$value" ]] || fail "empty secret file: $path" + printf '%s' "$value" +} + +domain="" +api_token_file="" +s3_endpoint="" +s3_region="" +s3_bucket="" +s3_access_key_file="" +s3_secret_key_file="" +test_only=false + +while [[ $# -gt 0 ]]; do + case "$1" in + --domain) + domain="${2:-}" + shift 2 + ;; + --api-token-file) + api_token_file="${2:-}" + shift 2 + ;; + --s3-endpoint) + s3_endpoint="${2:-}" + shift 2 + ;; + --s3-region) + s3_region="${2:-}" + shift 2 + ;; + --s3-bucket) + s3_bucket="${2:-}" + shift 2 + ;; + --s3-access-key-file) + s3_access_key_file="${2:-}" + shift 2 + ;; + --s3-secret-key-file) + s3_secret_key_file="${2:-}" + shift 2 + ;; + --test-only) + test_only=true + shift + ;; + --help|-h) + usage + exit 0 + ;; + *) + fail "unknown argument: $1" + ;; + esac +done + +[[ -n "$domain" ]] || fail "--domain is required" +[[ -n "$api_token_file" ]] || fail "--api-token-file is required" +[[ -n "$s3_endpoint" || "$test_only" == true ]] || fail "--s3-endpoint is required unless --test-only is set" +[[ -n "$s3_region" || "$test_only" == true ]] || fail "--s3-region is required unless --test-only is set" +[[ -n "$s3_bucket" || "$test_only" == true ]] || fail "--s3-bucket is required unless --test-only is set" +[[ -n "$s3_access_key_file" || "$test_only" == true ]] || fail "--s3-access-key-file is required unless --test-only is set" +[[ -n "$s3_secret_key_file" || "$test_only" == true ]] || fail "--s3-secret-key-file is required unless --test-only is set" + +require_file "$api_token_file" +api_token="$(read_secret "$api_token_file")" + +if [[ "$test_only" == false ]]; then + require_file "$s3_access_key_file" + require_file "$s3_secret_key_file" + s3_access_key_id="$(read_secret "$s3_access_key_file")" + 
s3_secret_access_key="$(read_secret "$s3_secret_key_file")" + + case "$s3_endpoint" in + http://*|https://*) + ;; + *) + fail "--s3-endpoint must start with http:// or https://" + ;; + esac +fi + +curl_config="$(mktemp)" +trap 'rm -f "$curl_config"' EXIT + +if [[ "$test_only" == false ]]; then + cat >"$curl_config" <&2 + curl --config "$curl_config" + printf '\n' >&2 +fi + +cat >"$curl_config" <&2 +curl --config "$curl_config" +printf '\n' >&2 diff --git a/Tools/forwardemail-hetzner-storage.py b/Tools/forwardemail-hetzner-storage.py new file mode 100755 index 0000000..3a2a941 --- /dev/null +++ b/Tools/forwardemail-hetzner-storage.py @@ -0,0 +1,261 @@ +#!/usr/bin/env python3 + +from __future__ import annotations + +import argparse +import datetime as dt +import hashlib +import hmac +import sys +import textwrap +from pathlib import Path +from urllib.parse import urlencode, urlparse + +import requests + + +def read_secret(path: str) -> str: + value = Path(path).read_text(encoding="utf-8").strip() + if not value: + raise SystemExit(f"error: empty secret file: {path}") + return value + + +def sign(key: bytes, msg: str) -> bytes: + return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest() + + +def request( + *, + method: str, + endpoint: str, + region: str, + access_key: str, + secret_key: str, + bucket: str, + query: dict[str, str] | None = None, + body: bytes = b"", + content_type: str | None = None, +) -> requests.Response: + parsed = urlparse(endpoint) + if parsed.scheme != "https": + raise SystemExit("error: endpoint must use https") + + host = parsed.netloc + canonical_uri = f"/{bucket}" + query = query or {} + canonical_querystring = urlencode(sorted(query.items()), doseq=True, safe="~") + + now = dt.datetime.now(dt.timezone.utc) + amz_date = now.strftime("%Y%m%dT%H%M%SZ") + date_stamp = now.strftime("%Y%m%d") + payload_hash = hashlib.sha256(body).hexdigest() + + headers = { + "host": host, + "x-amz-content-sha256": payload_hash, + "x-amz-date": amz_date, 
+ } + if content_type: + headers["content-type"] = content_type + + signed_headers = ";".join(sorted(headers.keys())) + canonical_headers = "".join(f"{name}:{headers[name]}\n" for name in sorted(headers.keys())) + canonical_request = "\n".join( + [ + method, + canonical_uri, + canonical_querystring, + canonical_headers, + signed_headers, + payload_hash, + ] + ) + + algorithm = "AWS4-HMAC-SHA256" + credential_scope = f"{date_stamp}/{region}/s3/aws4_request" + string_to_sign = "\n".join( + [ + algorithm, + amz_date, + credential_scope, + hashlib.sha256(canonical_request.encode("utf-8")).hexdigest(), + ] + ) + + k_date = sign(("AWS4" + secret_key).encode("utf-8"), date_stamp) + k_region = sign(k_date, region) + k_service = sign(k_region, "s3") + signing_key = sign(k_service, "aws4_request") + signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest() + + auth_header = ( + f"{algorithm} Credential={access_key}/{credential_scope}, " + f"SignedHeaders={signed_headers}, Signature={signature}" + ) + + url = f"{parsed.scheme}://{host}{canonical_uri}" + if canonical_querystring: + url = f"{url}?{canonical_querystring}" + + response = requests.request( + method, + url, + headers={**headers, "Authorization": auth_header}, + data=body, + timeout=30, + ) + return response + + +def ensure_bucket(args: argparse.Namespace, bucket: str) -> None: + head = request( + method="HEAD", + endpoint=args.endpoint, + region=args.region, + access_key=args.access_key, + secret_key=args.secret_key, + bucket=bucket, + ) + if head.status_code == 200: + print(f"{bucket}: exists") + return + if head.status_code != 404: + raise SystemExit(f"error: HEAD {bucket} returned {head.status_code}: {head.text[:200]}") + + body = textwrap.dedent( + f"""\ + + + {args.region} + + """ + ).encode("utf-8") + create = request( + method="PUT", + endpoint=args.endpoint, + region=args.region, + access_key=args.access_key, + secret_key=args.secret_key, + bucket=bucket, + body=body, + 
content_type="application/xml", + ) + if create.status_code not in (200, 204): + raise SystemExit(f"error: PUT {bucket} returned {create.status_code}: {create.text[:200]}") + print(f"{bucket}: created") + + +def put_lifecycle(args: argparse.Namespace, bucket: str) -> None: + body = textwrap.dedent( + f"""\ + + + + expire-forwardemail-backups-after-{args.expire_days}-days + Enabled + + + + + {args.expire_days} + + + + """ + ).encode("utf-8") + response = request( + method="PUT", + endpoint=args.endpoint, + region=args.region, + access_key=args.access_key, + secret_key=args.secret_key, + bucket=bucket, + query={"lifecycle": ""}, + body=body, + content_type="application/xml", + ) + if response.status_code not in (200, 204): + raise SystemExit( + f"error: PUT lifecycle for {bucket} returned {response.status_code}: {response.text[:200]}" + ) + print(f"{bucket}: lifecycle set to {args.expire_days} days") + + +def get_lifecycle(args: argparse.Namespace, bucket: str) -> None: + response = request( + method="GET", + endpoint=args.endpoint, + region=args.region, + access_key=args.access_key, + secret_key=args.secret_key, + bucket=bucket, + query={"lifecycle": ""}, + ) + if response.status_code != 200: + raise SystemExit( + f"error: GET lifecycle for {bucket} returned {response.status_code}: {response.text[:200]}" + ) + print(f"=== {bucket} lifecycle ===") + print(response.text.strip()) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Provision Hetzner object-storage buckets for Forward Email backups." + ) + parser.add_argument( + "--endpoint", + default="https://hel1.your-objectstorage.com", + help="Public S3-compatible endpoint URL. 
For Hetzner, use the regional endpoint, not the account alias.", + ) + parser.add_argument("--region", default="hel1", help="S3 region.") + parser.add_argument( + "--access-key-file", + default="intake/hetzner-s3-user.txt", + help="File containing the S3 access key id.", + ) + parser.add_argument( + "--secret-key-file", + default="intake/hetzner-s3-secret.txt", + help="File containing the S3 secret key.", + ) + parser.add_argument( + "--bucket", + action="append", + required=True, + help="Bucket to provision. Repeat for multiple buckets.", + ) + parser.add_argument( + "--expire-days", + type=int, + default=90, + help="Lifecycle expiry window in days.", + ) + parser.add_argument( + "--verify-only", + action="store_true", + help="Skip create/update and only read the current lifecycle.", + ) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + args.access_key = read_secret(args.access_key_file) + args.secret_key = read_secret(args.secret_key_file) + + for bucket in args.bucket: + if args.verify_only: + get_lifecycle(args, bucket) + continue + ensure_bucket(args, bucket) + put_lifecycle(args, bucket) + get_lifecycle(args, bucket) + + +if __name__ == "__main__": + try: + main() + except requests.RequestException as err: + raise SystemExit(f"error: request failed: {err}") from err diff --git a/docs/FORWARDEMAIL.md b/docs/FORWARDEMAIL.md new file mode 100644 index 0000000..798f3e5 --- /dev/null +++ b/docs/FORWARDEMAIL.md @@ -0,0 +1,101 @@ +# Forward Email Backups + +Burrow's mail direction is hosted mail on [Forward Email](https://forwardemail.net/), with domain-owned backup retention in our own S3-compatible object storage. + +This is the first mail path to operationalize for `burrow.net` and `burrow.rs`. It keeps SMTP/IMAP hosting off the first forge host while still giving Burrow control over backup retention and object ownership. + +## What Forward Email Requires + +Forward Email exposes custom backup storage per domain. 
The documented API shape is: + +- `PUT /v1/domains/{domain}` with: + - `has_custom_s3=true` + - `s3_endpoint` + - `s3_access_key_id` + - `s3_secret_access_key` + - `s3_region` + - `s3_bucket` +- `POST /v1/domains/{domain}/test-s3-connection` + +Forward Email also documents these operational constraints: + +- the bucket must remain private +- credentials are validated with `HeadBucket` +- failed or public-bucket configurations fall back to Forward Email's default storage and notify domain administrators +- custom S3 keeps every backup version, so lifecycle expiration is our responsibility + +## Burrow Secret Layout + +Present in `intake/` today: + +- `intake/forwardemail_api_token.txt` +- `intake/hetzner-s3-user.txt` +- `intake/hetzner-s3-secret.txt` +- Hetzner public S3 endpoint for Forward Email: `https://hel1.your-objectstorage.com` +- Hetzner object storage region: `hel1` +- Hetzner bucket used for Forward Email backups: `burrow` + +## Verified Storage State + +As of March 15, 2026, Burrow's Forward Email custom S3 configuration is live: + +- endpoint: `https://hel1.your-objectstorage.com` +- region: `hel1` +- bucket: `burrow` +- `burrow.net` has `has_custom_s3=true` +- `burrow.rs` has `has_custom_s3=true` +- Forward Email's `/test-s3-connection` succeeded for both domains +- the `burrow` bucket enforces lifecycle expiration after `90` days + +Forward Email performs bucket validation with virtual-hosted-style addressing, meaning it prepends the bucket name to the endpoint hostname. For Hetzner Object Storage, this means the working endpoint is the regional S3 endpoint (`https://hel1.your-objectstorage.com`), not the account alias (`https://burrow.hel1.your-objectstorage.com`). Using the account alias causes TLS hostname mismatches when the vendor prepends the bucket name. + +## Helper + +Use [`Tools/forwardemail-custom-s3.sh`](../Tools/forwardemail-custom-s3.sh) to configure or retest the domain setting without exposing secrets in the process list.
+ +Use [`Tools/forwardemail-hetzner-storage.py`](../Tools/forwardemail-hetzner-storage.py) to ensure the Hetzner backup bucket exists and to apply lifecycle expiry before enabling custom S3 on the Forward Email side. + +Bucket bootstrap example: + +```sh +Tools/forwardemail-hetzner-storage.py \ + --endpoint https://hel1.your-objectstorage.com \ + --bucket burrow \ + --expire-days 90 +``` + +Example: + +```sh +Tools/forwardemail-custom-s3.sh \ + --domain burrow.net \ + --api-token-file intake/forwardemail_api_token.txt \ + --s3-endpoint https://hel1.your-objectstorage.com \ + --s3-region hel1 \ + --s3-bucket burrow \ + --s3-access-key-file intake/hetzner-s3-user.txt \ + --s3-secret-key-file intake/hetzner-s3-secret.txt +``` + +Retest an existing domain configuration without rewriting it: + +```sh +Tools/forwardemail-custom-s3.sh \ + --domain burrow.net \ + --api-token-file intake/forwardemail_api_token.txt \ + --test-only +``` + +## Retention + +Forward Email preserves every backup object when custom S3 is enabled. Configure lifecycle expiration on the bucket itself. A 30-day or 90-day expiry window is the baseline recommendation from the vendor docs; Burrow should choose explicitly per domain instead of letting the bucket grow without bound. The current Burrow bootstrap helper defaults to `90` days. + +## Identity Direction + +Hosted mail and SaaS identity are separate concerns: + +- mail hosting/backups: Forward Email + Burrow-owned S3-compatible storage +- interactive identity: Authentik as the long-term IdP +- future SaaS SSO target: Linear via SAML once the workspace and plan are ready + +This means the forge host does not need to become the first mail server just to give Burrow mailboxes or retention control. 
diff --git a/docs/PROTOCOL_ROADMAP.md b/docs/PROTOCOL_ROADMAP.md new file mode 100644 index 0000000..6bfde42 --- /dev/null +++ b/docs/PROTOCOL_ROADMAP.md @@ -0,0 +1,31 @@ +# Protocol Roadmap + +Burrow currently has two tunnel paths in-tree: + +- a WireGuard data plane +- a mesh transport built on `iroh` + +What it does not have yet is a transport-neutral control plane that can honestly claim full MASQUE `CONNECT-IP` or full Tailscale-style negotiation parity. This repository now contains the beginnings of that layer: + +- control-plane data structures in `burrow/src/control/mod.rs` +- local auth bootstrap and persistent node/session storage in `burrow/src/auth/server/` +- governance documents under `evolution/` for the bigger protocol work + +## `CONNECT-IP` + +Full RFC 9484 support requires more than packet forwarding. It needs HTTP/3 session management, Capsule handling, HTTP Datagram context identifiers, address assignment, route advertisement, and request-scope enforcement. Burrow does not implement those end to end yet. + +## Tailscale-Style Negotiation + +Burrow now has register/map request and response types plus persistent node records, but it does not yet implement the full Tailscale capability surface, peer delta protocol, DERP coordination, or Noise-based control transport. + +## Current Direction + +The intended sequence is: + +1. Stabilize the control-plane data model and bootstrap auth. +2. Introduce transport-neutral route and address abstractions. +3. Add MASQUE framing and HTTP/3 transport support. +4. Expand policy, relay, and interoperability testing. + +This keeps Burrow honest about what is running today while creating a clean path for the rest. diff --git a/docs/WIREGUARD_LINEAGE.md b/docs/WIREGUARD_LINEAGE.md new file mode 100644 index 0000000..15ca67a --- /dev/null +++ b/docs/WIREGUARD_LINEAGE.md @@ -0,0 +1,30 @@ +# WireGuard Rust Lineage + +Burrow's in-tree WireGuard engine is not a greenfield implementation. 
It was lifted from the Rust WireGuard lineage around Cloudflare's BoringTun, then cut down and reshaped to fit Burrow's own daemon and tunnel abstractions. + +## What Was Lifted + +- The repository history includes `1b39eca` (`boringtun wip`) and `28af9003` (`merge boringtun into burrow`). +- The current `burrow/src/wireguard/noise/*` files still carry the original Cloudflare copyright and SPDX headers. +- Core protocol machinery such as the Noise handshake, session state, rate limiter, and timer logic came from that imported body of work. + +## What Changed in Burrow + +Burrow does not embed BoringTun unchanged. + +- The original device layer was replaced with Burrow-specific interface and peer control blocks in `burrow/src/wireguard/iface.rs` and `burrow/src/wireguard/pcb.rs`. +- Configuration handling was rewritten around Burrow's own INI parser and config model in `burrow/src/wireguard/config.rs`. +- The daemon now resolves the active runtime from the database-backed network list rather than from a single static WireGuard payload. +- Burrow added its own runtime switching path so WireGuard and mesh transports can share one daemon lifecycle. + +## What Was Improved + +The lifted code has been tightened further in-repo. + +- Deprecated constant-time comparisons were replaced with `subtle`. +- Network ordering and runtime selection are now deterministic and test-covered. +- The Burrow runtime can swap between WireGuard and mesh-backed networks without restarting the daemon process itself. + +## Why This Matters + +This project should be explicit about lineage. Burrow benefits from proven Rust WireGuard work, but it owns the integration surface, runtime behavior, and future maintenance burden. That is why the code should be documented as lifted, modified, and improved rather than described as wholly original. 
diff --git a/evolution/README.md b/evolution/README.md new file mode 100644 index 0000000..e55a347 --- /dev/null +++ b/evolution/README.md @@ -0,0 +1,60 @@ +# Burrow Evolution + +Burrow Evolution Proposals (BEPs) are the repository's durable design record for protocol work, control-plane changes, forge infrastructure, and operational policy. + +## Goals + +1. Capture intent before implementation outruns the architecture. +2. Give contributors and agents enough context to work safely without re-discovering prior decisions. +3. Tie ambitious work to concrete validation, rollout, and rollback criteria. + +## When a BEP is required + +Open a BEP for: + +- new transports or protocol families +- control-plane and identity changes +- deployment, forge, runner, or secrets changes +- data model migrations +- user-visible behavior that changes security or routing semantics + +Small bug fixes and isolated refactors do not need a BEP unless they materially change one of the areas above. + +## Lifecycle + +1. Pitch + Capture the problem and why it matters now. +2. Draft + Copy `evolution/proposals/0000-template.md` to `evolution/proposals/BEP-XXXX-short-slug.md`. +3. Review + Collect feedback, tighten the design, and document unresolved concerns. +4. Decision + Mark the proposal `Accepted`, `Rejected`, or `Returned for Revision`. +5. Implementation + Link code changes, tests, and rollout evidence. +6. Supersession + Keep historical proposals in-tree and point forward to the replacing BEP. + +## Status Values + +- `Pitch` +- `Draft` +- `In Review` +- `Accepted` +- `Implemented` +- `Rejected` +- `Returned for Revision` +- `Superseded` +- `Archived` + +## Layout + +```text +evolution/ + README.md + proposals/ + 0000-template.md + BEP-0001-... +``` + +Use ASCII Markdown. Keep metadata at the top of each proposal so tooling and future agents can parse it quickly. 
diff --git a/evolution/proposals/0000-template.md b/evolution/proposals/0000-template.md new file mode 100644 index 0000000..66954c6 --- /dev/null +++ b/evolution/proposals/0000-template.md @@ -0,0 +1,57 @@ +# `BEP-XXXX` - Title Case Summary + +```text +Status: Draft | In Review | Accepted | Implemented | Rejected | Returned for Revision | Superseded | Archived +Proposal: BEP-XXXX +Authors: +Coordinator: +Reviewers: +Constitution Sections: +Implementation PRs: (optional while drafting) +Decision Date: +``` + +## Summary + +One or two paragraphs that state the desired outcome and why it matters. + +## Motivation + +- What problem exists today? +- Why should Burrow solve it now? +- Which issues, incidents, or constraints support the change? + +## Detailed Design + +- Architecture and boundaries +- Data model and migration plan +- Protocol or API changes +- Observability, testing, and failure handling + +## Security and Operational Considerations + +- Access and secret handling +- Abuse, downgrade, or supply-chain risks +- Rollback and kill-switch plans + +## Contributor Playbook + +Give the concrete steps, commands, checks, and evidence a contributor should produce while implementing or rolling out the change. + +## Alternatives Considered + +List alternatives and why they were rejected. + +## Impact on Other Work + +- follow-up tasks +- dependencies +- compatibility constraints + +## Decision + +Record the final call, who made it, and any conditions. + +## References + +Link relevant issues, specs, transcripts, and external research. 
diff --git a/evolution/proposals/BEP-0001-sovereign-forge-and-governance.md b/evolution/proposals/BEP-0001-sovereign-forge-and-governance.md new file mode 100644 index 0000000..f48a7a9 --- /dev/null +++ b/evolution/proposals/BEP-0001-sovereign-forge-and-governance.md @@ -0,0 +1,61 @@ +# `BEP-0001` - Sovereign Forge and Governance Bootstrap + +```text +Status: Draft +Proposal: BEP-0001 +Authors: gpt-5.4 +Coordinator: gpt-5.4 +Reviewers: Pending +Constitution Sections: II, III, V +Implementation PRs: Pending +Decision Date: Pending +``` + +## Summary + +Burrow should own its forge, deployment logic, and operational context under `burrow.net`. This proposal establishes the repository-local governance and forge bootstrap required to move build, release, and infrastructure control out of GitHub-centric assumptions and into a self-hosted operating model. + +## Motivation + +- The repository currently keeps CI definitions under `.github/workflows/` but has no first-class self-hosted forge layout. +- Infrastructure changes and protocol work are already entangled; without a design record, the project risks landing irreversible operations without enough context. +- A self-hosted forge is a prerequisite for durable autonomy over source, runners, and release pipelines. + +## Detailed Design + +- Add a project constitution and BEP process under `evolution/`. +- Introduce a Nix flake and NixOS host/module layout for `burrow-forge`. +- Add Forgejo-native workflows under `.forgejo/workflows/` for repository-local CI. +- Bootstrap the initial forge identity around `contact@burrow.net` and an agent-owned SSH workflow. + +## Security and Operational Considerations + +- Initial bootstrap may read credentials from local intake, but production must converge on encrypted secret handling. +- The first forge host replacement must preserve rollback information before deleting any existing VM. 
+- DNS for `burrow.net` is currently pending activation; the forge rollout must not assume public reachability until nameserver cutover completes. + +## Contributor Playbook + +- Keep destructive host operations behind explicit verification of the current Hetzner state. +- Build and test repository-local workflows before using them for deployment. +- Record the active server id, image, IPs, and SSH path before replacement. + +## Alternatives Considered + +- Continue relying on GitHub Actions while separately hosting services. Rejected because it leaves source authority and CI policy split across systems. +- Stand up Forgejo without a repository-local operating model. Rejected because the repo would still be missing deployment truth. + +## Impact on Other Work + +- Blocks long-term migration of workflows away from GitHub. +- Provides the governance anchor for protocol and control-plane proposals. + +## Decision + +Pending. + +## References + +- `CONSTITUTION.md` +- `.github/workflows/` +- `.forgejo/workflows/` diff --git a/evolution/proposals/BEP-0002-control-plane-bootstrap-and-local-auth.md b/evolution/proposals/BEP-0002-control-plane-bootstrap-and-local-auth.md new file mode 100644 index 0000000..2558d09 --- /dev/null +++ b/evolution/proposals/BEP-0002-control-plane-bootstrap-and-local-auth.md @@ -0,0 +1,60 @@ +# `BEP-0002` - Control-Plane Bootstrap and Local Auth + +```text +Status: Draft +Proposal: BEP-0002 +Authors: gpt-5.4 +Coordinator: gpt-5.4 +Reviewers: Pending +Constitution Sections: I, II, III, V +Implementation PRs: Pending +Decision Date: Pending +``` + +## Summary + +Burrow needs a repository-owned control-plane model instead of ad hoc network payload storage plus third-party-only auth. This proposal introduces a local username/password bootstrap for `contact@burrow.net`, plus a register/map data model shaped to support a Tailscale-style control server without claiming full parity yet. 
+ +## Motivation + +- Current auth support is limited and does not provide a plain local bootstrap path for the project's own operator identity. +- The existing database stores network payloads, but not a durable model for users, nodes, sessions, or control-plane negotiation state. +- Future work on route policy, device coordination, and richer negotiation needs a real data model now. + +## Detailed Design + +- Add control-plane types for users, nodes, register requests, and map responses. +- Extend the auth server schema with local credentials, sessions, provider logins, and control nodes. +- Expose JSON endpoints for local login, node registration, and map retrieval. +- Seed the initial operator account from intake-backed bootstrap credentials. + +## Security and Operational Considerations + +- Passwords are stored with Argon2id hashes only. +- Session tokens are bearer credentials and must be treated as sensitive. +- The bootstrap credential path is a short-term path; follow-up work should move it into encrypted secret management before public deployment. + +## Contributor Playbook + +- Verify bootstrap account creation in an isolated test database. +- Exercise login, register, and map end to end with integration tests. +- Do not advertise protocol parity beyond the implemented request/response contract. + +## Alternatives Considered + +- Wait for full external identity-provider integration first. Rejected because the forge needs an operator account now. +- Keep control-plane state implicit in daemon-local configuration. Rejected because it cannot express multi-device coordination. + +## Impact on Other Work + +- Unblocks forge bootstrap and future device control-plane work. +- Creates the storage model needed for richer policy and transport proposals. + +## Decision + +Pending. 
+ +## References + +- `burrow/src/auth/server/` +- `burrow/src/control/` diff --git a/evolution/proposals/BEP-0003-connect-ip-and-negotiation-roadmap.md b/evolution/proposals/BEP-0003-connect-ip-and-negotiation-roadmap.md new file mode 100644 index 0000000..99ddedf --- /dev/null +++ b/evolution/proposals/BEP-0003-connect-ip-and-negotiation-roadmap.md @@ -0,0 +1,61 @@ +# `BEP-0003` - CONNECT-IP and Negotiation Roadmap + +```text +Status: Draft +Proposal: BEP-0003 +Authors: gpt-5.4 +Coordinator: gpt-5.4 +Reviewers: Pending +Constitution Sections: I, II, V +Implementation PRs: Pending +Decision Date: Pending +``` + +## Summary + +Burrow should grow from a WireGuard-first tunnel runner into a transport stack that can support HTTP/3 MASQUE `CONNECT-IP` and a richer node negotiation model. This proposal stages that work so Burrow can adopt the right abstractions instead of stapling QUIC-era semantics onto a WireGuard-only daemon. + +## Motivation + +- `CONNECT-IP` introduces HTTP/3 sessions, context identifiers, address assignment, and route advertisements that do not fit the current daemon model. +- A Tailscale-style control plane requires explicit node, endpoint, and session state rather than raw network blobs. +- The project needs a roadmap that distinguishes data-model work, control-plane work, and actual transport implementation. + +## Detailed Design + +- Stage 1: land control-plane types and persistent auth/session/node storage. +- Stage 2: add transport-agnostic route, address-assignment, and policy abstractions in Burrow. +- Stage 3: implement MASQUE `CONNECT-IP` framing and HTTP Datagram handling. +- Stage 4: connect the transport layer to real relay, policy, and observability paths. + +## Security and Operational Considerations + +- `CONNECT-IP` changes the trust boundary from WireGuard peers to HTTP/3 peers and relays; authentication, replay handling, and scope restriction must be explicit. 
+- Route advertisements and delegated prefixes must be validated before touching the data plane. +- Control-plane capability claims must not imply support that the transport layer does not yet implement. + +## Contributor Playbook + +- Keep protocol codecs independently testable before integrating them into live transports. +- Add interoperability tests for every new capsule or datagram type. +- Separate request parsing, policy validation, and packet forwarding so regressions stay localized. + +## Alternatives Considered + +- Implement MASQUE directly in the daemon without control-plane refactoring. Rejected because the current daemon has no transport-neutral contract for routes or prefixes. +- Treat Tailscale negotiation as a one-off compatibility shim. Rejected because Burrow needs first-class control-plane concepts either way. + +## Impact on Other Work + +- Depends on BEP-0002. +- Informs future relay, policy, and node coordination work. + +## Decision + +Pending. + +## References + +- RFC 9484 +- `burrow/src/daemon/` +- `burrow/src/control/` diff --git a/evolution/proposals/BEP-0004-hosted-mail-and-saas-identity.md b/evolution/proposals/BEP-0004-hosted-mail-and-saas-identity.md new file mode 100644 index 0000000..d633f37 --- /dev/null +++ b/evolution/proposals/BEP-0004-hosted-mail-and-saas-identity.md @@ -0,0 +1,68 @@ +# `BEP-0004` - Hosted Mail Backups and SaaS Identity + +```text +Status: Draft +Proposal: BEP-0004 +Authors: gpt-5.4 +Coordinator: gpt-5.4 +Reviewers: Pending +Constitution Sections: II, III, V +Implementation PRs: Pending +Decision Date: Pending +``` + +## Summary + +Burrow should start with hosted mail on Forward Email instead of self-hosting SMTP and IMAP on the first forge machine. Backup retention should still be controlled by Burrow through custom S3-compatible storage backed by Burrow-owned object storage. 
In parallel, Burrow should treat SaaS identity as a separate track and converge on Authentik as the long-term IdP, with Linear SAML SSO as a planned downstream integration rather than an immediate bootstrap dependency. + +## Motivation + +- The first forge host already carries source control, CI, and deployment bootstrap risk. Adding a self-hosted mail stack increases operational scope before the forge is stable. +- Forward Email already exposes SMTP and IMAP while allowing per-domain custom S3 backup storage, which preserves Burrow's data ownership over mailbox backups. +- The repository needs a durable decision record that separates hosted mail operations from future SaaS SSO work. + +## Detailed Design + +- Use Forward Email as the operational mail provider for `burrow.net` and `burrow.rs`. +- Configure custom S3-compatible storage per domain using Burrow-controlled object storage credentials. +- Keep one backup bucket per domain and enforce lifecycle expiration at the bucket layer. (The current bootstrap shares the single `burrow` bucket across both domains; splitting to per-domain buckets is follow-up work.) +- Add repository-owned tooling and documentation for applying and testing the Forward Email custom S3 configuration. +- Treat Authentik as the future identity authority for SaaS applications, but keep Linear SAML as a later rollout once the workspace and vendor prerequisites are available. Linear's current docs place SAML and SCIM behind higher-tier workspace security settings, so Burrow should treat plan availability as an explicit precondition. + +## Security and Operational Considerations + +- Forward Email API tokens and S3 credentials must stay in secret files and must not be passed directly on the shell command line. +- Buckets must remain private. Public bucket detection by the vendor should be treated as a hard failure, not a warning. +- Backup growth is unbounded without lifecycle rules. Retention policy is part of the rollout, not optional cleanup.
+- Hosted mail reduces MTA attack surface on the forge host, but it adds third-party dependency risk; keeping backups in Burrow-owned storage limits that blast radius. + +## Contributor Playbook + +- Put the Forward Email API token in `intake/forwardemail_api_token.txt`. +- Use `Tools/forwardemail-custom-s3.sh` to configure `burrow.net` and `burrow.rs`. +- Run the helper again with `--test-only` after any credential rotation. +- Record the chosen endpoint, region, bucket names, and lifecycle policy alongside rollout evidence. +- Do not claim Linear SAML is live until the Authentik app, Linear workspace settings, workspace plan prerequisites, and end-to-end login flow are verified. + +## Alternatives Considered + +- Self-host Stalwart on the forge host immediately. Rejected for the first rollout because it expands host scope before source control and CI are stable. +- Rely on Forward Email default backup storage only. Rejected because it gives Burrow less control over retention and data location. +- Delay all SaaS identity planning until after forge cutover. Rejected because Linear and other SaaS integrations will otherwise accrete without an agreed authority. + +## Impact on Other Work + +- Narrows the first forge host scope. +- Creates a clean mail path for `contact@burrow.net` without requiring self-hosted SMTP and IMAP. +- Leaves Authentik and Linear SAML as explicit follow-up work instead of hidden assumptions. + +## Decision + +Pending. 
+ +## References + +- `docs/FORWARDEMAIL.md` +- `Tools/forwardemail-custom-s3.sh` +- Forward Email FAQ: custom S3-compatible storage for backups +- Linear docs: SAML SSO diff --git a/flake.lock b/flake.lock new file mode 100644 index 0000000..677bd0d --- /dev/null +++ b/flake.lock @@ -0,0 +1,86 @@ +{ + "nodes": { + "disko": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1773506317, + "narHash": "sha256-qWKbLUJpavIpvOdX1fhHYm0WGerytFHRoh9lVck6Bh0=", + "type": "tarball", + "url": "https://codeload.github.com/nix-community/disko/tar.gz/master" + }, + "original": { + "type": "tarball", + "url": "https://codeload.github.com/nix-community/disko/tar.gz/master" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1731533236, + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", + "type": "tarball", + "url": "https://codeload.github.com/numtide/flake-utils/tar.gz/main" + }, + "original": { + "type": "tarball", + "url": "https://codeload.github.com/numtide/flake-utils/tar.gz/main" + } + }, + "hcloud-upload-image-src": { + "flake": false, + "locked": { + "lastModified": 1766413232, + "narHash": "sha256-1u9tpzciYjB/EgBI81pg9w0kez7hHZON7+AHvfKW7k0=", + "type": "tarball", + "url": "https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0" + }, + "original": { + "type": "tarball", + "url": "https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1773389992, + "narHash": "sha256-wvfdLLWJ2I9oEpDd9PfMA8osfIZicoQ5MT1jIwNs9Tk=", + "type": "tarball", + "url": "https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable" + }, + "original": { + "type": "tarball", + "url": "https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable" + } + }, + "root": { + "inputs": { + "disko": "disko", + "flake-utils": "flake-utils", + "hcloud-upload-image-src": "hcloud-upload-image-src", + "nixpkgs": "nixpkgs" + 
} + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000..18ff979 --- /dev/null +++ b/flake.nix @@ -0,0 +1,190 @@ +{ + description = "Burrow development shell and forge host configuration"; + + inputs = { + nixpkgs.url = "tarball+https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable"; + flake-utils.url = "tarball+https://codeload.github.com/numtide/flake-utils/tar.gz/main"; + disko = { + url = "tarball+https://codeload.github.com/nix-community/disko/tar.gz/master"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + hcloud-upload-image-src = { + url = "tarball+https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0"; + flake = false; + }; + }; + + outputs = { self, nixpkgs, flake-utils, disko, hcloud-upload-image-src }: + let + supportedSystems = [ + "x86_64-linux" + "aarch64-linux" + "x86_64-darwin" + "aarch64-darwin" + ]; + in + (flake-utils.lib.eachSystem supportedSystems (system: + let + pkgs = import nixpkgs { + inherit system; + }; + lib = pkgs.lib; + commonPackages = with pkgs; [ + cargo + rustc + rustfmt + clippy + protobuf + pkg-config + sqlite + git + openssh + curl + jq + nodejs_20 + python3 + rsync + ]; + nscPkg = + if pkgs.stdenv.isLinux || pkgs.stdenv.isDarwin then + let + version = "0.0.452"; + osName = + if pkgs.stdenv.isLinux then + "linux" + else if pkgs.stdenv.isDarwin then + "darwin" + else + throw "nsc: unsupported host OS ${pkgs.stdenv.hostPlatform.system}"; + archInfo = + if pkgs.stdenv.hostPlatform.isx86_64 then + { + arch = "amd64"; + hash = + if pkgs.stdenv.isLinux then + "sha256-FBqOJ0UQWTv2r4HWMHrR/aqFzDa0ej/mS8dSoaCe6fY=" + 
else + "sha256-3fRKWO0SCCa5PEym5yCB7dtyEx3xSxXSHfJYz8B+/4M="; + } + else if pkgs.stdenv.hostPlatform.isAarch64 then + { + arch = "arm64"; + hash = + if pkgs.stdenv.isLinux then + "sha256-A6twO8Ievbu7Gi5Hqon4ug5rCGOm/uHhlCya3px6+io=" + else + "sha256-n363xLaGhy+a6lw2F+WicQYGXnGYnqRW8aTQCSppwcw="; + } + else + throw "nsc: unsupported host platform ${pkgs.stdenv.hostPlatform.system}"; + src = pkgs.fetchurl { + url = "https://github.com/namespacelabs/foundation/releases/download/v${version}/nsc_${version}_${osName}_${archInfo.arch}.tar.gz"; + sha256 = archInfo.hash; + }; + in + pkgs.stdenvNoCC.mkDerivation { + pname = "nsc"; + inherit version src; + dontConfigure = true; + dontBuild = true; + unpackPhase = '' + tar -xzf "$src" + ''; + installPhase = '' + install -d "$out/bin" + install -m 0555 nsc "$out/bin/nsc" + install -m 0555 docker-credential-nsc "$out/bin/docker-credential-nsc" + install -m 0555 bazel-credential-nsc "$out/bin/bazel-credential-nsc" + ''; + } + else + null; + hcloudUploadImagePkg = pkgs.buildGoModule { + pname = "hcloud-upload-image"; + version = "1.3.0"; + src = hcloud-upload-image-src; + vendorHash = "sha256-IdOAUBPg0CEuHd2rdc7jOlw0XtnAhr3PVPJbnFs2+x4="; + subPackages = [ "." 
]; + env.GOWORK = "off"; + ldflags = [ + "-s" + "-w" + ]; + }; + forgejoNscSrc = lib.cleanSourceWith { + src = ./services/forgejo-nsc; + filter = path: type: + let + p = toString path; + name = builtins.baseNameOf path; + hasDir = dir: lib.hasInfix "/${dir}/" p || lib.hasSuffix "/${dir}" p; + in + !(hasDir ".git" || hasDir "vendor" || hasDir "node_modules" || name == "result"); + }; + forgejoNscDispatcher = pkgs.buildGoModule { + pname = "forgejo-nsc-dispatcher"; + version = "0.1.0"; + src = forgejoNscSrc; + subPackages = [ "./cmd/forgejo-nsc-dispatcher" ]; + vendorHash = "sha256-Kpr+5Q7Dy4JiLuJVZbFeJAzLR7PLPYxhtJqfxMEytcs="; + }; + forgejoNscAutoscaler = pkgs.buildGoModule { + pname = "forgejo-nsc-autoscaler"; + version = "0.1.0"; + src = forgejoNscSrc; + subPackages = [ "./cmd/forgejo-nsc-autoscaler" ]; + vendorHash = "sha256-Kpr+5Q7Dy4JiLuJVZbFeJAzLR7PLPYxhtJqfxMEytcs="; + }; + in + { + devShells.default = pkgs.mkShell { + packages = + commonPackages + ++ [ + hcloudUploadImagePkg + forgejoNscDispatcher + forgejoNscAutoscaler + ] + ++ lib.optionals (nscPkg != null) [ nscPkg ]; + }; + + devShells.ci = pkgs.mkShell { + packages = + commonPackages + ++ [ + hcloudUploadImagePkg + ] + ++ lib.optionals (nscPkg != null) [ nscPkg ]; + }; + + formatter = pkgs.nixpkgs-fmt; + + packages = + { + hcloud-upload-image = hcloudUploadImagePkg; + forgejo-nsc-dispatcher = forgejoNscDispatcher; + forgejo-nsc-autoscaler = forgejoNscAutoscaler; + } + // lib.optionalAttrs (nscPkg != null) { nsc = nscPkg; }; + })) + // { + nixosModules.burrow-forge = import ./nixos/modules/burrow-forge.nix; + nixosModules.burrow-forge-runner = import ./nixos/modules/burrow-forge-runner.nix; + nixosModules.burrow-forgejo-nsc = import ./nixos/modules/burrow-forgejo-nsc.nix; + + nixosConfigurations.burrow-forge = nixpkgs.lib.nixosSystem { + system = "x86_64-linux"; + specialArgs = { + inherit self; + }; + modules = [ + disko.nixosModules.disko + ./nixos/hosts/burrow-forge/default.nix + ]; + }; + + images = 
{ + burrow-forge-raw = self.nixosConfigurations.burrow-forge.config.system.build.diskoImages; + }; + }; +} diff --git a/nixos/README.md b/nixos/README.md new file mode 100644 index 0000000..a682db0 --- /dev/null +++ b/nixos/README.md @@ -0,0 +1,53 @@ +# Burrow Forge Runbook + +This directory contains the Burrow forge host definition and the Hetzner bootstrap shape for `burrow-forge`. + +Mail hosting is intentionally not part of this NixOS host in the current plan. Burrow's first mail path is Forward Email with Burrow-owned custom S3 backups; see [`docs/FORWARDEMAIL.md`](../docs/FORWARDEMAIL.md). + +## Files + +- `hosts/burrow-forge/default.nix`: host entrypoint +- `modules/burrow-forge.nix`: Forgejo, Caddy, PostgreSQL, and admin bootstrap module +- `modules/burrow-forge-runner.nix`: Forgejo Actions runner and agent identity bootstrap +- `modules/burrow-forgejo-nsc.nix`: Namespace-backed ephemeral Forgejo runner services +- `hetzner-cloud-config.yaml`: desired Hetzner host shape +- `keys/contact_at_burrow_net.pub`: initial operator SSH public key +- `keys/agent_at_burrow_net.pub`: automation SSH public key +- `../Scripts/hetzner-forge.sh`: Hetzner inventory and replace workflow +- `../Scripts/nsc-build-and-upload-image.sh`: temporary Namespace builder -> raw image -> Hetzner snapshot +- `../Scripts/bootstrap-forge-intake.sh`: copy the Forgejo bootstrap password and agent SSH key into `/var/lib/burrow/intake/` +- `../Scripts/check-forge-host.sh`: verify Forgejo, Caddy, the local runner, and optional NSC services after boot +- `../Scripts/cloudflare-upsert-a-record.sh`: upsert DNS-only Cloudflare `A` records for Burrow host cutovers +- `../Scripts/forge-deploy.sh`: remote `nixos-rebuild` entrypoint for the forge host +- `../Scripts/provision-forgejo-nsc.sh`: render Burrow Namespace dispatcher/autoscaler runtime inputs and ensure the default Forgejo scope exists +- `../Scripts/sync-forgejo-nsc-config.sh`: copy intake-backed dispatcher/autoscaler inputs to the host + 
+
+## Intended Flow
+
+1. Build and upload the raw NixOS image with `Scripts/hetzner-forge.sh build-image` or `Scripts/nsc-build-and-upload-image.sh`.
+2. Recreate `burrow-forge` from the latest labeled snapshot with `Scripts/hetzner-forge.sh recreate-from-image --yes`.
+3. Run `Scripts/bootstrap-forge-intake.sh` to place the Forgejo bootstrap password file and automation SSH key under `/var/lib/burrow/intake/`.
+4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account.
+5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent <agent@burrow.net>`.
+6. Run `Scripts/provision-forgejo-nsc.sh` locally, then `Scripts/sync-forgejo-nsc-config.sh` to place the Namespace dispatcher/autoscaler runtime inputs under `/var/lib/burrow/intake/`.
+7. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME.
+8. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace.
+9. Configure Forward Email custom S3 backups for `burrow.net` and `burrow.rs` out-of-band with `Tools/forwardemail-custom-s3.sh`.
+
+## Current Constraints
+
+- `burrow-forge` is live on NixOS in `hel1` at `89.167.47.21`, and `Scripts/check-forge-host.sh --expect-nsc` passes locally against that host.
+- Public Burrow forge cutover completed on March 15, 2026: + - `burrow.net`, `git.burrow.net`, and `nsc-autoscaler.burrow.net` now publish public `A` records to `89.167.47.21` + - HTTP redirects to HTTPS on all three names + - `https://burrow.net` returns the root forge landing response + - `https://git.burrow.net` returns the live Forgejo front door + - `https://nsc-autoscaler.burrow.net` terminates TLS on Caddy and returns the expected application-level `404` for `/` +- The Cloudflare token currently in `intake/cloudflare-token.txt` is an account-scoped token: `POST /accounts//tokens/verify` succeeds, while `POST /user/tokens/verify` returns `Invalid API Token`. +- `burrow.rs` still resolves publicly to a Vercel `DEPLOYMENT_NOT_FOUND` response. +- Both domains publish Forward Email MX/TXT records. +- Forward Email custom S3 is live on both domains against the Hetzner `burrow` bucket and the public regional endpoint `https://hel1.your-objectstorage.com`. +- The current Hetzner account contains both: + - the older Ubuntu bootstrap server in `hil` + - the live `burrow-forge` NixOS server in `hel1` +- The remaining forge work is follow-on product/integration work, not host bring-up, mail backup wiring, or public DNS cutover. diff --git a/nixos/hetzner-cloud-config.yaml b/nixos/hetzner-cloud-config.yaml new file mode 100644 index 0000000..7334b3a --- /dev/null +++ b/nixos/hetzner-cloud-config.yaml @@ -0,0 +1,10 @@ +name: burrow-forge +server_type: ccx23 +location: hel1 +image: ubuntu-24.04 +ssh_keys: + - contact@burrow.net + - agent@burrow.net +labels: + project: burrow + role: forge diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix new file mode 100644 index 0000000..d600539 --- /dev/null +++ b/nixos/hosts/burrow-forge/default.nix @@ -0,0 +1,46 @@ +{ self, ... 
}: + +{ + imports = [ + ./hardware-configuration.nix + ./disko-config.nix + self.nixosModules.burrow-forge + self.nixosModules.burrow-forge-runner + self.nixosModules.burrow-forgejo-nsc + ]; + + system.stateVersion = "24.11"; + + time.timeZone = "America/Los_Angeles"; + + nix.settings.experimental-features = [ + "nix-command" + "flakes" + ]; + + services.burrow.forge = { + enable = true; + adminPasswordFile = "/var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt"; + authorizedKeys = [ + (builtins.readFile ../../keys/contact_at_burrow_net.pub) + (builtins.readFile ../../keys/agent_at_burrow_net.pub) + ]; + }; + + services.burrow.forgeRunner = { + enable = true; + sshPrivateKeyFile = "/var/lib/burrow/intake/agent_at_burrow_net_ed25519"; + }; + + services.burrow.forgejoNsc = { + enable = true; + nscTokenFile = "/var/lib/burrow/intake/forgejo_nsc_token.txt"; + dispatcher = { + configFile = "/var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml"; + }; + autoscaler = { + enable = true; + configFile = "/var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml"; + }; + }; +} diff --git a/nixos/hosts/burrow-forge/disko-config.nix b/nixos/hosts/burrow-forge/disko-config.nix new file mode 100644 index 0000000..d001422 --- /dev/null +++ b/nixos/hosts/burrow-forge/disko-config.nix @@ -0,0 +1,36 @@ +{ lib, ... 
}: + +{ + disko.devices = { + disk.main = { + type = "disk"; + device = lib.mkDefault "/dev/sda"; + imageName = "burrow-forge"; + imageSize = "80G"; + content = { + type = "gpt"; + partitions = { + ESP = { + size = "512M"; + type = "EF00"; + content = { + type = "filesystem"; + format = "vfat"; + mountpoint = "/boot"; + mountOptions = [ "umask=0077" ]; + }; + }; + + root = { + size = "100%"; + content = { + type = "filesystem"; + format = "ext4"; + mountpoint = "/"; + }; + }; + }; + }; + }; + }; +} diff --git a/nixos/hosts/burrow-forge/hardware-configuration.nix b/nixos/hosts/burrow-forge/hardware-configuration.nix new file mode 100644 index 0000000..27490e4 --- /dev/null +++ b/nixos/hosts/burrow-forge/hardware-configuration.nix @@ -0,0 +1,11 @@ +{ ... }: + +{ + # Derived from Hetzner Cloud rescue-mode hardware inspection. + boot.initrd.availableKernelModules = [ + "ahci" + "sd_mod" + "virtio_pci" + "virtio_scsi" + ]; +} diff --git a/nixos/keys/agent_at_burrow_net.pub b/nixos/keys/agent_at_burrow_net.pub new file mode 100644 index 0000000..de447b8 --- /dev/null +++ b/nixos/keys/agent_at_burrow_net.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net diff --git a/nixos/keys/contact_at_burrow_net.pub b/nixos/keys/contact_at_burrow_net.pub new file mode 100644 index 0000000..0daa6a3 --- /dev/null +++ b/nixos/keys/contact_at_burrow_net.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa diff --git a/nixos/modules/burrow-forge-runner.nix b/nixos/modules/burrow-forge-runner.nix new file mode 100644 index 0000000..1e183d2 --- /dev/null +++ b/nixos/modules/burrow-forge-runner.nix @@ -0,0 +1,213 @@ +{ config, lib, pkgs, ... 
}: + +let + cfg = config.services.burrow.forgeRunner; + runnerPkg = pkgs.forgejo-runner; + stateDir = cfg.stateDir; + runnerFile = "${stateDir}/.runner"; + configFile = "${stateDir}/runner.yaml"; + labelsCsv = lib.concatStringsSep "," (map (label: "${label}:host") cfg.labels); + sshPrivateKeyFile = cfg.sshPrivateKeyFile or ""; +in +{ + options.services.burrow.forgeRunner = { + enable = lib.mkEnableOption "the Burrow Forgejo Actions runner"; + + instanceUrl = lib.mkOption { + type = lib.types.str; + default = "http://127.0.0.1:3000"; + description = "Forgejo base URL used by the local runner for registration and job polling."; + }; + + labels = lib.mkOption { + type = with lib.types; listOf str; + default = [ "burrow-forge" ]; + description = "Runner labels exposed to Forgejo Actions."; + }; + + name = lib.mkOption { + type = lib.types.str; + default = "burrow-forge-agent"; + description = "Runner name shown in Forgejo."; + }; + + capacity = lib.mkOption { + type = lib.types.int; + default = 1; + description = "Maximum concurrent jobs on this runner."; + }; + + stateDir = lib.mkOption { + type = lib.types.str; + default = "/var/lib/forgejo-runner-agent"; + description = "Persistent runner state directory."; + }; + + user = lib.mkOption { + type = lib.types.str; + default = "forgejo-runner-agent"; + description = "System user that runs the Forgejo runner."; + }; + + group = lib.mkOption { + type = lib.types.str; + default = "forgejo-runner-agent"; + description = "System group that runs the Forgejo runner."; + }; + + forgejoConfigFile = lib.mkOption { + type = lib.types.str; + default = "/var/lib/forgejo/custom/conf/app.ini"; + description = "Forgejo app.ini path used to generate runner tokens."; + }; + + gitUserName = lib.mkOption { + type = lib.types.str; + default = "agent"; + description = "Git commit author name for automation on the forge host."; + }; + + gitUserEmail = lib.mkOption { + type = lib.types.str; + default = "agent@burrow.net"; + description = "Git 
commit author email for automation on the forge host."; + }; + + sshPrivateKeyFile = lib.mkOption { + type = with lib.types; nullOr str; + default = null; + description = "Optional host-local path to the agent SSH private key copied into the runner home."; + }; + }; + + config = lib.mkIf cfg.enable { + users.groups.${cfg.group} = { }; + + users.users.${cfg.user} = { + isSystemUser = true; + group = cfg.group; + description = "Burrow Forgejo Actions runner"; + home = cfg.stateDir; + createHome = true; + shell = pkgs.bashInteractive; + }; + + environment.systemPackages = with pkgs; [ + runnerPkg + bash + coreutils + findutils + git + git-lfs + openssh + python3 + rsync + ]; + + systemd.tmpfiles.rules = [ + "d ${stateDir} 0750 ${cfg.user} ${cfg.group} - -" + ]; + + systemd.services.burrow-forgejo-runner-bootstrap = { + description = "Bootstrap Burrow Forgejo runner registration"; + after = [ "forgejo.service" "network-online.target" "systemd-tmpfiles-setup.service" ]; + wants = [ "forgejo.service" "network-online.target" "systemd-tmpfiles-setup.service" ]; + before = [ "burrow-forgejo-runner.service" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + umask 077 + + install -d -m 0750 -o ${cfg.user} -g ${cfg.group} ${stateDir} + cat > ${configFile} <> ${configFile} + done + cat >> ${configFile} <<'EOF' +cache: + enabled: false +EOF + chown ${cfg.user}:${cfg.group} ${configFile} + chmod 0640 ${configFile} + + install -d -m 0700 -o ${cfg.user} -g ${cfg.group} ${stateDir}/.ssh + ${pkgs.util-linux}/bin/runuser -u ${cfg.user} -- \ + ${pkgs.git}/bin/git config --global user.name ${lib.escapeShellArg cfg.gitUserName} + ${pkgs.util-linux}/bin/runuser -u ${cfg.user} -- \ + ${pkgs.git}/bin/git config --global user.email ${lib.escapeShellArg cfg.gitUserEmail} + + if [ -n ${lib.escapeShellArg sshPrivateKeyFile} ] && [ -s ${lib.escapeShellArg sshPrivateKeyFile} ]; then + install 
-m 0600 -o ${cfg.user} -g ${cfg.group} \ + ${lib.escapeShellArg sshPrivateKeyFile} \ + ${stateDir}/.ssh/id_ed25519 + cat > ${stateDir}/.ssh/config <&2 + exit 1 + fi + + ${pkgs.util-linux}/bin/runuser -u ${cfg.user} -- \ + ${runnerPkg}/bin/forgejo-runner register \ + --no-interactive \ + --instance ${lib.escapeShellArg cfg.instanceUrl} \ + --token "${"$"}token" \ + --name ${lib.escapeShellArg cfg.name} \ + --labels ${lib.escapeShellArg labelsCsv} \ + --config ${configFile} + fi + ''; + }; + + systemd.services.burrow-forgejo-runner = { + description = "Burrow Forgejo Actions runner"; + after = [ "burrow-forgejo-runner-bootstrap.service" ]; + wants = [ "burrow-forgejo-runner-bootstrap.service" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + Type = "simple"; + User = cfg.user; + Group = cfg.group; + WorkingDirectory = stateDir; + Restart = "on-failure"; + RestartSec = 2; + ExecStart = pkgs.writeShellScript "burrow-forgejo-runner" '' + set -euo pipefail + export PATH="/run/wrappers/bin:/run/current-system/sw/bin:${"$"}{PATH:-}" + tmp="$(${pkgs.coreutils}/bin/mktemp)" + set +e + ${runnerPkg}/bin/forgejo-runner daemon --config ${configFile} 2>&1 | ${pkgs.coreutils}/bin/tee "${"$"}tmp" + rc="${"$"}{PIPESTATUS[0]}" + set -e + if ${pkgs.gnugrep}/bin/grep -qi "unregistered runner" "${"$"}tmp"; then + rm -f ${runnerFile} + fi + rm -f "${"$"}tmp" + exit "${"$"}rc" + ''; + }; + }; + }; +} diff --git a/nixos/modules/burrow-forge.nix b/nixos/modules/burrow-forge.nix new file mode 100644 index 0000000..e02475f --- /dev/null +++ b/nixos/modules/burrow-forge.nix @@ -0,0 +1,247 @@ +{ config, lib, pkgs, ... 
}: + +let + cfg = config.services.burrow.forge; + forgejoCfg = config.services.forgejo; + forgejoExe = lib.getExe forgejoCfg.package; + forgejoWorkPath = forgejoCfg.stateDir; + forgejoCustomPath = "${forgejoWorkPath}/custom"; + forgejoConfigFile = "${forgejoCustomPath}/conf/app.ini"; + forgejoAdminArgs = "--config ${lib.escapeShellArg forgejoConfigFile} --work-path ${lib.escapeShellArg forgejoWorkPath} --custom-path ${lib.escapeShellArg forgejoCustomPath}"; + homeRepoPath = "/${cfg.homeOwner}/${cfg.homeRepo}"; + homeRepoUrl = "https://${cfg.gitDomain}${homeRepoPath}"; +in +{ + options.services.burrow.forge = { + enable = lib.mkEnableOption "the Burrow Forge host"; + + gitDomain = lib.mkOption { + type = lib.types.str; + default = "git.burrow.net"; + description = "Public Forgejo domain."; + }; + + siteDomain = lib.mkOption { + type = lib.types.str; + default = "burrow.net"; + description = "Root site domain."; + }; + + homeOwner = lib.mkOption { + type = lib.types.str; + default = "hackclub"; + description = "Canonical Forgejo org/user for the Burrow home repository."; + }; + + homeRepo = lib.mkOption { + type = lib.types.str; + default = "burrow"; + description = "Canonical Forgejo repository name for the Burrow home repository."; + }; + + contactEmail = lib.mkOption { + type = lib.types.str; + default = "contact@burrow.net"; + description = "Operator contact email."; + }; + + nscAutoscalerDomain = lib.mkOption { + type = lib.types.str; + default = "nsc-autoscaler.burrow.net"; + description = "Public webhook domain for the Forgejo Namespace autoscaler."; + }; + + adminUsername = lib.mkOption { + type = lib.types.str; + default = "contact"; + description = "Initial Forgejo admin username."; + }; + + adminEmail = lib.mkOption { + type = lib.types.str; + default = "contact@burrow.net"; + description = "Initial Forgejo admin email."; + }; + + adminPasswordFile = lib.mkOption { + type = lib.types.str; + description = "Host-local path to the plaintext bootstrap password 
file for the initial Forgejo admin."; + }; + + authorizedKeys = lib.mkOption { + type = with lib.types; listOf str; + default = [ ]; + description = "SSH keys allowed for root login and operational bootstrap."; + }; + }; + + config = lib.mkIf cfg.enable { + networking.hostName = "burrow-forge"; + networking.useDHCP = lib.mkDefault true; + + services.qemuGuest.enable = true; + + boot.loader.grub = { + enable = true; + efiSupport = true; + efiInstallAsRemovable = true; + device = "nodev"; + }; + + fileSystems."/boot".neededForBoot = true; + + services.postgresql = { + enable = true; + package = pkgs.postgresql_16; + }; + + services.openssh = { + enable = true; + settings = { + PasswordAuthentication = false; + KbdInteractiveAuthentication = false; + PermitRootLogin = "prohibit-password"; + }; + }; + + users.users.root.openssh.authorizedKeys.keys = cfg.authorizedKeys; + + networking.firewall.allowedTCPPorts = [ + 22 + 80 + 443 + 2222 + ]; + + services.forgejo = { + enable = true; + database = { + type = "postgres"; + createDatabase = true; + }; + lfs.enable = true; + settings = { + server = { + DOMAIN = cfg.gitDomain; + ROOT_URL = "https://${cfg.gitDomain}/"; + HTTP_PORT = 3000; + SSH_DOMAIN = cfg.gitDomain; + SSH_PORT = 2222; + START_SSH_SERVER = true; + }; + + service = { + DISABLE_REGISTRATION = true; + REQUIRE_SIGNIN_VIEW = false; + DEFAULT_ALLOW_CREATE_ORGANIZATION = false; + ENABLE_NOTIFY_MAIL = false; + NO_REPLY_ADDRESS = cfg.adminEmail; + }; + + session = { + COOKIE_SECURE = true; + SAME_SITE = "strict"; + }; + + openid = { + ENABLE_OPENID_SIGNIN = false; + ENABLE_OPENID_SIGNUP = false; + }; + + actions = { + ENABLED = true; + }; + + repository = { + DEFAULT_BRANCH = "main"; + ENABLE_PUSH_CREATE_USER = false; + }; + + ui = { + DEFAULT_THEME = "forgejo-auto"; + }; + }; + }; + + services.caddy = { + enable = true; + email = cfg.contactEmail; + virtualHosts = + { + "${cfg.gitDomain}".extraConfig = '' + encode gzip zstd + @root path / + redir @root ${homeRepoPath} 
308 + reverse_proxy 127.0.0.1:${toString config.services.forgejo.settings.server.HTTP_PORT} + ''; + "${cfg.siteDomain}".extraConfig = '' + @root path / + redir @root ${homeRepoUrl} 308 + respond 404 + ''; + } + // lib.optionalAttrs ( + config.services.burrow.forgejoNsc.enable && config.services.burrow.forgejoNsc.autoscaler.enable + ) { + "${cfg.nscAutoscalerDomain}".extraConfig = '' + encode gzip zstd + reverse_proxy 127.0.0.1:8090 + ''; + }; + }; + + systemd.services.burrow-forgejo-bootstrap = { + description = "Seed the initial Burrow Forgejo admin account"; + after = [ "forgejo.service" ]; + requires = [ "forgejo.service" ]; + wantedBy = [ "multi-user.target" ]; + path = [ + forgejoCfg.package + pkgs.coreutils + pkgs.gnugrep + ]; + serviceConfig = { + Type = "oneshot"; + User = forgejoCfg.user; + Group = forgejoCfg.group; + WorkingDirectory = forgejoCfg.stateDir; + }; + script = '' + set -euo pipefail + + if [ ! -s ${lib.escapeShellArg cfg.adminPasswordFile} ]; then + echo "bootstrap password file is missing; skipping admin bootstrap" >&2 + exit 0 + fi + + password="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.adminPasswordFile})" + if [ -z "$password" ]; then + echo "bootstrap password file is empty; skipping admin bootstrap" >&2 + exit 0 + fi + + log_file="$(mktemp)" + trap 'rm -f "$log_file"' EXIT + + if ! 
${forgejoExe} admin user create \ + ${forgejoAdminArgs} \ + --admin \ + --username ${lib.escapeShellArg cfg.adminUsername} \ + --email ${lib.escapeShellArg cfg.adminEmail} \ + --password "$password" \ + --must-change-password=false >"$log_file" 2>&1; then + if grep -qi "already exists" "$log_file"; then + ${forgejoExe} admin user change-password \ + ${forgejoAdminArgs} \ + --username ${lib.escapeShellArg cfg.adminUsername} \ + --password "$password" \ + --must-change-password=false + else + cat "$log_file" >&2 + exit 1 + fi + fi + ''; + }; + }; +} diff --git a/nixos/modules/burrow-forgejo-nsc.nix b/nixos/modules/burrow-forgejo-nsc.nix new file mode 100644 index 0000000..ba116f7 --- /dev/null +++ b/nixos/modules/burrow-forgejo-nsc.nix @@ -0,0 +1,234 @@ +{ config, lib, pkgs, self, ... }: + +let + inherit (lib) + mkEnableOption + mkIf + mkOption + types + mkAfter + mkDefault + optional + optionalAttrs + optionalString + ; + + cfg = config.services.burrow.forgejoNsc; + dispatcherRuntimeConfig = "${cfg.stateDir}/dispatcher.yaml"; + autoscalerRuntimeConfig = "${cfg.stateDir}/autoscaler.yaml"; + + pendingCheck = configPath: pkgs.writeShellScript "forgejo-nsc-check-pending" '' + set -euo pipefail + if ${pkgs.gnugrep}/bin/grep -q 'PENDING-' '${configPath}'; then + echo "forgejo-nsc config still contains placeholder values (PENDING-); update ${configPath} before starting." 
>&2 + exit 1 + fi + ''; + + nscTokenPath = "${cfg.stateDir}/nsc.token"; + tokenSync = optionalString (cfg.nscTokenFile != null) '' + install -m 600 ${lib.escapeShellArg cfg.nscTokenFile} ${lib.escapeShellArg nscTokenPath} + chown ${cfg.user}:${cfg.group} ${nscTokenPath} + chmod 600 ${nscTokenPath} + ''; + dispatcherConfigSync = optionalString (cfg.dispatcher.configFile != null) '' + install -m 400 ${lib.escapeShellArg cfg.dispatcher.configFile} ${lib.escapeShellArg dispatcherRuntimeConfig} + chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg dispatcherRuntimeConfig} + chmod 400 ${lib.escapeShellArg dispatcherRuntimeConfig} + ''; + autoscalerConfigSync = optionalString (cfg.autoscaler.configFile != null) '' + install -m 400 ${lib.escapeShellArg cfg.autoscaler.configFile} ${lib.escapeShellArg autoscalerRuntimeConfig} + chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg autoscalerRuntimeConfig} + chmod 400 ${lib.escapeShellArg autoscalerRuntimeConfig} + ''; + + dispatcherEnv = + cfg.extraEnv + // optionalAttrs (cfg.nscTokenFile != null) { NSC_TOKEN_FILE = nscTokenPath; } + // optionalAttrs (cfg.nscTokenSpecFile != null) { NSC_TOKEN_SPEC_FILE = cfg.nscTokenSpecFile; } + // optionalAttrs (cfg.nscEndpoint != null) { NSC_ENDPOINT = cfg.nscEndpoint; }; +in { + options.services.burrow.forgejoNsc = { + enable = mkEnableOption "Forgejo Namespace Cloud runner dispatcher"; + + user = mkOption { + type = types.str; + default = "forgejo-nsc"; + description = "System user that runs the forgejo-nsc services."; + }; + + group = mkOption { + type = types.str; + default = "forgejo-nsc"; + description = "System group for the forgejo-nsc services."; + }; + + stateDir = mkOption { + type = types.str; + default = "/var/lib/forgejo-nsc"; + description = "State directory for the dispatcher/autoscaler."; + }; + + nscTokenFile = mkOption { + type = types.nullOr types.str; + default = null; + description = "Optional NSC token file (exported as NSC_TOKEN_FILE)."; + }; + + nscTokenSpecFile = 
mkOption { + type = types.nullOr types.str; + default = null; + description = "Optional NSC token spec file (exported as NSC_TOKEN_SPEC_FILE)."; + }; + + nscEndpoint = mkOption { + type = types.nullOr types.str; + default = null; + description = "Optional NSC endpoint override (exported as NSC_ENDPOINT)."; + }; + + extraEnv = mkOption { + type = types.attrsOf types.str; + default = { }; + description = "Extra environment variables injected into the services."; + }; + + nscPackage = mkOption { + type = types.nullOr types.package; + default = self.packages.${pkgs.stdenv.hostPlatform.system}.nsc or null; + description = "Optional nsc CLI package added to the service PATH."; + }; + + dispatcher = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable the forgejo-nsc dispatcher service."; + }; + + package = mkOption { + type = types.package; + default = self.packages.${pkgs.stdenv.hostPlatform.system}.forgejo-nsc-dispatcher; + description = "Package providing the forgejo-nsc dispatcher binary."; + }; + + configFile = mkOption { + type = types.nullOr types.str; + default = null; + description = "Host-local YAML config file for the dispatcher."; + }; + + allowPending = mkOption { + type = types.bool; + default = false; + description = "Allow placeholder values (PENDING-) in the dispatcher config."; + }; + }; + + autoscaler = { + enable = mkOption { + type = types.bool; + default = false; + description = "Enable the forgejo-nsc autoscaler service."; + }; + + package = mkOption { + type = types.package; + default = self.packages.${pkgs.stdenv.hostPlatform.system}.forgejo-nsc-autoscaler; + description = "Package providing the forgejo-nsc autoscaler binary."; + }; + + configFile = mkOption { + type = types.nullOr types.str; + default = null; + description = "Host-local YAML config file for the autoscaler."; + }; + + allowPending = mkOption { + type = types.bool; + default = false; + description = "Allow placeholder values (PENDING-) in the 
autoscaler config."; + }; + }; + }; + + config = mkIf cfg.enable { + assertions = [ + { + assertion = (!cfg.dispatcher.enable) || cfg.dispatcher.configFile != null; + message = "services.burrow.forgejoNsc.dispatcher.configFile must be set when the dispatcher is enabled."; + } + { + assertion = (!cfg.autoscaler.enable) || cfg.autoscaler.configFile != null; + message = "services.burrow.forgejoNsc.autoscaler.configFile must be set when the autoscaler is enabled."; + } + ]; + + users.groups.${cfg.group} = { }; + users.users.${cfg.user} = { + uid = mkDefault 2011; + isSystemUser = true; + group = cfg.group; + description = "Forgejo Namespace Cloud runner services"; + home = cfg.stateDir; + createHome = true; + shell = pkgs.bashInteractive; + }; + + systemd.tmpfiles.rules = mkAfter [ + "d ${cfg.stateDir} 0750 ${cfg.user} ${cfg.group} - -" + ]; + + systemd.services.forgejo-nsc-dispatcher = mkIf cfg.dispatcher.enable { + description = "Forgejo Namespace Cloud dispatcher"; + wantedBy = [ "multi-user.target" ]; + after = [ "network-online.target" ]; + wants = [ "network-online.target" ]; + unitConfig.ConditionPathExists = + optional (cfg.dispatcher.configFile != null) cfg.dispatcher.configFile + ++ optional (cfg.nscTokenFile != null) cfg.nscTokenFile; + serviceConfig = { + Type = "simple"; + User = cfg.user; + Group = cfg.group; + WorkingDirectory = cfg.stateDir; + ExecStart = "${cfg.dispatcher.package}/bin/forgejo-nsc-dispatcher --config ${dispatcherRuntimeConfig}"; + Restart = "on-failure"; + RestartSec = 5; + }; + path = lib.optional (cfg.nscPackage != null) cfg.nscPackage; + environment = dispatcherEnv; + preStart = lib.concatStringsSep "\n" (lib.filter (s: s != "") [ + (optionalString (!cfg.dispatcher.allowPending) (pendingCheck cfg.dispatcher.configFile)) + dispatcherConfigSync + tokenSync + ]); + }; + + systemd.services.forgejo-nsc-autoscaler = mkIf cfg.autoscaler.enable { + description = "Forgejo Namespace Cloud autoscaler"; + wantedBy = [ "multi-user.target" ]; + 
after = [ "network-online.target" "forgejo-nsc-dispatcher.service" ]; + wants = [ "network-online.target" ]; + unitConfig.ConditionPathExists = + optional (cfg.autoscaler.configFile != null) cfg.autoscaler.configFile + ++ optional (cfg.nscTokenFile != null) cfg.nscTokenFile; + serviceConfig = { + Type = "simple"; + User = cfg.user; + Group = cfg.group; + WorkingDirectory = cfg.stateDir; + ExecStart = "${cfg.autoscaler.package}/bin/forgejo-nsc-autoscaler --config ${autoscalerRuntimeConfig}"; + Restart = "on-failure"; + RestartSec = 5; + }; + path = lib.optional (cfg.nscPackage != null) cfg.nscPackage; + environment = dispatcherEnv; + preStart = lib.concatStringsSep "\n" (lib.filter (s: s != "") [ + (optionalString (!cfg.autoscaler.allowPending) (pendingCheck cfg.autoscaler.configFile)) + autoscalerConfigSync + tokenSync + ]); + }; + }; +} diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 0000000..8f7dc3d --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,4 @@ +[toolchain] +channel = "1.85.0" +components = ["rustfmt"] +profile = "minimal" diff --git a/services/forgejo-nsc/README.md b/services/forgejo-nsc/README.md new file mode 100644 index 0000000..f3959de --- /dev/null +++ b/services/forgejo-nsc/README.md @@ -0,0 +1,183 @@ +## forgejo-nsc-dispatcher + +This service exposes a simple HTTP API that tells Namespace Cloud to start +ephemeral Forgejo Actions runners on demand. It glues together three pieces: + +1. **Forgejo Actions** – the service requests a scoped registration token + for the repository/organization/instance where you want to run jobs. +2. **Namespace (`nsc`)** – the dispatcher shells out to the `nsc` CLI to create + a short‑lived environment, runs the `forgejo-runner` container inside it, + and exits after a single job (`forgejo-runner one-job`). The Namespace TTL is + the hard cap, not the typical lifetime. +3. **Your automation** – you call the service via HTTP (directly, through Caddy, + via Forgejo webhooks, etc.) 
whenever a new runner is needed.
+
+### Directory layout
+
+```
+.
+├── cmd/forgejo-nsc-dispatcher # main entry point
+├── internal/ # service packages (config, forgejo client, nsc dispatcher, HTTP server)
+├── config.example.yaml # starter config referenced by README
+├── flake.nix / flake.lock # reproducible builds (Go binary + container image)
+└── .forgejo/workflows # CI that runs go test/build and publishes manifests
+```
+
+### Configuration
+
+Copy `config.example.yaml` and update it for your Forgejo instance and Namespace
+profile. The important knobs are:
+
+- `forgejo.base_url` – HTTPS endpoint of your Forgejo server. A PAT with
+  `actions:runner` scope is required in `forgejo.token`.
+- `forgejo.instance_url` – URL that spawned runners use to register back to Forgejo.
+  This must be reachable from the runner (typically the public URL like
+  `https://git.burrow.net`). On the forge host it commonly differs from `base_url`
+  (which may be `http://127.0.0.1:3000`).
+- `forgejo.default_scope` – where new runners register
+  (`instance`, `organization`, or `repository`).
+- `forgejo.default_labels` – labels applied to every spawned runner. Gate Forgejo
+  workflows via `runs-on: ["namespace-profile-linux-medium"]` (or other
+  `namespace-profile-linux-*` labels).
+- `namespace.nsc_binary` – path to the `nsc` binary (the Nix container ships one
+  compiled from `namespacelabs/foundation` so `/app/bin/nsc` works out of the box).
+- `namespace.image` – OCI image containing `forgejo-runner`.
+- `namespace.machine_type` / `namespace.duration` – shape + TTL for the ephemeral
+  Namespace environment. The dispatcher destroys the instance after a job so the
+  TTL acts as a hard cap, not an idle timeout.
+
+### Running locally
+
+```shell
+# Ensure nsc is available (e.g.
`go build ./foundation/cmd/nsc`)
+cp config.example.yaml config.yaml
+nix develop # optional dev shell with Go toolchain
+go run ./cmd/forgejo-nsc-dispatcher --config config.yaml
+```
+
+API example:
+
+```shell
+curl -X POST http://localhost:8080/api/v1/dispatch \
+  -H 'Content-Type: application/json' \
+  -d '{
+    "count": 1,
+    "ttl": "20m",
+    "labels": ["namespace-profile-linux-medium"],
+    "scope": {"level": "repository", "owner": "example", "name": "app"}
+  }'
+```
+
+### Deploying with Nix + GHCR
+
+- `nix build .#packages.x86_64-linux.container-amd64` produces a deterministic
+  tarball containing the service, the `nsc` binary, BusyBox, and `forgejo-runner`.
+- The included `Build Container` workflow builds both `amd64` and `arm64` images
+  on Namespace runners and pushes them to `ghcr.io/<owner>/<repo>`.
+  No Fly.io manifests are emitted – the multi‑arch manifest points only at GHCR.
+
+### How this fits behind Caddy (last-mile networking)
+
+The dispatcher is just an HTTP server. You can:
+
+1. Run it anywhere that can reach Forgejo and Namespace: bare metal, Namespace
+   cluster, Kubernetes, Fly, etc.
+2. Put Caddy (or any reverse proxy) in front to terminate TLS, do auth, or
+   rewrite URLs. For example:
+
+   ```
+   forgejo-dispatcher.example.com {
+     reverse_proxy 127.0.0.1:8080
+     basicauth /api/* {
+       user JDJhJDE...
+     }
+   }
+   ```
+
+The service doesn’t assume Caddy, nor does it manipulate HTTP clients
+directly – it simply waits for POST requests. As long as the dispatcher can
+reach Forgejo’s REST API and run the `nsc` binary, you can drop it anywhere.
+
+### Autoscaling (webhook + poller)
+
+If you don’t want to call `/api/v1/dispatch` manually, there’s a companion
+autoscaler (`cmd/forgejo-nsc-autoscaler`) that watches Forgejo job queues and
+triggers the dispatcher for you. It operates in two modes simultaneously:
+
+1. **Polling** – every instance polls `GET /api/v1/.../actions/runners` to keep a
+   minimum number of idle Namespace runners per label.
This continues until a + webhook is successfully processed, so the system is self-bootstrapping. +2. **Webhooks** – once Forgejo reaches the autoscaler via the `/webhook/{name}` + endpoint, the autoscaler stops polling and reacts to `workflow_job` events in + real time. Each payload is mapped to a target label set and results in a + dispatch call. + +You can manage multiple Forgejo instances by listing them under `instances` in +`autoscaler.example.yaml`: + +``` +listen: ":8090" +dispatcher: + url: "http://dispatcher:8080" + +instances: +- name: burrow + forgejo: + base_url: "https://git.burrow.net" + token: "PENDING-FORGEJO-PAT" + scope: + level: "repository" + owner: "hackclub" + name: "burrow" + disable_polling: true # webhook-only mode + poll_interval: "30s" + webhook_secret: "supersecret" + webhook: + url: "https://nsc-autoscaler.burrow.net/webhook/burrow" + content_type: "json" + events: ["workflow_job"] + active: true + targets: + - labels: ["namespace-profile-linux-medium"] + min_idle: 0 # set to 0 to scale-to-zero between jobs + ttl: "20m" + - labels: ["namespace-profile-macos-large"] + min_idle: 0 + ttl: "90m" + machine_type: "12x28" + - labels: ["namespace-profile-windows-large"] + min_idle: 0 + ttl: "45m" + machine_type: "windows/amd64:8x16" +``` + +For Burrow, use `Scripts/provision-forgejo-nsc.sh` to mint the Forgejo PAT, +generate a Namespace token from the logged-in namespace account, and render the +dispatcher/autoscaler configs into `intake/forgejo_nsc_{dispatcher,autoscaler}.yaml` +plus `intake/forgejo_nsc_token.txt`. + +For ongoing operations, use `Scripts/sync-forgejo-nsc-config.sh`: + +- `Scripts/sync-forgejo-nsc-config.sh` copies the intake-backed configs and + Namespace token onto `/var/lib/burrow/intake/` on the forge host, reapplies + file ownership for `forgejo-nsc`, and restarts the dispatcher/autoscaler. 
+- `Scripts/sync-forgejo-nsc-config.sh --rotate-pat` additionally mints a new + Forgejo PAT on the Burrow forge host and refreshes the local intake files. + +Run it next to the dispatcher: + +```bash +go run ./cmd/forgejo-nsc-autoscaler --config autoscaler.yaml +# or build the binary/container via `nix build .#forgejo-nsc-autoscaler` +``` + +If your Forgejo build doesn’t expose the runner listing API, set +`disable_polling: true` and rely on `webhook` entries. The autoscaler will +auto-create/update the webhook (using the PAT) so that new `workflow_job` events +immediately call the dispatcher even if the service isn’t publicly reachable yet. + +In Forgejo add a webhook pointing to `https://nsc-autoscaler.burrow.net/webhook/burrow` +with the shared secret (or let the autoscaler create it by specifying `webhook.url` +in config). The autoscaler continues polling until it receives the first valid +webhook (unless disabled), so you get capacity immediately even if outbound +webhooks from Forgejo aren’t yet configured. 
diff --git a/services/forgejo-nsc/autoscaler.example.yaml b/services/forgejo-nsc/autoscaler.example.yaml new file mode 100644 index 0000000..db7738e --- /dev/null +++ b/services/forgejo-nsc/autoscaler.example.yaml @@ -0,0 +1,34 @@ +listen: ":8090" +dispatcher: + url: "http://localhost:8080" + +instances: + - name: burrow + forgejo: + base_url: "https://git.burrow.net" + token: "PENDING-FORGEJO-PAT" + scope: + level: "repository" + owner: "hackclub" + name: "burrow" + disable_polling: true + poll_interval: "30s" + webhook_secret: "supersecret" + webhook: + url: "https://nsc-autoscaler.burrow.net/webhook/burrow" + content_type: "json" + events: ["workflow_job"] + active: true + targets: + - labels: ["namespace-profile-linux-medium"] + min_idle: 1 + ttl: "20m" + machine_type: "4x8" + - labels: ["namespace-profile-macos-large"] + min_idle: 0 + ttl: "90m" + machine_type: "12x28" + - labels: ["namespace-profile-windows-large"] + min_idle: 0 + ttl: "45m" + machine_type: "windows/amd64:8x16" diff --git a/services/forgejo-nsc/cmd/forgejo-nsc-autoscaler/main.go b/services/forgejo-nsc/cmd/forgejo-nsc-autoscaler/main.go new file mode 100644 index 0000000..bdbb6f8 --- /dev/null +++ b/services/forgejo-nsc/cmd/forgejo-nsc-autoscaler/main.go @@ -0,0 +1,46 @@ +package main + +import ( + "context" + "flag" + "log/slog" + "os" + "os/signal" + "syscall" + + "namespacelabs.dev/foundation/std/tasks" + "namespacelabs.dev/foundation/std/tasks/simplelog" + + "github.com/burrow/forgejo-nsc/internal/autoscaler" +) + +func main() { + var configPath string + flag.StringVar(&configPath, "config", "autoscaler.yaml", "Path to the autoscaler config file") + flag.Parse() + + logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo})) + + cfg, err := autoscaler.LoadConfig(configPath) + if err != nil { + logger.Error("failed to load config", "error", err) + os.Exit(1) + } + + service, err := autoscaler.NewService(cfg) + if err != nil { + logger.Error("failed to 
initialize autoscaler", "error", err) + os.Exit(1) + } + + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() + ctx = tasks.WithSink(ctx, simplelog.NewSink(os.Stdout, 0)) + + if err := tasks.Action("autoscaler.run").Run(ctx, func(ctx context.Context) error { + return service.Start(ctx) + }); err != nil { + logger.Error("autoscaler exited", "error", err) + os.Exit(1) + } +} diff --git a/services/forgejo-nsc/cmd/forgejo-nsc-dispatcher/main.go b/services/forgejo-nsc/cmd/forgejo-nsc-dispatcher/main.go new file mode 100644 index 0000000..9dcbfb1 --- /dev/null +++ b/services/forgejo-nsc/cmd/forgejo-nsc-dispatcher/main.go @@ -0,0 +1,90 @@ +package main + +import ( + "context" + "flag" + "log/slog" + "net/http" + "os" + "os/signal" + "syscall" + "time" + + "github.com/burrow/forgejo-nsc/internal/app" + "github.com/burrow/forgejo-nsc/internal/config" + "github.com/burrow/forgejo-nsc/internal/forgejo" + "github.com/burrow/forgejo-nsc/internal/nsc" + "github.com/burrow/forgejo-nsc/internal/server" +) + +func main() { + var configPath string + flag.StringVar(&configPath, "config", "config.yaml", "Path to the dispatcher config file.") + flag.Parse() + + logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo})) + + cfg, err := config.Load(configPath) + if err != nil { + logger.Error("failed to load config", "error", err) + os.Exit(1) + } + + scope, err := cfg.Forgejo.DefaultScope.ToScope() + if err != nil { + logger.Error("invalid default scope", "error", err) + os.Exit(1) + } + + forgejoClient, err := forgejo.NewClient(cfg.Forgejo.BaseURL, cfg.Forgejo.Token) + if err != nil { + logger.Error("failed to create forgejo client", "error", err) + os.Exit(1) + } + + dispatcher, err := nsc.NewDispatcher(nsc.Options{ + BinaryPath: cfg.Namespace.NSCBinary, + ComputeBaseURL: cfg.Namespace.ComputeBaseURL, + DefaultImage: cfg.Namespace.Image, + DefaultMachine: cfg.Namespace.MachineType, + 
MacosBaseImageID: cfg.Namespace.MacosBaseImageID, + MacosMachineArch: cfg.Namespace.MacosMachineArch, + DefaultDuration: cfg.Namespace.Duration.Duration, + WorkDir: cfg.Namespace.WorkDir, + MaxParallel: cfg.Namespace.MaxParallel, + RunnerNamePrefix: cfg.Runner.NamePrefix, + Executor: cfg.Runner.Executor, + Network: cfg.Namespace.Network, + Logger: logger, + }) + if err != nil { + logger.Error("failed to create dispatcher", "error", err) + os.Exit(1) + } + + service := app.NewService(app.Config{ + DefaultScope: scope, + DefaultLabels: cfg.Forgejo.DefaultLabels, + InstanceURL: cfg.Forgejo.InstanceURL, + DefaultTTL: cfg.Namespace.Duration.Duration, + AllowLabels: cfg.Namespace.AllowLabels, + AllowScopes: cfg.Namespace.AllowScopes, + }, forgejoClient, dispatcher, logger) + + srv := server.New(cfg.Listen, service, logger) + + go func() { + logger.Info("dispatcher listening", "addr", cfg.Listen) + if err := srv.ListenAndServe(); err != nil && err != context.Canceled && err != http.ErrServerClosed { + logger.Error("server terminated", "error", err) + } + }() + + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, syscall.SIGTERM, syscall.SIGINT) + <-interrupt + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + _ = srv.Shutdown(ctx) +} diff --git a/services/forgejo-nsc/config.example.yaml b/services/forgejo-nsc/config.example.yaml new file mode 100644 index 0000000..5dc7551 --- /dev/null +++ b/services/forgejo-nsc/config.example.yaml @@ -0,0 +1,27 @@ +listen: ":8080" + +forgejo: + base_url: "https://forgejo.example.com" + token: "${FORGEJO_PERSONAL_ACCESS_TOKEN}" + default_scope: + level: "organization" + owner: "example" + default_labels: + - namespace-profile-linux-medium + timeout: "30s" + +namespace: + nsc_binary: "/app/bin/nsc" + compute_base_url: "https://ord4.compute.namespaceapis.com" + image: "ghcr.io/forgejo/runner:3" + machine_type: "8x16" + macos_base_image_id: "tahoe" + macos_machine_arch: "arm64" + 
duration: "30m" + workdir: "/var/lib/forgejo-runner" + max_parallel: 4 + network: "" + +runner: + name_prefix: "nscloud-" + executor: "shell" diff --git a/services/forgejo-nsc/deploy/autoscaler.yaml b/services/forgejo-nsc/deploy/autoscaler.yaml new file mode 100644 index 0000000..fae0d37 --- /dev/null +++ b/services/forgejo-nsc/deploy/autoscaler.yaml @@ -0,0 +1,35 @@ +listen: "127.0.0.1:8090" + +dispatcher: + url: "http://127.0.0.1:8080" + +instances: + - name: burrow + forgejo: + base_url: "http://127.0.0.1:3000" + token: "PENDING-FORGEJO-PAT" + scope: + level: "repository" + owner: "hackclub" + name: "burrow" + disable_polling: false + poll_interval: "30s" + webhook_secret: "PENDING-WEBHOOK-SECRET" + webhook: + url: "https://nsc-autoscaler.burrow.net/webhook/burrow" + content_type: "json" + events: ["workflow_job"] + active: true + targets: + - labels: ["namespace-profile-linux-medium"] + min_idle: 0 + ttl: "20m" + machine_type: "4x8" + - labels: ["namespace-profile-macos-large"] + min_idle: 0 + ttl: "90m" + machine_type: "12x28" + - labels: ["namespace-profile-windows-large"] + min_idle: 0 + ttl: "45m" + machine_type: "windows/amd64:8x16" diff --git a/services/forgejo-nsc/deploy/dispatcher.yaml b/services/forgejo-nsc/deploy/dispatcher.yaml new file mode 100644 index 0000000..fe58994 --- /dev/null +++ b/services/forgejo-nsc/deploy/dispatcher.yaml @@ -0,0 +1,37 @@ +listen: "127.0.0.1:8080" + +forgejo: + base_url: "http://127.0.0.1:3000" + instance_url: "https://git.burrow.net" + token: "PENDING-FORGEJO-PAT" + default_scope: + level: "repository" + owner: "hackclub" + name: "burrow" + default_labels: + - namespace-profile-linux-medium + timeout: "30s" + +namespace: + nsc_binary: "/run/current-system/sw/bin/nsc" + compute_base_url: "https://ord4.compute.namespaceapis.com" + image: "code.forgejo.org/forgejo/runner:11" + machine_type: "4x8" + macos_base_image_id: "tahoe" + macos_machine_arch: "arm64" + duration: "30m" + workdir: "/var/lib/forgejo-runner" + 
max_parallel: 4 + allow_labels: + - namespace-profile-linux-medium + - namespace-profile-macos-large + - namespace-profile-windows-large + allow_scopes: + - "repository:hackclub/burrow" + instance_tags: + - "burrow" + network: "" + +runner: + name_prefix: "nscloud-" + executor: "shell" diff --git a/services/forgejo-nsc/go.mod b/services/forgejo-nsc/go.mod new file mode 100644 index 0000000..215aac1 --- /dev/null +++ b/services/forgejo-nsc/go.mod @@ -0,0 +1,65 @@ +module github.com/burrow/forgejo-nsc + +go 1.24.4 + +require ( + buf.build/gen/go/namespace/cloud/connectrpc/go v1.19.1-20260212004106-290ae81f8d6d.2 + buf.build/gen/go/namespace/cloud/protocolbuffers/go v1.36.11-20260212004106-290ae81f8d6d.1 + connectrpc.com/connect v1.19.1 + github.com/go-chi/chi/v5 v5.2.1 + github.com/google/uuid v1.6.0 + golang.org/x/crypto v0.48.0 + golang.org/x/sync v0.19.0 + google.golang.org/protobuf v1.36.11 + gopkg.in/yaml.v3 v3.0.1 + namespacelabs.dev/foundation v0.0.478 +) + +require ( + github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/jxskiss/base62 v1.1.0 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/magiconair/properties v1.8.6 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mattn/go-zglob v0.0.3 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/muesli/reflow v0.3.0 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/rivo/uniseg v0.4.2 // indirect + github.com/segmentio/ksuid v1.0.4 // indirect + github.com/spf13/afero v1.9.2 // indirect + github.com/spf13/cast v1.7.0 // 
indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.7 // indirect + github.com/spf13/viper v1.14.0 // indirect + github.com/subosito/gotenv v1.4.1 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.1 // indirect + golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect + golang.org/x/net v0.49.0 // indirect + golang.org/x/sys v0.41.0 // indirect + golang.org/x/term v0.40.0 // indirect + golang.org/x/text v0.34.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/grpc v1.76.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + helm.sh/helm/v3 v3.18.4 // indirect + namespacelabs.dev/go-ids v0.0.0-20221124082625-9fc72ee06af7 // indirect +) diff --git a/services/forgejo-nsc/go.sum b/services/forgejo-nsc/go.sum new file mode 100644 index 0000000..6e2a0a9 --- /dev/null +++ b/services/forgejo-nsc/go.sum @@ -0,0 +1,575 @@ +buf.build/gen/go/namespace/cloud/connectrpc/go v1.19.1-20260212004106-290ae81f8d6d.2 h1:XaeFtt6yN8G5q2uYoiTjyshOyai1Q+GzwfEKlxrTzVw= +buf.build/gen/go/namespace/cloud/connectrpc/go v1.19.1-20260212004106-290ae81f8d6d.2/go.mod h1:QvCL7PUDMFotMXVUoWMeRClEEnCbh7S51xHy39mO+H4= +buf.build/gen/go/namespace/cloud/protocolbuffers/go v1.36.11-20260212004106-290ae81f8d6d.1 h1:xTgPJaOj5QNRPAA3nxW3fTz01aAOLr/6SG7C4Iqxm54= 
+buf.build/gen/go/namespace/cloud/protocolbuffers/go v1.36.11-20260212004106-290ae81f8d6d.1/go.mod h1:Il2wpJNQB40Yj3Rmuhg5xKJPSXaZVwij+Q30d1PNuNY= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod 
h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14= +connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= 
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-chi/chi/v5 v5.2.1 h1:KOIHODQj58PmL80G2Eak4WdvUzjSJSm0vG72crDCqb8= +github.com/go-chi/chi/v5 v5.2.1/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf 
v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= 
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jxskiss/base62 v1.1.0 h1:A5zbF8v8WXx2xixnAKD2w+abC+sIzYJX+nxmhA6HWFw= +github.com/jxskiss/base62 v1.1.0/go.mod h1:HhWAlUXvxKThfOlZbcuFzsqwtF5TcqS9ru3y5GfjWAc= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-zglob v0.0.3 
h1:6Ry4EYsScDyt5di4OI6xw1bYhOqfE5S33Z1OPy+d+To= +github.com/mattn/go-zglob v0.0.3/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8= +github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= 
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= +github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= +github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU= +github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.11.1 
h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= +github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= +go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc 
v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= +golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU= +golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 
v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= +golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= +golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= 
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= 
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto 
v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= 
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= +google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +helm.sh/helm/v3 v3.18.4 h1:pNhnHM3nAmDrxz6/UC+hfjDY4yeDATQCka2/87hkZXQ= +helm.sh/helm/v3 v3.18.4/go.mod h1:WVnwKARAw01iEdjpEkP7Ii1tT1pTPYfM1HsakFKM3LI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +namespacelabs.dev/foundation v0.0.478 h1:3xFLZcrjih7Jjey2N7faSfr6EoBCg2LMTHipq/3Hlrg= +namespacelabs.dev/foundation v0.0.478/go.mod h1:svBrTIfZK773sytmjudGkCzQWNisxcQtcWNCs+uLznI= +namespacelabs.dev/go-ids v0.0.0-20221124082625-9fc72ee06af7 h1:8NlnfPlzDSJr8TYV/qarIWwhjLd1gOXf3Jme0M/oGBM= +namespacelabs.dev/go-ids v0.0.0-20221124082625-9fc72ee06af7/go.mod h1:J+Sd+ngeffnCsaO/M7zgs2bR8Klq/ZBhS0+bbnDEH2M= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/services/forgejo-nsc/internal/app/service.go b/services/forgejo-nsc/internal/app/service.go new file mode 100644 index 0000000..45b66eb --- /dev/null +++ b/services/forgejo-nsc/internal/app/service.go @@ -0,0 +1,253 @@ +package app + +import ( + "context" + "errors" + "fmt" + "log/slog" + "strings" + "time" + + "golang.org/x/sync/errgroup" + + "github.com/burrow/forgejo-nsc/internal/forgejo" + "github.com/burrow/forgejo-nsc/internal/nsc" +) + +type Dispatcher interface { + LaunchRunner(ctx context.Context, req nsc.LaunchRequest) (string, error) +} + +type ForgejoClient interface { + RegistrationToken(ctx context.Context, scope forgejo.Scope) (string, error) +} + +type Service struct { + forgejo ForgejoClient + dispatcher Dispatcher + logger 
*slog.Logger + + defaultScope forgejo.Scope + defaultLabels []string + instanceURL string + defaultTTL time.Duration + + allowLabels map[string]struct{} + allowScopes map[string]struct{} +} + +type Config struct { + DefaultScope forgejo.Scope + DefaultLabels []string + InstanceURL string + DefaultTTL time.Duration + AllowLabels []string + AllowScopes []string +} + +func NewService(cfg Config, forgejo ForgejoClient, dispatcher Dispatcher, logger *slog.Logger) *Service { + if logger == nil { + logger = slog.Default() + } + allowLabels := make(map[string]struct{}, len(cfg.AllowLabels)) + for _, label := range cfg.AllowLabels { + allowLabels[normalizeLabel(label)] = struct{}{} + } + allowScopes := make(map[string]struct{}, len(cfg.AllowScopes)) + for _, scope := range cfg.AllowScopes { + allowScopes[scope] = struct{}{} + } + return &Service{ + defaultScope: cfg.DefaultScope, + defaultLabels: cfg.DefaultLabels, + instanceURL: cfg.InstanceURL, + defaultTTL: cfg.DefaultTTL, + forgejo: forgejo, + dispatcher: dispatcher, + logger: logger, + allowLabels: allowLabels, + allowScopes: allowScopes, + } +} + +type DispatchRequest struct { + Count int + Labels []string + Scope *Scope + TTL time.Duration + Machine string + Image string + ExtraEnv map[string]string +} + +type Scope struct { + Level string + Owner string + Name string +} + +type DispatchResponse struct { + Runners []RunnerHandle `json:"runners"` +} + +type RunnerHandle struct { + Name string `json:"name"` +} + +func (s *Service) Dispatch(ctx context.Context, req DispatchRequest) (DispatchResponse, error) { + count := req.Count + if count <= 0 { + count = 1 + } + + scope, err := s.mergeScope(req.Scope) + if err != nil { + return DispatchResponse{}, err + } + + labels, err := s.mergeLabels(req.Labels) + if err != nil { + return DispatchResponse{}, err + } + if len(labels) == 0 { + return DispatchResponse{}, errors.New("no runner labels resolved") + } + + ttl := req.TTL + if ttl == 0 { + ttl = s.defaultTTL + } + + ctx, 
cancel := context.WithCancel(ctx) + defer cancel() + + res := DispatchResponse{ + Runners: make([]RunnerHandle, count), + } + eg, egCtx := errgroup.WithContext(ctx) + + for i := 0; i < count; i++ { + index := i + eg.Go(func() error { + token, err := s.forgejo.RegistrationToken(egCtx, scope) + if err != nil { + return fmt.Errorf("fetching registration token: %w", err) + } + + name, err := s.dispatcher.LaunchRunner(egCtx, nsc.LaunchRequest{ + Token: token, + InstanceURL: s.instanceURL, + Labels: labels, + Duration: ttl, + MachineType: req.Machine, + Image: req.Image, + ExtraEnv: req.ExtraEnv, + }) + if err != nil { + return err + } + + res.Runners[index] = RunnerHandle{Name: name} + return nil + }) + } + + if err := eg.Wait(); err != nil { + return DispatchResponse{}, err + } + + return res, nil +} + +func (s *Service) mergeScope(value *Scope) (forgejo.Scope, error) { + if value == nil { + return s.defaultScope, nil + } + + scope := forgejo.Scope{ + Level: forgejo.ScopeLevel(value.Level), + Owner: value.Owner, + Name: value.Name, + } + if scope.Level == "" { + return forgejo.Scope{}, errors.New("scope level is required") + } + switch scope.Level { + case forgejo.ScopeInstance: + if !s.scopeAllowed(scope) { + return forgejo.Scope{}, fmt.Errorf("scope %q not allowed", scopeKey(scope)) + } + return scope, nil + case forgejo.ScopeOrganization: + if scope.Owner == "" { + return forgejo.Scope{}, errors.New("organization scope requires owner") + } + if !s.scopeAllowed(scope) { + return forgejo.Scope{}, fmt.Errorf("scope %q not allowed", scopeKey(scope)) + } + return scope, nil + case forgejo.ScopeRepository: + if scope.Owner == "" || scope.Name == "" { + return forgejo.Scope{}, errors.New("repository scope requires owner and name") + } + if !s.scopeAllowed(scope) { + return forgejo.Scope{}, fmt.Errorf("scope %q not allowed", scopeKey(scope)) + } + return scope, nil + default: + return forgejo.Scope{}, fmt.Errorf("unsupported scope %q", scope.Level) + } +} + +func (s 
*Service) mergeLabels(labels []string) ([]string, error) { + var resolved []string + if len(labels) == 0 { + resolved = append([]string{}, s.defaultLabels...) + } else { + resolved = labels + } + if len(s.allowLabels) == 0 { + return resolved, nil + } + for _, label := range resolved { + norm := normalizeLabel(label) + if _, ok := s.allowLabels[norm]; !ok { + return nil, fmt.Errorf("label %q not allowed", label) + } + } + return resolved, nil +} + +func normalizeLabel(label string) string { + trimmed := strings.TrimSpace(label) + if trimmed == "" { + return "" + } + // Ignore any explicit executor suffix ("label:host"), since workflows + // and config allowlists typically deal in base label names. + if before, _, ok := strings.Cut(trimmed, ":"); ok { + return before + } + return trimmed +} + +func scopeKey(scope forgejo.Scope) string { + switch scope.Level { + case forgejo.ScopeInstance: + return "instance" + case forgejo.ScopeOrganization: + return fmt.Sprintf("organization:%s", scope.Owner) + case forgejo.ScopeRepository: + return fmt.Sprintf("repository:%s/%s", scope.Owner, scope.Name) + default: + return string(scope.Level) + } +} + +func (s *Service) scopeAllowed(scope forgejo.Scope) bool { + if len(s.allowScopes) == 0 { + return true + } + _, ok := s.allowScopes[scopeKey(scope)] + return ok +} diff --git a/services/forgejo-nsc/internal/app/service_test.go b/services/forgejo-nsc/internal/app/service_test.go new file mode 100644 index 0000000..2be3d3c --- /dev/null +++ b/services/forgejo-nsc/internal/app/service_test.go @@ -0,0 +1,160 @@ +package app + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/burrow/forgejo-nsc/internal/forgejo" + "github.com/burrow/forgejo-nsc/internal/nsc" +) + +type mockForgejo struct { + mu sync.Mutex + tokens []string + scopes []forgejo.Scope + err error + counter int +} + +func (m *mockForgejo) RegistrationToken(ctx context.Context, scope forgejo.Scope) (string, error) { + m.mu.Lock() + defer m.mu.Unlock() + 
m.scopes = append(m.scopes, scope) + if m.err != nil { + return "", m.err + } + if m.counter >= len(m.tokens) { + return "", context.Canceled + } + tok := m.tokens[m.counter] + m.counter++ + return tok, nil +} + +type mockDispatcher struct { + mu sync.Mutex + requests []nsc.LaunchRequest + responses []string + err error +} + +func (m *mockDispatcher) LaunchRunner(ctx context.Context, req nsc.LaunchRequest) (string, error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.err != nil { + return "", m.err + } + m.requests = append(m.requests, req) + idx := len(m.requests) - 1 + if idx < len(m.responses) { + return m.responses[idx], nil + } + return "runner", nil +} + +func TestServiceDispatchUsesDefaults(t *testing.T) { + forgejoMock := &mockForgejo{tokens: []string{"token"}} + dispatcherMock := &mockDispatcher{responses: []string{"runner-default"}} + + cfg := Config{ + DefaultScope: forgejo.Scope{Level: forgejo.ScopeInstance}, + DefaultLabels: []string{"nscloud"}, + InstanceURL: "https://forgejo.example.com", + DefaultTTL: 15 * time.Minute, + } + + service := NewService(cfg, forgejoMock, dispatcherMock, nil) + + resp, err := service.Dispatch(context.Background(), DispatchRequest{}) + if err != nil { + t.Fatalf("Dispatch returned error: %v", err) + } + if len(resp.Runners) != 1 || resp.Runners[0].Name != "runner-default" { + t.Fatalf("unexpected dispatch response: %+v", resp) + } + + if len(forgejoMock.scopes) != 1 || forgejoMock.scopes[0].Level != forgejo.ScopeInstance { + t.Fatalf("expected default scope, got %+v", forgejoMock.scopes) + } + + if len(dispatcherMock.requests) != 1 { + t.Fatalf("expected one dispatcher call, got %d", len(dispatcherMock.requests)) + } + req := dispatcherMock.requests[0] + if req.InstanceURL != cfg.InstanceURL { + t.Fatalf("expected instance URL %s, got %s", cfg.InstanceURL, req.InstanceURL) + } + if got := req.Labels; len(got) != 1 || got[0] != "nscloud" { + t.Fatalf("expected default labels, got %v", got) + } + if req.Duration != 
cfg.DefaultTTL { + t.Fatalf("expected duration %v, got %v", cfg.DefaultTTL, req.Duration) + } +} + +func TestServiceDispatchCustomScopeAndCount(t *testing.T) { + forgejoMock := &mockForgejo{tokens: []string{"token-1", "token-2"}} + dispatcherMock := &mockDispatcher{responses: []string{"runner-1", "runner-2"}} + + cfg := Config{ + DefaultScope: forgejo.Scope{Level: forgejo.ScopeInstance}, + DefaultLabels: []string{"default"}, + InstanceURL: "https://forgejo.example.com", + DefaultTTL: 10 * time.Minute, + } + + service := NewService(cfg, forgejoMock, dispatcherMock, nil) + + reqScope := &Scope{Level: string(forgejo.ScopeRepository), Owner: "acme", Name: "repo"} + res, err := service.Dispatch(context.Background(), DispatchRequest{ + Count: 2, + Labels: []string{"custom"}, + Scope: reqScope, + TTL: 5 * time.Minute, + Machine: "4x8", + Image: "runner:latest", + ExtraEnv: map[string]string{"FOO": "bar"}, + }) + if err != nil { + t.Fatalf("Dispatch returned error: %v", err) + } + if len(res.Runners) != 2 { + t.Fatalf("expected two runners, got %+v", res) + } + + if len(forgejoMock.scopes) != 2 { + t.Fatalf("expected two scope calls, got %d", len(forgejoMock.scopes)) + } + for _, scope := range forgejoMock.scopes { + if scope.Level != forgejo.ScopeRepository || scope.Owner != "acme" || scope.Name != "repo" { + t.Fatalf("unexpected scope: %+v", scope) + } + } + + if len(dispatcherMock.requests) != 2 { + t.Fatalf("expected two dispatcher calls, got %d", len(dispatcherMock.requests)) + } + for _, call := range dispatcherMock.requests { + if call.MachineType != "4x8" || call.Image != "runner:latest" { + t.Fatalf("unexpected machine/image in %+v", call) + } + if call.Duration != 5*time.Minute { + t.Fatalf("expected TTL to override default, got %v", call.Duration) + } + if call.Labels[0] != "custom" { + t.Fatalf("expected custom labels, got %v", call.Labels) + } + if call.ExtraEnv["FOO"] != "bar" { + t.Fatalf("expected env passthrough, got %v", call.ExtraEnv) + } + } +} + +func 
TestServiceDispatchErrorsWithoutLabels(t *testing.T) { + service := NewService(Config{DefaultScope: forgejo.Scope{Level: forgejo.ScopeInstance}}, &mockForgejo{}, &mockDispatcher{}, nil) + if _, err := service.Dispatch(context.Background(), DispatchRequest{}); err == nil { + t.Fatalf("expected error when no labels are available") + } +} diff --git a/services/forgejo-nsc/internal/autoscaler/config.go b/services/forgejo-nsc/internal/autoscaler/config.go new file mode 100644 index 0000000..7603e67 --- /dev/null +++ b/services/forgejo-nsc/internal/autoscaler/config.go @@ -0,0 +1,108 @@ +package autoscaler + +import ( + "fmt" + "os" + "time" + + "gopkg.in/yaml.v3" + + "github.com/burrow/forgejo-nsc/internal/config" +) + +type Config struct { + Listen string `yaml:"listen"` + Dispatcher DispatcherConfig `yaml:"dispatcher"` + Instances []InstanceConfig `yaml:"instances"` +} + +type DispatcherConfig struct { + URL string `yaml:"url"` + Timeout config.Duration `yaml:"timeout"` +} + +type InstanceConfig struct { + Name string `yaml:"name"` + Forgejo ForgejoInstance `yaml:"forgejo"` + Scope config.ScopeConfig `yaml:"scope"` + PollInterval config.Duration `yaml:"poll_interval"` + DisablePolling bool `yaml:"disable_polling"` + WebhookSecret string `yaml:"webhook_secret"` + Webhook WebhookConfig `yaml:"webhook"` + Dispatcher *DispatcherConfig `yaml:"dispatcher"` + Targets []TargetConfig `yaml:"targets"` +} + +type ForgejoInstance struct { + BaseURL string `yaml:"base_url"` + Token string `yaml:"token"` +} + +type WebhookConfig struct { + URL string `yaml:"url"` + ContentType string `yaml:"content_type"` + Events []string `yaml:"events"` + Active *bool `yaml:"active"` +} + +type TargetConfig struct { + Labels []string `yaml:"labels"` + MinIdle int `yaml:"min_idle"` + TTL config.Duration `yaml:"ttl"` + MachineType string `yaml:"machine_type"` + Image string `yaml:"image"` + Env map[string]string `yaml:"env"` +} + +func LoadConfig(path string) (Config, error) { + data, err := 
os.ReadFile(path) + if err != nil { + return Config{}, err + } + var cfg Config + if err := yaml.Unmarshal(data, &cfg); err != nil { + return Config{}, err + } + if cfg.Listen == "" { + cfg.Listen = ":8090" + } + if cfg.Dispatcher.URL == "" { + return Config{}, fmt.Errorf("dispatcher.url is required") + } + if cfg.Dispatcher.Timeout.Duration == 0 { + cfg.Dispatcher.Timeout = config.Duration{Duration: 15 * time.Second} + } + if len(cfg.Instances) == 0 { + return Config{}, fmt.Errorf("at least one instance must be configured") + } + for i := range cfg.Instances { + inst := &cfg.Instances[i] + if inst.Name == "" { + return Config{}, fmt.Errorf("instance[%d] missing name", i) + } + if inst.Forgejo.BaseURL == "" || inst.Forgejo.Token == "" { + return Config{}, fmt.Errorf("instance %s missing forgejo.base_url or token", inst.Name) + } + if inst.PollInterval.Duration == 0 { + inst.PollInterval = config.Duration{Duration: 30 * time.Second} + } + if len(inst.Webhook.Events) == 0 { + inst.Webhook.Events = []string{"workflow_job"} + } + if inst.Webhook.ContentType == "" { + inst.Webhook.ContentType = "json" + } + if len(inst.Targets) == 0 { + return Config{}, fmt.Errorf("instance %s requires at least one target", inst.Name) + } + for ti, tgt := range inst.Targets { + if len(tgt.Labels) == 0 { + return Config{}, fmt.Errorf("instance %s target[%d] missing labels", inst.Name, ti) + } + if tgt.MinIdle < 0 { + return Config{}, fmt.Errorf("instance %s target[%d] min_idle must be >= 0", inst.Name, ti) + } + } + } + return cfg, nil +} diff --git a/services/forgejo-nsc/internal/autoscaler/service.go b/services/forgejo-nsc/internal/autoscaler/service.go new file mode 100644 index 0000000..08d4a42 --- /dev/null +++ b/services/forgejo-nsc/internal/autoscaler/service.go @@ -0,0 +1,385 @@ +package autoscaler + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + "sync" + "sync/atomic" + 
"time" + + "github.com/go-chi/chi/v5" + + "namespacelabs.dev/foundation/std/tasks" + + "github.com/burrow/forgejo-nsc/internal/forgejo" +) + +type Service struct { + listen string + controllers map[string]*InstanceController + router chi.Router +} + +func NewService(cfg Config) (*Service, error) { + controllers := make(map[string]*InstanceController) + for _, inst := range cfg.Instances { + scope, err := inst.Scope.ToScope() + if err != nil { + return nil, err + } + forgejoClient, err := forgejo.NewClient(inst.Forgejo.BaseURL, inst.Forgejo.Token) + if err != nil { + return nil, err + } + dispCfg := cfg.Dispatcher + if inst.Dispatcher != nil && inst.Dispatcher.URL != "" { + dispCfg = *inst.Dispatcher + if dispCfg.Timeout.Duration == 0 { + dispCfg.Timeout = cfg.Dispatcher.Timeout + } + } + dClient := newDispatcherClient(dispCfg.URL, dispCfg.Timeout.Duration) + webhookActive := true + if inst.Webhook.Active != nil { + webhookActive = *inst.Webhook.Active + } + controller := &InstanceController{ + name: inst.Name, + cfg: inst, + scope: scope, + forgejo: forgejoClient, + dispatcher: dClient, + webhook: forgejo.WebhookConfig{ + URL: inst.Webhook.URL, + ContentType: inst.Webhook.ContentType, + Events: inst.Webhook.Events, + Active: webhookActive, + }, + secret: inst.WebhookSecret, + } + controllers[inst.Name] = controller + } + + router := chi.NewRouter() + service := &Service{ + listen: cfg.Listen, + controllers: controllers, + router: router, + } + + router.Get("/healthz", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("ok")) + }) + router.Post("/webhook/{instance}", service.handleWebhook) + + return service, nil +} + +func (s *Service) Start(ctx context.Context) error { + for _, controller := range s.controllers { + if err := controller.EnsureWebhook(ctx); err != nil { + return err + } + } + + var wg sync.WaitGroup + for _, controller := range s.controllers { + wg.Add(1) + go func(c *InstanceController) { + defer 
wg.Done() + c.Run(ctx) + }(controller) + } + + srv := &http.Server{ + Addr: s.listen, + Handler: s.router, + } + + go func() { + <-ctx.Done() + _ = srv.Shutdown(context.Background()) + }() + + if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + return err + } + wg.Wait() + return nil +} + +func (s *Service) handleWebhook(w http.ResponseWriter, r *http.Request) { + name := chi.URLParam(r, "instance") + controller, ok := s.controllers[name] + if !ok { + http.Error(w, "unknown instance", http.StatusNotFound) + return + } + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "invalid body", http.StatusBadRequest) + return + } + if controller.cfg.WebhookSecret != "" { + signature := r.Header.Get("X-Gitea-Signature") + if signature == "" { + http.Error(w, "missing signature", http.StatusUnauthorized) + return + } + if !verifySignature(controller.cfg.WebhookSecret, signature, body) { + http.Error(w, "invalid signature", http.StatusUnauthorized) + return + } + } + + var payload workflowJobPayload + if err := json.Unmarshal(body, &payload); err != nil { + http.Error(w, "bad payload", http.StatusBadRequest) + return + } + + controller.MarkWebhookSeen() + if payload.Action == "queued" { + controller.DispatchForJob(r.Context(), payload) + } + + w.WriteHeader(http.StatusAccepted) +} + +type workflowJobPayload struct { + Action string `json:"action"` + WorkflowJob struct { + Labels []string `json:"labels"` + } `json:"workflow_job"` +} + +type InstanceController struct { + name string + cfg InstanceConfig + scope forgejo.Scope + forgejo *forgejo.Client + dispatcher *dispatcherClient + ready atomic.Bool + webhook forgejo.WebhookConfig + secret string +} + +func (c *InstanceController) EnsureWebhook(ctx context.Context) error { + if c.webhook.URL == "" { + return nil + } + return tasks.Action("autoscaler.ensure-webhook").Arg("instance", c.name).Run(ctx, func(ctx context.Context) error { + return c.forgejo.EnsureWebhook(ctx, c.scope, 
c.webhook, c.secret) + }) +} + +func (c *InstanceController) Run(ctx context.Context) { + if c.cfg.DisablePolling { + <-ctx.Done() + return + } + ticker := time.NewTicker(c.cfg.PollInterval.Duration) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + _ = tasks.Action("autoscaler.poll").Arg("instance", c.name).Run(ctx, func(ctx context.Context) error { + return c.reconcile(ctx) + }) + } + } +} + +func (c *InstanceController) reconcile(ctx context.Context) error { + runners, err := c.forgejo.ListRunners(ctx, c.scope) + if err != nil { + // Keep polling even if runner listing fails; we can still dispatch based on queued jobs. + runners = nil + } + + for _, target := range c.cfg.Targets { + idle := countIdle(runners, target.Labels) + + need := 0 + if idle < target.MinIdle { + need = target.MinIdle - idle + } + + jobs, jobErr := c.forgejo.ListRunJobs(ctx, c.scope, target.Labels) + if jobErr != nil { + return jobErr + } + waiting := countWaitingJobs(jobs, target.Labels) + // Scale-to-zero friendly: if anything is waiting and there are no idle runners + // for that label set, dispatch exactly one runner to unblock the queue. 
+ if waiting > 0 && idle == 0 && need < 1 { + need = 1 + } + + if need <= 0 { + continue + } + if err := c.dispatch(ctx, target, need, "poll"); err != nil { + return err + } + } + return nil +} + +func (c *InstanceController) dispatch(ctx context.Context, target TargetConfig, count int, reason string) error { + if count <= 0 { + return nil + } + req := dispatcherRequest{ + Count: count, + Labels: target.Labels, + } + if target.TTL.Duration > 0 { + req.TTL = target.TTL.Duration.String() + } + if target.MachineType != "" { + req.MachineType = target.MachineType + } + if target.Image != "" { + req.Image = target.Image + } + if len(target.Env) > 0 { + req.Env = target.Env + } + return tasks.Action("autoscaler.dispatch").Arg("instance", c.name).Arg("reason", reason).Arg("labels", strings.Join(target.Labels, ",")).Run(ctx, func(ctx context.Context) error { + return c.dispatcher.Dispatch(ctx, req) + }) +} + +func (c *InstanceController) DispatchForJob(ctx context.Context, payload workflowJobPayload) { + action := strings.ToLower(payload.Action) + if action != "queued" && action != "waiting" { + return + } + jobLabels := payload.WorkflowJob.Labels + for _, target := range c.cfg.Targets { + if labelsMatch(jobLabels, target.Labels) { + _ = c.dispatch(ctx, target, 1, "webhook") + return + } + } +} + +func (c *InstanceController) MarkWebhookSeen() { + c.ready.Store(true) +} + +func countIdle(runners []forgejo.Runner, labels []string) int { + count := 0 + for _, runner := range runners { + if strings.ToLower(runner.Status) != "online" || runner.Busy { + continue + } + if labelsMatch(extractLabels(runner.Labels), labels) { + count++ + } + } + return count +} + +func countWaitingJobs(jobs []forgejo.RunJob, labels []string) int { + count := 0 + for _, job := range jobs { + if status := strings.ToLower(job.Status); status != "waiting" && status != "queued" { + continue + } + if labelsMatch(job.RunsOn, labels) { + count++ + } + } + return count +} + +func extractLabels(src 
[]forgejo.RunnerLabel) []string { + result := make([]string, 0, len(src)) + for _, lbl := range src { + result = append(result, lbl.Name) + } + return result +} + +func labelsMatch(have, want []string) bool { + set := make(map[string]struct{}, len(have)) + for _, label := range have { + set[label] = struct{}{} + } + for _, label := range want { + if _, ok := set[label]; !ok { + return false + } + } + return true +} + +func verifySignature(secret, signature string, body []byte) bool { + parts := strings.SplitN(signature, "=", 2) + if len(parts) == 2 { + signature = parts[1] + } + mac := hmac.New(sha256.New, []byte(secret)) + mac.Write(body) + expected := hex.EncodeToString(mac.Sum(nil)) + return hmac.Equal([]byte(expected), []byte(signature)) +} + +type dispatcherClient struct { + url string + client *http.Client +} + +type dispatcherRequest struct { + Count int `json:"count"` + Labels []string `json:"labels"` + TTL string `json:"ttl,omitempty"` + MachineType string `json:"machine_type,omitempty"` + Image string `json:"image,omitempty"` + Env map[string]string `json:"env,omitempty"` +} + +func newDispatcherClient(url string, timeout time.Duration) *dispatcherClient { + if timeout == 0 { + timeout = 30 * time.Second + } + return &dispatcherClient{ + url: url, + client: &http.Client{ + Timeout: timeout, + }, + } +} + +func (d *dispatcherClient) Dispatch(ctx context.Context, req dispatcherRequest) error { + body, _ := json.Marshal(req) + endpoint := strings.TrimSuffix(d.url, "/") + "/api/v1/dispatch" + httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body)) + if err != nil { + return err + } + httpReq.Header.Set("Content-Type", "application/json") + resp, err := d.client.Do(httpReq) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode >= 300 { + return fmt.Errorf("dispatcher returned %s", resp.Status) + } + return nil +} diff --git a/services/forgejo-nsc/internal/config/config.go 
b/services/forgejo-nsc/internal/config/config.go new file mode 100644 index 0000000..264cbd0 --- /dev/null +++ b/services/forgejo-nsc/internal/config/config.go @@ -0,0 +1,185 @@ +package config + +import ( + "errors" + "fmt" + "os" + "strings" + "time" + + "gopkg.in/yaml.v3" + + "github.com/burrow/forgejo-nsc/internal/forgejo" +) + +// Duration wraps time.Duration to support YAML unmarshalling from strings. +type Duration struct { + time.Duration +} + +// UnmarshalYAML implements yaml.v3 unmarshalling for Duration. +func (d *Duration) UnmarshalYAML(value *yaml.Node) error { + switch value.Tag { + case "!!int": + var seconds int64 + if err := value.Decode(&seconds); err != nil { + return err + } + d.Duration = time.Duration(seconds) * time.Second + return nil + default: + parsed, err := time.ParseDuration(value.Value) + if err != nil { + return err + } + d.Duration = parsed + return nil + } +} + +// MarshalYAML implements yaml.v3 marshalling. +func (d Duration) MarshalYAML() (any, error) { + return d.Duration.String(), nil +} + +type Config struct { + Listen string `yaml:"listen"` + Forgejo ForgejoConfig `yaml:"forgejo"` + Namespace NamespaceConfig `yaml:"namespace"` + Runner RunnerConfig `yaml:"runner"` +} + +type ForgejoConfig struct { + BaseURL string `yaml:"base_url"` + // InstanceURL is the URL runners should use when registering with Forgejo. + // This must be reachable from the spawned runner (e.g. the public URL like + // https://git.burrow.net), and may differ from BaseURL (which can be a local + // loopback URL on the forge host). 
+ InstanceURL string `yaml:"instance_url"` + Token string `yaml:"token"` + DefaultScope ScopeConfig `yaml:"default_scope"` + DefaultLabels []string `yaml:"default_labels"` + Timeout Duration `yaml:"timeout"` + ExtraHeaders yaml.Node `yaml:"extra_headers"` +} + +type ScopeConfig struct { + Level string `yaml:"level"` + Owner string `yaml:"owner,omitempty"` + Name string `yaml:"name,omitempty"` +} + +type NamespaceConfig struct { + NSCBinary string `yaml:"nsc_binary"` + // ComputeBaseURL is the Namespace Cloud Compute API endpoint (Connect RPC base URL). + // This is used for macOS runners, since NSC "run" is container-based (Linux-only). + // Example: "https://ord4.compute.namespaceapis.com" + ComputeBaseURL string `yaml:"compute_base_url"` + Image string `yaml:"image"` + MachineType string `yaml:"machine_type"` + // MacosBaseImageID selects which macOS base image to use (e.g. "tahoe"). + MacosBaseImageID string `yaml:"macos_base_image_id"` + // MacosMachineArch is the architecture used for macOS instances (typically "arm64"). 
+ MacosMachineArch string `yaml:"macos_machine_arch"` + Duration Duration `yaml:"duration"` + WorkDir string `yaml:"workdir"` + MaxParallel int64 `yaml:"max_parallel"` + Environment []string `yaml:"environment"` + AllowLabels []string `yaml:"allow_labels"` + AllowScopes []string `yaml:"allow_scopes"` + Network string `yaml:"network"` + InstanceTags []string `yaml:"instance_tags"` +} + +type RunnerConfig struct { + NamePrefix string `yaml:"name_prefix"` + Executor string `yaml:"executor"` +} + +func Load(path string) (*Config, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var cfg Config + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, err + } + + if err := cfg.Validate(); err != nil { + return nil, err + } + + return &cfg, nil +} + +func (c *Config) Validate() error { + if c.Listen == "" { + c.Listen = ":8080" + } + if c.Runner.NamePrefix == "" { + c.Runner.NamePrefix = "nscloud-" + } + if c.Runner.Executor == "" { + c.Runner.Executor = "shell" + } + + if c.Forgejo.BaseURL == "" { + return errors.New("forgejo.base_url is required") + } + if c.Forgejo.InstanceURL == "" { + // Backwards-compatible default: assume runners can reach the same URL. 
+ c.Forgejo.InstanceURL = c.Forgejo.BaseURL + } + if c.Forgejo.Token == "" { + return errors.New("forgejo.token is required") + } + if c.Forgejo.Timeout.Duration == 0 { + c.Forgejo.Timeout.Duration = 30 * time.Second + } + if _, err := c.Forgejo.DefaultScope.ToScope(); err != nil { + return err + } + + if c.Namespace.NSCBinary == "" { + c.Namespace.NSCBinary = "nsc" + } + if c.Namespace.Image == "" { + c.Namespace.Image = "code.forgejo.org/forgejo/runner:11" + } + if c.Namespace.MacosBaseImageID == "" { + c.Namespace.MacosBaseImageID = "tahoe" + } + if c.Namespace.MacosMachineArch == "" { + c.Namespace.MacosMachineArch = "arm64" + } + if c.Namespace.Duration.Duration == 0 { + c.Namespace.Duration.Duration = 30 * time.Minute + } + if c.Namespace.MaxParallel <= 0 { + c.Namespace.MaxParallel = 4 + } + + return nil +} + +func (s ScopeConfig) ToScope() (forgejo.Scope, error) { + level := forgejo.ScopeLevel(strings.ToLower(s.Level)) + switch level { + case forgejo.ScopeInstance: + return forgejo.Scope{Level: level}, nil + case forgejo.ScopeOrganization: + if s.Owner == "" { + return forgejo.Scope{}, errors.New("forgejo default scope requires owner for organization level") + } + return forgejo.Scope{Level: level, Owner: s.Owner}, nil + case forgejo.ScopeRepository: + if s.Owner == "" || s.Name == "" { + return forgejo.Scope{}, errors.New("forgejo default scope requires owner and name for repository level") + } + return forgejo.Scope{Level: level, Owner: s.Owner, Name: s.Name}, nil + default: + return forgejo.Scope{}, fmt.Errorf("unknown scope level %q", s.Level) + } +} diff --git a/services/forgejo-nsc/internal/config/config_test.go b/services/forgejo-nsc/internal/config/config_test.go new file mode 100644 index 0000000..e42f3c9 --- /dev/null +++ b/services/forgejo-nsc/internal/config/config_test.go @@ -0,0 +1,41 @@ +package config + +import ( + "os" + "path/filepath" + "testing" + "time" +) + +func TestLoadConfig(t *testing.T) { + dir := t.TempDir() + path := 
// ScopeLevel identifies where a runner scope is anchored in Forgejo.
type ScopeLevel string

const (
	ScopeInstance     ScopeLevel = "instance"
	ScopeOrganization ScopeLevel = "organization"
	ScopeRepository   ScopeLevel = "repository"
)

// Scope names a runner scope: the whole instance, an organization, or a
// single repository.
type Scope struct {
	Level ScopeLevel
	Owner string
	Name  string
}

// Client is a minimal Forgejo API client using token authentication.
type Client struct {
	baseURL *url.URL
	token   string
	client  *http.Client
}

// Runner mirrors the Forgejo actions runner API object (subset).
type Runner struct {
	ID     int64         `json:"id"`
	Name   string        `json:"name"`
	Status string        `json:"status"`
	Busy   bool          `json:"busy"`
	Labels []RunnerLabel `json:"labels"`
}

// RunnerLabel is a single label attached to a runner.
type RunnerLabel struct {
	Name string `json:"name"`
}

// RunJob mirrors the Forgejo actions run-job API object (subset).
type RunJob struct {
	ID     int64    `json:"id"`
	Name   string   `json:"name"`
	RunsOn []string `json:"runs_on"`
	Status string   `json:"status"`
	TaskID int64    `json:"task_id"`
}

// WebhookConfig describes the webhook this service maintains on Forgejo.
type WebhookConfig struct {
	URL         string
	ContentType string
	Events      []string
	Active      bool
}

// Option customizes a Client during construction.
type Option func(*Client)

// WithHTTPClient overrides the default HTTP client (30s timeout). A nil
// argument is ignored.
func WithHTTPClient(httpClient *http.Client) Option {
	return func(c *Client) {
		if httpClient != nil {
			c.client = httpClient
		}
	}
}

// NewClient builds a Client for the Forgejo instance at rawURL authenticating
// with token. Both rawURL and a non-blank token are required.
func NewClient(rawURL, token string, opts ...Option) (*Client, error) {
	if rawURL == "" {
		return nil, errors.New("forgejo base URL is required")
	}

	u, err := url.Parse(rawURL)
	if err != nil {
		return nil, err
	}

	client := &Client{
		baseURL: u,
		token:   strings.TrimSpace(token),
		client: &http.Client{
			Timeout: 30 * time.Second,
		},
	}

	for _, opt := range opts {
		opt(client)
	}

	if client.token == "" {
		return nil, errors.New("forgejo token is required")
	}

	return client, nil
}

type registrationTokenResponse struct {
	Token string    `json:"token"`
	TTL   time.Time `json:"expires_at"`
}

// endpoint builds an absolute URL by joining segments onto the base URL path.
// Shared by every *Endpoint helper below.
func (c *Client) endpoint(segments ...string) string {
	clone := *c.baseURL
	clone.Path = path.Join(append([]string{clone.Path}, segments...)...)
	return clone.String()
}

// getJSON performs an authenticated GET against endpoint and decodes the JSON
// response into out. Any status >= 400 is reported as an error.
func (c *Client) getJSON(ctx context.Context, endpoint string, out any) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", fmt.Sprintf("token %s", c.token))
	req.Header.Set("Accept", "application/json")

	resp, err := c.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		return fmt.Errorf("forgejo returned %s", resp.Status)
	}

	return json.NewDecoder(resp.Body).Decode(out)
}

// RegistrationToken fetches a runner registration token for scope.
func (c *Client) RegistrationToken(ctx context.Context, scope Scope) (string, error) {
	endpoint, err := c.registrationEndpoint(scope)
	if err != nil {
		return "", err
	}

	var decoded registrationTokenResponse
	if err := c.getJSON(ctx, endpoint, &decoded); err != nil {
		return "", err
	}
	if decoded.Token == "" {
		return "", errors.New("forgejo response missing token")
	}

	return decoded.Token, nil
}

// ListRunners returns the runners registered at scope.
func (c *Client) ListRunners(ctx context.Context, scope Scope) ([]Runner, error) {
	endpoint, err := c.runnersEndpoint(scope)
	if err != nil {
		return nil, err
	}

	var decoded []Runner
	if err := c.getJSON(ctx, endpoint, &decoded); err != nil {
		return nil, err
	}

	return decoded, nil
}

// ListRunJobs returns the run jobs visible at scope, optionally filtered by
// labels. The result is never nil.
func (c *Client) ListRunJobs(ctx context.Context, scope Scope, labels []string) ([]RunJob, error) {
	endpoint, err := c.runJobsEndpoint(scope)
	if err != nil {
		return nil, err
	}
	if len(labels) > 0 {
		// The endpoint never carries a query of its own, so appending is safe.
		endpoint += "?" + url.Values{"labels": {strings.Join(labels, ",")}}.Encode()
	}

	var decoded []RunJob
	if err := c.getJSON(ctx, endpoint, &decoded); err != nil {
		return nil, err
	}

	if decoded == nil {
		decoded = []RunJob{}
	}
	return decoded, nil
}

// EnsureWebhook creates or updates a webhook at scope so that exactly one
// hook with cfg.URL exists with the desired settings. A blank cfg.URL is a
// no-op.
func (c *Client) EnsureWebhook(ctx context.Context, scope Scope, cfg WebhookConfig, secret string) error {
	if cfg.URL == "" {
		return nil
	}

	hooks, err := c.listWebhooks(ctx, scope)
	if err != nil {
		return err
	}

	for _, hook := range hooks {
		if strings.EqualFold(hook.Config.URL, cfg.URL) {
			return c.updateWebhook(ctx, scope, hook.ID, cfg, secret)
		}
	}

	return c.createWebhook(ctx, scope, cfg, secret)
}

// registrationEndpoint resolves the registration-token URL for scope.
func (c *Client) registrationEndpoint(scope Scope) (string, error) {
	switch scope.Level {
	case ScopeRepository:
		if scope.Owner == "" || scope.Name == "" {
			return "", errors.New("repository scope requires owner and name")
		}
		return c.endpoint("api", "v1", "repos", scope.Owner, scope.Name, "actions", "runners", "registration-token"), nil
	case ScopeOrganization:
		if scope.Owner == "" {
			return "", errors.New("organization scope requires owner")
		}
		return c.endpoint("api", "v1", "orgs", scope.Owner, "actions", "runners", "registration-token"), nil
	case ScopeInstance:
		return c.endpoint("api", "v1", "admin", "actions", "runners", "registration-token"), nil
	default:
		return "", fmt.Errorf("unsupported scope level %q", scope.Level)
	}
}

type webhook struct {
	ID     int64                `json:"id"`
	Config webhookConfigPayload `json:"config"`
}

type webhookConfigPayload struct {
	URL         string `json:"url"`
	ContentType string `json:"content_type"`
}

// listWebhooks returns the webhooks configured at scope.
func (c *Client) listWebhooks(ctx context.Context, scope Scope) ([]webhook, error) {
	endpoint, err := c.webhooksEndpoint(scope)
	if err != nil {
		return nil, err
	}

	var hooks []webhook
	if err := c.getJSON(ctx, endpoint, &hooks); err != nil {
		return nil, err
	}

	return hooks, nil
}

// writeWebhook sends a create (POST) or update (PATCH) webhook request.
// Shared by createWebhook and updateWebhook, which previously duplicated
// the payload construction and request plumbing.
func (c *Client) writeWebhook(ctx context.Context, method, endpoint string, cfg WebhookConfig, secret string) error {
	payload := webhookRequestPayload{
		Type: "gitea",
		Config: map[string]string{
			"url":          cfg.URL,
			"content_type": cfg.ContentType,
			"secret":       secret,
			"insecure_ssl": "0",
		},
		Events: cfg.Events,
		Active: cfg.Active,
	}

	body, err := json.Marshal(payload)
	if err != nil {
		return err
	}

	req, err := http.NewRequestWithContext(ctx, method, endpoint, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", fmt.Sprintf("token %s", c.token))
	req.Header.Set("Content-Type", "application/json")

	resp, err := c.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		return fmt.Errorf("forgejo returned %s", resp.Status)
	}

	return nil
}

func (c *Client) createWebhook(ctx context.Context, scope Scope, cfg WebhookConfig, secret string) error {
	endpoint, err := c.webhooksEndpoint(scope)
	if err != nil {
		return err
	}
	return c.writeWebhook(ctx, http.MethodPost, endpoint, cfg, secret)
}

func (c *Client) updateWebhook(ctx context.Context, scope Scope, id int64, cfg WebhookConfig, secret string) error {
	endpoint, err := c.webhooksEndpoint(scope)
	if err != nil {
		return err
	}
	return c.writeWebhook(ctx, http.MethodPatch, fmt.Sprintf("%s/%d", endpoint, id), cfg, secret)
}

// webhooksEndpoint resolves the hooks URL for scope. Instance-level webhook
// management is not supported by this client.
func (c *Client) webhooksEndpoint(scope Scope) (string, error) {
	switch scope.Level {
	case ScopeRepository:
		if scope.Owner == "" || scope.Name == "" {
			return "", errors.New("repository scope requires owner and name")
		}
		return c.endpoint("api", "v1", "repos", scope.Owner, scope.Name, "hooks"), nil
	case ScopeOrganization:
		if scope.Owner == "" {
			return "", errors.New("organization scope requires owner")
		}
		return c.endpoint("api", "v1", "orgs", scope.Owner, "hooks"), nil
	default:
		return "", fmt.Errorf("webhook management not supported for scope level %q", scope.Level)
	}
}

type webhookRequestPayload struct {
	Type   string            `json:"type"`
	Config map[string]string `json:"config"`
	Events []string          `json:"events"`
	Active bool              `json:"active"`
}

// runnersEndpoint resolves the runners listing URL for scope.
func (c *Client) runnersEndpoint(scope Scope) (string, error) {
	switch scope.Level {
	case ScopeRepository:
		if scope.Owner == "" || scope.Name == "" {
			return "", errors.New("repository scope requires owner and name")
		}
		return c.endpoint("api", "v1", "repos", scope.Owner, scope.Name, "actions", "runners"), nil
	case ScopeOrganization:
		if scope.Owner == "" {
			return "", errors.New("organization scope requires owner")
		}
		return c.endpoint("api", "v1", "orgs", scope.Owner, "actions", "runners"), nil
	case ScopeInstance:
		return c.endpoint("api", "v1", "actions", "runners"), nil
	default:
		return "", fmt.Errorf("unsupported scope level %q", scope.Level)
	}
}

// runJobsEndpoint resolves the run-jobs URL for scope. Instance-level job
// listing is not supported by this client.
func (c *Client) runJobsEndpoint(scope Scope) (string, error) {
	switch scope.Level {
	case ScopeRepository:
		if scope.Owner == "" || scope.Name == "" {
			return "", errors.New("repository scope requires owner and name")
		}
		return c.endpoint("api", "v1", "repos", scope.Owner, scope.Name, "actions", "runners", "jobs"), nil
	case ScopeOrganization:
		if scope.Owner == "" {
			return "", errors.New("organization scope requires owner")
		}
		return c.endpoint("api", "v1", "orgs", scope.Owner, "actions", "runners", "jobs"), nil
	default:
		return "", fmt.Errorf("run jobs not supported for scope level %q", scope.Level)
	}
}
opts.DefaultImage == "" { + return nil, errors.New("default Namespace runner image is required") + } + if opts.RunnerNamePrefix == "" { + opts.RunnerNamePrefix = "nscloud-" + } + if opts.Executor == "" { + opts.Executor = "shell" + } + if opts.MacosBaseImageID == "" { + opts.MacosBaseImageID = "tahoe" + } + if opts.MacosMachineArch == "" { + opts.MacosMachineArch = "arm64" + } + if opts.MaxParallel <= 0 { + opts.MaxParallel = 4 + } + if opts.DefaultDuration == 0 { + opts.DefaultDuration = 30 * time.Minute + } + logger := opts.Logger + if logger == nil { + logger = slog.New(slog.NewTextHandler(io.Discard, nil)) + } + + return &Dispatcher{ + opts: opts, + sem: semaphore.NewWeighted(opts.MaxParallel), + log: logger, + }, nil +} + +func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (string, error) { + if req.Token == "" { + return "", errors.New("registration token is required") + } + if req.InstanceURL == "" { + return "", errors.New("forgejo instance url is required") + } + if err := d.sem.Acquire(ctx, 1); err != nil { + return "", err + } + defer d.sem.Release(1) + + runnerName := d.generateName() + duration := req.Duration + if duration == 0 { + duration = d.opts.DefaultDuration + } + machineType := choose(req.MachineType, d.opts.DefaultMachine) + image := choose(req.Image, d.opts.DefaultImage) + + if hasWindowsLabel(req.Labels) { + if err := d.launchWindowsRunnerViaWinRM(ctx, runnerName, req, duration, machineType); err != nil { + return "", err + } + return runnerName, nil + } + + if hasMacOSLabel(req.Labels) { + // Compute macOS shapes differ from the Linux "run" defaults. If the request + // didn't specify a machine type, ensure we pick a macOS-valid default. + if machineType == "" || machineType == d.opts.DefaultMachine { + machineType = "12x28" + } + + // Prefer the Compute API path because it uses the service token (NSC_TOKEN_FILE) + // and does not require an interactive `nsc login` session. 
+ if err := d.launchMacOSRunner(ctx, runnerName, req, duration, machineType); err != nil { + d.log.Warn("macos compute launch failed; falling back to nsc create+ssh", "runner", runnerName, "err", err) + if err := d.launchMacOSRunnerViaNSC(ctx, runnerName, req, duration, machineType); err != nil { + return "", err + } + } + return runnerName, nil + } + + env := map[string]string{ + "FORGEJO_INSTANCE_URL": req.InstanceURL, + "FORGEJO_RUNNER_TOKEN": req.Token, + "FORGEJO_RUNNER_NAME": runnerName, + "FORGEJO_RUNNER_LABELS": strings.Join(req.Labels, ","), + "FORGEJO_RUNNER_EXEC": d.opts.Executor, + } + for k, v := range req.ExtraEnv { + env[k] = v + } + if _, ok := env["NSC_CACHE_PATH"]; !ok { + env["NSC_CACHE_PATH"] = "/nix/store" + } + + script := d.bootstrapScript() + args := []string{ + "run", + "--wait", + "--output", + "json", + "--duration", duration.String(), + "--image", image, + "--name", runnerName, + "--user", "root", + } + if machineType != "" { + args = append(args, "--machine_type", machineType) + } + if d.opts.Network != "" { + args = append(args, "--network", d.opts.Network) + } + for key, value := range env { + if value == "" { + continue + } + args = append(args, "-e", fmt.Sprintf("%s=%s", key, value)) + } + if d.opts.WorkDir != "" { + args = append(args, "-e", fmt.Sprintf("FORGEJO_RUNNER_WORKDIR=%s", d.opts.WorkDir)) + } + + args = append(args, "--", "/bin/sh", "-c", script) + + cmd := exec.CommandContext(ctx, d.opts.BinaryPath, args...) 
+ var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + + start := time.Now() + d.log.Info("launching Namespace runner", + "runner", runnerName, + "machine_type", machineType, + "image", image, + ) + err := cmd.Run() + if err != nil { + return "", fmt.Errorf("nsc run failed: %w\n%s", err, buf.String()) + } + + if output := strings.TrimSpace(buf.String()); output != "" { + d.log.Info("runner output", "runner", runnerName, "output", output) + } + + d.log.Info("runner completed", + "runner", runnerName, + "duration", time.Since(start), + ) + + if instanceID := parseInstanceID(buf.String()); instanceID != "" { + waitCtx, cancel := context.WithTimeout(context.Background(), duration) + defer cancel() + stopped := d.waitForInstanceStop(waitCtx, runnerName, instanceID, duration) + if !stopped { + d.log.Warn("runner did not stop before timeout", "runner", runnerName, "instance", instanceID) + } + d.destroyInstance(waitCtx, runnerName, instanceID) + } + + return runnerName, nil +} + +func (d *Dispatcher) generateName() string { + id := strings.ReplaceAll(uuid.NewString(), "-", "") + return d.opts.RunnerNamePrefix + id[:12] +} + +func parseInstanceID(output string) string { + if jsonBlob := extractJSON(output); jsonBlob != "" { + var payload struct { + ClusterID string `json:"cluster_id"` + } + if err := json.Unmarshal([]byte(jsonBlob), &payload); err == nil && payload.ClusterID != "" { + return payload.ClusterID + } + } + const marker = "ID:" + idx := strings.Index(output, marker) + if idx == -1 { + return "" + } + rest := strings.TrimSpace(output[idx+len(marker):]) + if rest == "" { + return "" + } + fields := strings.Fields(rest) + if len(fields) == 0 { + return "" + } + return fields[0] +} + +func extractJSON(output string) string { + trimmed := strings.TrimSpace(output) + if trimmed == "" { + return "" + } + start := strings.IndexAny(trimmed, "[{") + if start == -1 { + return "" + } + end := strings.LastIndexAny(trimmed, "]}") + if end == -1 || end < start { + 
return "" + } + return trimmed[start : end+1] +} + +type describeResponse struct { + Resource string `json:"resource"` + PerResource map[string]describeTarget `json:"per_resource"` +} + +type describeTarget struct { + Tombstone string `json:"tombstone"` + Container []describeContainer `json:"container"` +} + +type describeContainer struct { + Status string `json:"status"` + TerminatedAt string `json:"terminated_at"` +} + +func instanceStopped(output string) bool { + jsonBlob := extractJSON(output) + if jsonBlob == "" { + return false + } + var payload []describeResponse + if err := json.Unmarshal([]byte(jsonBlob), &payload); err != nil { + return false + } + if len(payload) == 0 { + return false + } + for _, entry := range payload { + for _, target := range entry.PerResource { + if target.Tombstone != "" { + return true + } + if len(target.Container) == 0 { + continue + } + for _, container := range target.Container { + if container.Status != "stopped" && container.TerminatedAt == "" { + return false + } + } + } + } + return true +} + +func (d *Dispatcher) waitForInstanceStop(ctx context.Context, runnerName, instanceID string, timeout time.Duration) bool { + if timeout <= 0 { + timeout = d.opts.DefaultDuration + } + deadline := time.Now().Add(timeout) + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + + for { + stopped, err := d.checkInstanceStopped(ctx, instanceID) + if err != nil { + d.log.Warn("runner stop check failed", "runner", runnerName, "instance", instanceID, "err", err) + return false + } + if stopped { + return true + } + if time.Now().After(deadline) { + return false + } + select { + case <-ctx.Done(): + return false + case <-ticker.C: + } + } +} + +func (d *Dispatcher) checkInstanceStopped(ctx context.Context, instanceID string) (bool, error) { + cmd := exec.CommandContext(ctx, d.opts.BinaryPath, "describe", "--output", "json", instanceID) + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + if err := cmd.Run(); err != 
nil { + output := strings.ToLower(buf.String()) + if strings.Contains(output, "destroyed") || strings.Contains(output, "not found") { + return true, nil + } + return false, fmt.Errorf("nsc describe failed: %w\n%s", err, strings.TrimSpace(buf.String())) + } + return instanceStopped(buf.String()), nil +} + +func (d *Dispatcher) destroyInstance(ctx context.Context, runnerName, instanceID string) { + cmd := exec.CommandContext(ctx, d.opts.BinaryPath, "destroy", "--force", instanceID) + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + if err := cmd.Run(); err != nil { + d.log.Warn("runner destroy failed", "runner", runnerName, "instance", instanceID, "err", err, "output", strings.TrimSpace(buf.String())) + return + } + if output := strings.TrimSpace(buf.String()); output != "" { + d.log.Info("runner destroyed", "runner", runnerName, "instance", instanceID, "output", output) + } else { + d.log.Info("runner destroyed", "runner", runnerName, "instance", instanceID) + } +} + +func choose(values ...string) string { + for _, v := range values { + if strings.TrimSpace(v) != "" { + return v + } + } + return "" +} + +func (d *Dispatcher) bootstrapScript() string { + var builder strings.Builder + builder.WriteString(`set -euo pipefail +mkdir -p "${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}" +cd "${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}" + +if ! command -v node >/dev/null 2>&1; then + apk add --no-cache nodejs npm >/dev/null +fi +if ! command -v sudo >/dev/null 2>&1; then + apk add --no-cache sudo bash >/dev/null +fi +if ! command -v curl >/dev/null 2>&1; then + apk add --no-cache curl >/dev/null +fi +if ! 
command -v xz >/dev/null 2>&1; then + apk add --no-cache xz >/dev/null +fi +export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" +node --version >/dev/null + +cat > runner.yaml <<'EOF' +log: + level: info +runner: + file: .runner + capacity: 1 + name: ${FORGEJO_RUNNER_NAME} + labels: +EOF +`) + builder.WriteString(`runner_exec="${FORGEJO_RUNNER_EXEC:-host}" +if [ "$runner_exec" = "shell" ]; then + runner_exec="host" +fi + +resolved_labels="" +for label in ${FORGEJO_RUNNER_LABELS//,/ } ; do + if [ -z "${label}" ]; then + continue + fi + case "${label}" in + *:*) resolved="${label}" ;; + *) + if [ "$runner_exec" = "host" ]; then + resolved="${label}:host" + else + resolved="${label}:${runner_exec}" + fi + ;; + esac + echo " - ${resolved}" >> runner.yaml + if [ -z "${resolved_labels}" ]; then + resolved_labels="${resolved}" + else + resolved_labels="${resolved_labels},${resolved}" + fi +done +`) + builder.WriteString(`cat >> runner.yaml <<'EOF' +cache: + enabled: false +EOF + +forgejo-runner register \ + --no-interactive \ + --instance "${FORGEJO_INSTANCE_URL}" \ + --token "${FORGEJO_RUNNER_TOKEN}" \ + --name "${FORGEJO_RUNNER_NAME}" \ + --labels "${resolved_labels}" \ + --config runner.yaml + +runner_mode="${FORGEJO_RUNNER_MODE:-one-job}" +case "$runner_mode" in + one-job) + forgejo-runner one-job --config runner.yaml + ;; + daemon) + forgejo-runner daemon --config runner.yaml + ;; + *) + echo "Unknown FORGEJO_RUNNER_MODE: ${runner_mode}" >&2 + exit 1 + ;; +esac +`) + return builder.String() +} diff --git a/services/forgejo-nsc/internal/nsc/macos.go b/services/forgejo-nsc/internal/nsc/macos.go new file mode 100644 index 0000000..9bf3837 --- /dev/null +++ b/services/forgejo-nsc/internal/nsc/macos.go @@ -0,0 +1,708 @@ +package nsc + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "os" + "strconv" + "strings" + "sync" + "time" + + computev1betaconnect 
// hasMacOSLabel reports whether any label starts with the Namespace macOS
// profile prefix. Prefix matching means suffixed labels such as
// "namespace-profile-macos-large:host" also match.
func hasMacOSLabel(labels []string) bool {
	for _, label := range labels {
		l := strings.TrimSpace(label)
		if l == "" {
			continue
		}
		if strings.HasPrefix(l, "namespace-profile-macos-") {
			return true
		}
	}
	return false
}

// lockedBuffer is a mutex-guarded bytes.Buffer. The SSH session writes output
// from background goroutines, so all access must be synchronized.
type lockedBuffer struct {
	mu sync.Mutex
	b  bytes.Buffer
}

// Write appends p under the lock; implements io.Writer.
func (lb *lockedBuffer) Write(p []byte) (int, error) {
	lb.mu.Lock()
	defer lb.mu.Unlock()
	return lb.b.Write(p)
}

// Len returns the buffered byte count under the lock.
func (lb *lockedBuffer) Len() int {
	lb.mu.Lock()
	defer lb.mu.Unlock()
	return lb.b.Len()
}

// String returns a snapshot of the buffered output under the lock.
func (lb *lockedBuffer) String() string {
	lb.mu.Lock()
	defer lb.mu.Unlock()
	return lb.b.String()
}

// macosSupportDiskSelectors maps a human-friendly base image ID (or a raw
// selector list) to the Compute label selectors that pick the macOS support
// disk. An empty ID defaults to "tahoe".
func macosSupportDiskSelectors(baseImageID string) []*stdlib.Label {
	id := strings.TrimSpace(baseImageID)
	if id == "" {
		id = "tahoe"
	}

	// Allow specifying selectors directly, e.g. "macos.version=26.x,image.with=xcode-26".
	// Malformed entries (missing name or value) are silently skipped; if nothing
	// parses, fall through to the preset handling below.
	if strings.Contains(id, "=") {
		var out []*stdlib.Label
		for _, part := range strings.Split(id, ",") {
			part = strings.TrimSpace(part)
			if part == "" {
				continue
			}
			name, value, ok := strings.Cut(part, "=")
			name = strings.TrimSpace(name)
			value = strings.TrimSpace(value)
			if !ok || name == "" || value == "" {
				continue
			}
			out = append(out, &stdlib.Label{Name: name, Value: value})
		}
		if len(out) > 0 {
			return out
		}
	}

	// Human-friendly presets used by burrow config.
	switch strings.ToLower(id) {
	case "sonoma", "macos-14", "macos14", "14":
		return []*stdlib.Label{{Name: "macos.version", Value: "14.x"}}
	case "sequoia", "macos-15", "macos15", "15":
		return []*stdlib.Label{{Name: "macos.version", Value: "15.x"}}
	case "tahoe", "macos-26", "macos26", "26":
		// Constrain to the Xcode 26 support disk explicitly, since Apple builds
		// depend on Xcode being present and Compute currently errors if it can't
		// resolve a support disk selection.
		return []*stdlib.Label{{Name: "macos.version", Value: "26.x"}, {Name: "image.with", Value: "xcode-26"}}
	default:
		// Unknown preset: fall back to the latest known macOS version.
		return []*stdlib.Label{{Name: "macos.version", Value: "26.x"}}
	}
}

// macosComputeBaseImageID canonicalizes a preset base image ID for the
// Compute API's experimental MacosBaseImageId field. Returns "" when the
// input was a raw selector list (no canonical ID can be inferred), and
// passes unknown IDs through unchanged.
func macosComputeBaseImageID(baseImageID string) string {
	id := strings.TrimSpace(baseImageID)
	if id == "" {
		return "tahoe"
	}
	// If selectors were provided directly, we cannot safely infer a canonical
	// base image ID from them.
	if strings.Contains(id, "=") {
		return ""
	}
	switch strings.ToLower(id) {
	case "sonoma", "macos-14", "macos14", "14":
		return "sonoma"
	case "sequoia", "macos-15", "macos15", "15":
		return "sequoia"
	case "tahoe", "macos-26", "macos26", "26":
		return "tahoe"
	default:
		return id
	}
}

// nscBearerTokenFile is the on-host JSON token format written by burrow.
type nscBearerTokenFile struct {
	BearerToken string `json:"bearer_token"`
}

// readNSCBearerToken loads the Namespace bearer token from the file named by
// NSC_TOKEN_FILE. The file may be either burrow's JSON envelope
// ({"bearer_token":"..."}) or a raw token string.
func readNSCBearerToken() (string, error) {
	path := os.Getenv("NSC_TOKEN_FILE")
	if path == "" {
		return "", errors.New("NSC_TOKEN_FILE is required for macos runners")
	}
	raw, err := os.ReadFile(path)
	if err != nil {
		return "", fmt.Errorf("read NSC_TOKEN_FILE: %w", err)
	}
	trimmed := strings.TrimSpace(string(raw))
	if trimmed == "" {
		return "", errors.New("NSC_TOKEN_FILE is empty")
	}
	// Support the on-host format used by burrow: {"bearer_token":"..."}.
	if err := json.Unmarshal([]byte(trimmed), new(nscBearerTokenFile)); err == nil {
	}
	var parsed nscBearerTokenFile
	if err := json.Unmarshal([]byte(trimmed), &parsed); err == nil && parsed.BearerToken != "" {
		return parsed.BearerToken, nil
	}
	// Fallback: allow a raw bearer token.
	return trimmed, nil
}
// parseMachineTypeCPUxMemGB parses a "CPUxMemoryGB" machine shape such as
// "12x28" into a vCPU count and a memory size in megabytes.
//
// It returns an error when the string is not exactly two base-10 integers
// separated by a single "x", when either value is non-positive, or when the
// memory size would overflow int32 after the GB→MB conversion (the original
// code silently truncated in that case).
func parseMachineTypeCPUxMemGB(machineType string) (vcpu int32, memoryMB int32, err error) {
	cpuStr, memStr, ok := strings.Cut(machineType, "x")
	// Reject both "no separator" and "more than one separator" forms.
	if !ok || strings.Contains(memStr, "x") {
		return 0, 0, fmt.Errorf("invalid machine_type %q: expected CPUxMemoryGB (e.g. 12x28)", machineType)
	}
	cpu64, err := strconv.ParseInt(cpuStr, 10, 32)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid machine_type %q: cpu: %w", machineType, err)
	}
	memGB64, err := strconv.ParseInt(memStr, 10, 32)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid machine_type %q: memory: %w", machineType, err)
	}
	// Shapes like "0x8" or "8x0" are meaningless; downstream normalization
	// assumes both values are >= 1. Also guard the *1024 int32 conversion.
	if cpu64 <= 0 || memGB64 <= 0 || memGB64 > (1<<31-1)/1024 {
		return 0, 0, fmt.Errorf("invalid machine_type %q: cpu and memory must be positive", machineType)
	}
	return int32(cpu64), int32(memGB64 * 1024), nil
}
// launchMacOSRunner provisions a macOS Compute instance directly via the
// ComputeService API, runs the bootstrap script as a JOB application, waits
// for the instance to stop (or the TTL to elapse), then destroys it.
//
// machineType must be in "CPUxMemGB" form (e.g. "12x28"). Secrets (runner
// token) are delivered to the instance via application environment variables.
func (d *Dispatcher) launchMacOSRunner(ctx context.Context, runnerName string, req LaunchRequest, ttl time.Duration, machineType string) error {
	if machineType == "" {
		return errors.New("machine_type is required for macos runners")
	}
	vcpu, memoryMB, err := parseMachineTypeCPUxMemGB(machineType)
	if err != nil {
		return err
	}
	bearer, err := readNSCBearerToken()
	if err != nil {
		return err
	}

	httpClient := &http.Client{Timeout: 60 * time.Second}
	client := computev1betaconnect.NewComputeServiceClient(httpClient, d.opts.ComputeBaseURL)

	workdir := d.opts.WorkDir
	if strings.TrimSpace(workdir) == "" {
		workdir = "/tmp/forgejo-runner"
	}

	// Runner configuration for the bootstrap script; req.ExtraEnv may override
	// any of these keys since it is applied last.
	env := map[string]string{
		"FORGEJO_INSTANCE_URL":   req.InstanceURL,
		"FORGEJO_RUNNER_TOKEN":   req.Token,
		"FORGEJO_RUNNER_NAME":    runnerName,
		"FORGEJO_RUNNER_LABELS":  strings.Join(req.Labels, ","),
		"FORGEJO_RUNNER_EXEC":    d.opts.Executor,
		"FORGEJO_RUNNER_WORKDIR": workdir,
	}
	for k, v := range req.ExtraEnv {
		env[k] = v
	}
	// Best-effort caching: workflows call Scripts/nscloud-cache.sh, which is a
	// no-op unless NSC_CACHE_PATH is set. This may still be skipped if spacectl
	// lacks credentials, but setting the path is harmless and keeps behavior
	// consistent across macOS / Linux runners.
	if _, ok := env["NSC_CACHE_PATH"]; !ok {
		env["NSC_CACHE_PATH"] = "/Users/runner/.cache/nscloud"
	}

	// The instance deadline doubles as a hard TTL enforced server-side.
	deadline := timestamppb.New(time.Now().Add(ttl))

	createReq := &computev1beta.CreateInstanceRequest{
		Shape: &computev1beta.InstanceShape{
			VirtualCpu:      vcpu,
			MemoryMegabytes: memoryMB,
			MachineArch:     d.opts.MacosMachineArch,
			Os:              "macos",
			// Namespace macOS compute requires selectors to pick the base image
			// ("support disk"), otherwise instance creation fails.
			Selectors: macosSupportDiskSelectors(d.opts.MacosBaseImageID),
		},
		DocumentedPurpose: fmt.Sprintf("burrow forgejo runner %s", runnerName),
		Deadline:          deadline,
		Labels: []*stdlib.Label{
			{Name: "nsc.source", Value: "forgejo-nsc"},
			{Name: "burrow.service", Value: "forgejo-runner"},
			{Name: "burrow.runner", Value: runnerName},
		},
		Applications: []*computev1beta.ApplicationRequest{
			{
				Name:         "forgejo-runner",
				Command:      "/bin/bash",
				Args:         []string{"-lc", macosBootstrapScript()},
				Environment:  env,
				WorkloadType: computev1beta.ApplicationRequest_JOB,
			},
		},
	}
	// Only set the experimental base image ID when one can be canonicalized
	// (raw selector strings yield "" and are skipped).
	if imageID := macosComputeBaseImageID(d.opts.MacosBaseImageID); imageID != "" {
		createReq.Experimental = &computev1beta.CreateInstanceRequest_ExperimentalFeatures{
			MacosBaseImageId: imageID,
		}
	}

	d.log.Info("launching Namespace macos runner",
		"runner", runnerName,
		"compute_base_url", d.opts.ComputeBaseURL,
		"macos_base_image_id", d.opts.MacosBaseImageID,
		"shape", fmt.Sprintf("%dx%d", vcpu, memoryMB/1024),
		"arch", d.opts.MacosMachineArch,
	)

	reqCreate := connect.NewRequest(createReq)
	reqCreate.Header().Set("Authorization", "Bearer "+bearer)
	resp, err := client.CreateInstance(ctx, reqCreate)
	if err != nil {
		return fmt.Errorf("compute create instance failed: %w", err)
	}
	if resp.Msg == nil || resp.Msg.Metadata == nil {
		return errors.New("compute create instance returned no metadata")
	}
	instanceID := resp.Msg.Metadata.InstanceId

	// Wait for completion, then destroy with a background context so cleanup
	// still runs when the caller's ctx is already canceled.
	waitErr := d.waitForMacOSRunnerStop(ctx, client, bearer, runnerName, instanceID, ttl)
	d.destroyComputeInstance(context.Background(), client, bearer, runnerName, instanceID)
	return waitErr
}
// runMacOSComputeSSHScript executes script on a Compute instance over SSH,
// using credentials fetched from the Compute GetSSHConfig endpoint.
//
// Strategy: first try a plain exec request ("/bin/bash" reading the script
// from stdin). If that fails in a way that suggests the server only supports
// interactive shells, retry via Shell() with a PTY, streaming the script to
// the remote host base64-encoded in small chunks and running the decoded file.
func (d *Dispatcher) runMacOSComputeSSHScript(ctx context.Context, runnerName, instanceID, script string) error {
	bearer, err := readNSCBearerToken()
	if err != nil {
		return err
	}

	httpClient := &http.Client{Timeout: 60 * time.Second}
	client := computev1betaconnect.NewComputeServiceClient(httpClient, d.opts.ComputeBaseURL)

	getReq := connect.NewRequest(&computev1beta.GetSSHConfigRequest{
		InstanceId: instanceID,
		// TargetContainer is optional. Keep it empty to run commands in the default instance environment.
	})
	getReq.Header().Set("Authorization", "Bearer "+bearer)

	resp, err := client.GetSSHConfig(ctx, getReq)
	if err != nil {
		return fmt.Errorf("compute get ssh config failed: %w", err)
	}
	// Validate every field we rely on before dialing.
	if resp.Msg == nil {
		return errors.New("compute get ssh config returned empty response")
	}
	if resp.Msg.Endpoint == "" {
		return errors.New("compute get ssh config returned empty endpoint")
	}
	if len(resp.Msg.SshPrivateKey) == 0 {
		return errors.New("compute get ssh config returned empty ssh private key")
	}
	if strings.TrimSpace(resp.Msg.Username) == "" {
		return errors.New("compute get ssh config returned empty username")
	}

	signer, err := ssh.ParsePrivateKey(resp.Msg.SshPrivateKey)
	if err != nil {
		return fmt.Errorf("parse ssh private key: %w", err)
	}

	addr := fmt.Sprintf("%s:22", resp.Msg.Endpoint)
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return fmt.Errorf("dial ssh endpoint: %w", err)
	}
	defer conn.Close()

	sshCfg := &ssh.ClientConfig{
		User:            resp.Msg.Username,
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // Endpoint is short-lived and key is delivered out-of-band.
		Timeout:         30 * time.Second,
	}

	c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshCfg)
	if err != nil {
		return fmt.Errorf("ssh client conn: %w", err)
	}
	clientSSH := ssh.NewClient(c, chans, reqs)
	defer clientSSH.Close()

	session, err := clientSSH.NewSession()
	if err != nil {
		return fmt.Errorf("ssh new session: %w", err)
	}
	defer session.Close()

	var buf bytes.Buffer
	session.Stdout = &buf
	session.Stderr = &buf
	session.Stdin = strings.NewReader(script)

	// Feed the bootstrap script via stdin so we don't need to quote/escape it.
	//
	// Note: Some SSH servers do not reliably parse exec strings with arguments.
	// Running bare `/bin/bash` still reads from stdin and avoids argument parsing.
	if err := session.Run("/bin/bash"); err != nil {
		outRaw := buf.String()
		out := strings.TrimSpace(outRaw)

		// Some SSH servers reject exec requests and only allow interactive shells,
		// and others will "succeed" but still interpret stdin under the default
		// login shell (showing the zsh banner / prompts).
		//
		// In those cases, retry via Shell() with a PTY.
		exitStatus := 0
		exitErr, isExitErr := err.(*ssh.ExitError)
		if isExitErr {
			exitStatus = exitErr.ExitStatus()
		}

		// Heuristics for "the exec ran under an interactive login shell":
		// macOS zsh banner, a user prompt, or the bash-3.2 prompt string.
		looksInteractive := strings.Contains(outRaw, "The default interactive shell is now zsh") ||
			strings.Contains(outRaw, " runner$ ") ||
			strings.Contains(outRaw, "bash-3.2$")
		// Non-ExitError means the request itself failed (no exit status at all).
		shouldFallback := !isExitErr || looksInteractive

		if shouldFallback {
			d.log.Warn("compute ssh exec bootstrap failed; retrying via interactive shell",
				"runner", runnerName,
				"instance", instanceID,
				"exit_status", exitStatus,
			)

			session2, err2 := clientSSH.NewSession()
			if err2 != nil {
				return fmt.Errorf("ssh new session (fallback): %w", err2)
			}
			defer session2.Close()

			// bytes.Buffer isn't safe for concurrent writes + reads; the SSH session
			// writes from background goroutines. Wrap it so we can poll for a prompt
			// before sending commands.
			lb := &lockedBuffer{}
			session2.Stdout = lb
			session2.Stderr = lb

			in, err2 := session2.StdinPipe()
			if err2 != nil {
				return fmt.Errorf("ssh stdin pipe (fallback): %w", err2)
			}

			// Request a PTY to match interactive semantics even when the caller
			// doesn't have a local terminal. Failure is tolerated (best-effort).
			_ = session2.RequestPty("xterm", 24, 80, nil)

			if err2 := session2.Shell(); err2 != nil {
				return fmt.Errorf("ssh shell (fallback): %w", err2)
			}

			// Wait briefly for the prompt/banner so the first command isn't dropped.
			// We also emit a sentinel `echo` to verify the TTY is live.
			deadline := time.Now().Add(3 * time.Second)
			for time.Now().Before(deadline) {
				n := lb.Len()
				if n > 0 {
					break
				}
				time.Sleep(50 * time.Millisecond)
			}

			// Stream the script then exit. Prefer LF line endings; macOS shells and
			// PTYs can treat CRLF as literal CR characters (breaking heredoc
			// delimiters and quoting).
			writeTTY := func(s string) {
				if s == "" {
					return
				}
				s = strings.ReplaceAll(s, "\r\n", "\n")
				_, _ = io.WriteString(in, s)
			}

			scriptTTY := strings.ReplaceAll(script, "\r\n", "\n")

			// Cut down noise in logs and reduce the chance of ZSH line-editing
			// behavior corrupting long inputs.
			writeTTY("stty -echo 2>/dev/null || true\n")
			writeTTY("echo BURROW_BOOTSTRAP_TTY_OK\n")

			// Avoid heredocs for the script itself (PTY newline handling is fragile).
			// Instead, stream base64 in short chunks to a file, then decode and run it.
			enc := base64.StdEncoding.EncodeToString([]byte(scriptTTY))
			idSafe := strings.ReplaceAll(instanceID, "-", "_")
			b64Path := "/tmp/burrow-bootstrap-" + idSafe + ".b64"
			shPath := "/tmp/burrow-bootstrap-" + idSafe + ".sh"

			writeTTY("rm -f " + b64Path + " " + shPath + "\n")
			writeTTY(": > " + b64Path + "\n")

			const chunkSize = 80
			for i := 0; i < len(enc); i += chunkSize {
				j := i + chunkSize
				if j > len(enc) {
					j = len(enc)
				}
				chunk := enc[i:j]
				// Base64 chunks contain only [A-Za-z0-9+/=], which are safe to pass
				// unquoted. Avoid quotes entirely so a truncated line can't leave
				// the remote shell in a multi-line continuation state.
				writeTTY("printf %s " + chunk + " >> " + b64Path + "\n")
				// Small pacing delay between chunks; presumably guards against PTY
				// input buffer overruns — TODO confirm necessity.
				time.Sleep(5 * time.Millisecond)
			}

			// macOS uses `base64 -D` (BSD), some environments use `-d` (GNU).
			writeTTY("base64 -D " + b64Path + " > " + shPath + " 2>/dev/null || base64 -d " + b64Path + " > " + shPath + "\n")
			writeTTY("/bin/bash " + shPath + "\n")
			writeTTY("exit\n")
			_ = in.Close()

			if err2 := session2.Wait(); err2 != nil {
				out2 := strings.TrimSpace(lb.String())
				// Cap the surfaced output at the final 16 KiB.
				if len(out2) > 16*1024 {
					out2 = out2[len(out2)-16*1024:]
				}
				return fmt.Errorf("compute ssh runner bootstrap failed (shell fallback): %w\n%s", err2, out2)
			}

			d.log.Info("macos runner bootstrap completed via compute ssh shell", "runner", runnerName, "instance", instanceID)
			return nil
		}

		// Cap the surfaced output at the final 16 KiB.
		if len(out) > 16*1024 {
			out = out[len(out)-16*1024:]
		}
		return fmt.Errorf("compute ssh runner bootstrap failed: %w\n%s", err, out)
	}

	d.log.Info("macos runner bootstrap completed via compute ssh", "runner", runnerName, "instance", instanceID)
	return nil
}
// waitForMacOSRunnerStop polls the Compute API every 15 seconds until the
// instance stops, the TTL deadline passes, or ctx is canceled.
//
// Transient describe errors are logged and retried rather than returned; a
// non-positive ttl falls back to the dispatcher's configured default.
func (d *Dispatcher) waitForMacOSRunnerStop(ctx context.Context, client computev1betaconnect.ComputeServiceClient, bearer, runnerName, instanceID string, ttl time.Duration) error {
	if ttl <= 0 {
		ttl = d.opts.DefaultDuration
	}
	deadline := time.Now().Add(ttl)
	ticker := time.NewTicker(15 * time.Second)
	defer ticker.Stop()

	for {
		stopped, err := d.checkComputeInstanceStopped(ctx, client, bearer, instanceID)
		if err != nil {
			// Best-effort: a failed check does not abort the wait loop.
			d.log.Warn("macos runner stop check failed", "runner", runnerName, "instance", instanceID, "err", err)
		} else if stopped {
			return nil
		}

		if time.Now().After(deadline) {
			return fmt.Errorf("macos runner exceeded ttl (%s) without stopping", ttl)
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}
// checkComputeInstanceStopped describes the instance and reports whether it
// has reached a terminal state.
//
// Returns (true, nil) for DESTROYED, NotFound, or a set DestroyedAt
// timestamp; returns (true, err) when the instance entered the ERROR state,
// with a bounded summary of its shutdown reasons.
func (d *Dispatcher) checkComputeInstanceStopped(ctx context.Context, client computev1betaconnect.ComputeServiceClient, bearer, instanceID string) (bool, error) {
	describeReq := connect.NewRequest(&computev1beta.DescribeInstanceRequest{InstanceId: instanceID})
	describeReq.Header().Set("Authorization", "Bearer "+bearer)
	resp, err := client.DescribeInstance(ctx, describeReq)
	if err != nil {
		// NotFound means the instance is already gone.
		if connect.CodeOf(err) == connect.CodeNotFound {
			return true, nil
		}
		return false, err
	}
	if resp.Msg == nil || resp.Msg.Metadata == nil {
		return false, errors.New("describe instance returned no metadata")
	}
	switch resp.Msg.Metadata.Status {
	case computev1beta.InstanceMetadata_DESTROYED:
		return true, nil
	case computev1beta.InstanceMetadata_ERROR:
		// Best-effort include shutdown reasons; do not include unbounded output.
		// The 1024-byte cap is checked after each append, so the result may
		// slightly exceed it by one reason.
		var b strings.Builder
		for _, reason := range resp.Msg.ShutdownReasons {
			if reason == nil {
				continue
			}
			if b.Len() > 0 {
				b.WriteString("; ")
			}
			b.WriteString(reason.String())
			if b.Len() > 1024 {
				break
			}
		}
		msg := strings.TrimSpace(b.String())
		if msg == "" {
			msg = "unknown shutdown reason"
		}
		return true, fmt.Errorf("instance entered error state: %s", msg)
	default:
		// Some terminal instances may not report DESTROYED status but do carry
		// a destruction timestamp.
		if resp.Msg.Metadata.DestroyedAt != nil {
			return true, nil
		}
		return false, nil
	}
}

// destroyComputeInstance destroys the instance via the Compute API, with a
// 30-second timeout layered onto the supplied (possibly nil) context.
// Failures are logged, never returned: destruction is best-effort cleanup.
func (d *Dispatcher) destroyComputeInstance(ctx context.Context, client computev1betaconnect.ComputeServiceClient, bearer, runnerName, instanceID string) {
	if ctx == nil {
		ctx = context.Background()
	}
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	destroyReq := connect.NewRequest(&computev1beta.DestroyInstanceRequest{InstanceId: instanceID})
	destroyReq.Header().Set("Authorization", "Bearer "+bearer)
	if _, err := client.DestroyInstance(ctx, destroyReq); err != nil {
		// NotFound is success: the instance was already gone.
		if connect.CodeOf(err) == connect.CodeNotFound {
			d.log.Info("macos runner destroyed", "runner", runnerName, "instance", instanceID, "status", "not_found")
			return
		}
		d.log.Warn("macos runner destroy failed", "runner", runnerName, "instance", instanceID, "err", err)
		return
	}
	d.log.Info("macos runner destroyed", "runner", runnerName, "instance", instanceID)
}
// macosBootstrapScript builds the shell script that provisions a fresh macOS
// VM base image: installs nix (Determinate Systems installer) if absent,
// writes a default nix.conf, builds forgejo-runner from source (fetching a Go
// toolchain if needed), registers the runner, and runs exactly one job.
//
// Keep this script self-contained: it runs on a fresh macOS VM base image.
func macosBootstrapScript() string {
	var b strings.Builder
	b.WriteString(`set -euo pipefail

workdir="${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}"
mkdir -p "${workdir}"
cd "${workdir}"

export PATH="/usr/local/bin:/opt/homebrew/bin:/usr/bin:/bin:/usr/sbin:/sbin:${PATH}"

if ! command -v curl >/dev/null 2>&1; then
  echo "curl is required" >&2
  exit 1
fi

if ! command -v nix >/dev/null 2>&1; then
  echo "Installing nix (Determinate Systems installer)..."
  installer="/tmp/nix-installer.$$"
  curl -fsSL -o "${installer}" https://install.determinate.systems/nix
  chmod +x "${installer}"

  if command -v sudo >/dev/null 2>&1; then
    if sudo -n true 2>/dev/null; then
      sudo -n sh "${installer}" install --no-confirm
    else
      sudo sh "${installer}" install --no-confirm
    fi
  else
    sh "${installer}" install --no-confirm
  fi

  rm -f "${installer}"
fi

if [[ -f /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]]; then
  # shellcheck disable=SC1091
  . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
fi

export PATH="/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}"

# Flake builds need nix-command + flakes enabled. Workflows may layer additional
# config, but ensure a sane default exists.
mkdir -p "${XDG_CONFIG_HOME:-$HOME/.config}/nix"
cat > "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" <<'EOF'
experimental-features = nix-command flakes
sandbox = true
fallback = true
substituters = https://cache.nixos.org
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
EOF

mkdir -p bin
export PATH="${PWD}/bin:${PATH}"

runner_version="v12.6.4"
runner_src_tgz="forgejo-runner-${runner_version}.tar.gz"
runner_src_url="https://code.forgejo.org/forgejo/runner/archive/${runner_version}.tar.gz"
runner_src_dir="forgejo-runner-src"

if ! command -v forgejo-runner >/dev/null 2>&1; then
  rm -rf "${runner_src_dir}"
  mkdir -p "${runner_src_dir}"
  curl -fsSL "${runner_src_url}" -o "${runner_src_tgz}"
  tar -xzf "${runner_src_tgz}" -C "${runner_src_dir}" --strip-components=1

  toolchain="$(grep -E '^toolchain ' "${runner_src_dir}/go.mod" | awk '{print $2}' | head -n 1 || true)"
  if [ -z "${toolchain}" ]; then
    toolchain="go1.25.7"
  fi

  if ! command -v go >/dev/null 2>&1; then
    go_tgz="${toolchain}.darwin-arm64.tar.gz"
    go_url="https://go.dev/dl/${go_tgz}"
    curl -fsSL "${go_url}" -o "${go_tgz}"
    tar -xzf "${go_tgz}"
    export GOROOT="${PWD}/go"
    export PATH="${GOROOT}/bin:${PATH}"
  fi

  export GOPATH="${PWD}/.gopath"
  export GOMODCACHE="${PWD}/.gomodcache"
  export GOCACHE="${PWD}/.gocache"
  mkdir -p "${GOPATH}" "${GOMODCACHE}" "${GOCACHE}"

  (cd "${runner_src_dir}" && go build -o "${workdir}/bin/forgejo-runner" .)
  chmod +x "${workdir}/bin/forgejo-runner"
fi

cat > runner.yaml <<'EOF'
log:
  level: info
runner:
  file: .runner
  capacity: 1
  name: ${FORGEJO_RUNNER_NAME}
  labels:
EOF

runner_exec="${FORGEJO_RUNNER_EXEC:-host}"
if [ "$runner_exec" = "shell" ]; then
  runner_exec="host"
fi

resolved_labels=""
for label in ${FORGEJO_RUNNER_LABELS//,/ } ; do
  if [ -z "${label}" ]; then
    continue
  fi
  case "${label}" in
    *:*) resolved="${label}" ;;
    *)
      resolved="${label}:host"
      ;;
  esac
  echo "    - ${resolved}" >> runner.yaml
  if [ -z "${resolved_labels}" ]; then
    resolved_labels="${resolved}"
  else
    resolved_labels="${resolved_labels},${resolved}"
  fi
done

cat >> runner.yaml <<'EOF'
cache:
  enabled: false
EOF

forgejo-runner register \
  --no-interactive \
  --instance "${FORGEJO_INSTANCE_URL}" \
  --token "${FORGEJO_RUNNER_TOKEN}" \
  --name "${FORGEJO_RUNNER_NAME}" \
  --labels "${resolved_labels}" \
  --config runner.yaml

forgejo-runner one-job --config runner.yaml
`)
	return b.String()
}
string, changed bool, err error) { + vcpu, memoryMB, err := parseMachineTypeCPUxMemGB(machineType) + if err != nil { + return "", false, err + } + memGB := memoryMB / 1024 + if memGB <= 0 || vcpu <= 0 { + return "", false, fmt.Errorf("invalid machine_type %q after parse: vcpu=%d memGB=%d", machineType, vcpu, memGB) + } + + // NSC CLI (and the underlying InstanceService) enforce discrete cpu/mem sets + // for macOS. Normalize requested values by rounding up to the closest allowed + // values to keep provisioning stable even when configs drift. + // + // Observed allowed sets from Namespace API error output for macos/arm64: + // cpu: [4 6 8 12] + // mem: [7 14 28 56] (GB) + allowedCPU := []int32{4, 6, 8, 12} + allowedMemGB := []int32{7, 14, 28, 56} + + roundUp := func(v int32, allowed []int32) (int32, bool) { + for _, a := range allowed { + if v <= a { + return a, a != v + } + } + // Clamp to max if above all allowed values. + return allowed[len(allowed)-1], true + } + + newCPU, cpuChanged := roundUp(vcpu, allowedCPU) + newMemGB, memChanged := roundUp(memGB, allowedMemGB) + + normalized = fmt.Sprintf("%dx%d", newCPU, newMemGB) + changed = cpuChanged || memChanged + return normalized, changed, nil +} + +func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName string, req LaunchRequest, ttl time.Duration, machineType string) error { + if machineType == "" { + return errors.New("machine_type is required for macos runners") + } + if strings.TrimSpace(os.Getenv("NSC_TOKEN_FILE")) == "" { + // The Burrow forge host feeds NSC_TOKEN_FILE from the intake-backed runtime token. 
+ return errors.New("NSC_TOKEN_FILE is required for macos runners") + } + + selectors := macosSelectorsArg(d.opts.MacosBaseImageID) + if selectors == "" { + return errors.New("macos selectors resolved empty") + } + + normalizedMachineType := machineType + if n, changed, err := normalizeMacOSNSCMachineType(machineType); err != nil { + return err + } else if changed { + normalizedMachineType = n + } + + // If capacity is constrained for the requested (large) shape, try a small + // set of progressively smaller shapes before failing the dispatch request. + // This keeps macOS builds flowing even when large runners are scarce. + candidates := []string{normalizedMachineType, "8x28", "6x14", "4x7"} + seen := map[string]struct{}{} + var uniq []string + for _, c := range candidates { + c = strings.TrimSpace(c) + if c == "" { + continue + } + if _, ok := seen[c]; ok { + continue + } + seen[c] = struct{}{} + uniq = append(uniq, c) + } + candidates = uniq + + type attemptCfg struct { + waitTimeout time.Duration + createTimeout time.Duration + } + attempts := []attemptCfg{ + {waitTimeout: 6 * time.Minute, createTimeout: 8 * time.Minute}, + {waitTimeout: 4 * time.Minute, createTimeout: 6 * time.Minute}, + {waitTimeout: 3 * time.Minute, createTimeout: 5 * time.Minute}, + } + + createInstance := func(mt string, a attemptCfg) (instanceID string, out string, err error) { + tmpDir, err := os.MkdirTemp("", "forgejo-nsc-macos-*") + if err != nil { + return "", "", fmt.Errorf("mktemp: %w", err) + } + defer os.RemoveAll(tmpDir) + + metaPath := filepath.Join(tmpDir, "create.json") + cidPath := filepath.Join(tmpDir, "create.cid") + + arch := strings.TrimSpace(d.opts.MacosMachineArch) + if arch == "" { + arch = "arm64" + } + // Namespace CLI requires the "os/arch:" prefix to create a macOS instance. + // Without it, `nsc create` defaults to Linux even if selectors include macos.*. 
+ machineType := fmt.Sprintf("macos/%s:%s", arch, mt) + + args := []string{ + "create", + "--duration", ttl.String(), + "--machine_type", machineType, + "--selectors", selectors, + "--bare", + "--cidfile", cidPath, + "--log_actions", + "--purpose", fmt.Sprintf("burrow forgejo runner %s", runnerName), + // Prefer plain output for debuggability (progress, capacity errors, etc). + "--output", "plain", + "--output_json_to", metaPath, + // macOS instances can take a while to become ready. + "--wait_timeout", a.waitTimeout.String(), + } + args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL) + + createCtx, cancel := context.WithTimeout(ctx, a.createTimeout) + defer cancel() + + cmd := exec.CommandContext(createCtx, d.opts.BinaryPath, args...) + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + + if err := cmd.Run(); err != nil { + // Best-effort cleanup: if the instance ID was written before the command failed + // (or before we timed it out), attempt to destroy it to avoid idling machines. 
+ if instanceID := strings.TrimSpace(mustReadFile(cidPath)); instanceID != "" { + d.destroyNSCInstance(context.Background(), runnerName, instanceID) + } + if errors.Is(createCtx.Err(), context.DeadlineExceeded) { + return "", buf.String(), fmt.Errorf("nsc create timed out after %s", a.createTimeout) + } + return "", buf.String(), fmt.Errorf("nsc create failed: %w", err) + } + + instanceID, err = readNSCCreateInstanceID(metaPath) + if err != nil { + return "", buf.String(), fmt.Errorf("nsc create output parse failed: %w", err) + } + if instanceID == "" { + return "", buf.String(), fmt.Errorf("nsc create returned empty instance id") + } + return instanceID, buf.String(), nil + } + + var ( + instanceID string + lastOut string + lastErr error + ) + for i, mt := range candidates { + a := attempts[i] + if i >= len(attempts) { + a = attempts[len(attempts)-1] + } + + d.log.Info("launching Namespace macos runner via nsc", + "runner", runnerName, + "attempt", i+1, + "machine_type", mt, + "requested_machine_type", machineType, + "selectors", selectors, + ) + + id, out, err := createInstance(mt, a) + lastOut = out + lastErr = err + if err != nil { + // Timeouts are treated as retryable (capacity constrained). + if strings.Contains(err.Error(), "timed out") || strings.Contains(strings.ToLower(out), "capacity") { + continue + } + return fmt.Errorf("%w\n%s", err, out) + } + instanceID = id + break + } + if instanceID == "" { + if lastErr != nil { + return fmt.Errorf("%w\n%s", lastErr, lastOut) + } + return fmt.Errorf("nsc create failed without producing an instance id\n%s", lastOut) + } + + // Always attempt cleanup even if the runner fails. 
// mustReadFile reads path and returns its contents, or "" when the file
// cannot be read. Used for best-effort reads (e.g. cidfiles) where a missing
// file is expected.
func mustReadFile(path string) string {
	if data, err := os.ReadFile(path); err == nil {
		return string(data)
	}
	return ""
}

// macosSelectorsArg maps a base image ID (or a raw selector string) to the
// `--selectors` argument passed to `nsc create`. An empty ID defaults to
// "tahoe"; a string containing "=" is treated as selectors and passed through.
func macosSelectorsArg(baseImageID string) string {
	id := strings.TrimSpace(baseImageID)
	if id == "" {
		id = "tahoe"
	}
	// Allow passing selectors directly via config, e.g. "macos.version=26.x,image.with=xcode-26".
	if strings.Contains(id, "=") {
		return id
	}
	switch strings.ToLower(id) {
	case "sonoma", "macos-14", "macos14", "14":
		return "macos.version=14.x"
	case "sequoia", "macos-15", "macos15", "15":
		return "macos.version=15.x"
	case "tahoe", "macos-26", "macos26", "26":
		return "macos.version=26.x,image.with=xcode-26"
	default:
		return "macos.version=26.x"
	}
}

// nscCreateMetadata mirrors the JSON written by `nsc create --output_json_to`.
// Different CLI versions have used different key names, hence three candidates.
type nscCreateMetadata struct {
	InstanceID string `json:"instance_id"`
	ClusterID  string `json:"cluster_id"`
	ID         string `json:"id"`
}

// readNSCCreateInstanceID extracts the instance ID from the `nsc create`
// JSON metadata file, trying the known key names in preference order.
// Returns ("", nil) when the file parses but carries no recognizable ID.
func readNSCCreateInstanceID(path string) (string, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return "", fmt.Errorf("read %s: %w", path, err)
	}
	var meta nscCreateMetadata
	if err := json.Unmarshal(raw, &meta); err != nil {
		return "", err
	}
	for _, candidate := range []string{meta.InstanceID, meta.ClusterID, meta.ID} {
		if candidate != "" {
			return candidate, nil
		}
	}
	return "", nil
}
// destroyNSCInstance force-destroys an instance via the `nsc` CLI, with
// region arguments prepended and a 2-minute timeout layered onto the supplied
// (possibly nil) context. Failures are logged, never returned: destruction
// is best-effort cleanup.
func (d *Dispatcher) destroyNSCInstance(ctx context.Context, runnerName, instanceID string) {
	if ctx == nil {
		ctx = context.Background()
	}
	ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
	defer cancel()

	args := []string{"destroy", "--force", instanceID}
	args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL)
	cmd := exec.CommandContext(ctx, d.opts.BinaryPath, args...)
	// Capture stdout and stderr together so failure logs carry the CLI output.
	var buf bytes.Buffer
	cmd.Stdout = &buf
	cmd.Stderr = &buf
	if err := cmd.Run(); err != nil {
		d.log.Warn("nsc destroy failed", "runner", runnerName, "instance", instanceID, "err", err, "output", strings.TrimSpace(buf.String()))
		return
	}
	d.log.Info("nsc instance destroyed", "runner", runnerName, "instance", instanceID)
}

// macosBootstrapWrapperScript prefixes the macOS bootstrap script with
// `export` statements for the runner configuration, single-quote-escaped.
//
// Pass all values via stdin script so secrets do not appear in the nsc ssh argv.
func macosBootstrapWrapperScript(runnerName string, req LaunchRequest, executor, workdir string) string {
	if strings.TrimSpace(workdir) == "" {
		workdir = "/tmp/forgejo-runner"
	}

	// Base configuration; req.ExtraEnv is applied last and may override keys.
	env := map[string]string{
		"FORGEJO_INSTANCE_URL":   req.InstanceURL,
		"FORGEJO_RUNNER_TOKEN":   req.Token,
		"FORGEJO_RUNNER_NAME":    runnerName,
		"FORGEJO_RUNNER_LABELS":  strings.Join(req.Labels, ","),
		"FORGEJO_RUNNER_EXEC":    executor,
		"FORGEJO_RUNNER_WORKDIR": workdir,
	}
	for k, v := range req.ExtraEnv {
		env[k] = v
	}

	var b strings.Builder
	b.WriteString("set -euo pipefail\n")
	// NOTE: map iteration order is random, so export order varies between
	// invocations; the bootstrap script does not depend on ordering.
	for k, v := range env {
		if strings.TrimSpace(k) == "" {
			continue
		}
		// Single-quote shell escaping: safe for arbitrary tokens.
		b.WriteString("export ")
		b.WriteString(k)
		b.WriteString("=")
		b.WriteString(shellSingleQuote(v))
		b.WriteString("\n")
	}
	b.WriteString("\n")
	b.WriteString(macosBootstrapScript())
	return b.String()
}
// shellSingleQuote wraps value in single quotes for a POSIX shell, escaping
// embedded single quotes with the close-quote/backslash-quote/reopen-quote
// idiom: ' becomes '\''.
//
// Fix: the previous replacement raw string was `'\"'\"'`, which emits literal
// backslash-double-quote sequences instead of re-quoted single quotes, so any
// value containing ' (e.g. a token or label) was corrupted — and stray "
// characters were injected — when the exported env lines were evaluated by
// the remote shell.
func shellSingleQuote(value string) string {
	return "'" + strings.ReplaceAll(value, "'", `'\''`) + "'"
}

// prependNSCRegionArgs prefixes args with "--region <region>", resolved from
// (in order): the NSC_REGION environment variable, the Compute base URL's
// host, then the "ord4" default used by burrow's other Namespace integrations.
func prependNSCRegionArgs(args []string, computeBaseURL string) []string {
	region := strings.TrimSpace(os.Getenv("NSC_REGION"))
	if region == "" {
		region = regionFromComputeBaseURL(computeBaseURL)
	}
	if region == "" {
		// Default to the burrow region used for other Namespace integrations.
		region = "ord4"
	}
	return append([]string{"--region", region}, args...)
}

// regionFromComputeBaseURL extracts a region from a Compute base URL, e.g.
// "https://ord4.compute.namespaceapis.com" -> "ord4". Returns "" when the
// URL does not parse or the host is not a *.compute.* endpoint.
func regionFromComputeBaseURL(raw string) string {
	raw = strings.TrimSpace(raw)
	if raw == "" {
		return ""
	}
	u, err := url.Parse(raw)
	if err != nil {
		return ""
	}
	host := u.Hostname()
	if host == "" {
		return ""
	}
	parts := strings.Split(host, ".")
	if len(parts) == 0 {
		return ""
	}
	// ord4.compute.namespaceapis.com -> ord4
	if strings.HasSuffix(host, ".compute.namespaceapis.com") || strings.Contains(host, ".compute.") {
		return parts[0]
	}
	return ""
}

// windowsDefaultMachineType is the shape used when no profile label narrows
// the Windows runner size.
const windowsDefaultMachineType = "windows/amd64:8x16"

// cpuMemShapePattern matches bare "CPUxMemGB" shapes such as "8x16".
var cpuMemShapePattern = regexp.MustCompile(`^\d+x\d+$`)

// hasWindowsLabel reports whether any label (with an optional ":suffix"
// stripped) starts with the Namespace Windows profile prefix.
func hasWindowsLabel(labels []string) bool {
	for _, label := range labels {
		l := strings.TrimSpace(label)
		if l == "" {
			continue
		}
		base := l
		if before, _, ok := strings.Cut(l, ":"); ok {
			base = before
		}
		if strings.HasPrefix(base, "namespace-profile-windows-") {
			return true
		}
	}
	return false
}
normalizeWindowsMachineType(machineType string, labels []string) string { + mt := strings.TrimSpace(machineType) + if strings.HasPrefix(mt, "windows/") { + return mt + } + if cpuMemShapePattern.MatchString(mt) { + return "windows/amd64:" + mt + } + + // Label-derived defaults: keep a simple shape ladder for explicit profile sizes. + for _, label := range labels { + base := strings.TrimSpace(label) + if before, _, ok := strings.Cut(base, ":"); ok { + base = before + } + switch { + case strings.HasPrefix(base, "namespace-profile-windows-small"): + return "windows/amd64:2x4" + case strings.HasPrefix(base, "namespace-profile-windows-medium"): + return "windows/amd64:4x8" + case strings.HasPrefix(base, "namespace-profile-windows-large"): + return windowsDefaultMachineType + } + } + return windowsDefaultMachineType +} + +func powershellSingleQuote(value string) string { + // PowerShell single-quoted string escaping: ' -> '' + return "'" + strings.ReplaceAll(value, "'", "''") + "'" +} diff --git a/services/forgejo-nsc/internal/nsc/windows_test.go b/services/forgejo-nsc/internal/nsc/windows_test.go new file mode 100644 index 0000000..2f1b5e6 --- /dev/null +++ b/services/forgejo-nsc/internal/nsc/windows_test.go @@ -0,0 +1,98 @@ +package nsc + +import "testing" + +func TestHasWindowsLabel(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + labels []string + want bool + }{ + { + name: "namespace windows label", + labels: []string{"namespace-profile-windows-large"}, + want: true, + }, + { + name: "namespace windows label with host suffix", + labels: []string{"namespace-profile-windows-large:host"}, + want: true, + }, + { + name: "non namespace windows-like label", + labels: []string{"burrow-winrunner:host"}, + want: false, + }, + { + name: "macos label", + labels: []string{"namespace-profile-macos-large"}, + want: false, + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + got := hasWindowsLabel(tc.labels) 
+ if got != tc.want { + t.Fatalf("hasWindowsLabel(%v) = %v, want %v", tc.labels, got, tc.want) + } + }) + } +} + +func TestNormalizeWindowsMachineType(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + machine string + labels []string + wantPrefix string + }{ + { + name: "explicit windows machine type keeps value", + machine: "windows/amd64:8x16", + labels: []string{"namespace-profile-windows-large"}, + wantPrefix: "windows/amd64:8x16", + }, + { + name: "shape only is normalized", + machine: "4x8", + labels: []string{"namespace-profile-windows-large"}, + wantPrefix: "windows/amd64:4x8", + }, + { + name: "large label default", + machine: "", + labels: []string{"namespace-profile-windows-large"}, + wantPrefix: "windows/amd64:8x16", + }, + { + name: "medium label default", + machine: "", + labels: []string{"namespace-profile-windows-medium"}, + wantPrefix: "windows/amd64:4x8", + }, + { + name: "fallback default", + machine: "", + labels: []string{"namespace-profile-windows-custom"}, + wantPrefix: "windows/amd64:8x16", + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + got := normalizeWindowsMachineType(tc.machine, tc.labels) + if got != tc.wantPrefix { + t.Fatalf("normalizeWindowsMachineType(%q, %v) = %q, want %q", tc.machine, tc.labels, got, tc.wantPrefix) + } + }) + } +} diff --git a/services/forgejo-nsc/internal/nsc/windows_winrm.go b/services/forgejo-nsc/internal/nsc/windows_winrm.go new file mode 100644 index 0000000..22f13c9 --- /dev/null +++ b/services/forgejo-nsc/internal/nsc/windows_winrm.go @@ -0,0 +1,499 @@ +package nsc + +import ( + "bufio" + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "time" +) + +type windowsProxyOutput struct { + Endpoint string `json:"endpoint"` + RDP struct { + Credentials struct { + Username string `json:"username"` + Password string `json:"password"` + } `json:"credentials"` + 
} `json:"rdp"` +} + +func (d *Dispatcher) launchWindowsRunnerViaWinRM(ctx context.Context, runnerName string, req LaunchRequest, ttl time.Duration, machineType string) error { + script := windowsBootstrapScript(runnerName, req, d.opts.Executor, d.opts.WorkDir) + return d.launchWindowsScriptViaWinRM(ctx, runnerName, ttl, machineType, req.Labels, script) +} + +func (d *Dispatcher) launchWindowsScriptViaWinRM(ctx context.Context, runnerName string, ttl time.Duration, machineType string, labels []string, script string) error { + if ttl <= 0 { + ttl = d.opts.DefaultDuration + } + + mt := normalizeWindowsMachineType(machineType, labels) + instanceID, createOutput, err := d.createWindowsInstance(ctx, runnerName, ttl, mt) + if err != nil { + return fmt.Errorf("windows create failed: %w\n%s", err, createOutput) + } + defer d.destroyNSCInstance(context.Background(), runnerName, instanceID) + + username, password, err := d.resolveWindowsCredentials(ctx, instanceID) + if err != nil { + return err + } + + if err := d.probeWindowsWinRMService(ctx, instanceID); err != nil { + return err + } + + endpoint, stopForward, err := d.startWindowsWinRMPortForward(ctx, instanceID) + if err != nil { + return err + } + defer stopForward() + + if err := d.runWindowsWinRMPowerShell(ctx, endpoint, username, password, script); err != nil { + return err + } + + return nil +} + +func (d *Dispatcher) createWindowsInstance(ctx context.Context, runnerName string, ttl time.Duration, machineType string) (instanceID string, output string, err error) { + tmpDir, err := os.MkdirTemp("", "forgejo-nsc-windows-*") + if err != nil { + return "", "", fmt.Errorf("mktemp: %w", err) + } + defer os.RemoveAll(tmpDir) + + metaPath := filepath.Join(tmpDir, "create.json") + cidPath := filepath.Join(tmpDir, "create.cid") + + args := []string{ + "create", + "--duration", ttl.String(), + "--machine_type", machineType, + "--cidfile", cidPath, + "--purpose", fmt.Sprintf("burrow forgejo runner %s", runnerName), + 
"--output", "plain", + "--output_json_to", metaPath, + "--wait_timeout", "6m", + } + args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL) + + createCtx, cancel := context.WithTimeout(ctx, 8*time.Minute) + defer cancel() + + cmd := exec.CommandContext(createCtx, d.opts.BinaryPath, args...) + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + + if err := cmd.Run(); err != nil { + if created := strings.TrimSpace(mustReadFile(cidPath)); created != "" { + d.destroyNSCInstance(context.Background(), runnerName, created) + } + if errors.Is(createCtx.Err(), context.DeadlineExceeded) { + return "", buf.String(), fmt.Errorf("nsc create timed out after %s", 8*time.Minute) + } + return "", buf.String(), fmt.Errorf("nsc create failed: %w", err) + } + + instanceID, err = readNSCCreateInstanceID(metaPath) + if err != nil { + return "", buf.String(), fmt.Errorf("nsc create output parse failed: %w", err) + } + if instanceID == "" { + return "", buf.String(), errors.New("nsc create returned empty instance id") + } + return instanceID, buf.String(), nil +} + +func (d *Dispatcher) resolveWindowsCredentials(ctx context.Context, instanceID string) (username string, password string, err error) { + tmpDir, err := os.MkdirTemp("", "forgejo-nsc-winproxy-*") + if err != nil { + return "", "", fmt.Errorf("mktemp: %w", err) + } + defer os.RemoveAll(tmpDir) + + outPath := filepath.Join(tmpDir, "proxy.json") + outFile, err := os.Create(outPath) + if err != nil { + return "", "", fmt.Errorf("create proxy output file: %w", err) + } + defer outFile.Close() + + var stderr bytes.Buffer + args := []string{"instance", "proxy", instanceID, "-s", "rdp", "-o", "json"} + args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL) + + proxyCtx, cancel := context.WithTimeout(ctx, 90*time.Second) + defer cancel() + + cmd := exec.CommandContext(proxyCtx, d.opts.BinaryPath, args...) 
+ cmd.Stdout = outFile + cmd.Stderr = &stderr + + if err := cmd.Start(); err != nil { + return "", "", fmt.Errorf("start nsc instance proxy: %w", err) + } + + waitDone := make(chan struct{}) + var waitErr error + go func() { + waitErr = cmd.Wait() + close(waitDone) + }() + + var payload windowsProxyOutput + deadline := time.Now().Add(45 * time.Second) + for time.Now().Before(deadline) { + raw, _ := os.ReadFile(outPath) + jsonBlob := extractJSON(string(raw)) + if jsonBlob != "" { + if err := json.Unmarshal([]byte(jsonBlob), &payload); err == nil { + username = strings.TrimSpace(payload.RDP.Credentials.Username) + password = strings.TrimSpace(payload.RDP.Credentials.Password) + if username != "" && password != "" { + break + } + } + } + select { + case <-waitDone: + if waitErr != nil { + return "", "", fmt.Errorf("nsc instance proxy exited before credentials were available: %w\n%s", waitErr, stderr.String()) + } + default: + } + time.Sleep(1 * time.Second) + } + + if cmd.Process != nil { + _ = cmd.Process.Kill() + } + <-waitDone + + if username == "" || password == "" { + raw, _ := os.ReadFile(outPath) + return "", "", fmt.Errorf("failed to resolve windows credentials from nsc instance proxy output\nstdout=%s\nstderr=%s", strings.TrimSpace(string(raw)), strings.TrimSpace(stderr.String())) + } + return username, password, nil +} + +func (d *Dispatcher) probeWindowsWinRMService(ctx context.Context, instanceID string) error { + args := []string{"instance", "proxy", instanceID, "-s", "winrm", "-o", "json", "--once"} + args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL) + + probeCtx, cancel := context.WithTimeout(ctx, 15*time.Second) + defer cancel() + + cmd := exec.CommandContext(probeCtx, d.opts.BinaryPath, args...) 
+ var out bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &out + + err := cmd.Run() + raw := strings.TrimSpace(out.String()) + if endpoint, ok := parseProxyEndpoint(raw); ok && endpoint != "" { + return nil + } + + if indicatesMissingProxyService(raw, "winrm") { + return fmt.Errorf("namespace windows non-interactive channel unavailable: instance does not expose winrm service (rdp-only)\n%s", raw) + } + + if errors.Is(probeCtx.Err(), context.DeadlineExceeded) { + return fmt.Errorf("timed out probing Namespace winrm service before bootstrap\n%s", raw) + } + + if err != nil { + return fmt.Errorf("nsc winrm service probe failed: %w\n%s", err, raw) + } + return fmt.Errorf("nsc winrm service probe did not yield endpoint output\n%s", raw) +} + +func parseProxyEndpoint(raw string) (string, bool) { + jsonBlob := extractJSON(raw) + if jsonBlob == "" { + return "", false + } + var payload struct { + Endpoint string `json:"endpoint"` + } + if err := json.Unmarshal([]byte(jsonBlob), &payload); err != nil { + return "", false + } + endpoint := strings.TrimSpace(payload.Endpoint) + if endpoint == "" { + return "", false + } + return endpoint, true +} + +func indicatesMissingProxyService(raw string, service string) bool { + service = strings.TrimSpace(service) + if service == "" { + return false + } + token := fmt.Sprintf("does not have service %q", service) + return strings.Contains(raw, token) +} + +func (d *Dispatcher) startWindowsWinRMPortForward(ctx context.Context, instanceID string) (endpoint string, stop func(), err error) { + args := []string{"instance", "port-forward", instanceID, "--target_port", "5985"} + args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL) + + forwardCtx, cancel := context.WithCancel(ctx) + cmd := exec.CommandContext(forwardCtx, d.opts.BinaryPath, args...) 
+ stdout, err := cmd.StdoutPipe() + if err != nil { + cancel() + return "", nil, fmt.Errorf("port-forward stdout pipe: %w", err) + } + var stderr bytes.Buffer + cmd.Stderr = &stderr + + if err := cmd.Start(); err != nil { + cancel() + return "", nil, fmt.Errorf("start nsc port-forward: %w", err) + } + + waitDone := make(chan struct{}) + var waitErr error + go func() { + waitErr = cmd.Wait() + close(waitDone) + }() + + endpointCh := make(chan string, 1) + scanErrCh := make(chan error, 1) + go func() { + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, "Listening on ") { + endpointCh <- strings.TrimSpace(strings.TrimPrefix(line, "Listening on ")) + return + } + } + if err := scanner.Err(); err != nil { + scanErrCh <- err + } + }() + + select { + case endpoint = <-endpointCh: + stop = func() { + cancel() + if cmd.Process != nil { + _ = cmd.Process.Kill() + } + <-waitDone + } + return endpoint, stop, nil + case err := <-scanErrCh: + cancel() + if cmd.Process != nil { + _ = cmd.Process.Kill() + } + <-waitDone + return "", nil, fmt.Errorf("failed reading port-forward output: %w", err) + case <-waitDone: + cancel() + if waitErr != nil { + return "", nil, fmt.Errorf("nsc port-forward exited early: %w\n%s", waitErr, stderr.String()) + } + return "", nil, fmt.Errorf("nsc port-forward exited without endpoint\n%s", stderr.String()) + case <-time.After(45 * time.Second): + cancel() + if cmd.Process != nil { + _ = cmd.Process.Kill() + } + <-waitDone + return "", nil, fmt.Errorf("timed out waiting for WinRM port-forward endpoint\n%s", stderr.String()) + case <-ctx.Done(): + cancel() + if cmd.Process != nil { + _ = cmd.Process.Kill() + } + <-waitDone + return "", nil, ctx.Err() + } +} + +func (d *Dispatcher) runWindowsWinRMPowerShell(ctx context.Context, endpoint, username, password, script string) error { + pythonPath, err := exec.LookPath("python3") + if err != nil { + return fmt.Errorf("python3 
is required for windows WinRM bootstrap: %w", err) + } + + workdir := strings.TrimSpace(d.opts.WorkDir) + if workdir == "" { + workdir = "/tmp/forgejo-runner" + } + if err := os.MkdirAll(workdir, 0o755); err != nil { + return fmt.Errorf("create workdir %s: %w", workdir, err) + } + + venvPath := filepath.Join(workdir, ".winrm-venv") + venvPython := filepath.Join(venvPath, "bin", "python") + if _, err := os.Stat(venvPython); err != nil { + cmd := exec.CommandContext(ctx, pythonPath, "-m", "venv", venvPath) + var out bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &out + if err := cmd.Run(); err != nil { + return fmt.Errorf("create python venv for winrm failed: %w\n%s", err, out.String()) + } + } + + ensurePyWinRM := ` +import importlib.util, subprocess, sys +if importlib.util.find_spec("winrm") is None: + subprocess.check_call([sys.executable, "-m", "pip", "install", "--quiet", "pywinrm"]) +` + ensureCmd := exec.CommandContext(ctx, venvPython, "-c", ensurePyWinRM) + var ensureOut bytes.Buffer + ensureCmd.Stdout = &ensureOut + ensureCmd.Stderr = &ensureOut + if err := ensureCmd.Run(); err != nil { + return fmt.Errorf("install pywinrm failed: %w\n%s", err, ensureOut.String()) + } + + runScript := ` +import base64, os, sys, time, traceback, winrm + +endpoint = os.environ["WINRM_ENDPOINT"] +user = os.environ["WINRM_USER"] +password = os.environ["WINRM_PASS"] +script = base64.b64decode(os.environ["WINRM_SCRIPT_B64"]).decode("utf-8") + +deadline = time.time() + 300.0 +last_err = None + +while time.time() < deadline: + try: + session = winrm.Session(f"http://{endpoint}/wsman", auth=(user, password), transport="ntlm") + result = session.run_ps(script) + sys.stdout.write(result.std_out.decode("utf-8", errors="replace")) + sys.stderr.write(result.std_err.decode("utf-8", errors="replace")) + print(f"winrm_exit={result.status_code}") + sys.exit(result.status_code) + except Exception as err: + last_err = err + time.sleep(5.0) + +sys.stderr.write("timed out waiting for WinRM 
connectivity after 300s\\n") +if last_err is not None: + traceback.print_exception(last_err, file=sys.stderr) +sys.exit(111) +` + runCmd := exec.CommandContext(ctx, venvPython, "-c", runScript) + runCmd.Env = append(os.Environ(), + "WINRM_ENDPOINT="+endpoint, + "WINRM_USER="+username, + "WINRM_PASS="+password, + "WINRM_SCRIPT_B64="+base64.StdEncoding.EncodeToString([]byte(script)), + ) + var runOut bytes.Buffer + runCmd.Stdout = &runOut + runCmd.Stderr = &runOut + if err := runCmd.Run(); err != nil { + return fmt.Errorf("windows winrm bootstrap command failed: %w\n%s", err, runOut.String()) + } + return nil +} + +func windowsBootstrapScript(runnerName string, req LaunchRequest, executor, workdir string) string { + if strings.TrimSpace(workdir) == "" { + workdir = `C:\burrow\forgejo-runner` + } + + runnerExec := strings.TrimSpace(executor) + if runnerExec == "" || runnerExec == "shell" { + runnerExec = "host" + } + + safeName := strings.NewReplacer(`\`, "-", ":", "-", "/", "-", " ", "-").Replace(runnerName) + workRoot := strings.TrimRight(workdir, `\`) + `\` + safeName + + var b strings.Builder + b.WriteString("$ErrorActionPreference = 'Stop'\n") + b.WriteString("$ProgressPreference = 'SilentlyContinue'\n") + b.WriteString("[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12\n") + b.WriteString("$runnerName = " + powershellSingleQuote(runnerName) + "\n") + b.WriteString("$runnerToken = " + powershellSingleQuote(req.Token) + "\n") + b.WriteString("$instanceURL = " + powershellSingleQuote(req.InstanceURL) + "\n") + b.WriteString("$labelsCsv = " + powershellSingleQuote(strings.Join(req.Labels, ",")) + "\n") + b.WriteString("$runnerExec = " + powershellSingleQuote(runnerExec) + "\n") + b.WriteString("$workRoot = " + powershellSingleQuote(workRoot) + "\n") + b.WriteString(` +New-Item -Path $workRoot -ItemType Directory -Force | Out-Null +Set-Location $workRoot + +$runnerVersion = "12.6.4" +$zipUrl = 
"https://code.forgejo.org/forgejo/runner/releases/download/v${runnerVersion}/forgejo-runner-${runnerVersion}-windows-amd64.zip" +$zipPath = Join-Path $workRoot "forgejo-runner.zip" +$extractDir = Join-Path $workRoot "forgejo-runner" + +if (Test-Path $extractDir) { + Remove-Item -Path $extractDir -Recurse -Force +} + +Invoke-WebRequest -Uri $zipUrl -OutFile $zipPath +Expand-Archive -Path $zipPath -DestinationPath $extractDir -Force + +$runnerExe = Join-Path $extractDir "forgejo-runner.exe" +if (-not (Test-Path $runnerExe)) { + throw "Missing forgejo-runner.exe after extract: $runnerExe" +} + +$labels = @() +foreach ($label in ($labelsCsv -split ",")) { + $trimmed = $label.Trim() + if ([string]::IsNullOrWhiteSpace($trimmed)) { continue } + if ($trimmed.Contains(":")) { + $labels += $trimmed + } else { + $labels += ("{0}:{1}" -f $trimmed, $runnerExec) + } +} +if ($labels.Count -eq 0) { + throw "No runner labels resolved for windows bootstrap" +} + +$labelLines = ($labels | ForEach-Object { " - $_" }) -join [Environment]::NewLine +$configPath = Join-Path $workRoot "runner.yaml" +$runnerYaml = @" +log: + level: info +runner: + file: .runner + capacity: 1 + name: $runnerName + labels: +$labelLines +cache: + enabled: false +"@ +Set-Content -Path $configPath -Value $runnerYaml -Encoding UTF8 + +$labelsArg = ($labels -join ",") +& $runnerExe register --no-interactive --instance $instanceURL --token $runnerToken --name $runnerName --labels $labelsArg --config $configPath +if ($LASTEXITCODE -ne 0) { + throw ("forgejo-runner register failed: {0}" -f $LASTEXITCODE) +} + +& $runnerExe one-job --config $configPath +if ($LASTEXITCODE -ne 0) { + throw ("forgejo-runner one-job failed: {0}" -f $LASTEXITCODE) +} +`) + return b.String() +} diff --git a/services/forgejo-nsc/internal/nsc/windows_winrm_integration_test.go b/services/forgejo-nsc/internal/nsc/windows_winrm_integration_test.go new file mode 100644 index 0000000..407749b --- /dev/null +++ 
b/services/forgejo-nsc/internal/nsc/windows_winrm_integration_test.go @@ -0,0 +1,59 @@ +package nsc + +import ( + "context" + "io" + "log/slog" + "os" + "os/exec" + "strings" + "testing" + "time" +) + +func TestWindowsWinRMScriptRoundTrip(t *testing.T) { + if os.Getenv("NSC_WINDOWS_E2E") != "1" { + t.Skip("set NSC_WINDOWS_E2E=1 to run Namespace Windows integration test") + } + + nscBinary, err := exec.LookPath("nsc") + if err != nil { + t.Skipf("nsc not found in PATH: %v", err) + } + + authCheck := exec.Command(nscBinary, "auth", "check-login") + if out, err := authCheck.CombinedOutput(); err != nil { + t.Skipf("nsc auth check-login failed: %v (%s)", err, strings.TrimSpace(string(out))) + } + + machineType := strings.TrimSpace(os.Getenv("NSC_WINDOWS_E2E_MACHINE_TYPE")) + if machineType == "" { + machineType = "windows/amd64:4x8" + } + + dispatcher, err := NewDispatcher(Options{ + BinaryPath: nscBinary, + DefaultImage: "code.forgejo.org/forgejo/runner:11", + DefaultMachine: machineType, + DefaultDuration: 20 * time.Minute, + MaxParallel: 1, + WorkDir: t.TempDir(), + ComputeBaseURL: strings.TrimSpace(os.Getenv("NSC_COMPUTE_BASE_URL")), + Logger: slog.New(slog.NewTextHandler(io.Discard, nil)), + }) + if err != nil { + t.Fatalf("NewDispatcher() error: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Minute) + defer cancel() + + script := "Write-Output ('winrm-ok:' + $env:COMPUTERNAME)" + labels := []string{"namespace-profile-windows-medium"} + if err := dispatcher.launchWindowsScriptViaWinRM(ctx, "nsc-winrm-itest", 20*time.Minute, machineType, labels, script); err != nil { + if strings.Contains(err.Error(), "does not expose winrm service (rdp-only)") { + t.Skipf("namespace windows control channel is rdp-only: %v", err) + } + t.Fatalf("launchWindowsScriptViaWinRM() error: %v", err) + } +} diff --git a/services/forgejo-nsc/internal/nsc/windows_winrm_test.go b/services/forgejo-nsc/internal/nsc/windows_winrm_test.go new file mode 100644 
index 0000000..538d009 --- /dev/null +++ b/services/forgejo-nsc/internal/nsc/windows_winrm_test.go @@ -0,0 +1,65 @@ +package nsc + +import "testing" + +func TestParseProxyEndpoint(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + raw string + want string + wantOK bool + }{ + { + name: "plain json payload", + raw: `{"endpoint":"127.0.0.1:61234"}`, + want: "127.0.0.1:61234", + wantOK: true, + }, + { + name: "json wrapped with extra output", + raw: `Connected. +{"endpoint":"127.0.0.1:61235","rdp":{"credentials":{"username":"runneradmin","password":"runneradmin"}}}`, + want: "127.0.0.1:61235", + wantOK: true, + }, + { + name: "missing endpoint field", + raw: `{"rdp":{"credentials":{"username":"runneradmin"}}}`, + wantOK: false, + }, + { + name: "non-json output", + raw: `Failed: instance does not have service "winrm"`, + wantOK: false, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + got, ok := parseProxyEndpoint(tc.raw) + if ok != tc.wantOK { + t.Fatalf("parseProxyEndpoint(%q) ok=%v, want %v", tc.raw, ok, tc.wantOK) + } + if got != tc.want { + t.Fatalf("parseProxyEndpoint(%q) endpoint=%q, want %q", tc.raw, got, tc.want) + } + }) + } +} + +func TestIndicatesMissingProxyService(t *testing.T) { + t.Parallel() + + raw := `Failed: instance does not have service "winrm"` + if !indicatesMissingProxyService(raw, "winrm") { + t.Fatalf("indicatesMissingProxyService should return true for missing winrm message") + } + if indicatesMissingProxyService(raw, "ssh") { + t.Fatalf("indicatesMissingProxyService should be false when service name does not match") + } +} diff --git a/services/forgejo-nsc/internal/server/server.go b/services/forgejo-nsc/internal/server/server.go new file mode 100644 index 0000000..b4bb1d2 --- /dev/null +++ b/services/forgejo-nsc/internal/server/server.go @@ -0,0 +1,151 @@ +package server + +import ( + "context" + "encoding/json" + "errors" + "log/slog" + "net/http" + "time" + + 
"github.com/go-chi/chi/v5" + "github.com/go-chi/chi/v5/middleware" + + "github.com/burrow/forgejo-nsc/internal/app" +) + +type Server struct { + httpServer *http.Server + app *app.Service + log *slog.Logger +} + +func New(listen string, svc *app.Service, logger *slog.Logger) *Server { + if logger == nil { + logger = slog.Default() + } + + router := chi.NewRouter() + router.Use(middleware.RequestID) + router.Use(middleware.RealIP) + router.Use(middleware.Logger) + router.Use(middleware.Recoverer) + + s := &Server{ + app: svc, + log: logger, + httpServer: &http.Server{ + Addr: listen, + Handler: router, + ReadTimeout: 30 * time.Second, + // Dispatch requests can legitimately run for the duration of a build. + // A short WriteTimeout will kill the request context mid-provisioning. + WriteTimeout: 2 * time.Hour, + IdleTimeout: 60 * time.Second, + }, + } + + router.Get("/healthz", s.handleHealthz) + router.Post("/api/v1/dispatch", s.handleDispatch) + + return s +} + +func (s *Server) ListenAndServe() error { + return s.httpServer.ListenAndServe() +} + +func (s *Server) Shutdown(ctx context.Context) error { + return s.httpServer.Shutdown(ctx) +} + +// Handler exposes the underlying HTTP handler for tests. 
+func (s *Server) Handler() http.Handler { + return s.httpServer.Handler +} + +type dispatchRequest struct { + Count int `json:"count"` + Labels []string `json:"labels"` + Scope *dispatchScope `json:"scope"` + TTL string `json:"ttl"` + Machine string `json:"machine_type"` + Image string `json:"image"` + Env map[string]string `json:"env"` +} + +type dispatchScope struct { + Level string `json:"level"` + Owner string `json:"owner"` + Name string `json:"name"` +} + +func (s *Server) handleDispatch(w http.ResponseWriter, r *http.Request) { + var payload dispatchRequest + if err := json.NewDecoder(r.Body).Decode(&payload); err != nil { + s.writeError(w, http.StatusBadRequest, err) + return + } + + duration, err := parseDuration(payload.TTL) + if err != nil { + s.writeError(w, http.StatusBadRequest, err) + return + } + + var scope *app.Scope + if payload.Scope != nil { + scope = &app.Scope{ + Level: payload.Scope.Level, + Owner: payload.Scope.Owner, + Name: payload.Scope.Name, + } + } + + resp, err := s.app.Dispatch(r.Context(), app.DispatchRequest{ + Count: payload.Count, + Labels: payload.Labels, + Scope: scope, + TTL: duration, + Machine: payload.Machine, + Image: payload.Image, + ExtraEnv: payload.Env, + }) + if err != nil { + s.writeError(w, http.StatusInternalServerError, err) + return + } + + s.writeJSON(w, http.StatusOK, resp) +} + +func parseDuration(value string) (time.Duration, error) { + if value == "" { + return 0, nil + } + dur, err := time.ParseDuration(value) + if err != nil { + return 0, err + } + if dur <= 0 { + return 0, errors.New("ttl must be positive") + } + return dur, nil +} + +func (s *Server) handleHealthz(w http.ResponseWriter, _ *http.Request) { + s.writeJSON(w, http.StatusOK, map[string]string{"status": "ok"}) +} + +func (s *Server) writeError(w http.ResponseWriter, code int, err error) { + s.log.Error("request failed", "err", err, "status", code) + s.writeJSON(w, code, map[string]string{ + "error": err.Error(), + }) +} + +func (s *Server) 
writeJSON(w http.ResponseWriter, code int, payload any) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + _ = json.NewEncoder(w).Encode(payload) +} diff --git a/services/forgejo-nsc/internal/server/server_test.go b/services/forgejo-nsc/internal/server/server_test.go new file mode 100644 index 0000000..09a9743 --- /dev/null +++ b/services/forgejo-nsc/internal/server/server_test.go @@ -0,0 +1,111 @@ +package server + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "github.com/burrow/forgejo-nsc/internal/app" + "github.com/burrow/forgejo-nsc/internal/forgejo" + "github.com/burrow/forgejo-nsc/internal/nsc" +) + +type serverForgejoMock struct { + mu sync.Mutex + token string + scopes []forgejo.Scope +} + +func (m *serverForgejoMock) RegistrationToken(ctx context.Context, scope forgejo.Scope) (string, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.scopes = append(m.scopes, scope) + return m.token, nil +} + +type serverDispatcherMock struct { + mu sync.Mutex + requests []nsc.LaunchRequest + result string +} + +func (m *serverDispatcherMock) LaunchRunner(ctx context.Context, req nsc.LaunchRequest) (string, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.requests = append(m.requests, req) + if m.result != "" { + return m.result, nil + } + return "runner", nil +} + +func TestDispatchEndpoint(t *testing.T) { + forgejoMock := &serverForgejoMock{token: "token"} + dispatcherMock := &serverDispatcherMock{result: "runner-http"} + + cfg := app.Config{ + DefaultScope: forgejo.Scope{Level: forgejo.ScopeInstance}, + DefaultLabels: []string{"fallback"}, + InstanceURL: "https://forgejo.example.com", + DefaultTTL: 30 * time.Minute, + } + + service := app.NewService(cfg, forgejoMock, dispatcherMock, nil) + srv := New(":0", service, nil) + ts := httptest.NewServer(srv.Handler()) + defer ts.Close() + + body := map[string]any{ + "count": 1, + "ttl": "45m", + "labels": 
[]string{"nscloud-arm"}, + "scope": map[string]string{"level": string(forgejo.ScopeOrganization), "owner": "acme"}, + "machine_type": "8x16", + "image": "runner:http", + "env": map[string]string{"FOO": "bar"}, + } + + payload, _ := json.Marshal(body) + + resp, err := http.Post(ts.URL+"/api/v1/dispatch", "application/json", bytes.NewReader(payload)) + if err != nil { + t.Fatalf("POST failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + t.Fatalf("expected 200 OK, got %d", resp.StatusCode) + } + + var decoded app.DispatchResponse + if err := json.NewDecoder(resp.Body).Decode(&decoded); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if len(decoded.Runners) != 1 || decoded.Runners[0].Name != "runner-http" { + t.Fatalf("unexpected response: %+v", decoded) + } + + if len(forgejoMock.scopes) != 1 || forgejoMock.scopes[0].Level != forgejo.ScopeOrganization { + t.Fatalf("expected organization scope, got %+v", forgejoMock.scopes) + } + + if len(dispatcherMock.requests) != 1 { + t.Fatalf("expected dispatcher call") + } + call := dispatcherMock.requests[0] + if call.Duration != 45*time.Minute { + t.Fatalf("expected ttl override, got %v", call.Duration) + } + if call.Labels[0] != "nscloud-arm" { + t.Fatalf("expected labels passthrough, got %v", call.Labels) + } + if call.ExtraEnv["FOO"] != "bar" { + t.Fatalf("expected env passthrough") + } +} From 44dd88c1112749ab40c9017b4053233bd0b12531 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 03:04:31 -0700 Subject: [PATCH 03/50] Fix Forgejo NSC nsc runtime path --- services/forgejo-nsc/deploy/dispatcher.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/forgejo-nsc/deploy/dispatcher.yaml b/services/forgejo-nsc/deploy/dispatcher.yaml index fe58994..1e45d39 100644 --- a/services/forgejo-nsc/deploy/dispatcher.yaml +++ b/services/forgejo-nsc/deploy/dispatcher.yaml @@ -13,7 +13,7 @@ forgejo: timeout: "30s" namespace: - nsc_binary: 
"/run/current-system/sw/bin/nsc" + nsc_binary: "nsc" compute_base_url: "https://ord4.compute.namespaceapis.com" image: "code.forgejo.org/forgejo/runner:11" machine_type: "4x8" From 5115eb831a49b080f5cfb188440e18b9a398a857 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 03:13:53 -0700 Subject: [PATCH 04/50] Update Namespace CLI to working release --- flake.nix | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/flake.nix b/flake.nix index 18ff979..b49b330 100644 --- a/flake.nix +++ b/flake.nix @@ -48,7 +48,7 @@ nscPkg = if pkgs.stdenv.isLinux || pkgs.stdenv.isDarwin then let - version = "0.0.452"; + version = "0.0.484"; osName = if pkgs.stdenv.isLinux then "linux" @@ -62,18 +62,18 @@ arch = "amd64"; hash = if pkgs.stdenv.isLinux then - "sha256-FBqOJ0UQWTv2r4HWMHrR/aqFzDa0ej/mS8dSoaCe6fY=" + "sha256-sT4YWSjQ7dU6/QV+vucm1ARSXf5yIcAtHoCYxbXJpRs=" else - "sha256-3fRKWO0SCCa5PEym5yCB7dtyEx3xSxXSHfJYz8B+/4M="; + "sha256-u0pSyUQw0IJcIipkLtm0MemD9BFO2/ZoAlBuFpfX1HI="; } else if pkgs.stdenv.hostPlatform.isAarch64 then { arch = "arm64"; hash = if pkgs.stdenv.isLinux then - "sha256-A6twO8Ievbu7Gi5Hqon4ug5rCGOm/uHhlCya3px6+io=" + "sha256-n3nOIBjGnHdNUhfWD7QHvGOW+DdrZaNlfatj4o17NvM=" else - "sha256-n363xLaGhy+a6lw2F+WicQYGXnGYnqRW8aTQCSppwcw="; + "sha256-8k2Jby6HCPClBaSGUrqIKP6MioVFrGD6HwAsjKZSSQA="; } else throw "nsc: unsupported host platform ${pkgs.stdenv.hostPlatform.system}"; From 251922da9e60105c88f6d035dea8fb1116be99e1 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 22:15:34 -0700 Subject: [PATCH 05/50] Normalize Namespace token file format --- Scripts/provision-forgejo-nsc.sh | 40 +++++++++++++++++++++++++++++++- services/forgejo-nsc/README.md | 4 +++- 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/Scripts/provision-forgejo-nsc.sh b/Scripts/provision-forgejo-nsc.sh index 890d9a2..f6ab4d9 100755 --- a/Scripts/provision-forgejo-nsc.sh +++ b/Scripts/provision-forgejo-nsc.sh @@ -136,8 +136,46 @@ 
autoscaler_src="${REPO_ROOT}/services/forgejo-nsc/deploy/autoscaler.yaml" if [[ "${REFRESH_TOKEN}" -eq 1 || ! -s "${token_file}" ]]; then "${NSC_BIN}" auth check-login --duration 20m >/dev/null - "${NSC_BIN}" auth generate-dev-token --output_to "${token_file}" >/dev/null + raw_token_file="$(mktemp)" + trap 'rm -f "${raw_token_file}"; cleanup' EXIT + "${NSC_BIN}" auth generate-dev-token --output_to "${raw_token_file}" >/dev/null + RAW_NSC_TOKEN_FILE="${raw_token_file}" TOKEN_FILE="${token_file}" python3 - <<'PY' +import json +import os +from pathlib import Path + +raw = Path(os.environ["RAW_NSC_TOKEN_FILE"]).read_text(encoding="utf-8").strip() +if not raw: + raise SystemExit("generated Namespace token is empty") + +Path(os.environ["TOKEN_FILE"]).write_text( + json.dumps({"bearer_token": raw}, indent=2) + "\n", + encoding="utf-8", +) +PY + rm -f "${raw_token_file}" chmod 600 "${token_file}" +elif [[ -s "${token_file}" ]]; then + TOKEN_FILE="${token_file}" python3 - <<'PY' +import json +import os +from pathlib import Path + +path = Path(os.environ["TOKEN_FILE"]) +raw = path.read_text(encoding="utf-8").strip() +if not raw: + raise SystemExit(0) + +try: + parsed = json.loads(raw) +except json.JSONDecodeError: + parsed = None + +if isinstance(parsed, dict) and isinstance(parsed.get("bearer_token"), str) and parsed["bearer_token"].strip(): + raise SystemExit(0) + +path.write_text(json.dumps({"bearer_token": raw}, indent=2) + "\n", encoding="utf-8") +PY fi webhook_secret="$(python3 - <<'PY' diff --git a/services/forgejo-nsc/README.md b/services/forgejo-nsc/README.md index f3959de..dbd7e78 100644 --- a/services/forgejo-nsc/README.md +++ b/services/forgejo-nsc/README.md @@ -154,7 +154,9 @@ instances: For Burrow, use `Scripts/provision-forgejo-nsc.sh` to mint the Forgejo PAT, generate a Namespace token from the logged-in namespace account, and render the dispatcher/autoscaler configs into `intake/forgejo_nsc_{dispatcher,autoscaler}.yaml` -plus `intake/forgejo_nsc_token.txt`. 
+plus `intake/forgejo_nsc_token.txt`. The token file is emitted as JSON with a +`bearer_token` field so both the Compute API path and the `nsc` CLI fallback can +consume the same secret material. For ongoing operations, use `Scripts/sync-forgejo-nsc-config.sh`: From 48b8a3c32f6fe9e7651cdc3d101e0ad7360b4a20 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 22:40:44 -0700 Subject: [PATCH 06/50] Move Forgejo NSC runtime into agenix --- Makefile | 34 +++++++++ Scripts/agenix-load-file.sh | 22 ++++++ Scripts/provision-forgejo-nsc.sh | 1 + flake.lock | 81 +++++++++++++++++++++- flake.nix | 12 +++- nixos/README.md | 6 +- nixos/hosts/burrow-forge/default.nix | 29 ++++++-- secrets.nix | 1 + secrets/README.md | 17 +++++ secrets/forgejo/nsc-autoscaler-config.age | Bin 0 -> 1396 bytes secrets/forgejo/nsc-dispatcher-config.age | Bin 0 -> 1324 bytes secrets/forgejo/nsc-token.age | Bin 0 -> 1211 bytes secrets/secrets.nix | 12 ++++ services/forgejo-nsc/README.md | 20 +++--- 14 files changed, 217 insertions(+), 18 deletions(-) create mode 100755 Scripts/agenix-load-file.sh create mode 100644 secrets.nix create mode 100644 secrets/README.md create mode 100644 secrets/forgejo/nsc-autoscaler-config.age create mode 100644 secrets/forgejo/nsc-dispatcher-config.age create mode 100644 secrets/forgejo/nsc-token.age create mode 100644 secrets/secrets.nix diff --git a/Makefile b/Makefile index f927f5f..e852e32 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,43 @@ +FLAKE ?= . 
+AGENIX ?= nix run ${FLAKE}\#agenix -- + +SECRETS := forgejo/nsc-token \ + forgejo/nsc-dispatcher-config \ + forgejo/nsc-autoscaler-config + tun := $(shell ifconfig -l | sed 's/ /\n/g' | grep utun | tail -n 1) cargo_console := env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features -- cargo_norm := env RUST_BACKTRACE=1 RUST_LOG=debug cargo run -- sudo_cargo_console := sudo -E env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features -- sudo_cargo_norm := sudo -E env RUST_BACKTRACE=1 RUST_LOG=debug cargo run -- +.PHONY: secret secret-file secrets-list + +secret: + @if [ -z "${name}" ]; then \ + printf 'Usage: make secret name=\nAvailable secrets:\n %s\n' "${SECRETS}"; \ + exit 1; \ + fi + ${AGENIX} -e secrets/${name}.age + +secret-file: + @if [ -z "${name}" ]; then \ + printf 'Usage: make secret-file name= file=\nAvailable secrets:\n %s\n' "${SECRETS}"; \ + exit 1; \ + fi + @if [ -z "${file}" ]; then \ + printf 'Usage: make secret-file name= file=\n'; \ + exit 1; \ + fi + @if [ ! -f "${file}" ]; then \ + printf 'Source file "%s" not found.\n' "${file}"; \ + exit 1; \ + fi + SECRET_SOURCE_FILE="${file}" EDITOR="${PWD}/Scripts/agenix-load-file.sh" ${AGENIX} -e secrets/${name}.age " >&2 + exit 1 +fi + +dest="${!#}" +source_path="${SECRET_SOURCE_FILE:-}" + +if [[ -z "$source_path" ]]; then + echo "SECRET_SOURCE_FILE is not set; point it at the source file to encrypt." >&2 + exit 1 +fi + +if [[ ! -f "$source_path" ]]; then + echo "Source file '$source_path' does not exist." >&2 + exit 1 +fi + +cp "$source_path" "$dest" diff --git a/Scripts/provision-forgejo-nsc.sh b/Scripts/provision-forgejo-nsc.sh index f6ab4d9..9e6e4b5 100755 --- a/Scripts/provision-forgejo-nsc.sh +++ b/Scripts/provision-forgejo-nsc.sh @@ -272,4 +272,5 @@ PY chmod 600 "${dispatcher_out}" "${autoscaler_out}" echo "Rendered intake/forgejo_nsc_token.txt, intake/forgejo_nsc_dispatcher.yaml, and intake/forgejo_nsc_autoscaler.yaml." 
+echo "Re-encrypt them into secrets/forgejo/{nsc-token,nsc-dispatcher-config,nsc-autoscaler-config}.age before deploying the forge host." echo "Minted Forgejo PAT ${token_name} for ${CONTACT_USER} on ${HOST}." diff --git a/flake.lock b/flake.lock index 677bd0d..6f7f20c 100644 --- a/flake.lock +++ b/flake.lock @@ -1,5 +1,47 @@ { "nodes": { + "agenix": { + "inputs": { + "darwin": "darwin", + "home-manager": "home-manager", + "nixpkgs": [ + "nixpkgs" + ], + "systems": "systems" + }, + "locked": { + "lastModified": 1770165109, + "narHash": "sha256-9VnK6Oqai65puVJ4WYtCTvlJeXxMzAp/69HhQuTdl/I=", + "type": "tarball", + "url": "https://codeload.github.com/ryantm/agenix/tar.gz/main" + }, + "original": { + "type": "tarball", + "url": "https://codeload.github.com/ryantm/agenix/tar.gz/main" + } + }, + "darwin": { + "inputs": { + "nixpkgs": [ + "agenix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1744478979, + "narHash": "sha256-dyN+teG9G82G+m+PX/aSAagkC+vUv0SgUw3XkPhQodQ=", + "owner": "lnl7", + "repo": "nix-darwin", + "rev": "43975d782b418ebf4969e9ccba82466728c2851b", + "type": "github" + }, + "original": { + "owner": "lnl7", + "ref": "master", + "repo": "nix-darwin", + "type": "github" + } + }, "disko": { "inputs": { "nixpkgs": [ @@ -19,7 +61,7 @@ }, "flake-utils": { "inputs": { - "systems": "systems" + "systems": "systems_2" }, "locked": { "lastModified": 1731533236, @@ -45,6 +87,27 @@ "url": "https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0" } }, + "home-manager": { + "inputs": { + "nixpkgs": [ + "agenix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1745494811, + "narHash": "sha256-YZCh2o9Ua1n9uCvrvi5pRxtuVNml8X2a03qIFfRKpFs=", + "owner": "nix-community", + "repo": "home-manager", + "rev": "abfad3d2958c9e6300a883bd443512c55dfeb1be", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "home-manager", + "type": "github" + } + }, "nixpkgs": { "locked": { "lastModified": 1773389992, @@ -59,6 +122,7 @@ }, "root": 
{ "inputs": { + "agenix": "agenix", "disko": "disko", "flake-utils": "flake-utils", "hcloud-upload-image-src": "hcloud-upload-image-src", @@ -79,6 +143,21 @@ "repo": "default", "type": "github" } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/flake.nix b/flake.nix index b49b330..51e4bc9 100644 --- a/flake.nix +++ b/flake.nix @@ -4,6 +4,10 @@ inputs = { nixpkgs.url = "tarball+https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable"; flake-utils.url = "tarball+https://codeload.github.com/numtide/flake-utils/tar.gz/main"; + agenix = { + url = "tarball+https://codeload.github.com/ryantm/agenix/tar.gz/main"; + inputs.nixpkgs.follows = "nixpkgs"; + }; disko = { url = "tarball+https://codeload.github.com/nix-community/disko/tar.gz/master"; inputs.nixpkgs.follows = "nixpkgs"; @@ -14,7 +18,7 @@ }; }; - outputs = { self, nixpkgs, flake-utils, disko, hcloud-upload-image-src }: + outputs = { self, nixpkgs, flake-utils, agenix, disko, hcloud-upload-image-src }: let supportedSystems = [ "x86_64-linux" @@ -29,6 +33,7 @@ inherit system; }; lib = pkgs.lib; + agenixPkg = agenix.packages.${system}.agenix; commonPackages = with pkgs; [ cargo rustc @@ -141,6 +146,7 @@ packages = commonPackages ++ [ + agenixPkg hcloudUploadImagePkg forgejoNscDispatcher forgejoNscAutoscaler @@ -152,6 +158,7 @@ packages = commonPackages ++ [ + agenixPkg hcloudUploadImagePkg ] ++ lib.optionals (nscPkg != null) [ nscPkg ]; @@ -161,6 +168,7 @@ packages = { + agenix = agenixPkg; hcloud-upload-image = hcloudUploadImagePkg; forgejo-nsc-dispatcher = forgejoNscDispatcher; forgejo-nsc-autoscaler = forgejoNscAutoscaler; @@ -176,8 +184,10 @@ system = "x86_64-linux"; specialArgs = { 
inherit self; + agenixPackage = agenix.packages.x86_64-linux.agenix; }; modules = [ + agenix.nixosModules.default disko.nixosModules.disko ./nixos/hosts/burrow-forge/default.nix ]; diff --git a/nixos/README.md b/nixos/README.md index a682db0..f37637c 100644 --- a/nixos/README.md +++ b/nixos/README.md @@ -19,8 +19,8 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B - `../Scripts/check-forge-host.sh`: verify Forgejo, Caddy, the local runner, and optional NSC services after boot - `../Scripts/cloudflare-upsert-a-record.sh`: upsert DNS-only Cloudflare `A` records for Burrow host cutovers - `../Scripts/forge-deploy.sh`: remote `nixos-rebuild` entrypoint for the forge host -- `../Scripts/provision-forgejo-nsc.sh`: render Burrow Namespace dispatcher/autoscaler runtime inputs and ensure the default Forgejo scope exists -- `../Scripts/sync-forgejo-nsc-config.sh`: copy intake-backed dispatcher/autoscaler inputs to the host +- `../Scripts/provision-forgejo-nsc.sh`: render Burrow Namespace dispatcher/autoscaler bootstrap inputs and ensure the default Forgejo scope exists +- `../secrets/forgejo/*.age`: authoritative encrypted Namespace token + dispatcher/autoscaler configs for the forge host ## Intended Flow @@ -29,7 +29,7 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B 3. Run `Scripts/bootstrap-forge-intake.sh` to place the Forgejo bootstrap password file and automation SSH key under `/var/lib/burrow/intake/`. 4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account. 5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent `. -6. Run `Scripts/provision-forgejo-nsc.sh` locally, then `Scripts/sync-forgejo-nsc-config.sh` to place the Namespace dispatcher/autoscaler runtime inputs under `/var/lib/burrow/intake/`. +6. 
Run `Scripts/provision-forgejo-nsc.sh` locally, re-encrypt the resulting NSC token + configs into `secrets/forgejo/*.age`, then deploy with `Scripts/forge-deploy.sh` so agenix updates the live forgejo-nsc runtime paths. 7. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME. 8. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace. 9. Configure Forward Email custom S3 backups for `burrow.net` and `burrow.rs` out-of-band with `Tools/forwardemail-custom-s3.sh`. diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index d600539..7dc828d 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -1,4 +1,4 @@ -{ self, ... }: +{ config, self, ... }: { imports = [ @@ -32,15 +32,36 @@ sshPrivateKeyFile = "/var/lib/burrow/intake/agent_at_burrow_net_ed25519"; }; + age.secrets.forgejoNscToken = { + file = ../../../secrets/forgejo/nsc-token.age; + mode = "0400"; + owner = "forgejo-nsc"; + group = "forgejo-nsc"; + }; + + age.secrets.forgejoNscDispatcherConfig = { + file = ../../../secrets/forgejo/nsc-dispatcher-config.age; + mode = "0400"; + owner = "forgejo-nsc"; + group = "forgejo-nsc"; + }; + + age.secrets.forgejoNscAutoscalerConfig = { + file = ../../../secrets/forgejo/nsc-autoscaler-config.age; + mode = "0400"; + owner = "forgejo-nsc"; + group = "forgejo-nsc"; + }; + services.burrow.forgejoNsc = { enable = true; - nscTokenFile = "/var/lib/burrow/intake/forgejo_nsc_token.txt"; + nscTokenFile = config.age.secrets.forgejoNscToken.path; dispatcher = { - configFile = "/var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml"; + configFile = config.age.secrets.forgejoNscDispatcherConfig.path; }; autoscaler = { enable = true; - configFile = "/var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml"; + configFile = 
config.age.secrets.forgejoNscAutoscalerConfig.path; }; }; } diff --git a/secrets.nix b/secrets.nix new file mode 100644 index 0000000..1e49f5d --- /dev/null +++ b/secrets.nix @@ -0,0 +1 @@ +import ./secrets/secrets.nix diff --git a/secrets/README.md b/secrets/README.md new file mode 100644 index 0000000..2132079 --- /dev/null +++ b/secrets/README.md @@ -0,0 +1,17 @@ +# Secrets + +Burrow secrets live in `secrets/.age` and are managed with `agenix`. + +For the Forgejo Namespace Cloud runtime: + +- `secrets/forgejo/nsc-token.age` +- `secrets/forgejo/nsc-dispatcher-config.age` +- `secrets/forgejo/nsc-autoscaler-config.age` + +Use: + +- `make secret name=forgejo/nsc-token` +- `make secret-file name=forgejo/nsc-token file=/path/to/source` + +The forge host decrypts these files at activation time and feeds the resulting +paths into `services.burrow.forgejoNsc`. diff --git a/secrets/forgejo/nsc-autoscaler-config.age b/secrets/forgejo/nsc-autoscaler-config.age new file mode 100644 index 0000000000000000000000000000000000000000..243394ab37ac2cd766153d2a7f37f4bf3719d212 GIT binary patch literal 1396 zcmYdHPt{G$OD?J`D9Oyv)5|YP*Do{V(zR14F3!+RO))YxHMCSHtuXPk2vjIYOLR>Q z^G_`EP4Rb03U|qJGB$J$GLNVTwk-E8&L}DM@e4I{$_;eP4dpT`^)Cwc4l|1K^l)+U zE+`Gj$SwgYS1JAOcP@`1$jFPabk|?i8 zZzC_`$gqm=?Chw(Jc}fwD(^B=M+2@PgR(T=h|&m`$|TcBx7@5e&(f&0Fk|P!a_s_V zVoaDz{szc=AoQ*%6<9E-ZQY@w`+!yT;|~#dU^PmPqe@cYo!UckBM>IUm~|P$u(e zqh9X&JvO^|zGtfVJo++qX*>T4-fhbb_;V)DyZ@r?MCSX$bHemeF6f_Dh|lf+R*-=L6e1w-xN+L=qoMXb-M0SgN>c-ntFrd zM{Dw5pL(T#TIasVvWv-Oes`~IVp;w+oZWsy1oxNzJ7!F5i)^3Q2|hAx-XCRm=Jjp$ z@0Gtca5|<)=RPS<_P*`mWuP1vrO&j%YsuO#I;S?~#Huva`Q}}|H`ym$cDC8OOUtgG zexcJeEjp7+CtWQf-|zoyyaz3U$;o%3sa6yTeg}$@tBd^pWkPC$NKbN?bFhmKR+qV zS$AMUz|DP9_y1qK$bb2){zCp!&54PtgfiK3*|)Gg-rc+WWwe%f39_`b#rI> z?eVDh*{7VAX{srk!sX|DT=!jYzT70&led50cGB9NDBO4J!TcX(C3oB=%9O>uE8O^5 zuVBH{hneyV*NaA+tK@IUoE5A6=J526Y5d=BAGoo;%-ZVlG@-yY&TGfEWrcH9uuojS zXm#JOXK&P>-j)0muwnC>DDzqFj7c$>ruic7#{;+C*4egeiE84r9y9)oz$I_8*}opv 
z;19C?dgrUn{pHcB6YlJPp;mDEg^}{>NAX{kf4k|j1g|{lV}GD|(e*anDFyJ8*^%FjdHBa?jQLS>;5X-OEKH~_|5~W%jbM{ dIey^W(-|^NvXaj>H3alX?XyX=)Zd#I0{|4xfhqt1 literal 0 HcmV?d00001 diff --git a/secrets/forgejo/nsc-dispatcher-config.age b/secrets/forgejo/nsc-dispatcher-config.age new file mode 100644 index 0000000000000000000000000000000000000000..a3144309524798ec00ac0e89cbd041a6bff8479e GIT binary patch literal 1324 zcmYdHPt{G$OD?J`D9Oyv)5|YP*Do{V(zR14F3!+RO))YxHMCSHtuXPk2vjIGO>vG2 z^G*viFm%y3_HeccGS&~&cMXV04G9e|h|0<}@JtVnbPRLPj^s*>G7rklb}KIla4t4C z4J*&qkMK!1wahjxElm!|N%yv}NDJ1^$?;2b$w#-%vnVRpF^c zG~GEO+buLO!&l$a!?7r{C@I)6EfC!{ud0a9a7Tp)C+_b{izO!uOqNUx%z z%q;W7Jhz;R(9}xjfOV6J3|t`Ox?cUYvP5i9OZ>O z_wfbsW?0Ts=`p={t*DkM~Xvv&!r=dMbG<$K_;a%SU&v0)MiK>{u+;0hA=;upl;#YL| zbfis*L3u0JtujkXUj8D+ORUBV!=^s!xxCOLtSYVQdDUs(Gk$Z=`*X(3z9b&EICIzA ziwlaFP9ME+>tMUk-O_2HoIjVjr_Db3SbpB>6DKk^am<|avFnuBzJ0L*F~ox>23=!i=TNm)|VdyJwb3ZxC;qR?Ts}GA9LnF;nVNt z>!A5t%U zJ(2l6Vaq44Ym-47$W<>cZ!%AF6EU7Ovdc3e|42A02=TW74qq-JBiDcEGI*=(k%%MJ?PIcOJrRYm>tEiNCnX*V}| zu3$b;E|Q?=^Z)Cc+z+Ro)if+UxOZNRjGo%l$c5YfOx$6e+|Yj1taAJ1#=n|MtBY&Q z+8(a3+1%>->DSQ%pH{7%XZSMaL&if}r^H9Et-oyj$v&O$;F*BcUplvEaGzE_bVw!Q F7yxcMSV{l@ literal 0 HcmV?d00001 diff --git a/secrets/forgejo/nsc-token.age b/secrets/forgejo/nsc-token.age new file mode 100644 index 0000000000000000000000000000000000000000..7a515c1a502ef96eb4966bb5327efa02518fb4d9 GIT binary patch literal 1211 zcmYdHPt{G$OD?J`D9Oyv)5|YP*Do{V(zR14F3!+RO))YxHMCSHtuXPk2vjgN^>TBr z$TJLx%<~B;NjC6_3@yvas`4^V4KH)H^hm5Mj>;|y%J(Yr@a4+%tSHX$&$S5m&<^$t za`DyAN(**(Dv5~DPfsqk@QX|g%+j{ZF%EEaOGmfOvnVRpF zJtV`xz}!4FEy&Q*q}&B1qd`->4`hu`D;E z($~|sBFnPM*eInU(;(B`A}rD=#1q{%ud0a9a7P7mOVfga6thCtEJG*Dj5KZ2L|1Ru zWOJ9ykX&QW!c4>9f;6w-s>ICP5LYgftgzH1v+x{uOEdq>(oBdBJ#?%lj>I^#{v{y$Zhwv_LDdCA`8)2!MW>})$~Vh8 zxTCk`4{uKJCON@Hg)tJxvS%Op+x?>XQ>1yQZ;`>NTP*juN`74Qy%~^UbvNH&?@J#! 
zrVsmj_NV*XJ&C!zcF7y1XO#zzwQOAb)iO5Oep+|Kp)Z{KKUVnPSD4Faz{4)J>Vcm} zs9&jmabVQzJ8Xxyl}ZLrW%NH2yRazmq{ZD%DVx(T%o&s=xi7n`Z`o$p%+^$~s{U8O z+ZQ2uKN@5IT%Yk%((ApujPBa}*QPJF%s3jm{a8y$>2(MHl(VL9>;039BF$8eAJv(8 z*!!dY??~C>_D3xrPUl_E_;2q{FT>aAQrhYDi*lA+a8g>{{=eUquk~ZoD`upt zh0p&M&0BE%xr>?uukT96u8I2=y_)JSqL?DP?UG$k%sPpn^$Ueht2`_9aom-u9&m$6 z#mcDUyZ^#f9A2LcfJiXm>hL)S#W}e2Q+O4;rz59QB=4q|V zPRG-&{z}g9VqfcC(PE#F`!DapZI|o+{40)|8C<=-+^OgAO7ES89}_P`S@8Z)+1ovb ziMLVKZiAC&0*A>%>u^c$;{P7s7A-zr*CE|??h3!*>qrGpgAIoc@L!l(b9?X9eqoPu zRwCPui5M<(sjrhySf9ftv!q3F`KC2r-{hoCE@9%~$T2(G!O!yC@{-XJN9Arg=0jWK zH~%_qb24bL-`~jJjQ_i8vz_JDr#}pL`|g$5>D_ur>*R~cN_WKOF4T$85!%YM*(;UUjrQk0rSz97FMPptz(0qR!zR0Egay6sNGPOMgz@@6wg`?CkZlI`#S4J$nhw9c2t}*`=;t zGUpas8~8pm=eo7(Zl}_i-t7-$xA*x)Ox(9>vAc8QJ!kuv=BVhB|4%IzJ$NC!#$mtl jlyHX$-(4>Jw|0sxu=%I*`N*`Yht3x>6qv2f%uNFTlg9?5 literal 0 HcmV?d00001 diff --git a/secrets/secrets.nix b/secrets/secrets.nix new file mode 100644 index 0000000..1cacc6a --- /dev/null +++ b/secrets/secrets.nix @@ -0,0 +1,12 @@ +{ }: +let + contact = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa"; + agent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net"; + forge = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAlkGo4lwpwIIZ0J01KjTuJuf/U/wGgy4/aKwPIUzutL root@burrow-forge"; + + forgeAutomation = [ contact agent forge ]; +in { + "secrets/forgejo/nsc-token.age".publicKeys = forgeAutomation; + "secrets/forgejo/nsc-dispatcher-config.age".publicKeys = forgeAutomation; + "secrets/forgejo/nsc-autoscaler-config.age".publicKeys = forgeAutomation; +} diff --git a/services/forgejo-nsc/README.md b/services/forgejo-nsc/README.md index dbd7e78..6f55717 100644 --- a/services/forgejo-nsc/README.md +++ b/services/forgejo-nsc/README.md @@ -152,19 +152,21 @@ instances: ``` For Burrow, use `Scripts/provision-forgejo-nsc.sh` to mint the Forgejo PAT, -generate a Namespace token from the logged-in namespace account, and render the 
-dispatcher/autoscaler configs into `intake/forgejo_nsc_{dispatcher,autoscaler}.yaml` -plus `intake/forgejo_nsc_token.txt`. The token file is emitted as JSON with a +generate a Namespace token from the logged-in namespace account, and render +bootstrap artifacts into `intake/forgejo_nsc_{dispatcher,autoscaler}.yaml` plus +`intake/forgejo_nsc_token.txt`. The token file is emitted as JSON with a `bearer_token` field so both the Compute API path and the `nsc` CLI fallback can consume the same secret material. -For ongoing operations, use `Scripts/sync-forgejo-nsc-config.sh`: +Long-lived runtime state is now sourced from age-encrypted files: -- `Scripts/sync-forgejo-nsc-config.sh` copies the intake-backed configs and - Namespace token onto `/var/lib/burrow/intake/` on the forge host, reapplies - file ownership for `forgejo-nsc`, and restarts the dispatcher/autoscaler. -- `Scripts/sync-forgejo-nsc-config.sh --rotate-pat` additionally mints a new - Forgejo PAT on the Burrow forge host and refreshes the local intake files. +- `secrets/forgejo/nsc-token.age` +- `secrets/forgejo/nsc-dispatcher-config.age` +- `secrets/forgejo/nsc-autoscaler-config.age` + +After refreshing the intake files, re-encrypt them into `secrets/forgejo/*.age` +and deploy the forge host so `config.age.secrets.*` updates the live paths for +`services.burrow.forgejoNsc`. 
Run it next to the dispatcher: From c72426ef526d9a03285a462d906fb92d4048e7e7 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 22:45:12 -0700 Subject: [PATCH 07/50] Rotate Forgejo NSC token --- secrets/forgejo/nsc-token.age | Bin 1211 -> 1238 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/secrets/forgejo/nsc-token.age b/secrets/forgejo/nsc-token.age index 7a515c1a502ef96eb4966bb5327efa02518fb4d9..dfd1d04b253336e74f294f9b1120f6e6e5c33389 100644 GIT binary patch delta 1190 zcmdnZd5v>|PJMV%q)BF!V|Iy8x@&o4v75WOb8%QeWLRi^iFa;kdSO^)TC#6Qj-!59 zI#;n-k-kwxL}0j6v0qtEzHz#KcBz4(S!R%Pn0twZtD(DNQJO`GQCaM5d7iS7>@gMY(xdkb#k z#E;_j<-vvq=81{L&ORPyzD|zrep!wc?*6Xn-X3B8Az|4?ky%;h5grxp+UeO`MP3yV zMM>oq5e7+VrrF71M)@9QroqldhE*YsRYf5cffiYAVHv?eNkN`my1KdwIcBB$2HM)$ znStp}9|H%>d-9ckT|V=bZ|A2OrW?AwGj0|%O1#QEGWD)!%xkR`=DZJA9B+Q} z;zW;D?z*dmQU6N5>n}UqFU|4NY}K(dj@>D4zulhk|M?j6S7=J>5&5Urc3t2<|LieG z0+Z&wGuHL7PqeuukEvOw%v*7G-u^FVi_YA-Vm0~7)cFy6wmiADSMbn@Q|D41Pczxx z(Vr`!dn|0lI>xPsH(h=yv1b9(gk3u-ZhLMx9JsNJ)#ZfZnqKkP!u99(v^|(Q_0YPF z9d~ndmni*PY+QRazB{?Y+518gyB>p-?Na9#Z#Gp-Z|7Uw#o+Z?E=EkP@4VXg;scr=ryqKMl|9k1 z>Q`}ic;|&9U)&PPY`dj*rEb@~=e{GPl}XE?I{cZ)i3U!VRG>A&fkkYzI0rEP@lS6 z<}yQ=e#HU16@rqN{;Dl`C{&n zZsgg8-F|PlWAF9nGp|H^zwE*@ch_UzNtsuk)xY|gdC@A))T8<48Gh!l3GrWEq@2_L z({Sjb6!-F-B5fRRqQrET{S80kJhyjQQh@q|CXrK5gi`-+5e&YmkT{cNai7w!1(sKSj8`rW!xG5c-ndjMu>BHaK0 delta 1163 zcmcb{xtnu>PQ9tAmz#4%o?$>_o=-?gvVl)zXjx8Hm6v&Hc$u@MM`C4hRCZBNzE_cl zFIT2#MRAUQu0^fq7tH zh+&bBu~%q0S4pm)Nr`7lXlg}}w!gknQA%Q2ZbqfAr*B1;WtFi}N=2qYrn^O0q*I9J z#E;_j=9Z=f1u15Qu33gomKkZB*r{*~Z>kS^l|h?#b@;E*_QHUSVklTyNBqCGXw4dD(Qvo0|Q9 zsxECQ-}&;Az00Rr*BA1C+gv&M-{VJTZFEm+hPqr~>2Z8~;-$9n)a-2L5|65A)=+Wf zyVBeJO_JlZ8rGjae4H_Tg}91NOSzP9mUVDPZ_OXxoZwAzf{O}cB#vdzKJvHwMf0af z^HAR+gZfjqSnhF^{J7|QGa$q2Zoa|Zmp*b#ANKd`PxrTb5_5U&k~d1vDi0iM*|_$r zWo)wjwC;vOUpV)Ftnk0DFqhGQhh1vb13!;Yzf%3;z^K=E*bZ+il?~di((4ZXDQ8XJ*83+HMVhG`KdLkHu=hv(-;uJ%?T=bMoX)$R@!#H^UWTvJ zrL@!Q7v(Iu;H0#?{eQnJU+c%FSIkz+5B~IP3!ncjnz!Kia~CxSUf-3BT@&{$dNtKu 
zL@`Bn+alfAwpH_KR>f^X8Q$64YlZus5$#?%*j&iC_@yjjS5)vBc+1YuP zeLrNBaCmyV=L{`3x6M3_N3~mTKYRE8_{`H?^<+JZjp-{6{V_-uVq9$El7f zPGMP>{+ztur7Q2*+3RU_>hrUE_7a*q${60VOI^KW&Mme!@O@^^b!*k#PNgxu+aJho zx9anYn7D7%Vt41pd(QST%~8=M|DRecdhkMcjl+K9Dd7$izPnubZ|xLYVDnGq^O0#) P51lV&C@@={nVSXxY<~LG From f74a17c124b03a9e570e0ad1b83ac3fb20569e8a Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 22:54:33 -0700 Subject: [PATCH 08/50] Use NSC keychain for macOS fallback --- .../forgejo-nsc/internal/nsc/macos_nsc.go | 51 ++++++++++++++++--- 1 file changed, 43 insertions(+), 8 deletions(-) diff --git a/services/forgejo-nsc/internal/nsc/macos_nsc.go b/services/forgejo-nsc/internal/nsc/macos_nsc.go index c22fadb..6e7273f 100644 --- a/services/forgejo-nsc/internal/nsc/macos_nsc.go +++ b/services/forgejo-nsc/internal/nsc/macos_nsc.go @@ -14,6 +14,18 @@ import ( "time" ) +func nscCLIEnv() []string { + env := os.Environ() + out := env[:0] + for _, entry := range env { + if strings.HasPrefix(entry, "NSC_TOKEN_FILE=") { + continue + } + out = append(out, entry) + } + return out +} + func normalizeMacOSNSCMachineType(machineType string) (normalized string, changed bool, err error) { vcpu, memoryMB, err := parseMachineTypeCPUxMemGB(machineType) if err != nil { @@ -56,10 +68,6 @@ func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName str if machineType == "" { return errors.New("machine_type is required for macos runners") } - if strings.TrimSpace(os.Getenv("NSC_TOKEN_FILE")) == "" { - // The Burrow forge host feeds NSC_TOKEN_FILE from the intake-backed runtime token. - return errors.New("NSC_TOKEN_FILE is required for macos runners") - } selectors := macosSelectorsArg(d.opts.MacosBaseImageID) if selectors == "" { @@ -141,6 +149,7 @@ func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName str defer cancel() cmd := exec.CommandContext(createCtx, d.opts.BinaryPath, args...) 
+ cmd.Env = nscCLIEnv() var buf bytes.Buffer cmd.Stdout = &buf cmd.Stderr = &buf @@ -210,10 +219,9 @@ func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName str defer d.destroyNSCInstance(context.Background(), runnerName, instanceID) script := macosBootstrapWrapperScript(runnerName, req, d.opts.Executor, d.opts.WorkDir) - // Use the Compute SSH config endpoint (direct TCP) instead of `nsc ssh`, which - // relies on a websocket-based SSH proxy that is not supported by the - // revokable tenant token we run the dispatcher with. - if err := d.runMacOSComputeSSHScript(ctx, runnerName, instanceID, script); err != nil { + // The CLI fallback is explicitly keychain-backed and does not rely on the + // service bearer token, so use `nsc ssh` end-to-end here. + if err := d.runMacOSNSCSSHScript(ctx, runnerName, instanceID, script); err != nil { return err } return nil @@ -285,6 +293,7 @@ func (d *Dispatcher) destroyNSCInstance(ctx context.Context, runnerName, instanc args := []string{"destroy", "--force", instanceID} args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL) cmd := exec.CommandContext(ctx, d.opts.BinaryPath, args...) + cmd.Env = nscCLIEnv() var buf bytes.Buffer cmd.Stdout = &buf cmd.Stderr = &buf @@ -336,6 +345,32 @@ func shellSingleQuote(value string) string { return "'" + strings.ReplaceAll(value, "'", `'\"'\"'`) + "'" } +func (d *Dispatcher) runMacOSNSCSSHScript(ctx context.Context, runnerName, instanceID, script string) error { + sshCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + + args := []string{"ssh", "--disable-pty", instanceID, "/bin/bash"} + args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL) + + cmd := exec.CommandContext(sshCtx, d.opts.BinaryPath, args...) 
+ cmd.Env = nscCLIEnv() + cmd.Stdin = strings.NewReader(script) + + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + + if err := cmd.Run(); err != nil { + if errors.Is(sshCtx.Err(), context.DeadlineExceeded) { + return fmt.Errorf("nsc ssh timed out after %s\n%s", 5*time.Minute, strings.TrimSpace(buf.String())) + } + return fmt.Errorf("nsc ssh runner bootstrap failed: %w\n%s", err, strings.TrimSpace(buf.String())) + } + + d.log.Info("macos runner bootstrap completed via nsc ssh", "runner", runnerName, "instance", instanceID) + return nil +} + func prependNSCRegionArgs(args []string, computeBaseURL string) []string { region := strings.TrimSpace(os.Getenv("NSC_REGION")) if region == "" { From 5c57ac365518f4237032516f43a6c71d68996826 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 22:58:10 -0700 Subject: [PATCH 09/50] Use macOS-safe runner workdir --- services/forgejo-nsc/internal/nsc/macos.go | 15 +++++++++++---- services/forgejo-nsc/internal/nsc/macos_nsc.go | 4 +--- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/services/forgejo-nsc/internal/nsc/macos.go b/services/forgejo-nsc/internal/nsc/macos.go index 9bf3837..42bb798 100644 --- a/services/forgejo-nsc/internal/nsc/macos.go +++ b/services/forgejo-nsc/internal/nsc/macos.go @@ -125,6 +125,16 @@ func macosComputeBaseImageID(baseImageID string) string { } } +func macosWorkDir(workdir string) string { + workdir = strings.TrimSpace(workdir) + switch workdir { + case "", "/var/lib/forgejo-runner": + return "/tmp/forgejo-runner" + default: + return workdir + } +} + type nscBearerTokenFile struct { BearerToken string `json:"bearer_token"` } @@ -183,10 +193,7 @@ func (d *Dispatcher) launchMacOSRunner(ctx context.Context, runnerName string, r httpClient := &http.Client{Timeout: 60 * time.Second} client := computev1betaconnect.NewComputeServiceClient(httpClient, d.opts.ComputeBaseURL) - workdir := d.opts.WorkDir - if strings.TrimSpace(workdir) == "" { - workdir = 
"/tmp/forgejo-runner" - } + workdir := macosWorkDir(d.opts.WorkDir) env := map[string]string{ "FORGEJO_INSTANCE_URL": req.InstanceURL, diff --git a/services/forgejo-nsc/internal/nsc/macos_nsc.go b/services/forgejo-nsc/internal/nsc/macos_nsc.go index 6e7273f..e7b8023 100644 --- a/services/forgejo-nsc/internal/nsc/macos_nsc.go +++ b/services/forgejo-nsc/internal/nsc/macos_nsc.go @@ -305,9 +305,7 @@ func (d *Dispatcher) destroyNSCInstance(ctx context.Context, runnerName, instanc } func macosBootstrapWrapperScript(runnerName string, req LaunchRequest, executor, workdir string) string { - if strings.TrimSpace(workdir) == "" { - workdir = "/tmp/forgejo-runner" - } + workdir = macosWorkDir(workdir) // Pass all values via stdin script so secrets do not appear in the nsc ssh argv. env := map[string]string{ From 6300c661ff848e0a938e31c46206e9f2a66b7b1d Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 23:02:50 -0700 Subject: [PATCH 10/50] Lower macOS Namespace runner default --- secrets/forgejo/nsc-autoscaler-config.age | Bin 1396 -> 1395 bytes services/forgejo-nsc/README.md | 2 +- services/forgejo-nsc/autoscaler.example.yaml | 2 +- services/forgejo-nsc/deploy/autoscaler.yaml | 2 +- .../forgejo-nsc/internal/nsc/dispatcher.go | 2 +- 5 files changed, 4 insertions(+), 4 deletions(-) diff --git a/secrets/forgejo/nsc-autoscaler-config.age b/secrets/forgejo/nsc-autoscaler-config.age index 243394ab37ac2cd766153d2a7f37f4bf3719d212..3d8df29dfe23e4c974eade24d7721b5946a22127 100644 GIT binary patch delta 1348 zcmeyu^_gpePJOa-u~(#7X<ruIOO>-Hm%dkkMOavnd!T7jhOc8qPNA2vd7xL3Wne~bMTB-%R=Q)bg|BarONoK+ z#E;_j=80xOIR)C@u8ASu5yja#rRhl)nP!3M{sAr_C7A(H`en|!+HO9D&OruT9^t`G zX&F)JSw@8g&Sq5whKbtQF6p_ELB&q4k;d9S74Er39?qWGm7ykFy1Kdw!S0cUK>>+A z0ZG|OiGjgUPL6pgnZbUMmRXtM#px*q*(q7|ZUMdp+J#k~T%WGIS;(&c%QdF%53}w} zjquGw(Y<9kl*Wmgn-2Y1BqaP_bL-7g$r)i zlU}gs$cq_wHRiWkiOeo zaJh-bzRmS(<4!oG=LbZkADPs^;GyEY$7%*gORW1V$xl8i>P%dag;o_r4? 
z^wmC#DAaGPeB)I8zHhV5a{+$`=7yQgY6*(P^Sd5vUQ%EFuzsER))M~O{rkQ?Y~02( z$wjOFv8R_a+rhXGVd2Y@OB{mNG@ef2<$9x}*3xL-_fmQ570>FIzRK&e4|X1x_;s}} zSvUL8yEADonsnd4p4go*>4Q5{%M_#SazCE^4P3VEaP`MO{mGfteQRgEKghjhlR|c@ zz<<8o-)z_0ny))>Ik7aZ{&?1nc+babN`~T1-RAX86E+4b{S#UfbxYz=_G4ET*0)A( zvz8>EWphg7$>rDy7%Q`JFbdDcfuG@>LHC+&`Iv#Z`R;bV-!>^V2o8744%?Add| zy{GrR!fUJWsSPe2y@8PYNGd~8djQG$}Rd#pp-k^0a z6V_Z<5f*;unA0&2!Gns-2Pb@gSn{o8MSb$K|9Q*%J*HAuEeihl{l0(H>E?n$ z?~5(E#%13R{!7xbT6)k=GxO{dhj|;WX?y+q*E=(6;i|TS7MZ_njg-#WML^3o&o?|f!_ziSD@lBYZ+oaeZ;_Y2+I`OHr1&{CI=uUdO~ zi}v=l_|H+=T4VUBTT-egW*h6%b%nwI{TE)?#~E$FBJW={iR*oH*OPm%6gK$&zEb|% zb*jOVsdxYG^u86yajh}6=c7Vv)Y_}Fzj*UKi9DS$Lz7yq(gkb-!8Bn9c_+Q#F?fVfS!%9xuYcp$HO&V5; GssjM6S8Hkj delta 1349 zcmey&^@VGKPJKaIqHAiHe`1+$ioZ)zxJ#Ckv7vL2c|=99Ww~#0MoFoUU#Ou|ZlGgs zD3@8Oe^Ib^m{F9chl`7ML1{=vc3F6YrD>H{Sb0f>Pk3=@v0rg=RFJ-LI+w1ULUD11 zZfc5=si~o*f@e`wu4B4FV1;{bk#S(4PgJ;fXrZ5Hky}z#pifv@c9dnZgXmnWvkHTei2MwqKx2s*`!> z#E;_j76pd+UIw0JL7_&e?inRvRV7hgk={mL#*twa;n~?yfq525MpfQrrj7<&K?Y@M zz7eGnE|p29k#4zJd7h&c;PaE>-El0illh;YE>Ly1KdwW~LG0iM}pn zUgi;z;brcwMdgKFu9hC@zB$QGex{azx$cSe>6QU5<>4-#Tv^eRR4?-9gg@VM{5^NX ziklI8=JyZ5z}<=>;l?W$}R8K{n2wiwmqOs=FvvI-1mEIcJX}ARPlNAW$My){uR92mK*Tr zOrCfDMcaw`%=d@qgz2SR&_AsZpWFYfX0u&ip!oFa{-a+@6_umg+vdJx-S}t2Gd_jN zKPSIEHm{jr8nx*1w65oj>5BparXFv-HG55pX_))j=8$)%#oU|MRNi^yqwzX_@@vi& z_q^wXC!fBjT$}Z5ZRu_I)sK2?H`^bt;+dBEX>Y5@tE+NPjJ4}IjC{;fuWR$Y`u9lu zj&jn!gC+|VzbTwh&{tZ%>vY|v1{*uuHT4F^kJjYBKJ`lfw9b8zWfzmn{O(@a#IpQt zIJ^CZ2<|Wacg&dB7TG?p6MSUYyg$nB%<*gu*ZkAHdg1iY z=Qj^He#@V#dhXp+tpKaY!5IPtu=vxNp6ClFN9@%b>n) zk-`_I9G|vqHGSeSBe_4n&-9M<>A%{ir8j?mQkb*uz=VLC`=svwzj%@V@>%_7d*_v^ z@-&90R0LUFQFRx}iL?uu`t8Vs&1e2x>saen@6Pnw<5BOkPdP2qR8uyE%g_0^?z`Z8 zxk;`kZ~wmSq_sOyxbN13`9I1^?zl~qDT{koxbd@I!Gfs|Gvybq7mYYq$={GUD^~l> z;prXI_`lyiaASR$wbkQkLV<0Z*N$z=3g@a|pSXU}>b_sk-l#vlEBPs4!{#+n=Cj-x zlVUPW^F`e2j|XnOt+Q>{64k_KJ!bqFflJX)aYq`Xv*I8!0%K2`a9ce>Q{H2cwWP0 zc&_ftZj-HYyo-DntgC+{dmv!tr=Js?sy|kRRmkqGQlIOX%vDwLF-U){Qreeoiw<$U 
z?1`2vy`Qeds3=zBcv$`{-^9pTwcxl_hmO~XZ+2PC5x|l$~j3Txu-K^nq(!PZE6VUk=kdI KXsN$9F9rZmly-Xn diff --git a/services/forgejo-nsc/README.md b/services/forgejo-nsc/README.md index 6f55717..2cffe63 100644 --- a/services/forgejo-nsc/README.md +++ b/services/forgejo-nsc/README.md @@ -144,7 +144,7 @@ instances: - labels: ["namespace-profile-macos-large"] min_idle: 0 ttl: "90m" - machine_type: "12x28" + machine_type: "6x14" - labels: ["namespace-profile-windows-large"] min_idle: 0 ttl: "45m" diff --git a/services/forgejo-nsc/autoscaler.example.yaml b/services/forgejo-nsc/autoscaler.example.yaml index db7738e..2185469 100644 --- a/services/forgejo-nsc/autoscaler.example.yaml +++ b/services/forgejo-nsc/autoscaler.example.yaml @@ -27,7 +27,7 @@ instances: - labels: ["namespace-profile-macos-large"] min_idle: 0 ttl: "90m" - machine_type: "12x28" + machine_type: "6x14" - labels: ["namespace-profile-windows-large"] min_idle: 0 ttl: "45m" diff --git a/services/forgejo-nsc/deploy/autoscaler.yaml b/services/forgejo-nsc/deploy/autoscaler.yaml index fae0d37..30b2729 100644 --- a/services/forgejo-nsc/deploy/autoscaler.yaml +++ b/services/forgejo-nsc/deploy/autoscaler.yaml @@ -28,7 +28,7 @@ instances: - labels: ["namespace-profile-macos-large"] min_idle: 0 ttl: "90m" - machine_type: "12x28" + machine_type: "6x14" - labels: ["namespace-profile-windows-large"] min_idle: 0 ttl: "45m" diff --git a/services/forgejo-nsc/internal/nsc/dispatcher.go b/services/forgejo-nsc/internal/nsc/dispatcher.go index 49cb4ec..3c7e94f 100644 --- a/services/forgejo-nsc/internal/nsc/dispatcher.go +++ b/services/forgejo-nsc/internal/nsc/dispatcher.go @@ -116,7 +116,7 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin // Compute macOS shapes differ from the Linux "run" defaults. If the request // didn't specify a machine type, ensure we pick a macOS-valid default. 
if machineType == "" || machineType == d.opts.DefaultMachine { - machineType = "12x28" + machineType = "6x14" } // Prefer the Compute API path because it uses the service token (NSC_TOKEN_FILE) From b9fb30c18cbf0a7dfe32b6cce58365b4c1a25926 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 23:07:57 -0700 Subject: [PATCH 11/50] Detach runner launch from request timeout --- services/forgejo-nsc/internal/app/service.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/services/forgejo-nsc/internal/app/service.go b/services/forgejo-nsc/internal/app/service.go index 45b66eb..10639a5 100644 --- a/services/forgejo-nsc/internal/app/service.go +++ b/services/forgejo-nsc/internal/app/service.go @@ -94,6 +94,17 @@ type RunnerHandle struct { Name string `json:"name"` } +func launchContext(ttl time.Duration) (context.Context, context.CancelFunc) { + if ttl <= 0 { + return context.WithTimeout(context.Background(), 2*time.Hour) + } + // Provisioning can legitimately take several minutes before the runner starts + // processing the actual Forgejo job. Keep the launch context independent from + // the caller's HTTP timeout so autoscaler/webhook requests don't kill active + // bootstraps mid-flight. 
+ return context.WithTimeout(context.Background(), ttl+30*time.Minute) +} + func (s *Service) Dispatch(ctx context.Context, req DispatchRequest) (DispatchResponse, error) { count := req.Count if count <= 0 { @@ -134,7 +145,10 @@ func (s *Service) Dispatch(ctx context.Context, req DispatchRequest) (DispatchRe return fmt.Errorf("fetching registration token: %w", err) } - name, err := s.dispatcher.LaunchRunner(egCtx, nsc.LaunchRequest{ + launchCtx, cancel := launchContext(ttl) + defer cancel() + + name, err := s.dispatcher.LaunchRunner(launchCtx, nsc.LaunchRequest{ Token: token, InstanceURL: s.instanceURL, Labels: labels, From 9b642aa5b78c2ef3f1ef6e937b56c3597ac9ebab Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 23:14:40 -0700 Subject: [PATCH 12/50] Skip Nix install in macOS runner bootstrap --- services/forgejo-nsc/internal/nsc/macos.go | 36 +++------------------- 1 file changed, 4 insertions(+), 32 deletions(-) diff --git a/services/forgejo-nsc/internal/nsc/macos.go b/services/forgejo-nsc/internal/nsc/macos.go index 42bb798..11ac778 100644 --- a/services/forgejo-nsc/internal/nsc/macos.go +++ b/services/forgejo-nsc/internal/nsc/macos.go @@ -588,43 +588,15 @@ if ! command -v curl >/dev/null 2>&1; then exit 1 fi -if ! command -v nix >/dev/null 2>&1; then - echo "Installing nix (Determinate Systems installer)..." - installer="/tmp/nix-installer.$$" - curl -fsSL -o "${installer}" https://install.determinate.systems/nix - chmod +x "${installer}" - - if command -v sudo >/dev/null 2>&1; then - if sudo -n true 2>/dev/null; then - sudo -n sh "${installer}" install --no-confirm - else - sudo sh "${installer}" install --no-confirm - fi - else - sh "${installer}" install --no-confirm - fi - - rm -f "${installer}" -fi - +# Apple build workflows do not require Nix just to bootstrap the Forgejo runner. +# If Nix is already present on the base image, keep it on PATH; otherwise leave +# installation to the job itself. 
if [[ -f /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]]; then # shellcheck disable=SC1091 . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh + export PATH="/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}" fi -export PATH="/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}" - -# Flake builds need nix-command + flakes enabled. Workflows may layer additional -# config, but ensure a sane default exists. -mkdir -p "${XDG_CONFIG_HOME:-$HOME/.config}/nix" -cat > "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" <<'EOF' -experimental-features = nix-command flakes -sandbox = true -fallback = true -substituters = https://cache.nixos.org -trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= -EOF - mkdir -p bin export PATH="${PWD}/bin:${PATH}" From 17112e4e48ed3effd0b076b55d2bc1421046bd80 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 23:20:24 -0700 Subject: [PATCH 13/50] Register Namespace runners with exact labels --- services/forgejo-nsc/internal/nsc/dispatcher.go | 8 +------- services/forgejo-nsc/internal/nsc/macos.go | 4 +--- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/services/forgejo-nsc/internal/nsc/dispatcher.go b/services/forgejo-nsc/internal/nsc/dispatcher.go index 3c7e94f..0591605 100644 --- a/services/forgejo-nsc/internal/nsc/dispatcher.go +++ b/services/forgejo-nsc/internal/nsc/dispatcher.go @@ -413,13 +413,7 @@ for label in ${FORGEJO_RUNNER_LABELS//,/ } ; do fi case "${label}" in *:*) resolved="${label}" ;; - *) - if [ "$runner_exec" = "host" ]; then - resolved="${label}:host" - else - resolved="${label}:${runner_exec}" - fi - ;; + *) resolved="${label}" ;; esac echo " - ${resolved}" >> runner.yaml if [ -z "${resolved_labels}" ]; then diff --git a/services/forgejo-nsc/internal/nsc/macos.go b/services/forgejo-nsc/internal/nsc/macos.go index 11ac778..1f94fd8 100644 --- a/services/forgejo-nsc/internal/nsc/macos.go +++ 
b/services/forgejo-nsc/internal/nsc/macos.go @@ -656,9 +656,7 @@ for label in ${FORGEJO_RUNNER_LABELS//,/ } ; do fi case "${label}" in *:*) resolved="${label}" ;; - *) - resolved="${label}:host" - ;; + *) resolved="${label}" ;; esac echo " - ${resolved}" >> runner.yaml if [ -z "${resolved_labels}" ]; then From 28fd58b009ffa1e83ffc584bbf1bb90f9d1dff27 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 23:39:23 -0700 Subject: [PATCH 14/50] Restore host executor labels for Namespace runners --- services/forgejo-nsc/internal/nsc/dispatcher.go | 2 +- services/forgejo-nsc/internal/nsc/macos.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/services/forgejo-nsc/internal/nsc/dispatcher.go b/services/forgejo-nsc/internal/nsc/dispatcher.go index 0591605..3db2481 100644 --- a/services/forgejo-nsc/internal/nsc/dispatcher.go +++ b/services/forgejo-nsc/internal/nsc/dispatcher.go @@ -413,7 +413,7 @@ for label in ${FORGEJO_RUNNER_LABELS//,/ } ; do fi case "${label}" in *:*) resolved="${label}" ;; - *) resolved="${label}" ;; + *) resolved="${label}:${runner_exec}" ;; esac echo " - ${resolved}" >> runner.yaml if [ -z "${resolved_labels}" ]; then diff --git a/services/forgejo-nsc/internal/nsc/macos.go b/services/forgejo-nsc/internal/nsc/macos.go index 1f94fd8..9084584 100644 --- a/services/forgejo-nsc/internal/nsc/macos.go +++ b/services/forgejo-nsc/internal/nsc/macos.go @@ -656,7 +656,7 @@ for label in ${FORGEJO_RUNNER_LABELS//,/ } ; do fi case "${label}" in *:*) resolved="${label}" ;; - *) resolved="${label}" ;; + *) resolved="${label}:${runner_exec}" ;; esac echo " - ${resolved}" >> runner.yaml if [ -z "${resolved_labels}" ]; then From 6fcd7ff6eda9872f5d5ca8d86dedabbc2a15fbdb Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 23:44:19 -0700 Subject: [PATCH 15/50] Install Rust directly in Apple workflow --- .forgejo/workflows/build-apple.yml | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) 
diff --git a/.forgejo/workflows/build-apple.yml b/.forgejo/workflows/build-apple.yml index 32c6903..27274c8 100644 --- a/.forgejo/workflows/build-apple.yml +++ b/.forgejo/workflows/build-apple.yml @@ -63,10 +63,30 @@ jobs: DEVELOPER_DIR="$selected" /usr/bin/xcodebuild -version || true - name: Install Rust - uses: dtolnay/rust-toolchain@stable - with: - toolchain: 1.85.0 - targets: ${{ matrix.rust-targets }} + shell: bash + run: | + set -euo pipefail + export RUSTUP_HOME="${HOME}/.rustup" + export CARGO_HOME="${HOME}/.cargo" + + if ! command -v rustup >/dev/null 2>&1; then + curl --proto '=https' --tlsv1.2 -fsSL https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain 1.85.0 + else + rustup set profile minimal + rustup toolchain install 1.85.0 + rustup default 1.85.0 + fi + + echo "${CARGO_HOME}/bin" >> "${GITHUB_PATH}" + . "${CARGO_HOME}/env" + + targets='${{ matrix.rust-targets }}' + for target in ${targets//,/ }; do + rustup target add "${target}" + done + + rustc --version + cargo --version - name: Install Protobuf shell: bash From afc3e79eb0f77a41d24b60987830f81715f04cf6 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 18 Mar 2026 23:46:19 -0700 Subject: [PATCH 16/50] Run Apple workflow on main pushes --- .forgejo/workflows/build-apple.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.forgejo/workflows/build-apple.yml b/.forgejo/workflows/build-apple.yml index 27274c8..9b7fcc8 100644 --- a/.forgejo/workflows/build-apple.yml +++ b/.forgejo/workflows/build-apple.yml @@ -1,6 +1,9 @@ name: Build Apple on: + push: + branches: + - main pull_request: branches: - "**" From ed247b2f5e55abb93e0c18eaa8cd9a4606ee1b99 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 00:04:27 -0700 Subject: [PATCH 17/50] Wire runner caches and forge secrets through agenix --- .forgejo/workflows/build-apple.yml | 33 +++++++-- .forgejo/workflows/build-rust.yml | 19 ++++- .forgejo/workflows/build-site.yml | 8 ++ 
.../NetworkExtension/libburrow/build-rust.sh | 8 +- Makefile | 4 +- flake.nix | 1 + nixos/README.md | 8 +- nixos/hosts/burrow-forge/default.nix | 18 ++++- secrets/README.md | 7 +- secrets/forgejo/admin-password.age | 11 +++ secrets/forgejo/agent-ssh-key.age | Bin 0 -> 843 bytes secrets/secrets.nix | 3 +- services/forgejo-nsc/README.md | 6 ++ .../cmd/forgejo-nsc-dispatcher/main.go | 42 +++++++---- services/forgejo-nsc/config.example.yaml | 13 ++++ services/forgejo-nsc/deploy/dispatcher.yaml | 13 ++++ .../forgejo-nsc/internal/config/config.go | 70 +++++++++++++++--- .../forgejo-nsc/internal/nsc/dispatcher.go | 65 ++++++++++++---- services/forgejo-nsc/internal/nsc/macos.go | 33 +++++++-- .../forgejo-nsc/internal/nsc/macos_nsc.go | 1 + 20 files changed, 299 insertions(+), 64 deletions(-) create mode 100644 secrets/forgejo/admin-password.age create mode 100644 secrets/forgejo/agent-ssh-key.age diff --git a/.forgejo/workflows/build-apple.yml b/.forgejo/workflows/build-apple.yml index 9b7fcc8..d55957e 100644 --- a/.forgejo/workflows/build-apple.yml +++ b/.forgejo/workflows/build-apple.yml @@ -22,14 +22,17 @@ jobs: matrix: include: - platform: macOS + cache-id: macos destination: platform=macOS rust-targets: x86_64-apple-darwin,aarch64-apple-darwin - platform: iOS Simulator + cache-id: ios-simulator destination: platform=iOS Simulator,name=iPhone 17 Pro rust-targets: aarch64-apple-ios-sim,x86_64-apple-ios env: CARGO_INCREMENTAL: 0 RUST_BACKTRACE: short + RUSTC_WRAPPER: sccache steps: - name: Checkout uses: https://code.forgejo.org/actions/checkout@v4 @@ -65,12 +68,29 @@ jobs: echo "DEVELOPER_DIR=$selected" >> "$GITHUB_ENV" DEVELOPER_DIR="$selected" /usr/bin/xcodebuild -version || true + - name: Prepare Cache Dirs + shell: bash + run: | + set -euo pipefail + cache_root="${NSC_CACHE_PATH:-${HOME}/.cache/burrow}" + mkdir -p \ + "${cache_root}/cargo" \ + "${cache_root}/rustup" \ + "${cache_root}/sccache" \ + "${cache_root}/apple/PackageCache" \ + 
"${cache_root}/apple/SourcePackages" \ + "${cache_root}/apple/DerivedData/${{ matrix.cache-id }}" + echo "CARGO_HOME=${cache_root}/cargo" >> "${GITHUB_ENV}" + echo "RUSTUP_HOME=${cache_root}/rustup" >> "${GITHUB_ENV}" + echo "SCCACHE_DIR=${cache_root}/sccache" >> "${GITHUB_ENV}" + echo "APPLE_PACKAGE_CACHE=${cache_root}/apple/PackageCache" >> "${GITHUB_ENV}" + echo "APPLE_SOURCE_PACKAGES=${cache_root}/apple/SourcePackages" >> "${GITHUB_ENV}" + echo "APPLE_DERIVED_DATA=${cache_root}/apple/DerivedData/${{ matrix.cache-id }}" >> "${GITHUB_ENV}" + - name: Install Rust shell: bash run: | set -euo pipefail - export RUSTUP_HOME="${HOME}/.rustup" - export CARGO_HOME="${HOME}/.cargo" if ! command -v rustup >/dev/null 2>&1; then curl --proto '=https' --tlsv1.2 -fsSL https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain 1.85.0 @@ -98,6 +118,9 @@ jobs: if ! command -v protoc >/dev/null 2>&1; then brew install protobuf fi + if ! command -v sccache >/dev/null 2>&1; then + brew install sccache + fi - name: Build shell: bash @@ -111,9 +134,9 @@ jobs: -skipPackagePluginValidation \ -skipMacroValidation \ -onlyUsePackageVersionsFromResolvedFile \ - -clonedSourcePackagesDirPath SourcePackages \ - -packageCachePath "$PWD/PackageCache" \ - -derivedDataPath "$PWD/DerivedData" \ + -clonedSourcePackagesDirPath "$APPLE_SOURCE_PACKAGES" \ + -packageCachePath "$APPLE_PACKAGE_CACHE" \ + -derivedDataPath "$APPLE_DERIVED_DATA" \ CODE_SIGNING_ALLOWED=NO \ CODE_SIGNING_REQUIRED=NO \ CODE_SIGN_IDENTITY="" \ diff --git a/.forgejo/workflows/build-rust.yml b/.forgejo/workflows/build-rust.yml index 2df1ad3..7fd2667 100644 --- a/.forgejo/workflows/build-rust.yml +++ b/.forgejo/workflows/build-rust.yml @@ -17,6 +17,10 @@ jobs: rust: name: Cargo Test runs-on: [self-hosted, linux, x86_64, burrow-forge] + env: + CARGO_INCREMENTAL: 0 + RUSTC_WRAPPER: sccache + SCCACHE_CACHE_SIZE: 20G steps: - name: Checkout uses: https://code.forgejo.org/actions/checkout@v4 @@ -24,8 +28,21 @@ jobs: token: 
${{ github.token }} fetch-depth: 0 + - name: Prepare Cache Dirs + shell: bash + run: | + set -euo pipefail + cache_root="${HOME}/.cache/burrow" + mkdir -p "${cache_root}/cargo" "${cache_root}/sccache" + echo "CARGO_HOME=${cache_root}/cargo" >> "${GITHUB_ENV}" + echo "SCCACHE_DIR=${cache_root}/sccache" >> "${GITHUB_ENV}" + - name: Test shell: bash run: | set -euo pipefail - nix develop .#ci -c cargo test --workspace --all-features + nix develop .#ci -c bash -lc ' + sccache --zero-stats >/dev/null 2>&1 || true + cargo test --workspace --all-features + sccache --show-stats || true + ' diff --git a/.forgejo/workflows/build-site.yml b/.forgejo/workflows/build-site.yml index 6f7c5e2..de296d4 100644 --- a/.forgejo/workflows/build-site.yml +++ b/.forgejo/workflows/build-site.yml @@ -24,6 +24,14 @@ jobs: token: ${{ github.token }} fetch-depth: 0 + - name: Prepare Cache Dirs + shell: bash + run: | + set -euo pipefail + cache_root="${HOME}/.cache/burrow" + mkdir -p "${cache_root}/npm" + echo "NPM_CONFIG_CACHE=${cache_root}/npm" >> "${GITHUB_ENV}" + - name: Build shell: bash run: | diff --git a/Apple/NetworkExtension/libburrow/build-rust.sh b/Apple/NetworkExtension/libburrow/build-rust.sh index 6f455a9..258351c 100755 --- a/Apple/NetworkExtension/libburrow/build-rust.sh +++ b/Apple/NetworkExtension/libburrow/build-rust.sh @@ -73,7 +73,13 @@ CARGO_PATH="$(dirname $PROTOC):$CARGO_PATH" # Run cargo without the various environment variables set by Xcode. # Those variables can confuse cargo and the build scripts it runs. 
-env -i PATH="$CARGO_PATH" PROTOC="$PROTOC" CARGO_TARGET_DIR="${CONFIGURATION_TEMP_DIR}/target" IPHONEOS_DEPLOYMENT_TARGET="$IPHONEOS_DEPLOYMENT_TARGET" MACOSX_DEPLOYMENT_TARGET="$MACOSX_DEPLOYMENT_TARGET" cargo build "${CARGO_ARGS[@]}" +EXTRA_ENV=() +for VAR_NAME in HOME CARGO_HOME RUSTUP_HOME RUSTC_WRAPPER SCCACHE_DIR CARGO_INCREMENTAL; do + if [[ -n "${!VAR_NAME:-}" ]]; then + EXTRA_ENV+=("${VAR_NAME}=${!VAR_NAME}") + fi +done +env -i PATH="$CARGO_PATH" PROTOC="$PROTOC" CARGO_TARGET_DIR="${CONFIGURATION_TEMP_DIR}/target" IPHONEOS_DEPLOYMENT_TARGET="$IPHONEOS_DEPLOYMENT_TARGET" MACOSX_DEPLOYMENT_TARGET="$MACOSX_DEPLOYMENT_TARGET" "${EXTRA_ENV[@]}" cargo build "${CARGO_ARGS[@]}" mkdir -p "${BUILT_PRODUCTS_DIR}" diff --git a/Makefile b/Makefile index e852e32..1f15f36 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,9 @@ FLAKE ?= . AGENIX ?= nix run ${FLAKE}\#agenix -- -SECRETS := forgejo/nsc-token \ +SECRETS := forgejo/admin-password \ + forgejo/agent-ssh-key \ + forgejo/nsc-token \ forgejo/nsc-dispatcher-config \ forgejo/nsc-autoscaler-config diff --git a/flake.nix b/flake.nix index 51e4bc9..ed59619 100644 --- a/flake.nix +++ b/flake.nix @@ -36,6 +36,7 @@ agenixPkg = agenix.packages.${system}.agenix; commonPackages = with pkgs; [ cargo + sccache rustc rustfmt clippy diff --git a/nixos/README.md b/nixos/README.md index f37637c..aa0fff6 100644 --- a/nixos/README.md +++ b/nixos/README.md @@ -15,19 +15,19 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. 
B - `keys/agent_at_burrow_net.pub`: automation SSH public key - `../Scripts/hetzner-forge.sh`: Hetzner inventory and replace workflow - `../Scripts/nsc-build-and-upload-image.sh`: temporary Namespace builder -> raw image -> Hetzner snapshot -- `../Scripts/bootstrap-forge-intake.sh`: copy the Forgejo bootstrap password and agent SSH key into `/var/lib/burrow/intake/` +- `../Scripts/bootstrap-forge-intake.sh`: legacy intake bootstrap helper; current forge runtime secrets should live in `../secrets/forgejo/*.age` - `../Scripts/check-forge-host.sh`: verify Forgejo, Caddy, the local runner, and optional NSC services after boot - `../Scripts/cloudflare-upsert-a-record.sh`: upsert DNS-only Cloudflare `A` records for Burrow host cutovers - `../Scripts/forge-deploy.sh`: remote `nixos-rebuild` entrypoint for the forge host - `../Scripts/provision-forgejo-nsc.sh`: render Burrow Namespace dispatcher/autoscaler bootstrap inputs and ensure the default Forgejo scope exists -- `../secrets/forgejo/*.age`: authoritative encrypted Namespace token + dispatcher/autoscaler configs for the forge host +- `../secrets/forgejo/*.age`: authoritative encrypted forge admin password, agent SSH key, and Namespace runtime configs for the forge host ## Intended Flow 1. Build and upload the raw NixOS image with `Scripts/hetzner-forge.sh build-image` or `Scripts/nsc-build-and-upload-image.sh`. 2. Recreate `burrow-forge` from the latest labeled snapshot with `Scripts/hetzner-forge.sh recreate-from-image --yes`. -3. Run `Scripts/bootstrap-forge-intake.sh` to place the Forgejo bootstrap password file and automation SSH key under `/var/lib/burrow/intake/`. -4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account. +3. Encrypt the Forgejo admin password and agent SSH key into `secrets/forgejo/{admin-password,agent-ssh-key}.age`. +4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account from the agenix secret path. 5. 
Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent `. 6. Run `Scripts/provision-forgejo-nsc.sh` locally, re-encrypt the resulting NSC token + configs into `secrets/forgejo/*.age`, then deploy with `Scripts/forge-deploy.sh` so agenix updates the live forgejo-nsc runtime paths. 7. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME. diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 7dc828d..0ce7964 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -20,7 +20,7 @@ services.burrow.forge = { enable = true; - adminPasswordFile = "/var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt"; + adminPasswordFile = config.age.secrets.forgejoAdminPassword.path; authorizedKeys = [ (builtins.readFile ../../keys/contact_at_burrow_net.pub) (builtins.readFile ../../keys/agent_at_burrow_net.pub) @@ -29,7 +29,21 @@ services.burrow.forgeRunner = { enable = true; - sshPrivateKeyFile = "/var/lib/burrow/intake/agent_at_burrow_net_ed25519"; + sshPrivateKeyFile = config.age.secrets.forgejoAgentSshKey.path; + }; + + age.secrets.forgejoAdminPassword = { + file = ../../../secrets/forgejo/admin-password.age; + mode = "0400"; + owner = "forgejo"; + group = "forgejo"; + }; + + age.secrets.forgejoAgentSshKey = { + file = ../../../secrets/forgejo/agent-ssh-key.age; + mode = "0400"; + owner = "root"; + group = "root"; }; age.secrets.forgejoNscToken = { diff --git a/secrets/README.md b/secrets/README.md index 2132079..f7d67f5 100644 --- a/secrets/README.md +++ b/secrets/README.md @@ -4,6 +4,8 @@ Burrow secrets live in `secrets/.age` and are managed with `agenix`. 
For the Forgejo Namespace Cloud runtime: +- `secrets/forgejo/admin-password.age` +- `secrets/forgejo/agent-ssh-key.age` - `secrets/forgejo/nsc-token.age` - `secrets/forgejo/nsc-dispatcher-config.age` - `secrets/forgejo/nsc-autoscaler-config.age` @@ -11,7 +13,8 @@ For the Forgejo Namespace Cloud runtime: Use: - `make secret name=forgejo/nsc-token` -- `make secret-file name=forgejo/nsc-token file=/path/to/source` +- `make secret-file name=forgejo/agent-ssh-key file=/path/to/source` The forge host decrypts these files at activation time and feeds the resulting -paths into `services.burrow.forgejoNsc`. +paths into `services.burrow.forge`, `services.burrow.forgeRunner`, and +`services.burrow.forgejoNsc`. diff --git a/secrets/forgejo/admin-password.age b/secrets/forgejo/admin-password.age new file mode 100644 index 0000000..53cfa83 --- /dev/null +++ b/secrets/forgejo/admin-password.age @@ -0,0 +1,11 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q nmGFzw38TKiVVuA9CM8wHQDVib0RddB+M/UjQnD45jk +iZNLNBlS32zR+TNfcK27T1V3w27sFKJkWfuOzHwcOL0 +-> ssh-ed25519 IrZmAg Y53DC0wGX8mjaXkD3+jZn2DviO5iSXsnZDBNCBTmLgA +XLz+YXzT4fYb7q0xuZMKgv88lAd0gGKaquSMcA6Yu3c +-> ssh-ed25519 JzXUWA EDAXBKEvHccJ4KKtHjUTA+KA+wN9bBu9v+kzRTFt9AI +JNADezBCxx26+QPD2tIpz5O8cncrJwnqaYQEWY56VGY +--- RpjdftRPUGT80IMYKFDFuHkKEr1heJOvqrqYLufhc10 +_ +F( +((0ɉ',8d]d%T[MKRQxiIf0 \ No newline at end of file diff --git a/secrets/forgejo/agent-ssh-key.age b/secrets/forgejo/agent-ssh-key.age new file mode 100644 index 0000000000000000000000000000000000000000..44ce1141a76201c003a1d4f3f4ccae7c462780cf GIT binary patch literal 843 zcmYdHPt{G$OD?J`D9Oyv)5|YP*Do{V(zR14F3!+RO))YxHMCSHtuXPk2vkVP2~YD* zPR#K2DbMyODlsc^Eh_VmaI1(iGxT?JGjpl(%FGM*tnf+-3FS&N&h~f8smu+|D$y@D z4RK9Oi7Gd)aw~Pr3~>pMN)HdriE_*^O!Y7+DMq)=vnVRpF7D!p2SSmMb1`|FARrQ?2^sGK2b(BF3|e zk{bM(pJeT-e-Sd5`72y(PJzt-MiD&g2@9scysgeB$Gi96AP-aC%uub<2PTfbU3$vJ$rk4~4 z8A1jN&zjq{Yi<0P5VOndR!c&0-?#Z56SX?}ye4`Eaw~1?E11G(_HRkb-}B1+-Dh7l 
zc_gb{Ugf=ZKPTsgOUz7#dJ8|C_nf!Oa*mhpM=c|X(DMe9)K4tg@q2k}N6)F2gWLb~ O%k~Gp&1H(7mJI+SI9>_> literal 0 HcmV?d00001 diff --git a/secrets/secrets.nix b/secrets/secrets.nix index 1cacc6a..9d40bf3 100644 --- a/secrets/secrets.nix +++ b/secrets/secrets.nix @@ -1,4 +1,3 @@ -{ }: let contact = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa"; agent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net"; @@ -6,6 +5,8 @@ let forgeAutomation = [ contact agent forge ]; in { + "secrets/forgejo/admin-password.age".publicKeys = forgeAutomation; + "secrets/forgejo/agent-ssh-key.age".publicKeys = forgeAutomation; "secrets/forgejo/nsc-token.age".publicKeys = forgeAutomation; "secrets/forgejo/nsc-dispatcher-config.age".publicKeys = forgeAutomation; "secrets/forgejo/nsc-autoscaler-config.age".publicKeys = forgeAutomation; diff --git a/services/forgejo-nsc/README.md b/services/forgejo-nsc/README.md index 2cffe63..5b2926b 100644 --- a/services/forgejo-nsc/README.md +++ b/services/forgejo-nsc/README.md @@ -45,6 +45,9 @@ profile. The important knobs are: - `namespace.machine_type` / `namespace.duration` – shape + TTL for the ephemeral Namespace environment. The dispatcher destroys the instance after a job so the TTL acts as a hard cap, not an idle timeout. +- `namespace.linux_cache_*` / `namespace.macos_cache_*` – persistent cache + volumes mounted into runners so Linux can keep `/nix` plus build caches warm + and macOS can reuse Rust toolchains, Xcode package caches, and derived data. ### Running locally @@ -160,12 +163,15 @@ consume the same secret material. 
Long-lived runtime state is now sourced from age-encrypted files: +- `secrets/forgejo/admin-password.age` +- `secrets/forgejo/agent-ssh-key.age` - `secrets/forgejo/nsc-token.age` - `secrets/forgejo/nsc-dispatcher-config.age` - `secrets/forgejo/nsc-autoscaler-config.age` After refreshing the intake files, re-encrypt them into `secrets/forgejo/*.age` and deploy the forge host so `config.age.secrets.*` updates the live paths for +`services.burrow.forge`, `services.burrow.forgeRunner`, and `services.burrow.forgejoNsc`. Run it next to the dispatcher: diff --git a/services/forgejo-nsc/cmd/forgejo-nsc-dispatcher/main.go b/services/forgejo-nsc/cmd/forgejo-nsc-dispatcher/main.go index 9dcbfb1..3a04a26 100644 --- a/services/forgejo-nsc/cmd/forgejo-nsc-dispatcher/main.go +++ b/services/forgejo-nsc/cmd/forgejo-nsc-dispatcher/main.go @@ -43,19 +43,23 @@ func main() { } dispatcher, err := nsc.NewDispatcher(nsc.Options{ - BinaryPath: cfg.Namespace.NSCBinary, - ComputeBaseURL: cfg.Namespace.ComputeBaseURL, - DefaultImage: cfg.Namespace.Image, - DefaultMachine: cfg.Namespace.MachineType, - MacosBaseImageID: cfg.Namespace.MacosBaseImageID, - MacosMachineArch: cfg.Namespace.MacosMachineArch, - DefaultDuration: cfg.Namespace.Duration.Duration, - WorkDir: cfg.Namespace.WorkDir, - MaxParallel: cfg.Namespace.MaxParallel, - RunnerNamePrefix: cfg.Runner.NamePrefix, - Executor: cfg.Runner.Executor, - Network: cfg.Namespace.Network, - Logger: logger, + BinaryPath: cfg.Namespace.NSCBinary, + ComputeBaseURL: cfg.Namespace.ComputeBaseURL, + DefaultImage: cfg.Namespace.Image, + DefaultMachine: cfg.Namespace.MachineType, + MacosBaseImageID: cfg.Namespace.MacosBaseImageID, + MacosMachineArch: cfg.Namespace.MacosMachineArch, + DefaultDuration: cfg.Namespace.Duration.Duration, + WorkDir: cfg.Namespace.WorkDir, + MaxParallel: cfg.Namespace.MaxParallel, + LinuxCachePath: cfg.Namespace.LinuxCachePath, + LinuxCacheVolumes: toNSCCacheVolumes(cfg.Namespace.LinuxCacheVolumes), + MacosCachePath: 
cfg.Namespace.MacosCachePath, + MacosCacheVolumes: toNSCCacheVolumes(cfg.Namespace.MacosCacheVolumes), + RunnerNamePrefix: cfg.Runner.NamePrefix, + Executor: cfg.Runner.Executor, + Network: cfg.Namespace.Network, + Logger: logger, }) if err != nil { logger.Error("failed to create dispatcher", "error", err) @@ -88,3 +92,15 @@ func main() { defer cancel() _ = srv.Shutdown(ctx) } + +func toNSCCacheVolumes(volumes []config.CacheVolumeConfig) []nsc.CacheVolume { + out := make([]nsc.CacheVolume, 0, len(volumes)) + for _, volume := range volumes { + out = append(out, nsc.CacheVolume{ + Tag: volume.Tag, + MountPoint: volume.MountPoint, + SizeGb: volume.SizeGb, + }) + } + return out +} diff --git a/services/forgejo-nsc/config.example.yaml b/services/forgejo-nsc/config.example.yaml index 5dc7551..fcd56ec 100644 --- a/services/forgejo-nsc/config.example.yaml +++ b/services/forgejo-nsc/config.example.yaml @@ -21,6 +21,19 @@ namespace: workdir: "/var/lib/forgejo-runner" max_parallel: 4 network: "" + linux_cache_path: "/var/cache/burrow" + linux_cache_volumes: + - tag: "burrow-forgejo-linux-nix" + mount_point: "/nix" + size_gb: 60 + - tag: "burrow-forgejo-linux-cache" + mount_point: "/var/cache/burrow" + size_gb: 40 + macos_cache_path: "/Users/runner/.cache/burrow" + macos_cache_volumes: + - tag: "burrow-forgejo-macos-cache" + mount_point: "/Users/runner/.cache/burrow" + size_gb: 60 runner: name_prefix: "nscloud-" diff --git a/services/forgejo-nsc/deploy/dispatcher.yaml b/services/forgejo-nsc/deploy/dispatcher.yaml index 1e45d39..b906b75 100644 --- a/services/forgejo-nsc/deploy/dispatcher.yaml +++ b/services/forgejo-nsc/deploy/dispatcher.yaml @@ -31,6 +31,19 @@ namespace: instance_tags: - "burrow" network: "" + linux_cache_path: "/var/cache/burrow" + linux_cache_volumes: + - tag: "burrow-forgejo-linux-nix" + mount_point: "/nix" + size_gb: 60 + - tag: "burrow-forgejo-linux-cache" + mount_point: "/var/cache/burrow" + size_gb: 40 + macos_cache_path: "/Users/runner/.cache/burrow" + 
macos_cache_volumes: + - tag: "burrow-forgejo-macos-cache" + mount_point: "/Users/runner/.cache/burrow" + size_gb: 60 runner: name_prefix: "nscloud-" diff --git a/services/forgejo-nsc/internal/config/config.go b/services/forgejo-nsc/internal/config/config.go index 264cbd0..6a93e69 100644 --- a/services/forgejo-nsc/internal/config/config.go +++ b/services/forgejo-nsc/internal/config/config.go @@ -49,8 +49,14 @@ type Config struct { Runner RunnerConfig `yaml:"runner"` } +type CacheVolumeConfig struct { + Tag string `yaml:"tag"` + MountPoint string `yaml:"mount_point"` + SizeGb int64 `yaml:"size_gb"` +} + type ForgejoConfig struct { - BaseURL string `yaml:"base_url"` + BaseURL string `yaml:"base_url"` // InstanceURL is the URL runners should use when registering with Forgejo. // This must be reachable from the spawned runner (e.g. the public URL like // https://git.burrow.net), and may differ from BaseURL (which can be a local @@ -80,15 +86,19 @@ type NamespaceConfig struct { // MacosBaseImageID selects which macOS base image to use (e.g. "tahoe"). MacosBaseImageID string `yaml:"macos_base_image_id"` // MacosMachineArch is the architecture used for macOS instances (typically "arm64"). 
- MacosMachineArch string `yaml:"macos_machine_arch"` - Duration Duration `yaml:"duration"` - WorkDir string `yaml:"workdir"` - MaxParallel int64 `yaml:"max_parallel"` - Environment []string `yaml:"environment"` - AllowLabels []string `yaml:"allow_labels"` - AllowScopes []string `yaml:"allow_scopes"` - Network string `yaml:"network"` - InstanceTags []string `yaml:"instance_tags"` + MacosMachineArch string `yaml:"macos_machine_arch"` + Duration Duration `yaml:"duration"` + WorkDir string `yaml:"workdir"` + MaxParallel int64 `yaml:"max_parallel"` + Environment []string `yaml:"environment"` + AllowLabels []string `yaml:"allow_labels"` + AllowScopes []string `yaml:"allow_scopes"` + Network string `yaml:"network"` + InstanceTags []string `yaml:"instance_tags"` + LinuxCachePath string `yaml:"linux_cache_path"` + LinuxCacheVolumes []CacheVolumeConfig `yaml:"linux_cache_volumes"` + MacosCachePath string `yaml:"macos_cache_path"` + MacosCacheVolumes []CacheVolumeConfig `yaml:"macos_cache_volumes"` } type RunnerConfig struct { @@ -160,6 +170,46 @@ func (c *Config) Validate() error { if c.Namespace.MaxParallel <= 0 { c.Namespace.MaxParallel = 4 } + if c.Namespace.LinuxCachePath == "" { + c.Namespace.LinuxCachePath = "/var/cache/burrow" + } + if len(c.Namespace.LinuxCacheVolumes) == 0 { + c.Namespace.LinuxCacheVolumes = []CacheVolumeConfig{ + { + Tag: "burrow-forgejo-linux-nix", + MountPoint: "/nix", + SizeGb: 60, + }, + { + Tag: "burrow-forgejo-linux-cache", + MountPoint: c.Namespace.LinuxCachePath, + SizeGb: 40, + }, + } + } + if c.Namespace.MacosCachePath == "" { + c.Namespace.MacosCachePath = "/Users/runner/.cache/burrow" + } + if len(c.Namespace.MacosCacheVolumes) == 0 { + c.Namespace.MacosCacheVolumes = []CacheVolumeConfig{ + { + Tag: "burrow-forgejo-macos-cache", + MountPoint: c.Namespace.MacosCachePath, + SizeGb: 60, + }, + } + } + for _, volume := range append(append([]CacheVolumeConfig{}, c.Namespace.LinuxCacheVolumes...), c.Namespace.MacosCacheVolumes...) 
{ + if strings.TrimSpace(volume.Tag) == "" { + return errors.New("namespace cache volume tag is required") + } + if strings.TrimSpace(volume.MountPoint) == "" { + return fmt.Errorf("namespace cache volume %q mount_point is required", volume.Tag) + } + if volume.SizeGb <= 0 { + return fmt.Errorf("namespace cache volume %q size_gb must be positive", volume.Tag) + } + } return nil } diff --git a/services/forgejo-nsc/internal/nsc/dispatcher.go b/services/forgejo-nsc/internal/nsc/dispatcher.go index 3db2481..7fa6d62 100644 --- a/services/forgejo-nsc/internal/nsc/dispatcher.go +++ b/services/forgejo-nsc/internal/nsc/dispatcher.go @@ -17,19 +17,29 @@ import ( ) type Options struct { - BinaryPath string - DefaultImage string - DefaultMachine string - DefaultDuration time.Duration - WorkDir string - MaxParallel int64 - RunnerNamePrefix string - Executor string - Network string - ComputeBaseURL string - MacosBaseImageID string - MacosMachineArch string - Logger *slog.Logger + BinaryPath string + DefaultImage string + DefaultMachine string + DefaultDuration time.Duration + WorkDir string + MaxParallel int64 + RunnerNamePrefix string + Executor string + Network string + ComputeBaseURL string + MacosBaseImageID string + MacosMachineArch string + LinuxCachePath string + LinuxCacheVolumes []CacheVolume + MacosCachePath string + MacosCacheVolumes []CacheVolume + Logger *slog.Logger +} + +type CacheVolume struct { + Tag string + MountPoint string + SizeGb int64 } type LaunchRequest struct { @@ -73,6 +83,12 @@ func NewDispatcher(opts Options) (*Dispatcher, error) { if opts.DefaultDuration == 0 { opts.DefaultDuration = 30 * time.Minute } + if opts.LinuxCachePath == "" { + opts.LinuxCachePath = "/var/cache/burrow" + } + if opts.MacosCachePath == "" { + opts.MacosCachePath = "/Users/runner/.cache/burrow" + } logger := opts.Logger if logger == nil { logger = slog.New(slog.NewTextHandler(io.Discard, nil)) @@ -104,6 +120,9 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req 
LaunchRequest) (strin } machineType := choose(req.MachineType, d.opts.DefaultMachine) image := choose(req.Image, d.opts.DefaultImage) + if req.ExtraEnv == nil { + req.ExtraEnv = make(map[string]string) + } if hasWindowsLabel(req.Labels) { if err := d.launchWindowsRunnerViaWinRM(ctx, runnerName, req, duration, machineType); err != nil { @@ -113,6 +132,9 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin } if hasMacOSLabel(req.Labels) { + if _, ok := req.ExtraEnv["NSC_CACHE_PATH"]; !ok { + req.ExtraEnv["NSC_CACHE_PATH"] = d.opts.MacosCachePath + } // Compute macOS shapes differ from the Linux "run" defaults. If the request // didn't specify a machine type, ensure we pick a macOS-valid default. if machineType == "" || machineType == d.opts.DefaultMachine { @@ -129,6 +151,9 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin } return runnerName, nil } + if _, ok := req.ExtraEnv["NSC_CACHE_PATH"]; !ok { + req.ExtraEnv["NSC_CACHE_PATH"] = d.opts.LinuxCachePath + } env := map[string]string{ "FORGEJO_INSTANCE_URL": req.InstanceURL, @@ -140,9 +165,6 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin for k, v := range req.ExtraEnv { env[k] = v } - if _, ok := env["NSC_CACHE_PATH"]; !ok { - env["NSC_CACHE_PATH"] = "/nix/store" - } script := d.bootstrapScript() args := []string{ @@ -161,6 +183,7 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin if d.opts.Network != "" { args = append(args, "--network", d.opts.Network) } + args = appendVolumeArgs(args, d.opts.LinuxCacheVolumes) for key, value := range env { if value == "" { continue @@ -370,6 +393,16 @@ func choose(values ...string) string { return "" } +func appendVolumeArgs(args []string, volumes []CacheVolume) []string { + for _, volume := range volumes { + if strings.TrimSpace(volume.Tag) == "" || strings.TrimSpace(volume.MountPoint) == "" || volume.SizeGb <= 0 { + continue + } + args = 
append(args, "--volume", fmt.Sprintf("cache:%s:%s:%d", volume.Tag, volume.MountPoint, volume.SizeGb)) + } + return args +} + func (d *Dispatcher) bootstrapScript() string { var builder strings.Builder builder.WriteString(`set -euo pipefail diff --git a/services/forgejo-nsc/internal/nsc/macos.go b/services/forgejo-nsc/internal/nsc/macos.go index 9084584..e5deee7 100644 --- a/services/forgejo-nsc/internal/nsc/macos.go +++ b/services/forgejo-nsc/internal/nsc/macos.go @@ -206,12 +206,8 @@ func (d *Dispatcher) launchMacOSRunner(ctx context.Context, runnerName string, r for k, v := range req.ExtraEnv { env[k] = v } - // Best-effort caching: workflows call Scripts/nscloud-cache.sh, which is a - // no-op unless NSC_CACHE_PATH is set. This may still be skipped if spacectl - // lacks credentials, but setting the path is harmless and keeps behavior - // consistent across macOS / Linux runners. if _, ok := env["NSC_CACHE_PATH"]; !ok { - env["NSC_CACHE_PATH"] = "/Users/runner/.cache/nscloud" + env["NSC_CACHE_PATH"] = d.opts.MacosCachePath } deadline := timestamppb.New(time.Now().Add(ttl)) @@ -243,10 +239,15 @@ func (d *Dispatcher) launchMacOSRunner(ctx context.Context, runnerName string, r }, }, } + experimental := &computev1beta.CreateInstanceRequest_ExperimentalFeatures{} if imageID := macosComputeBaseImageID(d.opts.MacosBaseImageID); imageID != "" { - createReq.Experimental = &computev1beta.CreateInstanceRequest_ExperimentalFeatures{ - MacosBaseImageId: imageID, - } + experimental.MacosBaseImageId = imageID + } + if volumes := computeCacheVolumeRequests(d.opts.MacosCacheVolumes); len(volumes) > 0 { + experimental.Volumes = volumes + } + if experimental.MacosBaseImageId != "" || len(experimental.Volumes) > 0 { + createReq.Experimental = experimental } d.log.Info("launching Namespace macos runner", @@ -572,6 +573,22 @@ func (d *Dispatcher) destroyComputeInstance(ctx context.Context, client computev d.log.Info("macos runner destroyed", "runner", runnerName, "instance", 
instanceID) } +func computeCacheVolumeRequests(volumes []CacheVolume) []*computev1beta.VolumeRequest { + var out []*computev1beta.VolumeRequest + for _, volume := range volumes { + if strings.TrimSpace(volume.Tag) == "" || strings.TrimSpace(volume.MountPoint) == "" || volume.SizeGb <= 0 { + continue + } + out = append(out, &computev1beta.VolumeRequest{ + MountPoint: volume.MountPoint, + Tag: volume.Tag, + SizeMb: volume.SizeGb * 1024, + PersistencyKind: computev1beta.VolumeRequest_CACHE, + }) + } + return out +} + func macosBootstrapScript() string { // Keep this script self-contained: it runs on a fresh macOS VM base image. var b strings.Builder diff --git a/services/forgejo-nsc/internal/nsc/macos_nsc.go b/services/forgejo-nsc/internal/nsc/macos_nsc.go index e7b8023..26cbab0 100644 --- a/services/forgejo-nsc/internal/nsc/macos_nsc.go +++ b/services/forgejo-nsc/internal/nsc/macos_nsc.go @@ -144,6 +144,7 @@ func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName str "--wait_timeout", a.waitTimeout.String(), } args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL) + args = appendVolumeArgs(args, d.opts.MacosCacheVolumes) createCtx, cancel := context.WithTimeout(ctx, a.createTimeout) defer cancel() From 1a3d59d25f1b0f83fbf6ae0d283247e8b5f70616 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 00:08:07 -0700 Subject: [PATCH 18/50] Fix Apple rustup cache bootstrap --- .forgejo/workflows/build-apple.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.forgejo/workflows/build-apple.yml b/.forgejo/workflows/build-apple.yml index d55957e..c2b1502 100644 --- a/.forgejo/workflows/build-apple.yml +++ b/.forgejo/workflows/build-apple.yml @@ -100,8 +100,9 @@ jobs: rustup default 1.85.0 fi + mkdir -p "${CARGO_HOME}/bin" echo "${CARGO_HOME}/bin" >> "${GITHUB_PATH}" - . 
"${CARGO_HOME}/env" + export PATH="${CARGO_HOME}/bin:${PATH}" targets='${{ matrix.rust-targets }}' for target in ${targets//,/ }; do From 15e897d262fa91576eb12601fec64806643fadd4 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 00:10:27 -0700 Subject: [PATCH 19/50] Fix macOS runner home permissions --- services/forgejo-nsc/internal/nsc/macos.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/services/forgejo-nsc/internal/nsc/macos.go b/services/forgejo-nsc/internal/nsc/macos.go index e5deee7..bdcf503 100644 --- a/services/forgejo-nsc/internal/nsc/macos.go +++ b/services/forgejo-nsc/internal/nsc/macos.go @@ -597,6 +597,8 @@ func macosBootstrapScript() string { workdir="${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}" mkdir -p "${workdir}" cd "${workdir}" +export HOME="${workdir}/home" +mkdir -p "${HOME}/.cache/act" export PATH="/usr/local/bin:/opt/homebrew/bin:/usr/bin:/bin:/usr/sbin:/sbin:${PATH}" From 7039bf5aadaf97b0d3774ed82f9ad8a9021ca438 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 00:13:11 -0700 Subject: [PATCH 20/50] Provision Forgejo act cache on macOS --- services/forgejo-nsc/internal/nsc/macos.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/services/forgejo-nsc/internal/nsc/macos.go b/services/forgejo-nsc/internal/nsc/macos.go index bdcf503..b87e954 100644 --- a/services/forgejo-nsc/internal/nsc/macos.go +++ b/services/forgejo-nsc/internal/nsc/macos.go @@ -597,8 +597,9 @@ func macosBootstrapScript() string { workdir="${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}" mkdir -p "${workdir}" cd "${workdir}" -export HOME="${workdir}/home" -mkdir -p "${HOME}/.cache/act" +if ! 
mkdir -p "/Users/runner/.cache/act" 2>/dev/null; then + sudo install -d -m 0775 -o "$(id -un)" -g "$(id -gn)" /Users/runner/.cache /Users/runner/.cache/act +fi export PATH="/usr/local/bin:/opt/homebrew/bin:/usr/bin:/bin:/usr/sbin:/sbin:${PATH}" From 03415e579b14d9555634114b661f74d222cb8f21 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 00:28:18 -0700 Subject: [PATCH 21/50] Rotate operator secrets into agenix and deepen caches --- .forgejo/workflows/build-apple.yml | 4 + .forgejo/workflows/build-rust.yml | 3 +- .gitignore | 1 + .../NetworkExtension/libburrow/build-rust.sh | 7 +- Makefile | 7 +- Scripts/_burrow-secrets.sh | 78 ++++++++++++++++++ Scripts/bootstrap-forge-intake.sh | 49 +++++++++--- Scripts/check-forge-host.sh | 24 ++++-- Scripts/cloudflare-upsert-a-record.sh | 27 +++++-- Scripts/forge-deploy.sh | 29 ++++--- Scripts/hcloud-upload-nixos-image.sh | 24 +++++- Scripts/hetzner-forge.sh | 24 ++++-- Scripts/nsc-build-and-upload-image.sh | 24 +++++- Scripts/provision-forgejo-nsc.sh | 79 ++++++++++++++----- Scripts/sync-forgejo-nsc-config.sh | 56 ++++++++++--- Tools/forwardemail-custom-s3.sh | 52 +++++++++--- Tools/forwardemail-hetzner-storage.py | 36 ++++++++- docs/FORWARDEMAIL.md | 19 +++-- nixos/README.md | 4 +- secrets/README.md | 8 ++ secrets/cloudflare/api-token.age | 7 ++ secrets/forwardemail/api-token.age | 7 ++ secrets/forwardemail/hetzner-s3-secret.age | 7 ++ secrets/forwardemail/hetzner-s3-user.age | 7 ++ secrets/hetzner/api-token.age | 7 ++ secrets/secrets.nix | 6 ++ services/forgejo-nsc/README.md | 18 ++--- services/forgejo-nsc/internal/nsc/macos.go | 38 ++++++--- 28 files changed, 526 insertions(+), 126 deletions(-) create mode 100644 Scripts/_burrow-secrets.sh create mode 100644 secrets/cloudflare/api-token.age create mode 100644 secrets/forwardemail/api-token.age create mode 100644 secrets/forwardemail/hetzner-s3-secret.age create mode 100644 secrets/forwardemail/hetzner-s3-user.age create mode 100644 
secrets/hetzner/api-token.age diff --git a/.forgejo/workflows/build-apple.yml b/.forgejo/workflows/build-apple.yml index c2b1502..460b6b8 100644 --- a/.forgejo/workflows/build-apple.yml +++ b/.forgejo/workflows/build-apple.yml @@ -75,14 +75,18 @@ jobs: cache_root="${NSC_CACHE_PATH:-${HOME}/.cache/burrow}" mkdir -p \ "${cache_root}/cargo" \ + "${cache_root}/cargo-target/${{ matrix.cache-id }}" \ "${cache_root}/rustup" \ "${cache_root}/sccache" \ + "${cache_root}/homebrew" \ "${cache_root}/apple/PackageCache" \ "${cache_root}/apple/SourcePackages" \ "${cache_root}/apple/DerivedData/${{ matrix.cache-id }}" echo "CARGO_HOME=${cache_root}/cargo" >> "${GITHUB_ENV}" + echo "CARGO_TARGET_DIR=${cache_root}/cargo-target/${{ matrix.cache-id }}" >> "${GITHUB_ENV}" echo "RUSTUP_HOME=${cache_root}/rustup" >> "${GITHUB_ENV}" echo "SCCACHE_DIR=${cache_root}/sccache" >> "${GITHUB_ENV}" + echo "HOMEBREW_CACHE=${cache_root}/homebrew" >> "${GITHUB_ENV}" echo "APPLE_PACKAGE_CACHE=${cache_root}/apple/PackageCache" >> "${GITHUB_ENV}" echo "APPLE_SOURCE_PACKAGES=${cache_root}/apple/SourcePackages" >> "${GITHUB_ENV}" echo "APPLE_DERIVED_DATA=${cache_root}/apple/DerivedData/${{ matrix.cache-id }}" >> "${GITHUB_ENV}" diff --git a/.forgejo/workflows/build-rust.yml b/.forgejo/workflows/build-rust.yml index 7fd2667..d70dcf0 100644 --- a/.forgejo/workflows/build-rust.yml +++ b/.forgejo/workflows/build-rust.yml @@ -33,9 +33,10 @@ jobs: run: | set -euo pipefail cache_root="${HOME}/.cache/burrow" - mkdir -p "${cache_root}/cargo" "${cache_root}/sccache" + mkdir -p "${cache_root}/cargo" "${cache_root}/sccache" "${cache_root}/cargo-target/build-rust" echo "CARGO_HOME=${cache_root}/cargo" >> "${GITHUB_ENV}" echo "SCCACHE_DIR=${cache_root}/sccache" >> "${GITHUB_ENV}" + echo "CARGO_TARGET_DIR=${cache_root}/cargo-target/build-rust" >> "${GITHUB_ENV}" - name: Test shell: bash diff --git a/.gitignore b/.gitignore index 3c80ef9..3ce64aa 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ target/ 
.idea/ tmp/ +intake/ *.db *.sqlite3 diff --git a/Apple/NetworkExtension/libburrow/build-rust.sh b/Apple/NetworkExtension/libburrow/build-rust.sh index 258351c..031e6bc 100755 --- a/Apple/NetworkExtension/libburrow/build-rust.sh +++ b/Apple/NetworkExtension/libburrow/build-rust.sh @@ -74,16 +74,17 @@ CARGO_PATH="$(dirname $PROTOC):$CARGO_PATH" # Run cargo without the various environment variables set by Xcode. # Those variables can confuse cargo and the build scripts it runs. EXTRA_ENV=() -for VAR_NAME in HOME CARGO_HOME RUSTUP_HOME RUSTC_WRAPPER SCCACHE_DIR CARGO_INCREMENTAL; do +for VAR_NAME in HOME CARGO_HOME CARGO_TARGET_DIR RUSTUP_HOME RUSTC_WRAPPER SCCACHE_DIR CARGO_INCREMENTAL; do if [[ -n "${!VAR_NAME:-}" ]]; then EXTRA_ENV+=("${VAR_NAME}=${!VAR_NAME}") fi done -env -i PATH="$CARGO_PATH" PROTOC="$PROTOC" CARGO_TARGET_DIR="${CONFIGURATION_TEMP_DIR}/target" IPHONEOS_DEPLOYMENT_TARGET="$IPHONEOS_DEPLOYMENT_TARGET" MACOSX_DEPLOYMENT_TARGET="$MACOSX_DEPLOYMENT_TARGET" "${EXTRA_ENV[@]}" cargo build "${CARGO_ARGS[@]}" +EFFECTIVE_CARGO_TARGET_DIR="${CARGO_TARGET_DIR:-${CONFIGURATION_TEMP_DIR}/target}" +env -i PATH="$CARGO_PATH" PROTOC="$PROTOC" CARGO_TARGET_DIR="${EFFECTIVE_CARGO_TARGET_DIR}" IPHONEOS_DEPLOYMENT_TARGET="$IPHONEOS_DEPLOYMENT_TARGET" MACOSX_DEPLOYMENT_TARGET="$MACOSX_DEPLOYMENT_TARGET" "${EXTRA_ENV[@]}" cargo build "${CARGO_ARGS[@]}" mkdir -p "${BUILT_PRODUCTS_DIR}" # Use `lipo` to merge the architectures together into BUILT_PRODUCTS_DIR /usr/bin/xcrun --sdk $PLATFORM_NAME lipo \ - -create $(printf "${CONFIGURATION_TEMP_DIR}/target/%q/${CARGO_TARGET_SUBDIR}/libburrow.a " "${RUST_TARGETS[@]}") \ + -create $(printf "${EFFECTIVE_CARGO_TARGET_DIR}/%q/${CARGO_TARGET_SUBDIR}/libburrow.a " "${RUST_TARGETS[@]}") \ -output "${BUILT_PRODUCTS_DIR}/libburrow.a" diff --git a/Makefile b/Makefile index 1f15f36..6738052 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,12 @@ SECRETS := forgejo/admin-password \ forgejo/agent-ssh-key \ forgejo/nsc-token \ 
forgejo/nsc-dispatcher-config \ - forgejo/nsc-autoscaler-config + forgejo/nsc-autoscaler-config \ + cloudflare/api-token \ + hetzner/api-token \ + forwardemail/api-token \ + forwardemail/hetzner-s3-user \ + forwardemail/hetzner-s3-secret tun := $(shell ifconfig -l | sed 's/ /\n/g' | grep utun | tail -n 1) cargo_console := env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features -- diff --git a/Scripts/_burrow-secrets.sh b/Scripts/_burrow-secrets.sh new file mode 100644 index 0000000..2ecd282 --- /dev/null +++ b/Scripts/_burrow-secrets.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash +set -euo pipefail + +BURROW_SECRET_TMPFILES=() + +burrow_cleanup_secret_tmpfiles() { + local path + for path in "${BURROW_SECRET_TMPFILES[@]:-}"; do + [[ -n "${path}" ]] && rm -f "${path}" >/dev/null 2>&1 || true + done + BURROW_SECRET_TMPFILES=() +} + +burrow_decrypt_age_secret_to_temp() { + local repo_root="$1" + local secret_path="$2" + local tmp_file + + if [[ ! -f "${secret_path}" ]]; then + echo "age secret not found: ${secret_path}" >&2 + return 1 + fi + + tmp_file="$(mktemp "${TMPDIR:-/tmp}/burrow-secret.XXXXXX")" + nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -d "${secret_path}" > "${tmp_file}" + chmod 600 "${tmp_file}" + BURROW_SECRET_TMPFILES+=("${tmp_file}") + printf '%s\n' "${tmp_file}" +} + +burrow_resolve_secret_file() { + local repo_root="$1" + local explicit_path="$2" + local intake_path="$3" + local age_path="$4" + local fallback_path="${5:-}" + + if [[ -n "${explicit_path}" ]]; then + if [[ ! 
-s "${explicit_path}" ]]; then + echo "required file missing or empty: ${explicit_path}" >&2 + return 1 + fi + printf '%s\n' "${explicit_path}" + return 0 + fi + + if [[ -n "${intake_path}" && -s "${intake_path}" ]]; then + printf '%s\n' "${intake_path}" + return 0 + fi + + if [[ -n "${age_path}" && -f "${age_path}" ]]; then + burrow_decrypt_age_secret_to_temp "${repo_root}" "${age_path}" + return 0 + fi + + if [[ -n "${fallback_path}" && -s "${fallback_path}" ]]; then + printf '%s\n' "${fallback_path}" + return 0 + fi + + return 1 +} + +burrow_encrypt_secret_from_file() { + local repo_root="$1" + local secret_path="$2" + local source_path="$3" + + if [[ ! -s "${source_path}" ]]; then + echo "secret source missing or empty: ${source_path}" >&2 + return 1 + fi + + SECRET_SOURCE_FILE="${source_path}" \ + EDITOR="${repo_root}/Scripts/agenix-load-file.sh" \ + nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -e "${secret_path}" +} diff --git a/Scripts/bootstrap-forge-intake.sh b/Scripts/bootstrap-forge-intake.sh index 0cc1d91..b927083 100644 --- a/Scripts/bootstrap-forge-intake.sh +++ b/Scripts/bootstrap-forge-intake.sh @@ -3,6 +3,8 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" +# shellcheck source=Scripts/_burrow-secrets.sh +source "${SCRIPT_DIR}/_burrow-secrets.sh" usage() { cat <<'EOF' @@ -10,27 +12,33 @@ Usage: Scripts/bootstrap-forge-intake.sh [options] Copy the minimum Burrow forge bootstrap secrets onto the target host under /var/lib/burrow/intake with the ownership expected by the NixOS services. +Legacy path only: the current forge runtime consumes agenix secrets directly. 
Options: --host SSH target (default: root@git.burrow.net) --ssh-key SSH private key used to reach the host - (default: intake/agent_at_burrow_net_ed25519) + (default: secrets/forgejo/agent-ssh-key.age, then intake/) --password-file Forgejo admin bootstrap password file - (default: intake/forgejo_pass_contact_at_burrow_net.txt) + (default: secrets/forgejo/admin-password.age, then intake/) --agent-key-file Agent SSH private key copied for runner bootstrap - (default: intake/agent_at_burrow_net_ed25519) + (default: secrets/forgejo/agent-ssh-key.age, then intake/) --no-verify Skip remote ls/stat verification after install -h, --help Show this help text EOF } HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" -SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" -PASSWORD_FILE="${BURROW_FORGE_PASSWORD_FILE:-${REPO_ROOT}/intake/forgejo_pass_contact_at_burrow_net.txt}" -AGENT_KEY_FILE="${BURROW_FORGE_AGENT_KEY_FILE:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" +SSH_KEY="${BURROW_FORGE_SSH_KEY:-}" +PASSWORD_FILE="${BURROW_FORGE_PASSWORD_FILE:-}" +AGENT_KEY_FILE="${BURROW_FORGE_AGENT_KEY_FILE:-}" KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" VERIFY=1 +cleanup() { + burrow_cleanup_secret_tmpfiles +} +trap cleanup EXIT + while [[ $# -gt 0 ]]; do case "$1" in --host) @@ -67,12 +75,29 @@ done mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")" -for path in "${SSH_KEY}" "${PASSWORD_FILE}" "${AGENT_KEY_FILE}"; do - if [[ ! 
-s "${path}" ]]; then - echo "required file missing or empty: ${path}" >&2 - exit 1 - fi -done +SSH_KEY="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "${SSH_KEY}" \ + "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \ + "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \ + "${HOME}/.ssh/agent_at_burrow_net_ed25519" +)" +PASSWORD_FILE="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "${PASSWORD_FILE}" \ + "${REPO_ROOT}/intake/forgejo_pass_contact_at_burrow_net.txt" \ + "${REPO_ROOT}/secrets/forgejo/admin-password.age" +)" +AGENT_KEY_FILE="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "${AGENT_KEY_FILE}" \ + "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \ + "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \ + "${HOME}/.ssh/agent_at_burrow_net_ed25519" +)" ssh_opts=( -i "${SSH_KEY}" diff --git a/Scripts/check-forge-host.sh b/Scripts/check-forge-host.sh index ddfb83a..05ddeca 100755 --- a/Scripts/check-forge-host.sh +++ b/Scripts/check-forge-host.sh @@ -3,6 +3,8 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" +# shellcheck source=Scripts/_burrow-secrets.sh +source "${SCRIPT_DIR}/_burrow-secrets.sh" usage() { cat <<'EOF' @@ -12,17 +14,22 @@ Run a post-boot verification pass against the Burrow forge host. 
Options: --host SSH target (default: root@git.burrow.net) - --ssh-key SSH private key (default: intake/agent_at_burrow_net_ed25519) + --ssh-key SSH private key (default: secrets/forgejo/agent-ssh-key.age, then intake/) --expect-nsc Fail if forgejo-nsc services are not active -h, --help Show this help text EOF } HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" -SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" +SSH_KEY="${BURROW_FORGE_SSH_KEY:-}" KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" EXPECT_NSC=0 +cleanup() { + burrow_cleanup_secret_tmpfiles +} +trap cleanup EXIT + while [[ $# -gt 0 ]]; do case "$1" in --host) @@ -51,10 +58,17 @@ done mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")" -if [[ ! -f "${SSH_KEY}" ]]; then - echo "forge SSH key not found: ${SSH_KEY}" >&2 +SSH_KEY="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "${SSH_KEY}" \ + "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \ + "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \ + "${HOME}/.ssh/agent_at_burrow_net_ed25519" +)" || { + echo "forge SSH key could not be resolved" >&2 exit 1 -fi +} ssh \ -i "${SSH_KEY}" \ diff --git a/Scripts/cloudflare-upsert-a-record.sh b/Scripts/cloudflare-upsert-a-record.sh index 88745af..af4cef4 100755 --- a/Scripts/cloudflare-upsert-a-record.sh +++ b/Scripts/cloudflare-upsert-a-record.sh @@ -1,6 +1,11 @@ #!/usr/bin/env bash set -euo pipefail +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" +# shellcheck source=Scripts/_burrow-secrets.sh +source "${SCRIPT_DIR}/_burrow-secrets.sh" + usage() { cat <<'EOF' Usage: Scripts/cloudflare-upsert-a-record.sh --zone --name --ipv4
[options] @@ -13,7 +18,7 @@ Options: --name Fully-qualified DNS record name --ipv4
IPv4 address for the A record --token-file Cloudflare API token file - default: intake/cloudflare-token.txt + default: secrets/cloudflare/api-token.age, then intake/cloudflare-token.txt --ttl Record TTL, or auto default: auto --proxied Whether to proxy through Cloudflare @@ -25,10 +30,15 @@ EOF ZONE_NAME="" RECORD_NAME="" IPV4="" -TOKEN_FILE="intake/cloudflare-token.txt" +TOKEN_FILE="${CLOUDFLARE_TOKEN_FILE:-}" TTL_VALUE="auto" PROXIED="false" +cleanup() { + burrow_cleanup_secret_tmpfiles +} +trap cleanup EXIT + while [[ $# -gt 0 ]]; do case "$1" in --zone) @@ -71,11 +81,16 @@ if [[ -z "${ZONE_NAME}" || -z "${RECORD_NAME}" || -z "${IPV4}" ]]; then usage >&2 exit 2 fi - -if [[ ! -f "${TOKEN_FILE}" ]]; then - echo "Cloudflare token file not found: ${TOKEN_FILE}" >&2 +TOKEN_FILE="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "${TOKEN_FILE}" \ + "${REPO_ROOT}/intake/cloudflare-token.txt" \ + "${REPO_ROOT}/secrets/cloudflare/api-token.age" +)" || { + echo "Cloudflare token file could not be resolved" >&2 exit 1 -fi +} if [[ ! 
"${IPV4}" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then echo "Invalid IPv4 address: ${IPV4}" >&2 diff --git a/Scripts/forge-deploy.sh b/Scripts/forge-deploy.sh index 5c4b959..1a7eec7 100755 --- a/Scripts/forge-deploy.sh +++ b/Scripts/forge-deploy.sh @@ -5,6 +5,8 @@ SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" # shellcheck source=Scripts/_burrow-flake.sh source "${SCRIPT_DIR}/_burrow-flake.sh" +# shellcheck source=Scripts/_burrow-secrets.sh +source "${SCRIPT_DIR}/_burrow-secrets.sh" usage() { cat <<'EOF' @@ -18,7 +20,7 @@ Defaults: Environment: BURROW_FORGE_HOST root@git.burrow.net - BURROW_FORGE_SSH_KEY intake/agent_at_burrow_net_ed25519 + BURROW_FORGE_SSH_KEY explicit path, otherwise secrets/forgejo/agent-ssh-key.age EOF } @@ -28,6 +30,7 @@ ALLOW_DIRTY=0 BURROW_FLAKE_TMPDIRS=() cleanup() { + burrow_cleanup_secret_tmpfiles burrow_cleanup_flake_tmpdirs } trap cleanup EXIT @@ -71,21 +74,17 @@ if [[ ${ALLOW_DIRTY} -ne 1 ]] && [[ -n "$(git status --short)" ]]; then fi FORGE_HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" -FORGE_SSH_KEY="${BURROW_FORGE_SSH_KEY:-}" - -if [[ -z "${FORGE_SSH_KEY}" ]]; then - if [[ -f "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" ]]; then - FORGE_SSH_KEY="${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" - else - FORGE_SSH_KEY="${HOME}/.ssh/agent_at_burrow_net_ed25519" - fi -fi - -if [[ ! -f "${FORGE_SSH_KEY}" ]]; then - echo "Forge SSH key not found at ${FORGE_SSH_KEY}." >&2 - echo "Set BURROW_FORGE_SSH_KEY or place the agent key in intake/." >&2 +FORGE_SSH_KEY="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "${BURROW_FORGE_SSH_KEY:-}" \ + "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \ + "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \ + "${HOME}/.ssh/agent_at_burrow_net_ed25519" +)" || { + echo "Unable to resolve the forge SSH key." 
>&2 exit 1 -fi +} FORGE_KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" mkdir -p "$(dirname "${FORGE_KNOWN_HOSTS_FILE}")" diff --git a/Scripts/hcloud-upload-nixos-image.sh b/Scripts/hcloud-upload-nixos-image.sh index 2590519..36f1e3b 100755 --- a/Scripts/hcloud-upload-nixos-image.sh +++ b/Scripts/hcloud-upload-nixos-image.sh @@ -6,12 +6,14 @@ REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" # shellcheck source=Scripts/_burrow-flake.sh source "${SCRIPT_DIR}/_burrow-flake.sh" +# shellcheck source=Scripts/_burrow-secrets.sh +source "${SCRIPT_DIR}/_burrow-secrets.sh" DEFAULT_CONFIG="burrow-forge" DEFAULT_FLAKE="." DEFAULT_LOCATION="hel1" DEFAULT_ARCHITECTURE="x86" -DEFAULT_TOKEN_FILE="${REPO_ROOT}/intake/hetzner-api-token.txt" +DEFAULT_TOKEN_FILE="" CONFIG="${HCLOUD_IMAGE_CONFIG:-${DEFAULT_CONFIG}}" FLAKE="${HCLOUD_IMAGE_FLAKE:-${DEFAULT_FLAKE}}" @@ -30,6 +32,13 @@ NIX_BUILD_FLAGS=() BURROW_FLAKE_TMPDIRS=() LOCAL_STORE_DIR="" +cleanup() { + burrow_cleanup_secret_tmpfiles + burrow_cleanup_flake_tmpdirs +} + +trap cleanup EXIT + usage() { cat <<'EOF' Usage: Scripts/hcloud-upload-nixos-image.sh [options] @@ -42,7 +51,7 @@ Options: --location Hetzner location for the temporary upload server (default: hel1) --architecture CPU architecture of the image (default: x86) --server-type Hetzner server type for the temporary upload server - --token-file Hetzner API token file (default: intake/hetzner-api-token.txt) + --token-file Hetzner API token file (default: secrets/hetzner/api-token.age, then intake/hetzner-api-token.txt) --artifact-path Prebuilt raw image artifact to upload directly --output-hash Stable hash label for --artifact-path uploads --builder-spec Complete builders string passed to nix build @@ -125,6 +134,17 @@ while [[ $# -gt 0 ]]; do esac done +TOKEN_FILE="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "${TOKEN_FILE}" \ + "${REPO_ROOT}/intake/hetzner-api-token.txt" \ + "${REPO_ROOT}/secrets/hetzner/api-token.age" +)" 
|| { + echo "Hetzner API token file could not be resolved" >&2 + exit 1 +} + cleanup() { burrow_cleanup_flake_tmpdirs if [[ -n "${LOCAL_STORE_DIR}" && -d "${LOCAL_STORE_DIR}" ]]; then diff --git a/Scripts/hetzner-forge.sh b/Scripts/hetzner-forge.sh index cfce7eb..73e1953 100755 --- a/Scripts/hetzner-forge.sh +++ b/Scripts/hetzner-forge.sh @@ -2,6 +2,9 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" +# shellcheck source=Scripts/_burrow-secrets.sh +source "${SCRIPT_DIR}/_burrow-secrets.sh" usage() { cat <<'EOF' @@ -31,7 +34,7 @@ Options: -h, --help Show this help text. Environment: - HCLOUD_TOKEN_FILE Defaults to intake/hetzner-api-token.txt + HCLOUD_TOKEN_FILE Defaults to secrets/hetzner/api-token.age, then intake/hetzner-api-token.txt EOF } @@ -43,10 +46,15 @@ IMAGE="ubuntu-24.04" CONFIG="burrow-forge" FLAKE="." UPLOAD_LOCATION="" -TOKEN_FILE="${HCLOUD_TOKEN_FILE:-intake/hetzner-api-token.txt}" +TOKEN_FILE="${HCLOUD_TOKEN_FILE:-}" YES=0 SSH_KEYS=("contact@burrow.net" "agent@burrow.net") +cleanup() { + burrow_cleanup_secret_tmpfiles +} +trap cleanup EXIT + if [[ $# -gt 0 ]]; then case "$1" in show|create|delete|recreate|build-image|create-from-image|recreate-from-image) @@ -110,10 +118,16 @@ while [[ $# -gt 0 ]]; do esac done -if [[ ! -f "${TOKEN_FILE}" ]]; then - echo "Hetzner API token file not found: ${TOKEN_FILE}" >&2 +TOKEN_FILE="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "${TOKEN_FILE}" \ + "${REPO_ROOT}/intake/hetzner-api-token.txt" \ + "${REPO_ROOT}/secrets/hetzner/api-token.age" +)" || { + echo "Hetzner API token file could not be resolved" >&2 exit 1 -fi +} if [[ -z "${UPLOAD_LOCATION}" ]]; then UPLOAD_LOCATION="${LOCATION}" diff --git a/Scripts/nsc-build-and-upload-image.sh b/Scripts/nsc-build-and-upload-image.sh index 6fb99a9..27badb6 100755 --- a/Scripts/nsc-build-and-upload-image.sh +++ b/Scripts/nsc-build-and-upload-image.sh @@ -6,11 +6,13 @@ REPO_ROOT="$(cd "${SCRIPT_DIR}/.." 
&& pwd)" # shellcheck source=Scripts/_burrow-flake.sh source "${SCRIPT_DIR}/_burrow-flake.sh" +# shellcheck source=Scripts/_burrow-secrets.sh +source "${SCRIPT_DIR}/_burrow-secrets.sh" CONFIG="${HCLOUD_IMAGE_CONFIG:-burrow-forge}" FLAKE="${HCLOUD_IMAGE_FLAKE:-.}" LOCATION="${HCLOUD_IMAGE_LOCATION:-hel1}" -TOKEN_FILE="${HCLOUD_TOKEN_FILE:-${REPO_ROOT}/intake/hetzner-api-token.txt}" +TOKEN_FILE="${HCLOUD_TOKEN_FILE:-}" NSC_SSH_HOST="${NSC_SSH_HOST:-ssh.ord2.namespace.so}" NSC_MACHINE_TYPE="${NSC_MACHINE_TYPE:-linux/amd64:32x64}" NSC_BUILDER_DURATION="${NSC_BUILDER_DURATION:-4h}" @@ -26,6 +28,13 @@ EXTRA_LABELS=() BURROW_FLAKE_TMPDIRS=() BUILDER_ID="" +cleanup() { + burrow_cleanup_secret_tmpfiles + burrow_cleanup_flake_tmpdirs +} + +trap cleanup EXIT + usage() { cat <<'EOF' Usage: Scripts/nsc-build-and-upload-image.sh [options] @@ -37,7 +46,7 @@ Options: --config images.-raw output to build (default: burrow-forge) --flake Flake path to build from (default: .) --location Hetzner upload location (default: hel1) - --token-file Hetzner API token file (default: intake/hetzner-api-token.txt) + --token-file Hetzner API token file (default: secrets/hetzner/api-token.age, then intake/hetzner-api-token.txt) --machine-type Namespace machine type (default: linux/amd64:32x64) --ssh-host Namespace SSH endpoint (default: ssh.ord2.namespace.so) --duration Namespace builder lifetime (default: 4h) @@ -126,6 +135,17 @@ while [[ $# -gt 0 ]]; do esac done +TOKEN_FILE="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "${TOKEN_FILE}" \ + "${REPO_ROOT}/intake/hetzner-api-token.txt" \ + "${REPO_ROOT}/secrets/hetzner/api-token.age" +)" || { + echo "Hetzner API token file could not be resolved" >&2 + exit 1 +} + cleanup() { if [[ -n "${BUILDER_ID}" && -n "${NSC_BIN}" ]]; then "${NSC_BIN}" destroy "${BUILDER_ID}" --force >/dev/null 2>&1 || true diff --git a/Scripts/provision-forgejo-nsc.sh b/Scripts/provision-forgejo-nsc.sh index 9e6e4b5..c85b993 100755 --- 
a/Scripts/provision-forgejo-nsc.sh +++ b/Scripts/provision-forgejo-nsc.sh @@ -6,31 +6,35 @@ REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" # shellcheck source=Scripts/_burrow-flake.sh source "${SCRIPT_DIR}/_burrow-flake.sh" +# shellcheck source=Scripts/_burrow-secrets.sh +source "${SCRIPT_DIR}/_burrow-secrets.sh" usage() { cat <<'EOF' Usage: Scripts/provision-forgejo-nsc.sh [options] -Generate Burrow forgejo-nsc runtime inputs in intake/ and optionally refresh the -Namespace token from the currently logged-in namespace account. +Generate Burrow forgejo-nsc runtime inputs and refresh the authoritative +`secrets/forgejo/*.age` files, optionally refreshing the Namespace token from +the currently logged-in namespace account. Options: --host SSH target used to mint the Forgejo PAT. Default: root@git.burrow.net --ssh-key SSH private key for the forge host. - Default: intake/agent_at_burrow_net_ed25519 + Default: secrets/forgejo/agent-ssh-key.age, then intake/ --nsc-bin Override the nsc binary. - --no-refresh-token Reuse intake/forgejo_nsc_token.txt if it already exists. + --no-refresh-token Reuse the existing encrypted Namespace token if it already exists. --token-name Forgejo PAT name prefix (default: forgejo-nsc) --contact-user Forgejo username used for PAT creation (default: contact) --scope-owner Forgejo org/user owner for the default NSC scope (default: hackclub) --scope-name Forgejo repository name for the default NSC scope (default: burrow) + --write-intake Also write plaintext runtime inputs to intake/ for local debugging. -h, --help Show this help text. 
EOF } HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" -SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" +SSH_KEY="${BURROW_FORGE_SSH_KEY:-}" NSC_BIN="${NSC_BIN:-}" KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" REFRESH_TOKEN=1 @@ -39,8 +43,12 @@ CONTACT_USER="${FORGEJO_CONTACT_USER:-contact}" SCOPE_OWNER="${FORGEJO_SCOPE_OWNER:-hackclub}" SCOPE_NAME="${FORGEJO_SCOPE_NAME:-burrow}" BURROW_FLAKE_TMPDIRS=() +WRITE_INTAKE=0 +TMP_DIR="" cleanup() { + [[ -n "${TMP_DIR}" ]] && rm -rf "${TMP_DIR}" >/dev/null 2>&1 || true + burrow_cleanup_secret_tmpfiles burrow_cleanup_flake_tmpdirs } trap cleanup EXIT @@ -79,6 +87,10 @@ while [[ $# -gt 0 ]]; do SCOPE_NAME="${2:?missing value for --scope-name}" shift 2 ;; + --write-intake) + WRITE_INTAKE=1 + shift + ;; -h|--help) usage exit 0 @@ -97,13 +109,15 @@ burrow_require_cmd nix burrow_require_cmd ssh burrow_require_cmd python3 -if [[ ! -f "${SSH_KEY}" ]]; then - echo "forge SSH key not found: ${SSH_KEY}" >&2 - exit 1 -fi - -mkdir -p "${REPO_ROOT}/intake" -chmod 700 "${REPO_ROOT}/intake" +SSH_KEY="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "${SSH_KEY}" \ + "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \ + "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \ + "${HOME}/.ssh/agent_at_burrow_net_ed25519" +)" +TMP_DIR="$(mktemp -d "${TMPDIR:-/tmp}/burrow-forgejo-nsc.XXXXXX")" flake_ref="$(burrow_prepare_flake_ref "${REPO_ROOT}")" if [[ -z "${NSC_BIN}" ]]; then @@ -128,13 +142,16 @@ if [[ ! 
-x "${NSC_BIN}" ]]; then exit 1 fi -token_file="${REPO_ROOT}/intake/forgejo_nsc_token.txt" -dispatcher_out="${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml" -autoscaler_out="${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml" +token_file="${TMP_DIR}/forgejo_nsc_token.txt" +dispatcher_out="${TMP_DIR}/forgejo_nsc_dispatcher.yaml" +autoscaler_out="${TMP_DIR}/forgejo_nsc_autoscaler.yaml" dispatcher_src="${REPO_ROOT}/services/forgejo-nsc/deploy/dispatcher.yaml" autoscaler_src="${REPO_ROOT}/services/forgejo-nsc/deploy/autoscaler.yaml" +token_secret="${REPO_ROOT}/secrets/forgejo/nsc-token.age" +dispatcher_secret="${REPO_ROOT}/secrets/forgejo/nsc-dispatcher-config.age" +autoscaler_secret="${REPO_ROOT}/secrets/forgejo/nsc-autoscaler-config.age" -if [[ "${REFRESH_TOKEN}" -eq 1 || ! -s "${token_file}" ]]; then +if [[ "${REFRESH_TOKEN}" -eq 1 ]]; then "${NSC_BIN}" auth check-login --duration 20m >/dev/null raw_token_file="$(mktemp)" trap 'rm -f "${raw_token_file}"; cleanup' EXIT @@ -155,7 +172,13 @@ Path(os.environ["TOKEN_FILE"]).write_text( PY rm -f "${raw_token_file}" chmod 600 "${token_file}" -elif [[ -s "${token_file}" ]]; then +elif [[ -f "${token_secret}" ]]; then + burrow_decrypt_age_secret_to_temp "${REPO_ROOT}" "${token_secret}" > "${token_file}" +elif [[ -s "${REPO_ROOT}/intake/forgejo_nsc_token.txt" ]]; then + cp "${REPO_ROOT}/intake/forgejo_nsc_token.txt" "${token_file}" +fi + +if [[ -s "${token_file}" ]]; then TOKEN_FILE="${token_file}" python3 - <<'PY' import json import os @@ -271,6 +294,24 @@ PY chmod 600 "${dispatcher_out}" "${autoscaler_out}" -echo "Rendered intake/forgejo_nsc_token.txt, intake/forgejo_nsc_dispatcher.yaml, and intake/forgejo_nsc_autoscaler.yaml." -echo "Re-encrypt them into secrets/forgejo/{nsc-token,nsc-dispatcher-config,nsc-autoscaler-config}.age before deploying the forge host." 
+burrow_encrypt_secret_from_file "${REPO_ROOT}" "${token_secret}" "${token_file}" +burrow_encrypt_secret_from_file "${REPO_ROOT}" "${dispatcher_secret}" "${dispatcher_out}" +burrow_encrypt_secret_from_file "${REPO_ROOT}" "${autoscaler_secret}" "${autoscaler_out}" + +if [[ "${WRITE_INTAKE}" -eq 1 ]]; then + mkdir -p "${REPO_ROOT}/intake" + chmod 700 "${REPO_ROOT}/intake" + cp "${token_file}" "${REPO_ROOT}/intake/forgejo_nsc_token.txt" + cp "${dispatcher_out}" "${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml" + cp "${autoscaler_out}" "${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml" + chmod 600 \ + "${REPO_ROOT}/intake/forgejo_nsc_token.txt" \ + "${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml" \ + "${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml" +fi + +echo "Updated secrets/forgejo/{nsc-token,nsc-dispatcher-config,nsc-autoscaler-config}.age." +if [[ "${WRITE_INTAKE}" -eq 1 ]]; then + echo "Also refreshed intake/forgejo_nsc_{token,dispatcher,autoscaler} for local debugging." +fi echo "Minted Forgejo PAT ${token_name} for ${CONTACT_USER} on ${HOST}." diff --git a/Scripts/sync-forgejo-nsc-config.sh b/Scripts/sync-forgejo-nsc-config.sh index 77581f8..baa4960 100755 --- a/Scripts/sync-forgejo-nsc-config.sh +++ b/Scripts/sync-forgejo-nsc-config.sh @@ -5,12 +5,12 @@ usage() { cat <<'EOF' Usage: Scripts/sync-forgejo-nsc-config.sh [options] -Copy Burrow forgejo-nsc runtime inputs from intake/ onto the forge host and +Copy Burrow forgejo-nsc runtime inputs from age secrets or intake/ onto the forge host and restart the dispatcher/autoscaler units. Options: --host SSH target (default: root@git.burrow.net) - --ssh-key SSH private key (default: intake/agent_at_burrow_net_ed25519) + --ssh-key SSH private key (default: secrets/forgejo/agent-ssh-key.age, then intake/) --rotate-pat Re-render the intake files before syncing. --no-restart Copy files only. -h, --help Show this help text. 
@@ -19,12 +19,21 @@ EOF SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" +# shellcheck source=Scripts/_burrow-secrets.sh +source "${SCRIPT_DIR}/_burrow-secrets.sh" HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" -SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" +SSH_KEY="${BURROW_FORGE_SSH_KEY:-}" KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" ROTATE_PAT=0 NO_RESTART=0 +TMP_DIR="" + +cleanup() { + [[ -n "${TMP_DIR}" ]] && rm -rf "${TMP_DIR}" >/dev/null 2>&1 || true + burrow_cleanup_secret_tmpfiles +} +trap cleanup EXIT while [[ $# -gt 0 ]]; do case "$1" in @@ -68,18 +77,41 @@ burrow_require_cmd() { burrow_require_cmd ssh burrow_require_cmd scp -if [[ ! -f "${SSH_KEY}" ]]; then - echo "forge SSH key not found: ${SSH_KEY}" >&2 - exit 1 -fi +SSH_KEY="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "${SSH_KEY}" \ + "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \ + "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \ + "${HOME}/.ssh/agent_at_burrow_net_ed25519" +)" if [[ "${ROTATE_PAT}" -eq 1 ]]; then "${SCRIPT_DIR}/provision-forgejo-nsc.sh" --host "${HOST}" --ssh-key "${SSH_KEY}" fi -token_file="${REPO_ROOT}/intake/forgejo_nsc_token.txt" -dispatcher_file="${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml" -autoscaler_file="${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml" +TMP_DIR="$(mktemp -d "${TMPDIR:-/tmp}/burrow-nsc-sync.XXXXXX")" +token_file="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "" \ + "${REPO_ROOT}/intake/forgejo_nsc_token.txt" \ + "${REPO_ROOT}/secrets/forgejo/nsc-token.age" +)" +dispatcher_file="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "" \ + "${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml" \ + "${REPO_ROOT}/secrets/forgejo/nsc-dispatcher-config.age" +)" +autoscaler_file="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "" \ + "${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml" \ + 
"${REPO_ROOT}/secrets/forgejo/nsc-autoscaler-config.age" +)" for path in "${token_file}" "${dispatcher_file}" "${autoscaler_file}"; do if [[ ! -s "${path}" ]]; then @@ -96,12 +128,12 @@ ssh_opts=( ) remote_tmp="$(ssh "${ssh_opts[@]}" "${HOST}" "mktemp -d")" -cleanup() { +cleanup_remote() { if [[ -n "${remote_tmp:-}" ]]; then ssh "${ssh_opts[@]}" "${HOST}" "rm -rf '${remote_tmp}'" >/dev/null 2>&1 || true fi } -trap cleanup EXIT +trap 'cleanup_remote; cleanup' EXIT scp "${ssh_opts[@]}" \ "${token_file}" \ diff --git a/Tools/forwardemail-custom-s3.sh b/Tools/forwardemail-custom-s3.sh index 5f39ddd..4640bc8 100755 --- a/Tools/forwardemail-custom-s3.sh +++ b/Tools/forwardemail-custom-s3.sh @@ -3,17 +3,22 @@ set -euo pipefail umask 077 +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" +# shellcheck source=Scripts/_burrow-secrets.sh +source "${REPO_ROOT}/Scripts/_burrow-secrets.sh" + usage() { cat <<'EOF' Usage: Tools/forwardemail-custom-s3.sh \ --domain burrow.net \ - --api-token-file intake/forwardemail_api_token.txt \ + --api-token-file secrets/forwardemail/api-token.age \ --s3-endpoint https:// \ --s3-region \ --s3-bucket \ - --s3-access-key-file intake/hetzner-s3-user.txt \ - --s3-secret-key-file intake/hetzner-s3-secret.txt + --s3-access-key-file secrets/forwardemail/hetzner-s3-user.age \ + --s3-secret-key-file secrets/forwardemail/hetzner-s3-secret.age Options: --domain Forward Email domain to update. 
@@ -54,13 +59,18 @@ read_secret() { printf '%s' "$value" } +cleanup() { + burrow_cleanup_secret_tmpfiles +} +trap cleanup EXIT + domain="" -api_token_file="" +api_token_file="${FORWARDEMAIL_API_TOKEN_FILE:-}" s3_endpoint="" s3_region="" s3_bucket="" -s3_access_key_file="" -s3_secret_key_file="" +s3_access_key_file="${FORWARDEMAIL_S3_ACCESS_KEY_FILE:-}" +s3_secret_key_file="${FORWARDEMAIL_S3_SECRET_KEY_FILE:-}" test_only=false while [[ $# -gt 0 ]]; do @@ -108,16 +118,38 @@ while [[ $# -gt 0 ]]; do done [[ -n "$domain" ]] || fail "--domain is required" -[[ -n "$api_token_file" ]] || fail "--api-token-file is required" [[ -n "$s3_endpoint" || "$test_only" == true ]] || fail "--s3-endpoint is required unless --test-only is set" [[ -n "$s3_region" || "$test_only" == true ]] || fail "--s3-region is required unless --test-only is set" [[ -n "$s3_bucket" || "$test_only" == true ]] || fail "--s3-bucket is required unless --test-only is set" -[[ -n "$s3_access_key_file" || "$test_only" == true ]] || fail "--s3-access-key-file is required unless --test-only is set" -[[ -n "$s3_secret_key_file" || "$test_only" == true ]] || fail "--s3-secret-key-file is required unless --test-only is set" - +api_token_file="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "${api_token_file}" \ + "${REPO_ROOT}/intake/forwardemail_api_token.txt" \ + "${REPO_ROOT}/secrets/forwardemail/api-token.age" +)" || fail "unable to resolve Forward Email API token file" require_file "$api_token_file" api_token="$(read_secret "$api_token_file")" +if [[ "$test_only" != true ]]; then + s3_access_key_file="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "${s3_access_key_file}" \ + "${REPO_ROOT}/intake/hetzner-s3-user.txt" \ + "${REPO_ROOT}/secrets/forwardemail/hetzner-s3-user.age" + )" || fail "unable to resolve Hetzner S3 access key file" + s3_secret_key_file="$( + burrow_resolve_secret_file \ + "${REPO_ROOT}" \ + "${s3_secret_key_file}" \ + "${REPO_ROOT}/intake/hetzner-s3-secret.txt" \ + 
"${REPO_ROOT}/secrets/forwardemail/hetzner-s3-secret.age" + )" || fail "unable to resolve Hetzner S3 secret key file" + require_file "$s3_access_key_file" + require_file "$s3_secret_key_file" +fi + if [[ "$test_only" == false ]]; then require_file "$s3_access_key_file" require_file "$s3_secret_key_file" diff --git a/Tools/forwardemail-hetzner-storage.py b/Tools/forwardemail-hetzner-storage.py index 3a2a941..2c5ff82 100755 --- a/Tools/forwardemail-hetzner-storage.py +++ b/Tools/forwardemail-hetzner-storage.py @@ -6,6 +6,7 @@ import argparse import datetime as dt import hashlib import hmac +import subprocess import sys import textwrap from pathlib import Path @@ -13,11 +14,38 @@ from urllib.parse import urlencode, urlparse import requests +REPO_ROOT = Path(__file__).resolve().parent.parent + + +def default_secret_path(age_rel: str, intake_rel: str) -> str: + age_path = REPO_ROOT / age_rel + if age_path.exists(): + return str(age_path) + return intake_rel + def read_secret(path: str) -> str: - value = Path(path).read_text(encoding="utf-8").strip() + file_path = Path(path) + if not file_path.is_absolute(): + file_path = REPO_ROOT / file_path + if file_path.suffix == ".age": + value = subprocess.check_output( + [ + "nix", + "--extra-experimental-features", + "nix-command flakes", + "run", + f"{REPO_ROOT}#agenix", + "--", + "-d", + str(file_path), + ], + text=True, + ).strip() + else: + value = file_path.read_text(encoding="utf-8").strip() if not value: - raise SystemExit(f"error: empty secret file: {path}") + raise SystemExit(f"error: empty secret file: {file_path}") return value @@ -212,12 +240,12 @@ def parse_args() -> argparse.Namespace: parser.add_argument("--region", default="hel1", help="S3 region.") parser.add_argument( "--access-key-file", - default="intake/hetzner-s3-user.txt", + default=default_secret_path("secrets/forwardemail/hetzner-s3-user.age", "intake/hetzner-s3-user.txt"), help="File containing the S3 access key id.", ) parser.add_argument( 
"--secret-key-file", - default="intake/hetzner-s3-secret.txt", + default=default_secret_path("secrets/forwardemail/hetzner-s3-secret.age", "intake/hetzner-s3-secret.txt"), help="File containing the S3 secret key.", ) parser.add_argument( diff --git a/docs/FORWARDEMAIL.md b/docs/FORWARDEMAIL.md index 798f3e5..d7ffb34 100644 --- a/docs/FORWARDEMAIL.md +++ b/docs/FORWARDEMAIL.md @@ -26,11 +26,14 @@ Forward Email also documents these operational constraints: ## Burrow Secret Layout -Present in `intake/` today: +Authoritative secrets now live in: -- `intake/forwardemail_api_token.txt` -- `intake/hetzner-s3-user.txt` -- `intake/hetzner-s3-secret.txt` +- `secrets/forwardemail/api-token.age` +- `secrets/forwardemail/hetzner-s3-user.age` +- `secrets/forwardemail/hetzner-s3-secret.age` + +Legacy plaintext `intake/` files may still exist locally for debugging, but the +tooling now prefers the age-encrypted files above. - Hetzner public S3 endpoint for Forward Email: `https://hel1.your-objectstorage.com` - Hetzner object storage region: `hel1` - Hetzner bucket used for Forward Email backups: `burrow` @@ -69,12 +72,12 @@ Example: ```sh Tools/forwardemail-custom-s3.sh \ --domain burrow.net \ - --api-token-file intake/forwardemail_api_token.txt \ + --api-token-file secrets/forwardemail/api-token.age \ --s3-endpoint https://hel1.your-objectstorage.com \ --s3-region hel1 \ --s3-bucket burrow \ - --s3-access-key-file intake/hetzner-s3-user.txt \ - --s3-secret-key-file intake/hetzner-s3-secret.txt + --s3-access-key-file secrets/forwardemail/hetzner-s3-user.age \ + --s3-secret-key-file secrets/forwardemail/hetzner-s3-secret.age ``` Retest an existing domain configuration without rewriting it: @@ -82,7 +85,7 @@ Retest an existing domain configuration without rewriting it: ```sh Tools/forwardemail-custom-s3.sh \ --domain burrow.net \ - --api-token-file intake/forwardemail_api_token.txt \ + --api-token-file secrets/forwardemail/api-token.age \ --test-only ``` diff --git a/nixos/README.md 
b/nixos/README.md index aa0fff6..ebdb2dc 100644 --- a/nixos/README.md +++ b/nixos/README.md @@ -29,7 +29,7 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B 3. Encrypt the Forgejo admin password and agent SSH key into `secrets/forgejo/{admin-password,agent-ssh-key}.age`. 4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account from the agenix secret path. 5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent `. -6. Run `Scripts/provision-forgejo-nsc.sh` locally, re-encrypt the resulting NSC token + configs into `secrets/forgejo/*.age`, then deploy with `Scripts/forge-deploy.sh` so agenix updates the live forgejo-nsc runtime paths. +6. Run `Scripts/provision-forgejo-nsc.sh` locally to refresh `secrets/forgejo/{nsc-token,nsc-dispatcher-config,nsc-autoscaler-config}.age`, then deploy with `Scripts/forge-deploy.sh` so agenix updates the live forgejo-nsc runtime paths. 7. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME. 8. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace. 9. Configure Forward Email custom S3 backups for `burrow.net` and `burrow.rs` out-of-band with `Tools/forwardemail-custom-s3.sh`. @@ -43,7 +43,7 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B - `https://burrow.net` returns the root forge landing response - `https://git.burrow.net` returns the live Forgejo front door - `https://nsc-autoscaler.burrow.net` terminates TLS on Caddy and returns the expected application-level `404` for `/` -- The Cloudflare token currently in `intake/cloudflare-token.txt` is an account-scoped token: `POST /accounts//tokens/verify` succeeds, while `POST /user/tokens/verify` returns `Invalid API Token`. 
+- The Cloudflare token now lives in `secrets/cloudflare/api-token.age`; the current token is account-scoped: `POST /accounts//tokens/verify` succeeds, while `POST /user/tokens/verify` returns `Invalid API Token`. - `burrow.rs` still resolves publicly to a Vercel `DEPLOYMENT_NOT_FOUND` response. - Both domains publish Forward Email MX/TXT records. - Forward Email custom S3 is live on both domains against the Hetzner `burrow` bucket and the public regional endpoint `https://hel1.your-objectstorage.com`. diff --git a/secrets/README.md b/secrets/README.md index f7d67f5..706b374 100644 --- a/secrets/README.md +++ b/secrets/README.md @@ -9,11 +9,19 @@ For the Forgejo Namespace Cloud runtime: - `secrets/forgejo/nsc-token.age` - `secrets/forgejo/nsc-dispatcher-config.age` - `secrets/forgejo/nsc-autoscaler-config.age` +- `secrets/cloudflare/api-token.age` +- `secrets/hetzner/api-token.age` +- `secrets/forwardemail/api-token.age` +- `secrets/forwardemail/hetzner-s3-user.age` +- `secrets/forwardemail/hetzner-s3-secret.age` Use: - `make secret name=forgejo/nsc-token` - `make secret-file name=forgejo/agent-ssh-key file=/path/to/source` +- `Scripts/provision-forgejo-nsc.sh` to refresh the Forgejo Namespace token and runtime configs in `secrets/forgejo/*.age` +- `make secret-file name=cloudflare/api-token file=/path/to/cloudflare-token.txt` +- `make secret-file name=hetzner/api-token file=/path/to/hetzner-api-token.txt` The forge host decrypts these files at activation time and feeds the resulting paths into `services.burrow.forge`, `services.burrow.forgeRunner`, and diff --git a/secrets/cloudflare/api-token.age b/secrets/cloudflare/api-token.age new file mode 100644 index 0000000..caf8135 --- /dev/null +++ b/secrets/cloudflare/api-token.age @@ -0,0 +1,7 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q rX5+bmtxyHNgD+xNdHkB1fKdjUlrX275DaKTIHssYyA +KwbfKHx14QXRKBIGWwJDR8+DONyCdVssh8Ti8mdajyQ +-> ssh-ed25519 IrZmAg SOG/KvURA6PrxVhtZyIbazFGNQZyp0BR4MH+YInHGB4 
+79pENXhtLwlCQVnqkPEzoFgrXMmTqRsfs4ULluTevWA +--- gDA64KNbgN+eGHsQbIbKvhOg1T/Nqui6I/wy2MK8VWE +[|V{['E .{CǶ {ha \ No newline at end of file diff --git a/secrets/forwardemail/api-token.age b/secrets/forwardemail/api-token.age new file mode 100644 index 0000000..4d4ea15 --- /dev/null +++ b/secrets/forwardemail/api-token.age @@ -0,0 +1,7 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q ICuXuDsZiw1ShfUX9qjq8bCkeNdsbHWnG4e+3ZOC3jg +wswxqzQtf7jumSYB8ZeQzRBpMrBPVsUnWOYsmlDvpSs +-> ssh-ed25519 IrZmAg Xrvp/tXzXrHF1+NxgTZs9nNufyxtTq5NoYT5gaW6p1M +UWGlhZpV19CWMR9abp30vkQwZUMb/ylvInGEBlDdjjE +--- qhAaAECwhmAY4g3/e+Dz9RvL1MBQkHGWyoe1NkdTuqA +d?)<36F:a˝ ųֲ \ No newline at end of file diff --git a/secrets/forwardemail/hetzner-s3-secret.age b/secrets/forwardemail/hetzner-s3-secret.age new file mode 100644 index 0000000..55b5be3 --- /dev/null +++ b/secrets/forwardemail/hetzner-s3-secret.age @@ -0,0 +1,7 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q jwJzvmXUV5rCB6ku7ILLQUDInuQJL2gN+pjmX/ccXWE +q9OSyVhTuzERRRZZOCQzbwAwLOvOFIT/l9MxJ0V3UTo +-> ssh-ed25519 IrZmAg 8IutYG3CnNP9gw5fTFOaXm1Ue4i/cVs1apA88bNs9mo +daaf+6HoE3bmUEKR8/zu9jKTstVFCXqBlBxBdNVpQ90 +--- gRGNkWqoh+lZWpDG7yNLd4fjoX2jCyHTWbzImzoFGko +R@+fu9RBX2 [I \ No newline at end of file diff --git a/secrets/forwardemail/hetzner-s3-user.age b/secrets/forwardemail/hetzner-s3-user.age new file mode 100644 index 0000000..733d6e8 --- /dev/null +++ b/secrets/forwardemail/hetzner-s3-user.age @@ -0,0 +1,7 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q jwyFpeVX18Q/1vnK2A1gwETTTH/QDUmW7vhCA+E/1lc +vtG1Ra+hR0cc/o9oJw7YTWMc2+JmrehzBE5QkIHQMKY +-> ssh-ed25519 IrZmAg KljcDNRlBmn7ElVfXq/E2prFHnRQD2TkQY9Vto+OQUA +T37sFc3xVrhky6e0n4KbsX18/fBqP3VjS/mNbxX6bfI +--- lvSjWGriUCYC14eI2eH9MdO2cB76Pe3gWD7pidw8Qjo +s&x*4}z&F \ No newline at end of file diff --git a/secrets/hetzner/api-token.age b/secrets/hetzner/api-token.age new file mode 100644 index 0000000..a409a7d --- /dev/null +++ b/secrets/hetzner/api-token.age @@ -0,0 +1,7 @@ 
+age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q pEJA2VJkPC+NzA9yFvBrpXHD8qFMTD9iIHYSkx8P2RI +AGE1QJya77d92ERA1yQYylvZPNAJEQKoCL32BY5XBzo +-> ssh-ed25519 IrZmAg VMpoTBpNG/TAlnbJ2APwc4VMt2CX5rQwlrrihtmojFo +caOwayLgVDGPrjqLLH8hHHQ3Fy2WeRI2tf+R02HFqx0 +--- Ey1DYpyA4lnVqPaabNsEuSihl4fvZ2vpSc/IRGZwYBw +U2Q*mFޞ|^EV" \ No newline at end of file diff --git a/secrets/secrets.nix b/secrets/secrets.nix index 9d40bf3..4a78a69 100644 --- a/secrets/secrets.nix +++ b/secrets/secrets.nix @@ -3,6 +3,7 @@ let agent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net"; forge = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAlkGo4lwpwIIZ0J01KjTuJuf/U/wGgy4/aKwPIUzutL root@burrow-forge"; + operatorSecrets = [ contact agent ]; forgeAutomation = [ contact agent forge ]; in { "secrets/forgejo/admin-password.age".publicKeys = forgeAutomation; @@ -10,4 +11,9 @@ in { "secrets/forgejo/nsc-token.age".publicKeys = forgeAutomation; "secrets/forgejo/nsc-dispatcher-config.age".publicKeys = forgeAutomation; "secrets/forgejo/nsc-autoscaler-config.age".publicKeys = forgeAutomation; + "secrets/cloudflare/api-token.age".publicKeys = operatorSecrets; + "secrets/hetzner/api-token.age".publicKeys = operatorSecrets; + "secrets/forwardemail/api-token.age".publicKeys = operatorSecrets; + "secrets/forwardemail/hetzner-s3-user.age".publicKeys = operatorSecrets; + "secrets/forwardemail/hetzner-s3-secret.age".publicKeys = operatorSecrets; } diff --git a/services/forgejo-nsc/README.md b/services/forgejo-nsc/README.md index 5b2926b..f928973 100644 --- a/services/forgejo-nsc/README.md +++ b/services/forgejo-nsc/README.md @@ -155,11 +155,12 @@ instances: ``` For Burrow, use `Scripts/provision-forgejo-nsc.sh` to mint the Forgejo PAT, -generate a Namespace token from the logged-in namespace account, and render -bootstrap artifacts into `intake/forgejo_nsc_{dispatcher,autoscaler}.yaml` plus -`intake/forgejo_nsc_token.txt`. 
The token file is emitted as JSON with a -`bearer_token` field so both the Compute API path and the `nsc` CLI fallback can -consume the same secret material. +generate a Namespace token from the logged-in Namespace account, and refresh +`secrets/forgejo/{nsc-token,nsc-dispatcher-config,nsc-autoscaler-config}.age`. +The token file is emitted as JSON with a `bearer_token` field so both the +Compute API path and the `nsc` CLI fallback can consume the same secret +material. Use `--write-intake` only when you explicitly need local plaintext +debug copies. Long-lived runtime state is now sourced from age-encrypted files: @@ -169,10 +170,9 @@ Long-lived runtime state is now sourced from age-encrypted files: - `secrets/forgejo/nsc-dispatcher-config.age` - `secrets/forgejo/nsc-autoscaler-config.age` -After refreshing the intake files, re-encrypt them into `secrets/forgejo/*.age` -and deploy the forge host so `config.age.secrets.*` updates the live paths for -`services.burrow.forge`, `services.burrow.forgeRunner`, and -`services.burrow.forgejoNsc`. +After refreshing the encrypted secrets, deploy the forge host so +`config.age.secrets.*` updates the live paths for `services.burrow.forge`, +`services.burrow.forgeRunner`, and `services.burrow.forgejoNsc`. Run it next to the dispatcher: diff --git a/services/forgejo-nsc/internal/nsc/macos.go b/services/forgejo-nsc/internal/nsc/macos.go index b87e954..c54fb20 100644 --- a/services/forgejo-nsc/internal/nsc/macos.go +++ b/services/forgejo-nsc/internal/nsc/macos.go @@ -602,6 +602,18 @@ if ! 
mkdir -p "/Users/runner/.cache/act" 2>/dev/null; then fi export PATH="/usr/local/bin:/opt/homebrew/bin:/usr/bin:/bin:/usr/sbin:/sbin:${PATH}" +cache_root="${NSC_CACHE_PATH:-$HOME/.cache/burrow}" +mkdir -p \ + "${cache_root}/bin" \ + "${cache_root}/downloads" \ + "${cache_root}/go/path" \ + "${cache_root}/go/mod" \ + "${cache_root}/go/build" \ + "${cache_root}/homebrew" +export HOMEBREW_CACHE="${cache_root}/homebrew" +export GOPATH="${cache_root}/go/path" +export GOMODCACHE="${cache_root}/go/mod" +export GOCACHE="${cache_root}/go/build" if ! command -v curl >/dev/null 2>&1; then echo "curl is required" >&2 @@ -622,14 +634,18 @@ export PATH="${PWD}/bin:${PATH}" runner_version="v12.6.4" runner_src_tgz="forgejo-runner-${runner_version}.tar.gz" +runner_src_tgz_path="${cache_root}/downloads/${runner_src_tgz}" runner_src_url="https://code.forgejo.org/forgejo/runner/archive/${runner_version}.tar.gz" runner_src_dir="forgejo-runner-src" +runner_bin_cache="${cache_root}/bin/forgejo-runner-${runner_version}" -if ! command -v forgejo-runner >/dev/null 2>&1; then +if [[ ! -x "${runner_bin_cache}" ]]; then rm -rf "${runner_src_dir}" mkdir -p "${runner_src_dir}" - curl -fsSL "${runner_src_url}" -o "${runner_src_tgz}" - tar -xzf "${runner_src_tgz}" -C "${runner_src_dir}" --strip-components=1 + if [[ ! -f "${runner_src_tgz_path}" ]]; then + curl -fsSL "${runner_src_url}" -o "${runner_src_tgz_path}" + fi + tar -xzf "${runner_src_tgz_path}" -C "${runner_src_dir}" --strip-components=1 toolchain="$(grep -E '^toolchain ' "${runner_src_dir}/go.mod" | awk '{print $2}' | head -n 1 || true)" if [ -z "${toolchain}" ]; then @@ -639,21 +655,23 @@ if ! command -v forgejo-runner >/dev/null 2>&1; then if ! command -v go >/dev/null 2>&1; then go_tgz="${toolchain}.darwin-arm64.tar.gz" go_url="https://go.dev/dl/${go_tgz}" - curl -fsSL "${go_url}" -o "${go_tgz}" - tar -xzf "${go_tgz}" + go_tgz_path="${cache_root}/downloads/${go_tgz}" + if [[ ! 
-f "${go_tgz_path}" ]]; then + curl -fsSL "${go_url}" -o "${go_tgz_path}" + fi + tar -xzf "${go_tgz_path}" export GOROOT="${PWD}/go" export PATH="${GOROOT}/bin:${PATH}" fi - export GOPATH="${PWD}/.gopath" - export GOMODCACHE="${PWD}/.gomodcache" - export GOCACHE="${PWD}/.gocache" mkdir -p "${GOPATH}" "${GOMODCACHE}" "${GOCACHE}" - (cd "${runner_src_dir}" && go build -o "${workdir}/bin/forgejo-runner" .) - chmod +x "${workdir}/bin/forgejo-runner" + (cd "${runner_src_dir}" && go build -o "${runner_bin_cache}" .) + chmod +x "${runner_bin_cache}" fi +ln -sf "${runner_bin_cache}" "${workdir}/bin/forgejo-runner" + cat > runner.yaml <<'EOF' log: level: info From 4fbebdf85cd3cf559c0dc8c19114663d1a73800a Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 00:30:14 -0700 Subject: [PATCH 22/50] Fix agenix helper identity resolution --- Scripts/_burrow-secrets.sh | 58 +++++++++++++++++++++++++++++++++++--- 1 file changed, 54 insertions(+), 4 deletions(-) diff --git a/Scripts/_burrow-secrets.sh b/Scripts/_burrow-secrets.sh index 2ecd282..9ebd1f5 100644 --- a/Scripts/_burrow-secrets.sh +++ b/Scripts/_burrow-secrets.sh @@ -3,6 +3,38 @@ set -euo pipefail BURROW_SECRET_TMPFILES=() +burrow_secret_repo_path() { + local repo_root="$1" + local secret_path="$2" + + case "${secret_path}" in + "${repo_root}"/*) + printf '%s\n' "${secret_path#${repo_root}/}" + ;; + *) + printf '%s\n' "${secret_path}" + ;; + esac +} + +burrow_agenix_identity_path() { + local repo_root="$1" + local candidate + + for candidate in \ + "${BURROW_AGE_IDENTITY:-}" \ + "${BURROW_FORGE_SSH_KEY:-}" \ + "${repo_root}/intake/agent_at_burrow_net_ed25519" \ + "${HOME}/.ssh/agent_at_burrow_net_ed25519" \ + "${HOME}/.ssh/id_ed25519" + do + if [[ -n "${candidate}" && -f "${candidate}" ]]; then + printf '%s\n' "${candidate}" + return 0 + fi + done +} + burrow_cleanup_secret_tmpfiles() { local path for path in "${BURROW_SECRET_TMPFILES[@]:-}"; do @@ -14,15 +46,23 @@ burrow_cleanup_secret_tmpfiles() { 
burrow_decrypt_age_secret_to_temp() { local repo_root="$1" local secret_path="$2" + local agenix_path + local identity_path local tmp_file if [[ ! -f "${secret_path}" ]]; then echo "age secret not found: ${secret_path}" >&2 return 1 fi + agenix_path="$(burrow_secret_repo_path "${repo_root}" "${secret_path}")" + identity_path="$(burrow_agenix_identity_path "${repo_root}")" tmp_file="$(mktemp "${TMPDIR:-/tmp}/burrow-secret.XXXXXX")" - nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -d "${secret_path}" > "${tmp_file}" + if [[ -n "${identity_path}" ]]; then + nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -d "${agenix_path}" -i "${identity_path}" > "${tmp_file}" + else + nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -d "${agenix_path}" > "${tmp_file}" + fi chmod 600 "${tmp_file}" BURROW_SECRET_TMPFILES+=("${tmp_file}") printf '%s\n' "${tmp_file}" @@ -66,13 +106,23 @@ burrow_encrypt_secret_from_file() { local repo_root="$1" local secret_path="$2" local source_path="$3" + local agenix_path + local identity_path if [[ ! 
-s "${source_path}" ]]; then echo "secret source missing or empty: ${source_path}" >&2 return 1 fi + agenix_path="$(burrow_secret_repo_path "${repo_root}" "${secret_path}")" + identity_path="$(burrow_agenix_identity_path "${repo_root}")" - SECRET_SOURCE_FILE="${source_path}" \ - EDITOR="${repo_root}/Scripts/agenix-load-file.sh" \ - nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -e "${secret_path}" + if [[ -n "${identity_path}" ]]; then + SECRET_SOURCE_FILE="${source_path}" \ + EDITOR="${repo_root}/Scripts/agenix-load-file.sh" \ + nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -e "${agenix_path}" -i "${identity_path}" + else + SECRET_SOURCE_FILE="${source_path}" \ + EDITOR="${repo_root}/Scripts/agenix-load-file.sh" \ + nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -e "${agenix_path}" + fi } From b81a3377df6589217333fbc999efed6d271eb7f0 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 00:33:34 -0700 Subject: [PATCH 23/50] Resolve absolute sccache wrapper path on Apple --- Apple/NetworkExtension/libburrow/build-rust.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Apple/NetworkExtension/libburrow/build-rust.sh b/Apple/NetworkExtension/libburrow/build-rust.sh index 031e6bc..bae4727 100755 --- a/Apple/NetworkExtension/libburrow/build-rust.sh +++ b/Apple/NetworkExtension/libburrow/build-rust.sh @@ -71,6 +71,14 @@ fi PROTOC=$(readlink -f $(which protoc)) CARGO_PATH="$(dirname $PROTOC):$CARGO_PATH" +if [[ -n "${RUSTC_WRAPPER:-}" && "${RUSTC_WRAPPER}" != /* ]]; then + WRAPPER_PATH="$(command -v "${RUSTC_WRAPPER}" || true)" + if [[ -n "${WRAPPER_PATH}" ]]; then + RUSTC_WRAPPER="${WRAPPER_PATH}" + CARGO_PATH="$(dirname "${WRAPPER_PATH}"):$CARGO_PATH" + fi +fi + # Run cargo without the various environment variables set by Xcode. # Those variables can confuse cargo and the build scripts it runs. 
EXTRA_ENV=() From 52b7f102f05dc876c99987e50dbde11907f28d5a Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 00:40:07 -0700 Subject: [PATCH 24/50] Fix Apple deployment env and refresh NSC caches --- Apple/NetworkExtension/libburrow/build-rust.sh | 14 +++++++++++++- secrets/forgejo/nsc-autoscaler-config.age | Bin 1395 -> 432 bytes secrets/forgejo/nsc-dispatcher-config.age | Bin 1324 -> 432 bytes secrets/forgejo/nsc-token.age | Bin 1238 -> 432 bytes 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/Apple/NetworkExtension/libburrow/build-rust.sh b/Apple/NetworkExtension/libburrow/build-rust.sh index bae4727..d54bd71 100755 --- a/Apple/NetworkExtension/libburrow/build-rust.sh +++ b/Apple/NetworkExtension/libburrow/build-rust.sh @@ -88,7 +88,19 @@ for VAR_NAME in HOME CARGO_HOME CARGO_TARGET_DIR RUSTUP_HOME RUSTC_WRAPPER SCCAC fi done EFFECTIVE_CARGO_TARGET_DIR="${CARGO_TARGET_DIR:-${CONFIGURATION_TEMP_DIR}/target}" -env -i PATH="$CARGO_PATH" PROTOC="$PROTOC" CARGO_TARGET_DIR="${EFFECTIVE_CARGO_TARGET_DIR}" IPHONEOS_DEPLOYMENT_TARGET="$IPHONEOS_DEPLOYMENT_TARGET" MACOSX_DEPLOYMENT_TARGET="$MACOSX_DEPLOYMENT_TARGET" "${EXTRA_ENV[@]}" cargo build "${CARGO_ARGS[@]}" +BUILD_ENV=( + "PATH=$CARGO_PATH" + "PROTOC=$PROTOC" + "CARGO_TARGET_DIR=${EFFECTIVE_CARGO_TARGET_DIR}" + "${EXTRA_ENV[@]}" +) +if [[ -n "${IPHONEOS_DEPLOYMENT_TARGET:-}" ]]; then + BUILD_ENV+=("IPHONEOS_DEPLOYMENT_TARGET=${IPHONEOS_DEPLOYMENT_TARGET}") +fi +if [[ -n "${MACOSX_DEPLOYMENT_TARGET:-}" ]]; then + BUILD_ENV+=("MACOSX_DEPLOYMENT_TARGET=${MACOSX_DEPLOYMENT_TARGET}") +fi +env -i "${BUILD_ENV[@]}" cargo build "${CARGO_ARGS[@]}" mkdir -p "${BUILT_PRODUCTS_DIR}" diff --git a/secrets/forgejo/nsc-autoscaler-config.age b/secrets/forgejo/nsc-autoscaler-config.age index 3d8df29dfe23e4c974eade24d7721b5946a22127..5ec06d7e98a6cb18f06e9a3f31274572922c90f3 100644 GIT binary patch delta 378 zcmey&wSjqpPQ9zIeo;VLrIWjhk4cH2ezI$%zfZbThI^HBs8d)~MvzIMeuhP^p{J99 
zFIQ<&lu?;Qep!Y^NosPbpMSAQWUGQk9#Zg+a2bX{m2Y zlv9+Se`;7hS8{ntR7Q$nPPt)5fwoymaFw=4WL~y^nsb^_vAbt#WSDnGVt!arQIt>M z#E;_jzD_3Qg_dDH;qEyB>0yb9krghkAxKxrTuUBqtqr!kHd}uOQ@>0b XPw!=&!^Nt;i$y0YyFX(2k<0)9vdVwO delta 1348 zcmdnM{F!TlPJOa-u~(#7X<ruIOO>-Hm%dkkMOavnd!T7jhOc8qPNA2vd7xL3Wne~bMTB-%R=Q)bg|BarONoK+ z#E;_j=80xOIR)C@u8ASu5yja#rRhl)nP!3M{sAr_C7A(H`en|!+HO9D&OruT9^t`G zX&F)JSw@8g&Sq5whKbtQF6p_ELB&q4k;d9S74Er39?qWGm7ykFy1Kdw!S0cUK>>+A z0ZG|OiGjgUPL6pgnZbUMmRXtM#px*q*(q7|ZUMdp+J#k~T%WGIS;(&c%QdF%53}w} zjquGw(Y<9kl*Wmgn-2Y1BqaP_bL-7g$r)i zlU}gs$cq_wHRiWkiOeo zaJh-bzRmS(<4!oG=LbZkADPs^;GyEY$7%*gORW1V$xl8i>P%dag;o_r4? z^wmC#DAaGPeB)I8zHhV5a{+$`=7yQgY6*(P^Sd5vUQ%EFuzsER))M~O{rkQ?Y~02( z$wjOFv8R_a+rhXGVd2Y@OB{mNG@ef2<$9x}*3xL-_fmQ570>FIzRK&e4|X1x_;s}} zSvUL8yEADonsnd4p4go*>4Q5{%M_#SazCE^4P3VEaP`MO{mGfteQRgEKghjhlR|c@ zz<<8o-)z_0ny))>Ik7aZ{&?1nc+babN`~T1-RAX86E+4b{S#UfbxYz=_G4ET*0)A( zvz8>EWphg7$>rDy7%Q`JFbdDcfuG@>LHC+&`Iv#Z`R;bV-!>^V2o8744%?Add| zy{GrR!fUJWsSPe2y@8PYNGd~8djQG$}Rd#pp-k^0a z6V_Z<5f*;unA0&2!Gns-2Pb@gSn{o8MSb$K|9Q*%J*HAuEeihl{l0(H>E?n$ z?~5(E#%13R{!7xbT6)k=GxO{dhj|;WX?y+q*E=(6;i|TS7MZ_njg-#WML^3o&o?|f!_ziSD@lBYZ+oaeZ;_Y2+I`OHr1&{CI=uUdO~ zi}v=l_|H+=T4VUBTT-egW*h6%b%nwI{TE)?#~E$FBJW={iR*oH*OPm%6gK$&zEb|% zb*jOVsdxYG^u86yajh}6=c7Vv)Y_}Fzj*UKi9DS$Lz7yq(gkb-!8Bn9c_+Q#F?fVfS!%9xuYcp$HO&V5; GssjKbV{0D( diff --git a/secrets/forgejo/nsc-dispatcher-config.age b/secrets/forgejo/nsc-dispatcher-config.age index a3144309524798ec00ac0e89cbd041a6bff8479e..cd957f985c2aa89fbedf9f44c2cf352d275c3648 100644 GIT binary patch delta 378 zcmZ3(wSjqpPQ9N?rFm+qQDJhLe{o1ger8mTp<||3ZhnEbOITJwfVZ}#cBFSjZh3*T zD_4qTa8`(^pFv8dc4>H;TcLJwc5X&!vTvrjYkF>~MP)%+re$DZzN>LWF_*5LLUD11 zZfc5=si~o*f@e`wu4B4_bBIN0ZcvVAxq)d?afq=`o|{Q%k#<(3qnl@bl1r3JK&HN{ zX^>fRu}M}km#ME;N^(S5phZ@2WrT5vlSipRWVxwFUYfIaXpwhjgsXdCc8*6>c}`;S z#E;_j&MrlkW@d%qm5F6WUL`@M;pLg3KB0+bDMnryVa|!!WkEqX*@@+;?#{kk-dPrv zCV}Nu5e23Co<@aL+L2Wi#>qMQe#HUCJ|-q%1x0?vu9YDsUJ>P7y1KdwVda(v#@QhO 
zX2uoH`57+xCHfxTW;srYRTgDI;m+Rvl|?C<9#O6Zxk;4bv^`1l9UZWuEd93_rzEaIzKv6DWc8 delta 1277 zcmdnMyoPInPJOXyigQ$$cUquv$Ty0?WzTCjFbj$fKfK9{bYLUD11 zZfc5=si~o*f@e`wu4B4Fu0>#KSV57yyRWB5YDl?(v0ILBPI^&DsFR_-Pf(CwmWiKb zfM0oFNm6++mqCP?n@d`zp=+Xtc7bt0k)M}yW`$|Gb40dVXkdn~zNd#{QD#w6uwz=_ z#E;_j8Ad4;evzf6?jB_x=8iteS*1n+h8c;;l}=vy0b#{q=5G3>Zb^xm5ze7pX?ZE( z203M!j-ip^l@b2AZth`Dg_-U}MUh@bMVVRViFs~06``q>&IN&7y1Kdwp+#9qe)>Mz zRUUbXg+-;luBC3iE@{601!)yQ839q5u1RI}u7wf4k?C2!T&nv5rxp54ocPIQwTR($ z!7lLzt)nY$l-OB+>#g`PZR?3#CdUa^emyVT)p0bwYjQg$U#zrQNo~$8k?rm`mrduq zyujgP*cR)+SzC98Bz~E?eZSYl3tKtL3wQ404G4Cd%M-HJqhZmto(SbGtBy;$D~_5Z z_2Z^-^>6pGiBeU0yxWk~!Z_ zLwk~F_TsR^yS)FO;oc$=RV^Vp{mUNXqRsP)SID*gsXU{3cZ1nkSxZe`K(9f69#INY^=}4OrgYs6cTVk6yQR}YIe#v5Pn&)6 zvHZN%Cr)H;;+Q$-W7jFM`hELi4WtCU+^TG1ANF^wS=YyNQgov_>xCIhnJ>Rtuy@Za zlincSG_9nQCMsWJ|Lbk(@;8K;M=K(4@@1q^VUxEu3oVHiS^Sd8p&(6U7t7e$c)!JpEmc@2Y-{~ zQ~9w=(zrAuaO2XJ!|QzK^!kXLSs(OpuV3!o{W~7TPMThL?#C>XAM8aLo$u!8&lPh0 zD*a>XuXkZ~nu-heg#O+l;KtSAV6#5v%z?tE-_6%g_kGwoQ>0bTSofb^&2k}?Pbp5d z1s6NkmQAj>DB&hw^ZWAVej~?eT4oQ#)kEq(q+a@ZBJ+E~mQP;SCb2y5pTe7aXxCEH z18w}u$;Ee+J0C2&HoHshxTa>ti8cA<57J7`t^D(By6%y06;*Yy-;EWg*j}ux*fZ^C z-u9(h?tqe>oeM(izGf-3YL}i%3@m>!x6W9J zNzKN3Q?SWav)N2jmmL(obI>mMs*3vOTU^W2jej+jRu|WpwLM&6vstIr_0zAT2R^M@JJ0ZC m%!iDJwoZwUUR!_J`jdS+-@!8htG{$^&)`0-eCUu$#4!MxK1{&? 
diff --git a/secrets/forgejo/nsc-token.age b/secrets/forgejo/nsc-token.age index dfd1d04b253336e74f294f9b1120f6e6e5c33389..ced6518e6b98d7cdf565c706dec733cdb88823af 100644 GIT binary patch delta 378 zcmcb{xq*3tPJMB(V`Q+Io2$E_Q;MsBk84GlVQGYyzJ7Xo#NLfK;S&C_Txm!k}mtmRf z#E;_jUZ%-OL6%MhxfKPLE>2Z0m1TipIr@g>mKokj-lajVNtxv#!LA|xUb*F5Nrt5+ z?x9}3IVMSY{`%%&E+M6fDaB4%mZ{!>F3$O${%$@gNseJ=E*9lny1Kdwl?BOprGeUM zg=Jyx;Z7D+!LAnW#kmo=#(6I3S;=Jv=|%;bp(dsV+Ky5ATyt8a7rdTwuTV~QvBC@1 XZrKd&@IJxX(;T%5bDmqrcg_a@whw;H delta 1190 zcmdnMe2sI0PJMV%q)BF!V|Iy8x@&o4v75WOb8%QeWLRi^iFa;kdSO^)TC#6Qj-!59 zI#;n-k-kwxL}0j6v0qtEzHz#KcBz4(S!R%Pn0twZtD(DNQJO`GQCaM5d7iS7>@gMY(xdkb#k z#E;_j<-vvq=81{L&ORPyzD|zrep!wc?*6Xn-X3B8Az|4?ky%;h5grxp+UeO`MP3yV zMM>oq5e7+VrrF71M)@9QroqldhE*YsRYf5cffiYAVHv?eNkN`my1KdwIcBB$2HM)$ znStp}9|H%>d-9ckT|V=bZ|A2OrW?AwGj0|%O1#QEGWD)!%xkR`=DZJA9B+Q} z;zW;D?z*dmQU6N5>n}UqFU|4NY}K(dj@>D4zulhk|M?j6S7=J>5&5Urc3t2<|LieG z0+Z&wGuHL7PqeuukEvOw%v*7G-u^FVi_YA-Vm0~7)cFy6wmiADSMbn@Q|D41Pczxx z(Vr`!dn|0lI>xPsH(h=yv1b9(gk3u-ZhLMx9JsNJ)#ZfZnqKkP!u99(v^|(Q_0YPF z9d~ndmni*PY+QRazB{?Y+518gyB>p-?Na9#Z#Gp-Z|7Uw#o+Z?E=EkP@4VXg;scr=ryqKMl|9k1 z>Q`}ic;|&9U)&PPY`dj*rEb@~=e{GPl}XE?I{cZ)i3U!VRG>A&fkkYzI0rEP@lS6 z<}yQ=e#HU16@rqN{;Dl`C{&n zZsgg8-F|PlWAF9nGp|H^zwE*@ch_UzNtsuk)xY|gdC@A))T8<48Gh!l3GrWEq@2_L z({Sjb6!-F-B5fRRqQrET{S80kJhyjQQh@q|CXrK5gi`-+5e&YmkT{cNai7w!1(sKSj8`rW!xG5c-ndjJ<{BFq2) From ef3585bb149dbf1f8a027cc03803fa2a8ef31287 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 00:43:31 -0700 Subject: [PATCH 25/50] Fix agenix secret streaming for NSC configs --- Scripts/_burrow-secrets.sh | 8 ++------ secrets/forgejo/nsc-autoscaler-config.age | Bin 432 -> 1395 bytes secrets/forgejo/nsc-dispatcher-config.age | Bin 432 -> 1746 bytes secrets/forgejo/nsc-token.age | Bin 432 -> 1236 bytes 4 files changed, 2 insertions(+), 6 deletions(-) diff --git a/Scripts/_burrow-secrets.sh b/Scripts/_burrow-secrets.sh index 9ebd1f5..7754b74 100644 --- a/Scripts/_burrow-secrets.sh +++ 
b/Scripts/_burrow-secrets.sh @@ -117,12 +117,8 @@ burrow_encrypt_secret_from_file() { identity_path="$(burrow_agenix_identity_path "${repo_root}")" if [[ -n "${identity_path}" ]]; then - SECRET_SOURCE_FILE="${source_path}" \ - EDITOR="${repo_root}/Scripts/agenix-load-file.sh" \ - nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -e "${agenix_path}" -i "${identity_path}" + nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -e "${agenix_path}" -i "${identity_path}" < "${source_path}" else - SECRET_SOURCE_FILE="${source_path}" \ - EDITOR="${repo_root}/Scripts/agenix-load-file.sh" \ - nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -e "${agenix_path}" + nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -e "${agenix_path}" < "${source_path}" fi } diff --git a/secrets/forgejo/nsc-autoscaler-config.age b/secrets/forgejo/nsc-autoscaler-config.age index 5ec06d7e98a6cb18f06e9a3f31274572922c90f3..f2acc6fa6193a08bdbca99be371a2def0099bcd7 100644 GIT binary patch delta 1348 zcmdnM{F!TlPJLOvk7;GDtGi{edqkRfN^YRGsac_Ka9LEEMT)6UwuyOpMqYujQ-HR+ z0asC&SEjSOUu0s1dsMEznM;UQd0<*#R#cFYOQ}hLn_+2sMPaUac5q~9K9{bYLUD11 zZfc5=si~o*f@e`wu4B4Fu}QMGe@;k*NvcIrKzOi6x?6T+ahZ2kL28t5WQK`>wsvKh zvwKChkz2MWSFmYexW0Z^nL$*(Q>C_>dsMi;g`2CXqq}E_f3a_pM~IhaNOEFeYKd9- z#E;_jIpslV9%dCq`rbx9W!mmO61K89X~>E%HM7CE7XiCLDp89{#K={d%E{y9eFMg=BZy1Kdwra|Ujxrv5m zM&*gQ#%YxnQ5HGn8EM)D>E(e{7G}AIL3zRTMtPo@##N=RT-NcPA>JQt(j%vENWJ1P zOI_z3f2v9%?losf*zQS1lRoh$9B3DLUHZr5pLvb$;x1XKH;Uaa50-OZMuH!&NSMgvCN@|f-~S*|QKkB) zF;8J_eP-b=c1sCO*}A;WM~aJIwf$Zft-sm-{4J+%r{DMP)(}3EWty&?@FctVwMG4b z)~Vr&c9V=>Tdj!D&3$`x;yTy04^oRfwR&ge>Q|}-os26;yf*dbzK-`#X4HN;5p2vV zw39vHTT8ZeCd0W~Pu^!3CeFVZU-)bB&&2nOem&A?J`q`8aVk<!(`!8PY$nWIqoet z zW0m+dDJUrF@7=Hd$t#k+u4$R4C41YyzT(rQwg0;1=H1&V*3oNFwO47|TfP%l?}{80 z@xSAexh17d-SkIoBFDZf=QVbkvTk8}vAX=Yd6?w3CP#&p+K(QnJmZhs_&i-a_e0dF 
z4GB?wjCqomLpCKit>5>$H2(cMeK)?Frv#tcbT}(KVn2Rh{r(?I1JBxoR|l@+>@u8i z-k`NUXwyZ3+?QT~p{D7Py|x=I53j$V7ts4-+n;b=rdQ{DlkY8<7x26rf$xd zs5mM6k~NQFOc(`rUzRI87V1$}!jQv|7T-3pWmQ~)Kv~N#%=Ng42 z7e4LJUpKeAb6UIJ^riZe&Hl!WH@qVEx=xP2Zz3<%d3&v`)UGQk9#Zg+a2bX{m2Y zlv9+Se`;7hS8{ntR7Q$nPPt)5fwoymaFw=4WL~y^nsb^_vAbt#WSDnGVt!arQIt>M z#E;_jzD_3Qg_dDH;qEyB>0yb9krghkAxKxrTuUBqtqr!kHd}uOQ@>0b XPw!=&!^Nt;i$y0YyFX(2k<0)9vdVwO diff --git a/secrets/forgejo/nsc-dispatcher-config.age b/secrets/forgejo/nsc-dispatcher-config.age index cd957f985c2aa89fbedf9f44c2cf352d275c3648..a71da73a34bcec2ac97fa742a9a97f991f97f1d9 100644 GIT binary patch delta 1702 zcmdnMe2I60PJKYOqh+OESfq=$c41gjuxXZaag=LFPJy#muyKe}d7?*{yLX0TQczZv zCs%2bVWg{fP;q{tfkB{tvVUoKW_pohNPufmq;Y;)zDGfNwx?H=d1;uj1(&X!LUD11 zZfc5=si~o*f@e`wu4B4_VPa*vMQ&bJsz*VxPgZJFid#WYgmIyEVUkyhSA}ayW?6u# ze|C~dnPpHimx))oV`)T2xrdL3ahS1bu77DofOkq_ntN4BkdeP zkru%Q6?w&O?&Xy^xmn(>?%L(r;YOi85!r60$u1s6`Yu8GIr-UKy1Kdwnf_kB>HcO# zsUhA*Sye`<&QXOUTod%>o|_mTwApR@&1F3c zzB^dDpZdeZ^kw}6J)ZBqiHUb#-uiVou+fIme4^k3EAw4#?W|9pEOFi~d2-XAPnTGA z+uP6EFq)bPH#${z^-s-{TUwTSaQRZp@EytDZt19Mo#9n{&2N`EA!NGt)I)C%O3Cbf zu=n4p>gf8Fu?6;p8yYQO=Z?=DTMHR$DL~)^qsG-S~DD^T~>yE9dG?*t)$l zc{Xjys)>hu)}7YE*CLnRGJN)T`p>e5Kb%vv)^M$vD4JN{`I+xz zR?*!VH(VDjQ@){d-tq@WO3qz&PwA$6`UT7H8eJ8iB%G99EM31?x9d`EBh&2#MV+f` zHz+Eud-Gh&(8|W+KPOwwp7rfF?(ylXZojnakz-eWhPm#8D-)K_Khfo!%DN`3zdxk) zx9j=+ElG#VCQO_4dB)vceICIJCd9Uful~9D<;Jb|@>g8&^`B5bzi6@T#y<;gOne~z zYU(NW1Dc<;Ckk<_PMdU6U`_pbzxO7>ev{m`cO8h5Qq(-=TVk}iNWNqDt)z%KvP^zP zli~_>JeAhilt;Sm_}(e})q~Odh5VJX3nTOU51uZJZ<*?TBG@{OBW-n3PE6P9l$n1m zT~GG@mz3AnS1WtZ$av^~z}?)nQsIxz);|C8CG~*$4sIRMh54mFFO_p1x$<#E{SB6F zA3FcsYtLD5PEj#9axzQKioo084mj=gPO#y7*6ywE=A~Cxc$^J*SUORw?r78fGMo2YxyKG^ z#av5p69`+OS^Y61TUxVg=AS^m3g1FMYo+=b=0Y_&{-MXOCf?Y*T{riPtDxg&_cdEY zxzsBd4ykQgaI9Fu^5LO;w^hmuCOwF=UmFr%C|JU%4CJAvi>-zf(LB$~?H=e66u z)4wIO#D(Fdl6|v4bz`FCKI<*_o4S5KU;3dyJ?7V+)@b3&W)C=pI)0}eywZEG-oS}n z;`)~L^=n>NE3WO}^cE7Z*;F*UX3mM**SX8sgm`Zr(PkDGSa*8<*Kd1bAE;C}-k2uN 
zZgfNG+rbXCIsBPfb0jYI8|+*!IN@MP&Gm*3W9#{M&PMNlm%2~<=)9}1HW|%3Yk5&U z)$@kRp6Z-GG zZ0nY+FqSnJzg)XJ#X&WrU1yfBS=AwV?dy+IBU}~My^RyUF+*Zr0wCRudQg`*4>3_ZekkY58({3jIvd zoEh=MCjI)UL$b#g`Rw>38y226FP=9idzsR~Pai8MPB9i&T2wasw)b^E zlmGkIJMBL?`A3&)ue$2~ojKoT<>>y?sa3DBYj2obv}L8?oS&XX-;H{>`Re1$m$1yt zo#=g}*0=ARd&!NMM+w0^|Mj=z9$)@C#>lsR`|l+61&>Y?YRpyHc0Afze)`At$JV`a zcqU>dJ^j!9Y1&(zR@-G>+?tqUmdbFKv!+8KO8DC6=&h^EHtL*7UV3PjY*aaG^5#~F z-Z@(@@+pPcUXr@I%%FXVMd7_IFR#3o4}HouSN21hpv0|gH=`&+(W~d|EI!Tsu~AIc zw)kFc@1buIRdd^0#C}GwFlt=yNVl`enf9yJ+LGH;TcLJwc5X&!vTvrjYkF>~MP)%+re$DZzN>LWF_*5LLUD11 zZfc5=si~o*f@e`wu4B4_bBIN0ZcvVAxq)d?afq=`o|{Q%k#<(3qnl@bl1r3JK&HN{ zX^>fRu}M}km#ME;N^(S5phZ@2WrT5vlSipRWVxwFUYfIaXpwhjgsXdCc8*6>c}`;S z#E;_j&MrlkW@d%qm5F6WUL`@M;pLg3KB0+bDMnryVa|!!WkEqX*@@+;?#{kk-dPrv zCV}Nu5e23Co<@aL+L2Wi#>qMQe#HUCJ|-q%1x0?vu9YDsUJ>P7y1KdwVda(v#@QhO zX2uoH`57+xCHfxTW;srYRTgDI;m+Rvl|?C<9#O6Zxk;4bv^`1l9UZWuEd93_rzEaIzKvPp^U= diff --git a/secrets/forgejo/nsc-token.age b/secrets/forgejo/nsc-token.age index ced6518e6b98d7cdf565c706dec733cdb88823af..68ace3ed46a52bfcf92df311d394a5b5534dd741 100644 GIT binary patch delta 1187 zcmdnMe1&s@PQ6ola7lopiDjfyVv)CFR77QkrG7=4S5ahvL1en0sgIGin^{1JsaI}9 zF_(wATScNrntQfmRD_d{kx^!NNTq3xsaaJ@ka?PUM5MljrE9X2zE7sJE0?aFLUD11 zZfc5=si~o*f@e`wu4B4_n@MPKrg3_9gmYO{c%-3!l53?$PFjS!yGd4XPN9phdthLG zrCCIzrAw|SmqnIyL_tuIe|Uaro_kT4iHm7rd8JE9sc%k2QE_RGPfm7PzE^&tt8dlB z4-)mJVdkzGMVX#nrcRaN{=td9CdpC0jy}HbCKe_o-mb~!+Acm(?tWgmj*eW(q2B)a zQH3elK}Dek=4GZC1#TwV-X^Ar1wn@T1yPl5CSLhr9-b*VjwW2Xy1EM99;L=E6={hc zk(mJoK>^Oik%l1_rG@bCw{-D^rl{$ z^Za>}m2yA2kLvyV*3`eQDI+g_l@pgumz>BqDG|2o?G}DVVk}QhOPJ$mvfw`7s-!I$ z{C;z;oqMHiekkJOA=~v+kI5=dbg$T^D8g~%#`oO4cF#Qv0-imzso8rVFesk;r0JpK z9GL-o>fZ&srT;l}WR~edZ_nEY7}!o#PqUt5FMKm_>b+y^t3pb59`@aM$v@QHYWw%c zJrc}a`~Do3NIQ7on~soPmVI)oT-MTgj|18T@_(_Pxo}k>>~p{O?tia@4;qHC^&ZlD zShK=msnmDAU()|;s~!a(em>8_q&eIsL$hl0yJa)mPfnUxpX_b(SAPE_ueM_`pLhQi zX?njR`g7*ae`gu5J<3?jtL%GrZrGj+t%Y$%-soHKJXyHzu#oZJ)7F;3Ifs8SE$tHs 
zP+iuxJC(s8dLW;kDW@W@^;fRq^_s^}u+WS9R*8^B^?R44Y_YecWX}BBa=Ll@A${NYC-L=03mMsN zUhK2HzGJoI$=HM=d9OS;o3h>SCzRe*VLS05{g}j@=M91{Gj7fA-m_)p#fr<1Y_6<( z(kx~&y&)~<=#8+|K8ZhW9JSM`U|Cr5<$;LE$=ZMq+FEjIlVrS2%Z@LQso47FenW_u zL3;4pVvk+bRqOQ>%a*qGytr2{E8&!Ot$r)pBe~+3#piS57w_Z#%D?wz>6fs$6`blD zPKA8q32afn{rjcu(#7}7*rE^4Z&o#NLfK;S&C_Txm!k}mton& z4-)lWrpZY`mQDq^6$O?qPE{_IWr1Ni`iA9}8Qw|Wr9rMqndKqDt|9(jx#e6*hNUL% zp zWnu2&P8LX>c`oT$$z=xVMg^LoCZ-13j#2qsb6TVqyqHS!VA`J W*$nOQKEc}49JLB_o?FLv&IbTM%YK9a From 7fb6419fa01f4cfd3f7dc18d7bc1ed3c0625047b Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 02:02:48 -0700 Subject: [PATCH 26/50] Rotate forgejo NSC token to Burrow tenant --- secrets/forgejo/nsc-autoscaler-config.age | Bin 1395 -> 1395 bytes secrets/forgejo/nsc-dispatcher-config.age | Bin 1746 -> 1746 bytes secrets/forgejo/nsc-token.age | Bin 1236 -> 1238 bytes 3 files changed, 0 insertions(+), 0 deletions(-) diff --git a/secrets/forgejo/nsc-autoscaler-config.age b/secrets/forgejo/nsc-autoscaler-config.age index f2acc6fa6193a08bdbca99be371a2def0099bcd7..15d36ff8c5fb63780b43bda0cff52f9346a3061b 100644 GIT binary patch delta 1348 zcmey&^_gpePJOzMdvHdkyH90Sxn)R*i(^TsnTcV6sbg85vst#EXQ)S(p`%w=nx#)x zI+uH~aglbhW1f?)Ut(Z#qPe?oAeXM4LUD11 zZfc5=si~o*f@e`wu4B4_kA8rAuA_O9QC?(#pKq>zn46_fNr9oUUy6ZcVsUPC^lbK0ChL3h|nsI*d z#E;_j1?J^}h8blgxsJJxu9itzr6yGc70xNeK`!3r-epl{`4+A|uIUvaPL38_x#=N( z#sxm!rR7CN8QE@yfkiH} zo)tk(&WTwe5ox7`?&%)xW~pwuX4)RXDc;`o{-ti_mQFdITqo-O`wMl?x^QXU^7bAX z_aDbX=65nlE@j_x+Pkm9{7dJzh;IL-<^;?tVSAOR{*-52=pfpi{sQIGK9p1^h<-y^@CsXrGDSRwK%LFlsh^0_HX{;XnhxK-!) 
zQ00J2(nU4JZ zED|QCX7km{ib&n}`EY5u*qqyPdEf5U>8xp->8CO|;?|N=^-^i;6*@C|q^euqPdUJH zKl<3(wg}7gj&t#b!Ws;7J-=QFXZ|enxif3J?-qmGk88inCf&@kU7vD$<=k(Ze~ND6 zTq_YL5XiawPvy0nGiUZVGg=FB=G{L#F~e(9)4%AV$FcwTzMY#tVL8vIiMI}=wk|TX zHPNzN%A{Uz@-%C9@X5@2#rt2ravTagHD9kq!eh(Yn7!u=84B4}pP#iSYThw@R%!+xhRuUcY);CzVvU>7h@y><777*SIb%J3K*5uFv%x^D&S2 zOa;s8O{R4uUF5m7y^7=HTqUt7hF{N}W1nU(a9oIra zYjdBz75y{!oBQ7MHv6pQr!VxmzuK6_Y8$PR$g;Fn`*5^*z;pQIr~&5-O8*9 zJlZ8b+xf}1N&G+6*L!VSu+MF|Q*Wv*kNFDjniSc(pH5Hww{7U0q%^Jf)3OFZA!}p( z!cWiEcW+;I<?_aoZ)3_na=PS E0B>DdcK`qY delta 1348 zcmey&^_gpePJLOvk7;GDtGi{edqkRfN^YRGsac_Ka9LEEMT)6UwuyOpMqYujQ-HR+ z0asC&SEjSOUu0s1dsMEznM;UQd0<*#R#cFYOQ}hLn_+2sMPaUac5q~9K9{bYLUD11 zZfc5=si~o*f@e`wu4B4Fu}QMGe@;k*NvcIrKzOi6x?6T+ahZ2kL28t5WQK`>wsvKh zvwKChkz2MWSFmYexW0Z^nL$*(Q>C_>dsMi;g`2CXqq}E_f3a_pM~IhaNOEFeYKd9- z#E;_jIpslV9%dCq`rbx9W!mmO61K89X~>E%HM7CE7XiCLDp89{#K={d%E{y9eFMg=BZy1Kdwra|Ujxrv5m zM&*gQ#%YxnQ5HGn8EM)D>E(e{7G}AIL3zRTMtPo@##N=RT-NcPA>JQt(j%vENWJ1P zOI_z3f2v9%?losf*zQS1lRoh$9B3DLUHZr5pLvb$;x1XKH;Uaa50-OZMuH!&NSMgvCN@|f-~S*|QKkB) zF;8J_eP-b=c1sCO*}A;WM~aJIwf$Zft-sm-{4J+%r{DMP)(}3EWty&?@FctVwMG4b z)~Vr&c9V=>Tdj!D&3$`x;yTy04^oRfwR&ge>Q|}-os26;yf*dbzK-`#X4HN;5p2vV zw39vHTT8ZeCd0W~Pu^!3CeFVZU-)bB&&2nOem&A?J`q`8aVk<!(`!8PY$nWIqoet z zW0m+dDJUrF@7=Hd$t#k+u4$R4C41YyzT(rQwg0;1=H1&V*3oNFwO47|TfP%l?}{80 z@xSAexh17d-SkIoBFDZf=QVbkvTk8}vAX=Yd6?w3CP#&p+K(QnJmZhs_&i-a_e0dF z4GB?wjCqomLpCKit>5>$H2(cMeK)?Frv#tcbT}(KVn2Rh{r(?I1JBxoR|l@+>@u8i z-k`NUXwyZ3+?QT~p{D7Py|x=I53j$V7ts4-+n;b=rdQ{DlkY8<7x26rf$xd zs5mM6k~NQFOc(`rUzRI87V1$}!jQv|7T-3pWmQ~)Kv~N#%=Ng42 z7e4LJUpKeAb6UIJ^riZe&Hl!WH@qVEx=xP2Zz3<%d3&v`){~PJMoaSCLV6lxtB$p`&qdWRkXPvVW9|lV6r&Xr!Zlj(Jd+SFUz)Qn^_~ zI#*~#zMpnruy4ABMM{B7uxGGKN>#aKo{5uzg?5g1P@cEBxkqS#cA}|!GMBEMLUD11 zZfc5=si~o*f@e`wu4B4FK~6zLaiwdhQ&CV#T25$&zgucfNLG?ZdT4p1nTvn1k%@PR zSC(&XrcZe`mr;IDfJbPeM^a=`a&U@EVQ98_c}j7aS7xZMWoe>OP+DPma&~!9j)$M` z#E;_jfo@eUl@S$zX4+vDk?Afz0f_;no_?ODWhRMUk>y^QhTh5EW>IC4`9Ypsi58VE 
z+MfBDL18JuX%?a0-aeVy1}TBreieoluCDqP=4KH-+M)XHWq}r4y1KdwCWa-2g<(0N zZkEYzsgap^xkcvQ5yd4DDOp7p#$j#|UO~zAm4+2%Au0YQTvDB~p~(s2Yi{^H*DPZ` zH|bk=0eecqtAxOZLY>zG0w(1xXK&CGZLwB=rf2hD&2O>8|6gc+F^G0&Ueps<7wq_T zkLQQoR_ne`-?#C`=l_NGdg>zeWfKar=9>38c7Njy`?WFT#$-+F@0#In!w-IXevvIm zPB8ZN>v|zc|NaYhd(-zf=<}`BnW6o7>qqSkU;i&Fc0d2c@=f@cZb8nRb*t{RKelK{ z(&?SHjz#HCa*hk*(tb}1xG*~c`B{Z4N7>(@ax;XeAzf?TCHcC9Vz zxqrUrzMFyJsepL5hkoyQHq>q?SAa4J)MXWX$Rb!RT#o!_O|cvvN9 z$^4GL)wRxB?!CG@(-iTPkp7gIE^+>@@Po&4L5K1yJ+N?;bW zFgSSM;#Zp1!Pi&h^%P_*+LagDh%c|RTkjpq;-SJTxL(3UZjqw=w+ zG`AQW&2M76cI(==+?xSg7I1BRQeJ%ECr|j>fF+l@rhNP&@J4QL=g!V^*|&nZo++}& zz1cf?Rzsxm{g!6hip?g7-#lx zW-hh-qIaRjc1k~s?rZboHlFJ~)}GA1SDDA?nRnk|LY~f9$Ih8|?dz%-tQ#&*UsF6y zr@a2(_e%vIrQb5`dvL?RZsQxT_;;pZA7}6HdH4RSB>S0;_cMFn>CL>rKGS?U?{_hl z<-hJ-TwZ;5=eugP>P@PWC*S{mRrm0nM-a1*=wkD-HJ5&zh?qJx#$I>Q+t02t$27KG zF{@gWZkHJ3!!Jv?MNiAOnO_M`fn`|TV2X5^N!&E(Xck}}1_ zJU#ns`;||MwpBiNO-tWFF=k6>y-CLBq@BU=XrM#a)A|2;Guq^oy zF@fje)R2-`!5!XD_SS~A9DcQuJ@Qf3jU?y&5nM&9M4PAjge4g!9TJ$g{%-p}Q*XV> zTOq0s{zb34V%$<0QorucgL1+BN@soTo7&pX%v`qcW$nWM8z)IKuid!WmDS+Tj(6|3 z?wb3(UgzPCE~B!GGd+E`ZhQWB+N$~ke6P|U3obpT*WO{NAd_YFR(b6#!>P)@q8_+k z*A#rctGywqf6n*T-z$0FZ_=2$$SaV-Ci-Zm#PgM_qSYUST{~PJKYOqh+OESfq=$c41gjuxXZaag=LFPJy#muyKe}d7?*{yLX0TQczZv zCs%2bVWg{fP;q{tfkB{tvVUoKW_pohNPufmq;Y;)zDGfNwx?H=d1;uj1(&X!LUD11 zZfc5=si~o*f@e`wu4B4_VPa*vMQ&bJsz*VxPgZJFid#WYgmIyEVUkyhSA}ayW?6u# ze|C~dnPpHimx))oV`)T2xrdL3ahS1bu77DofOkq_ntN4BkdeP zkru%Q6?w&O?&Xy^xmn(>?%L(r;YOi85!r60$u1s6`Yu8GIr-UKy1Kdwnf_kB>HcO# zsUhA*Sye`<&QXOUTod%>o|_mTwApR@&1F3c zzB^dDpZdeZ^kw}6J)ZBqiHUb#-uiVou+fIme4^k3EAw4#?W|9pEOFi~d2-XAPnTGA z+uP6EFq)bPH#${z^-s-{TUwTSaQRZp@EytDZt19Mo#9n{&2N`EA!NGt)I)C%O3Cbf zu=n4p>gf8Fu?6;p8yYQO=Z?=DTMHR$DL~)^qsG-S~DD^T~>yE9dG?*t)$l zc{Xjys)>hu)}7YE*CLnRGJN)T`p>e5Kb%vv)^M$vD4JN{`I+xz zR?*!VH(VDjQ@){d-tq@WO3qz&PwA$6`UT7H8eJ8iB%G99EM31?x9d`EBh&2#MV+f` zHz+Eud-Gh&(8|W+KPOwwp7rfF?(ylXZojnakz-eWhPm#8D-)K_Khfo!%DN`3zdxk) zx9j=+ElG#VCQO_4dB)vceICIJCd9Uful~9D<;Jb|@>g8&^`B5bzi6@T#y<;gOne~z 
zYU(NW1Dc<;Ckk<_PMdU6U`_pbzxO7>ev{m`cO8h5Qq(-=TVk}iNWNqDt)z%KvP^zP zli~_>JeAhilt;Sm_}(e})q~Odh5VJX3nTOU51uZJZ<*?TBG@{OBW-n3PE6P9l$n1m zT~GG@mz3AnS1WtZ$av^~z}?)nQsIxz);|C8CG~*$4sIRMh54mFFO_p1x$<#E{SB6F zA3FcsYtLD5PEj#9axzQKioo084mj=gPO#y7*6ywE=A~Cxc$^J*SUORw?r78fGMo2YxyKG^ z#av5p69`+OS^Y61TUxVg=AS^m3g1FMYo+=b=0Y_&{-MXOCf?Y*T{riPtDxg&_cdEY zxzsBd4ykQgaI9Fu^5LO;w^hmuCOwF=UmFr%C|JU%4CJAvi>-zf(LB$~?H=e66u z)4wIO#D(Fdl6|v4bz`FCKI<*_o4S5KU;3dyJ?7V+)@b3&W)C=pI)0}eywZEG-oS}n z;`)~L^=n>NE3WO}^cE7Z*;F*UX3mM**SX8sgm`Zr(PkDGSa*8<*Kd1bAE;C}-k2uN zZgfNG+rbXCIsBPfb0jYI8|+*!IN@MP&Gm*3W9#{M&PMNlm%2~<=)9}1HW|%3Yk5&U z)$@kRp6Z-GG zZ0nY+FqSnJzg)XJ#X&WrU1yfBS=AwV?dy+IBU}~My^RyUF+*Zr0wCRudQg`*4>3_ZekkY58({3jIvd zoEh=MCjI)UL$b#g`Rw>38y226FP=9idzsR~Pai8MPB9i&T2wasw)b^E zlmGkIJMBL?`A3&)ue$2~ojKoT<>>y?sa3DBYj2obv}L8?oS&XX-;H{>`Re1$m$1yt zo#=g}*0=ARd&!NMM+w0^|Mj=z9$)@C#>lsR`|l+61&>Y?YRpyHc0Afze)`At$JV`a zcqU>dJ^j!9Y1&(zR@-G>+?tqUmdbFKv!+8KO8DC6=&h^EHtL*7UV3PjY*aaG^5#~F z-Z@(@@+pPcUXr@I%%FXVMd7_IFR#3o4}HouSN21hpv0|gH=`&+(W~d|EI!Tsu~AIc zw)kFc@1buIRdd^0#C}GwFlt=yNVl`enf9yJ+LG|PJMZbg?UcCV?lw1uTNlZW?8DUVVIk@b6HSUXjZX)c~($TMOdD`cX48s z0avhdwvoA!Yo1@SQ^5lzQqx_ zrp`$f9z{uc!5+yb8I^7!k%`)7`i>Dk0T%xCQHh!1RUsb5T#j^?t@ z@y*?~re`a<-hYiZ)4n@rOI+Z!RXbSDWIo-$W&cdaElonH=gQ7Zy6*H&k>kIq{g(@; zj%8L$u8WSv(wQprQsw82 zMN6|P*B+|hwLyxf<%{LjCCBBDh#zg{IX^o#CHz&`lpmW;TsArvuxG}s#Pu_lOtzc) zn*AS#d9+5rzID~s+|1X-*?MbITJ85NShMkPm3+&}^_&}zFuuR^PA)2exBvAU<$k3S zo!YDO4vOEdTKrqRVY=et%UTxg>nmf<N^e3D;EAeBzcJQ-7LeH@9w{!u9nr+QaL#1`nSDqh~a`}(5NXKSJN zmjpLFt=_4#L_;rgf2(Gk>zewFx=W4-oLQakQtvL3k?2y)i z664_XZ>!=&E*4yj{u{Tvv7+bld0(ZLgx|@}T6aEoS-VyFn6s&?@Rcj_sdo-XIQ{IK zd;3|!yxCT*`8%0%7l}T7-ScPa@#J@Fl_i^Qxh8s#}&H_$eQpd&5-XeJtmn%+OtD#F#(Jh{?rF z*FUTme_MNF$hI8DUX7;g>wK*@&fWg8-jmzaewE})>zChc_OE)N*KqG)gTkq6FU->R zWQ0!o7G0h15EsMX)x%;bclzL_XC~J_%=zwA|8A;WGym6{>^u$jOuMFLc36Ht#{KrP zn$`9si4)d`COu|jaXiwuS2+1Ni>6A_+K)ff94A$l#U2+*n|*QT)8;wx{K*SHiRgC~ ztx!=3yUb}4(rjbsw%&C4q4`?Thpn&qN0_fOaJ_Vtt3t(?Px{Xf>shn3Ufwmk%QeOF 
zgG<_zlSf0RL{HL~^KpCUHb1?OKc5C&+UEMflCA&wKQE6}_m|4`O_}#vc!|*Sk2zrr yi)+aKlzS@ulbIUzvI2OX-90=A54CbQw#vWV;`RY delta 1188 zcmcb{d4+R=PQ6ola7lopiDjfyVv)CFR77QkrG7=4S5ahvL1en0sgIGin^{1JsaI}9 zF_(wATScNrntQfmRD_d{kx^!NNTq3xsaaJ@ka?PUM5MljrE9X2zE7sJE0?aFLUD11 zZfc5=si~o*f@e`wu4B4_n@MPKrg3_9gmYO{c%-3!l3S%mPFjS!yGd4XPN9phdthLG zrCCIzrAw|SmqnIyL_tuIe|Uaro_kT4iHm7rd8JE9sc%k2QE_RGPfm7PzE^&tt8bO- z#E;_jreWr;8AX|%UZzf!;r_viz9z|0zK%Y=?j{x{CEl*d=GrbkQSN?TxsHxp$)Vo< z`B8-_*+E622Igg^83k@8+TJFni3LH1`UO#yZYEy&VIH0-IgTb=y1Kdw-X5jKE){8s z9+8;=20;PN#gT>~7Nv!T{(0tAL8(Qd>4E+KePM`A2bO-q>LXtLlw->Rf7 z8T@{8uAO_OZGI@?<00GiQ;*3iPIRx>rYOR3`z%3j&@!w5i#9ATTJN`=sfi z;~bd*d+Of>yQTj*bYzz4LT}I82N>8+RZp{?V=sI&aO%Bd?5jdbcOLfLc*#H1-D>;y z$2}6vUHkqVmPk8z;G2$+UY31wt6bL7d5;6y1@eEfpSf^VA?$O%_wIkMgbx~qvGpF( zdswr=VX4%2zF*S+YpWgwAAUa1!lXIeCPTAo^Sfm;+fPoKSfA`|^H+ZVB(Jt(F`sw; z6={0EBKmXY&VOeauRY3G%&Y8sc5c|73$2B5N8adL@H|<#?y!*Y-_zEX!8wP2F)i&A z2vA+twmX%GtkhL=_qk5E5V1V)=lAu0nXc!x+let7IeK%X zd|qGY{Jd63jZ@D!adC1@y}+GsVn^EDcQI>(o@O{-ck)Hptg}p4F3!k|(D)qg6>$l5uH^MhEZ;r1Ru)zA0&uLn2QvdAZ>6;25M{ZQ!F(~bY6a|6G9nDV{$ zs1iW$Bl&xD}l0 z8%~9M;|Xk0zy15A?b5~f%h;k1&Tm+|-gPI-6v?dc30Ix^k}Xr&Gz-#ggu{xSe|%!O zxM~`6|HC_{H}w2A_J6Q^QO)};uRE(AzYsV(_4Mu56)TJ)>>0x>{O*0rO0CH%+Qe0M zT(Y!mRq&|>_D>t8+i}&UnD|d$>6sy0`#9|TWR>a91DY95Z;(;FSv+k`;VVD uAC{7O`qAcWXhs8{;d@=CvMZbwOKt{lTq%;DnRf5U)eS1SrN22n9smHKEfW&} From 2da0244d42c09518d50f87167a75b4df4eab60ce Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 02:06:24 -0700 Subject: [PATCH 27/50] Stabilize Apple Namespace build caches --- .forgejo/workflows/build-apple.yml | 8 ++++++++ Apple/NetworkExtension/libburrow/build-rust.sh | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/.forgejo/workflows/build-apple.yml b/.forgejo/workflows/build-apple.yml index 460b6b8..7fce5ca 100644 --- a/.forgejo/workflows/build-apple.yml +++ b/.forgejo/workflows/build-apple.yml @@ -33,6 +33,7 @@ jobs: CARGO_INCREMENTAL: 0 RUST_BACKTRACE: short RUSTC_WRAPPER: sccache + SCCACHE_CACHE_SIZE: 20G 
steps: - name: Checkout uses: https://code.forgejo.org/actions/checkout@v4 @@ -82,6 +83,12 @@ jobs: "${cache_root}/apple/PackageCache" \ "${cache_root}/apple/SourcePackages" \ "${cache_root}/apple/DerivedData/${{ matrix.cache-id }}" + rm -rf \ + "${cache_root}/cargo-target/${{ matrix.cache-id }}" \ + "${cache_root}/apple/DerivedData/${{ matrix.cache-id }}" + mkdir -p \ + "${cache_root}/cargo-target/${{ matrix.cache-id }}" \ + "${cache_root}/apple/DerivedData/${{ matrix.cache-id }}" echo "CARGO_HOME=${cache_root}/cargo" >> "${GITHUB_ENV}" echo "CARGO_TARGET_DIR=${cache_root}/cargo-target/${{ matrix.cache-id }}" >> "${GITHUB_ENV}" echo "RUSTUP_HOME=${cache_root}/rustup" >> "${GITHUB_ENV}" @@ -90,6 +97,7 @@ jobs: echo "APPLE_PACKAGE_CACHE=${cache_root}/apple/PackageCache" >> "${GITHUB_ENV}" echo "APPLE_SOURCE_PACKAGES=${cache_root}/apple/SourcePackages" >> "${GITHUB_ENV}" echo "APPLE_DERIVED_DATA=${cache_root}/apple/DerivedData/${{ matrix.cache-id }}" >> "${GITHUB_ENV}" + df -h "${cache_root}" || true - name: Install Rust shell: bash diff --git a/Apple/NetworkExtension/libburrow/build-rust.sh b/Apple/NetworkExtension/libburrow/build-rust.sh index d54bd71..d3886fe 100755 --- a/Apple/NetworkExtension/libburrow/build-rust.sh +++ b/Apple/NetworkExtension/libburrow/build-rust.sh @@ -79,6 +79,14 @@ if [[ -n "${RUSTC_WRAPPER:-}" && "${RUSTC_WRAPPER}" != /* ]]; then fi fi +if [[ -x "$(command -v rustup)" ]]; then + for TARGET in "${RUST_TARGETS[@]}"; do + if ! rustup target list --installed | grep -qx "${TARGET}"; then + rustup target add "${TARGET}" + fi + done +fi + # Run cargo without the various environment variables set by Xcode. # Those variables can confuse cargo and the build scripts it runs. 
EXTRA_ENV=() From bf4b270db5c569d64f3f4da386e473fa9a2e0e47 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 02:07:41 -0700 Subject: [PATCH 28/50] Increase macOS Namespace cache volume --- secrets/forgejo/nsc-autoscaler-config.age | Bin 1395 -> 1395 bytes secrets/forgejo/nsc-dispatcher-config.age | Bin 1746 -> 1750 bytes secrets/forgejo/nsc-token.age | Bin 1238 -> 1238 bytes services/forgejo-nsc/deploy/dispatcher.yaml | 4 ++-- .../forgejo-nsc/internal/config/config.go | 4 ++-- 5 files changed, 4 insertions(+), 4 deletions(-) diff --git a/secrets/forgejo/nsc-autoscaler-config.age b/secrets/forgejo/nsc-autoscaler-config.age index 15d36ff8c5fb63780b43bda0cff52f9346a3061b..e315d30120e5032fbf4840e88c9265333e084485 100644 GIT binary patch delta 1348 zcmey&^_gpePQ9ONxpQz-VnKkXn^{4LWo2MNUT$E%g==0~dR9bHS%77DYG7rKL2+%0?pgYU!CA@4Ii_h5k=~(BW+mokSwY2I6)xt6 z7Or7##c957CGJ^SL1Brek!}{+9*O12X>Q3L2A1Z99zi806=soKy1KdwhGA)jxrIq# zp>DyRp4lO07T)ez#X&`dmA-+V78dRnB`yW^Zbnfig>D9cTp=z}>0y0a?tGbl@bqcU zdgZuHKSj!57+s-{IT#e6`HxluH~`N0d8lPFA_K zLug3m`ri5rw?kik4QJgUqAjf_E%{jM*TtJ6xhIX*^@v(8eR5j&bMmsUtc#=)XZvOz zZHe=AooT-Gf#6(mMm5_YmdUKq`b$*rAN*cB|E}1-z@TiulYdGzSmqiTtCXugYs}J& zb&udmK37u9zG4kWhiUB2B@*mAmN7lLxkfy~LX{;^EI5R-e(8_UiDK0?i*sepGe7LC zikUgpQ1mNIi73G*=M~ z`?Y!3N48II+^p_uElU=-bo9vd|0P`_2Y5alxLTOCAmeC4oP%}ulzF^@M-KCeN^d_^ z&#ZH}>e9VDt=JP=8fMp*@AP*lm}t}DBB~djR$Or+PK9+x-OmgX*75k2YR) zp;B$Cz}|RHnb)dcL$>O~-aq^A#NJJLl_3|CPuwtGW*WHuC)fMMlOv9^Pm=p{@MTTA zPtW32R)rroI&OJCi?8->Sl}y%t!I@d^43n$ZqHtPVV?P--91-xuiFZ@e3nqpxheXc z`Ow^7z9DWwyKaZS-;{kelfUkQ+*^~Mj`f27w}_}2tgrs{)JjGr;`0&HRu0CT&E+O0 zUCSAzzE%c#xhwu{P*Pm@K~eqN(%(*V5C5x_&-*<)rb^!Z&Wm-^#N)&Y!($xRESg?- z&~U}^e#6=|Z#moRa#Wb>C;n9xl~0N9`r`O?K7<3|E%xWYw6NC5`P0WE$CL; zK4tOSI*FN{ZY&xu33hf3Pc$AZ|Em<#Kc&X1`(XdOFmLhVx64{RVl`(^jrw#st+D;s ziesKP3*YD67hEcPe&ce*jTW}wC&wR~BK`cxB3D{>9%x)Ut(Z#qPe?oAeXM4LUD11 zZfc5=si~o*f@e`wu4B4_kA8rAuA_O9QC?(#pKq>zn46_fNr9oUUy6ZcVsUPC^lbK0ChL3h|nsI*d 
z#E;_j1?J^}h8blgxsJJxu9itzr6yGc70xNeK`!3r-epl{`4+A|uIUvaPL38_x#=N( z#sxm!rR7CN8QE@yfkiH} zo)tk(&WTwe5ox7`?&%)xW~pwuX4)RXDc;`o{-ti_mQFdITqo-O`wMl?x^QXU^7bAX z_aDbX=65nlE@j_x+Pkm9{7dJzh;IL-<^;?tVSAOR{*-52=pfpi{sQIGK9p1^h<-y^@CsXrGDSRwK%LFlsh^0_HX{;XnhxK-!) zQ00J2(nU4JZ zED|QCX7km{ib&n}`EY5u*qqyPdEf5U>8xp->8CO|;?|N=^-^i;6*@C|q^euqPdUJH zKl<3(wg}7gj&t#b!Ws;7J-=QFXZ|enxif3J?-qmGk88inCf&@kU7vD$<=k(Ze~ND6 zTq_YL5XiawPvy0nGiUZVGg=FB=G{L#F~e(9)4%AV$FcwTzMY#tVL8vIiMI}=wk|TX zHPNzN%A{Uz@-%C9@X5@2#rt2ravTagHD9kq!eh(Yn7!u=84B4}pP#iSYThw@R%!+xhRuUcY);CzVvU>7h@y><777*SIb%J3K*5uFv%x^D&S2 zOa;s8O{R4uUF5m7y^7=HTqUt7hF{N}W1nU(a9oIra zYjdBz75y{!oBQ7MHv6pQr!VxmzuK6_Y8$PR$g;Fn`*5^*z;pQIr~&5-O8*9 zJlZ8b+xf}1N&G+6*L!VSu+MF|Q*Wv*kNFDjniSc(pH5Hww{7U0q%^Jf)3OFZA!}p( z!cWiEcW+;I<?_aoZ)3_na=PS E0B>DdcK`qY diff --git a/secrets/forgejo/nsc-dispatcher-config.age b/secrets/forgejo/nsc-dispatcher-config.age index 961317f86753f52dae9ebf43e30191f9cda3c9da..6fa883bf53aeafe5403bc703edb8d8cd683c4910 100644 GIT binary patch delta 1706 zcmcb_dyRL3PJKmQV!4@7Sz2UdPLyk;d$Oaif1*)fn!b^xMLbEseY1+rB`xUsB1(}s84xhp-V-HWwNKUNv2tmBbTn7LUD11 zZfc5=si~o*f@e`wu4B4Fk$1AYwsuNdV46p2c9of{fm?~ITUlVSxoKo}n0``PUXo|A ze|fIIXMV6Nmuo;(NO_`hkY|#2NobyFmcC_TsHbs(Z-`G>xk0A3L8(z%Sdvq@cX^`Y z#E;_jsZrjp9%f->*@0mu`q?GfW?54+>+KzNz3eV}f&6Qt4y2#F!D{`~ z*DgJGW0&2Whh7nhOv^bv<=x6$!nOMu#Q*;)@1MHyfamWA=Od#Js_TRmd{$f%xVuGA zqRcyxoo7cQ^V>{@q%0k#lVUz>K?fKg)y_V@J*DFN{|U?LFBCgi_Z$A)qZ8e$fA^4Y z+6R^!(dyDYLJC^~pKtSP@Gd{%z|)pg$<$le->KB_Be|DlGQ&Oxm0K&n&#w2{)2vxu zocsCEE~{-$Up;EMu4f!qn0k6j+ia`YD?zfR#Szu#FaBMX`=;eAPyhOCyUq7nPA|W; z?K1oO1l^zs*~{)7T2Y^Qt$VYMe3y9MtK_UBYiIvC_xHL-edUCI2Rp4DH}a-rPG)UO zD&^Om?(0zV==K7QsSUZa6W^TavTgm=^30h3_WGM85hpcYtW29$d!^Q@>x;UMtBgh8 zts+jQqYfBA;*5xh&6$~fs&JLYKmmz2I)S1ZRj#o=IWefF&<>)xl% zIkqcEY|*W`9lSw8KQ9TrJ+OJgY|l%(*2;3+6`wxkD%&c)(ivq5kBhDwOgMYf=lZSu z1D*RlEh4*DF4^h)DnzySl4M3Xd+zV4{7&~)h2G7)qm&eUYTNIbVh3DC#W^d zmj1%NOyA8sZ~V^JvHYEv(D2D&a`%$0llOU^)m@$BQj8J=LDbBUjLH;r07fU3)IP z()sXN;Qr*QPoBpkBV9w{G@0k#D3p 
z$x?{5_3RNDwLrbH&x;Q!ICmYZp1x|gZ`L(~K4y#WS2F5PpIvEESM^n8GSicv$;)5P z-k_lPE^y`QX5D8m&oK5^{mfqS?M^|Tm$tET-h(SsFDYJ|AT`->6>CWP{K)~HNtSPe zKX*NPS7yMmUwvH{*DUTe8`agTw67I4^D3>-Hg^@;F;z^K27R9{vR!2_!*0j0x z?QtF3f^f;GUgI#&`}J`OGk#su=@l1y$o1oEuL$>Y`Ker81#IbBXT%ze*OhqdUh|Ri z=#`WCy(Y(1^KwbywaC9;^h1=(j(^iKVOqMZDooJqsc>fhy}MjV3mLk(GFp76WG+`a z)}x*MX!9E})!&I6l`CT|)t{NC>t*0kUtie4u;DT5Gxws6VNbln%zxKw{5qqTu|!TN z=kMI=b*ub((x*-jRf}P*t?l?F`}cn{Q_`LO2Ay9qYi%#4`pwx_n$a_5?T`6N?V;LJ z^cGCk;`7|*@g^s1&if9R#l?}1J5sz3iS1hZGhwB(ZR*1p4{oq3-D4{iv0=_#?lZ?j zS8Km+O6}9qo1%w!F0N7%I<&adbwSbS2-T3)H?X+#q4aa Ql6kw{C%0Vaz0d3o08S_fE&u=k delta 1702 zcmcb{dx>{~PJMoaSCLV6lxtB$p`&qdWRkXPvVW9|lV6r&Xr!Zlj(Jd+SFUz)Qn^_~ zI#*~#zMpnruy4ABMM{B7uxGGKN>#aKo{5uzg?5g1P@cEBxkqS#cA}|!GMBEMLUD11 zZfc5=si~o*f@e`wu4B4FK~6zLaiwdhQ&CV#T25$&zgucfNLG?ZdT4p1nTvn1k%@PR zSC(&XrcZe`mr;IDfJbPeM^a=`a&U@EVQ98_c}j7aS7xZMWoe>OP+DPma&~!9j)$M` z#E;_jfo@eUl@S$zX4+vDk?Afz0f_;no_?ODWhRMUk>y^QhTh5EW>IC4`9Ypsi58VE z+MfBDL18JuX%?a0-aeVy1}TBreieoluCDqP=4KH-+M)XHWq}r4y1KdwCWa-2g<(0N zZkEYzsgap^xkcvQ5yd4DDOp7p#$j#|UO~zAm4+2%Au0YQTvDB~p~(s2Yi{^H*DPZ` zH|bk=0eecqtAxOZLY>zG0w(1xXK&CGZLwB=rf2hD&2O>8|6gc+F^G0&Ueps<7wq_T zkLQQoR_ne`-?#C`=l_NGdg>zeWfKar=9>38c7Njy`?WFT#$-+F@0#In!w-IXevvIm zPB8ZN>v|zc|NaYhd(-zf=<}`BnW6o7>qqSkU;i&Fc0d2c@=f@cZb8nRb*t{RKelK{ z(&?SHjz#HCa*hk*(tb}1xG*~c`B{Z4N7>(@ax;XeAzf?TCHcC9Vz zxqrUrzMFyJsepL5hkoyQHq>q?SAa4J)MXWX$Rb!RT#o!_O|cvvN9 z$^4GL)wRxB?!CG@(-iTPkp7gIE^+>@@Po&4L5K1yJ+N?;bW zFgSSM;#Zp1!Pi&h^%P_*+LagDh%c|RTkjpq;-SJTxL(3UZjqw=w+ zG`AQW&2M76cI(==+?xSg7I1BRQeJ%ECr|j>fF+l@rhNP&@J4QL=g!V^*|&nZo++}& zz1cf?Rzsxm{g!6hip?g7-#lx zW-hh-qIaRjc1k~s?rZboHlFJ~)}GA1SDDA?nRnk|LY~f9$Ih8|?dz%-tQ#&*UsF6y zr@a2(_e%vIrQb5`dvL?RZsQxT_;;pZA7}6HdH4RSB>S0;_cMFn>CL>rKGS?U?{_hl z<-hJ-TwZ;5=eugP>P@PWC*S{mRrm0nM-a1*=wkD-HJ5&zh?qJx#$I>Q+t02t$27KG zF{@gWZkHJ3!!Jv?MNiAOnO_M`fn`|TV2X5^N!&E(Xck}}1_ zJU#ns`;||MwpBiNO-tWFF=k6>y-CLBq@BU=XrM#a)A|2;Guq^oy zF@fje)R2-`!5!XD_SS~A9DcQuJ@Qf3jU?y&5nM&9M4PAjge4g!9TJ$g{%-p}Q*XV> 
zTOq0s{zb34V%$<0QorucgL1+BN@soTo7&pX%v`qcW$nWM8z)IKuid!WmDS+Tj(6|3 z?wb3(UgzPCE~B!GGd+E`ZhQWB+N$~ke6P|U3obpT*WO{NAd_YFR(b6#!>P)@q8_+k z*A#rctGywqf6n*T-z$0FZ_=2$$SaV-Ci-Zm#PgM_qSYUST<{9 diff --git a/secrets/forgejo/nsc-token.age b/secrets/forgejo/nsc-token.age index ada8d0cd52960b774c27c43ed0a4f87cfb4a7cea..50ca059a5801777749087ab18e22bc670deebee2 100644 GIT binary patch delta 1190 zcmcb{d5v>|PJND{VP0N&l2Kr3p?Q##uc47!rKe+}S!kMnvPGD=Wr$@;R#mb|gn34| z30GLLv9^DbtEEX^m3OLBu8(ot;CO!r} z6&4|$d4^S9kpcc$1{L91WodzhQLX{r+U_Q4hR#KWY31RLZaD^Ay1KdwWl^pH&fXSj zQI+P|`Vqb<5$;7vekMjG9swx@Sw@8g5vB$88D?4K-p-MMTq?(}FBIjn{B(H||8w4R z2XjgsH=KB*y2x_*0>+ct7ymd19g?#TdX&U5@19Ca^%aG^b>$^yS;@V2r&#o#-u?gX zeTF(i=oO(0{Ng&F{;i&wX==cD%8|mFH{Bw2drDyLoy;y6da+E}vK2R=m^g(h|R=Ja;ZJ z{MF`|=VE_6coJ{Bu<{h)`2hw;3rgPA9kAc@_}|+zyy77;- z@nYGUd*`o*rsOgFt8j2Kixv6$Q)|CK@2219ME*TCmYVUbzU-H`lfcz{|2f?LXEsf~ zr6w7xF4I%)Z*@xezqC-GyXDa?o}&kZ4{)++Tt=CtfnKKR!R~ z@7ku?uWu||+5~=93#L^}@GW>ZY5TOWqiR`3CuUA`m2#i05~#eV=b7~1b$br`PEn{> zC^WrhzusxVkfR4aNZhXXUHsotX8Ikq;})02-#jiBGQU>Z634dpu)Fe&Nk1EYSLm9X zxV>Do@`rQXyyuI(7_^wy&F8uNA}Z$aTFu)t1a_=unX%&at{iQzRqp2_N_ahIZtH7T z+*KZX=+RnJ#fSgq?YkH7Udu%7kocS^v8})knfg5?%Ez7dET--J5Gm}nmq+5TpJmdW* znht5}G}C6Twz7S}n$Tif^76RAbIJdG^cwV&TO{sXQFX-51_+bP4^I zwUrJve{;@Ik}-YaCYi{~ljnylS~qLTw@<~{ucKet+xVWZef;Cd%u~x8mjwImxRc2n u`Isjn!8PmJlkMSen^xyP>Dcq?{QT|PLwD`0)7;FFl+04=a--{}QWXGh9}qSG delta 1190 zcmcb{d5v>|PJMZbg?UcCV?lw1uTNlZW?8DUVVIk@b6HSUXjZX)c~($TMOdD`cX48s z0avhdwvoA!Yo1@SQ^5lzQqx_ zrp`$f9z{uc!5+yb8I^7!k%`)7`i>Dk0T%xCQHh!1RUsb5T#j^?t@ z@y*?~re`a<-hYiZ)4n@rOI+Z!RXbSDWIo-$W&cdaElonH=gQ7Zy6*H&k>kIq{g(@; zj%8L$u8WSv(wQprQsw82 zMN6|P*B+|hwLyxf<%{LjCCBBDh#zg{IX^o#CHz&`lpmW;TsArvuxG}s#Pu_lOtzc) zn*AS#d9+5rzID~s+|1X-*?MbITJ85NShMkPm3+&}^_&}zFuuR^PA)2exBvAU<$k3S zo!YDO4vOEdTKrqRVY=et%UTxg>nmf<N^e3D;EAeBzcJQ-7LeH@9w{!u9nr+QaL#1`nSDqh~a`}(5NXKSJN zmjpLFt=_4#L_;rgf2(Gk>zewFx=W4-oLQakQtvL3k?2y)i z664_XZ>!=&E*4yj{u{Tvv7+bld0(ZLgx|@}T6aEoS-VyFn6s&?@Rcj_sdo-XIQ{IK 
zd;3|!yxCT*`8%0%7l}T7-ScPa@#J@Fl_i^Qxh8s#}&H_$eQpd&5-XeJtmn%+OtD#F#(Jh{?rF z*FUTme_MNF$hI8DUX7;g>wK*@&fWg8-jmzaewE})>zChc_OE)N*KqG)gTkq6FU->R zWQ0!o7G0h15EsMX)x%;bclzL_XC~J_%=zwA|8A;WGym6{>^u$jOuMFLc36Ht#{KrP zn$`9si4)d`COu|jaXiwuS2+1Ni>6A_+K)ff94A$l#U2+*n|*QT)8;wx{K*SHiRgC~ ztx!=3yUb}4(rjbsw%&C4q4`?Thpn&qN0_fOaJ_Vtt3t(?Px{Xf>shn3Ufwmk%QeOF zgG<_zlSf0RL{HL~^KpCUHb1?OKc5C&+UEMflCA&wKQE6}_m|4`O_}#vc!|*Sk2zrr yi)+aKlzS@ulbIUzvI2OX-90=A54CbQw#vfuOFcR diff --git a/services/forgejo-nsc/deploy/dispatcher.yaml b/services/forgejo-nsc/deploy/dispatcher.yaml index b906b75..0ce95c0 100644 --- a/services/forgejo-nsc/deploy/dispatcher.yaml +++ b/services/forgejo-nsc/deploy/dispatcher.yaml @@ -41,9 +41,9 @@ namespace: size_gb: 40 macos_cache_path: "/Users/runner/.cache/burrow" macos_cache_volumes: - - tag: "burrow-forgejo-macos-cache" + - tag: "burrow-forgejo-macos-cache-v2" mount_point: "/Users/runner/.cache/burrow" - size_gb: 60 + size_gb: 160 runner: name_prefix: "nscloud-" diff --git a/services/forgejo-nsc/internal/config/config.go b/services/forgejo-nsc/internal/config/config.go index 6a93e69..b0d2ebc 100644 --- a/services/forgejo-nsc/internal/config/config.go +++ b/services/forgejo-nsc/internal/config/config.go @@ -193,9 +193,9 @@ func (c *Config) Validate() error { if len(c.Namespace.MacosCacheVolumes) == 0 { c.Namespace.MacosCacheVolumes = []CacheVolumeConfig{ { - Tag: "burrow-forgejo-macos-cache", + Tag: "burrow-forgejo-macos-cache-v2", MountPoint: c.Namespace.MacosCachePath, - SizeGb: 60, + SizeGb: 160, }, } } From 8678ef61bad23a38f28d0a6080b5471894614287 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 02:13:12 -0700 Subject: [PATCH 29/50] Shard macOS Namespace caches by lane --- .forgejo/workflows/build-apple.yml | 44 ++++++++++--------- services/forgejo-nsc/deploy/dispatcher.yaml | 12 +++-- .../forgejo-nsc/internal/config/config.go | 16 +++++-- 3 files changed, 45 insertions(+), 27 deletions(-) diff --git a/.forgejo/workflows/build-apple.yml 
b/.forgejo/workflows/build-apple.yml index 7fce5ca..5231f7e 100644 --- a/.forgejo/workflows/build-apple.yml +++ b/.forgejo/workflows/build-apple.yml @@ -74,30 +74,32 @@ jobs: run: | set -euo pipefail cache_root="${NSC_CACHE_PATH:-${HOME}/.cache/burrow}" + shared_root="${NSC_SHARED_CACHE_PATH:-${cache_root}/shared}" + lane_root="${NSC_LANE_CACHE_PATH:-${cache_root}/lane/${{ matrix.cache-id }}}" mkdir -p \ - "${cache_root}/cargo" \ - "${cache_root}/cargo-target/${{ matrix.cache-id }}" \ - "${cache_root}/rustup" \ - "${cache_root}/sccache" \ - "${cache_root}/homebrew" \ - "${cache_root}/apple/PackageCache" \ - "${cache_root}/apple/SourcePackages" \ - "${cache_root}/apple/DerivedData/${{ matrix.cache-id }}" + "${shared_root}/cargo" \ + "${shared_root}/rustup" \ + "${shared_root}/sccache" \ + "${shared_root}/homebrew" \ + "${shared_root}/apple/PackageCache" \ + "${shared_root}/apple/SourcePackages" \ + "${lane_root}/cargo-target" \ + "${lane_root}/DerivedData" rm -rf \ - "${cache_root}/cargo-target/${{ matrix.cache-id }}" \ - "${cache_root}/apple/DerivedData/${{ matrix.cache-id }}" + "${lane_root}/cargo-target" \ + "${lane_root}/DerivedData" mkdir -p \ - "${cache_root}/cargo-target/${{ matrix.cache-id }}" \ - "${cache_root}/apple/DerivedData/${{ matrix.cache-id }}" - echo "CARGO_HOME=${cache_root}/cargo" >> "${GITHUB_ENV}" - echo "CARGO_TARGET_DIR=${cache_root}/cargo-target/${{ matrix.cache-id }}" >> "${GITHUB_ENV}" - echo "RUSTUP_HOME=${cache_root}/rustup" >> "${GITHUB_ENV}" - echo "SCCACHE_DIR=${cache_root}/sccache" >> "${GITHUB_ENV}" - echo "HOMEBREW_CACHE=${cache_root}/homebrew" >> "${GITHUB_ENV}" - echo "APPLE_PACKAGE_CACHE=${cache_root}/apple/PackageCache" >> "${GITHUB_ENV}" - echo "APPLE_SOURCE_PACKAGES=${cache_root}/apple/SourcePackages" >> "${GITHUB_ENV}" - echo "APPLE_DERIVED_DATA=${cache_root}/apple/DerivedData/${{ matrix.cache-id }}" >> "${GITHUB_ENV}" - df -h "${cache_root}" || true + "${lane_root}/cargo-target" \ + "${lane_root}/DerivedData" + echo 
"CARGO_HOME=${shared_root}/cargo" >> "${GITHUB_ENV}" + echo "CARGO_TARGET_DIR=${lane_root}/cargo-target" >> "${GITHUB_ENV}" + echo "RUSTUP_HOME=${shared_root}/rustup" >> "${GITHUB_ENV}" + echo "SCCACHE_DIR=${shared_root}/sccache" >> "${GITHUB_ENV}" + echo "HOMEBREW_CACHE=${shared_root}/homebrew" >> "${GITHUB_ENV}" + echo "APPLE_PACKAGE_CACHE=${shared_root}/apple/PackageCache" >> "${GITHUB_ENV}" + echo "APPLE_SOURCE_PACKAGES=${shared_root}/apple/SourcePackages" >> "${GITHUB_ENV}" + echo "APPLE_DERIVED_DATA=${lane_root}/DerivedData" >> "${GITHUB_ENV}" + df -h "${shared_root}" "${lane_root}" || true - name: Install Rust shell: bash diff --git a/services/forgejo-nsc/deploy/dispatcher.yaml b/services/forgejo-nsc/deploy/dispatcher.yaml index 0ce95c0..1dc01b8 100644 --- a/services/forgejo-nsc/deploy/dispatcher.yaml +++ b/services/forgejo-nsc/deploy/dispatcher.yaml @@ -41,9 +41,15 @@ namespace: size_gb: 40 macos_cache_path: "/Users/runner/.cache/burrow" macos_cache_volumes: - - tag: "burrow-forgejo-macos-cache-v2" - mount_point: "/Users/runner/.cache/burrow" - size_gb: 160 + - tag: "burrow-forgejo-macos-shared-v1" + mount_point: "/Users/runner/.cache/burrow/shared" + size_gb: 80 + - tag: "burrow-forgejo-macos-macos-v1" + mount_point: "/Users/runner/.cache/burrow/lane/macos" + size_gb: 80 + - tag: "burrow-forgejo-macos-ios-simulator-v1" + mount_point: "/Users/runner/.cache/burrow/lane/ios-simulator" + size_gb: 80 runner: name_prefix: "nscloud-" diff --git a/services/forgejo-nsc/internal/config/config.go b/services/forgejo-nsc/internal/config/config.go index b0d2ebc..5750196 100644 --- a/services/forgejo-nsc/internal/config/config.go +++ b/services/forgejo-nsc/internal/config/config.go @@ -193,9 +193,19 @@ func (c *Config) Validate() error { if len(c.Namespace.MacosCacheVolumes) == 0 { c.Namespace.MacosCacheVolumes = []CacheVolumeConfig{ { - Tag: "burrow-forgejo-macos-cache-v2", - MountPoint: c.Namespace.MacosCachePath, - SizeGb: 160, + Tag: "burrow-forgejo-macos-shared-v1", 
+ MountPoint: c.Namespace.MacosCachePath + "/shared", + SizeGb: 80, + }, + { + Tag: "burrow-forgejo-macos-macos-v1", + MountPoint: c.Namespace.MacosCachePath + "/lane/macos", + SizeGb: 80, + }, + { + Tag: "burrow-forgejo-macos-ios-simulator-v1", + MountPoint: c.Namespace.MacosCachePath + "/lane/ios-simulator", + SizeGb: 80, }, } } From 52c30484587a247bd0c1ead5ea76c465fd07755e Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 02:13:46 -0700 Subject: [PATCH 30/50] Refresh Forgejo NSC agenix secrets --- secrets/forgejo/nsc-autoscaler-config.age | Bin 1395 -> 1395 bytes secrets/forgejo/nsc-dispatcher-config.age | Bin 1750 -> 2015 bytes secrets/forgejo/nsc-token.age | Bin 1238 -> 1238 bytes 3 files changed, 0 insertions(+), 0 deletions(-) diff --git a/secrets/forgejo/nsc-autoscaler-config.age b/secrets/forgejo/nsc-autoscaler-config.age index e315d30120e5032fbf4840e88c9265333e084485..5d9aa28de8171c62917d52908acd821b19402659 100644 GIT binary patch delta 1348 zcmey&^_gpePJKXRc0fd6MW9ExNmWi{d2YIEuz`W8abkX-r6%Tn8D1FyCGHijiA7;yfq~^*y1KdwerDND+1?h~ z#^&BerKzPEfmPW~u08?ohF)Q%6_$qPp_T#lrJimEX#rXJT*e3Q_HXf6lYVx=x^>g1 zr16K9FH^X&(6C_t-nlp9-6S^J+pqo2vDME)oIhdn=C7{Qeb-i@cw`V&3$Xv+2m45cSEOnv?BcruKTO z_*5~>kYuV)Furpzsi@XNOj26l=Yn~Q_B?zr#UxaVz3SM*%Lj8lYAj7JmRoMP+~C{8 zb*qAcdw2sK!sHn1S0=olzPqipY*rbE%t7P7;!V0<_fKA0dNIRQVdvk_e)c&tOw7cq zKWqtc_1EliVGPy$;-~JR{iONEoBL1HzA^B)7G_QOUDS7syZ)ZCthM7Kqj=AXi<=Wz zTdr-3lG*$s;AqV4SsNc-H~iBmD*9H&mM82+U-8$u-}RO>%wImQCScilrt=r~c?gJy z{a$df)kgo)hh=+`=Si;K<-H=&^8U|`)2FS3pIn}?Jgi}EcHQqeF@KJ_zuD~|? 
z6SFV2Fn9cue-*%SMI_1Wx{75%o71lJFYM(KmR}^yWD+K+%xx2?eqa4GdZYHGx#4rQ zwS|HoEV-iRnKH$cJ7V>xCoNCvjONMRWr|_*pZM_D;XBDm-uHGN+H^UNKkCh^`adVH zTAz&b6pHXG@p-sR#6&#XiY;eXTJEu3vNKocm#~{}(tdH|<(agnmU@Bn)F)rdUN`YH z$6o83Y+Ww=5slOt>!5IQ+4*B^Ckp@O z3vKN9f2&6A_>Xw$$qJp0KPM=yHSYCw^-8Xfe0bF`M3Qy?mbC%GwQv5nF18Gu9C@4X z;f{0@%bDVG@78NI%Uj*KD6seAZO!bs+p%XfZKiB&7J4n*m_Ps4iJfUvYHz+(xmIs6 z!;qaJq0?>43WGq~=PUO8QB;|BrCunYWuA~>W8kdQ5!07GolvzqrPW_=mqp3<&jnYR zr@fNUm{9MuOP$lO?y(z3lj@G}`>T=@vcGq!D=0LsZpr4%Guv#~79Q`v;z;AphgIHR z7SDH@^Z7g9s^-a^DL1zqsXop)d*+%+CzCYH1s@mNIbSc@#vOdz{6`h5@%QJ;QbX2$ z|01<0K|w)r_DT+&Qvv+4FZr+iGVD-(e{{8Hg0kPE))yWdi*INzyS{pFY-7;#e37}D zZ3frnY$NkuVx+l(xwotH|b-uiSu-g}@W zXUWbDLXLtqd!F37pZoBFSo4)TkM?scXzh1oV7Os?C38VcWzB@|&55!9k0dkZ&N;f1 HEl~ylJb7fW delta 1348 zcmey&^_gpePQ9ONxpQz-VnKkXn^{4LWo2MNUT$E%g==0~dR9bHS%77DYG7rKL2+%0?pgYU!CA@4Ii_h5k=~(BW+mokSwY2I6)xt6 z7Or7##c957CGJ^SL1Brek!}{+9*O12X>Q3L2A1Z99zi806=soKy1KdwhGA)jxrIq# zp>DyRp4lO07T)ez#X&`dmA-+V78dRnB`yW^Zbnfig>D9cTp=z}>0y0a?tGbl@bqcU zdgZuHKSj!57+s-{IT#e6`HxluH~`N0d8lPFA_K zLug3m`ri5rw?kik4QJgUqAjf_E%{jM*TtJ6xhIX*^@v(8eR5j&bMmsUtc#=)XZvOz zZHe=AooT-Gf#6(mMm5_YmdUKq`b$*rAN*cB|E}1-z@TiulYdGzSmqiTtCXugYs}J& zb&udmK37u9zG4kWhiUB2B@*mAmN7lLxkfy~LX{;^EI5R-e(8_UiDK0?i*sepGe7LC zikUgpQ1mNIi73G*=M~ z`?Y!3N48II+^p_uElU=-bo9vd|0P`_2Y5alxLTOCAmeC4oP%}ulzF^@M-KCeN^d_^ z&#ZH}>e9VDt=JP=8fMp*@AP*lm}t}DBB~djR$Or+PK9+x-OmgX*75k2YR) zp;B$Cz}|RHnb)dcL$>O~-aq^A#NJJLl_3|CPuwtGW*WHuC)fMMlOv9^Pm=p{@MTTA zPtW32R)rroI&OJCi?8->Sl}y%t!I@d^43n$ZqHtPVV?P--91-xuiFZ@e3nqpxheXc z`Ow^7z9DWwyKaZS-;{kelfUkQ+*^~Mj`f27w}_}2tgrs{)JjGr;`0&HRu0CT&E+O0 zUCSAzzE%c#xhwu{P*Pm@K~eqN(%(*V5C5x_&-*<)rb^!Z&Wm-^#N)&Y!($xRESg?- z&~U}^e#6=|Z#moRa#Wb>C;n9xl~0N9`r`O?K7<3|E%xWYw6NC5`P0WE$CL; zK4tOSI*FN{ZY&xu33hf3Pc$AZ|Em<#Kc&X1`(XdOFmLhVx64{RVl`(^jrw#st+D;s ziesKP3*YD67hEcPe&ce*jTW}wC&wR~BK`cxB3D{>9%xcmSb1cWZ+T*Igu7FeZ-&2P zGM7$-RiFcB(X{u9kRk&ePX{K4BZ1y1KdwL5>-rnMtNW z;bp$AiQ2BENu`l~F2=r@IR%F0X`ZIpW#u9D7HNJt&LI`~T>XYSIpgJS%iCDHs=Ri5 
zIsf~_?|1erX;m|MGUd}{%~$HFmWfPfHYG?!*-X<_di%*nQ0l_Qsj@8P_I$RlYdb6- z+)+w6$#RSR?Xr`n)0IBB8NEBYCT2Zn#lzf%tK+pFwR7~mXL7sQ?-a|szvo(haGRFx zTLbBW>Us}{yYE%>-->2iZt0S}`%F~&80+DmTmC$1=1IpX673P|_Z|vbxP0*!-xB{@ zwYk=6{=S8gazaPqsa?6d$LqASsnW}?e!k*e|8;u)6VseMM(;l;Ub)Pv z)%yRx?#(9VMKffd7(VEq~-#&56cv0Oace^2F}Bw3f~N*~i>yR08Fn=4)xo-}<^U+(3P!V&>d%JtKB=CRHy zxG*c`Fw4JrOK;40wLbS*qhd^3!lhSpCdfRDuPRyf`pTA!9pNV}9L0OTILf1M{DFxsVw@;<&f_lDZc2ZrN6J140~eIyx4+m#Y?xeotb#Kh>u@v^`pJ7 z_g~^{tTp_uaoi!}=Z%a{?_aNbX*1zcLw(@&BV2dY{x~?Qd|;T^FnzgxqGama5O%)% zT)GKW|7Kh0I6f6(x_bOp*ioNZiz+hZS4``7*51y4$!e!`;3<2vwa1zMe-S&ATD?QZ z^!SPoc0FQO3LeY9*ltyS{OgV@#r=$oGhW2V>5I%t^JpqN>Zx&Y$Bo@{Pi>cKvddq3 z?BmBv^gJI%RSX=$D0>r1^pUs8MKPP%{SK#%&PqeqXh z{Vof8Sn8X)WQWV1%_Vk1^~-H)^>Wk}ugZPWqpSP(li-&6BcJw_2wpH(Shsn75bL3K zi{9_b#-VQu!#kqZK6IMR(965~hMgycx+I(w>SbdCn8~wtcPvQ4O>E$Qj@KaBszp9r9BjR-RmKzegm- zAab!?N2jyI)<~t5H5D&5%kfYSL=)a zmA(x6ouYSR>rx+$33ZuYw$x4LGc{GR5T7WGM@SZ7&W@eERC;9W^%YjIyfA z-5(ow7{(`^__|Go@llP;lKoamAN{P}f3ovD$8`Q!e*99W2eP>B$cfvD<>Syx-5Up<>bX{wLlm<-#|uPCWQ1_Q;Fh z^8!+Pc;-Gh^X1LnzN;q11@V%Jn$}+-I4#Lc$&nWU#*)u{#(sgJ+1U(*T=Um4zgWFeZbd4yrtQQzwD z^1V!5x{;S>U%v9nN8^pI&T@{5u;?;pgRmcbdi?9=#HGx8{qIih)8~A=`;r`WX50O^ zlW=(!E6>rJTA${eY%988FE{t%{hxCJs;;b#V~bf3oqTv+ZU3$Z6X!5}DsJVSYX5c0 p)m8sjoxRQ~&%iYG#;ZDKqtu-?b%HuG4yC`A|0^_O&(UbEseY1+rB`xUsB1(}s84xhp-V-HWwNKUNv2tmBbTn7LUD11 zZfc5=si~o*f@e`wu4B4Fk$1AYwsuNdV46p2c9of{fm?~ITUlVSxoKo}n0``PUXo|A ze|fIIXMV6Nmuo;(NO_`hkY|#2NobyFmcC_TsHbs(Z-`G>xk0A3L8(z%Sdvq@cX^`Y z#E;_jsZrjp9%f->*@0mu`q?GfW?54+>+KzNz3eV}f&6Qt4y2#F!D{`~ z*DgJGW0&2Whh7nhOv^bv<=x6$!nOMu#Q*;)@1MHyfamWA=Od#Js_TRmd{$f%xVuGA zqRcyxoo7cQ^V>{@q%0k#lVUz>K?fKg)y_V@J*DFN{|U?LFBCgi_Z$A)qZ8e$fA^4Y z+6R^!(dyDYLJC^~pKtSP@Gd{%z|)pg$<$le->KB_Be|DlGQ&Oxm0K&n&#w2{)2vxu zocsCEE~{-$Up;EMu4f!qn0k6j+ia`YD?zfR#Szu#FaBMX`=;eAPyhOCyUq7nPA|W; z?K1oO1l^zs*~{)7T2Y^Qt$VYMe3y9MtK_UBYiIvC_xHL-edUCI2Rp4DH}a-rPG)UO zD&^Om?(0zV==K7QsSUZa6W^TavTgm=^30h3_WGM85hpcYtW29$d!^Q@>x;UMtBgh8 zts+jQqYfBA;*5xh&6$~fs&JLYKmmz2I)S1ZRj#o=IWefF&<>)xl% 
zIkqcEY|*W`9lSw8KQ9TrJ+OJgY|l%(*2;3+6`wxkD%&c)(ivq5kBhDwOgMYf=lZSu z1D*RlEh4*DF4^h)DnzySl4M3Xd+zV4{7&~)h2G7)qm&eUYTNIbVh3DC#W^d zmj1%NOyA8sZ~V^JvHYEv(D2D&a`%$0llOU^)m@$BQj8J=LDbBUjLH;r07fU3)IP z()sXN;Qr*QPoBpkBV9w{G@0k#D3p z$x?{5_3RNDwLrbH&x;Q!ICmYZp1x|gZ`L(~K4y#WS2F5PpIvEESM^n8GSicv$;)5P z-k_lPE^y`QX5D8m&oK5^{mfqS?M^|Tm$tET-h(SsFDYJ|AT`->6>CWP{K)~HNtSPe zKX*NPS7yMmUwvH{*DUTe8`agTw67I4^D3>-Hg^@;F;z^K27R9{vR!2_!*0j0x z?QtF3f^f;GUgI#&`}J`OGk#su=@l1y$o1oEuL$>Y`Ker81#IbBXT%ze*OhqdUh|Ri z=#`WCy(Y(1^KwbywaC9;^h1=(j(^iKVOqMZDooJqsc>fhy}MjV3mLk(GFp76WG+`a z)}x*MX!9E})!&I6l`CT|)t{NC>t*0kUtie4u;DT5Gxws6VNbln%zxKw{5qqTu|!TN z=kMI=b*ub((x*-jRf}P*t?l?F`}cn{Q_`LO2Ay9qYi%#4`pwx_n$a_5?T`6N?V;LJ z^cGCk;`7|*@g^s1&if9R#l?}1J5sz3iS1hZGhwB(ZR*1p4{oq3-D4{iv0=_#?lZ?j zS8Km+O6}9qo1%w!F0N7%I<&adbwSbS2-T3)H?X+#q4aa Ql6kw{C%0Vaz0d3o0J0niJpcdz diff --git a/secrets/forgejo/nsc-token.age b/secrets/forgejo/nsc-token.age index 50ca059a5801777749087ab18e22bc670deebee2..fbabf0c5405c5dcb8006c30a58fefa52085ae280 100644 GIT binary patch delta 1190 zcmcb{d5v>|PQ8y$UU81QX^E+4VsUA4N^oSJp|6W!c}QuFr*n3dUy*)*XPT#Fx@VAc zGMBGspi8ieONvE7l51tQM@Cd;u5o2zP-;MEV!ENTqjQ0CRe7*au3KT0BbTn7LUD11 zZfc5=si~o*f@e`wu4B4_d!)W=pqH_;NkmDZmy20)p__JPNMX2hU_rREOGbKLqG@ER zX?ki_a8RWMm#ee6MOIabcbHeDb9t_FSgO94X+dFHhN-`4dSF&bQfaZ5rC(@OmW!vw z#E;_jPPsuTg#{@=&K9oz*{S+wRpmaJ0ilMDPT86IE>+G+mj0QJo_T@VZWZ}le#Ob5 z5vIvmA=YbxXxO2M}A)L!3voRl2c0`H?rvmJ#5y!$DaRbl|kH;bI-aW&ptMc`YgEn z@3goN`SlaGZOGg?W$LZRhT7LJwOeY>{Q5QLl^@5h51I>uZmef+&DU7+tGX)b317rK z(HWutMRvLL9{r%IUijwGOe?{LC4EP=R_#dAHmWq}KQzZpQt*)3+Gs7F9>Y-kB8HXg z_n7lMo%&__oSvpFzJ0HhQ;*)~%x!$vc1&LFvReAHkMlNp)!!7(*ZT7^AS>}r)2+V~ z6C`eZ<}G=)opFPJmE2_&+q4N+SKVA3Um~E<5zv>C+4hI^M`+p_rBH*jSqvXxUS*^ z70!nKiBnd%p79I$bck`%q2HGh8QATP*zBA*b>G~bHk;bFH**|yVEU`S=T*ig&Eqqt zd!LauefA>iX|P2U%LV1)aMg3tmgj3%y#Et(ts(8Zb1vtS3cXY{?!_{;~6OzQ}?|()>CP!{v`I2?p>D$3)BRn|5xviwh)az zF@4GMS3!0cCX`JxT~HbI(sRq)zj?}{{Ef@G70;D3UYO=$@bhb(tHJp*(fN1M&wf0> 
z5&84k>a*HMZEAd<1fF`^^4>T3)ca)aXZ3PhD-R@PDA$XgX)@3g4jJ^hZ}AWYN;eJzp&s%*m5fX5;g-_|PJND{VP0N&l2Kr3p?Q##uc47!rKe+}S!kMnvPGD=Wr$@;R#mb|gn34| z30GLLv9^DbtEEX^m3OLBu8(ot;CO!r} z6&4|$d4^S9kpcc$1{L91WodzhQLX{r+U_Q4hR#KWY31RLZaD^Ay1KdwWl^pH&fXSj zQI+P|`Vqb<5$;7vekMjG9swx@Sw@8g5vB$88D?4K-p-MMTq?(}FBIjn{B(H||8w4R z2XjgsH=KB*y2x_*0>+ct7ymd19g?#TdX&U5@19Ca^%aG^b>$^yS;@V2r&#o#-u?gX zeTF(i=oO(0{Ng&F{;i&wX==cD%8|mFH{Bw2drDyLoy;y6da+E}vK2R=m^g(h|R=Ja;ZJ z{MF`|=VE_6coJ{Bu<{h)`2hw;3rgPA9kAc@_}|+zyy77;- z@nYGUd*`o*rsOgFt8j2Kixv6$Q)|CK@2219ME*TCmYVUbzU-H`lfcz{|2f?LXEsf~ zr6w7xF4I%)Z*@xezqC-GyXDa?o}&kZ4{)++Tt=CtfnKKR!R~ z@7ku?uWu||+5~=93#L^}@GW>ZY5TOWqiR`3CuUA`m2#i05~#eV=b7~1b$br`PEn{> zC^WrhzusxVkfR4aNZhXXUHsotX8Ikq;})02-#jiBGQU>Z634dpu)Fe&Nk1EYSLm9X zxV>Do@`rQXyyuI(7_^wy&F8uNA}Z$aTFu)t1a_=unX%&at{iQzRqp2_N_ahIZtH7T z+*KZX=+RnJ#fSgq?YkH7Udu%7kocS^v8})knfg5?%Ez7dET--J5Gm}nmq+5TpJmdW* znht5}G}C6Twz7S}n$Tif^76RAbIJdG^cwV&TO{sXQFX-51_+bP4^I zwUrJve{;@Ik}-YaCYi{~ljnylS~qLTw@<~{ucKet+xVWZef;Cd%u~x8mjwImxRc2n u`Isjn!8PmJlkMSen^xyP>Dcq?{QT|PLwD`0)7;FFl+04=a--{}QWXGh9}qSG From 9a5f147585d2b79ade9b6dd4eecbf7e10179dda2 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 02:18:04 -0700 Subject: [PATCH 31/50] Use shared macOS cache for runner bootstrap --- services/forgejo-nsc/internal/nsc/macos.go | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/services/forgejo-nsc/internal/nsc/macos.go b/services/forgejo-nsc/internal/nsc/macos.go index c54fb20..30be465 100644 --- a/services/forgejo-nsc/internal/nsc/macos.go +++ b/services/forgejo-nsc/internal/nsc/macos.go @@ -602,14 +602,27 @@ if ! 
mkdir -p "/Users/runner/.cache/act" 2>/dev/null; then fi export PATH="/usr/local/bin:/opt/homebrew/bin:/usr/bin:/bin:/usr/sbin:/sbin:${PATH}" -cache_root="${NSC_CACHE_PATH:-$HOME/.cache/burrow}" -mkdir -p \ +cache_base="${NSC_CACHE_PATH:-$HOME/.cache/burrow}" +cache_root="${NSC_SHARED_CACHE_PATH:-${cache_base}/shared}" +cache_owner="$(id -un)" +cache_group="$(id -gn)" +if ! install -d -m 0775 -o "${cache_owner}" -g "${cache_group}" \ + "${cache_root}" \ "${cache_root}/bin" \ "${cache_root}/downloads" \ "${cache_root}/go/path" \ "${cache_root}/go/mod" \ "${cache_root}/go/build" \ - "${cache_root}/homebrew" + "${cache_root}/homebrew" 2>/dev/null; then + sudo install -d -m 0775 -o "${cache_owner}" -g "${cache_group}" \ + "${cache_root}" \ + "${cache_root}/bin" \ + "${cache_root}/downloads" \ + "${cache_root}/go/path" \ + "${cache_root}/go/mod" \ + "${cache_root}/go/build" \ + "${cache_root}/homebrew" +fi export HOMEBREW_CACHE="${cache_root}/homebrew" export GOPATH="${cache_root}/go/path" export GOMODCACHE="${cache_root}/go/mod" From f7193728df69a7c5b8fe6d631a51151cf68f1cad Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 03:21:26 -0700 Subject: [PATCH 32/50] Relax tunnel provider isolation for Xcode 26 --- .../PacketTunnelProvider.swift | 25 ++++++++----------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/Apple/NetworkExtension/PacketTunnelProvider.swift b/Apple/NetworkExtension/PacketTunnelProvider.swift index a8e42e0..ede20d9 100644 --- a/Apple/NetworkExtension/PacketTunnelProvider.swift +++ b/Apple/NetworkExtension/PacketTunnelProvider.swift @@ -10,14 +10,7 @@ class PacketTunnelProvider: NEPacketTunnelProvider { case missingTunnelConfiguration } - private let logger = Logger.logger(for: PacketTunnelProvider.self) - - private var client: TunnelClient { - get throws { try _client.get() } - } - private let _client: Result = Result { - try TunnelClient.unix(socketURL: Constants.socketURL) - } + private static let logger = 
Logger.logger(for: PacketTunnelProvider.self) override init() { do { @@ -26,31 +19,33 @@ class PacketTunnelProvider: NEPacketTunnelProvider { databasePath: try Constants.databaseURL.path(percentEncoded: false) ) } catch { - logger.error("Failed to spawn networking thread: \(error)") + Self.logger.error("Failed to spawn networking thread: \(error)") } } - override func startTunnel(options: [String: NSObject]? = nil) async throws { + nonisolated override func startTunnel(options: [String: NSObject]? = nil) async throws { do { + let client = try TunnelClient.unix(socketURL: Constants.socketURL) let configuration = try await Array(client.tunnelConfiguration(.init()).prefix(1)).first guard let settings = configuration?.settings else { throw Error.missingTunnelConfiguration } try await setTunnelNetworkSettings(settings) _ = try await client.tunnelStart(.init()) - logger.log("Started tunnel with network settings: \(settings)") + Self.logger.log("Started tunnel with network settings: \(settings)") } catch { - logger.error("Failed to start tunnel: \(error)") + Self.logger.error("Failed to start tunnel: \(error)") throw error } } - override func stopTunnel(with reason: NEProviderStopReason) async { + nonisolated override func stopTunnel(with reason: NEProviderStopReason) async { do { + let client = try TunnelClient.unix(socketURL: Constants.socketURL) _ = try await client.tunnelStop(.init()) - logger.log("Stopped client") + Self.logger.log("Stopped client") } catch { - logger.error("Failed to stop tunnel: \(error)") + Self.logger.error("Failed to stop tunnel: \(error)") } } } From 9fcaf137ac5a1752b4cd7510c92dc92e1075bf7a Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 03:27:14 -0700 Subject: [PATCH 33/50] Use active rustup toolchain in Apple build --- .../NetworkExtension/libburrow/build-rust.sh | 20 +++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/Apple/NetworkExtension/libburrow/build-rust.sh 
b/Apple/NetworkExtension/libburrow/build-rust.sh index d3886fe..05b3595 100755 --- a/Apple/NetworkExtension/libburrow/build-rust.sh +++ b/Apple/NetworkExtension/libburrow/build-rust.sh @@ -62,10 +62,19 @@ else CARGO_TARGET_SUBDIR="release" fi +RUSTUP_TOOLCHAIN="" if [[ -x "$(command -v rustup)" ]]; then - CARGO_PATH="$(dirname $(rustup which cargo)):/usr/bin" + RUSTUP_TOOLCHAIN="$(rustup show active-toolchain | awk '{print $1}')" + if [[ -z "${RUSTUP_TOOLCHAIN}" ]]; then + echo 'error: Unable to determine active rustup toolchain' + exit 1 + fi + CARGO_BIN="$(rustup which --toolchain "${RUSTUP_TOOLCHAIN}" cargo)" + RUSTC_BIN="$(rustup which --toolchain "${RUSTUP_TOOLCHAIN}" rustc)" + CARGO_PATH="$(dirname "${CARGO_BIN}"):$(dirname "${RUSTC_BIN}"):/usr/bin" else - CARGO_PATH="$(dirname $(readlink -f $(which cargo))):/usr/bin" + CARGO_BIN="$(command -v cargo)" + CARGO_PATH="$(dirname "${CARGO_BIN}"):/usr/bin" fi PROTOC=$(readlink -f $(which protoc)) @@ -82,7 +91,7 @@ fi if [[ -x "$(command -v rustup)" ]]; then for TARGET in "${RUST_TARGETS[@]}"; do if ! 
rustup target list --installed | grep -qx "${TARGET}"; then - rustup target add "${TARGET}" + rustup target add --toolchain "${RUSTUP_TOOLCHAIN}" "${TARGET}" fi done fi @@ -102,13 +111,16 @@ BUILD_ENV=( "CARGO_TARGET_DIR=${EFFECTIVE_CARGO_TARGET_DIR}" "${EXTRA_ENV[@]}" ) +if [[ -n "${RUSTUP_TOOLCHAIN}" ]]; then + BUILD_ENV+=("RUSTUP_TOOLCHAIN=${RUSTUP_TOOLCHAIN}") +fi if [[ -n "${IPHONEOS_DEPLOYMENT_TARGET:-}" ]]; then BUILD_ENV+=("IPHONEOS_DEPLOYMENT_TARGET=${IPHONEOS_DEPLOYMENT_TARGET}") fi if [[ -n "${MACOSX_DEPLOYMENT_TARGET:-}" ]]; then BUILD_ENV+=("MACOSX_DEPLOYMENT_TARGET=${MACOSX_DEPLOYMENT_TARGET}") fi -env -i "${BUILD_ENV[@]}" cargo build "${CARGO_ARGS[@]}" +env -i "${BUILD_ENV[@]}" "${CARGO_BIN}" build "${CARGO_ARGS[@]}" mkdir -p "${BUILT_PRODUCTS_DIR}" From e0fe21fad852c59d92fc0efcf3fa58d0b7eb0c9c Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 03:36:11 -0700 Subject: [PATCH 34/50] Fix Apple runner toolchain alignment --- .forgejo/workflows/build-apple.yml | 19 ++++++++----- .../PacketTunnelProvider.swift | 27 +++++++++++-------- .../NetworkExtension/libburrow/build-rust.sh | 12 ++++++++- rust-toolchain.toml | 2 +- 4 files changed, 41 insertions(+), 19 deletions(-) diff --git a/.forgejo/workflows/build-apple.yml b/.forgejo/workflows/build-apple.yml index 5231f7e..e154a98 100644 --- a/.forgejo/workflows/build-apple.yml +++ b/.forgejo/workflows/build-apple.yml @@ -106,25 +106,32 @@ jobs: run: | set -euo pipefail + export PATH="${CARGO_HOME}/bin:${PATH}" + if ! 
command -v rustup >/dev/null 2>&1; then - curl --proto '=https' --tlsv1.2 -fsSL https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain 1.85.0 + curl --proto '=https' --tlsv1.2 -fsSL https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain 1.93.1 else rustup set profile minimal - rustup toolchain install 1.85.0 - rustup default 1.85.0 + rustup toolchain install 1.93.1 + rustup default 1.93.1 fi mkdir -p "${CARGO_HOME}/bin" echo "${CARGO_HOME}/bin" >> "${GITHUB_PATH}" export PATH="${CARGO_HOME}/bin:${PATH}" + rustup show active-toolchain + toolchain="$(rustup show active-toolchain | awk '{print $1}')" + cargo_bin="$(rustup which --toolchain "${toolchain}" cargo)" + rustc_bin="$(rustup which --toolchain "${toolchain}" rustc)" + targets='${{ matrix.rust-targets }}' for target in ${targets//,/ }; do - rustup target add "${target}" + rustup target add --toolchain "${toolchain}" "${target}" done - rustc --version - cargo --version + "${rustc_bin}" --version + "${cargo_bin}" --version - name: Install Protobuf shell: bash diff --git a/Apple/NetworkExtension/PacketTunnelProvider.swift b/Apple/NetworkExtension/PacketTunnelProvider.swift index ede20d9..98bf841 100644 --- a/Apple/NetworkExtension/PacketTunnelProvider.swift +++ b/Apple/NetworkExtension/PacketTunnelProvider.swift @@ -2,7 +2,7 @@ import AsyncAlgorithms import BurrowConfiguration import BurrowCore import libburrow -import NetworkExtension +@preconcurrency import NetworkExtension import os class PacketTunnelProvider: NEPacketTunnelProvider { @@ -10,7 +10,14 @@ class PacketTunnelProvider: NEPacketTunnelProvider { case missingTunnelConfiguration } - private static let logger = Logger.logger(for: PacketTunnelProvider.self) + private let logger = Logger.logger(for: PacketTunnelProvider.self) + + private var client: TunnelClient { + get throws { try _client.get() } + } + private let _client: Result = Result { + try TunnelClient.unix(socketURL: Constants.socketURL) + } override init() { 
do { @@ -19,33 +26,31 @@ class PacketTunnelProvider: NEPacketTunnelProvider { databasePath: try Constants.databaseURL.path(percentEncoded: false) ) } catch { - Self.logger.error("Failed to spawn networking thread: \(error)") + logger.error("Failed to spawn networking thread: \(error)") } } - nonisolated override func startTunnel(options: [String: NSObject]? = nil) async throws { + override func startTunnel(options: [String: NSObject]? = nil) async throws { do { - let client = try TunnelClient.unix(socketURL: Constants.socketURL) let configuration = try await Array(client.tunnelConfiguration(.init()).prefix(1)).first guard let settings = configuration?.settings else { throw Error.missingTunnelConfiguration } try await setTunnelNetworkSettings(settings) _ = try await client.tunnelStart(.init()) - Self.logger.log("Started tunnel with network settings: \(settings)") + logger.log("Started tunnel with network settings: \(settings)") } catch { - Self.logger.error("Failed to start tunnel: \(error)") + logger.error("Failed to start tunnel: \(error)") throw error } } - nonisolated override func stopTunnel(with reason: NEProviderStopReason) async { + override func stopTunnel(with reason: NEProviderStopReason) async { do { - let client = try TunnelClient.unix(socketURL: Constants.socketURL) _ = try await client.tunnelStop(.init()) - Self.logger.log("Stopped client") + logger.log("Stopped client") } catch { - Self.logger.error("Failed to stop tunnel: \(error)") + logger.error("Failed to stop tunnel: \(error)") } } } diff --git a/Apple/NetworkExtension/libburrow/build-rust.sh b/Apple/NetworkExtension/libburrow/build-rust.sh index 05b3595..3da8fae 100755 --- a/Apple/NetworkExtension/libburrow/build-rust.sh +++ b/Apple/NetworkExtension/libburrow/build-rust.sh @@ -84,7 +84,6 @@ if [[ -n "${RUSTC_WRAPPER:-}" && "${RUSTC_WRAPPER}" != /* ]]; then WRAPPER_PATH="$(command -v "${RUSTC_WRAPPER}" || true)" if [[ -n "${WRAPPER_PATH}" ]]; then RUSTC_WRAPPER="${WRAPPER_PATH}" - 
CARGO_PATH="$(dirname "${WRAPPER_PATH}"):$CARGO_PATH" fi fi @@ -114,12 +113,23 @@ BUILD_ENV=( if [[ -n "${RUSTUP_TOOLCHAIN}" ]]; then BUILD_ENV+=("RUSTUP_TOOLCHAIN=${RUSTUP_TOOLCHAIN}") fi +if [[ -n "${RUSTC_BIN:-}" ]]; then + BUILD_ENV+=("RUSTC=${RUSTC_BIN}") +fi if [[ -n "${IPHONEOS_DEPLOYMENT_TARGET:-}" ]]; then BUILD_ENV+=("IPHONEOS_DEPLOYMENT_TARGET=${IPHONEOS_DEPLOYMENT_TARGET}") fi if [[ -n "${MACOSX_DEPLOYMENT_TARGET:-}" ]]; then BUILD_ENV+=("MACOSX_DEPLOYMENT_TARGET=${MACOSX_DEPLOYMENT_TARGET}") fi +echo "Using Rust toolchain: ${RUSTUP_TOOLCHAIN:-system}" +echo "Using cargo: ${CARGO_BIN}" +if [[ -n "${RUSTC_BIN:-}" ]]; then + echo "Using rustc: ${RUSTC_BIN}" +fi +if [[ -n "${RUSTC_WRAPPER:-}" ]]; then + echo "Using rustc wrapper: ${RUSTC_WRAPPER}" +fi env -i "${BUILD_ENV[@]}" "${CARGO_BIN}" build "${CARGO_ARGS[@]}" mkdir -p "${BUILT_PRODUCTS_DIR}" diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 8f7dc3d..ff09ebf 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "1.85.0" +channel = "1.93.1" components = ["rustfmt"] profile = "minimal" From 5bd95b7a7ca6eaf18d28d86df0472c8d8082fd89 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 03:42:12 -0700 Subject: [PATCH 35/50] Avoid oslog on iOS simulator builds --- burrow/Cargo.toml | 4 +++- burrow/src/tracing.rs | 5 ++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/burrow/Cargo.toml b/burrow/Cargo.toml index 3bae2ae..15facd1 100644 --- a/burrow/Cargo.toml +++ b/burrow/Cargo.toml @@ -25,7 +25,6 @@ tun = { version = "0.1", path = "../tun", features = ["serde", "tokio"] } clap = { version = "4.4", features = ["derive"] } tracing = "0.1" tracing-log = "0.1" -tracing-oslog = { git = "https://github.com/Stormshield-robinc/tracing-oslog" } tracing-subscriber = { version = "0.3", features = ["std", "env-filter"] } log = "0.4" serde = { version = "1", features = ["derive"] } @@ -82,6 +81,9 @@ libc = "0.2" nix = { version = "0.27", 
features = ["ioctl"] } rusqlite = { version = "0.38.0", features = ["bundled", "blob"] } +[target.'cfg(target_os = "macos")'.dependencies] +tracing-oslog = { git = "https://github.com/Stormshield-robinc/tracing-oslog" } + [dev-dependencies] insta = { version = "1.32", features = ["yaml"] } diff --git a/burrow/src/tracing.rs b/burrow/src/tracing.rs index 861b41f..d48c53b 100644 --- a/burrow/src/tracing.rs +++ b/burrow/src/tracing.rs @@ -29,12 +29,15 @@ pub fn initialize() { } }; - #[cfg(target_vendor = "apple")] + #[cfg(target_os = "macos")] let system_log = Some(tracing_oslog::OsLogger::new( "com.hackclub.burrow", "tracing", )); + #[cfg(all(target_vendor = "apple", not(target_os = "macos")))] + let system_log = None::; + let stderr = (console::user_attended_stderr() || system_log.is_none()).then(|| { tracing_subscriber::fmt::layer() .with_level(true) From 028627bfcb5ee3359b08ee8c5938b2b27671649a Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 03:51:53 -0700 Subject: [PATCH 36/50] Wire namespace caches and agenix secrets --- .forgejo/workflows/build-apple.yml | 6 --- .forgejo/workflows/build-rust.yml | 20 +++++--- .forgejo/workflows/build-site.yml | 25 ++++++++-- Scripts/_burrow-secrets.sh | 8 ++-- Scripts/provision-forgejo-nsc.sh | 23 --------- Scripts/sync-forgejo-nsc-config.sh | 59 +++++------------------- services/forgejo-nsc/README.md | 9 ++-- services/forgejo-nsc/config.example.yaml | 18 +++++--- 8 files changed, 66 insertions(+), 102 deletions(-) diff --git a/.forgejo/workflows/build-apple.yml b/.forgejo/workflows/build-apple.yml index e154a98..fd69acc 100644 --- a/.forgejo/workflows/build-apple.yml +++ b/.forgejo/workflows/build-apple.yml @@ -85,12 +85,6 @@ jobs: "${shared_root}/apple/SourcePackages" \ "${lane_root}/cargo-target" \ "${lane_root}/DerivedData" - rm -rf \ - "${lane_root}/cargo-target" \ - "${lane_root}/DerivedData" - mkdir -p \ - "${lane_root}/cargo-target" \ - "${lane_root}/DerivedData" echo 
"CARGO_HOME=${shared_root}/cargo" >> "${GITHUB_ENV}" echo "CARGO_TARGET_DIR=${lane_root}/cargo-target" >> "${GITHUB_ENV}" echo "RUSTUP_HOME=${shared_root}/rustup" >> "${GITHUB_ENV}" diff --git a/.forgejo/workflows/build-rust.yml b/.forgejo/workflows/build-rust.yml index d70dcf0..17bcea1 100644 --- a/.forgejo/workflows/build-rust.yml +++ b/.forgejo/workflows/build-rust.yml @@ -16,7 +16,7 @@ concurrency: jobs: rust: name: Cargo Test - runs-on: [self-hosted, linux, x86_64, burrow-forge] + runs-on: namespace-profile-linux-medium env: CARGO_INCREMENTAL: 0 RUSTC_WRAPPER: sccache @@ -32,11 +32,19 @@ jobs: shell: bash run: | set -euo pipefail - cache_root="${HOME}/.cache/burrow" - mkdir -p "${cache_root}/cargo" "${cache_root}/sccache" "${cache_root}/cargo-target/build-rust" - echo "CARGO_HOME=${cache_root}/cargo" >> "${GITHUB_ENV}" - echo "SCCACHE_DIR=${cache_root}/sccache" >> "${GITHUB_ENV}" - echo "CARGO_TARGET_DIR=${cache_root}/cargo-target/build-rust" >> "${GITHUB_ENV}" + cache_root="${NSC_CACHE_PATH:-${HOME}/.cache/burrow}" + shared_root="${NSC_SHARED_CACHE_PATH:-${cache_root}/shared}" + lane_root="${NSC_LANE_CACHE_PATH:-${cache_root}/lane/build-rust}" + mkdir -p \ + "${shared_root}/cargo" \ + "${shared_root}/sccache" \ + "${shared_root}/xdg" \ + "${lane_root}/cargo-target" + echo "CARGO_HOME=${shared_root}/cargo" >> "${GITHUB_ENV}" + echo "SCCACHE_DIR=${shared_root}/sccache" >> "${GITHUB_ENV}" + echo "XDG_CACHE_HOME=${shared_root}/xdg" >> "${GITHUB_ENV}" + echo "CARGO_TARGET_DIR=${lane_root}/cargo-target" >> "${GITHUB_ENV}" + df -h /nix "${shared_root}" "${lane_root}" || true - name: Test shell: bash diff --git a/.forgejo/workflows/build-site.yml b/.forgejo/workflows/build-site.yml index de296d4..9b08152 100644 --- a/.forgejo/workflows/build-site.yml +++ b/.forgejo/workflows/build-site.yml @@ -16,7 +16,7 @@ concurrency: jobs: site: name: Next.js Build - runs-on: [self-hosted, linux, x86_64, burrow-forge] + runs-on: namespace-profile-linux-medium steps: - name: 
Checkout uses: https://code.forgejo.org/actions/checkout@v4 @@ -28,12 +28,27 @@ jobs: shell: bash run: | set -euo pipefail - cache_root="${HOME}/.cache/burrow" - mkdir -p "${cache_root}/npm" - echo "NPM_CONFIG_CACHE=${cache_root}/npm" >> "${GITHUB_ENV}" + cache_root="${NSC_CACHE_PATH:-${HOME}/.cache/burrow}" + shared_root="${NSC_SHARED_CACHE_PATH:-${cache_root}/shared}" + lane_root="${NSC_LANE_CACHE_PATH:-${cache_root}/lane/build-site}" + mkdir -p \ + "${shared_root}/npm" \ + "${shared_root}/xdg" \ + "${lane_root}/next-cache" + echo "NPM_CONFIG_CACHE=${shared_root}/npm" >> "${GITHUB_ENV}" + echo "XDG_CACHE_HOME=${shared_root}/xdg" >> "${GITHUB_ENV}" + echo "NEXT_CACHE_DIR=${lane_root}/next-cache" >> "${GITHUB_ENV}" + df -h /nix "${shared_root}" "${lane_root}" || true - name: Build shell: bash run: | set -euo pipefail - nix develop .#ci -c bash -lc 'cd site && npm install && npm run build' + nix develop .#ci -c bash -lc ' + mkdir -p site/.next + rm -rf site/.next/cache + ln -sfn "${NEXT_CACHE_DIR}" site/.next/cache + cd site + npm install + npm run build + ' diff --git a/Scripts/_burrow-secrets.sh b/Scripts/_burrow-secrets.sh index 7754b74..e08bf2a 100644 --- a/Scripts/_burrow-secrets.sh +++ b/Scripts/_burrow-secrets.sh @@ -84,13 +84,13 @@ burrow_resolve_secret_file() { return 0 fi - if [[ -n "${intake_path}" && -s "${intake_path}" ]]; then - printf '%s\n' "${intake_path}" + if [[ -n "${age_path}" && -f "${age_path}" ]]; then + burrow_decrypt_age_secret_to_temp "${repo_root}" "${age_path}" return 0 fi - if [[ -n "${age_path}" && -f "${age_path}" ]]; then - burrow_decrypt_age_secret_to_temp "${repo_root}" "${age_path}" + if [[ -n "${intake_path}" && -s "${intake_path}" ]]; then + printf '%s\n' "${intake_path}" return 0 fi diff --git a/Scripts/provision-forgejo-nsc.sh b/Scripts/provision-forgejo-nsc.sh index c85b993..b8c9f12 100755 --- a/Scripts/provision-forgejo-nsc.sh +++ b/Scripts/provision-forgejo-nsc.sh @@ -28,7 +28,6 @@ Options: --contact-user Forgejo username 
used for PAT creation (default: contact) --scope-owner Forgejo org/user owner for the default NSC scope (default: hackclub) --scope-name Forgejo repository name for the default NSC scope (default: burrow) - --write-intake Also write plaintext runtime inputs to intake/ for local debugging. -h, --help Show this help text. EOF } @@ -43,7 +42,6 @@ CONTACT_USER="${FORGEJO_CONTACT_USER:-contact}" SCOPE_OWNER="${FORGEJO_SCOPE_OWNER:-hackclub}" SCOPE_NAME="${FORGEJO_SCOPE_NAME:-burrow}" BURROW_FLAKE_TMPDIRS=() -WRITE_INTAKE=0 TMP_DIR="" cleanup() { @@ -87,10 +85,6 @@ while [[ $# -gt 0 ]]; do SCOPE_NAME="${2:?missing value for --scope-name}" shift 2 ;; - --write-intake) - WRITE_INTAKE=1 - shift - ;; -h|--help) usage exit 0 @@ -174,8 +168,6 @@ PY chmod 600 "${token_file}" elif [[ -f "${token_secret}" ]]; then burrow_decrypt_age_secret_to_temp "${REPO_ROOT}" "${token_secret}" > "${token_file}" -elif [[ -s "${REPO_ROOT}/intake/forgejo_nsc_token.txt" ]]; then - cp "${REPO_ROOT}/intake/forgejo_nsc_token.txt" "${token_file}" fi if [[ -s "${token_file}" ]]; then @@ -298,20 +290,5 @@ burrow_encrypt_secret_from_file "${REPO_ROOT}" "${token_secret}" "${token_file}" burrow_encrypt_secret_from_file "${REPO_ROOT}" "${dispatcher_secret}" "${dispatcher_out}" burrow_encrypt_secret_from_file "${REPO_ROOT}" "${autoscaler_secret}" "${autoscaler_out}" -if [[ "${WRITE_INTAKE}" -eq 1 ]]; then - mkdir -p "${REPO_ROOT}/intake" - chmod 700 "${REPO_ROOT}/intake" - cp "${token_file}" "${REPO_ROOT}/intake/forgejo_nsc_token.txt" - cp "${dispatcher_out}" "${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml" - cp "${autoscaler_out}" "${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml" - chmod 600 \ - "${REPO_ROOT}/intake/forgejo_nsc_token.txt" \ - "${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml" \ - "${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml" -fi - echo "Updated secrets/forgejo/{nsc-token,nsc-dispatcher-config,nsc-autoscaler-config}.age." 
-if [[ "${WRITE_INTAKE}" -eq 1 ]]; then - echo "Also refreshed intake/forgejo_nsc_{token,dispatcher,autoscaler} for local debugging." -fi echo "Minted Forgejo PAT ${token_name} for ${CONTACT_USER} on ${HOST}." diff --git a/Scripts/sync-forgejo-nsc-config.sh b/Scripts/sync-forgejo-nsc-config.sh index baa4960..431f832 100755 --- a/Scripts/sync-forgejo-nsc-config.sh +++ b/Scripts/sync-forgejo-nsc-config.sh @@ -5,14 +5,13 @@ usage() { cat <<'EOF' Usage: Scripts/sync-forgejo-nsc-config.sh [options] -Copy Burrow forgejo-nsc runtime inputs from age secrets or intake/ onto the forge host and -restart the dispatcher/autoscaler units. +Deploy Burrow forgejo-nsc runtime inputs from age secrets onto the forge host. Options: --host SSH target (default: root@git.burrow.net) --ssh-key SSH private key (default: secrets/forgejo/agent-ssh-key.age, then intake/) - --rotate-pat Re-render the intake files before syncing. - --no-restart Copy files only. + --rotate-pat Re-render the encrypted runtime inputs before deploying. + --no-restart Validate the encrypted inputs only; do not deploy. -h, --help Show this help text. 
EOF } @@ -75,7 +74,6 @@ burrow_require_cmd() { } burrow_require_cmd ssh -burrow_require_cmd scp SSH_KEY="$( burrow_resolve_secret_file \ @@ -90,26 +88,25 @@ if [[ "${ROTATE_PAT}" -eq 1 ]]; then "${SCRIPT_DIR}/provision-forgejo-nsc.sh" --host "${HOST}" --ssh-key "${SSH_KEY}" fi -TMP_DIR="$(mktemp -d "${TMPDIR:-/tmp}/burrow-nsc-sync.XXXXXX")" token_file="$( burrow_resolve_secret_file \ "${REPO_ROOT}" \ "" \ - "${REPO_ROOT}/intake/forgejo_nsc_token.txt" \ + "" \ "${REPO_ROOT}/secrets/forgejo/nsc-token.age" )" dispatcher_file="$( burrow_resolve_secret_file \ "${REPO_ROOT}" \ "" \ - "${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml" \ + "" \ "${REPO_ROOT}/secrets/forgejo/nsc-dispatcher-config.age" )" autoscaler_file="$( burrow_resolve_secret_file \ "${REPO_ROOT}" \ "" \ - "${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml" \ + "" \ "${REPO_ROOT}/secrets/forgejo/nsc-autoscaler-config.age" )" @@ -120,45 +117,11 @@ for path in "${token_file}" "${dispatcher_file}" "${autoscaler_file}"; do fi done -ssh_opts=( - -i "${SSH_KEY}" - -o IdentitiesOnly=yes - -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" - -o StrictHostKeyChecking=accept-new -) - -remote_tmp="$(ssh "${ssh_opts[@]}" "${HOST}" "mktemp -d")" -cleanup_remote() { - if [[ -n "${remote_tmp:-}" ]]; then - ssh "${ssh_opts[@]}" "${HOST}" "rm -rf '${remote_tmp}'" >/dev/null 2>&1 || true - fi -} -trap 'cleanup_remote; cleanup' EXIT - -scp "${ssh_opts[@]}" \ - "${token_file}" \ - "${dispatcher_file}" \ - "${autoscaler_file}" \ - "${HOST}:${remote_tmp}/" - -ssh "${ssh_opts[@]}" "${HOST}" " - set -euo pipefail - install -d -m 0755 /var/lib/burrow/intake - install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${token_file}")' /var/lib/burrow/intake/forgejo_nsc_token.txt - install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${dispatcher_file}")' /var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml - install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${autoscaler_file}")' 
/var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml -" - if [[ "${NO_RESTART}" -eq 0 ]]; then - ssh "${ssh_opts[@]}" "${HOST}" " - set -euo pipefail - systemctl restart forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service - systemctl is-active forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service - ls -l \ - /var/lib/burrow/intake/forgejo_nsc_token.txt \ - /var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml \ - /var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml - " + BURROW_FORGE_HOST="${HOST}" \ + BURROW_FORGE_SSH_KEY="${SSH_KEY}" \ + BURROW_FORGE_KNOWN_HOSTS_FILE="${KNOWN_HOSTS_FILE}" \ + "${SCRIPT_DIR}/forge-deploy.sh" --switch fi -echo "forgejo-nsc runtime sync complete (host=${HOST}, restarted=$((1 - NO_RESTART)))." +echo "forgejo-nsc runtime sync complete (host=${HOST}, deployed=$((1 - NO_RESTART)))." diff --git a/services/forgejo-nsc/README.md b/services/forgejo-nsc/README.md index f928973..3b819d4 100644 --- a/services/forgejo-nsc/README.md +++ b/services/forgejo-nsc/README.md @@ -46,8 +46,9 @@ profile. The important knobs are: Namespace environment. The dispatcher destroys the instance after a job so the TTL acts as a hard cap, not an idle timeout. - `namespace.linux_cache_*` / `namespace.macos_cache_*` – persistent cache - volumes mounted into runners so Linux can keep `/nix` plus build caches warm - and macOS can reuse Rust toolchains, Xcode package caches, and derived data. + volumes mounted into runners so Linux can keep `/nix` plus shared build + caches warm and macOS can reuse Rust toolchains, Xcode package caches, and + lane-local derived data. ### Running locally @@ -159,8 +160,8 @@ generate a Namespace token from the logged-in Namespace account, and refresh `secrets/forgejo/{nsc-token,nsc-dispatcher-config,nsc-autoscaler-config}.age`. The token file is emitted as JSON with a `bearer_token` field so both the Compute API path and the `nsc` CLI fallback can consume the same secret -material. 
Use `--write-intake` only when you explicitly need local plaintext -debug copies. +material. The forge host consumes the encrypted secrets through agenix; avoid +keeping local plaintext `intake/` copies around. Long-lived runtime state is now sourced from age-encrypted files: diff --git a/services/forgejo-nsc/config.example.yaml b/services/forgejo-nsc/config.example.yaml index fcd56ec..15fe0a4 100644 --- a/services/forgejo-nsc/config.example.yaml +++ b/services/forgejo-nsc/config.example.yaml @@ -11,10 +11,10 @@ forgejo: timeout: "30s" namespace: - nsc_binary: "/app/bin/nsc" + nsc_binary: "nsc" compute_base_url: "https://ord4.compute.namespaceapis.com" - image: "ghcr.io/forgejo/runner:3" - machine_type: "8x16" + image: "code.forgejo.org/forgejo/runner:11" + machine_type: "4x8" macos_base_image_id: "tahoe" macos_machine_arch: "arm64" duration: "30m" @@ -31,9 +31,15 @@ namespace: size_gb: 40 macos_cache_path: "/Users/runner/.cache/burrow" macos_cache_volumes: - - tag: "burrow-forgejo-macos-cache" - mount_point: "/Users/runner/.cache/burrow" - size_gb: 60 + - tag: "burrow-forgejo-macos-shared-v1" + mount_point: "/Users/runner/.cache/burrow/shared" + size_gb: 80 + - tag: "burrow-forgejo-macos-macos-v1" + mount_point: "/Users/runner/.cache/burrow/lane/macos" + size_gb: 80 + - tag: "burrow-forgejo-macos-ios-simulator-v1" + mount_point: "/Users/runner/.cache/burrow/lane/ios-simulator" + size_gb: 80 runner: name_prefix: "nscloud-" From 5c0a9b3f548049926b1543ea86c45dcf274ff628 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 03:51:56 -0700 Subject: [PATCH 37/50] Work around Xcode 26 tunnel isolation --- .../PacketTunnelProvider.swift | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/Apple/NetworkExtension/PacketTunnelProvider.swift b/Apple/NetworkExtension/PacketTunnelProvider.swift index 98bf841..54b813c 100644 --- a/Apple/NetworkExtension/PacketTunnelProvider.swift +++ 
b/Apple/NetworkExtension/PacketTunnelProvider.swift @@ -5,19 +5,17 @@ import libburrow @preconcurrency import NetworkExtension import os +// Xcode 26 imports `startTunnel(options:)` as `[String: NSObject]?` and treats the +// override as crossing a nonisolated boundary. The extension target does not +// mutate or forward these Cocoa objects, so treat them as an unchecked escape hatch. +extension NSObject: @retroactive @unchecked Sendable {} + class PacketTunnelProvider: NEPacketTunnelProvider { enum Error: Swift.Error { case missingTunnelConfiguration } - private let logger = Logger.logger(for: PacketTunnelProvider.self) - - private var client: TunnelClient { - get throws { try _client.get() } - } - private let _client: Result = Result { - try TunnelClient.unix(socketURL: Constants.socketURL) - } + private static let logger = Logger.logger(for: PacketTunnelProvider.self) override init() { do { @@ -26,31 +24,33 @@ class PacketTunnelProvider: NEPacketTunnelProvider { databasePath: try Constants.databaseURL.path(percentEncoded: false) ) } catch { - logger.error("Failed to spawn networking thread: \(error)") + Self.logger.error("Failed to spawn networking thread: \(error)") } } - override func startTunnel(options: [String: NSObject]? = nil) async throws { + nonisolated override func startTunnel(options: [String: NSObject]? 
= nil) async throws { do { + let client = try TunnelClient.unix(socketURL: Constants.socketURL) let configuration = try await Array(client.tunnelConfiguration(.init()).prefix(1)).first guard let settings = configuration?.settings else { throw Error.missingTunnelConfiguration } try await setTunnelNetworkSettings(settings) _ = try await client.tunnelStart(.init()) - logger.log("Started tunnel with network settings: \(settings)") + Self.logger.log("Started tunnel with network settings: \(settings)") } catch { - logger.error("Failed to start tunnel: \(error)") + Self.logger.error("Failed to start tunnel: \(error)") throw error } } - override func stopTunnel(with reason: NEProviderStopReason) async { + nonisolated override func stopTunnel(with reason: NEProviderStopReason) async { do { + let client = try TunnelClient.unix(socketURL: Constants.socketURL) _ = try await client.tunnelStop(.init()) - logger.log("Stopped client") + Self.logger.log("Stopped client") } catch { - logger.error("Failed to stop tunnel: \(error)") + Self.logger.error("Failed to stop tunnel: \(error)") } } } From 5b09f3a742244f27879f35bb4a9af3371f1c1e63 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 04:08:10 -0700 Subject: [PATCH 38/50] Stabilize forgejo namespace auth and secrets --- Scripts/_burrow-secrets.sh | 21 +++++--- Scripts/provision-forgejo-nsc.sh | 50 ++++++++++++------ Scripts/sync-forgejo-nsc-config.sh | 24 ++------- secrets/forgejo/nsc-autoscaler-config.age | Bin 1395 -> 1395 bytes secrets/forgejo/nsc-dispatcher-config.age | Bin 2015 -> 2015 bytes secrets/forgejo/nsc-token.age | Bin 1238 -> 1861 bytes services/forgejo-nsc/README.md | 9 ++-- .../forgejo-nsc/internal/nsc/dispatcher.go | 4 ++ 8 files changed, 59 insertions(+), 49 deletions(-) diff --git a/Scripts/_burrow-secrets.sh b/Scripts/_burrow-secrets.sh index e08bf2a..6f1bc28 100644 --- a/Scripts/_burrow-secrets.sh +++ b/Scripts/_burrow-secrets.sh @@ -107,18 +107,25 @@ burrow_encrypt_secret_from_file() { local 
secret_path="$2" local source_path="$3" local agenix_path - local identity_path + local backup_file="" if [[ ! -s "${source_path}" ]]; then echo "secret source missing or empty: ${source_path}" >&2 return 1 fi agenix_path="$(burrow_secret_repo_path "${repo_root}" "${secret_path}")" - identity_path="$(burrow_agenix_identity_path "${repo_root}")" - - if [[ -n "${identity_path}" ]]; then - nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -e "${agenix_path}" -i "${identity_path}" < "${source_path}" - else - nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -e "${agenix_path}" < "${source_path}" + if [[ -f "${secret_path}" ]]; then + backup_file="$(mktemp "${TMPDIR:-/tmp}/burrow-secret-backup.XXXXXX")" + cp "${secret_path}" "${backup_file}" fi + rm -f "${secret_path}" + + if ! nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -e "${agenix_path}" < "${source_path}"; then + if [[ -n "${backup_file}" && -f "${backup_file}" ]]; then + mv "${backup_file}" "${secret_path}" + fi + return 1 + fi + + [[ -n "${backup_file}" ]] && rm -f "${backup_file}" } diff --git a/Scripts/provision-forgejo-nsc.sh b/Scripts/provision-forgejo-nsc.sh index b8c9f12..537107e 100755 --- a/Scripts/provision-forgejo-nsc.sh +++ b/Scripts/provision-forgejo-nsc.sh @@ -146,25 +146,36 @@ dispatcher_secret="${REPO_ROOT}/secrets/forgejo/nsc-dispatcher-config.age" autoscaler_secret="${REPO_ROOT}/secrets/forgejo/nsc-autoscaler-config.age" if [[ "${REFRESH_TOKEN}" -eq 1 ]]; then - "${NSC_BIN}" auth check-login --duration 20m >/dev/null - raw_token_file="$(mktemp)" - trap 'rm -f "${raw_token_file}"; cleanup' EXIT - "${NSC_BIN}" auth generate-dev-token --output_to "${raw_token_file}" >/dev/null - RAW_NSC_TOKEN_FILE="${raw_token_file}" TOKEN_FILE="${token_file}" python3 - <<'PY' + ssh \ + -i "${SSH_KEY}" \ + -o IdentitiesOnly=yes \ + -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \ + -o 
StrictHostKeyChecking=accept-new \ + "${HOST}" \ + 'sudo -u forgejo-nsc python3 - <<'"'"'PY'"'"' import json -import os from pathlib import Path -raw = Path(os.environ["RAW_NSC_TOKEN_FILE"]).read_text(encoding="utf-8").strip() -if not raw: - raise SystemExit("generated Namespace token is empty") +payload = {} -Path(os.environ["TOKEN_FILE"]).write_text( - json.dumps({"bearer_token": raw}, indent=2) + "\n", - encoding="utf-8", -) -PY - rm -f "${raw_token_file}" +token_json = Path("/var/lib/forgejo-nsc/.config/ns/token.json") +if token_json.exists(): + data = json.loads(token_json.read_text(encoding="utf-8")) + session = str(data.get("session_token", "")).strip() + if session: + payload["session_token"] = session + +token_cache = Path("/var/lib/forgejo-nsc/.config/ns/token.cache") +if token_cache.exists(): + bearer = token_cache.read_text(encoding="utf-8").strip() + if bearer: + payload["bearer_token"] = bearer + +if not payload: + raise SystemExit("forgejo-nsc host does not have a usable Namespace session") + +print(json.dumps(payload, indent=2)) +PY' > "${token_file}" chmod 600 "${token_file}" elif [[ -f "${token_secret}" ]]; then burrow_decrypt_age_secret_to_temp "${REPO_ROOT}" "${token_secret}" > "${token_file}" @@ -186,8 +197,13 @@ try: except json.JSONDecodeError: parsed = None -if isinstance(parsed, dict) and isinstance(parsed.get("bearer_token"), str) and parsed["bearer_token"].strip(): - raise SystemExit(0) +if isinstance(parsed, dict): + bearer = parsed.get("bearer_token") + session = parsed.get("session_token") + if isinstance(bearer, str) and bearer.strip(): + raise SystemExit(0) + if isinstance(session, str) and session.strip(): + raise SystemExit(0) path.write_text(json.dumps({"bearer_token": raw}, indent=2) + "\n", encoding="utf-8") PY diff --git a/Scripts/sync-forgejo-nsc-config.sh b/Scripts/sync-forgejo-nsc-config.sh index 431f832..d6ac48c 100755 --- a/Scripts/sync-forgejo-nsc-config.sh +++ b/Scripts/sync-forgejo-nsc-config.sh @@ -88,27 +88,9 @@ if [[ 
"${ROTATE_PAT}" -eq 1 ]]; then "${SCRIPT_DIR}/provision-forgejo-nsc.sh" --host "${HOST}" --ssh-key "${SSH_KEY}" fi -token_file="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "" \ - "" \ - "${REPO_ROOT}/secrets/forgejo/nsc-token.age" -)" -dispatcher_file="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "" \ - "" \ - "${REPO_ROOT}/secrets/forgejo/nsc-dispatcher-config.age" -)" -autoscaler_file="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "" \ - "" \ - "${REPO_ROOT}/secrets/forgejo/nsc-autoscaler-config.age" -)" +token_file="${REPO_ROOT}/secrets/forgejo/nsc-token.age" +dispatcher_file="${REPO_ROOT}/secrets/forgejo/nsc-dispatcher-config.age" +autoscaler_file="${REPO_ROOT}/secrets/forgejo/nsc-autoscaler-config.age" for path in "${token_file}" "${dispatcher_file}" "${autoscaler_file}"; do if [[ ! -s "${path}" ]]; then diff --git a/secrets/forgejo/nsc-autoscaler-config.age b/secrets/forgejo/nsc-autoscaler-config.age index 5d9aa28de8171c62917d52908acd821b19402659..460d194950b9d0da16732cbea12f8c05529cdeeb 100644 GIT binary patch delta 1348 zcmey&^_gpePJNPLhOnMH(q zGM9f~W@5N`YOr%vmA0pLNkoy4YrbzpMx;e?X?R##Sg}h*x<`Jk0xq)dG=8-``mPLW}l}Q0%$rfq8T>8qj9BaFe>}*|g@xG4I z>~6-T*2X7hH*(E$4|LE!QzhCpX@ykkm5shr#3>uS`yD zn!Cj`>sI}oxjnqr%{D8RrDbN>>ym_yUQl6Bd5Je zSr}b5Cq6m%`;?xqvC-rWX(4W>gMWE$DUm%YZD1ODB)jDQC28fiO_NMLo0Jwg^6pPf zJY=kM)cflPZM__u^lNL}j})X`TIs>RZkyA=wOH{vW!Yazgv*^Q)t9{I?cR!fD>)au~ zNwGIJ{5g{RJCOh0z0QZnk2`riou%*FuT{^uc+akFPd}{sFa0NtZC>ifDbY+OM>3=8 zK5Dsp2&eA8`j2~C($f8hoTRo1y}NQ)a>+Y6o|2aeA`1gP>r7&3;Vk{KXSzrCht1QT ziMsyh5)ab;DjU@iYjyY61;aBdy!&PxDrepOH{Z4LA@{FehMv=W#h;&M-yYVj?tkM+ z-v{RUlzNxH+jn-Jm7BBh;fWvm22$N0nWZ$e?lI;6TKafu_Q!BG*&C^D zvs(M(>MJe$Cvv3g#ozQ_%uDs;FwRzq?EY4IG~r@-f6ULB zo0*F_IAwpo-hT6o$H9-OZ&sdIfB(gf*?(@l$#yz%Phy7bLzPQ4w@<8FI;*Al{1t~^ I{f}1w04_Lrd;kCd delta 1348 zcmey&^_gpePJKXRc0fd6MW9ExNmWi{d2YIEuz`W8abkX-r6%Tn8D1FyCGHijiA7;yfq~^*y1KdwerDND+1?h~ z#^&BerKzPEfmPW~u08?ohF)Q%6_$qPp_T#lrJimEX#rXJT*e3Q_HXf6lYVx=x^>g1 
zr16K9FH^X&(6C_t-nlp9-6S^J+pqo2vDME)oIhdn=C7{Qeb-i@cw`V&3$Xv+2m45cSEOnv?BcruKTO z_*5~>kYuV)Furpzsi@XNOj26l=Yn~Q_B?zr#UxaVz3SM*%Lj8lYAj7JmRoMP+~C{8 zb*qAcdw2sK!sHn1S0=olzPqipY*rbE%t7P7;!V0<_fKA0dNIRQVdvk_e)c&tOw7cq zKWqtc_1EliVGPy$;-~JR{iONEoBL1HzA^B)7G_QOUDS7syZ)ZCthM7Kqj=AXi<=Wz zTdr-3lG*$s;AqV4SsNc-H~iBmD*9H&mM82+U-8$u-}RO>%wImQCScilrt=r~c?gJy z{a$df)kgo)hh=+`=Si;K<-H=&^8U|`)2FS3pIn}?Jgi}EcHQqeF@KJ_zuD~|? z6SFV2Fn9cue-*%SMI_1Wx{75%o71lJFYM(KmR}^yWD+K+%xx2?eqa4GdZYHGx#4rQ zwS|HoEV-iRnKH$cJ7V>xCoNCvjONMRWr|_*pZM_D;XBDm-uHGN+H^UNKkCh^`adVH zTAz&b6pHXG@p-sR#6&#XiY;eXTJEu3vNKocm#~{}(tdH|<(agnmU@Bn)F)rdUN`YH z$6o83Y+Ww=5slOt>!5IQ+4*B^Ckp@O z3vKN9f2&6A_>Xw$$qJp0KPM=yHSYCw^-8Xfe0bF`M3Qy?mbC%GwQv5nF18Gu9C@4X z;f{0@%bDVG@78NI%Uj*KD6seAZO!bs+p%XfZKiB&7J4n*m_Ps4iJfUvYHz+(xmIs6 z!;qaJq0?>43WGq~=PUO8QB;|BrCunYWuA~>W8kdQ5!07GolvzqrPW_=mqp3<&jnYR zr@fNUm{9MuOP$lO?y(z3lj@G}`>T=@vcGq!D=0LsZpr4%Guv#~79Q`v;z;AphgIHR z7SDH@^Z7g9s^-a^DL1zqsXop)d*+%+CzCYH1s@mNIbSc@#vOdz{6`h5@%QJ;QbX2$ z|01<0K|w)r_DT+&Qvv+4FZr+iGVD-(e{{8Hg0kPE))yWdi*INzyS{pFY-7;#e37}D zZ3frnY$NkuVx+l(xwotH|b-uiSu-g}@W zXUWbDLXLtqd!F37pZoBFSo4)TkM?scXzh1oV7Os?C38VcWzB@|&55!9k0dkZ&N;f1 HEl~ylJb7fW diff --git a/secrets/forgejo/nsc-dispatcher-config.age b/secrets/forgejo/nsc-dispatcher-config.age index f3306aba0a1f8ca5956d67453d835359c57da274..7c752b14a6a62fe8b6d91a46ff8fc14a8a5c50a1 100644 GIT binary patch delta 1973 zcmcc5f1iJXPJL!bNnV&isAq0Pq?uuep<7CdVV0AJS-!rjPqMy|qe*0Peo$maKxA%s zC|6KrkU?;6rB6YSVPTMVk+z??Q&o_DN|<+EMOjL5a)^ajpn<7VUTUzrCzr0BLUD11 zZfc5=si~o*f@e`wu4B4_ufI`px?iS8fSY%2SZ=;)u3KnPW_pH!yQ51^P*!@9W4W1r zVVa|pzkg^TSF(qcX;i6Nm}y1Kdwk;Wwkh34hq z`B~<<6;)XUKB*O^0i{kwW~OPyiH;VYsh(!_LE%OLu3m-)Ts&U9H)5nzlZ=;K3pF`D zUCq(t;Hs6Yrs;g#{=%?W>YKLzTyKX^M~i8j&+q1nTh7fZ`PMk=&X=AnHo@QFLYa#b z13Pk^)-K7@XXT%<+V{Bomvzog`TXi1k1ssZYWI5a<_c%O!d8z@d;K<6cCQYuoM(M2 z|Nr`g$ogHn8E;m5R(`t?Xq4W$$<(~y!an4@$$lh;X*NQ(o&sf_fw7m1~3#?mS-*Ia9`9EPvTu!2smXt~_ z`ntt^eowS>#M5-uyf4`i$2b~i-t=mhc$^h+T4>6FZ*R4tRv79yxXjOG+rQAJVN1To 
zmzma$jJIw6uU4A+aQhG4Ux{7}-nUn}8UI>yQ9tL-_1E9tpZKK48YdhwwXu0|RBG~i z>C6AzBd$v9dU^4QrRe4Q>J@*pJC>wYC{8XBuAf(R^5ed9^E(X1Hu=doJoDz#iMN_` zc5-_9FO3-mueBTf?6wuXH!_;5V23!LzU=P{b#Dvpo<)E4zjiS*Z)eRRnd|KO ze`i0cKAo#`an|9sALa}nA8lT=%K7cF5BaNI&de(~zB$%#;lY1P%-4Dv*>={c-FT|U zRdkcFvfggbeRr4Ah_={AlCNzN4^Q;9 z7jv?kYCYK1Kf^oWXnLc_znQs**Ut>iT{%Bz;eCN${Gz9Bbnf31E9V>Sv|>k8I%h@4 zezlW|7Kh_sC%qTYU6E{4A{A6HH{nJ;x4W^p+z<8%$~#!<_ios0ytdEgsjXj;&4CX= zxmCBm2%YU&#;!B9w`h=EX1uH8<$YC>5&C-VNYG{gMQT>iO_9qHrGWJ3ne|}rOV@63skZ4-v@$!`> zQ|vA~O%45eUs+-QSIhdP75nzRU&`F7yzW|06GNH&GO>cjgQ^a5x}#<st^D7iG6&#Su5hzoBDt$g8k_) zYYr)&JG?gNG@s^Owm&8%>vv6JbP0XwztlbLH)l`u|5Fz`A6{N^IsEoHt^@vcryAb~ zG|SF-E)-_{I75oZ#=*DWK`;7TVs_JTi$Cc6`H5)-Ms8;NsRlM zoo`phsQq@ovwYINimXS%Dr}Z93p_0!)&GBFbN$96?K8XE_U@|QEZjUZyIp)=<>7+* zM+Z9lR3nTpem!$ZxcB4~y}9(sHTK(M!L0ROR%bL`lx+q!u`*pbVfn|w*eNMmL+WdaW)3ObbF(UG|OeK?@ zC*&DiGv3)EcVxj;zTRarRy@xc+}?0&v?ttQEL`zyuC(5^d2b*1+by3RRJw+B*PVk8 zmTrM~_~|7C+oJ&8?UMU@}>_g|m2X`*C`an!plF;_h-lo+QJeksy0 zG+Nc^bM3xP_xgmnKX^{Z+Hb90S7N$br+&#PqfCB%_9#|%m^_H>`%%;}!LzMPfpj{Z_UpS#7Nes}JhjA=*K0{~Pzo;m;k delta 1973 zcmcc5f1iJXPQ8U`hM#k2sCjmJh)afhxSyGkVO3;~Wu>cmSb1cWZ+T*Igu7FeZ-&2P zGM7$-RiFcB(X{u9kRk&ePX{K4BZ1y1KdwL5>-rnMtNW z;bp$AiQ2BENu`l~F2=r@IR%F0X`ZIpW#u9D7HNJt&LI`~T>XYSIpgJS%iCDHs=Ri5 zIsf~_?|1erX;m|MGUd}{%~$HFmWfPfHYG?!*-X<_di%*nQ0l_Qsj@8P_I$RlYdb6- z+)+w6$#RSR?Xr`n)0IBB8NEBYCT2Zn#lzf%tK+pFwR7~mXL7sQ?-a|szvo(haGRFx zTLbBW>Us}{yYE%>-->2iZt0S}`%F~&80+DmTmC$1=1IpX673P|_Z|vbxP0*!-xB{@ zwYk=6{=S8gazaPqsa?6d$LqASsnW}?e!k*e|8;u)6VseMM(;l;Ub)Pv z)%yRx?#(9VMKffd7(VEq~-#&56cv0Oace^2F}Bw3f~N*~i>yR08Fn=4)xo-}<^U+(3P!V&>d%JtKB=CRHy zxG*c`Fw4JrOK;40wLbS*qhd^3!lhSpCdfRDuPRyf`pTA!9pNV}9L0OTILf1M{DFxsVw@;<&f_lDZc2ZrN6J140~eIyx4+m#Y?xeotb#Kh>u@v^`pJ7 z_g~^{tTp_uaoi!}=Z%a{?_aNbX*1zcLw(@&BV2dY{x~?Qd|;T^FnzgxqGama5O%)% zT)GKW|7Kh0I6f6(x_bOp*ioNZiz+hZS4``7*51y4$!e!`;3<2vwa1zMe-S&ATD?QZ z^!SPoc0FQO3LeY9*ltyS{OgV@#r=$oGhW2V>5I%t^JpqN>Zx&Y$Bo@{Pi>cKvddq3 z?BmBv^gJI%RSX=$D0>r1^pUs8MKPP%{SK#%&PqeqXh 
z{Vof8Sn8X)WQWV1%_Vk1^~-H)^>Wk}ugZPWqpSP(li-&6BcJw_2wpH(Shsn75bL3K zi{9_b#-VQu!#kqZK6IMR(965~hMgycx+I(w>SbdCn8~wtcPvQ4O>E$Qj@KaBszp9r9BjR-RmKzegm- zAab!?N2jyI)<~t5H5D&5%kfYSL=)a zmA(x6ouYSR>rx+$33ZuYw$x4LGc{GR5T7WGM@SZ7&W@eERC;9W^%YjIyfA z-5(ow7{(`^__|Go@llP;lKoamAN{P}f3ovD$8`Q!e*99W2eP>B$cfvD<>Syx-5Up<>bX{wLlm<-#|uPCWQ1_Q;Fh z^8!+Pc;-Gh^X1LnzN;q11@V%Jn$}+-I4#Lc$&nWU#*)u{#(sgJ+1U(*T=Um4zgWFeZbd4yrtQQzwD z^1V!5x{;S>U%v9nN8^pI&T@{5u;?;pgRmcbdi?9=#HGx8{qIih)8~A=`;r`WX50O^ zlW=(!E6>rJTA${eY%988FE{t%{hxCJs;;b#V~bf3oqTv+ZU3$Z6X!5}DsJVSYX5c0 p)m8sjoxRQ~&%iYG#;ZDKqtu-?b%HuG4yC`A|0^_O&(UVt zFPBrgk8!bqV_tSap=ol7ccr&)h_8itslJP)XH|uEPNHdOVTomAS$cqnCzr0BLUD11 zZfc5=si~o*f@e`wu4B4_Uzt&ge}t2Xzjl6plu>eIfLpeyQC3QJNO_e-cz&o)u#>5Y zQL3Y}Z>~`wS81V%d8AoRW<{xcguj=zzN>zAX;yM!Zh>V=ly6#id17v=zPWd5n0JK9 z#E;_j`uTa;StWsfIVFXqxoQ61`ax#7=6;4AhN0PMz8<*+&gSkW2Ii^35g7(t;YNm~ zS%yaLNhwBYNrnbR{`#fnc@`Oo;ogM?De2~kre;AomTqo2;clT^y1KdwjwSAah5FuR zMS;$RE@@F=uFgTu;fbd1W(HMWdB$Fu{>JY0+J3o3K5mJgT&fnQ=gGbGS38&xy|gA~ z^Pl)fRkl?V;u3b2Jx}!dHE9~>e7{$HmZm+g*4X}iaM^LPf{1^~Nv)=$WxKZ8*f<;w z7F#nx>UR0f`MyOpr`u|m8P9Nb;9hMZX{I&l>Z;F6m$WgT33U*^^?Mfc$0vN*clS^5 zjJRhvC%8V4Bk$JjAZ|gw9TP4uz8No~-*5C^hDf1Q6pPTGn=T!E$|IUsEH})^< zk0ma8<9BCokL$w4Qdd4VI^RCX=>2m}Xk}1&+Rn7}sWWc8?GB%<_WM}xlj%A8raVf@ zwb3zLyDRVq)7j(xt*dk@SD0QEUefyf)Y3))@#$J=PX*Yy{Oh-C99Vzn5!33$TI|xg z>B~1&1blR!w)VsHixWcA|DJKL`nS5KaQUP4tFM*h-kA0x@7v^m&-e3GNpwB4bJ|nC zB=wKK?u9)UG?N`>FHm~J^KEKPz1m3FDsm|y#U z@$h`+2Knu;=FPpry<_(~(P{PE-&l_2*WJ8!!1DXMJ(K=sZN}uub%5c zR;sqvIUiy5pT8E}i0Rie6q~X(sR@_(Q*m$`=Hj+^}H?H zgQXEM;g5q})-Sy$YRfp6`(<^Wf}80(5o6=y0YR=8j-|d8b9tMWP$qu4h{??79Fw(PZvKmKPrbyqu%9 zd(E*t>5*;dWZhAJgE#NR|GHQ2cI(*xyQE;x(`@JAw&}t*^M$!8r&bi5o-BTFxa_F+zuC3m{I>Op*&A40JsMVD zx%@UYuO^4{v-@Q6_XS`4^@DhVMRJ~7ROMW|d+@66**y%Ndn5PAgvP9oG=8~4Vb2eN z%E>>RZYh7Baxvia)}0I=xIUlY-Wg-Y$GtLmy}?(D@9w-izj*NTb?ie%WPJ z?u#5&VHZ>D-X`;&6pVV^{9=A&;5ElP*Wb;G`myv}#dKx|r6lde<~RG5pGk2~TAjV{ 
zz`IMLopLD+-@Uw6&ssOfan;8Sb#FW`%spOSe;{-`18#7&y4#8RcG{_TCB3_<~EbF^@-j4p7DtZS^ikJPq-q7Gp1q}_3vC*H|)2b-=*a*E-O7(FZG$R@s-C%nS18Hnf!(iSONh>9O zX*d5aY1>(ztex+=OnjZHZZm(A;FQ|7Yg3TIeeS6rUM`;?DfhzW1th9AKtogW`q mFIu+jIp>n4OXoccy0ORW{!PE5fj=cg`tO{LyXr0bd?^6-7Fo;y delta 1190 zcmX@gca3v`PQ8y$UU81QX^E+4VsUA4N^oSJp|6W!c}QuFr*n3dUy*)*XPT#Fx@VAc zGMBGspi8ieONvE7l51tQM@Cd;u5o2zP-;MEV!ENTqjQ0CRe7*au3KT0BbTn7LUD11 zZfc5=si~o*f@e`wu4B4_d!)W=pqH_;NkmDZmy20)p__JPNMX2hU_rREOGbKLqG@ER zX?ki_a8RWMm#ee6MOIabcbHeDb9t_FSgO94X+dFHhN-`4dSF&bQfaZ5rC(@OmW!vw z#E;_jPPsuTg#{@=&K9oz*{S+wRpmaJ0ilMDPT86IE>+G+mj0QJo_T@VZWZ}le#Ob5 z5vIvmA=YbxXxO2M}A)L!3voRl2c0`H?rvmJ#5y!$DaRbl|kH;bI-aW&ptMc`YgEn z@3goN`SlaGZOGg?W$LZRhT7LJwOeY>{Q5QLl^@5h51I>uZmef+&DU7+tGX)b317rK z(HWutMRvLL9{r%IUijwGOe?{LC4EP=R_#dAHmWq}KQzZpQt*)3+Gs7F9>Y-kB8HXg z_n7lMo%&__oSvpFzJ0HhQ;*)~%x!$vc1&LFvReAHkMlNp)!!7(*ZT7^AS>}r)2+V~ z6C`eZ<}G=)opFPJmE2_&+q4N+SKVA3Um~E<5zv>C+4hI^M`+p_rBH*jSqvXxUS*^ z70!nKiBnd%p79I$bck`%q2HGh8QATP*zBA*b>G~bHk;bFH**|yVEU`S=T*ig&Eqqt zd!LauefA>iX|P2U%LV1)aMg3tmgj3%y#Et(ts(8Zb1vtS3cXY{?!_{;~6OzQ}?|()>CP!{v`I2?p>D$3)BRn|5xviwh)az zF@4GMS3!0cCX`JxT~HbI(sRq)zj?}{{Ef@G70;D3UYO=$@bhb(tHJp*(fN1M&wf0> z5&84k>a*HMZEAd<1fF`^^4>T3)ca)aXZ3PhD-R@PDA$XgX)@3g4jJ^hZ}AWYN;eJzp&s%*m5fX5;g-_ Date: Thu, 19 Mar 2026 04:12:11 -0700 Subject: [PATCH 39/50] Install nix on linux namespace runners --- services/forgejo-nsc/internal/nsc/dispatcher.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/services/forgejo-nsc/internal/nsc/dispatcher.go b/services/forgejo-nsc/internal/nsc/dispatcher.go index a3291a4..1bf339f 100644 --- a/services/forgejo-nsc/internal/nsc/dispatcher.go +++ b/services/forgejo-nsc/internal/nsc/dispatcher.go @@ -425,8 +425,20 @@ fi if ! command -v xz >/dev/null 2>&1; then apk add --no-cache xz >/dev/null fi +if ! 
command -v nix >/dev/null 2>&1; then + apk add --no-cache nix >/dev/null +fi export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" +if [ -f /etc/profile.d/nix.sh ]; then + # shellcheck disable=SC1091 + . /etc/profile.d/nix.sh +fi +if [ -f /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]; then + # shellcheck disable=SC1091 + . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh +fi node --version >/dev/null +nix --version >/dev/null cat > runner.yaml <<'EOF' log: From c47f0e6beaa0aa7372e26f4fe3dabd87f48f666a Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 04:18:38 -0700 Subject: [PATCH 40/50] Enable Nix and refresh linux cache volumes --- services/forgejo-nsc/README.md | 3 ++- services/forgejo-nsc/config.example.yaml | 8 ++++---- services/forgejo-nsc/deploy/dispatcher.yaml | 8 ++++---- services/forgejo-nsc/internal/config/config.go | 8 ++++---- services/forgejo-nsc/internal/nsc/dispatcher.go | 5 +++++ 5 files changed, 19 insertions(+), 13 deletions(-) diff --git a/services/forgejo-nsc/README.md b/services/forgejo-nsc/README.md index cdbb2bf..428ff27 100644 --- a/services/forgejo-nsc/README.md +++ b/services/forgejo-nsc/README.md @@ -48,7 +48,8 @@ profile. The important knobs are: - `namespace.linux_cache_*` / `namespace.macos_cache_*` – persistent cache volumes mounted into runners so Linux can keep `/nix` plus shared build caches warm and macOS can reuse Rust toolchains, Xcode package caches, and - lane-local derived data. + lane-local derived data. If Namespace keeps reusing an older undersized cache + volume, bump the cache tag name to force a fresh allocation at the new size. 
### Running locally diff --git a/services/forgejo-nsc/config.example.yaml b/services/forgejo-nsc/config.example.yaml index 15fe0a4..b45234f 100644 --- a/services/forgejo-nsc/config.example.yaml +++ b/services/forgejo-nsc/config.example.yaml @@ -23,12 +23,12 @@ namespace: network: "" linux_cache_path: "/var/cache/burrow" linux_cache_volumes: - - tag: "burrow-forgejo-linux-nix" + - tag: "burrow-forgejo-linux-nix-v2" mount_point: "/nix" - size_gb: 60 - - tag: "burrow-forgejo-linux-cache" + size_gb: 80 + - tag: "burrow-forgejo-linux-cache-v2" mount_point: "/var/cache/burrow" - size_gb: 40 + size_gb: 80 macos_cache_path: "/Users/runner/.cache/burrow" macos_cache_volumes: - tag: "burrow-forgejo-macos-shared-v1" diff --git a/services/forgejo-nsc/deploy/dispatcher.yaml b/services/forgejo-nsc/deploy/dispatcher.yaml index 1dc01b8..0f183e1 100644 --- a/services/forgejo-nsc/deploy/dispatcher.yaml +++ b/services/forgejo-nsc/deploy/dispatcher.yaml @@ -33,12 +33,12 @@ namespace: network: "" linux_cache_path: "/var/cache/burrow" linux_cache_volumes: - - tag: "burrow-forgejo-linux-nix" + - tag: "burrow-forgejo-linux-nix-v2" mount_point: "/nix" - size_gb: 60 - - tag: "burrow-forgejo-linux-cache" + size_gb: 80 + - tag: "burrow-forgejo-linux-cache-v2" mount_point: "/var/cache/burrow" - size_gb: 40 + size_gb: 80 macos_cache_path: "/Users/runner/.cache/burrow" macos_cache_volumes: - tag: "burrow-forgejo-macos-shared-v1" diff --git a/services/forgejo-nsc/internal/config/config.go b/services/forgejo-nsc/internal/config/config.go index 5750196..5ef8a7a 100644 --- a/services/forgejo-nsc/internal/config/config.go +++ b/services/forgejo-nsc/internal/config/config.go @@ -176,14 +176,14 @@ func (c *Config) Validate() error { if len(c.Namespace.LinuxCacheVolumes) == 0 { c.Namespace.LinuxCacheVolumes = []CacheVolumeConfig{ { - Tag: "burrow-forgejo-linux-nix", + Tag: "burrow-forgejo-linux-nix-v2", MountPoint: "/nix", - SizeGb: 60, + SizeGb: 80, }, { - Tag: "burrow-forgejo-linux-cache", + Tag: 
"burrow-forgejo-linux-cache-v2", MountPoint: c.Namespace.LinuxCachePath, - SizeGb: 40, + SizeGb: 80, }, } } diff --git a/services/forgejo-nsc/internal/nsc/dispatcher.go b/services/forgejo-nsc/internal/nsc/dispatcher.go index 1bf339f..4a579a6 100644 --- a/services/forgejo-nsc/internal/nsc/dispatcher.go +++ b/services/forgejo-nsc/internal/nsc/dispatcher.go @@ -410,6 +410,8 @@ func appendVolumeArgs(args []string, volumes []CacheVolume) []string { func (d *Dispatcher) bootstrapScript() string { var builder strings.Builder builder.WriteString(`set -euo pipefail +export HOME=/root +export USER=root mkdir -p "${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}" cd "${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}" @@ -437,6 +439,9 @@ if [ -f /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]; then # shellcheck disable=SC1091 . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh fi +export PATH="/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}" +export NIX_CONFIG="experimental-features = nix-command flakes +accept-flake-config = true" node --version >/dev/null nix --version >/dev/null From 3210570ff33366c8b8acd1bf76aba1266ef9cbcb Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 04:23:37 -0700 Subject: [PATCH 41/50] Refresh namespace runtime config and linux nix env --- .forgejo/workflows/build-rust.yml | 8 ++++++++ .forgejo/workflows/build-site.yml | 9 +++++++++ secrets/forgejo/nsc-autoscaler-config.age | Bin 1395 -> 1395 bytes secrets/forgejo/nsc-dispatcher-config.age | Bin 2015 -> 2021 bytes secrets/forgejo/nsc-token.age | Bin 1861 -> 1861 bytes 5 files changed, 17 insertions(+) diff --git a/.forgejo/workflows/build-rust.yml b/.forgejo/workflows/build-rust.yml index 17bcea1..d1c3e1c 100644 --- a/.forgejo/workflows/build-rust.yml +++ b/.forgejo/workflows/build-rust.yml @@ -19,6 +19,9 @@ jobs: runs-on: namespace-profile-linux-medium env: CARGO_INCREMENTAL: 0 + NIX_CONFIG: | + experimental-features = nix-command flakes 
+ accept-flake-config = true RUSTC_WRAPPER: sccache SCCACHE_CACHE_SIZE: 20G steps: @@ -44,6 +47,11 @@ jobs: echo "SCCACHE_DIR=${shared_root}/sccache" >> "${GITHUB_ENV}" echo "XDG_CACHE_HOME=${shared_root}/xdg" >> "${GITHUB_ENV}" echo "CARGO_TARGET_DIR=${lane_root}/cargo-target" >> "${GITHUB_ENV}" + { + echo 'NIX_CONFIG<> "${GITHUB_ENV}" df -h /nix "${shared_root}" "${lane_root}" || true - name: Test diff --git a/.forgejo/workflows/build-site.yml b/.forgejo/workflows/build-site.yml index 9b08152..3224ea9 100644 --- a/.forgejo/workflows/build-site.yml +++ b/.forgejo/workflows/build-site.yml @@ -17,6 +17,10 @@ jobs: site: name: Next.js Build runs-on: namespace-profile-linux-medium + env: + NIX_CONFIG: | + experimental-features = nix-command flakes + accept-flake-config = true steps: - name: Checkout uses: https://code.forgejo.org/actions/checkout@v4 @@ -38,6 +42,11 @@ jobs: echo "NPM_CONFIG_CACHE=${shared_root}/npm" >> "${GITHUB_ENV}" echo "XDG_CACHE_HOME=${shared_root}/xdg" >> "${GITHUB_ENV}" echo "NEXT_CACHE_DIR=${lane_root}/next-cache" >> "${GITHUB_ENV}" + { + echo 'NIX_CONFIG<> "${GITHUB_ENV}" df -h /nix "${shared_root}" "${lane_root}" || true - name: Build diff --git a/secrets/forgejo/nsc-autoscaler-config.age b/secrets/forgejo/nsc-autoscaler-config.age index 460d194950b9d0da16732cbea12f8c05529cdeeb..f8e55cd4c58f2ecff3314d28d752288bb909f5dc 100644 GIT binary patch delta 1348 zcmey&^_gpePQ7tZj)$qaX=qq*X=sF@rCVmANxG$eQBYJyWujBLr(uXsmRnAEc9vsC zF_)V~se8D$zonOJKyambep*mzQINhtnu&g1kam@^Pkv&Gwo$2%zGaYkAeXM4LUD11 zZfc5=si~o*f@e`wu4B4_MUj!0xo>iCrDJeusYQ8ym|JO}S(R6MvSXB!r&FGPl1W&k zfvIY;iB&0q2H6%Kxh}qlU&Ynd+B?kHK-X*2lE|$eyy1Kdwxjuyf5mjd9 z23f@+0S3W_ITqSxu1+qN?zyR@g~1kiL1~8d#`(?xLFUz;#$p_z2F<$rR~x=8*EtJ zcJoNRH>|JU@NG7qe`aEGn;VPFS0yX4+KL;7eysI3zbH7gE=rf!`sw_Zv_&iB)Ay}r zFngKcyou|(w8V?n2W2zMqt&{it{lDJ663Hb`oGhfV9uc8U13VXTdptA(bRgn<`#?5 z=ARZiDu=!9tl%}neTkoa(>88tmb`JFMLGD{p&wIyP{PyLPN!xyP7w%-qlt(H!gUiGj6mzAk0r_4ensk8Ql}bt`&Bcj;`m9IFRc&SYpl 
zUApDeJ=2zzt7k1N7rWGwVpC(7J3Y@P;+W^I%g>hfwEA#LFO>e};OTkc?fmlBlRkbn zJ9qVW7*}w1+GMA{%~#ILEP3pDwEA^lTKxr%7pu*)4mfMxsr@ne>kS1>>2BA4j?Zex z>hBw*`EI`6v><=_Yw0s`{?6~iZvB1E^fOSibke6dD|ODid-`vFPpV{+|5Mw+6`^n- z;#r|^P2`%q93I_3LotmhT{7*HjJa0bKla;cOU7RHgO8gd)n?o5e;>Z#po@Owt)mBK zMIOIYzx#M$t97*yJ9B4q`LU&Sr<+f+Y%(hAxvYNXhUwLMQIU;#O^shSX1-?=U->6t zhV9WW?3=lF2ygSJQv|~8?TOUPh)v!SaN4C=lfLl*Ud)FI^oc?#caVV zBj%p{KdX2S>$Q^W^CJRH*y0zjt-8Nq!EJ*T;YyClg*S?2*q>Lloi_dHmERc?6xn#g zHSGIC)p=PC{JCM;-<_3u61gwvU-;+pa!>VR^=Xc=&!u!voabFDI8$=+(!(KsZyg=V zpC+2*RrxGjUu&?#a>9n}4cFg=$T;%${*$kWs^Dx5|D1bzR^$02dL>zNzw+*W6jAaR E04k|r9smFU delta 1348 zcmey&^_gpePJNPLhOnMH(q zGM9f~W@5N`YOr%vmA0pLNkoy4YrbzpMx;e?X?R##Sg}h*x<`Jk0xq)dG=8-``mPLW}l}Q0%$rfq8T>8qj9BaFe>}*|g@xG4I z>~6-T*2X7hH*(E$4|LE!QzhCpX@ykkm5shr#3>uS`yD zn!Cj`>sI}oxjnqr%{D8RrDbN>>ym_yUQl6Bd5Je zSr}b5Cq6m%`;?xqvC-rWX(4W>gMWE$DUm%YZD1ODB)jDQC28fiO_NMLo0Jwg^6pPf zJY=kM)cflPZM__u^lNL}j})X`TIs>RZkyA=wOH{vW!Yazgv*^Q)t9{I?cR!fD>)au~ zNwGIJ{5g{RJCOh0z0QZnk2`riou%*FuT{^uc+akFPd}{sFa0NtZC>ifDbY+OM>3=8 zK5Dsp2&eA8`j2~C($f8hoTRo1y}NQ)a>+Y6o|2aeA`1gP>r7&3;Vk{KXSzrCht1QT ziMsyh5)ab;DjU@iYjyY61;aBdy!&PxDrepOH{Z4LA@{FehMv=W#h;&M-yYVj?tkM+ z-v{RUlzNxH+jn-Jm7BBh;fWvm22$N0nWZ$e?lI;6TKafu_Q!BG*&C^D zvs(M(>MJe$Cvv3g#ozQ_%uDs;FwRzq?EY4IG~r@-f6ULB zo0*F_IAwpo-hT6o$H9-OZ&sdIfB(gf*?(@l$#yz%Phy7bLzPQ4w@<8FI;*Al{1t~^ I{f}1w04_Lrd;kCd diff --git a/secrets/forgejo/nsc-dispatcher-config.age b/secrets/forgejo/nsc-dispatcher-config.age index 7c752b14a6a62fe8b6d91a46ff8fc14a8a5c50a1..43b0594a09248e1e5f06ff9f86ce12d25f7b22da 100644 GIT binary patch delta 1979 zcmcc5|CE1%PQ6!%TcJgPQC?+)pOb#Dk87!Mnz3VAYD!u~m3xXyWnOxEa-?N&YNe%H zIhVV>iBF`qn}thuX_RreXJ&bRR#=u>p0S64yJbp1SeTiQPf})nVsUn)Czr0BLUD11 zZfc5=si~o*f@e`wu4B4FP(W3pi;GjDc4(DPN=9K+id&Aep|6u!vWI86cBF-GsY`Nr zmUCcEN|JFgms6;pd#-+Eh`)AmnRaqYPElc$Z(e~@Wr&G?fw5<4hM~JbVnK08u3Jd? 
z#E;_j`JTn5p_ysCKTnl#G`LKA~h4d9&OB&A3 z*>*)lU{=4zw0mcg-%mNsa86*;y#u*f8w7KYb6o%5%eXatZr>CO<5LGG6!c_3#OQpO`RU&?o|exKF1ml&WGeK{w&h*5a0bh1u?TV1JNJXkd!MJ& zPE6o-NKn|rzag|HW_tR>O6!07Oe%7Bynn8jotKpy>X6l}wBk_z!@$+yUJIX=x$E5X zVg=6w9Xu)4LNZbGv9)O#0x=v}t)tG}{h@dN1_{hN1Gd{aKv zU1PlF%M|9W837Ypwr(p>Kbr7wY1v*2Ej85>76mW1t?qfttYmbW?{Q=B1y+Gqx2M*A zSkInjz<+y}K&Tu0%B3!KPcu^bvgb>2J(skmD{JbG)>(r z7q5MJ5$AAw!|vmDr*dyQ*56S7Rp)!yWc&Q-0wgE*Ijo0LxSo>|;?gv$@lS+!9??9GAlTm>;-RYZItbUc`nu7+4}9s9lk|I?v5T=k69L3R9igCpE@hzQ~0!qs{Om_b!z1Q z#3>~0WSCdc@k^pO<;2ITLXD3pALSGBf35wzpiR$*gXb>u$E=ADdJm>t$@^^+bRu|( z&`!&Hk$>2ZP4+fQQVji~oF5X`%XVOTa`?NxiJzXmiEe-H_btijswn3Pn}@}%R_`~t zzu6|R|8DE#5XiAxWY^41)gdm!LM z>8y+2>eZco&2f0aSJPfHtKe>b^wndF_nkOop7W#6Ms)WHgNb?*rgr!qKK}gvBJ;g> zXZU`1egW9nw>p6mT|cK5hCGg;(HeX(-)LDkv38*8faR^D+s zwctN@T|;gpgEDJ|P3+l3KU)KG7 zdYh2LTW>w{Kbtqozc?zn=$+;P2mk)2f3g>CRw;E=CFCSd&H2y$In9`v6W8a>fe+xk(jEy?R(W$v7n@;5IXE8%WxI5~e|cTsuRdrG-MegN+y0=Ou8pQgk7_l&epY^S ze!91&$h8BrB3I0OSGQc%@%i-}r>XWDZ~k2TQIP#>@ytkXLyao|GbdFE{TDZz>eRf- zx3i+*LFHTC%KHKf&zxPg=Od@#FPVtjwfZ-&-#ex1x_+h6^RU-9tP8gtEfG4q@iy~$ z^-cxeY4vQ(8~!9U{5HNSAh3){SGB+V9czUxZP!L+#+)yPHvVjirapJIebwO#>9Ncm!3?*G>{YCthKh*?Vi+ zj+3*W6*$dxxOH?}z2DECMfYs3Pk%11tZ4u3_x2*+9m)=E-xohyS9R=AnPkI*ZQ5VU xrkO8h^8I#^|2X@>d+se~mONd3Z5dD2g}W&m!fZYMFD*OzcFHxOVzpeJ%>adTtwI0* delta 1973 zcmaFLf1iJXPJL!bNnV&isAq0Pq?uuep<7CdVV0AJS-!rjPqMy|qe*0Peo$maKxA%s zC|6KrkU?;6rB6YSVPTMVk+z??Q&o_DN|<+EMOjL5a)^ajpn<7VUTUzrCzr0BLUD11 zZfc5=si~o*f@e`wu4B4_ufI`px?iS8fSY%2SZ=;)u3KnPW_pH!yQ51^P*!@9W4W1r zVVa|pzkg^TSF(qcX;i6Nm}y1Kdwk;Wwkh34hq z`B~<<6;)XUKB*O^0i{kwW~OPyiH;VYsh(!_LE%OLu3m-)Ts&U9H)5nzlZ=;K3pF`D zUCq(t;Hs6Yrs;g#{=%?W>YKLzTyKX^M~i8j&+q1nTh7fZ`PMk=&X=AnHo@QFLYa#b z13Pk^)-K7@XXT%<+V{Bomvzog`TXi1k1ssZYWI5a<_c%O!d8z@d;K<6cCQYuoM(M2 z|Nr`g$ogHn8E;m5R(`t?Xq4W$$<(~y!an4@$$lh;X*NQ(o&sf_fw7m1~3#?mS-*Ia9`9EPvTu!2smXt~_ z`ntt^eowS>#M5-uyf4`i$2b~i-t=mhc$^h+T4>6FZ*R4tRv79yxXjOG+rQAJVN1To zmzma$jJIw6uU4A+aQhG4Ux{7}-nUn}8UI>yQ9tL-_1E9tpZKK48YdhwwXu0|RBG~i 
z>C6AzBd$v9dU^4QrRe4Q>J@*pJC>wYC{8XBuAf(R^5ed9^E(X1Hu=doJoDz#iMN_` zc5-_9FO3-mueBTf?6wuXH!_;5V23!LzU=P{b#Dvpo<)E4zjiS*Z)eRRnd|KO ze`i0cKAo#`an|9sALa}nA8lT=%K7cF5BaNI&de(~zB$%#;lY1P%-4Dv*>={c-FT|U zRdkcFvfggbeRr4Ah_={AlCNzN4^Q;9 z7jv?kYCYK1Kf^oWXnLc_znQs**Ut>iT{%Bz;eCN${Gz9Bbnf31E9V>Sv|>k8I%h@4 zezlW|7Kh_sC%qTYU6E{4A{A6HH{nJ;x4W^p+z<8%$~#!<_ios0ytdEgsjXj;&4CX= zxmCBm2%YU&#;!B9w`h=EX1uH8<$YC>5&C-VNYG{gMQT>iO_9qHrGWJ3ne|}rOV@63skZ4-v@$!`> zQ|vA~O%45eUs+-QSIhdP75nzRU&`F7yzW|06GNH&GO>cjgQ^a5x}#<st^D7iG6&#Su5hzoBDt$g8k_) zYYr)&JG?gNG@s^Owm&8%>vv6JbP0XwztlbLH)l`u|5Fz`A6{N^IsEoHt^@vcryAb~ zG|SF-E)-_{I75oZ#=*DWK`;7TVs_JTi$Cc6`H5)-Ms8;NsRlM zoo`phsQq@ovwYINimXS%Dr}Z93p_0!)&GBFbN$96?K8XE_U@|QEZjUZyIp)=<>7+* zM+Z9lR3nTpem!$ZxcB4~y}9(sHTK(M!L0ROR%bL`lx+q!u`*pbVfn|w*eNMmL+WdaW)3ObbF(UG|OeK?@ zC*&DiGv3)EcVxj;zTRarRy@xc+}?0&v?ttQEL`zyuC(5^d2b*1+by3RRJw+B*PVk8 zmTrM~_~|7C+oJ&8?UMU@}>_g|m2X`*C`an!plF;_h-lo+QJeksy0 zG+Nc^bM3xP_xgmnKX^{Z+Hb90S7N$br+&#PqfCB%_9#|%m^_H>`%%;}!LzMPfpj{Z_UpS#7Nes}JhjA=*K0|0_~oxm%WYkwH;pm49JDn14xrVL_#jm#KcGsf&B4u~%_LzL~$DqfcPu z#E;_j2HD}}o}neVrUu!imZk;)iD|(Gp@A8RhRzngPWsx3nL!@8xh9ptjzQ^Mc{%Ap zDd_>0k)hh2+7YHjS+0QwIXRWC8BzK^&Xsw|nUz_FfmuemzJ?}Ty1Kdwt|h+3Sw2P; znHBn;?s=9WUa0|!+x0B~+fzpQvqbpw&+v1lkKha^V^VMCFxsPJkXn9?j zxkI`9S?cCxA#3kN@R?dlDw;>m-R*l;Vbuk#(=Ohf^)??Fj8fmNIm!L|{QI;WvIn*; zQ9f&Q?|yxnMB)4Oijgx+BG&%P=Mz0-x%hEgYlGG?4xS$k!k1o&vM-Sf&^fPnMzyHw z@bY)1-&6QaozAi*IzI4`+cEir_BUnOJzK@^XffVrJ=Zkt)x!IRmkYvDPS19=o7o|K zE&h9UkH4$J&g4DvT-i>6Ycy(3p0mj-IVWGMqTkE>@g4i5`UB5jHgrz=@Gy*}DLhej z6RXgKvT3VZPjr8lmc1mx=TyhV5S}G;LF^I#-sO{P{$8;%~@4rV>;_eI|#l9)h{?bh!I(g5ptIybT>CfNB{cE?Vd)yH}o;%}nieSL1Cl95r zKH9hS@4DypMgHow=O@V2?q}N5q;tK0#)LehXQoY(!jfz9A}$Cjom8y2#`f!8q=e3L zulP*?!H=L~KeT7wKl}N`-_94o!GQG@QKmTj-QD5e%F$<4IEv&lKy?%r7^YCeF7YOpyzi;bY*-=o)f71I}q}MX; zNE>Fky-ueNRHP;_ZdPeL{VO#|)Z&`{BJ(?2X12cLKlasQ>e7gx6IfQ~xqi_RyR&NQ z`TZ6T@$y@W7))Y|Li)e zhwZy|$tM)FhOemI*HAy{@GGOljFi$viK#GW;+{xy5U&yooWd7=k@c&)kQQ}^`#Q^wqNv6@k}KaFgZznolZp?LpIf#BXH 
zIbzG?ZafHQ4xHd_ku2~gJL&9;<|M<_s}`y=wHcl`Klec4)-M~+t!=G&-gk|e)g)on zVJpp?S*P1=+*z`@x39V0uKru-eIZlg>G~EHldN8kTU#9yuPQNJR!t9NakqT2dWC($ z{rN^~C$lN>WjFp8*#9cuWy=RM$D8&MSEeq``K!Ci@xw=J&Vvek#{(vP-SFMgyD;Zk ziEd+$e_-m2Z2eV7WQ}UCf65QBn;dvc_qM~+zr9*-_sp_ayit+9?~ZaYd;2G;`8Ow> zNxBv8`Tg3*_&q11KfPXD!Q##N;ghF&|5TM1r@VzTuRTB0p!~S~Vt zFPBrgk8!bqV_tSap=ol7ccr&)h_8itslJP)XH|uEPNHdOVTomAS$cqnCzr0BLUD11 zZfc5=si~o*f@e`wu4B4_Uzt&ge}t2Xzjl6plu>eIfLpeyQC3QJNO_e-cz&o)u#>5Y zQL3Y}Z>~`wS81V%d8AoRW<{xcguj=zzN>zAX;yM!Zh>V=ly6#id17v=zPWd5n0JK9 z#E;_j`uTa;StWsfIVFXqxoQ61`ax#7=6;4AhN0PMz8<*+&gSkW2Ii^35g7(t;YNm~ zS%yaLNhwBYNrnbR{`#fnc@`Oo;ogM?De2~kre;AomTqo2;clT^y1KdwjwSAah5FuR zMS;$RE@@F=uFgTu;fbd1W(HMWdB$Fu{>JY0+J3o3K5mJgT&fnQ=gGbGS38&xy|gA~ z^Pl)fRkl?V;u3b2Jx}!dHE9~>e7{$HmZm+g*4X}iaM^LPf{1^~Nv)=$WxKZ8*f<;w z7F#nx>UR0f`MyOpr`u|m8P9Nb;9hMZX{I&l>Z;F6m$WgT33U*^^?Mfc$0vN*clS^5 zjJRhvC%8V4Bk$JjAZ|gw9TP4uz8No~-*5C^hDf1Q6pPTGn=T!E$|IUsEH})^< zk0ma8<9BCokL$w4Qdd4VI^RCX=>2m}Xk}1&+Rn7}sWWc8?GB%<_WM}xlj%A8raVf@ zwb3zLyDRVq)7j(xt*dk@SD0QEUefyf)Y3))@#$J=PX*Yy{Oh-C99Vzn5!33$TI|xg z>B~1&1blR!w)VsHixWcA|DJKL`nS5KaQUP4tFM*h-kA0x@7v^m&-e3GNpwB4bJ|nC zB=wKK?u9)UG?N`>FHm~J^KEKPz1m3FDsm|y#U z@$h`+2Knu;=FPpry<_(~(P{PE-&l_2*WJ8!!1DXMJ(K=sZN}uub%5c zR;sqvIUiy5pT8E}i0Rie6q~X(sR@_(Q*m$`=Hj+^}H?H zgQXEM;g5q})-Sy$YRfp6`(<^Wf}80(5o6=y0YR=8j-|d8b9tMWP$qu4h{??79Fw(PZvKmKPrbyqu%9 zd(E*t>5*;dWZhAJgE#NR|GHQ2cI(*xyQE;x(`@JAw&}t*^M$!8r&bi5o-BTFxa_F+zuC3m{I>Op*&A40JsMVD zx%@UYuO^4{v-@Q6_XS`4^@DhVMRJ~7ROMW|d+@66**y%Ndn5PAgvP9oG=8~4Vb2eN z%E>>RZYh7Baxvia)}0I=xIUlY-Wg-Y$GtLmy}?(D@9w-izj*NTb?ie%WPJ z?u#5&VHZ>D-X`;&6pVV^{9=A&;5ElP*Wb;G`myv}#dKx|r6lde<~RG5pGk2~TAjV{ zz`IMLopLD+-@Uw6&ssOfan;8Sb#FW`%spOSe;{-`18#7&y4#8RcG{_TCB3_<~EbF^@-j4p7DtZS^ikJPq-q7Gp1q}_3vC*H|)2b-=*a*E-O7(FZG$R@s-C%nS18Hnf!(iSONh>9O zX*d5aY1>(ztex+=OnjZHZZm(A;FQ|7Yg3TIeeS6rUM`;?DfhzW1th9AKtogW`q mFIu+jIp>n4OXoccy0ORW{!PE5fj=cg`tO{LyXr0bd?^5+*;&8< From 1964d1fa6e6466fd3e569c8ec279ba1580b40abb Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 
04:26:11 -0700 Subject: [PATCH 42/50] Fix linux nix develop workflow shells --- .forgejo/workflows/build-rust.yml | 2 +- .forgejo/workflows/build-site.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.forgejo/workflows/build-rust.yml b/.forgejo/workflows/build-rust.yml index d1c3e1c..53191ab 100644 --- a/.forgejo/workflows/build-rust.yml +++ b/.forgejo/workflows/build-rust.yml @@ -58,7 +58,7 @@ jobs: shell: bash run: | set -euo pipefail - nix develop .#ci -c bash -lc ' + nix develop .#ci -c bash -euo pipefail -c ' sccache --zero-stats >/dev/null 2>&1 || true cargo test --workspace --all-features sccache --show-stats || true diff --git a/.forgejo/workflows/build-site.yml b/.forgejo/workflows/build-site.yml index 3224ea9..ea4d58e 100644 --- a/.forgejo/workflows/build-site.yml +++ b/.forgejo/workflows/build-site.yml @@ -53,7 +53,7 @@ jobs: shell: bash run: | set -euo pipefail - nix develop .#ci -c bash -lc ' + nix develop .#ci -c bash -euo pipefail -c ' mkdir -p site/.next rm -rf site/.next/cache ln -sfn "${NEXT_CACHE_DIR}" site/.next/cache From ff5736a81739037069b4132eb98a6fa5605880d2 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 04:33:54 -0700 Subject: [PATCH 43/50] Skip tun configure tests without tun access --- tun/tests/configure.rs | 73 ++++++++++++++++++++++++++++++------------ 1 file changed, 52 insertions(+), 21 deletions(-) diff --git a/tun/tests/configure.rs b/tun/tests/configure.rs index 7c05959..e5cef80 100644 --- a/tun/tests/configure.rs +++ b/tun/tests/configure.rs @@ -1,19 +1,36 @@ -use std::{io::Error, net::Ipv4Addr}; +use std::{ + io::{Error, ErrorKind}, + net::Ipv4Addr, +}; -use fehler::throws; use tun::TunInterface; -#[test] -#[throws] -fn test_create() { - TunInterface::new()?; +fn open_test_tun() -> Result, Error> { + match TunInterface::new() { + Ok(tun) => Ok(Some(tun)), + Err(error) if matches!(error.kind(), ErrorKind::NotFound | ErrorKind::PermissionDenied) => { + eprintln!("skipping test: {}", 
error); + Ok(None) + } + Err(error) => Err(error), + } +} + +#[test] +fn test_create() -> Result<(), Error> { + if open_test_tun()?.is_none() { + return Ok(()); + } + + Ok(()) } #[test] -#[throws] #[cfg(not(any(target_os = "windows", target_vendor = "apple")))] -fn test_set_get_broadcast_addr() { - let tun = TunInterface::new()?; +fn test_set_get_broadcast_addr() -> Result<(), Error> { + let Some(tun) = open_test_tun()? else { + return Ok(()); + }; let addr = Ipv4Addr::new(10, 0, 0, 1); tun.set_ipv4_addr(addr)?; @@ -22,52 +39,64 @@ fn test_set_get_broadcast_addr() { let result = tun.broadcast_addr()?; assert_eq!(broadcast_addr, result); + + Ok(()) } #[test] -#[throws] #[cfg(not(target_os = "windows"))] -fn test_set_get_ipv4() { - let tun = TunInterface::new()?; +fn test_set_get_ipv4() -> Result<(), Error> { + let Some(tun) = open_test_tun()? else { + return Ok(()); + }; let addr = Ipv4Addr::new(10, 0, 0, 1); tun.set_ipv4_addr(addr)?; let result = tun.ipv4_addr()?; assert_eq!(addr, result); + + Ok(()) } #[test] -#[throws] #[cfg(not(any(target_os = "windows", target_vendor = "apple")))] -fn test_set_get_ipv6() { +fn test_set_get_ipv6() -> Result<(), Error> { use std::net::Ipv6Addr; - let tun = TunInterface::new()?; + let Some(tun) = open_test_tun()? else { + return Ok(()); + }; let addr = Ipv6Addr::new(1, 1, 1, 1, 1, 1, 1, 1); tun.add_ipv6_addr(addr, 128)?; // let result = tun.ipv6_addr()?; // assert_eq!(addr, result); + + Ok(()) } #[test] -#[throws] #[cfg(not(target_os = "windows"))] -fn test_set_get_mtu() { - let interf = TunInterface::new()?; +fn test_set_get_mtu() -> Result<(), Error> { + let Some(interf) = open_test_tun()? else { + return Ok(()); + }; interf.set_mtu(500)?; assert_eq!(interf.mtu().unwrap(), 500); + + Ok(()) } #[test] -#[throws] #[cfg(not(target_os = "windows"))] -fn test_set_get_netmask() { - let interf = TunInterface::new()?; +fn test_set_get_netmask() -> Result<(), Error> { + let Some(interf) = open_test_tun()? 
else { + return Ok(()); + }; let netmask = Ipv4Addr::new(255, 0, 0, 0); let addr = Ipv4Addr::new(192, 168, 1, 1); @@ -76,4 +105,6 @@ fn test_set_get_netmask() { interf.set_netmask(netmask)?; assert_eq!(interf.netmask()?, netmask); + + Ok(()) } From dd369bd0f82567b18121f8b78e158996f1cb1f4f Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 04:37:01 -0700 Subject: [PATCH 44/50] Tolerate macos nsc ssh handoff exit --- .../forgejo-nsc/internal/nsc/macos_nsc.go | 25 ++++++++++ .../internal/nsc/macos_nsc_test.go | 47 +++++++++++++++++++ 2 files changed, 72 insertions(+) create mode 100644 services/forgejo-nsc/internal/nsc/macos_nsc_test.go diff --git a/services/forgejo-nsc/internal/nsc/macos_nsc.go b/services/forgejo-nsc/internal/nsc/macos_nsc.go index 26cbab0..a337572 100644 --- a/services/forgejo-nsc/internal/nsc/macos_nsc.go +++ b/services/forgejo-nsc/internal/nsc/macos_nsc.go @@ -363,6 +363,15 @@ func (d *Dispatcher) runMacOSNSCSSHScript(ctx context.Context, runnerName, insta if errors.Is(sshCtx.Err(), context.DeadlineExceeded) { return fmt.Errorf("nsc ssh timed out after %s\n%s", 5*time.Minute, strings.TrimSpace(buf.String())) } + if nscSSHBootstrapLikelySucceeded(err, buf.String()) { + d.log.Warn("nsc ssh exited after runner handoff; treating bootstrap as successful", + "runner", runnerName, + "instance", instanceID, + "err", err, + ) + d.log.Info("macos runner bootstrap completed via nsc ssh", "runner", runnerName, "instance", instanceID) + return nil + } return fmt.Errorf("nsc ssh runner bootstrap failed: %w\n%s", err, strings.TrimSpace(buf.String())) } @@ -370,6 +379,22 @@ func (d *Dispatcher) runMacOSNSCSSHScript(ctx context.Context, runnerName, insta return nil } +func nscSSHBootstrapLikelySucceeded(err error, output string) bool { + if err == nil { + return false + } + + errText := strings.ToLower(err.Error()) + if !strings.Contains(errText, "remote command exited without exit status or exit signal") { + return false + } + + output = 
strings.ToLower(output) + return strings.Contains(output, "runner registered successfully") && + strings.Contains(output, "starting job") && + strings.Contains(output, "task ") +} + func prependNSCRegionArgs(args []string, computeBaseURL string) []string { region := strings.TrimSpace(os.Getenv("NSC_REGION")) if region == "" { diff --git a/services/forgejo-nsc/internal/nsc/macos_nsc_test.go b/services/forgejo-nsc/internal/nsc/macos_nsc_test.go new file mode 100644 index 0000000..7e5c5fa --- /dev/null +++ b/services/forgejo-nsc/internal/nsc/macos_nsc_test.go @@ -0,0 +1,47 @@ +package nsc + +import ( + "errors" + "testing" +) + +func TestNSCSSHBootstrapLikelySucceeded(t *testing.T) { + t.Parallel() + + err := errors.New("wait: remote command exited without exit status or exit signal") + output := ` +level=info msg="Runner registered successfully." +time="2026-03-19T11:29:49Z" level=info msg="Starting job" +time="2026-03-19T11:29:50Z" level=info msg="task 124 repo is hackclub/burrow" +` + + if !nscSSHBootstrapLikelySucceeded(err, output) { + t.Fatal("expected handoff success heuristic to match") + } +} + +func TestNSCSSHBootstrapLikelySucceededRejectsIncompleteOutput(t *testing.T) { + t.Parallel() + + err := errors.New("wait: remote command exited without exit status or exit signal") + output := `level=info msg="Runner registered successfully."` + + if nscSSHBootstrapLikelySucceeded(err, output) { + t.Fatal("expected incomplete runner output to remain a failure") + } +} + +func TestNSCSSHBootstrapLikelySucceededRejectsDifferentErrors(t *testing.T) { + t.Parallel() + + err := errors.New("exit status 1") + output := ` +level=info msg="Runner registered successfully." 
+time="2026-03-19T11:29:49Z" level=info msg="Starting job" +time="2026-03-19T11:29:50Z" level=info msg="task 124 repo is hackclub/burrow" +` + + if nscSSHBootstrapLikelySucceeded(err, output) { + t.Fatal("expected unrelated nsc ssh errors to remain failures") + } +} From db443bcb505c23ac3369b436c347e55dff41b652 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 04:43:55 -0700 Subject: [PATCH 45/50] Align macos runner lifecycle with forgejo --- services/forgejo-nsc/internal/nsc/macos.go | 4 +- .../forgejo-nsc/internal/nsc/macos_nsc.go | 38 +++++++++++++++---- 2 files changed, 33 insertions(+), 9 deletions(-) diff --git a/services/forgejo-nsc/internal/nsc/macos.go b/services/forgejo-nsc/internal/nsc/macos.go index 30be465..d20fd57 100644 --- a/services/forgejo-nsc/internal/nsc/macos.go +++ b/services/forgejo-nsc/internal/nsc/macos.go @@ -645,7 +645,9 @@ fi mkdir -p bin export PATH="${PWD}/bin:${PATH}" -runner_version="v12.6.4" +# Keep the ad-hoc macOS bootstrap on the same Forgejo runner major/minor line +# as the server-side LTS package and the Linux runner image. 
+runner_version="v11.0.11" runner_src_tgz="forgejo-runner-${runner_version}.tar.gz" runner_src_tgz_path="${cache_root}/downloads/${runner_src_tgz}" runner_src_url="https://code.forgejo.org/forgejo/runner/archive/${runner_version}.tar.gz" diff --git a/services/forgejo-nsc/internal/nsc/macos_nsc.go b/services/forgejo-nsc/internal/nsc/macos_nsc.go index a337572..1656028 100644 --- a/services/forgejo-nsc/internal/nsc/macos_nsc.go +++ b/services/forgejo-nsc/internal/nsc/macos_nsc.go @@ -64,6 +64,13 @@ func normalizeMacOSNSCMachineType(machineType string) (normalized string, change return normalized, changed, nil } +type macosNSCSSHOutcome int + +const ( + macosNSCSSHCompleted macosNSCSSHOutcome = iota + macosNSCSSHHandoff +) + func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName string, req LaunchRequest, ttl time.Duration, machineType string) error { if machineType == "" { return errors.New("machine_type is required for macos runners") @@ -216,15 +223,30 @@ func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName str return fmt.Errorf("nsc create failed without producing an instance id\n%s", lastOut) } - // Always attempt cleanup even if the runner fails. - defer d.destroyNSCInstance(context.Background(), runnerName, instanceID) + destroyOnReturn := true + // Always attempt cleanup on failure; successful handoff is allowed to run out + // to its NSC TTL because `nsc ssh` may detach before the Forgejo job exits. + defer func() { + if destroyOnReturn { + d.destroyNSCInstance(context.Background(), runnerName, instanceID) + } + }() script := macosBootstrapWrapperScript(runnerName, req, d.opts.Executor, d.opts.WorkDir) // The CLI fallback is explicitly keychain-backed and does not rely on the // service bearer token, so use `nsc ssh` end-to-end here. 
- if err := d.runMacOSNSCSSHScript(ctx, runnerName, instanceID, script); err != nil { + outcome, err := d.runMacOSNSCSSHScript(ctx, runnerName, instanceID, script) + if err != nil { return err } + if outcome == macosNSCSSHHandoff { + destroyOnReturn = false + d.log.Info("leaving macos nsc instance running until TTL after runner handoff", + "runner", runnerName, + "instance", instanceID, + "ttl", ttl.String(), + ) + } return nil } @@ -344,7 +366,7 @@ func shellSingleQuote(value string) string { return "'" + strings.ReplaceAll(value, "'", `'\"'\"'`) + "'" } -func (d *Dispatcher) runMacOSNSCSSHScript(ctx context.Context, runnerName, instanceID, script string) error { +func (d *Dispatcher) runMacOSNSCSSHScript(ctx context.Context, runnerName, instanceID, script string) (macosNSCSSHOutcome, error) { sshCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) defer cancel() @@ -361,7 +383,7 @@ func (d *Dispatcher) runMacOSNSCSSHScript(ctx context.Context, runnerName, insta if err := cmd.Run(); err != nil { if errors.Is(sshCtx.Err(), context.DeadlineExceeded) { - return fmt.Errorf("nsc ssh timed out after %s\n%s", 5*time.Minute, strings.TrimSpace(buf.String())) + return macosNSCSSHCompleted, fmt.Errorf("nsc ssh timed out after %s\n%s", 5*time.Minute, strings.TrimSpace(buf.String())) } if nscSSHBootstrapLikelySucceeded(err, buf.String()) { d.log.Warn("nsc ssh exited after runner handoff; treating bootstrap as successful", @@ -370,13 +392,13 @@ func (d *Dispatcher) runMacOSNSCSSHScript(ctx context.Context, runnerName, insta "err", err, ) d.log.Info("macos runner bootstrap completed via nsc ssh", "runner", runnerName, "instance", instanceID) - return nil + return macosNSCSSHHandoff, nil } - return fmt.Errorf("nsc ssh runner bootstrap failed: %w\n%s", err, strings.TrimSpace(buf.String())) + return macosNSCSSHCompleted, fmt.Errorf("nsc ssh runner bootstrap failed: %w\n%s", err, strings.TrimSpace(buf.String())) } d.log.Info("macos runner bootstrap completed via nsc ssh", 
"runner", runnerName, "instance", instanceID) - return nil + return macosNSCSSHCompleted, nil } func nscSSHBootstrapLikelySucceeded(err error, output string) bool { From 8957af0e05701e2246db45d935c7d14a10e9fdfd Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 04:48:39 -0700 Subject: [PATCH 46/50] Use published forgejo 11.x macos runner --- services/forgejo-nsc/internal/nsc/macos.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/services/forgejo-nsc/internal/nsc/macos.go b/services/forgejo-nsc/internal/nsc/macos.go index d20fd57..0b1e39a 100644 --- a/services/forgejo-nsc/internal/nsc/macos.go +++ b/services/forgejo-nsc/internal/nsc/macos.go @@ -645,9 +645,9 @@ fi mkdir -p bin export PATH="${PWD}/bin:${PATH}" -# Keep the ad-hoc macOS bootstrap on the same Forgejo runner major/minor line -# as the server-side LTS package and the Linux runner image. -runner_version="v11.0.11" +# Keep the ad-hoc macOS bootstrap on the same Forgejo runner major line as the +# Linux runner image. Forgejo runner 11.x is currently published as v11.3.1. 
+runner_version="v11.3.1" runner_src_tgz="forgejo-runner-${runner_version}.tar.gz" runner_src_tgz_path="${cache_root}/downloads/${runner_src_tgz}" runner_src_url="https://code.forgejo.org/forgejo/runner/archive/${runner_version}.tar.gz" From fc79766a318b9f356eb73462ef8859e421a25c27 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 04:56:56 -0700 Subject: [PATCH 47/50] Skip tun tokio test without tun access --- tun/tests/tokio.rs | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/tun/tests/tokio.rs b/tun/tests/tokio.rs index 097387c..ddec6b3 100644 --- a/tun/tests/tokio.rs +++ b/tun/tests/tokio.rs @@ -1,10 +1,27 @@ #[cfg(all(feature = "tokio", not(target_os = "windows")))] -use std::net::Ipv4Addr; +use std::{ + io::ErrorKind, + net::Ipv4Addr, +}; + +#[cfg(all(feature = "tokio", not(target_os = "windows")))] +fn open_test_tun() -> Option { + match tun::TunInterface::new() { + Ok(tun) => Some(tun), + Err(error) if matches!(error.kind(), ErrorKind::NotFound | ErrorKind::PermissionDenied) => { + eprintln!("skipping test: {}", error); + None + } + Err(error) => panic!("failed to create tun interface: {error}"), + } +} #[tokio::test] #[cfg(all(feature = "tokio", not(target_os = "windows")))] async fn test_create() { - let tun = tun::TunInterface::new().unwrap(); + let Some(tun) = open_test_tun() else { + return; + }; let _ = tun::tokio::TunInterface::new(tun).unwrap(); } @@ -12,7 +29,9 @@ async fn test_create() { #[ignore = "requires interactivity"] #[cfg(all(feature = "tokio", not(target_os = "windows")))] async fn test_write() { - let tun = tun::TunInterface::new().unwrap(); + let Some(tun) = open_test_tun() else { + return; + }; tun.set_ipv4_addr(Ipv4Addr::from([192, 168, 1, 10])) .unwrap(); let async_tun = tun::tokio::TunInterface::new(tun).unwrap(); From 283209d3649ceb625e12edb19a7606c822066c18 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 14:01:37 -0700 Subject: [PATCH 48/50] Harden 
macos runner cleanup --- Scripts/forgejo-prune-runners.py | 144 ++++++++++++++++++ nixos/modules/burrow-forgejo-nsc.nix | 62 ++++++++ services/forgejo-nsc/README.md | 6 + .../forgejo-nsc/internal/nsc/macos_nsc.go | 84 +--------- .../internal/nsc/macos_nsc_test.go | 56 +++---- 5 files changed, 239 insertions(+), 113 deletions(-) create mode 100755 Scripts/forgejo-prune-runners.py diff --git a/Scripts/forgejo-prune-runners.py b/Scripts/forgejo-prune-runners.py new file mode 100755 index 0000000..65c9ae9 --- /dev/null +++ b/Scripts/forgejo-prune-runners.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import json +import os +import pathlib +import subprocess +import time +import urllib.error +import urllib.request + + +def _read_token() -> str: + token = os.environ.get("FORGEJO_API_TOKEN", "").strip() + token_file = os.environ.get("FORGEJO_API_TOKEN_FILE", "").strip() + if not token and token_file: + token = pathlib.Path(token_file).read_text().strip() + if not token: + raise SystemExit("Forgejo API token is missing") + if token.startswith("PENDING-"): + raise SystemExit("Forgejo API token is pending") + return token + + +def _request(method: str, url: str, token: str) -> tuple[int, str]: + headers = {"Authorization": f"token {token}", "Accept": "application/json"} + req = urllib.request.Request(url, headers=headers, method=method) + try: + with urllib.request.urlopen(req, timeout=20) as resp: + body = resp.read().decode("utf-8") + return resp.getcode(), body + except urllib.error.HTTPError as exc: + body = exc.read().decode("utf-8") + return exc.code, body + + +def _list_runners(api_url: str, token: str, org: str | None) -> tuple[str, list[dict]]: + if org: + list_url = f"{api_url}/orgs/{org}/actions/runners" + else: + list_url = f"{api_url}/actions/runners" + status, body = _request("GET", list_url, token) + if status == 404: + return list_url, [] + if status >= 400: + raise RuntimeError(f"list runners failed ({status}) {body}") + 
try: + runners = json.loads(body) + except json.JSONDecodeError as exc: + raise RuntimeError(f"invalid runner list response: {exc}") from exc + if not isinstance(runners, list): + raise RuntimeError("runner list response is not a list") + return list_url, runners + + +def _delete_runner(api_url: str, token: str, org: str | None, runner_id: int) -> bool: + if org: + delete_url = f"{api_url}/orgs/{org}/actions/runners/{runner_id}" + else: + delete_url = f"{api_url}/actions/runners/{runner_id}" + status, body = _request("DELETE", delete_url, token) + if status in (200, 204): + return True + print(f"[forgejo-prune-runners] delete {runner_id} failed: {status} {body}") + return False + + +def _prune_db(ttl_seconds: int) -> int: + cutoff = int(time.time()) - ttl_seconds + now = int(time.time()) + sql = ( + "WITH updated AS (" + "UPDATE action_runner " + f"SET deleted = {now} " + "WHERE (deleted IS NULL OR deleted = 0) " + f"AND ((last_online IS NOT NULL AND last_online > 0 AND last_online < {cutoff}) " + f"OR (COALESCE(last_online, 0) = 0 AND created < {cutoff})) " + "RETURNING 1" + ") SELECT count(*) FROM updated;" + ) + result = subprocess.run( + ["psql", "-h", "/run/postgresql", "-U", "forgejo", "forgejo", "-tAc", sql], + check=True, + capture_output=True, + text=True, + ) + output = (result.stdout or "").strip() + try: + return int(output) + except ValueError: + return 0 + + +def main() -> None: + api_url = os.environ.get("FORGEJO_API_URL", "https://git.burrow.net/api/v1").rstrip("/") + org = os.environ.get("FORGEJO_ORG", "hackclub").strip() or None + dry_run = os.environ.get("FORGEJO_DRY_RUN", "0") == "1" + db_only = os.environ.get("FORGEJO_PRUNE_DB", "0") == "1" + ttl_seconds = int(os.environ.get("FORGEJO_RUNNER_TTL_SEC", "3600")) + + if db_only: + removed = _prune_db(ttl_seconds) + print(f"[forgejo-prune-runners] pruned {removed} runners via DB") + return + + token = _read_token() + + try: + _, runners = _list_runners(api_url, token, org) + except RuntimeError as 
exc: + if org is not None: + print(f"[forgejo-prune-runners] org runner list failed ({exc}); retrying instance scope") + _, runners = _list_runners(api_url, token, None) + org = None + else: + raise SystemExit(str(exc)) + + if not runners: + removed = _prune_db(ttl_seconds) + print(f"[forgejo-prune-runners] pruned {removed} runners via DB fallback") + return + + removed = 0 + for runner in runners: + runner_id = runner.get("id") + name = runner.get("name", "unknown") + status = (runner.get("status") or "").lower() + busy = bool(runner.get("busy")) + if status == "online" or busy: + continue + if runner_id is None: + continue + if dry_run: + print(f"[forgejo-prune-runners] would delete runner {runner_id} ({name}) status={status}") + continue + if _delete_runner(api_url, token, org, int(runner_id)): + removed += 1 + print(f"[forgejo-prune-runners] deleted runner {runner_id} ({name})") + + print(f"[forgejo-prune-runners] done; removed {removed} runners") + + +if __name__ == "__main__": + main() diff --git a/nixos/modules/burrow-forgejo-nsc.nix b/nixos/modules/burrow-forgejo-nsc.nix index ba116f7..e05b2ae 100644 --- a/nixos/modules/burrow-forgejo-nsc.nix +++ b/nixos/modules/burrow-forgejo-nsc.nix @@ -150,6 +150,38 @@ in { description = "Allow placeholder values (PENDING-) in the autoscaler config."; }; }; + + pruneRunners = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable periodic pruning of stale Forgejo action runners."; + }; + + ttlSeconds = mkOption { + type = types.ints.positive; + default = 3600; + description = "Age threshold in seconds before offline runners are marked deleted."; + }; + + onBootSec = mkOption { + type = types.str; + default = "15m"; + description = "How long after boot to wait before the first prune run."; + }; + + onUnitActiveSec = mkOption { + type = types.str; + default = "1h"; + description = "How often to rerun stale runner pruning."; + }; + + randomizedDelaySec = mkOption { + type = types.str; + 
default = "10m"; + description = "Randomized delay applied to the prune timer."; + }; + }; }; config = mkIf cfg.enable { @@ -230,5 +262,35 @@ in { tokenSync ]); }; + + systemd.services.forgejo-prune-runners = mkIf cfg.pruneRunners.enable { + description = "Prune offline Forgejo action runners"; + after = [ "forgejo.service" ]; + requires = [ "forgejo.service" ]; + serviceConfig = { + Type = "oneshot"; + User = "forgejo"; + Group = "forgejo"; + }; + environment = { + FORGEJO_PRUNE_DB = "1"; + FORGEJO_RUNNER_TTL_SEC = toString cfg.pruneRunners.ttlSeconds; + }; + path = [ pkgs.python3 pkgs.postgresql ]; + script = '' + ${pkgs.python3}/bin/python3 ${self}/Scripts/forgejo-prune-runners.py + ''; + }; + + systemd.timers.forgejo-prune-runners = mkIf cfg.pruneRunners.enable { + description = "Periodic Forgejo runner cleanup"; + wantedBy = [ "timers.target" ]; + timerConfig = { + OnBootSec = cfg.pruneRunners.onBootSec; + OnUnitActiveSec = cfg.pruneRunners.onUnitActiveSec; + RandomizedDelaySec = cfg.pruneRunners.randomizedDelaySec; + Unit = "forgejo-prune-runners.service"; + }; + }; }; } diff --git a/services/forgejo-nsc/README.md b/services/forgejo-nsc/README.md index 428ff27..4cee5da 100644 --- a/services/forgejo-nsc/README.md +++ b/services/forgejo-nsc/README.md @@ -45,6 +45,9 @@ profile. The important knobs are: - `namespace.machine_type` / `namespace.duration` – shape + TTL for the ephemeral Namespace environment. The dispatcher destroys the instance after a job so the TTL acts as a hard cap, not an idle timeout. +- macOS fallback launches still use `nsc create`, but bootstrap runs over the + Compute SSH config endpoint instead of `nsc ssh` so the dispatcher can always + destroy the instance itself instead of relying on a websocket SSH proxy handoff. 
- `namespace.linux_cache_*` / `namespace.macos_cache_*` – persistent cache volumes mounted into runners so Linux can keep `/nix` plus shared build caches warm and macOS can reuse Rust toolchains, Xcode package caches, and @@ -176,6 +179,9 @@ Long-lived runtime state is now sourced from age-encrypted files: After refreshing the encrypted secrets, deploy the forge host so `config.age.secrets.*` updates the live paths for `services.burrow.forge`, `services.burrow.forgeRunner`, and `services.burrow.forgejoNsc`. +The Nix host module also installs a periodic `forgejo-prune-runners` timer that +marks stale offline runners deleted in Forgejo's database so wedged instances do +not leave the queue polluted indefinitely. Run it next to the dispatcher: diff --git a/services/forgejo-nsc/internal/nsc/macos_nsc.go b/services/forgejo-nsc/internal/nsc/macos_nsc.go index 1656028..6c66f34 100644 --- a/services/forgejo-nsc/internal/nsc/macos_nsc.go +++ b/services/forgejo-nsc/internal/nsc/macos_nsc.go @@ -64,13 +64,6 @@ func normalizeMacOSNSCMachineType(machineType string) (normalized string, change return normalized, changed, nil } -type macosNSCSSHOutcome int - -const ( - macosNSCSSHCompleted macosNSCSSHOutcome = iota - macosNSCSSHHandoff -) - func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName string, req LaunchRequest, ttl time.Duration, machineType string) error { if machineType == "" { return errors.New("machine_type is required for macos runners") @@ -223,30 +216,16 @@ func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName str return fmt.Errorf("nsc create failed without producing an instance id\n%s", lastOut) } - destroyOnReturn := true - // Always attempt cleanup on failure; successful handoff is allowed to run out - // to its NSC TTL because `nsc ssh` may detach before the Forgejo job exits. 
- defer func() { - if destroyOnReturn { - d.destroyNSCInstance(context.Background(), runnerName, instanceID) - } - }() + // Always attempt cleanup even if the runner fails. + defer d.destroyNSCInstance(context.Background(), runnerName, instanceID) script := macosBootstrapWrapperScript(runnerName, req, d.opts.Executor, d.opts.WorkDir) - // The CLI fallback is explicitly keychain-backed and does not rely on the - // service bearer token, so use `nsc ssh` end-to-end here. - outcome, err := d.runMacOSNSCSSHScript(ctx, runnerName, instanceID, script) - if err != nil { + // Use the Compute SSH config endpoint (direct TCP) instead of `nsc ssh`, which + // relies on a websocket-based SSH proxy that is less reliable under the + // revokable tenant token flow used by the dispatcher. + if err := d.runMacOSComputeSSHScript(ctx, runnerName, instanceID, script); err != nil { return err } - if outcome == macosNSCSSHHandoff { - destroyOnReturn = false - d.log.Info("leaving macos nsc instance running until TTL after runner handoff", - "runner", runnerName, - "instance", instanceID, - "ttl", ttl.String(), - ) - } return nil } @@ -366,57 +345,6 @@ func shellSingleQuote(value string) string { return "'" + strings.ReplaceAll(value, "'", `'\"'\"'`) + "'" } -func (d *Dispatcher) runMacOSNSCSSHScript(ctx context.Context, runnerName, instanceID, script string) (macosNSCSSHOutcome, error) { - sshCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) - defer cancel() - - args := []string{"ssh", "--disable-pty", instanceID, "/bin/bash"} - args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL) - - cmd := exec.CommandContext(sshCtx, d.opts.BinaryPath, args...) 
- cmd.Env = nscCLIEnv() - cmd.Stdin = strings.NewReader(script) - - var buf bytes.Buffer - cmd.Stdout = &buf - cmd.Stderr = &buf - - if err := cmd.Run(); err != nil { - if errors.Is(sshCtx.Err(), context.DeadlineExceeded) { - return macosNSCSSHCompleted, fmt.Errorf("nsc ssh timed out after %s\n%s", 5*time.Minute, strings.TrimSpace(buf.String())) - } - if nscSSHBootstrapLikelySucceeded(err, buf.String()) { - d.log.Warn("nsc ssh exited after runner handoff; treating bootstrap as successful", - "runner", runnerName, - "instance", instanceID, - "err", err, - ) - d.log.Info("macos runner bootstrap completed via nsc ssh", "runner", runnerName, "instance", instanceID) - return macosNSCSSHHandoff, nil - } - return macosNSCSSHCompleted, fmt.Errorf("nsc ssh runner bootstrap failed: %w\n%s", err, strings.TrimSpace(buf.String())) - } - - d.log.Info("macos runner bootstrap completed via nsc ssh", "runner", runnerName, "instance", instanceID) - return macosNSCSSHCompleted, nil -} - -func nscSSHBootstrapLikelySucceeded(err error, output string) bool { - if err == nil { - return false - } - - errText := strings.ToLower(err.Error()) - if !strings.Contains(errText, "remote command exited without exit status or exit signal") { - return false - } - - output = strings.ToLower(output) - return strings.Contains(output, "runner registered successfully") && - strings.Contains(output, "starting job") && - strings.Contains(output, "task ") -} - func prependNSCRegionArgs(args []string, computeBaseURL string) []string { region := strings.TrimSpace(os.Getenv("NSC_REGION")) if region == "" { diff --git a/services/forgejo-nsc/internal/nsc/macos_nsc_test.go b/services/forgejo-nsc/internal/nsc/macos_nsc_test.go index 7e5c5fa..682f441 100644 --- a/services/forgejo-nsc/internal/nsc/macos_nsc_test.go +++ b/services/forgejo-nsc/internal/nsc/macos_nsc_test.go @@ -1,47 +1,33 @@ package nsc -import ( - "errors" - "testing" -) +import "testing" -func TestNSCSSHBootstrapLikelySucceeded(t *testing.T) { +func 
TestNormalizeMacOSNSCMachineTypeRoundsUp(t *testing.T) { t.Parallel() - err := errors.New("wait: remote command exited without exit status or exit signal") - output := ` -level=info msg="Runner registered successfully." -time="2026-03-19T11:29:49Z" level=info msg="Starting job" -time="2026-03-19T11:29:50Z" level=info msg="task 124 repo is hackclub/burrow" -` - - if !nscSSHBootstrapLikelySucceeded(err, output) { - t.Fatal("expected handoff success heuristic to match") + got, changed, err := normalizeMacOSNSCMachineType("5x10") + if err != nil { + t.Fatalf("normalizeMacOSNSCMachineType: %v", err) + } + if !changed { + t.Fatal("expected machine type to be normalized") + } + if got != "6x14" { + t.Fatalf("expected 6x14, got %q", got) } } -func TestNSCSSHBootstrapLikelySucceededRejectsIncompleteOutput(t *testing.T) { +func TestNormalizeMacOSNSCMachineTypeKeepsAllowedShape(t *testing.T) { t.Parallel() - err := errors.New("wait: remote command exited without exit status or exit signal") - output := `level=info msg="Runner registered successfully."` - - if nscSSHBootstrapLikelySucceeded(err, output) { - t.Fatal("expected incomplete runner output to remain a failure") - } -} - -func TestNSCSSHBootstrapLikelySucceededRejectsDifferentErrors(t *testing.T) { - t.Parallel() - - err := errors.New("exit status 1") - output := ` -level=info msg="Runner registered successfully." 
-time="2026-03-19T11:29:49Z" level=info msg="Starting job" -time="2026-03-19T11:29:50Z" level=info msg="task 124 repo is hackclub/burrow" -` - - if nscSSHBootstrapLikelySucceeded(err, output) { - t.Fatal("expected unrelated nsc ssh errors to remain failures") + got, changed, err := normalizeMacOSNSCMachineType("6x14") + if err != nil { + t.Fatalf("normalizeMacOSNSCMachineType: %v", err) + } + if changed { + t.Fatal("expected allowed machine type to remain unchanged") + } + if got != "6x14" { + t.Fatalf("expected 6x14, got %q", got) } } From a4cabf9fb7fca77298ea7cc7b9859a5fa60a640f Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 19 Mar 2026 14:06:57 -0700 Subject: [PATCH 49/50] Rotate forgejo nsc secrets --- secrets/forgejo/nsc-autoscaler-config.age | Bin 1395 -> 1395 bytes secrets/forgejo/nsc-dispatcher-config.age | Bin 2021 -> 2021 bytes secrets/forgejo/nsc-token.age | Bin 1861 -> 1861 bytes 3 files changed, 0 insertions(+), 0 deletions(-) diff --git a/secrets/forgejo/nsc-autoscaler-config.age b/secrets/forgejo/nsc-autoscaler-config.age index f8e55cd4c58f2ecff3314d28d752288bb909f5dc..94e1535d5d839e2e98cfe6efad81fec0195a3c6e 100644 GIT binary patch delta 1348 zcmey&^_gpePJK{*N_d55vO#b_p|3}(iCKAWu(N+qUQS*{v3FW#wtkRFSyr&OtD|3b zGMA}uRI!J@V_}||X+dQ`SXo77QB_E3a#3kPMsQxDcR*IUS58PpfNN5+BbTn7LUD11 zZfc5=si~o*f@e`wu4B4_p+!ltN4|fgS$JxaYo>NtzFSd%sbxW?k8`QEUzJ%-pt*LH zX=Q|4X;r!>S4l{4a<+wGdR}sFzH5YEpsBNKRAF(xnZCQWwt>ErqeZbnx=(3&MU{u| z#E;_jg;jyRMf%B(sj2#T$;EkrWr?{37Un^|+J;5>8TnDJSrv&E`2~@=KG_Cb6_LIr z#>v{{6=}JNCW&5d=}Cs+!Ko#M9?5ArNsjtCo>953K1E5{Zjqr}y1Kdw0g36(8E$D| z+7U?+!6tsC;YqG0X&GiY5qTvg<(X*){()Kbc}AHoPD$pjT+H>Z8TAkMy8SERm?K}& z5@UF4c1LdXT9HuSuTkGBBm|Eh?%9|nnD+IFLB>?+Et)UW9)HjChr78 zIJY}Fm3Ui6%seg-aYD>iZBpOcM{f+bdpaMOz3WUv*M_+IC#tJ*Ld8Sv=F}YeGO>Z( zpm0fmYLI2>HeHp=DXR|I3mTXmn=*CJ%E~jr0j1{aHdI#Ges4%RKF`N_Ld*)WJ=ewi zfA4vkXMaj9U$^3P!0ZC!SsKS%vXYi)ZRaU_#}qVg&5d~?Gv)-W+RD$g)9dcLW1IXv zzOL$HjBEI(7`T6B$?STkN!k6gJ_m(9`F~nh^T~6SMHYOk{P*t4nVC4h(cwwVgI4ZW 
zN~~T#qgiaXoLae6KB!pg%~`QcPb0J)N;wZrT~zOuAR)7O<#Y3g2P&2)t=M$I{D+`# ziSqhBhW}YpPIvHBDW9)kDro<5YMbe|#wWMWhQFS=@Xw!xdum0lthpYx-lYD|=F?Zw zUa|5tPbl~OG&`nMtm*GR?j+IOAEsUCi(4O*UGL4|9>(b2`o?)hgN?Ct`JBr|=l@+dzGwsu(zs&X%-F;@UQSUmQ+f~2*I;8)6CP9h$raJiCrDJeusYQ8ym|JO}S(R6MvSXB!r&FGPl1W&k zfvIY;iB&0q2H6%Kxh}qlU&Ynd+B?kHK-X*2lE|$eyy1Kdwxjuyf5mjd9 z23f@+0S3W_ITqSxu1+qN?zyR@g~1kiL1~8d#`(?xLFUz;#$p_z2F<$rR~x=8*EtJ zcJoNRH>|JU@NG7qe`aEGn;VPFS0yX4+KL;7eysI3zbH7gE=rf!`sw_Zv_&iB)Ay}r zFngKcyou|(w8V?n2W2zMqt&{it{lDJ663Hb`oGhfV9uc8U13VXTdptA(bRgn<`#?5 z=ARZiDu=!9tl%}neTkoa(>88tmb`JFMLGD{p&wIyP{PyLPN!xyP7w%-qlt(H!gUiGj6mzAk0r_4ensk8Ql}bt`&Bcj;`m9IFRc&SYpl zUApDeJ=2zzt7k1N7rWGwVpC(7J3Y@P;+W^I%g>hfwEA#LFO>e};OTkc?fmlBlRkbn zJ9qVW7*}w1+GMA{%~#ILEP3pDwEA^lTKxr%7pu*)4mfMxsr@ne>kS1>>2BA4j?Zex z>hBw*`EI`6v><=_Yw0s`{?6~iZvB1E^fOSibke6dD|ODid-`vFPpV{+|5Mw+6`^n- z;#r|^P2`%q93I_3LotmhT{7*HjJa0bKla;cOU7RHgO8gd)n?o5e;>Z#po@Owt)mBK zMIOIYzx#M$t97*yJ9B4q`LU&Sr<+f+Y%(hAxvYNXhUwLMQIU;#O^shSX1-?=U->6t zhV9WW?3=lF2ygSJQv|~8?TOUPh)v!SaN4C=lfLl*Ud)FI^oc?#caVV zBj%p{KdX2S>$Q^W^CJRH*y0zjt-8Nq!EJ*T;YyClg*S?2*q>Lloi_dHmERc?6xn#g zHSGIC)p=PC{JCM;-<_3u61gwvU-;+pa!>VR^=Xc=&!u!voabFDI8$=+(!(KsZyg=V zpC+2*RrxGjUu&?#a>9n}4cFg=$T;%${*$kWs^Dx5|D1bzR^$02dL>zNzw+*W6jAaR E04k|r9smFU diff --git a/secrets/forgejo/nsc-dispatcher-config.age b/secrets/forgejo/nsc-dispatcher-config.age index 43b0594a09248e1e5f06ff9f86ce12d25f7b22da..ab4bff7cb461238b7c702b9e10cedb37707f3905 100644 GIT binary patch delta 1979 zcmaFL|CE1%PJLBWNREH5sZ&n6MWUO(c|@Lno|CI~VxYcufMIz`xxZ&lSxT^pfrn9M zAXij@sZ*kdNtT;Ig>i{{a%N(_QD~M?c%@fjT1lo^W{O34Sx|O_hm%vW374*&LUD11 zZfc5=si~o*f@e`wu4B4Fa8-ClL5PpHuWO)TR8mr=t6M;LReF)De_^&ocy>x&rct?5 zp-H8?i?)FYS9V22YNV5EVoHIBlUsIqS(d4heyVXuq;Gmvj+39Ub7G`nq+gbiVNsCl z#E;_jxn5DZ$rhI7`Qee3zV4+d;o&OwoG6%oeX`bHUfc^>&(ewOJW zY1u}(6%i(0o(5hc*Mgy=GD9 zecN!+Z8>G@877@RpfO_$r_IgM>g~2yeAml*_f7rW^y&87?Jv`(^L>}G-shre+NdB_ zaZ%a!&?*6)?{A;nmPyA;d-i3&5xH!zkI6mlwA)8^o2||<)_j*X#%+6I 
z5M1Q+$guvK^vb!uPo-ycx}D0OSsG{iZ<}k=uV@uE8;LorT*uCRcfP*s-oJYdt)c7A z+3#qGIb^q2I^6H8|Izs^6~PZ~Y>s#3O1>WPWy}q`>1?^*P>#+aJ1%(@VB2+|Dj}v@2=R|C>u$%3ns*=T6zZ`M~YG zgUzM%6&Rn7@ zs{2a*8N;28E(?X0bG=)&u6{OGz;fw#x5DIKtjW$=K85#=K(Au@&u_;&3##+XeYDMb z*?2$bn@+j@L%Xf}=oxwUV}&j00y!UK*WPH`Qgw0GALmt1ULL*t$v#u>-g*IRqeP{! zuchh%$D+P|IlKM1mtKZU-Nzoy;6zP!k%J4TYn&86?qah3!)LXb>oaa{{dHQtzI-yL zUQ3F7$9i$!Z|ceu6g*z5=I%AWQmA6#bnW?%wyp|f77!5R-<(xQ>4yzo#oGS`@R2c+hg{1-#i{#mFCV1 zwO;o8x9A~l*{)yn1-)M!uU5=bq1siK*YUMt)2D$#6A68hqi{s?fK>`{r4^kFUVeZ&Dgi0kl*}RSG=8z z!Ksr)ceDKe7)?B|t>hoKl1W?9#v2w#js_iQ3;OSMS4^(Hgrz=W&+_U1(Xt)qs&!x9 zo56E?>6Oy8Tq_)0@*={|WbG-HG`g zSG!!hdKn|1Rf1;sPF2?vt9y-j_k>rcwJCpRX1Lg^dtvIzYQ{y=-Y_|TDgK-$B_u05 zF=X?Do4JQVcDw$2xKu)|xxOI4Exz5NclU3H%dRQeEcz+1Dxr!N=U3JCEjqw#9r>%Lopt1YEM$DNrZ6E$A-jiH-QuVtM*H-55 zww6H-r`4+-=(s#z=(RKM@RW>SH@Kf>PUDLXo%wZ9R5-`6e^N5P;v$sId1C$RjkY^n zIOV`|u

to`9B3uf$f&Vwqz#d6i)9Az9D6V&B9A3KCtPo%Fe@DZ44=vvo`+}a(!&Q^Z5#Ua7Cr`H^Bd!z*Re@?WJms`0%MQTy( z?%1yn)$2od{8IULqc+*F=Rjp7sTbHj8b+I>xJ-HXhUH1!rrst?z7O--_!6#t zysMD(pSh`GXZeq`#Jj3#cbBC5?QFVIaIAQnJW=a}4xioYGUi^c~K`G%%cfbQMMh0U?%o3+{2wa9C~@XD?VE<9$J*Cv&12uPF zJ$)q8&Qj%9t?Ogu=Q%rH&zFyAuJ6cvy6?%a&Fg1x%iBF`qn}thuX_RreXJ&bRR#=u>p0S64yJbp1SeTiQPf})nVsUn)Czr0BLUD11 zZfc5=si~o*f@e`wu4B4FP(W3pi;GjDc4(DPN=9K+id&Aep|6u!vWI86cBF-GsY`Nr zmUCcEN|JFgms6;pd#-+Eh`)AmnRaqYPElc$Z(e~@Wr&G?fw5<4hM~JbVnK08u3Jd? z#E;_j`JTn5p_ysCKTnl#G`LKA~h4d9&OB&A3 z*>*)lU{=4zw0mcg-%mNsa86*;y#u*f8w7KYb6o%5%eXatZr>CO<5LGG6!c_3#OQpO`RU&?o|exKF1ml&WGeK{w&h*5a0bh1u?TV1JNJXkd!MJ& zPE6o-NKn|rzag|HW_tR>O6!07Oe%7Bynn8jotKpy>X6l}wBk_z!@$+yUJIX=x$E5X zVg=6w9Xu)4LNZbGv9)O#0x=v}t)tG}{h@dN1_{hN1Gd{aKv zU1PlF%M|9W837Ypwr(p>Kbr7wY1v*2Ej85>76mW1t?qfttYmbW?{Q=B1y+Gqx2M*A zSkInjz<+y}K&Tu0%B3!KPcu^bvgb>2J(skmD{JbG)>(r z7q5MJ5$AAw!|vmDr*dyQ*56S7Rp)!yWc&Q-0wgE*Ijo0LxSo>|;?gv$@lS+!9??9GAlTm>;-RYZItbUc`nu7+4}9s9lk|I?v5T=k69L3R9igCpE@hzQ~0!qs{Om_b!z1Q z#3>~0WSCdc@k^pO<;2ITLXD3pALSGBf35wzpiR$*gXb>u$E=ADdJm>t$@^^+bRu|( z&`!&Hk$>2ZP4+fQQVji~oF5X`%XVOTa`?NxiJzXmiEe-H_btijswn3Pn}@}%R_`~t zzu6|R|8DE#5XiAxWY^41)gdm!LM z>8y+2>eZco&2f0aSJPfHtKe>b^wndF_nkOop7W#6Ms)WHgNb?*rgr!qKK}gvBJ;g> zXZU`1egW9nw>p6mT|cK5hCGg;(HeX(-)LDkv38*8faR^D+s zwctN@T|;gpgEDJ|P3+l3KU)KG7 zdYh2LTW>w{Kbtqozc?zn=$+;P2mk)2f3g>CRw;E=CFCSd&H2y$In9`v6W8a>fe+xk(jEy?R(W$v7n@;5IXE8%WxI5~e|cTsuRdrG-MegN+y0=Ou8pQgk7_l&epY^S ze!91&$h8BrB3I0OSGQc%@%i-}r>XWDZ~k2TQIP#>@ytkXLyao|GbdFE{TDZz>eRf- zx3i+*LFHTC%KHKf&zxPg=Od@#FPVtjwfZ-&-#ex1x_+h6^RU-9tP8gtEfG4q@iy~$ z^-cxeY4vQ(8~!9U{5HNSAh3){SGB+V9czUxZP!L+#+)yPHvVjirapJIebwO#>9Ncm!3?*G>{YCthKh*?Vi+ zj+3*W6*$dxxOH?}z2DECMfYs3Pk%11tZ4u3_x2*+9m)=E-xohyS9R=AnPkI*ZQ5VU xrkO8h^8I#^|2X@>d+se~mONd3Z5dD2g}W&m!fZYMFD*OzcFHxOVzpeJ%>c93tw;a> diff --git a/secrets/forgejo/nsc-token.age b/secrets/forgejo/nsc-token.age index d24251a24740d5224fb708d5d55c9796d4b84573..2f26639417662d936e9d3bf03b58c164f6a71e20 100644 GIT binary patch delta 1817 
zcmX@gca(2}PJM`lMR-tAc(_-Io0)I1SE+Y#M1FaAs(D6=Q$T@sa#odrfqqU{Zf<30 zF;_}SV2*!|k%gIdUa6y#VYsEgQDS0{Pg!~NnWU#qosj%rDxE@ z4-)l8*+Jpv$vI_7rkUo+<=TZQp1v-g8G$Jw&aSDYft7B#Ciy0wKK>Ra1_oTN&c^A5 zc}W#fAr={?`I#1#ett=29!7=fu2~hO#irinq3+pU?nPdPp4nWwy1EJlWtjoq#u4SY z`WDXmNoD%RrNz#EUIqcaNltkt2AO8*&XM)um3he(Zr=G^{dev>=vprSM!dDt%TFMx z;+ft%wbE;sCUu^D&MRHwc2x8S_aToi56^q-GJ-0fe=FyGJb8G=jD)@qTm9pE`z{Mh z?haa%`CY#zQ2vBy_nTnWA7Rct0n!YA0|dQ)OtOxNno_{yp6T)Y07Lw|2gU3>PS>+{ z)$M;=AK51I^@CM=!ku3>q9P_|nofKO*?oV_R|OlcY5v!LlnI4rFxg&gxEEU3DYU}2 zzb;08#?@^%E-b1MT;M8dU~)*gUFGlI;)46PO)g5-Fl_qiP#@u^Z{f#tHFtKT$@$r8 zcW38qo#~q)y?-`?L}-X){I=;X4a^3sTV@IZ#ObZu@w%xQvv1j|@ z>2LDCMT@+2Jm1~!kRr74&Q%&Vb*V}$CLg(-{UfQ!3TEjkhht)7T2bJShzpd z@%E*KT56iUvv1CJ{L5c|X)B9E^5eDl1xj64T38=4TG;+{MnN%KNBckd3H}AQgBD{I8)5B1;gO~LF?&EX$ zceF;sV|jAHiG?rL#?8L;e%0!V6Cd;cPOhrpU>DrFC})}FIln#p`EfSMXaCj5Wtkb5 z87aSN-|NvRQF`5uDYonC-fAwbOk&n$y|H86 zuE6x`Y^#}pIc}l{I^8k zEIakp`Y@{npLR-CFrIEVnKApv-PG(ebMHvc_c=ECpxf@wGs%B^zT575d7y{Ky&^Z= z;B-*&2p9x~~{>g9mAW-w?o{%D&1dY2vPxl$Lo~S!{ zs%L3P;+6g*Nq;PNWN>6B?n<5>>i=+o&t_qX>ANw_p zBia=Y&5`k8&-oQ3>sZCpk^8$%X4}47ZvT`wy6nH5Ij7xJ*+egNI-87Fy{aiw$ckxg z$J76)C+U@aS&_O?;)~Zq&L1uF?zpF0-<98Wp{ax8Q^#DB+=gYdwJcpTcK@qqJ$yP) zBPx82&rC_)gwHL{bzdzs$(J}pU^S!SpBxA8%LB6?PA2)JEhbn15zwy{*;tXMt2O58E z4K1byS#Qh!A%9_RNb_Qwh^yurVG%sWzdx-%vukNn$%~}wl*LCpE*(0%jD4Dpugt%h zcbRS}HSCWzd-rk2!^K7w0oC5-X-8UFSG}5|Kikxq`xm%WYkwH;pm49JDn14xrVL_#jm#KcGsf&B4u~%_LzL~$Dqfg+( z4-)kT+2Q7%p(VMd2HB;SrUn6tX~71effe9Jq1v9>5vE01u7L(QIhC#%QTjg4m3hgTm05;?Sw^|Oh9+FPy1ELkCBDU3K1LOp z75bj;d6pqwsR5Oy{!x|AmHFBxQTY|VfdTbpj=qJK9;p^wo!f&PZ@C0`6d&1souMJT zR^#j5?_VojFYKJ#^dqOYljmrG(uWA6D_<1b;+HKy(P6Cf)p6a8o{g%xk7Czod0m*f zL%IA}>gHu3YwtzynOaIJnn%vv?R!>X)dj87F5aE>HXj*`Qs1pP$^HBM`?MXh2evIy zK5KOEetnum;rsQ9kuyvp*8a=q6Fp?P_;FingVr$)o*xaumtKjoFOdt-Ij?s{wW#Xw z@^_`*Q}|7t&ax&tKJby-G5Le`H)Yv9TgC5aG2UlA*EH?b!uy7o3&K)P&vvz&*&%%` z{(E+hzpKK|=FOo<&$gvWz2c-&`Y?u{ZFq}`n%_0fhUWm zib!AilaOzANLXX8UHqdDkykhK^>oh2Oi;UYz{B%phvBAayVz{FcSzPRefeDR=VT*( 
z*A>mHjwLJJ*&ojMe^Y%#zSYUAc_|iCggP3$B4>)9{Gw%J@Ot`Pjaf6V+WwzzsHVAs ziF4jEyJuk=d0kd{TD7POY+Pzxz3<3$xjhEEgbvSlob$Eg&hqfbrP0F7=YNaL@vhs| zt*Ab2iR-nHL#LD0U2%IGYIo$9b4*pA%j2r|{@Z3qIrMJL@H>7g=@k-Nb(2hpZmP^>5GGWV*a+BHF=IT5=;v>1v&WX zt=%;5zeiN!?hGHrzA4iF(oG*adC#w_&)9S6&)>%VYqzL-+z~&XJL7YTV8E&;52da? z+PC%Zy65#p{_3^oC&<+9XWG-GbG?7Yggm2XrcIKiY6__Pcta&g$v}fKw`}xJ+&MubUe|NdNoqk*d=aVM8n;FUdoYg)n%q*ju z*Z!@aaYHvrblYZrCe9PvA6!n@V0X*a`h7lU(htwCzcyTAn0_trTgcYa?MzcG4{9|Y z`Sq{kVy~uxE??iRd0BcXfuH3B*B7rlxlYdQ%=OLfhu(bIu=;jZ`tKE7wGq{~^1EHV zZfd78z06%d|7-D4U*@SX3y(%Eth&^_euMGz@M&uo2=dgwZ|hvyQBcT#()(JZ*D~%% z8)mt^PNxo3q$V(KR%tx_D>X^f;+p;<^E+E+w!Y&(_SIwR(ukiESXSq`e$f%Tvuf(( zIUUZQHy+S?wsFS16CW&2h^iH5 z?Ynl#Cls`Xuc+PEP(SJLE2G4Wl+s0ultg}Yvfp4ch&gRo6>02#jCYd!=lUlWe>d$Y zcy&|6o;9uhHG9L)k_ifVq6dF?t-0h=_w@c##@uzWno+esjck;^oLp+5c>hg-;NB%U zV$0-iJP2nFoZxPeEbt~f>FkTTOAXxDluJFO%G&ow|udBg?+;P z`9^CevnlaqH~tsc|0>^Q%Lg;ZoAwb`rY_F;tGmka!$)h*g9?1d115dl@ZHk8Fy~r{ zZex#sVCsx){Z&U~jcTud$`7%d9C%Cjw!_oEy;^Vg%(7R!QIWpyj&d=3`zNXSHz%D* zx)tvE{o2R)Jtw0-ylA%eVzPw h(z#81MRA|4Jub1^YN$%u{*@F Date: Thu, 19 Mar 2026 14:11:40 -0700 Subject: [PATCH 50/50] Fallback macos ssh bootstrap to nsc --- services/forgejo-nsc/README.md | 7 +- .../forgejo-nsc/internal/nsc/macos_nsc.go | 106 +++++++++++++++++- .../internal/nsc/macos_nsc_test.go | 38 ++++++- 3 files changed, 145 insertions(+), 6 deletions(-) diff --git a/services/forgejo-nsc/README.md b/services/forgejo-nsc/README.md index 4cee5da..95167c1 100644 --- a/services/forgejo-nsc/README.md +++ b/services/forgejo-nsc/README.md @@ -45,9 +45,10 @@ profile. The important knobs are: - `namespace.machine_type` / `namespace.duration` – shape + TTL for the ephemeral Namespace environment. The dispatcher destroys the instance after a job so the TTL acts as a hard cap, not an idle timeout. 
-- macOS fallback launches still use `nsc create`, but bootstrap runs over the - Compute SSH config endpoint instead of `nsc ssh` so the dispatcher can always - destroy the instance itself instead of relying on a websocket SSH proxy handoff. +- macOS fallback launches still use `nsc create`. Bootstrap prefers the + Compute SSH config endpoint, and falls back to keychain-backed `nsc ssh` + only when the Compute bearer is rejected. That keeps the fast path on direct + TCP while preserving a working fallback when tenant auth drifts. - `namespace.linux_cache_*` / `namespace.macos_cache_*` – persistent cache volumes mounted into runners so Linux can keep `/nix` plus shared build caches warm and macOS can reuse Rust toolchains, Xcode package caches, and diff --git a/services/forgejo-nsc/internal/nsc/macos_nsc.go b/services/forgejo-nsc/internal/nsc/macos_nsc.go index 6c66f34..159634a 100644 --- a/services/forgejo-nsc/internal/nsc/macos_nsc.go +++ b/services/forgejo-nsc/internal/nsc/macos_nsc.go @@ -12,6 +12,8 @@ import ( "path/filepath" "strings" "time" + + "connectrpc.com/connect" ) func nscCLIEnv() []string { @@ -64,6 +66,13 @@ func normalizeMacOSNSCMachineType(machineType string) (normalized string, change return normalized, changed, nil } +type macosNSCSSHOutcome int + +const ( + macosNSCSSHCompleted macosNSCSSHOutcome = iota + macosNSCSSHHandoff +) + func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName string, req LaunchRequest, ttl time.Duration, machineType string) error { if machineType == "" { return errors.New("machine_type is required for macos runners") @@ -216,14 +225,38 @@ func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName str return fmt.Errorf("nsc create failed without producing an instance id\n%s", lastOut) } - // Always attempt cleanup even if the runner fails. 
- defer d.destroyNSCInstance(context.Background(), runnerName, instanceID) + destroyOnReturn := true + defer func() { + if destroyOnReturn { + d.destroyNSCInstance(context.Background(), runnerName, instanceID) + } + }() script := macosBootstrapWrapperScript(runnerName, req, d.opts.Executor, d.opts.WorkDir) // Use the Compute SSH config endpoint (direct TCP) instead of `nsc ssh`, which // relies on a websocket-based SSH proxy that is less reliable under the // revokable tenant token flow used by the dispatcher. if err := d.runMacOSComputeSSHScript(ctx, runnerName, instanceID, script); err != nil { + if shouldFallbackToNSCSSH(err) { + d.log.Warn("compute ssh bootstrap failed; falling back to nsc ssh", + "runner", runnerName, + "instance", instanceID, + "err", err, + ) + outcome, sshErr := d.runMacOSNSCSSHScript(ctx, runnerName, instanceID, script) + if sshErr != nil { + return sshErr + } + if outcome == macosNSCSSHHandoff { + destroyOnReturn = false + d.log.Info("leaving macos nsc instance running until TTL after runner handoff", + "runner", runnerName, + "instance", instanceID, + "ttl", ttl.String(), + ) + } + return nil + } return err } return nil @@ -345,6 +378,75 @@ func shellSingleQuote(value string) string { return "'" + strings.ReplaceAll(value, "'", `'\"'\"'`) + "'" } +func shouldFallbackToNSCSSH(err error) bool { + if err == nil { + return false + } + + switch connect.CodeOf(err) { + case connect.CodeUnauthenticated, connect.CodePermissionDenied, connect.CodeUnimplemented: + return true + } + + errText := strings.ToLower(err.Error()) + return strings.Contains(errText, "compute get ssh config failed") && + (strings.Contains(errText, "unauthenticated") || + strings.Contains(errText, "permission_denied") || + strings.Contains(errText, "permission denied") || + strings.Contains(errText, "unimplemented")) +} + +func (d *Dispatcher) runMacOSNSCSSHScript(ctx context.Context, runnerName, instanceID, script string) (macosNSCSSHOutcome, error) { + sshCtx, cancel := 
context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + + args := []string{"ssh", "--disable-pty", instanceID, "/bin/bash"} + args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL) + + cmd := exec.CommandContext(sshCtx, d.opts.BinaryPath, args...) + cmd.Env = nscCLIEnv() + cmd.Stdin = strings.NewReader(script) + + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + + if err := cmd.Run(); err != nil { + if errors.Is(sshCtx.Err(), context.DeadlineExceeded) { + return macosNSCSSHCompleted, fmt.Errorf("nsc ssh timed out after %s\n%s", 5*time.Minute, strings.TrimSpace(buf.String())) + } + if nscSSHBootstrapLikelySucceeded(err, buf.String()) { + d.log.Warn("nsc ssh exited after runner handoff; treating bootstrap as successful", + "runner", runnerName, + "instance", instanceID, + "err", err, + ) + d.log.Info("macos runner bootstrap completed via nsc ssh", "runner", runnerName, "instance", instanceID) + return macosNSCSSHHandoff, nil + } + return macosNSCSSHCompleted, fmt.Errorf("nsc ssh runner bootstrap failed: %w\n%s", err, strings.TrimSpace(buf.String())) + } + + d.log.Info("macos runner bootstrap completed via nsc ssh", "runner", runnerName, "instance", instanceID) + return macosNSCSSHCompleted, nil +} + +func nscSSHBootstrapLikelySucceeded(err error, output string) bool { + if err == nil { + return false + } + + errText := strings.ToLower(err.Error()) + if !strings.Contains(errText, "remote command exited without exit status or exit signal") { + return false + } + + output = strings.ToLower(output) + return strings.Contains(output, "runner registered successfully") && + strings.Contains(output, "starting job") && + strings.Contains(output, "task ") +} + func prependNSCRegionArgs(args []string, computeBaseURL string) []string { region := strings.TrimSpace(os.Getenv("NSC_REGION")) if region == "" { diff --git a/services/forgejo-nsc/internal/nsc/macos_nsc_test.go b/services/forgejo-nsc/internal/nsc/macos_nsc_test.go index 682f441..d2aabc6 100644 --- 
a/services/forgejo-nsc/internal/nsc/macos_nsc_test.go +++ b/services/forgejo-nsc/internal/nsc/macos_nsc_test.go @@ -1,6 +1,9 @@ package nsc -import "testing" +import ( + "errors" + "testing" +) func TestNormalizeMacOSNSCMachineTypeRoundsUp(t *testing.T) { t.Parallel() @@ -31,3 +34,36 @@ func TestNormalizeMacOSNSCMachineTypeKeepsAllowedShape(t *testing.T) { t.Fatalf("expected 6x14, got %q", got) } } + +func TestShouldFallbackToNSCSSHFallbackForComputeAuthErrors(t *testing.T) { + t.Parallel() + + err := errors.New("compute get ssh config failed: unauthenticated: invalid tenant credentials") + if !shouldFallbackToNSCSSH(err) { + t.Fatal("expected compute auth error to fall back to nsc ssh") + } +} + +func TestShouldFallbackToNSCSSHRejectsOtherErrors(t *testing.T) { + t.Parallel() + + err := errors.New("compute ssh runner bootstrap failed: exit status 1") + if shouldFallbackToNSCSSH(err) { + t.Fatal("expected unrelated bootstrap errors to remain fatal") + } +} + +func TestNSCSSHBootstrapLikelySucceeded(t *testing.T) { + t.Parallel() + + err := errors.New("wait: remote command exited without exit status or exit signal") + output := ` +level=info msg="Runner registered successfully." +time="2026-03-19T11:29:49Z" level=info msg="Starting job" +time="2026-03-19T11:29:50Z" level=info msg="task 124 repo is hackclub/burrow" +` + + if !nscSSHBootstrapLikelySucceeded(err, output) { + t.Fatal("expected handoff success heuristic to match") + } +}