Compare commits
50 commits
main
...
codex/arti
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b0e4351a9d | ||
|
|
a4cabf9fb7 | ||
|
|
283209d364 | ||
|
|
fc79766a31 | ||
|
|
8957af0e05 | ||
|
|
db443bcb50 | ||
|
|
dd369bd0f8 | ||
|
|
ff5736a817 | ||
|
|
1964d1fa6e | ||
|
|
3210570ff3 | ||
|
|
c47f0e6bea | ||
|
|
0310ef17dc | ||
|
|
5b09f3a742 | ||
|
|
5c0a9b3f54 | ||
|
|
028627bfcb | ||
|
|
5bd95b7a7c | ||
|
|
e0fe21fad8 | ||
|
|
9fcaf137ac | ||
|
|
f7193728df | ||
|
|
9a5f147585 | ||
|
|
52c3048458 | ||
|
|
8678ef61ba | ||
|
|
bf4b270db5 | ||
|
|
2da0244d42 | ||
|
|
7fb6419fa0 | ||
|
|
ef3585bb14 | ||
|
|
52b7f102f0 | ||
|
|
b81a3377df | ||
|
|
4fbebdf85c | ||
|
|
03415e579b | ||
|
|
7039bf5aad | ||
|
|
15e897d262 | ||
|
|
1a3d59d25f | ||
|
|
ed247b2f5e | ||
|
|
afc3e79eb0 | ||
|
|
6fcd7ff6ed | ||
|
|
28fd58b009 | ||
|
|
17112e4e48 | ||
|
|
9b642aa5b7 | ||
|
|
b9fb30c18c | ||
|
|
6300c661ff | ||
|
|
5c57ac3655 | ||
|
|
f74a17c124 | ||
|
|
c72426ef52 | ||
|
|
48b8a3c32f | ||
|
|
251922da9e | ||
|
|
5115eb831a | ||
|
|
44dd88c111 | ||
|
|
865b676c99 | ||
|
|
482fd5d085 |
105 changed files with 16668 additions and 508 deletions
|
|
@ -1,6 +1,3 @@
|
||||||
[target.'cfg(unix)']
|
|
||||||
runner = "sudo -E"
|
|
||||||
|
|
||||||
[alias] # command aliases
|
[alias] # command aliases
|
||||||
rr = "run --release"
|
rr = "run --release"
|
||||||
bb = "build --release"
|
bb = "build --release"
|
||||||
|
|
|
||||||
159
.forgejo/workflows/build-apple.yml
Normal file
159
.forgejo/workflows/build-apple.yml
Normal file
|
|
@ -0,0 +1,159 @@
|
||||||
|
name: Build Apple
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- "**"
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
name: Build App (${{ matrix.platform }})
|
||||||
|
runs-on: namespace-profile-macos-large
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- platform: macOS
|
||||||
|
cache-id: macos
|
||||||
|
destination: platform=macOS
|
||||||
|
rust-targets: x86_64-apple-darwin,aarch64-apple-darwin
|
||||||
|
- platform: iOS Simulator
|
||||||
|
cache-id: ios-simulator
|
||||||
|
destination: platform=iOS Simulator,name=iPhone 17 Pro
|
||||||
|
rust-targets: aarch64-apple-ios-sim,x86_64-apple-ios
|
||||||
|
env:
|
||||||
|
CARGO_INCREMENTAL: 0
|
||||||
|
RUST_BACKTRACE: short
|
||||||
|
RUSTC_WRAPPER: sccache
|
||||||
|
SCCACHE_CACHE_SIZE: 20G
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: https://code.forgejo.org/actions/checkout@v4
|
||||||
|
with:
|
||||||
|
token: ${{ github.token }}
|
||||||
|
fetch-depth: 0
|
||||||
|
submodules: recursive
|
||||||
|
|
||||||
|
- name: Select Xcode
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
candidates=(
|
||||||
|
"/Applications/Xcode_26.1.app/Contents/Developer"
|
||||||
|
"/Applications/Xcode_26_1.app/Contents/Developer"
|
||||||
|
"/Applications/Xcode.app/Contents/Developer"
|
||||||
|
"/Applications/Xcode/Xcode.app/Contents/Developer"
|
||||||
|
)
|
||||||
|
selected=""
|
||||||
|
for candidate in "${candidates[@]}"; do
|
||||||
|
if [[ -d "$candidate" ]]; then
|
||||||
|
selected="$candidate"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
if [[ -z "$selected" ]] && command -v xcode-select >/dev/null 2>&1; then
|
||||||
|
selected="$(xcode-select -p)"
|
||||||
|
fi
|
||||||
|
if [[ -z "$selected" ]]; then
|
||||||
|
echo "::error ::Unable to locate an Xcode toolchain" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "DEVELOPER_DIR=$selected" >> "$GITHUB_ENV"
|
||||||
|
DEVELOPER_DIR="$selected" /usr/bin/xcodebuild -version || true
|
||||||
|
|
||||||
|
- name: Prepare Cache Dirs
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cache_root="${NSC_CACHE_PATH:-${HOME}/.cache/burrow}"
|
||||||
|
shared_root="${NSC_SHARED_CACHE_PATH:-${cache_root}/shared}"
|
||||||
|
lane_root="${NSC_LANE_CACHE_PATH:-${cache_root}/lane/${{ matrix.cache-id }}}"
|
||||||
|
mkdir -p \
|
||||||
|
"${shared_root}/cargo" \
|
||||||
|
"${shared_root}/rustup" \
|
||||||
|
"${shared_root}/sccache" \
|
||||||
|
"${shared_root}/homebrew" \
|
||||||
|
"${shared_root}/apple/PackageCache" \
|
||||||
|
"${shared_root}/apple/SourcePackages" \
|
||||||
|
"${lane_root}/cargo-target" \
|
||||||
|
"${lane_root}/DerivedData"
|
||||||
|
echo "CARGO_HOME=${shared_root}/cargo" >> "${GITHUB_ENV}"
|
||||||
|
echo "CARGO_TARGET_DIR=${lane_root}/cargo-target" >> "${GITHUB_ENV}"
|
||||||
|
echo "RUSTUP_HOME=${shared_root}/rustup" >> "${GITHUB_ENV}"
|
||||||
|
echo "SCCACHE_DIR=${shared_root}/sccache" >> "${GITHUB_ENV}"
|
||||||
|
echo "HOMEBREW_CACHE=${shared_root}/homebrew" >> "${GITHUB_ENV}"
|
||||||
|
echo "APPLE_PACKAGE_CACHE=${shared_root}/apple/PackageCache" >> "${GITHUB_ENV}"
|
||||||
|
echo "APPLE_SOURCE_PACKAGES=${shared_root}/apple/SourcePackages" >> "${GITHUB_ENV}"
|
||||||
|
echo "APPLE_DERIVED_DATA=${lane_root}/DerivedData" >> "${GITHUB_ENV}"
|
||||||
|
df -h "${shared_root}" "${lane_root}" || true
|
||||||
|
|
||||||
|
- name: Install Rust
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
export PATH="${CARGO_HOME}/bin:${PATH}"
|
||||||
|
|
||||||
|
if ! command -v rustup >/dev/null 2>&1; then
|
||||||
|
curl --proto '=https' --tlsv1.2 -fsSL https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain 1.93.1
|
||||||
|
else
|
||||||
|
rustup set profile minimal
|
||||||
|
rustup toolchain install 1.93.1
|
||||||
|
rustup default 1.93.1
|
||||||
|
fi
|
||||||
|
|
||||||
|
mkdir -p "${CARGO_HOME}/bin"
|
||||||
|
echo "${CARGO_HOME}/bin" >> "${GITHUB_PATH}"
|
||||||
|
export PATH="${CARGO_HOME}/bin:${PATH}"
|
||||||
|
|
||||||
|
rustup show active-toolchain
|
||||||
|
toolchain="$(rustup show active-toolchain | awk '{print $1}')"
|
||||||
|
cargo_bin="$(rustup which --toolchain "${toolchain}" cargo)"
|
||||||
|
rustc_bin="$(rustup which --toolchain "${toolchain}" rustc)"
|
||||||
|
|
||||||
|
targets='${{ matrix.rust-targets }}'
|
||||||
|
for target in ${targets//,/ }; do
|
||||||
|
rustup target add --toolchain "${toolchain}" "${target}"
|
||||||
|
done
|
||||||
|
|
||||||
|
"${rustc_bin}" --version
|
||||||
|
"${cargo_bin}" --version
|
||||||
|
|
||||||
|
- name: Install Protobuf
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
if ! command -v protoc >/dev/null 2>&1; then
|
||||||
|
brew install protobuf
|
||||||
|
fi
|
||||||
|
if ! command -v sccache >/dev/null 2>&1; then
|
||||||
|
brew install sccache
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Build
|
||||||
|
shell: bash
|
||||||
|
working-directory: Apple
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
xcodebuild build \
|
||||||
|
-project Burrow.xcodeproj \
|
||||||
|
-scheme App \
|
||||||
|
-destination '${{ matrix.destination }}' \
|
||||||
|
-skipPackagePluginValidation \
|
||||||
|
-skipMacroValidation \
|
||||||
|
-onlyUsePackageVersionsFromResolvedFile \
|
||||||
|
-clonedSourcePackagesDirPath "$APPLE_SOURCE_PACKAGES" \
|
||||||
|
-packageCachePath "$APPLE_PACKAGE_CACHE" \
|
||||||
|
-derivedDataPath "$APPLE_DERIVED_DATA" \
|
||||||
|
CODE_SIGNING_ALLOWED=NO \
|
||||||
|
CODE_SIGNING_REQUIRED=NO \
|
||||||
|
CODE_SIGN_IDENTITY="" \
|
||||||
|
DEVELOPMENT_TEAM=""
|
||||||
65
.forgejo/workflows/build-rust.yml
Normal file
65
.forgejo/workflows/build-rust.yml
Normal file
|
|
@ -0,0 +1,65 @@
|
||||||
|
name: Build Rust
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- "**"
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
rust:
|
||||||
|
name: Cargo Test
|
||||||
|
runs-on: namespace-profile-linux-medium
|
||||||
|
env:
|
||||||
|
CARGO_INCREMENTAL: 0
|
||||||
|
NIX_CONFIG: |
|
||||||
|
experimental-features = nix-command flakes
|
||||||
|
accept-flake-config = true
|
||||||
|
RUSTC_WRAPPER: sccache
|
||||||
|
SCCACHE_CACHE_SIZE: 20G
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: https://code.forgejo.org/actions/checkout@v4
|
||||||
|
with:
|
||||||
|
token: ${{ github.token }}
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Prepare Cache Dirs
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cache_root="${NSC_CACHE_PATH:-${HOME}/.cache/burrow}"
|
||||||
|
shared_root="${NSC_SHARED_CACHE_PATH:-${cache_root}/shared}"
|
||||||
|
lane_root="${NSC_LANE_CACHE_PATH:-${cache_root}/lane/build-rust}"
|
||||||
|
mkdir -p \
|
||||||
|
"${shared_root}/cargo" \
|
||||||
|
"${shared_root}/sccache" \
|
||||||
|
"${shared_root}/xdg" \
|
||||||
|
"${lane_root}/cargo-target"
|
||||||
|
echo "CARGO_HOME=${shared_root}/cargo" >> "${GITHUB_ENV}"
|
||||||
|
echo "SCCACHE_DIR=${shared_root}/sccache" >> "${GITHUB_ENV}"
|
||||||
|
echo "XDG_CACHE_HOME=${shared_root}/xdg" >> "${GITHUB_ENV}"
|
||||||
|
echo "CARGO_TARGET_DIR=${lane_root}/cargo-target" >> "${GITHUB_ENV}"
|
||||||
|
{
|
||||||
|
echo 'NIX_CONFIG<<EOF'
|
||||||
|
printf '%s\n' "${NIX_CONFIG}"
|
||||||
|
echo 'EOF'
|
||||||
|
} >> "${GITHUB_ENV}"
|
||||||
|
df -h /nix "${shared_root}" "${lane_root}" || true
|
||||||
|
|
||||||
|
- name: Test
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
nix develop .#ci -c bash -euo pipefail -c '
|
||||||
|
sccache --zero-stats >/dev/null 2>&1 || true
|
||||||
|
cargo test --workspace --all-features
|
||||||
|
sccache --show-stats || true
|
||||||
|
'
|
||||||
63
.forgejo/workflows/build-site.yml
Normal file
63
.forgejo/workflows/build-site.yml
Normal file
|
|
@ -0,0 +1,63 @@
|
||||||
|
name: Build Site
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- "**"
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
site:
|
||||||
|
name: Next.js Build
|
||||||
|
runs-on: namespace-profile-linux-medium
|
||||||
|
env:
|
||||||
|
NIX_CONFIG: |
|
||||||
|
experimental-features = nix-command flakes
|
||||||
|
accept-flake-config = true
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: https://code.forgejo.org/actions/checkout@v4
|
||||||
|
with:
|
||||||
|
token: ${{ github.token }}
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Prepare Cache Dirs
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
cache_root="${NSC_CACHE_PATH:-${HOME}/.cache/burrow}"
|
||||||
|
shared_root="${NSC_SHARED_CACHE_PATH:-${cache_root}/shared}"
|
||||||
|
lane_root="${NSC_LANE_CACHE_PATH:-${cache_root}/lane/build-site}"
|
||||||
|
mkdir -p \
|
||||||
|
"${shared_root}/npm" \
|
||||||
|
"${shared_root}/xdg" \
|
||||||
|
"${lane_root}/next-cache"
|
||||||
|
echo "NPM_CONFIG_CACHE=${shared_root}/npm" >> "${GITHUB_ENV}"
|
||||||
|
echo "XDG_CACHE_HOME=${shared_root}/xdg" >> "${GITHUB_ENV}"
|
||||||
|
echo "NEXT_CACHE_DIR=${lane_root}/next-cache" >> "${GITHUB_ENV}"
|
||||||
|
{
|
||||||
|
echo 'NIX_CONFIG<<EOF'
|
||||||
|
printf '%s\n' "${NIX_CONFIG}"
|
||||||
|
echo 'EOF'
|
||||||
|
} >> "${GITHUB_ENV}"
|
||||||
|
df -h /nix "${shared_root}" "${lane_root}" || true
|
||||||
|
|
||||||
|
- name: Build
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
nix develop .#ci -c bash -euo pipefail -c '
|
||||||
|
mkdir -p site/.next
|
||||||
|
rm -rf site/.next/cache
|
||||||
|
ln -sfn "${NEXT_CACHE_DIR}" site/.next/cache
|
||||||
|
cd site
|
||||||
|
npm install
|
||||||
|
npm run build
|
||||||
|
'
|
||||||
2
.gitignore
vendored
2
.gitignore
vendored
|
|
@ -12,6 +12,8 @@ target/
|
||||||
.idea/
|
.idea/
|
||||||
|
|
||||||
tmp/
|
tmp/
|
||||||
|
intake/
|
||||||
|
|
||||||
*.db
|
*.db
|
||||||
|
*.sqlite3
|
||||||
*.sock
|
*.sock
|
||||||
|
|
@ -2,22 +2,20 @@ import AsyncAlgorithms
|
||||||
import BurrowConfiguration
|
import BurrowConfiguration
|
||||||
import BurrowCore
|
import BurrowCore
|
||||||
import libburrow
|
import libburrow
|
||||||
import NetworkExtension
|
@preconcurrency import NetworkExtension
|
||||||
import os
|
import os
|
||||||
|
|
||||||
|
// Xcode 26 imports `startTunnel(options:)` as `[String: NSObject]?` and treats the
|
||||||
|
// override as crossing a nonisolated boundary. The extension target does not
|
||||||
|
// mutate or forward these Cocoa objects, so treat them as an unchecked escape hatch.
|
||||||
|
extension NSObject: @retroactive @unchecked Sendable {}
|
||||||
|
|
||||||
class PacketTunnelProvider: NEPacketTunnelProvider {
|
class PacketTunnelProvider: NEPacketTunnelProvider {
|
||||||
enum Error: Swift.Error {
|
enum Error: Swift.Error {
|
||||||
case missingTunnelConfiguration
|
case missingTunnelConfiguration
|
||||||
}
|
}
|
||||||
|
|
||||||
private let logger = Logger.logger(for: PacketTunnelProvider.self)
|
private static let logger = Logger.logger(for: PacketTunnelProvider.self)
|
||||||
|
|
||||||
private var client: TunnelClient {
|
|
||||||
get throws { try _client.get() }
|
|
||||||
}
|
|
||||||
private let _client: Result<TunnelClient, Swift.Error> = Result {
|
|
||||||
try TunnelClient.unix(socketURL: Constants.socketURL)
|
|
||||||
}
|
|
||||||
|
|
||||||
override init() {
|
override init() {
|
||||||
do {
|
do {
|
||||||
|
|
@ -26,31 +24,33 @@ class PacketTunnelProvider: NEPacketTunnelProvider {
|
||||||
databasePath: try Constants.databaseURL.path(percentEncoded: false)
|
databasePath: try Constants.databaseURL.path(percentEncoded: false)
|
||||||
)
|
)
|
||||||
} catch {
|
} catch {
|
||||||
logger.error("Failed to spawn networking thread: \(error)")
|
Self.logger.error("Failed to spawn networking thread: \(error)")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
override func startTunnel(options: [String: NSObject]? = nil) async throws {
|
nonisolated override func startTunnel(options: [String: NSObject]? = nil) async throws {
|
||||||
do {
|
do {
|
||||||
|
let client = try TunnelClient.unix(socketURL: Constants.socketURL)
|
||||||
let configuration = try await Array(client.tunnelConfiguration(.init()).prefix(1)).first
|
let configuration = try await Array(client.tunnelConfiguration(.init()).prefix(1)).first
|
||||||
guard let settings = configuration?.settings else {
|
guard let settings = configuration?.settings else {
|
||||||
throw Error.missingTunnelConfiguration
|
throw Error.missingTunnelConfiguration
|
||||||
}
|
}
|
||||||
try await setTunnelNetworkSettings(settings)
|
try await setTunnelNetworkSettings(settings)
|
||||||
_ = try await client.tunnelStart(.init())
|
_ = try await client.tunnelStart(.init())
|
||||||
logger.log("Started tunnel with network settings: \(settings)")
|
Self.logger.log("Started tunnel with network settings: \(settings)")
|
||||||
} catch {
|
} catch {
|
||||||
logger.error("Failed to start tunnel: \(error)")
|
Self.logger.error("Failed to start tunnel: \(error)")
|
||||||
throw error
|
throw error
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
override func stopTunnel(with reason: NEProviderStopReason) async {
|
nonisolated override func stopTunnel(with reason: NEProviderStopReason) async {
|
||||||
do {
|
do {
|
||||||
|
let client = try TunnelClient.unix(socketURL: Constants.socketURL)
|
||||||
_ = try await client.tunnelStop(.init())
|
_ = try await client.tunnelStop(.init())
|
||||||
logger.log("Stopped client")
|
Self.logger.log("Stopped client")
|
||||||
} catch {
|
} catch {
|
||||||
logger.error("Failed to stop tunnel: \(error)")
|
Self.logger.error("Failed to stop tunnel: \(error)")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -62,22 +62,79 @@ else
|
||||||
CARGO_TARGET_SUBDIR="release"
|
CARGO_TARGET_SUBDIR="release"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
RUSTUP_TOOLCHAIN=""
|
||||||
if [[ -x "$(command -v rustup)" ]]; then
|
if [[ -x "$(command -v rustup)" ]]; then
|
||||||
CARGO_PATH="$(dirname $(rustup which cargo)):/usr/bin"
|
RUSTUP_TOOLCHAIN="$(rustup show active-toolchain | awk '{print $1}')"
|
||||||
|
if [[ -z "${RUSTUP_TOOLCHAIN}" ]]; then
|
||||||
|
echo 'error: Unable to determine active rustup toolchain'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
CARGO_BIN="$(rustup which --toolchain "${RUSTUP_TOOLCHAIN}" cargo)"
|
||||||
|
RUSTC_BIN="$(rustup which --toolchain "${RUSTUP_TOOLCHAIN}" rustc)"
|
||||||
|
CARGO_PATH="$(dirname "${CARGO_BIN}"):$(dirname "${RUSTC_BIN}"):/usr/bin"
|
||||||
else
|
else
|
||||||
CARGO_PATH="$(dirname $(readlink -f $(which cargo))):/usr/bin"
|
CARGO_BIN="$(command -v cargo)"
|
||||||
|
CARGO_PATH="$(dirname "${CARGO_BIN}"):/usr/bin"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
PROTOC=$(readlink -f $(which protoc))
|
PROTOC=$(readlink -f $(which protoc))
|
||||||
CARGO_PATH="$(dirname $PROTOC):$CARGO_PATH"
|
CARGO_PATH="$(dirname $PROTOC):$CARGO_PATH"
|
||||||
|
|
||||||
|
if [[ -n "${RUSTC_WRAPPER:-}" && "${RUSTC_WRAPPER}" != /* ]]; then
|
||||||
|
WRAPPER_PATH="$(command -v "${RUSTC_WRAPPER}" || true)"
|
||||||
|
if [[ -n "${WRAPPER_PATH}" ]]; then
|
||||||
|
RUSTC_WRAPPER="${WRAPPER_PATH}"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -x "$(command -v rustup)" ]]; then
|
||||||
|
for TARGET in "${RUST_TARGETS[@]}"; do
|
||||||
|
if ! rustup target list --installed | grep -qx "${TARGET}"; then
|
||||||
|
rustup target add --toolchain "${RUSTUP_TOOLCHAIN}" "${TARGET}"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
# Run cargo without the various environment variables set by Xcode.
|
# Run cargo without the various environment variables set by Xcode.
|
||||||
# Those variables can confuse cargo and the build scripts it runs.
|
# Those variables can confuse cargo and the build scripts it runs.
|
||||||
env -i PATH="$CARGO_PATH" PROTOC="$PROTOC" CARGO_TARGET_DIR="${CONFIGURATION_TEMP_DIR}/target" IPHONEOS_DEPLOYMENT_TARGET="$IPHONEOS_DEPLOYMENT_TARGET" MACOSX_DEPLOYMENT_TARGET="$MACOSX_DEPLOYMENT_TARGET" cargo build "${CARGO_ARGS[@]}"
|
EXTRA_ENV=()
|
||||||
|
for VAR_NAME in HOME CARGO_HOME CARGO_TARGET_DIR RUSTUP_HOME RUSTC_WRAPPER SCCACHE_DIR CARGO_INCREMENTAL; do
|
||||||
|
if [[ -n "${!VAR_NAME:-}" ]]; then
|
||||||
|
EXTRA_ENV+=("${VAR_NAME}=${!VAR_NAME}")
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
EFFECTIVE_CARGO_TARGET_DIR="${CARGO_TARGET_DIR:-${CONFIGURATION_TEMP_DIR}/target}"
|
||||||
|
BUILD_ENV=(
|
||||||
|
"PATH=$CARGO_PATH"
|
||||||
|
"PROTOC=$PROTOC"
|
||||||
|
"CARGO_TARGET_DIR=${EFFECTIVE_CARGO_TARGET_DIR}"
|
||||||
|
"${EXTRA_ENV[@]}"
|
||||||
|
)
|
||||||
|
if [[ -n "${RUSTUP_TOOLCHAIN}" ]]; then
|
||||||
|
BUILD_ENV+=("RUSTUP_TOOLCHAIN=${RUSTUP_TOOLCHAIN}")
|
||||||
|
fi
|
||||||
|
if [[ -n "${RUSTC_BIN:-}" ]]; then
|
||||||
|
BUILD_ENV+=("RUSTC=${RUSTC_BIN}")
|
||||||
|
fi
|
||||||
|
if [[ -n "${IPHONEOS_DEPLOYMENT_TARGET:-}" ]]; then
|
||||||
|
BUILD_ENV+=("IPHONEOS_DEPLOYMENT_TARGET=${IPHONEOS_DEPLOYMENT_TARGET}")
|
||||||
|
fi
|
||||||
|
if [[ -n "${MACOSX_DEPLOYMENT_TARGET:-}" ]]; then
|
||||||
|
BUILD_ENV+=("MACOSX_DEPLOYMENT_TARGET=${MACOSX_DEPLOYMENT_TARGET}")
|
||||||
|
fi
|
||||||
|
echo "Using Rust toolchain: ${RUSTUP_TOOLCHAIN:-system}"
|
||||||
|
echo "Using cargo: ${CARGO_BIN}"
|
||||||
|
if [[ -n "${RUSTC_BIN:-}" ]]; then
|
||||||
|
echo "Using rustc: ${RUSTC_BIN}"
|
||||||
|
fi
|
||||||
|
if [[ -n "${RUSTC_WRAPPER:-}" ]]; then
|
||||||
|
echo "Using rustc wrapper: ${RUSTC_WRAPPER}"
|
||||||
|
fi
|
||||||
|
env -i "${BUILD_ENV[@]}" "${CARGO_BIN}" build "${CARGO_ARGS[@]}"
|
||||||
|
|
||||||
mkdir -p "${BUILT_PRODUCTS_DIR}"
|
mkdir -p "${BUILT_PRODUCTS_DIR}"
|
||||||
|
|
||||||
# Use `lipo` to merge the architectures together into BUILT_PRODUCTS_DIR
|
# Use `lipo` to merge the architectures together into BUILT_PRODUCTS_DIR
|
||||||
/usr/bin/xcrun --sdk $PLATFORM_NAME lipo \
|
/usr/bin/xcrun --sdk $PLATFORM_NAME lipo \
|
||||||
-create $(printf "${CONFIGURATION_TEMP_DIR}/target/%q/${CARGO_TARGET_SUBDIR}/libburrow.a " "${RUST_TARGETS[@]}") \
|
-create $(printf "${EFFECTIVE_CARGO_TARGET_DIR}/%q/${CARGO_TARGET_SUBDIR}/libburrow.a " "${RUST_TARGETS[@]}") \
|
||||||
-output "${BUILT_PRODUCTS_DIR}/libburrow.a"
|
-output "${BUILT_PRODUCTS_DIR}/libburrow.a"
|
||||||
|
|
|
||||||
38
CONSTITUTION.md
Normal file
38
CONSTITUTION.md
Normal file
|
|
@ -0,0 +1,38 @@
|
||||||
|
# Burrow Constitution
|
||||||
|
|
||||||
|
1. Mission
|
||||||
|
|
||||||
|
Burrow exists to build a proper VPN: fast, inspectable, deployable on infrastructure the project controls, and legible enough that future contributors can extend it without guesswork.
|
||||||
|
|
||||||
|
2. Commitments
|
||||||
|
|
||||||
|
- Protocol work must favor correctness over novelty. Burrow does not claim support for a transport or control-plane feature until the wire format, state handling, and recovery behavior are implemented and tested.
|
||||||
|
- Security is a design constraint, not a cleanup phase. Key material, bootstrap credentials, control-plane tokens, and routing policy must have explicit storage and rotation paths.
|
||||||
|
- Performance matters. Burrow should avoid needless copies, hidden blocking, and ad hoc process graphs that make packet forwarding or control-plane convergence harder to reason about.
|
||||||
|
- Source, infrastructure, and release logic live in the repository. If the forge cannot be rebuilt from the tree, the work is incomplete.
|
||||||
|
- Non-trivial changes require a Burrow Evolution Proposal. Durable rationale belongs in the repository, not only in chat.
|
||||||
|
|
||||||
|
3. Infrastructure
|
||||||
|
|
||||||
|
Burrow controls its own forge, runners, deployment automation, and edge configuration for `burrow.net` and `burrow.rs`.
|
||||||
|
|
||||||
|
- Dedicated compute is preferred over SaaS dependencies when the dependency would hold release, source, or identity authority.
|
||||||
|
- Secrets may be bootstrapped from local intake for initial bring-up, but long-lived operation must converge on encrypted, versioned secret handling.
|
||||||
|
- Production access must be attributable. Automation identities, SSH keys, and service accounts must be named and documented.
|
||||||
|
|
||||||
|
4. Contributors
|
||||||
|
|
||||||
|
- Read this constitution before drafting product, protocol, or infrastructure changes.
|
||||||
|
- Capture intent, testing expectations, and rollback procedures in proposals.
|
||||||
|
- Prefer reversible migrations. If a change is destructive, document the preconditions and teardown plan first.
|
||||||
|
- Security-sensitive work requires explicit reviewer attention, even when the implementation is performed by an agent.
|
||||||
|
|
||||||
|
5. Governance
|
||||||
|
|
||||||
|
- Burrow Evolution Proposals (BEPs) are the primary design record for architectural, protocol, forge, and deployment changes.
|
||||||
|
- Accepted proposals are authoritative until superseded.
|
||||||
|
- Constitutional changes require a dedicated proposal that quotes the affected text and records the decision.
|
||||||
|
|
||||||
|
6. Origin
|
||||||
|
|
||||||
|
Burrow started as a firewall-burrowing client and now carries its own transport, daemon, mesh, and control-plane work. This constitution exists so the project can finish that evolution coherently.
|
||||||
4292
Cargo.lock
generated
4292
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
55
Makefile
55
Makefile
|
|
@ -1,21 +1,64 @@
|
||||||
|
FLAKE ?= .
|
||||||
|
AGENIX ?= nix run ${FLAKE}\#agenix --
|
||||||
|
|
||||||
|
SECRETS := forgejo/admin-password \
|
||||||
|
forgejo/agent-ssh-key \
|
||||||
|
forgejo/nsc-token \
|
||||||
|
forgejo/nsc-dispatcher-config \
|
||||||
|
forgejo/nsc-autoscaler-config \
|
||||||
|
cloudflare/api-token \
|
||||||
|
hetzner/api-token \
|
||||||
|
forwardemail/api-token \
|
||||||
|
forwardemail/hetzner-s3-user \
|
||||||
|
forwardemail/hetzner-s3-secret
|
||||||
|
|
||||||
tun := $(shell ifconfig -l | sed 's/ /\n/g' | grep utun | tail -n 1)
|
tun := $(shell ifconfig -l | sed 's/ /\n/g' | grep utun | tail -n 1)
|
||||||
cargo_console := RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features
|
cargo_console := env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features --
|
||||||
cargo_norm := RUST_BACKTRACE=1 RUST_LOG=debug cargo run
|
cargo_norm := env RUST_BACKTRACE=1 RUST_LOG=debug cargo run --
|
||||||
|
sudo_cargo_console := sudo -E env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features --
|
||||||
|
sudo_cargo_norm := sudo -E env RUST_BACKTRACE=1 RUST_LOG=debug cargo run --
|
||||||
|
|
||||||
|
.PHONY: secret secret-file secrets-list
|
||||||
|
|
||||||
|
secret:
|
||||||
|
@if [ -z "${name}" ]; then \
|
||||||
|
printf 'Usage: make secret name=<secret-path>\nAvailable secrets:\n %s\n' "${SECRETS}"; \
|
||||||
|
exit 1; \
|
||||||
|
fi
|
||||||
|
${AGENIX} -e secrets/${name}.age
|
||||||
|
|
||||||
|
secret-file:
|
||||||
|
@if [ -z "${name}" ]; then \
|
||||||
|
printf 'Usage: make secret-file name=<secret-path> file=<source-file>\nAvailable secrets:\n %s\n' "${SECRETS}"; \
|
||||||
|
exit 1; \
|
||||||
|
fi
|
||||||
|
@if [ -z "${file}" ]; then \
|
||||||
|
printf 'Usage: make secret-file name=<secret-path> file=<source-file>\n'; \
|
||||||
|
exit 1; \
|
||||||
|
fi
|
||||||
|
@if [ ! -f "${file}" ]; then \
|
||||||
|
printf 'Source file "%s" not found.\n' "${file}"; \
|
||||||
|
exit 1; \
|
||||||
|
fi
|
||||||
|
SECRET_SOURCE_FILE="${file}" EDITOR="${PWD}/Scripts/agenix-load-file.sh" ${AGENIX} -e secrets/${name}.age </dev/tty
|
||||||
|
|
||||||
|
secrets-list:
|
||||||
|
@printf '%s\n' ${SECRETS}
|
||||||
|
|
||||||
check:
|
check:
|
||||||
@cargo check
|
@cargo check
|
||||||
|
|
||||||
build:
|
build:
|
||||||
@cargo run build
|
@cargo build
|
||||||
|
|
||||||
daemon-console:
|
daemon-console:
|
||||||
@$(cargo_console) daemon
|
@$(sudo_cargo_console) daemon
|
||||||
|
|
||||||
daemon:
|
daemon:
|
||||||
@$(cargo_norm) daemon
|
@$(sudo_cargo_norm) daemon
|
||||||
|
|
||||||
start:
|
start:
|
||||||
@$(cargo_norm) start
|
@$(sudo_cargo_norm) start
|
||||||
|
|
||||||
stop:
|
stop:
|
||||||
@$(cargo_norm) stop
|
@$(cargo_norm) stop
|
||||||
|
|
|
||||||
11
README.md
11
README.md
|
|
@ -5,10 +5,19 @@
|
||||||
Burrow is an open source tool for burrowing through firewalls, built by teenagers at [Hack Club](https://hackclub.com/).
|
Burrow is an open source tool for burrowing through firewalls, built by teenagers at [Hack Club](https://hackclub.com/).
|
||||||
|
|
||||||
`burrow` provides a simple command-line tool to open virtual interfaces and direct traffic through them.
|
`burrow` provides a simple command-line tool to open virtual interfaces and direct traffic through them.
|
||||||
|
Routine verification now runs unprivileged with `cargo test --workspace --all-features`; only tunnel startup needs elevation.
|
||||||
|
|
||||||
|
The repository now carries its own design and deployment record:
|
||||||
|
|
||||||
|
- [Constitution](./CONSTITUTION.md)
|
||||||
|
- [Burrow Evolution](./evolution/README.md)
|
||||||
|
- [WireGuard Rust Lineage](./docs/WIREGUARD_LINEAGE.md)
|
||||||
|
- [Protocol Roadmap](./docs/PROTOCOL_ROADMAP.md)
|
||||||
|
- [Forward Email Runbook](./docs/FORWARDEMAIL.md)
|
||||||
|
|
||||||
## Contributing
|
## Contributing
|
||||||
|
|
||||||
Burrow is fully open source, you can fork the repo and start contributing easily. For more information and in-depth discussions, visit the `#burrow` channel on the [Hack Club Slack](https://hackclub.com/slack/), here you can ask for help and talk with other people interested in burrow! Checkout [GETTING_STARTED.md](./docs/GETTING_STARTED.md) for build instructions and [GTK_APP.md](./docs/GTK_APP.md) for the Linux app.
|
Burrow is fully open source, you can fork the repo and start contributing easily. For more information and in-depth discussions, visit the `#burrow` channel on the [Hack Club Slack](https://hackclub.com/slack/), here you can ask for help and talk with other people interested in burrow. Checkout [GETTING_STARTED.md](./docs/GETTING_STARTED.md) for build instructions and [GTK_APP.md](./docs/GTK_APP.md) for the Linux app. Forge and deployment scaffolding live in [`flake.nix`](./flake.nix), [`nixos/`](./nixos), and [`.forgejo/workflows/`](./.forgejo/workflows/). Hosted mail backup operations live in [`docs/FORWARDEMAIL.md`](./docs/FORWARDEMAIL.md) and [`Tools/forwardemail-custom-s3.sh`](./Tools/forwardemail-custom-s3.sh).
|
||||||
|
|
||||||
The project structure is divided in the following folders:
|
The project structure is divided in the following folders:
|
||||||
|
|
||||||
|
|
|
||||||
95
Scripts/_burrow-flake.sh
Executable file
95
Scripts/_burrow-flake.sh
Executable file
|
|
@ -0,0 +1,95 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
burrow_require_cmd() {
|
||||||
|
if ! command -v "$1" >/dev/null 2>&1; then
|
||||||
|
echo "missing required command: $1" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
burrow_cleanup_flake_tmpdirs() {
|
||||||
|
if [[ "${#BURROW_FLAKE_TMPDIRS[@]}" -eq 0 ]]; then
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
rm -rf "${BURROW_FLAKE_TMPDIRS[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
burrow_prepare_flake_ref() {
|
||||||
|
local input="${1:-.}"
|
||||||
|
|
||||||
|
case "${input}" in
|
||||||
|
path:*|git+*|github:*|tarball+*|http://*|https://*)
|
||||||
|
printf '%s\n' "${input}"
|
||||||
|
return 0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
local resolved
|
||||||
|
resolved="$(cd "${input}" && pwd)"
|
||||||
|
|
||||||
|
local cache_root="${HOME}/.cache/burrow"
|
||||||
|
mkdir -p "${cache_root}"
|
||||||
|
|
||||||
|
local copy_root
|
||||||
|
copy_root="$(mktemp -d "${cache_root}/flake-XXXXXX")"
|
||||||
|
mkdir -p "${copy_root}/repo"
|
||||||
|
|
||||||
|
rsync -a \
|
||||||
|
--delete \
|
||||||
|
--exclude '.git' \
|
||||||
|
--exclude '.direnv' \
|
||||||
|
--exclude 'result' \
|
||||||
|
--exclude 'burrow.sock' \
|
||||||
|
--exclude 'node_modules' \
|
||||||
|
--exclude 'target' \
|
||||||
|
--exclude 'build' \
|
||||||
|
"${resolved}/" "${copy_root}/repo/"
|
||||||
|
|
||||||
|
BURROW_FLAKE_TMPDIRS+=("${copy_root}")
|
||||||
|
printf 'path:%s/repo\n' "${copy_root}"
|
||||||
|
}
|
||||||
|
|
||||||
|
burrow_resolve_image_artifact() {
|
||||||
|
local store_path="$1"
|
||||||
|
|
||||||
|
if [[ -f "${store_path}" ]]; then
|
||||||
|
printf '%s\n' "${store_path}"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -d "${store_path}" ]]; then
|
||||||
|
local candidate
|
||||||
|
candidate="$(
|
||||||
|
find "${store_path}" -type f \
|
||||||
|
\( -name '*.raw' -o -name '*.raw.*' -o -name '*.img' -o -name '*.img.*' \) \
|
||||||
|
| sort \
|
||||||
|
| head -n1
|
||||||
|
)"
|
||||||
|
if [[ -n "${candidate}" ]]; then
|
||||||
|
printf '%s\n' "${candidate}"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "unable to locate disk image artifact under ${store_path}" >&2
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
burrow_detect_compression() {
|
||||||
|
local artifact="$1"
|
||||||
|
|
||||||
|
case "${artifact}" in
|
||||||
|
*.bz2)
|
||||||
|
printf 'bz2\n'
|
||||||
|
;;
|
||||||
|
*.xz)
|
||||||
|
printf 'xz\n'
|
||||||
|
;;
|
||||||
|
*.zst|*.zstd)
|
||||||
|
printf 'zstd\n'
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
printf '\n'
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
131
Scripts/_burrow-secrets.sh
Normal file
131
Scripts/_burrow-secrets.sh
Normal file
|
|
@ -0,0 +1,131 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
BURROW_SECRET_TMPFILES=()
|
||||||
|
|
||||||
|
# Convert an absolute secret path into a repo-relative path when it lives
# under the given repository root; otherwise print it unchanged. agenix
# expects paths relative to the repo root.
#
# Fix: the prefix-removal expansion must quote ${repo_root} so glob
# metacharacters in the path ([, *, ?) are matched literally (SC2295).
# Previously a repo root like /tmp/r[ab] matched the case guard (quoted)
# but failed the unquoted strip, returning the absolute path unchanged.
burrow_secret_repo_path() {
  local repo_root="$1"
  local secret_path="$2"

  case "${secret_path}" in
    "${repo_root}"/*)
      # Quoted prefix: strip the literal "<repo_root>/" only.
      printf '%s\n' "${secret_path#"${repo_root}"/}"
      ;;
    *)
      printf '%s\n' "${secret_path}"
      ;;
  esac
}
|
||||||
|
|
||||||
|
# Print the first existing age/SSH identity file, checking environment
# overrides first and well-known key locations last. Prints nothing and
# still returns 0 when no candidate exists; callers test for non-empty
# output.
burrow_agenix_identity_path() {
  local repo_root="$1"
  local -a search_order=(
    "${BURROW_AGE_IDENTITY:-}"
    "${BURROW_FORGE_SSH_KEY:-}"
    "${repo_root}/intake/agent_at_burrow_net_ed25519"
    "${HOME}/.ssh/agent_at_burrow_net_ed25519"
    "${HOME}/.ssh/id_ed25519"
  )
  local key_path

  for key_path in "${search_order[@]}"; do
    # Skip unset overrides and missing files.
    [[ -n "${key_path}" && -f "${key_path}" ]] || continue
    printf '%s\n' "${key_path}"
    return 0
  done
}
|
||||||
|
|
||||||
|
# Delete every temp file registered in BURROW_SECRET_TMPFILES and reset
# the registry. Tolerates an unset/empty array (set -u safe via ":-")
# and files that were already removed.
burrow_cleanup_secret_tmpfiles() {
  local tmp_path
  for tmp_path in "${BURROW_SECRET_TMPFILES[@]:-}"; do
    if [[ -n "${tmp_path}" ]]; then
      rm -f "${tmp_path}" >/dev/null 2>&1 || true
    fi
  done
  BURROW_SECRET_TMPFILES=()
}
|
||||||
|
|
||||||
|
# Decrypt an agenix secret into a 0600 temp file and print the temp path.
# The file is registered in BURROW_SECRET_TMPFILES so the caller's EXIT
# trap removes it. Returns 1 when the encrypted file is missing.
#
# Fixes relative to the previous version:
#  - the temp file is registered for cleanup BEFORE decryption, so a
#    failed `nix run` no longer leaks an (empty) temp file in $TMPDIR;
#  - the two near-identical nix invocations are collapsed into one by
#    building the agenix argument list in an array.
burrow_decrypt_age_secret_to_temp() {
  local repo_root="$1"
  local secret_path="$2"
  local agenix_path
  local identity_path
  local tmp_file
  local -a agenix_args

  if [[ ! -f "${secret_path}" ]]; then
    echo "age secret not found: ${secret_path}" >&2
    return 1
  fi

  agenix_path="$(burrow_secret_repo_path "${repo_root}" "${secret_path}")"
  identity_path="$(burrow_agenix_identity_path "${repo_root}")"

  tmp_file="$(mktemp "${TMPDIR:-/tmp}/burrow-secret.XXXXXX")"
  # Register immediately: even a failed decryption leaves no stray file.
  BURROW_SECRET_TMPFILES+=("${tmp_file}")

  agenix_args=(-d "${agenix_path}")
  # Only pass -i when an identity file was actually found.
  if [[ -n "${identity_path}" ]]; then
    agenix_args+=(-i "${identity_path}")
  fi

  nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- "${agenix_args[@]}" > "${tmp_file}"
  chmod 600 "${tmp_file}"
  printf '%s\n' "${tmp_file}"
}
|
||||||
|
|
||||||
|
# Resolve a secret to a readable file path, trying in order:
#   1. an explicit path (must be non-empty, else hard error),
#   2. an agenix-encrypted file (decrypted to a registered temp file),
#   3. a plaintext intake file,
#   4. an optional fallback path.
# Prints the chosen path on stdout; returns 1 when nothing resolves.
#
# Fix: the age branch previously ran `return 0` unconditionally after the
# decrypt call, so a failed decryption reported success with empty output
# and callers only noticed much later. A bare `return` now propagates the
# decrypt helper's exit status.
burrow_resolve_secret_file() {
  local repo_root="$1"
  local explicit_path="$2"
  local intake_path="$3"
  local age_path="$4"
  local fallback_path="${5:-}"

  # An explicit path is authoritative: missing/empty is a hard error.
  if [[ -n "${explicit_path}" ]]; then
    if [[ ! -s "${explicit_path}" ]]; then
      echo "required file missing or empty: ${explicit_path}" >&2
      return 1
    fi
    printf '%s\n' "${explicit_path}"
    return 0
  fi

  if [[ -n "${age_path}" && -f "${age_path}" ]]; then
    burrow_decrypt_age_secret_to_temp "${repo_root}" "${age_path}"
    # Bare return propagates the decrypt helper's exit status.
    return
  fi

  if [[ -n "${intake_path}" && -s "${intake_path}" ]]; then
    printf '%s\n' "${intake_path}"
    return 0
  fi

  if [[ -n "${fallback_path}" && -s "${fallback_path}" ]]; then
    printf '%s\n' "${fallback_path}"
    return 0
  fi

  return 1
}
|
||||||
|
|
||||||
|
# Re-encrypt an agenix secret in place from a plaintext source file.
# The existing ciphertext (if any) is backed up first and restored when
# encryption fails, so a broken `nix run` never leaves the secret missing.
# Returns 1 on a missing/empty source or a failed encryption.
burrow_encrypt_secret_from_file() {
  local repo_root="$1"
  local secret_path="$2"
  local source_path="$3"
  local agenix_path
  local backup_file=""

  if [[ ! -s "${source_path}" ]]; then
    echo "secret source missing or empty: ${source_path}" >&2
    return 1
  fi
  # agenix addresses secrets by repo-relative path.
  agenix_path="$(burrow_secret_repo_path "${repo_root}" "${secret_path}")"
  # Preserve the current ciphertext so a failed run can be rolled back.
  if [[ -f "${secret_path}" ]]; then
    backup_file="$(mktemp "${TMPDIR:-/tmp}/burrow-secret-backup.XXXXXX")"
    cp "${secret_path}" "${backup_file}"
  fi
  # Remove the old ciphertext first so agenix writes a fresh file.
  rm -f "${secret_path}"

  # agenix -e reads the plaintext on stdin (no interactive editor).
  if ! nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -e "${agenix_path}" < "${source_path}"; then
    # Roll back to the previous ciphertext on failure.
    if [[ -n "${backup_file}" && -f "${backup_file}" ]]; then
      mv "${backup_file}" "${secret_path}"
    fi
    return 1
  fi

  # Success: the backup is no longer needed.
  [[ -n "${backup_file}" ]] && rm -f "${backup_file}"
}
|
||||||
22
Scripts/agenix-load-file.sh
Executable file
22
Scripts/agenix-load-file.sh
Executable file
|
|
@ -0,0 +1,22 @@
|
||||||
|
#!/usr/bin/env bash
# Non-interactive agenix "editor": copies a pre-staged plaintext file
# (named by SECRET_SOURCE_FILE) into the destination file passed as the
# last argument — presumably the scratch file agenix hands its EDITOR;
# confirm against the agenix invocation.
set -euo pipefail

if (( $# < 1 )); then
  echo "Usage: agenix-load-file.sh <destination-file>" >&2
  exit 1
fi

# The destination is always the final positional argument.
target="${!#}"
plaintext="${SECRET_SOURCE_FILE:-}"

if [[ -z "$plaintext" ]]; then
  echo "SECRET_SOURCE_FILE is not set; point it at the source file to encrypt." >&2
  exit 1
fi

if [[ ! -f "$plaintext" ]]; then
  echo "Source file '$plaintext' does not exist." >&2
  exit 1
fi

cp "$plaintext" "$target"
|
||||||
138
Scripts/bootstrap-forge-intake.sh
Normal file
138
Scripts/bootstrap-forge-intake.sh
Normal file
|
|
@ -0,0 +1,138 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Resolve the script and repo locations, then pull in the shared secret
# helpers (burrow_resolve_secret_file, burrow_cleanup_secret_tmpfiles, ...).
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# shellcheck source=Scripts/_burrow-secrets.sh
source "${SCRIPT_DIR}/_burrow-secrets.sh"

# Print CLI help. Quoted heredoc ('EOF') so nothing inside it expands.
usage() {
  cat <<'EOF'
Usage: Scripts/bootstrap-forge-intake.sh [options]

Copy the minimum Burrow forge bootstrap secrets onto the target host under
/var/lib/burrow/intake with the ownership expected by the NixOS services.
Legacy path only: the current forge runtime consumes agenix secrets directly.

Options:
  --host <user@host>       SSH target (default: root@git.burrow.net)
  --ssh-key <path>         SSH private key used to reach the host
                           (default: secrets/forgejo/agent-ssh-key.age, then intake/)
  --password-file <path>   Forgejo admin bootstrap password file
                           (default: secrets/forgejo/admin-password.age, then intake/)
  --agent-key-file <path>  Agent SSH private key copied for runner bootstrap
                           (default: secrets/forgejo/agent-ssh-key.age, then intake/)
  --no-verify              Skip remote ls/stat verification after install
  -h, --help               Show this help text
EOF
}
|
||||||
|
|
||||||
|
# Defaults, all overridable via environment or CLI flags below.
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-}"
PASSWORD_FILE="${BURROW_FORGE_PASSWORD_FILE:-}"
AGENT_KEY_FILE="${BURROW_FORGE_AGENT_KEY_FILE:-}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
VERIFY=1

# EXIT handler for decrypted secret temp files; NOTE(review): this handler
# is redefined later in the script once remote_tmp exists.
cleanup() {
  burrow_cleanup_secret_tmpfiles
}
trap cleanup EXIT

# Standard flag loop; ${2:?...} aborts with a message on a missing value.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --host)
      HOST="${2:?missing value for --host}"
      shift 2
      ;;
    --ssh-key)
      SSH_KEY="${2:?missing value for --ssh-key}"
      shift 2
      ;;
    --password-file)
      PASSWORD_FILE="${2:?missing value for --password-file}"
      shift 2
      ;;
    --agent-key-file)
      AGENT_KEY_FILE="${2:?missing value for --agent-key-file}"
      shift 2
      ;;
    --no-verify)
      VERIFY=0
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      # 64 = EX_USAGE.
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done

mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"

# Resolve each secret: explicit flag > intake plaintext > agenix > fallback.
SSH_KEY="$(
  burrow_resolve_secret_file \
    "${REPO_ROOT}" \
    "${SSH_KEY}" \
    "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \
    "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \
    "${HOME}/.ssh/agent_at_burrow_net_ed25519"
)"
PASSWORD_FILE="$(
  burrow_resolve_secret_file \
    "${REPO_ROOT}" \
    "${PASSWORD_FILE}" \
    "${REPO_ROOT}/intake/forgejo_pass_contact_at_burrow_net.txt" \
    "${REPO_ROOT}/secrets/forgejo/admin-password.age"
)"
AGENT_KEY_FILE="$(
  burrow_resolve_secret_file \
    "${REPO_ROOT}" \
    "${AGENT_KEY_FILE}" \
    "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \
    "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \
    "${HOME}/.ssh/agent_at_burrow_net_ed25519"
)"

# Shared SSH options: pinned identity plus a dedicated known_hosts file so
# the system-wide one is never touched.
ssh_opts=(
  -i "${SSH_KEY}"
  -o IdentitiesOnly=yes
  -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}"
  -o StrictHostKeyChecking=accept-new
)

# Stage area on the remote host for the copied secrets.
remote_tmp="$(ssh "${ssh_opts[@]}" "${HOST}" "mktemp -d")"
|
||||||
|
# EXIT handler. This definition REPLACES the earlier cleanup() that only
# called burrow_cleanup_secret_tmpfiles, so it must keep doing that work
# too — otherwise decrypted plaintext secrets would be left behind in
# $TMPDIR on exit (the bug this version fixes). It additionally removes
# the remote staging directory, best-effort.
cleanup() {
  if [[ -n "${remote_tmp:-}" ]]; then
    ssh "${ssh_opts[@]}" "${HOST}" "rm -rf '${remote_tmp}'" >/dev/null 2>&1 || true
  fi
  burrow_cleanup_secret_tmpfiles
}
|
||||||
|
# Re-arm the EXIT trap with the redefined cleanup handler.
trap cleanup EXIT

# Stage both secret files into the remote temp directory.
scp "${ssh_opts[@]}" \
  "${PASSWORD_FILE}" \
  "${AGENT_KEY_FILE}" \
  "${HOST}:${remote_tmp}/"

# Install into /var/lib/burrow/intake with the ownership the NixOS
# services expect. ${remote_tmp} and $(basename ...) expand LOCALLY
# before the command string is sent to the remote shell.
ssh "${ssh_opts[@]}" "${HOST}" "
  set -euo pipefail
  install -d -m 0755 /var/lib/burrow/intake
  install -m 0400 -o forgejo -g forgejo '${remote_tmp}/$(basename "${PASSWORD_FILE}")' /var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt
  install -m 0400 -o root -g root '${remote_tmp}/$(basename "${AGENT_KEY_FILE}")' /var/lib/burrow/intake/agent_at_burrow_net_ed25519
"

# Optional post-install verification (skipped with --no-verify).
if [[ "${VERIFY}" -eq 1 ]]; then
  ssh "${ssh_opts[@]}" "${HOST}" "
    set -euo pipefail
    ls -l \
      /var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt \
      /var/lib/burrow/intake/agent_at_burrow_net_ed25519
  "
fi

echo "Burrow forge bootstrap intake sync complete (host=${HOST})."
|
||||||
157
Scripts/check-forge-host.sh
Executable file
157
Scripts/check-forge-host.sh
Executable file
|
|
@ -0,0 +1,157 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Resolve script/repo locations and load the shared secret helpers.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# shellcheck source=Scripts/_burrow-secrets.sh
source "${SCRIPT_DIR}/_burrow-secrets.sh"

# Print CLI help. Quoted heredoc ('EOF') so nothing inside it expands.
usage() {
  cat <<'EOF'
Usage: Scripts/check-forge-host.sh [options]

Run a post-boot verification pass against the Burrow forge host.

Options:
  --host <user@host>  SSH target (default: root@git.burrow.net)
  --ssh-key <path>    SSH private key (default: secrets/forgejo/agent-ssh-key.age, then intake/)
  --expect-nsc        Fail if forgejo-nsc services are not active
  -h, --help          Show this help text
EOF
}
|
||||||
|
|
||||||
|
# Defaults, overridable via environment or CLI flags.
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
EXPECT_NSC=0

# Remove any decrypted secret temp files on exit.
cleanup() {
  burrow_cleanup_secret_tmpfiles
}
trap cleanup EXIT

while [[ $# -gt 0 ]]; do
  case "$1" in
    --host)
      HOST="${2:?missing value for --host}"
      shift 2
      ;;
    --ssh-key)
      SSH_KEY="${2:?missing value for --ssh-key}"
      shift 2
      ;;
    --expect-nsc)
      EXPECT_NSC=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      # 64 = EX_USAGE.
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done

mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"

# Resolve the SSH key: explicit flag > intake plaintext > agenix > ~/.ssh.
SSH_KEY="$(
  burrow_resolve_secret_file \
    "${REPO_ROOT}" \
    "${SSH_KEY}" \
    "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \
    "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \
    "${HOME}/.ssh/agent_at_burrow_net_ed25519"
)" || {
  echo "forge SSH key could not be resolved" >&2
  exit 1
}
|
||||||
|
|
||||||
|
# Run the verification payload on the forge host. EXPECT_NSC is passed as
# an environment assignment on the remote command line; the payload is a
# quoted heredoc ('EOF'), so nothing in it expands locally — it executes
# verbatim under the remote `bash -s`.
ssh \
  -i "${SSH_KEY}" \
  -o IdentitiesOnly=yes \
  -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \
  -o StrictHostKeyChecking=accept-new \
  "${HOST}" \
  EXPECT_NSC="${EXPECT_NSC}" \
  'bash -s' <<'EOF'
set -euo pipefail

base_services=(
  forgejo.service
  caddy.service
  burrow-forgejo-bootstrap.service
  burrow-forgejo-runner-bootstrap.service
  burrow-forgejo-runner.service
)

nsc_services=(
  forgejo-nsc-dispatcher.service
  forgejo-nsc-autoscaler.service
)

show_service() {
  local service="$1"
  systemctl show \
    --no-pager \
    --property Id \
    --property LoadState \
    --property UnitFileState \
    --property ActiveState \
    --property SubState \
    --property Result \
    "${service}"
}

service_is_healthy() {
  local service="$1"
  local active_state
  local result
  local unit_type

  active_state="$(systemctl show --property ActiveState --value "${service}")"
  result="$(systemctl show --property Result --value "${service}")"
  unit_type="$(systemctl show --property Type --value "${service}")"

  if [[ "${active_state}" == "active" ]]; then
    return 0
  fi

  if [[ "${unit_type}" == "oneshot" && "${active_state}" == "inactive" && "${result}" == "success" ]]; then
    return 0
  fi

  return 1
}

for service in "${base_services[@]}"; do
  echo "== ${service} =="
  show_service "${service}"
  if ! service_is_healthy "${service}"; then
    echo "required service is not active: ${service}" >&2
    exit 1
  fi
done

for service in "${nsc_services[@]}"; do
  echo "== ${service} =="
  show_service "${service}" || true
  if [[ "${EXPECT_NSC}" == "1" && "$(systemctl is-active "${service}" 2>/dev/null || true)" != "active" ]]; then
    echo "required NSC service is not active: ${service}" >&2
    exit 1
  fi
done

echo "== intake =="
ls -l /var/lib/burrow/intake || true

if command -v curl >/dev/null 2>&1; then
  echo "== http-local =="
  curl -fsS -o /dev/null -w 'forgejo_login %{http_code}\n' http://127.0.0.1:3000/user/login
  curl -fsS -o /dev/null -H 'Host: burrow.net' -w 'burrow_root %{http_code}\n' http://127.0.0.1/
  curl -fsS -o /dev/null -H 'Host: git.burrow.net' -w 'git_login %{http_code}\n' http://127.0.0.1/user/login
fi
EOF
|
||||||
180
Scripts/cloudflare-upsert-a-record.sh
Executable file
180
Scripts/cloudflare-upsert-a-record.sh
Executable file
|
|
@ -0,0 +1,180 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Resolve script/repo locations and load the shared secret helpers.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# shellcheck source=Scripts/_burrow-secrets.sh
source "${SCRIPT_DIR}/_burrow-secrets.sh"

# Print CLI help. Quoted heredoc ('EOF') so nothing inside it expands.
usage() {
  cat <<'EOF'
Usage: Scripts/cloudflare-upsert-a-record.sh --zone <zone> --name <fqdn> --ipv4 <address> [options]

Upsert a DNS-only or proxied Cloudflare A record without putting the API token on
the process list.

Options:
  --zone <zone>           Cloudflare zone name, for example burrow.net
  --name <fqdn>           Fully-qualified DNS record name
  --ipv4 <address>        IPv4 address for the A record
  --token-file <path>     Cloudflare API token file
                          default: secrets/cloudflare/api-token.age, then intake/cloudflare-token.txt
  --ttl <seconds|auto>    Record TTL, or auto
                          default: auto
  --proxied <true|false>  Whether to proxy through Cloudflare
                          default: false
  -h, --help              Show this help
EOF
}
|
||||||
|
|
||||||
|
# Required flags start empty and are validated after parsing.
ZONE_NAME=""
RECORD_NAME=""
IPV4=""
TOKEN_FILE="${CLOUDFLARE_TOKEN_FILE:-}"
TTL_VALUE="auto"
PROXIED="false"

# Remove any decrypted secret temp files on exit.
cleanup() {
  burrow_cleanup_secret_tmpfiles
}
trap cleanup EXIT

while [[ $# -gt 0 ]]; do
  case "$1" in
    --zone)
      ZONE_NAME="${2:?missing value for --zone}"
      shift 2
      ;;
    --name)
      RECORD_NAME="${2:?missing value for --name}"
      shift 2
      ;;
    --ipv4)
      IPV4="${2:?missing value for --ipv4}"
      shift 2
      ;;
    --token-file)
      TOKEN_FILE="${2:?missing value for --token-file}"
      shift 2
      ;;
    --ttl)
      TTL_VALUE="${2:?missing value for --ttl}"
      shift 2
      ;;
    --proxied)
      PROXIED="${2:?missing value for --proxied}"
      shift 2
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "Unknown argument: $1" >&2
      usage >&2
      exit 2
      ;;
  esac
done

# --zone, --name and --ipv4 are all mandatory.
if [[ -z "${ZONE_NAME}" || -z "${RECORD_NAME}" || -z "${IPV4}" ]]; then
  usage >&2
  exit 2
fi
|
||||||
|
# Resolve the token file: explicit flag > intake plaintext > agenix.
TOKEN_FILE="$(
  burrow_resolve_secret_file \
    "${REPO_ROOT}" \
    "${TOKEN_FILE}" \
    "${REPO_ROOT}/intake/cloudflare-token.txt" \
    "${REPO_ROOT}/secrets/cloudflare/api-token.age"
)" || {
  echo "Cloudflare token file could not be resolved" >&2
  exit 1
}

# Shape check only: each octet is 1-3 digits (values >255 are not rejected).
if [[ ! "${IPV4}" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
  echo "Invalid IPv4 address: ${IPV4}" >&2
  exit 1
fi

case "${PROXIED}" in
  true|false)
    ;;
  *)
    echo "--proxied must be true or false" >&2
    exit 1
    ;;
esac

# Map the TTL flag to the JSON value Cloudflare expects: "auto" -> 1,
# a digit string -> itself; anything else (including empty) is rejected
# by the *[!0-9]* arm before the catch-all is reached.
case "${TTL_VALUE}" in
  auto)
    TTL_JSON=1
    ;;
  ''|*[!0-9]*)
    echo "--ttl must be a number of seconds or auto" >&2
    exit 1
    ;;
  *)
    TTL_JSON="${TTL_VALUE}"
    ;;
esac

# Strip any trailing newline/CR so the Authorization header is clean.
TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")"
if [[ -z "${TOKEN}" ]]; then
  echo "Cloudflare token file is empty: ${TOKEN_FILE}" >&2
  exit 1
fi
|
||||||
|
|
||||||
|
# Issue one Cloudflare v4 API call and print the JSON response.
#   $1 = HTTP method, $2 = path starting with "/", $3 = optional JSON body.
# Improvement: the two near-identical curl invocations are collapsed into
# one by accumulating arguments in an array.
# NOTE(review): the bearer token still travels in curl's argv via -H, so
# it is visible on the process list despite the script's stated goal;
# moving the header into a curl --config file would close that gap —
# flagged here rather than silently changed.
cf_api() {
  local method="$1"
  local path="$2"
  local body="${3-}"
  local -a curl_args=(
    -fsS
    -X "${method}"
    -H "Authorization: Bearer ${TOKEN}"
    -H "Content-Type: application/json"
  )

  if [[ -n "${body}" ]]; then
    curl_args+=(--data "${body}")
  fi

  curl "${curl_args[@]}" "https://api.cloudflare.com/client/v4${path}"
}
|
||||||
|
|
||||||
|
# Look up the zone ID; "// empty" makes jq print nothing on a miss.
zone_lookup="$(cf_api GET "/zones?name=${ZONE_NAME}&status=active")"
zone_id="$(jq -r '.result[0].id // empty' <<<"${zone_lookup}")"

if [[ -z "${zone_id}" ]]; then
  echo "Active Cloudflare zone not found: ${ZONE_NAME}" >&2
  exit 1
fi

# Build the record payload with jq so all values are JSON-escaped;
# proxied/ttl go in as raw JSON (boolean / number) via --argjson.
payload="$(jq -cn \
  --arg type "A" \
  --arg name "${RECORD_NAME}" \
  --arg content "${IPV4}" \
  --argjson proxied "${PROXIED}" \
  --argjson ttl "${TTL_JSON}" \
  '{type: $type, name: $name, content: $content, proxied: $proxied, ttl: $ttl}')"

# Upsert: PUT over an existing record, POST a new one otherwise.
record_lookup="$(cf_api GET "/zones/${zone_id}/dns_records?type=A&name=${RECORD_NAME}")"
record_id="$(jq -r '.result[0].id // empty' <<<"${record_lookup}")"

if [[ -n "${record_id}" ]]; then
  result="$(cf_api PUT "/zones/${zone_id}/dns_records/${record_id}" "${payload}")"
  action="updated"
else
  result="$(cf_api POST "/zones/${zone_id}/dns_records" "${payload}")"
  action="created"
fi

# Summarize, or abort with the API's error list (halt_error exits jq
# non-zero, which fails the script under set -e).
jq -r --arg action "${action}" '
  if .success != true then
    .errors | tostring | halt_error(1)
  else
    "Cloudflare DNS " + $action + ": " + .result.name + " -> " + .result.content +
    " (proxied=" + (.result.proxied | tostring) + ", ttl=" + (.result.ttl | tostring) + ")"
  end
' <<<"${result}"
|
||||||
99
Scripts/forge-deploy.sh
Executable file
99
Scripts/forge-deploy.sh
Executable file
|
|
@ -0,0 +1,99 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Load the shared flake and secret helper libraries.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"

# shellcheck source=Scripts/_burrow-flake.sh
source "${SCRIPT_DIR}/_burrow-flake.sh"
# shellcheck source=Scripts/_burrow-secrets.sh
source "${SCRIPT_DIR}/_burrow-secrets.sh"

# Print CLI help. Quoted heredoc ('EOF') so nothing inside it expands.
usage() {
  cat <<'EOF'
Usage: Scripts/forge-deploy.sh [--test|--switch] [--flake-attr <attr>] [--allow-dirty]

Standardized remote deploy path for the Burrow forge host.

Defaults:
  --switch
  --flake-attr burrow-forge

Environment:
  BURROW_FORGE_HOST     root@git.burrow.net
  BURROW_FORGE_SSH_KEY  explicit path, otherwise secrets/forgejo/agent-ssh-key.age
EOF
}
|
||||||
|
|
||||||
|
# Defaults: a full activation ("switch") of the burrow-forge configuration.
MODE="switch"
FLAKE_ATTR="burrow-forge"
ALLOW_DIRTY=0
BURROW_FLAKE_TMPDIRS=()

# Remove decrypted secrets and any flake copy temp dirs on exit.
cleanup() {
  burrow_cleanup_secret_tmpfiles
  burrow_cleanup_flake_tmpdirs
}
trap cleanup EXIT

while [[ $# -gt 0 ]]; do
  case "$1" in
    --test)
      MODE="test"
      shift
      ;;
    --switch)
      MODE="switch"
      shift
      ;;
    --flake-attr)
      FLAKE_ATTR="${2:?missing value for --flake-attr}"
      shift 2
      ;;
    --allow-dirty)
      ALLOW_DIRTY=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "Unknown argument: $1" >&2
      usage >&2
      exit 2
      ;;
  esac
done

REPO_ROOT="$(git rev-parse --show-toplevel)"
cd "${REPO_ROOT}"

# Refuse dirty deploys unless explicitly overridden for incident work.
if [[ ${ALLOW_DIRTY} -ne 1 ]] && [[ -n "$(git status --short)" ]]; then
  echo "Refusing to deploy from a dirty checkout. Commit first, or pass --allow-dirty for incident-only work." >&2
  exit 1
fi

FORGE_HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
# Resolve the SSH key: explicit env > intake plaintext > agenix > ~/.ssh.
FORGE_SSH_KEY="$(
  burrow_resolve_secret_file \
    "${REPO_ROOT}" \
    "${BURROW_FORGE_SSH_KEY:-}" \
    "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \
    "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \
    "${HOME}/.ssh/agent_at_burrow_net_ed25519"
)" || {
  echo "Unable to resolve the forge SSH key." >&2
  exit 1
}

FORGE_KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
mkdir -p "$(dirname "${FORGE_KNOWN_HOSTS_FILE}")"

# NIX_SSHOPTS is how nixos-rebuild passes SSH options to its transport.
export NIX_SSHOPTS="-i ${FORGE_SSH_KEY} -o IdentitiesOnly=yes -o UserKnownHostsFile=${FORGE_KNOWN_HOSTS_FILE} -o StrictHostKeyChecking=accept-new"
flake_ref="$(burrow_prepare_flake_ref "${REPO_ROOT}")"

# Build on the target and activate there; nixos-rebuild comes from nixpkgs
# via an ephemeral `nix shell` so no local install is required.
nix --extra-experimental-features "nix-command flakes" shell nixpkgs#nixos-rebuild -c \
  nixos-rebuild "${MODE}" \
    --flake "${flake_ref}#${FLAKE_ATTR}" \
    --build-host "${FORGE_HOST}" \
    --target-host "${FORGE_HOST}"
|
||||||
144
Scripts/forgejo-prune-runners.py
Executable file
144
Scripts/forgejo-prune-runners.py
Executable file
|
|
@ -0,0 +1,144 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import pathlib
|
||||||
|
import subprocess
|
||||||
|
import time
|
||||||
|
import urllib.error
|
||||||
|
import urllib.request
|
||||||
|
|
||||||
|
|
||||||
|
def _read_token() -> str:
|
||||||
|
token = os.environ.get("FORGEJO_API_TOKEN", "").strip()
|
||||||
|
token_file = os.environ.get("FORGEJO_API_TOKEN_FILE", "").strip()
|
||||||
|
if not token and token_file:
|
||||||
|
token = pathlib.Path(token_file).read_text().strip()
|
||||||
|
if not token:
|
||||||
|
raise SystemExit("Forgejo API token is missing")
|
||||||
|
if token.startswith("PENDING-"):
|
||||||
|
raise SystemExit("Forgejo API token is pending")
|
||||||
|
return token
|
||||||
|
|
||||||
|
|
||||||
|
def _request(method: str, url: str, token: str) -> tuple[int, str]:
    """Perform one authenticated HTTP call and return (status, body text).

    HTTP error statuses are returned rather than raised so callers can
    branch on the code; non-HTTP URL errors still propagate.
    """
    request = urllib.request.Request(
        url,
        headers={"Authorization": f"token {token}", "Accept": "application/json"},
        method=method,
    )
    try:
        with urllib.request.urlopen(request, timeout=20) as response:
            return response.getcode(), response.read().decode("utf-8")
    except urllib.error.HTTPError as exc:
        return exc.code, exc.read().decode("utf-8")
|
||||||
|
|
||||||
|
|
||||||
|
def _list_runners(api_url: str, token: str, org: str | None) -> tuple[str, list[dict]]:
    """Fetch Actions runners, org-scoped when org is given, else instance-scoped.

    Returns (url, runners). A 404 maps to an empty list; other HTTP errors
    and malformed payloads raise RuntimeError.
    """
    scope = f"orgs/{org}/" if org else ""
    list_url = f"{api_url}/{scope}actions/runners"
    status, body = _request("GET", list_url, token)
    if status == 404:
        return list_url, []
    if status >= 400:
        raise RuntimeError(f"list runners failed ({status}) {body}")
    try:
        parsed = json.loads(body)
    except json.JSONDecodeError as exc:
        raise RuntimeError(f"invalid runner list response: {exc}") from exc
    if not isinstance(parsed, list):
        raise RuntimeError("runner list response is not a list")
    return list_url, parsed
|
||||||
|
|
||||||
|
|
||||||
|
def _delete_runner(api_url: str, token: str, org: str | None, runner_id: int) -> bool:
    """Delete one Actions runner; True on HTTP 200/204, else log and return False."""
    scope = f"orgs/{org}/" if org else ""
    delete_url = f"{api_url}/{scope}actions/runners/{runner_id}"
    status, body = _request("DELETE", delete_url, token)
    if status in (200, 204):
        return True
    print(f"[forgejo-prune-runners] delete {runner_id} failed: {status} {body}")
    return False
|
||||||
|
|
||||||
|
|
||||||
|
def _prune_db(ttl_seconds: int) -> int:
    """Soft-delete stale runner rows directly in Postgres; return the count.

    Marks rows in action_runner as deleted when they were last online more
    than ttl_seconds ago, or were created that long ago and never came
    online. Runs psql over the local socket as the forgejo user.
    """
    cutoff = int(time.time()) - ttl_seconds
    now = int(time.time())
    # Soft delete (set the deleted timestamp) rather than DROP, mirroring
    # how Forgejo itself retires runners; RETURNING 1 + count(*) yields
    # the number of rows touched in a single statement.
    sql = (
        "WITH updated AS ("
        "UPDATE action_runner "
        f"SET deleted = {now} "
        "WHERE (deleted IS NULL OR deleted = 0) "
        f"AND ((last_online IS NOT NULL AND last_online > 0 AND last_online < {cutoff}) "
        f"OR (COALESCE(last_online, 0) = 0 AND created < {cutoff})) "
        "RETURNING 1"
        ") SELECT count(*) FROM updated;"
    )
    # -tAc: tuples only, unaligned, run this one command — so stdout is
    # just the bare count.
    result = subprocess.run(
        ["psql", "-h", "/run/postgresql", "-U", "forgejo", "forgejo", "-tAc", sql],
        check=True,
        capture_output=True,
        text=True,
    )
    output = (result.stdout or "").strip()
    try:
        return int(output)
    except ValueError:
        # Unparseable psql output is treated as "nothing pruned".
        return 0
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> None:
    """Prune offline Forgejo Actions runners via the API, with a DB fallback.

    Behavior is driven entirely by environment variables: FORGEJO_API_URL,
    FORGEJO_ORG, FORGEJO_DRY_RUN, FORGEJO_PRUNE_DB, FORGEJO_RUNNER_TTL_SEC.
    """
    api_url = os.environ.get("FORGEJO_API_URL", "https://git.burrow.net/api/v1").rstrip("/")
    org = os.environ.get("FORGEJO_ORG", "hackclub").strip() or None
    dry_run = os.environ.get("FORGEJO_DRY_RUN", "0") == "1"
    db_only = os.environ.get("FORGEJO_PRUNE_DB", "0") == "1"
    ttl_seconds = int(os.environ.get("FORGEJO_RUNNER_TTL_SEC", "3600"))

    # DB-only mode skips the API entirely (no token needed).
    if db_only:
        removed = _prune_db(ttl_seconds)
        print(f"[forgejo-prune-runners] pruned {removed} runners via DB")
        return

    token = _read_token()

    try:
        _, runners = _list_runners(api_url, token, org)
    except RuntimeError as exc:
        # The org-scoped listing can fail; retry once at instance scope.
        if org is not None:
            print(f"[forgejo-prune-runners] org runner list failed ({exc}); retrying instance scope")
            _, runners = _list_runners(api_url, token, None)
            org = None
        else:
            raise SystemExit(str(exc))

    if not runners:
        # Empty API view: stale rows may still linger in the database.
        removed = _prune_db(ttl_seconds)
        print(f"[forgejo-prune-runners] pruned {removed} runners via DB fallback")
        return

    removed = 0
    for runner in runners:
        runner_id = runner.get("id")
        name = runner.get("name", "unknown")
        status = (runner.get("status") or "").lower()
        busy = bool(runner.get("busy"))
        # Never delete runners that are online or mid-job.
        if status == "online" or busy:
            continue
        if runner_id is None:
            continue
        if dry_run:
            print(f"[forgejo-prune-runners] would delete runner {runner_id} ({name}) status={status}")
            continue
        if _delete_runner(api_url, token, org, int(runner_id)):
            removed += 1
            print(f"[forgejo-prune-runners] deleted runner {runner_id} ({name})")

    print(f"[forgejo-prune-runners] done; removed {removed} runners")


if __name__ == "__main__":
    main()
|
||||||
347
Scripts/hcloud-upload-nixos-image.sh
Executable file
347
Scripts/hcloud-upload-nixos-image.sh
Executable file
|
|
@ -0,0 +1,347 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||||
|
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
|
||||||
|
|
||||||
|
# shellcheck source=Scripts/_burrow-flake.sh
|
||||||
|
source "${SCRIPT_DIR}/_burrow-flake.sh"
|
||||||
|
# shellcheck source=Scripts/_burrow-secrets.sh
|
||||||
|
source "${SCRIPT_DIR}/_burrow-secrets.sh"
|
||||||
|
|
||||||
|
DEFAULT_CONFIG="burrow-forge"
|
||||||
|
DEFAULT_FLAKE="."
|
||||||
|
DEFAULT_LOCATION="hel1"
|
||||||
|
DEFAULT_ARCHITECTURE="x86"
|
||||||
|
DEFAULT_TOKEN_FILE=""
|
||||||
|
|
||||||
|
CONFIG="${HCLOUD_IMAGE_CONFIG:-${DEFAULT_CONFIG}}"
|
||||||
|
FLAKE="${HCLOUD_IMAGE_FLAKE:-${DEFAULT_FLAKE}}"
|
||||||
|
LOCATION="${HCLOUD_IMAGE_LOCATION:-${DEFAULT_LOCATION}}"
|
||||||
|
ARCHITECTURE="${HCLOUD_IMAGE_ARCHITECTURE:-${DEFAULT_ARCHITECTURE}}"
|
||||||
|
TOKEN_FILE="${HCLOUD_TOKEN_FILE:-${DEFAULT_TOKEN_FILE}}"
|
||||||
|
DESCRIPTION="${HCLOUD_IMAGE_DESCRIPTION:-}"
|
||||||
|
UPLOAD_SERVER_TYPE="${HCLOUD_IMAGE_UPLOAD_SERVER_TYPE:-}"
|
||||||
|
UPLOAD_VERBOSE="${HCLOUD_IMAGE_UPLOAD_VERBOSE:-0}"
|
||||||
|
ARTIFACT_PATH_INPUT=""
|
||||||
|
OUTPUT_HASH=""
|
||||||
|
NO_UPDATE=0
|
||||||
|
BUILDER_SPEC="${HCLOUD_IMAGE_BUILDER_SPEC:-}"
|
||||||
|
EXTRA_LABELS=()
|
||||||
|
NIX_BUILD_FLAGS=()
|
||||||
|
BURROW_FLAKE_TMPDIRS=()
|
||||||
|
LOCAL_STORE_DIR=""
|
||||||
|
|
||||||
|
cleanup() {
|
||||||
|
burrow_cleanup_secret_tmpfiles
|
||||||
|
burrow_cleanup_flake_tmpdirs
|
||||||
|
}
|
||||||
|
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
usage() {
|
||||||
|
cat <<'EOF'
|
||||||
|
Usage: Scripts/hcloud-upload-nixos-image.sh [options]
|
||||||
|
|
||||||
|
Build a raw Burrow NixOS image and upload it into Hetzner Cloud as a snapshot.
|
||||||
|
|
||||||
|
Options:
|
||||||
|
--config <name> images.<name>-raw output to build (default: burrow-forge)
|
||||||
|
--flake <path> Flake path to build from (default: .)
|
||||||
|
--location <code> Hetzner location for the temporary upload server (default: hel1)
|
||||||
|
--architecture <x86|arm> CPU architecture of the image (default: x86)
|
||||||
|
--server-type <name> Hetzner server type for the temporary upload server
|
||||||
|
--token-file <path> Hetzner API token file (default: secrets/hetzner/api-token.age, then intake/hetzner-api-token.txt)
|
||||||
|
--artifact-path <path> Prebuilt raw image artifact to upload directly
|
||||||
|
--output-hash <hash> Stable hash label for --artifact-path uploads
|
||||||
|
--builder-spec <string> Complete builders string passed to nix build
|
||||||
|
--description <text> Description for the resulting snapshot
|
||||||
|
--upload-verbose <n> Pass -v N times to hcloud-upload-image
|
||||||
|
--label key=value Extra Hetzner image label (repeatable)
|
||||||
|
--nix-flag <arg> Extra argument passed to nix build (repeatable)
|
||||||
|
--no-update Reuse an existing snapshot with the same config/output hash
|
||||||
|
-h, --help Show this help text
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case "$1" in
|
||||||
|
--config)
|
||||||
|
CONFIG="${2:?missing value for --config}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--flake)
|
||||||
|
FLAKE="${2:?missing value for --flake}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--location)
|
||||||
|
LOCATION="${2:?missing value for --location}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--architecture)
|
||||||
|
ARCHITECTURE="${2:?missing value for --architecture}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--server-type)
|
||||||
|
UPLOAD_SERVER_TYPE="${2:?missing value for --server-type}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--token-file)
|
||||||
|
TOKEN_FILE="${2:?missing value for --token-file}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--artifact-path)
|
||||||
|
ARTIFACT_PATH_INPUT="${2:?missing value for --artifact-path}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--output-hash)
|
||||||
|
OUTPUT_HASH="${2:?missing value for --output-hash}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--builder-spec)
|
||||||
|
BUILDER_SPEC="${2:?missing value for --builder-spec}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--description)
|
||||||
|
DESCRIPTION="${2:?missing value for --description}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--upload-verbose)
|
||||||
|
UPLOAD_VERBOSE="${2:?missing value for --upload-verbose}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--label)
|
||||||
|
EXTRA_LABELS+=("${2:?missing value for --label}")
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--nix-flag)
|
||||||
|
NIX_BUILD_FLAGS+=("${2:?missing value for --nix-flag}")
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--no-update)
|
||||||
|
NO_UPDATE=1
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
usage
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "unknown option: $1" >&2
|
||||||
|
usage >&2
|
||||||
|
exit 64
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
TOKEN_FILE="$(
|
||||||
|
burrow_resolve_secret_file \
|
||||||
|
"${REPO_ROOT}" \
|
||||||
|
"${TOKEN_FILE}" \
|
||||||
|
"${REPO_ROOT}/intake/hetzner-api-token.txt" \
|
||||||
|
"${REPO_ROOT}/secrets/hetzner/api-token.age"
|
||||||
|
)" || {
|
||||||
|
echo "Hetzner API token file could not be resolved" >&2
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
cleanup() {
|
||||||
|
burrow_cleanup_flake_tmpdirs
|
||||||
|
if [[ -n "${LOCAL_STORE_DIR}" && -d "${LOCAL_STORE_DIR}" ]]; then
|
||||||
|
rm -rf "${LOCAL_STORE_DIR}" >/dev/null 2>&1 || true
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
burrow_require_cmd nix
|
||||||
|
burrow_require_cmd curl
|
||||||
|
burrow_require_cmd python3
|
||||||
|
burrow_require_cmd rsync
|
||||||
|
|
||||||
|
if [[ ! -f "${TOKEN_FILE}" ]]; then
|
||||||
|
echo "Hetzner API token file not found: ${TOKEN_FILE}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
HCLOUD_TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")"
|
||||||
|
if [[ -z "${HCLOUD_TOKEN}" ]]; then
|
||||||
|
echo "Hetzner API token file is empty: ${TOKEN_FILE}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
flake_ref="$(burrow_prepare_flake_ref "${FLAKE}")"
|
||||||
|
|
||||||
|
if [[ -z "${DESCRIPTION}" ]]; then
|
||||||
|
DESCRIPTION="Burrow ${CONFIG} $(date -u +%Y-%m-%dT%H:%M:%SZ)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
printf 'Building raw image for %s from %s\n' "${CONFIG}" "${flake_ref}" >&2
|
||||||
|
|
||||||
|
if [[ -z "${ARTIFACT_PATH_INPUT}" && -n "${BUILDER_SPEC}" && -z "${NIX_BUILD_STORE:-}" ]]; then
|
||||||
|
mkdir -p "${HOME}/.cache/burrow"
|
||||||
|
LOCAL_STORE_DIR="$(mktemp -d "${HOME}/.cache/burrow/local-store-XXXXXX")"
|
||||||
|
fi
|
||||||
|
|
||||||
|
artifact_path=""
|
||||||
|
compression=""
|
||||||
|
output_hash="${OUTPUT_HASH}"
|
||||||
|
if [[ -n "${ARTIFACT_PATH_INPUT}" ]]; then
|
||||||
|
artifact_path="${ARTIFACT_PATH_INPUT}"
|
||||||
|
if [[ ! -f "${artifact_path}" ]]; then
|
||||||
|
echo "artifact path does not exist: ${artifact_path}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
compression="$(burrow_detect_compression "${artifact_path}")"
|
||||||
|
if [[ -z "${output_hash}" ]]; then
|
||||||
|
if command -v sha256sum >/dev/null 2>&1; then
|
||||||
|
output_hash="$(sha256sum "${artifact_path}" | awk '{print $1}')"
|
||||||
|
else
|
||||||
|
output_hash="$(shasum -a 256 "${artifact_path}" | awk '{print $1}')"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
nix_build_cmd=(
|
||||||
|
nix
|
||||||
|
--extra-experimental-features
|
||||||
|
"nix-command flakes"
|
||||||
|
build
|
||||||
|
"${flake_ref}#images.${CONFIG}-raw"
|
||||||
|
--no-link
|
||||||
|
--print-out-paths
|
||||||
|
)
|
||||||
|
|
||||||
|
if [[ -n "${BUILDER_SPEC}" ]]; then
|
||||||
|
nix_build_cmd+=(--builders "${BUILDER_SPEC}")
|
||||||
|
fi
|
||||||
|
if [[ -n "${NIX_BUILD_STORE:-}" ]]; then
|
||||||
|
nix_build_cmd+=(--store "${NIX_BUILD_STORE}")
|
||||||
|
elif [[ -n "${LOCAL_STORE_DIR}" ]]; then
|
||||||
|
nix_build_cmd+=(--store "${LOCAL_STORE_DIR}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "${#NIX_BUILD_FLAGS[@]}" -gt 0 ]]; then
|
||||||
|
nix_build_cmd+=("${NIX_BUILD_FLAGS[@]}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
build_output=""
|
||||||
|
if ! build_output="$("${nix_build_cmd[@]}" 2>&1)"; then
|
||||||
|
printf '%s\n' "${build_output}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
store_path="$(printf '%s\n' "${build_output}" | tail -n1)"
|
||||||
|
if [[ -z "${store_path}" ]]; then
|
||||||
|
echo "nix build did not return a store path" >&2
|
||||||
|
printf '%s\n' "${build_output}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
artifact_path="$(burrow_resolve_image_artifact "${store_path}")"
|
||||||
|
compression="$(burrow_detect_compression "${artifact_path}")"
|
||||||
|
output_hash="$(basename "${store_path}")"
|
||||||
|
output_hash="${output_hash%%-*}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
label_args=(
|
||||||
|
"burrow.nixos-config=${CONFIG}"
|
||||||
|
"burrow.nixos-output-hash=${output_hash}"
|
||||||
|
)
|
||||||
|
if [[ "${#EXTRA_LABELS[@]}" -gt 0 ]]; then
|
||||||
|
label_args+=("${EXTRA_LABELS[@]}")
|
||||||
|
fi
|
||||||
|
label_csv="$(IFS=,; printf '%s' "${label_args[*]}")"
|
||||||
|
|
||||||
|
find_existing_image() {
|
||||||
|
HCLOUD_TOKEN="${HCLOUD_TOKEN}" \
|
||||||
|
BURROW_LABEL_SELECTOR="burrow.nixos-config=${CONFIG},burrow.nixos-output-hash=${output_hash}" \
|
||||||
|
python3 - <<'PY'
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import urllib.parse
|
||||||
|
import urllib.request
|
||||||
|
|
||||||
|
selector = urllib.parse.quote(os.environ["BURROW_LABEL_SELECTOR"], safe=",=")
|
||||||
|
req = urllib.request.Request(
|
||||||
|
f"https://api.hetzner.cloud/v1/images?type=snapshot&label_selector={selector}",
|
||||||
|
headers={"Authorization": f"Bearer {os.environ['HCLOUD_TOKEN']}"},
|
||||||
|
)
|
||||||
|
with urllib.request.urlopen(req, timeout=30) as resp:
|
||||||
|
data = json.load(resp)
|
||||||
|
|
||||||
|
images = sorted(data.get("images", []), key=lambda item: item.get("created") or "")
|
||||||
|
if images:
|
||||||
|
print(images[-1]["id"])
|
||||||
|
PY
|
||||||
|
}
|
||||||
|
|
||||||
|
if [[ "${NO_UPDATE}" -eq 1 ]]; then
|
||||||
|
existing_id="$(find_existing_image || true)"
|
||||||
|
if [[ -n "${existing_id}" ]]; then
|
||||||
|
printf 'Reusing existing Hetzner snapshot %s for %s\n' "${existing_id}" "${CONFIG}" >&2
|
||||||
|
printf '%s\n' "${existing_id}"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
uploader_bin="${HCLOUD_UPLOAD_IMAGE_BIN:-}"
|
||||||
|
if [[ -z "${uploader_bin}" ]]; then
|
||||||
|
uploader_build_output="$(
|
||||||
|
nix --extra-experimental-features "nix-command flakes" build \
|
||||||
|
"${flake_ref}#hcloud-upload-image" \
|
||||||
|
--no-link \
|
||||||
|
--print-out-paths 2>&1
|
||||||
|
)" || {
|
||||||
|
printf '%s\n' "${uploader_build_output}" >&2
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
uploader_bin="$(printf '%s\n' "${uploader_build_output}" | tail -n1)/bin/hcloud-upload-image"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ ! -x "${uploader_bin}" ]]; then
|
||||||
|
echo "unable to resolve an executable hcloud-upload-image binary; set HCLOUD_UPLOAD_IMAGE_BIN explicitly" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
upload_cmd=(
|
||||||
|
"${uploader_bin}"
|
||||||
|
)
|
||||||
|
if [[ "${UPLOAD_VERBOSE}" =~ ^[0-9]+$ ]] && [[ "${UPLOAD_VERBOSE}" -gt 0 ]]; then
|
||||||
|
for _ in $(seq 1 "${UPLOAD_VERBOSE}"); do
|
||||||
|
upload_cmd+=(-v)
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
upload_cmd+=(
|
||||||
|
upload
|
||||||
|
--image-path "${artifact_path}"
|
||||||
|
--location "${LOCATION}"
|
||||||
|
--description "${DESCRIPTION}"
|
||||||
|
--labels "${label_csv}"
|
||||||
|
)
|
||||||
|
if [[ -n "${UPLOAD_SERVER_TYPE}" ]]; then
|
||||||
|
upload_cmd+=(--server-type "${UPLOAD_SERVER_TYPE}")
|
||||||
|
else
|
||||||
|
upload_cmd+=(--architecture "${ARCHITECTURE}")
|
||||||
|
fi
|
||||||
|
if [[ -n "${compression}" ]]; then
|
||||||
|
upload_cmd+=(--compression "${compression}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
printf 'Uploading %s to Hetzner Cloud via %s\n' "${artifact_path}" "${uploader_bin}" >&2
|
||||||
|
HCLOUD_TOKEN="${HCLOUD_TOKEN}" "${upload_cmd[@]}" >&2
|
||||||
|
|
||||||
|
image_id=""
|
||||||
|
for _ in $(seq 1 24); do
|
||||||
|
image_id="$(find_existing_image || true)"
|
||||||
|
if [[ -n "${image_id}" ]]; then
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
sleep 5
|
||||||
|
done
|
||||||
|
|
||||||
|
if [[ -z "${image_id}" ]]; then
|
||||||
|
echo "failed to locate uploaded Hetzner snapshot after upload completed" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
printf '%s\n' "${image_id}"
|
||||||
298
Scripts/hetzner-forge.sh
Executable file
298
Scripts/hetzner-forge.sh
Executable file
|
|
@ -0,0 +1,298 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||||
|
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
|
||||||
|
# shellcheck source=Scripts/_burrow-secrets.sh
|
||||||
|
source "${SCRIPT_DIR}/_burrow-secrets.sh"
|
||||||
|
|
||||||
|
usage() {
|
||||||
|
cat <<'EOF'
|
||||||
|
Usage: Scripts/hetzner-forge.sh [show|create|delete|recreate|build-image|create-from-image|recreate-from-image] [options]
|
||||||
|
|
||||||
|
Manage the Burrow forge server and its Hetzner snapshot lifecycle.
|
||||||
|
|
||||||
|
Defaults:
|
||||||
|
action: show
|
||||||
|
server-name: burrow-forge
|
||||||
|
server-type: ccx23
|
||||||
|
location: hel1
|
||||||
|
image: ubuntu-24.04
|
||||||
|
ssh keys: contact@burrow.net,agent@burrow.net
|
||||||
|
|
||||||
|
Options:
|
||||||
|
--server-name <name> Server name to manage.
|
||||||
|
--server-type <type> Hetzner server type.
|
||||||
|
--location <code> Hetzner location.
|
||||||
|
--image <name|id> Image used at create time.
|
||||||
|
--config <name> Burrow image config name for snapshot lookup/build (default: burrow-forge).
|
||||||
|
--ssh-key <name> SSH key name to attach. Repeatable.
|
||||||
|
--token-file <path> Hetzner API token file.
|
||||||
|
--flake <path> Flake path used by image-build actions (default: .)
|
||||||
|
--upload-location <code> Hetzner location used for image upload (default: same as --location)
|
||||||
|
--yes Required for delete and recreate.
|
||||||
|
-h, --help Show this help text.
|
||||||
|
|
||||||
|
Environment:
|
||||||
|
HCLOUD_TOKEN_FILE Defaults to secrets/hetzner/api-token.age, then intake/hetzner-api-token.txt
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
ACTION="show"
|
||||||
|
SERVER_NAME="burrow-forge"
|
||||||
|
SERVER_TYPE="ccx23"
|
||||||
|
LOCATION="hel1"
|
||||||
|
IMAGE="ubuntu-24.04"
|
||||||
|
CONFIG="burrow-forge"
|
||||||
|
FLAKE="."
|
||||||
|
UPLOAD_LOCATION=""
|
||||||
|
TOKEN_FILE="${HCLOUD_TOKEN_FILE:-}"
|
||||||
|
YES=0
|
||||||
|
SSH_KEYS=("contact@burrow.net" "agent@burrow.net")
|
||||||
|
|
||||||
|
cleanup() {
|
||||||
|
burrow_cleanup_secret_tmpfiles
|
||||||
|
}
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
if [[ $# -gt 0 ]]; then
|
||||||
|
case "$1" in
|
||||||
|
show|create|delete|recreate|build-image|create-from-image|recreate-from-image)
|
||||||
|
ACTION="$1"
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
fi
|
||||||
|
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case "$1" in
|
||||||
|
--server-name)
|
||||||
|
SERVER_NAME="${2:?missing value for --server-name}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--server-type)
|
||||||
|
SERVER_TYPE="${2:?missing value for --server-type}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--location)
|
||||||
|
LOCATION="${2:?missing value for --location}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--image)
|
||||||
|
IMAGE="${2:?missing value for --image}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--config)
|
||||||
|
CONFIG="${2:?missing value for --config}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--ssh-key)
|
||||||
|
SSH_KEYS+=("${2:?missing value for --ssh-key}")
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--token-file)
|
||||||
|
TOKEN_FILE="${2:?missing value for --token-file}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--flake)
|
||||||
|
FLAKE="${2:?missing value for --flake}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--upload-location)
|
||||||
|
UPLOAD_LOCATION="${2:?missing value for --upload-location}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--yes)
|
||||||
|
YES=1
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
usage
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Unknown argument: $1" >&2
|
||||||
|
usage >&2
|
||||||
|
exit 2
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
TOKEN_FILE="$(
|
||||||
|
burrow_resolve_secret_file \
|
||||||
|
"${REPO_ROOT}" \
|
||||||
|
"${TOKEN_FILE}" \
|
||||||
|
"${REPO_ROOT}/intake/hetzner-api-token.txt" \
|
||||||
|
"${REPO_ROOT}/secrets/hetzner/api-token.age"
|
||||||
|
)" || {
|
||||||
|
echo "Hetzner API token file could not be resolved" >&2
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if [[ -z "${UPLOAD_LOCATION}" ]]; then
|
||||||
|
UPLOAD_LOCATION="${LOCATION}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "${ACTION}" == "delete" || "${ACTION}" == "recreate" || "${ACTION}" == "recreate-from-image" ]] && [[ ${YES} -ne 1 ]]; then
|
||||||
|
echo "--yes is required for ${ACTION}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
latest_snapshot_id() {
|
||||||
|
HCLOUD_TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")" \
|
||||||
|
BURROW_CONFIG="${CONFIG}" \
|
||||||
|
python3 - <<'PY'
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import urllib.parse
|
||||||
|
import urllib.request
|
||||||
|
|
||||||
|
selector = urllib.parse.quote(f"burrow.nixos-config={os.environ['BURROW_CONFIG']}", safe=",=")
|
||||||
|
req = urllib.request.Request(
|
||||||
|
f"https://api.hetzner.cloud/v1/images?type=snapshot&label_selector={selector}",
|
||||||
|
headers={"Authorization": f"Bearer {os.environ['HCLOUD_TOKEN']}"},
|
||||||
|
)
|
||||||
|
with urllib.request.urlopen(req, timeout=30) as resp:
|
||||||
|
data = json.load(resp)
|
||||||
|
images = sorted(data.get("images", []), key=lambda item: item.get("created") or "")
|
||||||
|
if images:
|
||||||
|
print(images[-1]["id"])
|
||||||
|
PY
|
||||||
|
}
|
||||||
|
|
||||||
|
if [[ "${ACTION}" == "build-image" ]]; then
|
||||||
|
exec "${SCRIPT_DIR}/nsc-build-and-upload-image.sh" \
|
||||||
|
--config "${CONFIG}" \
|
||||||
|
--flake "${FLAKE}" \
|
||||||
|
--location "${UPLOAD_LOCATION}" \
|
||||||
|
--upload-server-type "${SERVER_TYPE}" \
|
||||||
|
--token-file "${TOKEN_FILE}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "${ACTION}" == "create-from-image" || "${ACTION}" == "recreate-from-image" ]]; then
|
||||||
|
if [[ "${IMAGE}" == "ubuntu-24.04" ]]; then
|
||||||
|
IMAGE="$(latest_snapshot_id)"
|
||||||
|
fi
|
||||||
|
if [[ -z "${IMAGE}" ]]; then
|
||||||
|
echo "No Burrow snapshot found for config ${CONFIG}. Run build-image first." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [[ "${ACTION}" == "create-from-image" ]]; then
|
||||||
|
ACTION="create"
|
||||||
|
else
|
||||||
|
ACTION="recreate"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
ssh_keys_csv=""
|
||||||
|
for key in "${SSH_KEYS[@]}"; do
|
||||||
|
if [[ -n "${ssh_keys_csv}" ]]; then
|
||||||
|
ssh_keys_csv+=","
|
||||||
|
fi
|
||||||
|
ssh_keys_csv+="${key}"
|
||||||
|
done
|
||||||
|
|
||||||
|
export BURROW_HCLOUD_ACTION="${ACTION}"
|
||||||
|
export BURROW_HCLOUD_SERVER_NAME="${SERVER_NAME}"
|
||||||
|
export BURROW_HCLOUD_SERVER_TYPE="${SERVER_TYPE}"
|
||||||
|
export BURROW_HCLOUD_LOCATION="${LOCATION}"
|
||||||
|
export BURROW_HCLOUD_IMAGE="${IMAGE}"
|
||||||
|
export BURROW_HCLOUD_TOKEN_FILE="${TOKEN_FILE}"
|
||||||
|
export BURROW_HCLOUD_SSH_KEYS="${ssh_keys_csv}"
|
||||||
|
|
||||||
|
python3 - <<'PY'
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import requests
|
||||||
|
|
||||||
|
base = "https://api.hetzner.cloud/v1"
|
||||||
|
action = os.environ["BURROW_HCLOUD_ACTION"]
|
||||||
|
server_name = os.environ["BURROW_HCLOUD_SERVER_NAME"]
|
||||||
|
server_type = os.environ["BURROW_HCLOUD_SERVER_TYPE"]
|
||||||
|
location = os.environ["BURROW_HCLOUD_LOCATION"]
|
||||||
|
image = os.environ["BURROW_HCLOUD_IMAGE"]
|
||||||
|
token = Path(os.environ["BURROW_HCLOUD_TOKEN_FILE"]).read_text(encoding="utf-8").strip()
|
||||||
|
ssh_keys = [key for key in os.environ["BURROW_HCLOUD_SSH_KEYS"].split(",") if key]
|
||||||
|
|
||||||
|
session = requests.Session()
|
||||||
|
session.headers.update({"Authorization": f"Bearer {token}", "Content-Type": "application/json"})
|
||||||
|
|
||||||
|
|
||||||
|
def request(method: str, path: str, **kwargs) -> requests.Response:
|
||||||
|
response = session.request(method, f"{base}{path}", timeout=30, **kwargs)
|
||||||
|
response.raise_for_status()
|
||||||
|
return response
|
||||||
|
|
||||||
|
|
||||||
|
def find_server():
|
||||||
|
response = request("GET", "/servers", params={"name": server_name})
|
||||||
|
data = response.json()
|
||||||
|
for server in data.get("servers", []):
|
||||||
|
if server.get("name") == server_name:
|
||||||
|
return server
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def summarize(server):
|
||||||
|
ipv4 = (((server.get("public_net") or {}).get("ipv4")) or {}).get("ip")
|
||||||
|
image_name = ((server.get("image") or {}).get("name")) or ""
|
||||||
|
summary = {
|
||||||
|
"id": server.get("id"),
|
||||||
|
"name": server.get("name"),
|
||||||
|
"status": server.get("status"),
|
||||||
|
"server_type": ((server.get("server_type") or {}).get("name")),
|
||||||
|
"location": ((server.get("location") or {}).get("name")),
|
||||||
|
"image": image_name,
|
||||||
|
"ipv4": ipv4,
|
||||||
|
"created": server.get("created"),
|
||||||
|
}
|
||||||
|
print(json.dumps(summary, indent=2))
|
||||||
|
|
||||||
|
|
||||||
|
server = find_server()
|
||||||
|
|
||||||
|
if action == "show":
|
||||||
|
if server is None:
|
||||||
|
print(json.dumps({"name": server_name, "present": False}, indent=2))
|
||||||
|
else:
|
||||||
|
summarize(server)
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
if action == "delete":
|
||||||
|
if server is None:
|
||||||
|
print(json.dumps({"name": server_name, "deleted": False, "reason": "not found"}, indent=2))
|
||||||
|
sys.exit(0)
|
||||||
|
request("DELETE", f"/servers/{server['id']}")
|
||||||
|
print(json.dumps({"name": server_name, "deleted": True, "id": server["id"]}, indent=2))
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
if action == "recreate" and server is not None:
|
||||||
|
request("DELETE", f"/servers/{server['id']}")
|
||||||
|
server = None
|
||||||
|
|
||||||
|
if action in {"create", "recreate"}:
|
||||||
|
if server is not None:
|
||||||
|
summarize(server)
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
payload = {
|
||||||
|
"name": server_name,
|
||||||
|
"server_type": server_type,
|
||||||
|
"location": location,
|
||||||
|
"image": image,
|
||||||
|
"ssh_keys": ssh_keys,
|
||||||
|
"labels": {
|
||||||
|
"project": "burrow",
|
||||||
|
"role": "forge",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
response = request("POST", "/servers", json=payload)
|
||||||
|
created = response.json()["server"]
|
||||||
|
summarize(created)
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
raise SystemExit(f"unsupported action: {action}")
|
||||||
|
PY
|
||||||
562
Scripts/nsc-build-and-upload-image.sh
Executable file
562
Scripts/nsc-build-and-upload-image.sh
Executable file
|
|
@ -0,0 +1,562 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||||
|
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
|
||||||
|
|
||||||
|
# shellcheck source=Scripts/_burrow-flake.sh
|
||||||
|
source "${SCRIPT_DIR}/_burrow-flake.sh"
|
||||||
|
# shellcheck source=Scripts/_burrow-secrets.sh
|
||||||
|
source "${SCRIPT_DIR}/_burrow-secrets.sh"
|
||||||
|
|
||||||
|
CONFIG="${HCLOUD_IMAGE_CONFIG:-burrow-forge}"
|
||||||
|
FLAKE="${HCLOUD_IMAGE_FLAKE:-.}"
|
||||||
|
LOCATION="${HCLOUD_IMAGE_LOCATION:-hel1}"
|
||||||
|
TOKEN_FILE="${HCLOUD_TOKEN_FILE:-}"
|
||||||
|
NSC_SSH_HOST="${NSC_SSH_HOST:-ssh.ord2.namespace.so}"
|
||||||
|
NSC_MACHINE_TYPE="${NSC_MACHINE_TYPE:-linux/amd64:32x64}"
|
||||||
|
NSC_BUILDER_DURATION="${NSC_BUILDER_DURATION:-4h}"
|
||||||
|
NSC_BUILDER_JOBS="${NSC_BUILDER_JOBS:-32}"
|
||||||
|
NSC_BUILDER_FEATURES="${NSC_BUILDER_FEATURES:-kvm,big-parallel}"
|
||||||
|
NSC_BIN="${NSC_BIN:-}"
|
||||||
|
REMOTE_COMPRESSION="${HCLOUD_IMAGE_REMOTE_COMPRESSION:-auto}"
|
||||||
|
UPLOAD_SERVER_TYPE="${HCLOUD_IMAGE_UPLOAD_SERVER_TYPE:-}"
|
||||||
|
KEEP_TMPDIR="${HCLOUD_IMAGE_KEEP_TMPDIR:-0}"
|
||||||
|
NO_UPDATE=0
|
||||||
|
NIX_BUILD_FLAGS=()
|
||||||
|
EXTRA_LABELS=()
|
||||||
|
BURROW_FLAKE_TMPDIRS=()
|
||||||
|
BUILDER_ID=""
|
||||||
|
|
||||||
|
cleanup() {
|
||||||
|
burrow_cleanup_secret_tmpfiles
|
||||||
|
burrow_cleanup_flake_tmpdirs
|
||||||
|
}
|
||||||
|
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
usage() {
|
||||||
|
cat <<'EOF'
|
||||||
|
Usage: Scripts/nsc-build-and-upload-image.sh [options]
|
||||||
|
|
||||||
|
Create a temporary Namespace Linux builder, build the Burrow raw image on it,
|
||||||
|
and upload the resulting artifact to Hetzner Cloud.
|
||||||
|
|
||||||
|
Options:
|
||||||
|
--config <name> images.<name>-raw output to build (default: burrow-forge)
|
||||||
|
--flake <path> Flake path to build from (default: .)
|
||||||
|
--location <code> Hetzner upload location (default: hel1)
|
||||||
|
--token-file <path> Hetzner API token file (default: secrets/hetzner/api-token.age, then intake/hetzner-api-token.txt)
|
||||||
|
--machine-type <type> Namespace machine type (default: linux/amd64:32x64)
|
||||||
|
--ssh-host <host> Namespace SSH endpoint (default: ssh.ord2.namespace.so)
|
||||||
|
--duration <ttl> Namespace builder lifetime (default: 4h)
|
||||||
|
--builder-jobs <n> Nix builder job count advertised to the local client
|
||||||
|
--builder-features <s> Comma-separated Nix system features (default: "kvm,big-parallel")
|
||||||
|
--remote-compression <mode>
|
||||||
|
Compress raw/image artifacts on the Namespace builder
|
||||||
|
before copy-back. Modes: auto, none, xz, zstd
|
||||||
|
(default: auto)
|
||||||
|
--upload-server-type <name>
|
||||||
|
Hetzner server type for the temporary upload host
|
||||||
|
--label key=value Extra Hetzner snapshot label (repeatable)
|
||||||
|
--nix-flag <arg> Extra argument passed to nix build (repeatable)
|
||||||
|
--no-update Reuse an existing snapshot with the same config/output hash
|
||||||
|
-h, --help Show this help text
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case "$1" in
|
||||||
|
--config)
|
||||||
|
CONFIG="${2:?missing value for --config}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--flake)
|
||||||
|
FLAKE="${2:?missing value for --flake}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--location)
|
||||||
|
LOCATION="${2:?missing value for --location}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--token-file)
|
||||||
|
TOKEN_FILE="${2:?missing value for --token-file}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--machine-type)
|
||||||
|
NSC_MACHINE_TYPE="${2:?missing value for --machine-type}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--ssh-host)
|
||||||
|
NSC_SSH_HOST="${2:?missing value for --ssh-host}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--duration)
|
||||||
|
NSC_BUILDER_DURATION="${2:?missing value for --duration}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--builder-jobs)
|
||||||
|
NSC_BUILDER_JOBS="${2:?missing value for --builder-jobs}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--builder-features)
|
||||||
|
NSC_BUILDER_FEATURES="${2:?missing value for --builder-features}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--remote-compression)
|
||||||
|
REMOTE_COMPRESSION="${2:?missing value for --remote-compression}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--upload-server-type)
|
||||||
|
UPLOAD_SERVER_TYPE="${2:?missing value for --upload-server-type}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--label)
|
||||||
|
EXTRA_LABELS+=("${2:?missing value for --label}")
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--nix-flag)
|
||||||
|
NIX_BUILD_FLAGS+=("${2:?missing value for --nix-flag}")
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--no-update)
|
||||||
|
NO_UPDATE=1
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
usage
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "unknown option: $1" >&2
|
||||||
|
usage >&2
|
||||||
|
exit 64
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
TOKEN_FILE="$(
|
||||||
|
burrow_resolve_secret_file \
|
||||||
|
"${REPO_ROOT}" \
|
||||||
|
"${TOKEN_FILE}" \
|
||||||
|
"${REPO_ROOT}/intake/hetzner-api-token.txt" \
|
||||||
|
"${REPO_ROOT}/secrets/hetzner/api-token.age"
|
||||||
|
)" || {
|
||||||
|
echo "Hetzner API token file could not be resolved" >&2
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
cleanup() {
|
||||||
|
if [[ -n "${BUILDER_ID}" && -n "${NSC_BIN}" ]]; then
|
||||||
|
"${NSC_BIN}" destroy "${BUILDER_ID}" --force >/dev/null 2>&1 || true
|
||||||
|
fi
|
||||||
|
burrow_cleanup_flake_tmpdirs
|
||||||
|
if [[ "${KEEP_TMPDIR}" != "1" && -n "${TMPDIR_BURROW_NSC:-}" && -d "${TMPDIR_BURROW_NSC}" ]]; then
|
||||||
|
rm -rf "${TMPDIR_BURROW_NSC}"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
burrow_require_cmd nix
|
||||||
|
burrow_require_cmd curl
|
||||||
|
burrow_require_cmd python3
|
||||||
|
burrow_require_cmd ssh
|
||||||
|
burrow_require_cmd ssh-keygen
|
||||||
|
burrow_require_cmd ssh-keyscan
|
||||||
|
burrow_require_cmd tar
|
||||||
|
|
||||||
|
flake_ref="$(burrow_prepare_flake_ref "${FLAKE}")"
|
||||||
|
|
||||||
|
if [[ -z "${NSC_BIN}" ]]; then
|
||||||
|
nsc_build_output="$(
|
||||||
|
nix --extra-experimental-features "nix-command flakes" build \
|
||||||
|
"${flake_ref}#nsc" \
|
||||||
|
--no-link \
|
||||||
|
--print-out-paths 2>&1
|
||||||
|
)" || {
|
||||||
|
printf '%s\n' "${nsc_build_output}" >&2
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
NSC_BIN="$(printf '%s\n' "${nsc_build_output}" | tail -n1)/bin/nsc"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ ! -x "${NSC_BIN}" ]]; then
|
||||||
|
echo "unable to resolve an executable nsc binary; set NSC_BIN explicitly" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -n "${NSC_SESSION:-}" && ! -f "${HOME}/.ns/session" ]]; then
|
||||||
|
mkdir -p "${HOME}/.ns"
|
||||||
|
printf '%s\n' "${NSC_SESSION}" > "${HOME}/.ns/session"
|
||||||
|
chmod 600 "${HOME}/.ns/session"
|
||||||
|
fi
|
||||||
|
|
||||||
|
"${NSC_BIN}" auth check-login --duration 20m >/dev/null
|
||||||
|
"${NSC_BIN}" version >/dev/null || true
|
||||||
|
|
||||||
|
TMPDIR_BURROW_NSC="$(mktemp -d "${HOME}/.cache/burrow/nsc-XXXXXX")"
|
||||||
|
ssh_key="${TMPDIR_BURROW_NSC}/builder"
|
||||||
|
known_hosts="${TMPDIR_BURROW_NSC}/known_hosts"
|
||||||
|
id_file="${TMPDIR_BURROW_NSC}/builder.id"
|
||||||
|
|
||||||
|
ssh-keygen -q -t ed25519 -N "" -f "${ssh_key}"
|
||||||
|
ssh-keyscan -H "${NSC_SSH_HOST}" > "${known_hosts}"
|
||||||
|
|
||||||
|
ssh_base=(
|
||||||
|
ssh
|
||||||
|
-i "${ssh_key}"
|
||||||
|
-o UserKnownHostsFile="${known_hosts}"
|
||||||
|
-o StrictHostKeyChecking=yes
|
||||||
|
)
|
||||||
|
|
||||||
|
wait_for_ssh() {
|
||||||
|
local instance_id="$1"
|
||||||
|
for _ in $(seq 1 30); do
|
||||||
|
if "${ssh_base[@]}" -q "${instance_id}@${NSC_SSH_HOST}" true >/dev/null 2>&1; then
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
sleep 5
|
||||||
|
done
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
configure_builder() {
|
||||||
|
local instance_id="$1"
|
||||||
|
"${ssh_base[@]}" "${instance_id}@${NSC_SSH_HOST}" <<'EOF'
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
if ! command -v nix >/dev/null 2>&1; then
|
||||||
|
curl -fsSL https://install.determinate.systems/nix | sh -s -- install linux --determinate --init none --no-confirm
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -e /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]; then
|
||||||
|
. /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
|
||||||
|
fi
|
||||||
|
|
||||||
|
mkdir -p /etc/nix
|
||||||
|
cat <<CFG >/etc/nix/nix.conf
|
||||||
|
build-users-group =
|
||||||
|
trusted-users = root $USER
|
||||||
|
auto-optimise-store = true
|
||||||
|
substituters = https://cache.nixos.org
|
||||||
|
builders-use-substitutes = true
|
||||||
|
CFG
|
||||||
|
|
||||||
|
mkdir -p /nix/var/nix/daemon-socket
|
||||||
|
|
||||||
|
if ! pgrep -x nix-daemon >/dev/null 2>&1; then
|
||||||
|
nohup nix-daemon >/dev/null 2>&1 </dev/null &
|
||||||
|
fi
|
||||||
|
|
||||||
|
for _ in $(seq 1 120); do
|
||||||
|
if [ -S /nix/var/nix/daemon-socket/socket ]; then
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
if ! pgrep -x nix-daemon >/dev/null 2>&1; then
|
||||||
|
nohup nix-daemon >/dev/null 2>&1 </dev/null &
|
||||||
|
fi
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "nix-daemon socket never appeared" >&2
|
||||||
|
exit 1
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
printf 'Creating temporary Namespace builder (%s)\n' "${NSC_MACHINE_TYPE}" >&2
|
||||||
|
"${NSC_BIN}" create \
|
||||||
|
--bare \
|
||||||
|
--machine_type "${NSC_MACHINE_TYPE}" \
|
||||||
|
--ssh_key "${ssh_key}.pub" \
|
||||||
|
--duration "${NSC_BUILDER_DURATION}" \
|
||||||
|
--label "burrow=true" \
|
||||||
|
--label "purpose=hetzner-image-build" \
|
||||||
|
--output_to "${id_file}" \
|
||||||
|
>/dev/null
|
||||||
|
|
||||||
|
BUILDER_ID="$(tr -d '\r\n' < "${id_file}")"
|
||||||
|
if [[ -z "${BUILDER_ID}" ]]; then
|
||||||
|
echo "nsc create did not return a builder id" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
printf 'Waiting for Namespace builder %s\n' "${BUILDER_ID}" >&2
|
||||||
|
wait_for_ssh "${BUILDER_ID}"
|
||||||
|
configure_builder "${BUILDER_ID}" >&2
|
||||||
|
|
||||||
|
remote_root="burrow-image-build-${BUILDER_ID}"
|
||||||
|
remote_flake_path="./${remote_root}"
|
||||||
|
local_flake_dir="${flake_ref#path:}"
|
||||||
|
remote_build_stdout="/tmp/burrow-image-build-${BUILDER_ID}.stdout"
|
||||||
|
remote_build_stderr="/tmp/burrow-image-build-${BUILDER_ID}.stderr"
|
||||||
|
|
||||||
|
printf 'Syncing flake to Namespace builder %s\n' "${BUILDER_ID}" >&2
|
||||||
|
tar -C "${local_flake_dir}" -cf - . \
|
||||||
|
| "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" "rm -rf '${remote_root}' && mkdir -p '${remote_root}' && tar -C '${remote_root}' -xf -"
|
||||||
|
|
||||||
|
run_remote_build() {
|
||||||
|
local remote_cmd=(
|
||||||
|
env
|
||||||
|
"CONFIG=${CONFIG}"
|
||||||
|
"REMOTE_FLAKE_PATH=${remote_flake_path}"
|
||||||
|
"REMOTE_BUILD_STDOUT=${remote_build_stdout}"
|
||||||
|
"REMOTE_BUILD_STDERR=${remote_build_stderr}"
|
||||||
|
bash
|
||||||
|
-s
|
||||||
|
--
|
||||||
|
)
|
||||||
|
if [[ "${#NIX_BUILD_FLAGS[@]}" -gt 0 ]]; then
|
||||||
|
remote_cmd+=("${NIX_BUILD_FLAGS[@]}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
"${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" "${remote_cmd[@]}" <<'EOF'
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
config="${CONFIG}"
|
||||||
|
remote_flake_path="${REMOTE_FLAKE_PATH}"
|
||||||
|
remote_build_stdout="${REMOTE_BUILD_STDOUT}"
|
||||||
|
remote_build_stderr="${REMOTE_BUILD_STDERR}"
|
||||||
|
nix_build_cmd=(
|
||||||
|
nix
|
||||||
|
--extra-experimental-features
|
||||||
|
"nix-command flakes"
|
||||||
|
build
|
||||||
|
"path:${remote_flake_path}#images.${config}-raw"
|
||||||
|
--no-link
|
||||||
|
--print-out-paths
|
||||||
|
)
|
||||||
|
if [[ "$#" -gt 0 ]]; then
|
||||||
|
nix_build_cmd+=("$@")
|
||||||
|
fi
|
||||||
|
|
||||||
|
rm -f "${remote_build_stdout}" "${remote_build_stderr}"
|
||||||
|
if ! "${nix_build_cmd[@]}" >"${remote_build_stdout}" 2>"${remote_build_stderr}"; then
|
||||||
|
cat "${remote_build_stderr}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
resolve_remote_store_path() {
|
||||||
|
"${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
|
||||||
|
env "REMOTE_BUILD_STDOUT=${remote_build_stdout}" "REMOTE_BUILD_STDERR=${remote_build_stderr}" bash -s <<'EOF'
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
remote_build_stdout="${REMOTE_BUILD_STDOUT}"
|
||||||
|
remote_build_stderr="${REMOTE_BUILD_STDERR}"
|
||||||
|
|
||||||
|
if [[ ! -s "${remote_build_stdout}" ]]; then
|
||||||
|
echo "remote build stdout file is missing or empty: ${remote_build_stdout}" >&2
|
||||||
|
if [[ -s "${remote_build_stderr}" ]]; then
|
||||||
|
cat "${remote_build_stderr}" >&2
|
||||||
|
fi
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
tail -n1 "${remote_build_stdout}"
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
resolve_remote_artifact_path() {
|
||||||
|
local store_path="$1"
|
||||||
|
"${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
|
||||||
|
env "REMOTE_STORE_PATH=${store_path}" bash -s <<'EOF'
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
store_path="${REMOTE_STORE_PATH}"
|
||||||
|
artifact_path="${store_path}"
|
||||||
|
if [[ -d "${artifact_path}" ]]; then
|
||||||
|
artifact_path="$(find "${artifact_path}" -type f \( -name '*.raw' -o -name '*.raw.*' -o -name '*.img' -o -name '*.img.*' \) | sort | head -n1)"
|
||||||
|
fi
|
||||||
|
if [[ -z "${artifact_path}" || ! -f "${artifact_path}" ]]; then
|
||||||
|
echo "unable to locate image artifact under ${store_path}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
printf '%s\n' "${artifact_path}"
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
plan_remote_artifact_transfer() {
|
||||||
|
local artifact_path="$1"
|
||||||
|
local compression_mode="$2"
|
||||||
|
|
||||||
|
"${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
|
||||||
|
env "REMOTE_ARTIFACT_PATH=${artifact_path}" "REMOTE_COMPRESSION=${compression_mode}" bash -s <<'EOF'
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
artifact_path="${REMOTE_ARTIFACT_PATH}"
|
||||||
|
compression_mode="${REMOTE_COMPRESSION}"
|
||||||
|
|
||||||
|
case "${artifact_path}" in
|
||||||
|
*.bz2)
|
||||||
|
printf '%s\tbz2\n' "$(basename "${artifact_path}")"
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
*.xz)
|
||||||
|
printf '%s\txz\n' "$(basename "${artifact_path}")"
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
*.zst|*.zstd)
|
||||||
|
printf '%s\tzstd\n' "$(basename "${artifact_path}")"
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
select_compression() {
|
||||||
|
case "${compression_mode}" in
|
||||||
|
auto)
|
||||||
|
if command -v zstd >/dev/null 2>&1; then
|
||||||
|
printf 'zstd\n'
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
if command -v xz >/dev/null 2>&1; then
|
||||||
|
printf 'xz\n'
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
printf 'none\n'
|
||||||
|
;;
|
||||||
|
none|xz|zstd)
|
||||||
|
printf '%s\n' "${compression_mode}"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "unsupported remote compression mode: ${compression_mode}" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
|
||||||
|
mode="$(select_compression)"
|
||||||
|
case "${mode}" in
|
||||||
|
none)
|
||||||
|
printf '%s\tnone\n' "$(basename "${artifact_path}")"
|
||||||
|
;;
|
||||||
|
zstd)
|
||||||
|
printf '%s.zst\tzstd\n' "$(basename "${artifact_path}")"
|
||||||
|
;;
|
||||||
|
xz)
|
||||||
|
printf '%s.xz\txz\n' "$(basename "${artifact_path}")"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
stream_remote_artifact() {
|
||||||
|
local artifact_path="$1"
|
||||||
|
local compression_mode="$2"
|
||||||
|
local destination="$3"
|
||||||
|
|
||||||
|
"${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
|
||||||
|
env "REMOTE_ARTIFACT_PATH=${artifact_path}" "REMOTE_COMPRESSION=${compression_mode}" bash -s <<'EOF' > "${destination}"
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
artifact_path="${REMOTE_ARTIFACT_PATH}"
|
||||||
|
compression_mode="${REMOTE_COMPRESSION}"
|
||||||
|
|
||||||
|
case "${artifact_path}" in
|
||||||
|
*.bz2|*.xz|*.zst|*.zstd)
|
||||||
|
cat "${artifact_path}"
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
select_compression() {
|
||||||
|
case "${compression_mode}" in
|
||||||
|
auto)
|
||||||
|
if command -v zstd >/dev/null 2>&1; then
|
||||||
|
printf 'zstd\n'
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
if command -v xz >/dev/null 2>&1; then
|
||||||
|
printf 'xz\n'
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
printf 'none\n'
|
||||||
|
;;
|
||||||
|
none|xz|zstd)
|
||||||
|
printf '%s\n' "${compression_mode}"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "unsupported remote compression mode: ${compression_mode}" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
|
||||||
|
mode="$(select_compression)"
|
||||||
|
case "${mode}" in
|
||||||
|
none)
|
||||||
|
cat "${artifact_path}"
|
||||||
|
;;
|
||||||
|
zstd)
|
||||||
|
if ! command -v zstd >/dev/null 2>&1; then
|
||||||
|
echo "zstd requested but not available on Namespace builder" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
zstd -T0 -19 -c "${artifact_path}"
|
||||||
|
;;
|
||||||
|
xz)
|
||||||
|
if ! command -v xz >/dev/null 2>&1; then
|
||||||
|
echo "xz requested but not available on Namespace builder" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
xz -T0 -c "${artifact_path}"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
printf 'Building raw image on Namespace builder %s\n' "${BUILDER_ID}" >&2
|
||||||
|
run_remote_build
|
||||||
|
|
||||||
|
remote_store_path="$(resolve_remote_store_path)"
|
||||||
|
if [[ -z "${remote_store_path}" ]]; then
|
||||||
|
echo "remote build did not return a store path" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
remote_artifact_path="$(resolve_remote_artifact_path "${remote_store_path}")"
|
||||||
|
if [[ -z "${remote_artifact_path}" ]]; then
|
||||||
|
echo "remote build did not return an artifact path" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
transfer_plan="$(plan_remote_artifact_transfer "${remote_artifact_path}" "${REMOTE_COMPRESSION}")"
|
||||||
|
local_artifact_name="$(printf '%s\n' "${transfer_plan}" | cut -f1)"
|
||||||
|
transfer_compression="$(printf '%s\n' "${transfer_plan}" | cut -f2)"
|
||||||
|
if [[ -z "${local_artifact_name}" || -z "${transfer_compression}" ]]; then
|
||||||
|
echo "unable to determine artifact transfer plan for ${remote_artifact_path}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
output_hash="$(basename "${remote_store_path}")"
|
||||||
|
output_hash="${output_hash%%-*}"
|
||||||
|
local_artifact="${TMPDIR_BURROW_NSC}/${local_artifact_name}"
|
||||||
|
|
||||||
|
printf 'Streaming built artifact back from Namespace builder %s (%s)\n' "${BUILDER_ID}" "${transfer_compression}" >&2
|
||||||
|
stream_remote_artifact "${remote_artifact_path}" "${REMOTE_COMPRESSION}" "${local_artifact}"
|
||||||
|
|
||||||
|
cmd=(
|
||||||
|
"${SCRIPT_DIR}/hcloud-upload-nixos-image.sh"
|
||||||
|
--config "${CONFIG}"
|
||||||
|
--flake "${FLAKE}"
|
||||||
|
--location "${LOCATION}"
|
||||||
|
--token-file "${TOKEN_FILE}"
|
||||||
|
--artifact-path "${local_artifact}"
|
||||||
|
--output-hash "${output_hash}"
|
||||||
|
)
|
||||||
|
|
||||||
|
if [[ -n "${UPLOAD_SERVER_TYPE}" ]]; then
|
||||||
|
cmd+=(--server-type "${UPLOAD_SERVER_TYPE}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "${NO_UPDATE}" -eq 1 ]]; then
|
||||||
|
cmd+=(--no-update)
|
||||||
|
fi
|
||||||
|
if [[ "${#EXTRA_LABELS[@]}" -gt 0 ]]; then
|
||||||
|
for label in "${EXTRA_LABELS[@]}"; do
|
||||||
|
cmd+=(--label "${label}")
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
"${cmd[@]}"
|
||||||
310
Scripts/provision-forgejo-nsc.sh
Executable file
310
Scripts/provision-forgejo-nsc.sh
Executable file
|
|
@ -0,0 +1,310 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||||
|
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
|
||||||
|
|
||||||
|
# shellcheck source=Scripts/_burrow-flake.sh
|
||||||
|
source "${SCRIPT_DIR}/_burrow-flake.sh"
|
||||||
|
# shellcheck source=Scripts/_burrow-secrets.sh
|
||||||
|
source "${SCRIPT_DIR}/_burrow-secrets.sh"
|
||||||
|
|
||||||
|
usage() {
|
||||||
|
cat <<'EOF'
|
||||||
|
Usage: Scripts/provision-forgejo-nsc.sh [options]
|
||||||
|
|
||||||
|
Generate Burrow forgejo-nsc runtime inputs and refresh the authoritative
|
||||||
|
`secrets/forgejo/*.age` files, optionally refreshing the Namespace token from
|
||||||
|
the currently logged-in namespace account.
|
||||||
|
|
||||||
|
Options:
|
||||||
|
--host <user@host> SSH target used to mint the Forgejo PAT.
|
||||||
|
Default: root@git.burrow.net
|
||||||
|
--ssh-key <path> SSH private key for the forge host.
|
||||||
|
Default: secrets/forgejo/agent-ssh-key.age, then intake/
|
||||||
|
--nsc-bin <path> Override the nsc binary.
|
||||||
|
--no-refresh-token Reuse the existing encrypted Namespace token if it already exists.
|
||||||
|
--token-name <name> Forgejo PAT name prefix (default: forgejo-nsc)
|
||||||
|
--contact-user <name> Forgejo username used for PAT creation (default: contact)
|
||||||
|
--scope-owner <name> Forgejo org/user owner for the default NSC scope (default: hackclub)
|
||||||
|
--scope-name <name> Forgejo repository name for the default NSC scope (default: burrow)
|
||||||
|
-h, --help Show this help text.
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
|
||||||
|
SSH_KEY="${BURROW_FORGE_SSH_KEY:-}"
|
||||||
|
NSC_BIN="${NSC_BIN:-}"
|
||||||
|
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
|
||||||
|
REFRESH_TOKEN=1
|
||||||
|
TOKEN_NAME_PREFIX="${FORGEJO_PAT_NAME:-forgejo-nsc}"
|
||||||
|
CONTACT_USER="${FORGEJO_CONTACT_USER:-contact}"
|
||||||
|
SCOPE_OWNER="${FORGEJO_SCOPE_OWNER:-hackclub}"
|
||||||
|
SCOPE_NAME="${FORGEJO_SCOPE_NAME:-burrow}"
|
||||||
|
BURROW_FLAKE_TMPDIRS=()
|
||||||
|
TMP_DIR=""
|
||||||
|
|
||||||
|
cleanup() {
|
||||||
|
[[ -n "${TMP_DIR}" ]] && rm -rf "${TMP_DIR}" >/dev/null 2>&1 || true
|
||||||
|
burrow_cleanup_secret_tmpfiles
|
||||||
|
burrow_cleanup_flake_tmpdirs
|
||||||
|
}
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case "$1" in
|
||||||
|
--host)
|
||||||
|
HOST="${2:?missing value for --host}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--ssh-key)
|
||||||
|
SSH_KEY="${2:?missing value for --ssh-key}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--nsc-bin)
|
||||||
|
NSC_BIN="${2:?missing value for --nsc-bin}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--no-refresh-token)
|
||||||
|
REFRESH_TOKEN=0
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
--token-name)
|
||||||
|
TOKEN_NAME_PREFIX="${2:?missing value for --token-name}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--contact-user)
|
||||||
|
CONTACT_USER="${2:?missing value for --contact-user}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--scope-owner)
|
||||||
|
SCOPE_OWNER="${2:?missing value for --scope-owner}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--scope-name)
|
||||||
|
SCOPE_NAME="${2:?missing value for --scope-name}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
usage
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "unknown option: $1" >&2
|
||||||
|
usage >&2
|
||||||
|
exit 64
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"
|
||||||
|
|
||||||
|
burrow_require_cmd nix
|
||||||
|
burrow_require_cmd ssh
|
||||||
|
burrow_require_cmd python3
|
||||||
|
|
||||||
|
SSH_KEY="$(
|
||||||
|
burrow_resolve_secret_file \
|
||||||
|
"${REPO_ROOT}" \
|
||||||
|
"${SSH_KEY}" \
|
||||||
|
"${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \
|
||||||
|
"${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \
|
||||||
|
"${HOME}/.ssh/agent_at_burrow_net_ed25519"
|
||||||
|
)"
|
||||||
|
TMP_DIR="$(mktemp -d "${TMPDIR:-/tmp}/burrow-forgejo-nsc.XXXXXX")"
|
||||||
|
|
||||||
|
flake_ref="$(burrow_prepare_flake_ref "${REPO_ROOT}")"
|
||||||
|
if [[ -z "${NSC_BIN}" ]]; then
|
||||||
|
if command -v nsc >/dev/null 2>&1; then
|
||||||
|
NSC_BIN="$(command -v nsc)"
|
||||||
|
else
|
||||||
|
nsc_build_output="$(
|
||||||
|
nix --extra-experimental-features "nix-command flakes" build \
|
||||||
|
"${flake_ref}#nsc" \
|
||||||
|
--no-link \
|
||||||
|
--print-out-paths 2>&1
|
||||||
|
)" || {
|
||||||
|
printf '%s\n' "${nsc_build_output}" >&2
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
NSC_BIN="$(printf '%s\n' "${nsc_build_output}" | tail -n1)/bin/nsc"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ ! -x "${NSC_BIN}" ]]; then
|
||||||
|
echo "unable to resolve an executable nsc binary; set NSC_BIN explicitly" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
token_file="${TMP_DIR}/forgejo_nsc_token.txt"
|
||||||
|
dispatcher_out="${TMP_DIR}/forgejo_nsc_dispatcher.yaml"
|
||||||
|
autoscaler_out="${TMP_DIR}/forgejo_nsc_autoscaler.yaml"
|
||||||
|
dispatcher_src="${REPO_ROOT}/services/forgejo-nsc/deploy/dispatcher.yaml"
|
||||||
|
autoscaler_src="${REPO_ROOT}/services/forgejo-nsc/deploy/autoscaler.yaml"
|
||||||
|
token_secret="${REPO_ROOT}/secrets/forgejo/nsc-token.age"
|
||||||
|
dispatcher_secret="${REPO_ROOT}/secrets/forgejo/nsc-dispatcher-config.age"
|
||||||
|
autoscaler_secret="${REPO_ROOT}/secrets/forgejo/nsc-autoscaler-config.age"
|
||||||
|
|
||||||
|
if [[ "${REFRESH_TOKEN}" -eq 1 ]]; then
|
||||||
|
ssh \
|
||||||
|
-i "${SSH_KEY}" \
|
||||||
|
-o IdentitiesOnly=yes \
|
||||||
|
-o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \
|
||||||
|
-o StrictHostKeyChecking=accept-new \
|
||||||
|
"${HOST}" \
|
||||||
|
'sudo -u forgejo-nsc python3 - <<'"'"'PY'"'"'
|
||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
payload = {}
|
||||||
|
|
||||||
|
token_json = Path("/var/lib/forgejo-nsc/.config/ns/token.json")
|
||||||
|
if token_json.exists():
|
||||||
|
data = json.loads(token_json.read_text(encoding="utf-8"))
|
||||||
|
session = str(data.get("session_token", "")).strip()
|
||||||
|
if session:
|
||||||
|
payload["session_token"] = session
|
||||||
|
|
||||||
|
token_cache = Path("/var/lib/forgejo-nsc/.config/ns/token.cache")
|
||||||
|
if token_cache.exists():
|
||||||
|
bearer = token_cache.read_text(encoding="utf-8").strip()
|
||||||
|
if bearer:
|
||||||
|
payload["bearer_token"] = bearer
|
||||||
|
|
||||||
|
if not payload:
|
||||||
|
raise SystemExit("forgejo-nsc host does not have a usable Namespace session")
|
||||||
|
|
||||||
|
print(json.dumps(payload, indent=2))
|
||||||
|
PY' > "${token_file}"
|
||||||
|
chmod 600 "${token_file}"
|
||||||
|
elif [[ -f "${token_secret}" ]]; then
|
||||||
|
burrow_decrypt_age_secret_to_temp "${REPO_ROOT}" "${token_secret}" > "${token_file}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -s "${token_file}" ]]; then
|
||||||
|
TOKEN_FILE="${token_file}" python3 - <<'PY'
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
path = Path(os.environ["TOKEN_FILE"])
|
||||||
|
raw = path.read_text(encoding="utf-8").strip()
|
||||||
|
if not raw:
|
||||||
|
raise SystemExit(0)
|
||||||
|
|
||||||
|
try:
|
||||||
|
parsed = json.loads(raw)
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
parsed = None
|
||||||
|
|
||||||
|
if isinstance(parsed, dict):
|
||||||
|
bearer = parsed.get("bearer_token")
|
||||||
|
session = parsed.get("session_token")
|
||||||
|
if isinstance(bearer, str) and bearer.strip():
|
||||||
|
raise SystemExit(0)
|
||||||
|
if isinstance(session, str) and session.strip():
|
||||||
|
raise SystemExit(0)
|
||||||
|
|
||||||
|
path.write_text(json.dumps({"bearer_token": raw}, indent=2) + "\n", encoding="utf-8")
|
||||||
|
PY
|
||||||
|
fi
|
||||||
|
|
||||||
|
webhook_secret="$(python3 - <<'PY'
|
||||||
|
import secrets
|
||||||
|
print(secrets.token_hex(32))
|
||||||
|
PY
|
||||||
|
)"
|
||||||
|
|
||||||
|
token_name="${TOKEN_NAME_PREFIX}-$(date -u +%Y%m%dT%H%M%SZ)"
|
||||||
|
forgejo_pat="$(
|
||||||
|
ssh \
|
||||||
|
-i "${SSH_KEY}" \
|
||||||
|
-o IdentitiesOnly=yes \
|
||||||
|
-o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \
|
||||||
|
-o StrictHostKeyChecking=accept-new \
|
||||||
|
"${HOST}" \
|
||||||
|
"set -euo pipefail; forgejo_bin=\$(systemctl show -p ExecStart forgejo.service --value | sed -E 's/^\\{ path=([^ ;]+).*/\\1/'); sudo -u forgejo \"\${forgejo_bin}\" --config /var/lib/forgejo/custom/conf/app.ini --custom-path /var/lib/forgejo/custom --work-path /var/lib/forgejo admin user generate-access-token --username '${CONTACT_USER}' --scopes all --raw --token-name '${token_name}'" \
|
||||||
|
| tr -d '\r\n'
|
||||||
|
)"
|
||||||
|
|
||||||
|
if [[ -z "${forgejo_pat}" ]]; then
|
||||||
|
echo "failed to mint Forgejo PAT on ${HOST}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
ssh \
|
||||||
|
-i "${SSH_KEY}" \
|
||||||
|
-o IdentitiesOnly=yes \
|
||||||
|
-o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \
|
||||||
|
-o StrictHostKeyChecking=accept-new \
|
||||||
|
"${HOST}" \
|
||||||
|
'bash -s' <<EOF
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
base_url='http://127.0.0.1:3000'
|
||||||
|
token='${forgejo_pat}'
|
||||||
|
scope_owner='${SCOPE_OWNER}'
|
||||||
|
scope_name='${SCOPE_NAME}'
|
||||||
|
|
||||||
|
api() {
|
||||||
|
curl -sS -o /tmp/forgejo-provision-response.json -w '%{http_code}' \
|
||||||
|
-H "Authorization: token \${token}" \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
"\$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
org_code="\$(api "\${base_url}/api/v1/orgs/\${scope_owner}")"
|
||||||
|
if [[ "\${org_code}" == "404" ]]; then
|
||||||
|
cat >/tmp/forgejo-provision-org.json <<JSON
|
||||||
|
{"username":"${SCOPE_OWNER}","full_name":"${SCOPE_OWNER}","visibility":"public"}
|
||||||
|
JSON
|
||||||
|
org_code="\$(api -X POST --data @/tmp/forgejo-provision-org.json "\${base_url}/api/v1/orgs")"
|
||||||
|
if [[ "\${org_code}" != "201" ]]; then
|
||||||
|
echo "failed to create Forgejo org ${SCOPE_OWNER} (HTTP \${org_code})" >&2
|
||||||
|
cat /tmp/forgejo-provision-response.json >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
repo_code="\$(api "\${base_url}/api/v1/repos/\${scope_owner}/\${scope_name}")"
|
||||||
|
if [[ "\${repo_code}" == "404" ]]; then
|
||||||
|
cat >/tmp/forgejo-provision-repo.json <<JSON
|
||||||
|
{"name":"${SCOPE_NAME}","description":"Burrow forge bootstrap repository","private":false,"default_branch":"main","auto_init":false}
|
||||||
|
JSON
|
||||||
|
repo_code="\$(api -X POST --data @/tmp/forgejo-provision-repo.json "\${base_url}/api/v1/orgs/\${scope_owner}/repos")"
|
||||||
|
if [[ "\${repo_code}" != "201" ]]; then
|
||||||
|
echo "failed to create Forgejo repo ${SCOPE_OWNER}/${SCOPE_NAME} (HTTP \${repo_code})" >&2
|
||||||
|
cat /tmp/forgejo-provision-response.json >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
EOF
|
||||||
|
|
||||||
|
FORGEJO_PAT="${forgejo_pat}" \
|
||||||
|
WEBHOOK_SECRET="${webhook_secret}" \
|
||||||
|
DISPATCHER_SRC="${dispatcher_src}" \
|
||||||
|
AUTOSCALER_SRC="${autoscaler_src}" \
|
||||||
|
DISPATCHER_OUT="${dispatcher_out}" \
|
||||||
|
AUTOSCALER_OUT="${autoscaler_out}" \
|
||||||
|
python3 - <<'PY'
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
def render(src: str, dst: str) -> None:
|
||||||
|
text = Path(src).read_text(encoding="utf-8")
|
||||||
|
text = text.replace("PENDING-FORGEJO-PAT", os.environ["FORGEJO_PAT"])
|
||||||
|
text = text.replace("PENDING-WEBHOOK-SECRET", os.environ["WEBHOOK_SECRET"])
|
||||||
|
Path(dst).write_text(text, encoding="utf-8")
|
||||||
|
|
||||||
|
render(os.environ["DISPATCHER_SRC"], os.environ["DISPATCHER_OUT"])
|
||||||
|
render(os.environ["AUTOSCALER_SRC"], os.environ["AUTOSCALER_OUT"])
|
||||||
|
PY
|
||||||
|
|
||||||
|
chmod 600 "${dispatcher_out}" "${autoscaler_out}"
|
||||||
|
|
||||||
|
burrow_encrypt_secret_from_file "${REPO_ROOT}" "${token_secret}" "${token_file}"
|
||||||
|
burrow_encrypt_secret_from_file "${REPO_ROOT}" "${dispatcher_secret}" "${dispatcher_out}"
|
||||||
|
burrow_encrypt_secret_from_file "${REPO_ROOT}" "${autoscaler_secret}" "${autoscaler_out}"
|
||||||
|
|
||||||
|
echo "Updated secrets/forgejo/{nsc-token,nsc-dispatcher-config,nsc-autoscaler-config}.age."
|
||||||
|
echo "Minted Forgejo PAT ${token_name} for ${CONTACT_USER} on ${HOST}."
|
||||||
109
Scripts/sync-forgejo-nsc-config.sh
Executable file
109
Scripts/sync-forgejo-nsc-config.sh
Executable file
|
|
@ -0,0 +1,109 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
usage() {
|
||||||
|
cat <<'EOF'
|
||||||
|
Usage: Scripts/sync-forgejo-nsc-config.sh [options]
|
||||||
|
|
||||||
|
Deploy Burrow forgejo-nsc runtime inputs from age secrets onto the forge host.
|
||||||
|
|
||||||
|
Options:
|
||||||
|
--host <user@host> SSH target (default: root@git.burrow.net)
|
||||||
|
--ssh-key <path> SSH private key (default: secrets/forgejo/agent-ssh-key.age, then intake/)
|
||||||
|
--rotate-pat Re-render the encrypted runtime inputs before deploying.
|
||||||
|
--no-restart Validate the encrypted inputs only; do not deploy.
|
||||||
|
-h, --help Show this help text.
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||||
|
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
|
||||||
|
# shellcheck source=Scripts/_burrow-secrets.sh
|
||||||
|
source "${SCRIPT_DIR}/_burrow-secrets.sh"
|
||||||
|
|
||||||
|
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
|
||||||
|
SSH_KEY="${BURROW_FORGE_SSH_KEY:-}"
|
||||||
|
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
|
||||||
|
ROTATE_PAT=0
|
||||||
|
NO_RESTART=0
|
||||||
|
TMP_DIR=""
|
||||||
|
|
||||||
|
cleanup() {
|
||||||
|
[[ -n "${TMP_DIR}" ]] && rm -rf "${TMP_DIR}" >/dev/null 2>&1 || true
|
||||||
|
burrow_cleanup_secret_tmpfiles
|
||||||
|
}
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case "$1" in
|
||||||
|
--host)
|
||||||
|
HOST="${2:?missing value for --host}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--ssh-key)
|
||||||
|
SSH_KEY="${2:?missing value for --ssh-key}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--rotate-pat)
|
||||||
|
ROTATE_PAT=1
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
--no-restart)
|
||||||
|
NO_RESTART=1
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
usage
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "unknown option: $1" >&2
|
||||||
|
usage >&2
|
||||||
|
exit 64
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"
|
||||||
|
|
||||||
|
burrow_require_cmd() {
|
||||||
|
if ! command -v "$1" >/dev/null 2>&1; then
|
||||||
|
echo "missing required command: $1" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
burrow_require_cmd ssh
|
||||||
|
|
||||||
|
SSH_KEY="$(
|
||||||
|
burrow_resolve_secret_file \
|
||||||
|
"${REPO_ROOT}" \
|
||||||
|
"${SSH_KEY}" \
|
||||||
|
"${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \
|
||||||
|
"${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \
|
||||||
|
"${HOME}/.ssh/agent_at_burrow_net_ed25519"
|
||||||
|
)"
|
||||||
|
|
||||||
|
if [[ "${ROTATE_PAT}" -eq 1 ]]; then
|
||||||
|
"${SCRIPT_DIR}/provision-forgejo-nsc.sh" --host "${HOST}" --ssh-key "${SSH_KEY}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
token_file="${REPO_ROOT}/secrets/forgejo/nsc-token.age"
|
||||||
|
dispatcher_file="${REPO_ROOT}/secrets/forgejo/nsc-dispatcher-config.age"
|
||||||
|
autoscaler_file="${REPO_ROOT}/secrets/forgejo/nsc-autoscaler-config.age"
|
||||||
|
|
||||||
|
for path in "${token_file}" "${dispatcher_file}" "${autoscaler_file}"; do
|
||||||
|
if [[ ! -s "${path}" ]]; then
|
||||||
|
echo "required runtime input missing or empty: ${path}" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
if [[ "${NO_RESTART}" -eq 0 ]]; then
|
||||||
|
BURROW_FORGE_HOST="${HOST}" \
|
||||||
|
BURROW_FORGE_SSH_KEY="${SSH_KEY}" \
|
||||||
|
BURROW_FORGE_KNOWN_HOSTS_FILE="${KNOWN_HOSTS_FILE}" \
|
||||||
|
"${SCRIPT_DIR}/forge-deploy.sh" --switch
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "forgejo-nsc runtime sync complete (host=${HOST}, deployed=$((1 - NO_RESTART)))."
|
||||||
203
Tools/forwardemail-custom-s3.sh
Executable file
203
Tools/forwardemail-custom-s3.sh
Executable file
|
|
@ -0,0 +1,203 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -euo pipefail
|
||||||
|
umask 077
|
||||||
|
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||||
|
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
|
||||||
|
# shellcheck source=Scripts/_burrow-secrets.sh
|
||||||
|
source "${REPO_ROOT}/Scripts/_burrow-secrets.sh"
|
||||||
|
|
||||||
|
usage() {
|
||||||
|
cat <<'EOF'
|
||||||
|
Usage:
|
||||||
|
Tools/forwardemail-custom-s3.sh \
|
||||||
|
--domain burrow.net \
|
||||||
|
--api-token-file secrets/forwardemail/api-token.age \
|
||||||
|
--s3-endpoint https://<endpoint> \
|
||||||
|
--s3-region <region> \
|
||||||
|
--s3-bucket <bucket> \
|
||||||
|
--s3-access-key-file secrets/forwardemail/hetzner-s3-user.age \
|
||||||
|
--s3-secret-key-file secrets/forwardemail/hetzner-s3-secret.age
|
||||||
|
|
||||||
|
Options:
|
||||||
|
--domain <domain> Forward Email domain to update.
|
||||||
|
--api-token-file <path> File containing the Forward Email API token.
|
||||||
|
--s3-endpoint <url> S3-compatible endpoint URL.
|
||||||
|
--s3-region <region> S3 region string expected by Forward Email.
|
||||||
|
--s3-bucket <name> Bucket used for alias backup uploads.
|
||||||
|
--s3-access-key-file <path> File containing the S3 access key id.
|
||||||
|
--s3-secret-key-file <path> File containing the S3 secret access key.
|
||||||
|
--test-only Skip the update call and only test the saved connection.
|
||||||
|
--help Show this help text.
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
- Secrets are passed to curl through a temporary config file to avoid putting
|
||||||
|
them in the process list.
|
||||||
|
- By default the script updates the domain settings and then calls
|
||||||
|
/test-s3-connection.
|
||||||
|
- For Hetzner Object Storage, use the regional S3 endpoint such as
|
||||||
|
https://hel1.your-objectstorage.com, not an account alias endpoint.
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
fail() {
|
||||||
|
printf 'error: %s\n' "$*" >&2
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
require_file() {
|
||||||
|
local path="$1"
|
||||||
|
[[ -f "$path" ]] || fail "missing file: $path"
|
||||||
|
}
|
||||||
|
|
||||||
|
read_secret() {
|
||||||
|
local path="$1"
|
||||||
|
local value
|
||||||
|
value="$(tr -d '\r\n' < "$path")"
|
||||||
|
[[ -n "$value" ]] || fail "empty secret file: $path"
|
||||||
|
printf '%s' "$value"
|
||||||
|
}
|
||||||
|
|
||||||
|
cleanup() {
|
||||||
|
burrow_cleanup_secret_tmpfiles
|
||||||
|
}
|
||||||
|
trap cleanup EXIT
|
||||||
|
|
||||||
|
domain=""
|
||||||
|
api_token_file="${FORWARDEMAIL_API_TOKEN_FILE:-}"
|
||||||
|
s3_endpoint=""
|
||||||
|
s3_region=""
|
||||||
|
s3_bucket=""
|
||||||
|
s3_access_key_file="${FORWARDEMAIL_S3_ACCESS_KEY_FILE:-}"
|
||||||
|
s3_secret_key_file="${FORWARDEMAIL_S3_SECRET_KEY_FILE:-}"
|
||||||
|
test_only=false
|
||||||
|
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case "$1" in
|
||||||
|
--domain)
|
||||||
|
domain="${2:-}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--api-token-file)
|
||||||
|
api_token_file="${2:-}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--s3-endpoint)
|
||||||
|
s3_endpoint="${2:-}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--s3-region)
|
||||||
|
s3_region="${2:-}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--s3-bucket)
|
||||||
|
s3_bucket="${2:-}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--s3-access-key-file)
|
||||||
|
s3_access_key_file="${2:-}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--s3-secret-key-file)
|
||||||
|
s3_secret_key_file="${2:-}"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
--test-only)
|
||||||
|
test_only=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
--help|-h)
|
||||||
|
usage
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
fail "unknown argument: $1"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
[[ -n "$domain" ]] || fail "--domain is required"
|
||||||
|
[[ -n "$s3_endpoint" || "$test_only" == true ]] || fail "--s3-endpoint is required unless --test-only is set"
|
||||||
|
[[ -n "$s3_region" || "$test_only" == true ]] || fail "--s3-region is required unless --test-only is set"
|
||||||
|
[[ -n "$s3_bucket" || "$test_only" == true ]] || fail "--s3-bucket is required unless --test-only is set"
|
||||||
|
api_token_file="$(
|
||||||
|
burrow_resolve_secret_file \
|
||||||
|
"${REPO_ROOT}" \
|
||||||
|
"${api_token_file}" \
|
||||||
|
"${REPO_ROOT}/intake/forwardemail_api_token.txt" \
|
||||||
|
"${REPO_ROOT}/secrets/forwardemail/api-token.age"
|
||||||
|
)" || fail "unable to resolve Forward Email API token file"
|
||||||
|
require_file "$api_token_file"
|
||||||
|
api_token="$(read_secret "$api_token_file")"
|
||||||
|
|
||||||
|
if [[ "$test_only" != true ]]; then
|
||||||
|
s3_access_key_file="$(
|
||||||
|
burrow_resolve_secret_file \
|
||||||
|
"${REPO_ROOT}" \
|
||||||
|
"${s3_access_key_file}" \
|
||||||
|
"${REPO_ROOT}/intake/hetzner-s3-user.txt" \
|
||||||
|
"${REPO_ROOT}/secrets/forwardemail/hetzner-s3-user.age"
|
||||||
|
)" || fail "unable to resolve Hetzner S3 access key file"
|
||||||
|
s3_secret_key_file="$(
|
||||||
|
burrow_resolve_secret_file \
|
||||||
|
"${REPO_ROOT}" \
|
||||||
|
"${s3_secret_key_file}" \
|
||||||
|
"${REPO_ROOT}/intake/hetzner-s3-secret.txt" \
|
||||||
|
"${REPO_ROOT}/secrets/forwardemail/hetzner-s3-secret.age"
|
||||||
|
)" || fail "unable to resolve Hetzner S3 secret key file"
|
||||||
|
require_file "$s3_access_key_file"
|
||||||
|
require_file "$s3_secret_key_file"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "$test_only" == false ]]; then
|
||||||
|
require_file "$s3_access_key_file"
|
||||||
|
require_file "$s3_secret_key_file"
|
||||||
|
s3_access_key_id="$(read_secret "$s3_access_key_file")"
|
||||||
|
s3_secret_access_key="$(read_secret "$s3_secret_key_file")"
|
||||||
|
|
||||||
|
case "$s3_endpoint" in
|
||||||
|
http://*|https://*)
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
fail "--s3-endpoint must start with http:// or https://"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
fi
|
||||||
|
|
||||||
|
curl_config="$(mktemp)"
|
||||||
|
trap 'rm -f "$curl_config"' EXIT
|
||||||
|
|
||||||
|
if [[ "$test_only" == false ]]; then
|
||||||
|
cat >"$curl_config" <<EOF
|
||||||
|
silent
|
||||||
|
show-error
|
||||||
|
fail-with-body
|
||||||
|
url = "https://api.forwardemail.net/v1/domains/${domain}"
|
||||||
|
request = "PUT"
|
||||||
|
user = "${api_token}:"
|
||||||
|
data = "has_custom_s3=true"
|
||||||
|
data-urlencode = "s3_endpoint=${s3_endpoint}"
|
||||||
|
data-urlencode = "s3_access_key_id=${s3_access_key_id}"
|
||||||
|
data-urlencode = "s3_secret_access_key=${s3_secret_access_key}"
|
||||||
|
data-urlencode = "s3_region=${s3_region}"
|
||||||
|
data-urlencode = "s3_bucket=${s3_bucket}"
|
||||||
|
EOF
|
||||||
|
|
||||||
|
printf 'Configuring Forward Email custom S3 for %s\n' "$domain" >&2
|
||||||
|
curl --config "$curl_config"
|
||||||
|
printf '\n' >&2
|
||||||
|
fi
|
||||||
|
|
||||||
|
cat >"$curl_config" <<EOF
|
||||||
|
silent
|
||||||
|
show-error
|
||||||
|
fail-with-body
|
||||||
|
url = "https://api.forwardemail.net/v1/domains/${domain}/test-s3-connection"
|
||||||
|
request = "POST"
|
||||||
|
user = "${api_token}:"
|
||||||
|
EOF
|
||||||
|
|
||||||
|
printf 'Testing Forward Email custom S3 for %s\n' "$domain" >&2
|
||||||
|
curl --config "$curl_config"
|
||||||
|
printf '\n' >&2
|
||||||
289
Tools/forwardemail-hetzner-storage.py
Executable file
289
Tools/forwardemail-hetzner-storage.py
Executable file
|
|
@ -0,0 +1,289 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import datetime as dt
|
||||||
|
import hashlib
|
||||||
|
import hmac
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import textwrap
|
||||||
|
from pathlib import Path
|
||||||
|
from urllib.parse import urlencode, urlparse
|
||||||
|
|
||||||
|
import requests
|
||||||
|
|
||||||
|
REPO_ROOT = Path(__file__).resolve().parent.parent
|
||||||
|
|
||||||
|
|
||||||
|
def default_secret_path(age_rel: str, intake_rel: str) -> str:
|
||||||
|
age_path = REPO_ROOT / age_rel
|
||||||
|
if age_path.exists():
|
||||||
|
return str(age_path)
|
||||||
|
return intake_rel
|
||||||
|
|
||||||
|
|
||||||
|
def read_secret(path: str) -> str:
|
||||||
|
file_path = Path(path)
|
||||||
|
if not file_path.is_absolute():
|
||||||
|
file_path = REPO_ROOT / file_path
|
||||||
|
if file_path.suffix == ".age":
|
||||||
|
value = subprocess.check_output(
|
||||||
|
[
|
||||||
|
"nix",
|
||||||
|
"--extra-experimental-features",
|
||||||
|
"nix-command flakes",
|
||||||
|
"run",
|
||||||
|
f"{REPO_ROOT}#agenix",
|
||||||
|
"--",
|
||||||
|
"-d",
|
||||||
|
str(file_path),
|
||||||
|
],
|
||||||
|
text=True,
|
||||||
|
).strip()
|
||||||
|
else:
|
||||||
|
value = file_path.read_text(encoding="utf-8").strip()
|
||||||
|
if not value:
|
||||||
|
raise SystemExit(f"error: empty secret file: {file_path}")
|
||||||
|
return value
|
||||||
|
|
||||||
|
|
||||||
|
def sign(key: bytes, msg: str) -> bytes:
|
||||||
|
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
|
||||||
|
|
||||||
|
|
||||||
|
def request(
|
||||||
|
*,
|
||||||
|
method: str,
|
||||||
|
endpoint: str,
|
||||||
|
region: str,
|
||||||
|
access_key: str,
|
||||||
|
secret_key: str,
|
||||||
|
bucket: str,
|
||||||
|
query: dict[str, str] | None = None,
|
||||||
|
body: bytes = b"",
|
||||||
|
content_type: str | None = None,
|
||||||
|
) -> requests.Response:
|
||||||
|
parsed = urlparse(endpoint)
|
||||||
|
if parsed.scheme != "https":
|
||||||
|
raise SystemExit("error: endpoint must use https")
|
||||||
|
|
||||||
|
host = parsed.netloc
|
||||||
|
canonical_uri = f"/{bucket}"
|
||||||
|
query = query or {}
|
||||||
|
canonical_querystring = urlencode(sorted(query.items()), doseq=True, safe="~")
|
||||||
|
|
||||||
|
now = dt.datetime.now(dt.timezone.utc)
|
||||||
|
amz_date = now.strftime("%Y%m%dT%H%M%SZ")
|
||||||
|
date_stamp = now.strftime("%Y%m%d")
|
||||||
|
payload_hash = hashlib.sha256(body).hexdigest()
|
||||||
|
|
||||||
|
headers = {
|
||||||
|
"host": host,
|
||||||
|
"x-amz-content-sha256": payload_hash,
|
||||||
|
"x-amz-date": amz_date,
|
||||||
|
}
|
||||||
|
if content_type:
|
||||||
|
headers["content-type"] = content_type
|
||||||
|
|
||||||
|
signed_headers = ";".join(sorted(headers.keys()))
|
||||||
|
canonical_headers = "".join(f"{name}:{headers[name]}\n" for name in sorted(headers.keys()))
|
||||||
|
canonical_request = "\n".join(
|
||||||
|
[
|
||||||
|
method,
|
||||||
|
canonical_uri,
|
||||||
|
canonical_querystring,
|
||||||
|
canonical_headers,
|
||||||
|
signed_headers,
|
||||||
|
payload_hash,
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
algorithm = "AWS4-HMAC-SHA256"
|
||||||
|
credential_scope = f"{date_stamp}/{region}/s3/aws4_request"
|
||||||
|
string_to_sign = "\n".join(
|
||||||
|
[
|
||||||
|
algorithm,
|
||||||
|
amz_date,
|
||||||
|
credential_scope,
|
||||||
|
hashlib.sha256(canonical_request.encode("utf-8")).hexdigest(),
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
k_date = sign(("AWS4" + secret_key).encode("utf-8"), date_stamp)
|
||||||
|
k_region = sign(k_date, region)
|
||||||
|
k_service = sign(k_region, "s3")
|
||||||
|
signing_key = sign(k_service, "aws4_request")
|
||||||
|
signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
|
||||||
|
|
||||||
|
auth_header = (
|
||||||
|
f"{algorithm} Credential={access_key}/{credential_scope}, "
|
||||||
|
f"SignedHeaders={signed_headers}, Signature={signature}"
|
||||||
|
)
|
||||||
|
|
||||||
|
url = f"{parsed.scheme}://{host}{canonical_uri}"
|
||||||
|
if canonical_querystring:
|
||||||
|
url = f"{url}?{canonical_querystring}"
|
||||||
|
|
||||||
|
response = requests.request(
|
||||||
|
method,
|
||||||
|
url,
|
||||||
|
headers={**headers, "Authorization": auth_header},
|
||||||
|
data=body,
|
||||||
|
timeout=30,
|
||||||
|
)
|
||||||
|
return response
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_bucket(args: argparse.Namespace, bucket: str) -> None:
|
||||||
|
head = request(
|
||||||
|
method="HEAD",
|
||||||
|
endpoint=args.endpoint,
|
||||||
|
region=args.region,
|
||||||
|
access_key=args.access_key,
|
||||||
|
secret_key=args.secret_key,
|
||||||
|
bucket=bucket,
|
||||||
|
)
|
||||||
|
if head.status_code == 200:
|
||||||
|
print(f"{bucket}: exists")
|
||||||
|
return
|
||||||
|
if head.status_code != 404:
|
||||||
|
raise SystemExit(f"error: HEAD {bucket} returned {head.status_code}: {head.text[:200]}")
|
||||||
|
|
||||||
|
body = textwrap.dedent(
|
||||||
|
f"""\
|
||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
||||||
|
<LocationConstraint>{args.region}</LocationConstraint>
|
||||||
|
</CreateBucketConfiguration>
|
||||||
|
"""
|
||||||
|
).encode("utf-8")
|
||||||
|
create = request(
|
||||||
|
method="PUT",
|
||||||
|
endpoint=args.endpoint,
|
||||||
|
region=args.region,
|
||||||
|
access_key=args.access_key,
|
||||||
|
secret_key=args.secret_key,
|
||||||
|
bucket=bucket,
|
||||||
|
body=body,
|
||||||
|
content_type="application/xml",
|
||||||
|
)
|
||||||
|
if create.status_code not in (200, 204):
|
||||||
|
raise SystemExit(f"error: PUT {bucket} returned {create.status_code}: {create.text[:200]}")
|
||||||
|
print(f"{bucket}: created")
|
||||||
|
|
||||||
|
|
||||||
|
def put_lifecycle(args: argparse.Namespace, bucket: str) -> None:
|
||||||
|
body = textwrap.dedent(
|
||||||
|
f"""\
|
||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
||||||
|
<Rule>
|
||||||
|
<ID>expire-forwardemail-backups-after-{args.expire_days}-days</ID>
|
||||||
|
<Status>Enabled</Status>
|
||||||
|
<Filter>
|
||||||
|
<Prefix></Prefix>
|
||||||
|
</Filter>
|
||||||
|
<Expiration>
|
||||||
|
<Days>{args.expire_days}</Days>
|
||||||
|
</Expiration>
|
||||||
|
</Rule>
|
||||||
|
</LifecycleConfiguration>
|
||||||
|
"""
|
||||||
|
).encode("utf-8")
|
||||||
|
response = request(
|
||||||
|
method="PUT",
|
||||||
|
endpoint=args.endpoint,
|
||||||
|
region=args.region,
|
||||||
|
access_key=args.access_key,
|
||||||
|
secret_key=args.secret_key,
|
||||||
|
bucket=bucket,
|
||||||
|
query={"lifecycle": ""},
|
||||||
|
body=body,
|
||||||
|
content_type="application/xml",
|
||||||
|
)
|
||||||
|
if response.status_code not in (200, 204):
|
||||||
|
raise SystemExit(
|
||||||
|
f"error: PUT lifecycle for {bucket} returned {response.status_code}: {response.text[:200]}"
|
||||||
|
)
|
||||||
|
print(f"{bucket}: lifecycle set to {args.expire_days} days")
|
||||||
|
|
||||||
|
|
||||||
|
def get_lifecycle(args: argparse.Namespace, bucket: str) -> None:
|
||||||
|
response = request(
|
||||||
|
method="GET",
|
||||||
|
endpoint=args.endpoint,
|
||||||
|
region=args.region,
|
||||||
|
access_key=args.access_key,
|
||||||
|
secret_key=args.secret_key,
|
||||||
|
bucket=bucket,
|
||||||
|
query={"lifecycle": ""},
|
||||||
|
)
|
||||||
|
if response.status_code != 200:
|
||||||
|
raise SystemExit(
|
||||||
|
f"error: GET lifecycle for {bucket} returned {response.status_code}: {response.text[:200]}"
|
||||||
|
)
|
||||||
|
print(f"=== {bucket} lifecycle ===")
|
||||||
|
print(response.text.strip())
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args() -> argparse.Namespace:
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Provision Hetzner object-storage buckets for Forward Email backups."
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--endpoint",
|
||||||
|
default="https://hel1.your-objectstorage.com",
|
||||||
|
help="Public S3-compatible endpoint URL. For Hetzner, use the regional endpoint, not the account alias.",
|
||||||
|
)
|
||||||
|
parser.add_argument("--region", default="hel1", help="S3 region.")
|
||||||
|
parser.add_argument(
|
||||||
|
"--access-key-file",
|
||||||
|
default=default_secret_path("secrets/forwardemail/hetzner-s3-user.age", "intake/hetzner-s3-user.txt"),
|
||||||
|
help="File containing the S3 access key id.",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--secret-key-file",
|
||||||
|
default=default_secret_path("secrets/forwardemail/hetzner-s3-secret.age", "intake/hetzner-s3-secret.txt"),
|
||||||
|
help="File containing the S3 secret key.",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--bucket",
|
||||||
|
action="append",
|
||||||
|
required=True,
|
||||||
|
help="Bucket to provision. Repeat for multiple buckets.",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--expire-days",
|
||||||
|
type=int,
|
||||||
|
default=90,
|
||||||
|
help="Lifecycle expiry window in days.",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--verify-only",
|
||||||
|
action="store_true",
|
||||||
|
help="Skip create/update and only read the current lifecycle.",
|
||||||
|
)
|
||||||
|
return parser.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> None:
|
||||||
|
args = parse_args()
|
||||||
|
args.access_key = read_secret(args.access_key_file)
|
||||||
|
args.secret_key = read_secret(args.secret_key_file)
|
||||||
|
|
||||||
|
for bucket in args.bucket:
|
||||||
|
if args.verify_only:
|
||||||
|
get_lifecycle(args, bucket)
|
||||||
|
continue
|
||||||
|
ensure_bucket(args, bucket)
|
||||||
|
put_lifecycle(args, bucket)
|
||||||
|
get_lifecycle(args, bucket)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
try:
|
||||||
|
main()
|
||||||
|
except requests.RequestException as err:
|
||||||
|
raise SystemExit(f"error: request failed: {err}") from err
|
||||||
|
|
@ -10,7 +10,7 @@ crate-type = ["lib", "staticlib"]
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = "1.0"
|
anyhow = "1.0"
|
||||||
tokio = { version = "1.37", features = [
|
tokio = { version = "1.50.0", features = [
|
||||||
"rt",
|
"rt",
|
||||||
"macros",
|
"macros",
|
||||||
"sync",
|
"sync",
|
||||||
|
|
@ -25,7 +25,6 @@ tun = { version = "0.1", path = "../tun", features = ["serde", "tokio"] }
|
||||||
clap = { version = "4.4", features = ["derive"] }
|
clap = { version = "4.4", features = ["derive"] }
|
||||||
tracing = "0.1"
|
tracing = "0.1"
|
||||||
tracing-log = "0.1"
|
tracing-log = "0.1"
|
||||||
tracing-oslog = { git = "https://github.com/Stormshield-robinc/tracing-oslog" }
|
|
||||||
tracing-subscriber = { version = "0.3", features = ["std", "env-filter"] }
|
tracing-subscriber = { version = "0.3", features = ["std", "env-filter"] }
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
serde = { version = "1", features = ["derive"] }
|
serde = { version = "1", features = ["derive"] }
|
||||||
|
|
@ -50,22 +49,25 @@ async-channel = "2.1"
|
||||||
schemars = "0.8"
|
schemars = "0.8"
|
||||||
futures = "0.3.28"
|
futures = "0.3.28"
|
||||||
once_cell = "1.19"
|
once_cell = "1.19"
|
||||||
|
arti-client = "0.40.0"
|
||||||
|
tokio-util = { version = "0.7.18", features = ["compat"] }
|
||||||
console-subscriber = { version = "0.2.0", optional = true }
|
console-subscriber = { version = "0.2.0", optional = true }
|
||||||
console = "0.15.8"
|
console = "0.15.8"
|
||||||
axum = "0.7.4"
|
axum = "0.8.8"
|
||||||
reqwest = { version = "0.12", default-features = false, features = [
|
reqwest = { version = "0.13.2", default-features = false, features = [
|
||||||
"json",
|
"json",
|
||||||
"rustls-tls",
|
"rustls",
|
||||||
] }
|
] }
|
||||||
rusqlite = { version = "0.31.0", features = ["blob"] }
|
rusqlite = { version = "0.38.0", features = ["blob"] }
|
||||||
dotenv = "0.15.0"
|
dotenv = "0.15.0"
|
||||||
tonic = "0.12.0"
|
tonic = "0.14.5"
|
||||||
prost = "0.13.1"
|
tonic-prost = "0.14.5"
|
||||||
prost-types = "0.13.1"
|
prost = "0.14.3"
|
||||||
tokio-stream = "0.1"
|
prost-types = "0.14.3"
|
||||||
|
tokio-stream = "0.1.18"
|
||||||
async-stream = "0.2"
|
async-stream = "0.2"
|
||||||
tower = "0.4.13"
|
tower = "0.5.3"
|
||||||
hyper-util = "0.1.6"
|
hyper-util = "0.1.20"
|
||||||
toml = "0.8.15"
|
toml = "0.8.15"
|
||||||
rust-ini = "0.21.0"
|
rust-ini = "0.21.0"
|
||||||
|
|
||||||
|
|
@ -73,10 +75,14 @@ rust-ini = "0.21.0"
|
||||||
caps = "0.5"
|
caps = "0.5"
|
||||||
libsystemd = "0.7"
|
libsystemd = "0.7"
|
||||||
tracing-journald = "0.3"
|
tracing-journald = "0.3"
|
||||||
|
libc = "0.2"
|
||||||
|
|
||||||
[target.'cfg(target_vendor = "apple")'.dependencies]
|
[target.'cfg(target_vendor = "apple")'.dependencies]
|
||||||
nix = { version = "0.27" }
|
nix = { version = "0.27", features = ["ioctl"] }
|
||||||
rusqlite = { version = "0.31.0", features = ["bundled", "blob"] }
|
rusqlite = { version = "0.38.0", features = ["bundled", "blob"] }
|
||||||
|
|
||||||
|
[target.'cfg(target_os = "macos")'.dependencies]
|
||||||
|
tracing-oslog = { git = "https://github.com/Stormshield-robinc/tracing-oslog" }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
insta = { version = "1.32", features = ["yaml"] }
|
insta = { version = "1.32", features = ["yaml"] }
|
||||||
|
|
@ -96,4 +102,4 @@ bundled = ["rusqlite/bundled"]
|
||||||
|
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
tonic-build = "0.12.0"
|
tonic-prost-build = "0.14.5"
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
tonic_build::compile_protos("../proto/burrow.proto")?;
|
tonic_prost_build::compile_protos("../proto/burrow.proto")?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,61 +1,186 @@
|
||||||
use std::{
|
use std::{
|
||||||
ops::Deref,
|
|
||||||
path::{Path, PathBuf},
|
path::{Path, PathBuf},
|
||||||
sync::Arc,
|
sync::Arc,
|
||||||
time::Duration,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
use anyhow::Result;
|
use anyhow::{anyhow, Context, Result};
|
||||||
use rusqlite::Connection;
|
use rusqlite::Connection;
|
||||||
use tokio::sync::{mpsc, watch, Notify, RwLock};
|
use tokio::{
|
||||||
|
sync::{mpsc, watch, RwLock},
|
||||||
|
task::JoinHandle,
|
||||||
|
};
|
||||||
use tokio_stream::wrappers::ReceiverStream;
|
use tokio_stream::wrappers::ReceiverStream;
|
||||||
use tonic::{Request, Response, Status as RspStatus};
|
use tonic::{Request, Response, Status as RspStatus};
|
||||||
use tracing::{debug, info, warn};
|
use tracing::warn;
|
||||||
use tun::{tokio::TunInterface, TunOptions};
|
use tun::{tokio::TunInterface, TunOptions};
|
||||||
|
|
||||||
use super::rpc::grpc_defs::{
|
use super::rpc::{
|
||||||
networks_server::Networks,
|
grpc_defs::{
|
||||||
tunnel_server::Tunnel,
|
networks_server::Networks, tunnel_server::Tunnel, Empty, Network, NetworkDeleteRequest,
|
||||||
Empty,
|
NetworkListResponse, NetworkReorderRequest, NetworkType, State as RPCTunnelState,
|
||||||
Network,
|
TunnelConfigurationResponse, TunnelStatusResponse,
|
||||||
NetworkDeleteRequest,
|
},
|
||||||
NetworkListResponse,
|
ServerConfig,
|
||||||
NetworkReorderRequest,
|
|
||||||
State as RPCTunnelState,
|
|
||||||
TunnelConfigurationResponse,
|
|
||||||
TunnelStatusResponse,
|
|
||||||
};
|
};
|
||||||
use crate::{
|
use crate::{
|
||||||
daemon::rpc::{
|
database::{add_network, delete_network, get_connection, list_networks, reorder_network},
|
||||||
DaemonCommand,
|
tor::{self, Config as TorConfig, TorHandle},
|
||||||
DaemonNotification,
|
wireguard::{Config as WireGuardConfig, Interface as WireGuardInterface},
|
||||||
DaemonResponse,
|
|
||||||
DaemonResponseData,
|
|
||||||
ServerConfig,
|
|
||||||
ServerInfo,
|
|
||||||
},
|
|
||||||
database::{
|
|
||||||
add_network,
|
|
||||||
delete_network,
|
|
||||||
get_connection,
|
|
||||||
list_networks,
|
|
||||||
load_interface,
|
|
||||||
reorder_network,
|
|
||||||
},
|
|
||||||
wireguard::{Config, Interface},
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
enum RunState {
|
enum RunState {
|
||||||
Running,
|
Running,
|
||||||
Idle,
|
Idle,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl RunState {
|
impl RunState {
|
||||||
pub fn to_rpc(&self) -> RPCTunnelState {
|
fn to_rpc(&self) -> RPCTunnelState {
|
||||||
match self {
|
match self {
|
||||||
RunState::Running => RPCTunnelState::Running,
|
Self::Running => RPCTunnelState::Running,
|
||||||
RunState::Idle => RPCTunnelState::Stopped,
|
Self::Idle => RPCTunnelState::Stopped,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||||
|
enum RuntimeIdentity {
|
||||||
|
DefaultWireGuard,
|
||||||
|
Network { id: i32, network_type: NetworkType },
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
enum ResolvedTunnel {
|
||||||
|
WireGuard {
|
||||||
|
identity: RuntimeIdentity,
|
||||||
|
config: WireGuardConfig,
|
||||||
|
},
|
||||||
|
Tor {
|
||||||
|
identity: RuntimeIdentity,
|
||||||
|
config: TorConfig,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ResolvedTunnel {
|
||||||
|
fn from_networks(networks: &[Network], fallback: &WireGuardConfig) -> Result<Self> {
|
||||||
|
let Some(network) = networks.first() else {
|
||||||
|
return Ok(Self::WireGuard {
|
||||||
|
identity: RuntimeIdentity::DefaultWireGuard,
|
||||||
|
config: fallback.clone(),
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
let identity = RuntimeIdentity::Network {
|
||||||
|
id: network.id,
|
||||||
|
network_type: network.r#type(),
|
||||||
|
};
|
||||||
|
|
||||||
|
match network.r#type() {
|
||||||
|
NetworkType::WireGuard => {
|
||||||
|
let payload = String::from_utf8(network.payload.clone())
|
||||||
|
.context("wireguard payload must be valid UTF-8")?;
|
||||||
|
let config = WireGuardConfig::from_content_fmt(&payload, "ini")?;
|
||||||
|
Ok(Self::WireGuard { identity, config })
|
||||||
|
}
|
||||||
|
NetworkType::Tor => {
|
||||||
|
let config = TorConfig::from_payload(&network.payload)?;
|
||||||
|
Ok(Self::Tor { identity, config })
|
||||||
|
}
|
||||||
|
NetworkType::HackClub => {
|
||||||
|
Err(anyhow!("HackClub runtime is not available on this branch"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn identity(&self) -> &RuntimeIdentity {
|
||||||
|
match self {
|
||||||
|
Self::WireGuard { identity, .. } | Self::Tor { identity, .. } => identity,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn server_config(&self) -> Result<ServerConfig> {
|
||||||
|
match self {
|
||||||
|
Self::WireGuard { config, .. } => ServerConfig::try_from(config),
|
||||||
|
Self::Tor { config, .. } => Ok(ServerConfig {
|
||||||
|
address: config.address.clone(),
|
||||||
|
name: config.tun_name.clone(),
|
||||||
|
mtu: config.mtu.map(|mtu| mtu as i32),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn start(self, tun_interface: Arc<RwLock<Option<TunInterface>>>) -> Result<ActiveTunnel> {
|
||||||
|
match self {
|
||||||
|
Self::WireGuard { identity, config } => {
|
||||||
|
let tun = TunOptions::new()
|
||||||
|
.address(config.interface.address.clone())
|
||||||
|
.open()?;
|
||||||
|
tun_interface.write().await.replace(tun);
|
||||||
|
|
||||||
|
let mut interface: WireGuardInterface = config.try_into()?;
|
||||||
|
interface.set_tun_ref(tun_interface.clone()).await;
|
||||||
|
let interface = Arc::new(RwLock::new(interface));
|
||||||
|
let run_interface = interface.clone();
|
||||||
|
let task = tokio::spawn(async move {
|
||||||
|
let guard = run_interface.read().await;
|
||||||
|
guard.run().await
|
||||||
|
});
|
||||||
|
|
||||||
|
Ok(ActiveTunnel::WireGuard { identity, interface, task })
|
||||||
|
}
|
||||||
|
Self::Tor { identity, config } => {
|
||||||
|
let mut tun_options = TunOptions::new().address(config.address.clone());
|
||||||
|
if let Some(name) = config.tun_name.as_deref() {
|
||||||
|
tun_options = tun_options.name(name);
|
||||||
|
}
|
||||||
|
let tun = tun_options.open()?;
|
||||||
|
tun_interface.write().await.replace(tun);
|
||||||
|
|
||||||
|
match tor::spawn(config).await {
|
||||||
|
Ok(handle) => Ok(ActiveTunnel::Tor { identity, handle }),
|
||||||
|
Err(err) => {
|
||||||
|
tun_interface.write().await.take();
|
||||||
|
Err(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
enum ActiveTunnel {
|
||||||
|
WireGuard {
|
||||||
|
identity: RuntimeIdentity,
|
||||||
|
interface: Arc<RwLock<WireGuardInterface>>,
|
||||||
|
task: JoinHandle<Result<()>>,
|
||||||
|
},
|
||||||
|
Tor {
|
||||||
|
identity: RuntimeIdentity,
|
||||||
|
handle: TorHandle,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ActiveTunnel {
|
||||||
|
fn identity(&self) -> &RuntimeIdentity {
|
||||||
|
match self {
|
||||||
|
Self::WireGuard { identity, .. } | Self::Tor { identity, .. } => identity,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn shutdown(self, tun_interface: &Arc<RwLock<Option<TunInterface>>>) -> Result<()> {
|
||||||
|
match self {
|
||||||
|
Self::WireGuard { interface, task, .. } => {
|
||||||
|
interface.read().await.remove_tun().await;
|
||||||
|
let task_result = task.await;
|
||||||
|
tun_interface.write().await.take();
|
||||||
|
task_result??;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
Self::Tor { handle, .. } => {
|
||||||
|
let result = handle.shutdown().await;
|
||||||
|
tun_interface.write().await.take();
|
||||||
|
result
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -63,30 +188,26 @@ impl RunState {
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct DaemonRPCServer {
|
pub struct DaemonRPCServer {
|
||||||
tun_interface: Arc<RwLock<Option<TunInterface>>>,
|
tun_interface: Arc<RwLock<Option<TunInterface>>>,
|
||||||
wg_interface: Arc<RwLock<Interface>>,
|
default_config: Arc<RwLock<WireGuardConfig>>,
|
||||||
config: Arc<RwLock<Config>>,
|
|
||||||
db_path: Option<PathBuf>,
|
db_path: Option<PathBuf>,
|
||||||
wg_state_chan: (watch::Sender<RunState>, watch::Receiver<RunState>),
|
wg_state_chan: (watch::Sender<RunState>, watch::Receiver<RunState>),
|
||||||
network_update_chan: (watch::Sender<()>, watch::Receiver<()>),
|
network_update_chan: (watch::Sender<()>, watch::Receiver<()>),
|
||||||
|
active_tunnel: Arc<RwLock<Option<ActiveTunnel>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DaemonRPCServer {
|
impl DaemonRPCServer {
|
||||||
pub fn new(
|
pub fn new(config: Arc<RwLock<WireGuardConfig>>, db_path: Option<&Path>) -> Result<Self> {
|
||||||
wg_interface: Arc<RwLock<Interface>>,
|
|
||||||
config: Arc<RwLock<Config>>,
|
|
||||||
db_path: Option<&Path>,
|
|
||||||
) -> Result<Self> {
|
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
tun_interface: Arc::new(RwLock::new(None)),
|
tun_interface: Arc::new(RwLock::new(None)),
|
||||||
wg_interface,
|
default_config: config,
|
||||||
config,
|
db_path: db_path.map(Path::to_owned),
|
||||||
db_path: db_path.map(|p| p.to_owned()),
|
|
||||||
wg_state_chan: watch::channel(RunState::Idle),
|
wg_state_chan: watch::channel(RunState::Idle),
|
||||||
network_update_chan: watch::channel(()),
|
network_update_chan: watch::channel(()),
|
||||||
|
active_tunnel: Arc::new(RwLock::new(None)),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_connection(&self) -> Result<Connection, RspStatus> {
|
fn get_connection(&self) -> Result<Connection, RspStatus> {
|
||||||
get_connection(self.db_path.as_deref()).map_err(proc_err)
|
get_connection(self.db_path.as_deref()).map_err(proc_err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -94,13 +215,70 @@ impl DaemonRPCServer {
|
||||||
self.wg_state_chan.0.send(state).map_err(proc_err)
|
self.wg_state_chan.0.send(state).map_err(proc_err)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn get_wg_state(&self) -> RunState {
|
|
||||||
self.wg_state_chan.1.borrow().to_owned()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn notify_network_update(&self) -> Result<(), RspStatus> {
|
async fn notify_network_update(&self) -> Result<(), RspStatus> {
|
||||||
self.network_update_chan.0.send(()).map_err(proc_err)
|
self.network_update_chan.0.send(()).map_err(proc_err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn resolve_tunnel(&self) -> Result<ResolvedTunnel, RspStatus> {
|
||||||
|
let conn = self.get_connection()?;
|
||||||
|
let networks = list_networks(&conn).map_err(proc_err)?;
|
||||||
|
let fallback = self.default_config.read().await.clone();
|
||||||
|
ResolvedTunnel::from_networks(&networks, &fallback).map_err(proc_err)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn current_tunnel_configuration(&self) -> Result<TunnelConfigurationResponse, RspStatus> {
|
||||||
|
let config = self
|
||||||
|
.resolve_tunnel()
|
||||||
|
.await?
|
||||||
|
.server_config()
|
||||||
|
.map_err(proc_err)?;
|
||||||
|
Ok(TunnelConfigurationResponse {
|
||||||
|
addresses: config.address,
|
||||||
|
mtu: config.mtu.unwrap_or(1500),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn stop_active_tunnel(&self) -> Result<bool, RspStatus> {
|
||||||
|
let current = { self.active_tunnel.write().await.take() };
|
||||||
|
let Some(current) = current else {
|
||||||
|
return Ok(false);
|
||||||
|
};
|
||||||
|
|
||||||
|
current
|
||||||
|
.shutdown(&self.tun_interface)
|
||||||
|
.await
|
||||||
|
.map_err(proc_err)?;
|
||||||
|
self.set_wg_state(RunState::Idle).await?;
|
||||||
|
Ok(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn replace_active_tunnel(&self, desired: ResolvedTunnel) -> Result<(), RspStatus> {
|
||||||
|
let _ = self.stop_active_tunnel().await?;
|
||||||
|
let active = desired
|
||||||
|
.start(self.tun_interface.clone())
|
||||||
|
.await
|
||||||
|
.map_err(proc_err)?;
|
||||||
|
self.active_tunnel.write().await.replace(active);
|
||||||
|
self.set_wg_state(RunState::Running).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn reconcile_runtime(&self) -> Result<(), RspStatus> {
|
||||||
|
let desired = self.resolve_tunnel().await?;
|
||||||
|
let needs_restart = {
|
||||||
|
let guard = self.active_tunnel.read().await;
|
||||||
|
guard
|
||||||
|
.as_ref()
|
||||||
|
.map(|active| active.identity() != desired.identity())
|
||||||
|
.unwrap_or(false)
|
||||||
|
};
|
||||||
|
|
||||||
|
if needs_restart {
|
||||||
|
self.replace_active_tunnel(desired).await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tonic::async_trait]
|
#[tonic::async_trait]
|
||||||
|
|
@ -113,55 +291,46 @@ impl Tunnel for DaemonRPCServer {
|
||||||
_request: Request<Empty>,
|
_request: Request<Empty>,
|
||||||
) -> Result<Response<Self::TunnelConfigurationStream>, RspStatus> {
|
) -> Result<Response<Self::TunnelConfigurationStream>, RspStatus> {
|
||||||
let (tx, rx) = mpsc::channel(10);
|
let (tx, rx) = mpsc::channel(10);
|
||||||
|
let server = self.clone();
|
||||||
|
let mut sub = self.network_update_chan.1.clone();
|
||||||
|
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
let serv_config = ServerConfig::default();
|
loop {
|
||||||
tx.send(Ok(TunnelConfigurationResponse {
|
let response = server.current_tunnel_configuration().await;
|
||||||
mtu: serv_config.mtu.unwrap_or(1000),
|
if tx.send(response).await.is_err() {
|
||||||
addresses: serv_config.address,
|
break;
|
||||||
}))
|
}
|
||||||
.await
|
if sub.changed().await.is_err() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
Ok(Response::new(ReceiverStream::new(rx)))
|
Ok(Response::new(ReceiverStream::new(rx)))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn tunnel_start(&self, _request: Request<Empty>) -> Result<Response<Empty>, RspStatus> {
|
async fn tunnel_start(&self, _request: Request<Empty>) -> Result<Response<Empty>, RspStatus> {
|
||||||
let wg_state = self.get_wg_state().await;
|
let desired = self.resolve_tunnel().await?;
|
||||||
match wg_state {
|
let already_running = {
|
||||||
RunState::Idle => {
|
let guard = self.active_tunnel.read().await;
|
||||||
let tun_if = TunOptions::new().open()?;
|
guard
|
||||||
debug!("Setting tun on wg_interface");
|
.as_ref()
|
||||||
self.tun_interface.write().await.replace(tun_if);
|
.map(|active| active.identity() == desired.identity())
|
||||||
self.wg_interface
|
.unwrap_or(false)
|
||||||
.write()
|
};
|
||||||
.await
|
|
||||||
.set_tun_ref(self.tun_interface.clone())
|
|
||||||
.await;
|
|
||||||
debug!("tun set on wg_interface");
|
|
||||||
|
|
||||||
debug!("Setting tun_interface");
|
if already_running {
|
||||||
debug!("tun_interface set: {:?}", self.tun_interface);
|
warn!("Got start, but active tunnel already matches desired network.");
|
||||||
|
return Ok(Response::new(Empty {}));
|
||||||
debug!("Cloning wg_interface");
|
|
||||||
let tmp_wg = self.wg_interface.clone();
|
|
||||||
let run_task = tokio::spawn(async move {
|
|
||||||
let twlock = tmp_wg.read().await;
|
|
||||||
twlock.run().await
|
|
||||||
});
|
|
||||||
self.set_wg_state(RunState::Running).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
RunState::Running => {
|
|
||||||
warn!("Got start, but tun interface already up.");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return Ok(Response::new(Empty {}));
|
self.replace_active_tunnel(desired).await?;
|
||||||
|
Ok(Response::new(Empty {}))
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn tunnel_stop(&self, _request: Request<Empty>) -> Result<Response<Empty>, RspStatus> {
|
async fn tunnel_stop(&self, _request: Request<Empty>) -> Result<Response<Empty>, RspStatus> {
|
||||||
self.wg_interface.write().await.remove_tun().await;
|
let _ = self.stop_active_tunnel().await?;
|
||||||
self.set_wg_state(RunState::Idle).await?;
|
Ok(Response::new(Empty {}))
|
||||||
return Ok(Response::new(Empty {}));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn tunnel_status(
|
async fn tunnel_status(
|
||||||
|
|
@ -172,13 +341,16 @@ impl Tunnel for DaemonRPCServer {
|
||||||
let mut state_rx = self.wg_state_chan.1.clone();
|
let mut state_rx = self.wg_state_chan.1.clone();
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
let cur = state_rx.borrow_and_update().to_owned();
|
let cur = state_rx.borrow_and_update().to_owned();
|
||||||
tx.send(Ok(status_rsp(cur))).await;
|
if tx.send(Ok(status_rsp(cur))).await.is_err() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
state_rx.changed().await.unwrap();
|
if state_rx.changed().await.is_err() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
let cur = state_rx.borrow().to_owned();
|
let cur = state_rx.borrow().to_owned();
|
||||||
let res = tx.send(Ok(status_rsp(cur))).await;
|
if tx.send(Ok(status_rsp(cur))).await.is_err() {
|
||||||
if res.is_err() {
|
|
||||||
eprintln!("Tunnel status channel closed");
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -196,6 +368,7 @@ impl Networks for DaemonRPCServer {
|
||||||
let network = request.into_inner();
|
let network = request.into_inner();
|
||||||
add_network(&conn, &network).map_err(proc_err)?;
|
add_network(&conn, &network).map_err(proc_err)?;
|
||||||
self.notify_network_update().await?;
|
self.notify_network_update().await?;
|
||||||
|
self.reconcile_runtime().await?;
|
||||||
Ok(Response::new(Empty {}))
|
Ok(Response::new(Empty {}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -203,7 +376,6 @@ impl Networks for DaemonRPCServer {
|
||||||
&self,
|
&self,
|
||||||
_request: Request<Empty>,
|
_request: Request<Empty>,
|
||||||
) -> Result<Response<Self::NetworkListStream>, RspStatus> {
|
) -> Result<Response<Self::NetworkListStream>, RspStatus> {
|
||||||
debug!("Mock network_list called");
|
|
||||||
let (tx, rx) = mpsc::channel(10);
|
let (tx, rx) = mpsc::channel(10);
|
||||||
let conn = self.get_connection()?;
|
let conn = self.get_connection()?;
|
||||||
let mut sub = self.network_update_chan.1.clone();
|
let mut sub = self.network_update_chan.1.clone();
|
||||||
|
|
@ -212,12 +384,12 @@ impl Networks for DaemonRPCServer {
|
||||||
let networks = list_networks(&conn)
|
let networks = list_networks(&conn)
|
||||||
.map(|res| NetworkListResponse { network: res })
|
.map(|res| NetworkListResponse { network: res })
|
||||||
.map_err(proc_err);
|
.map_err(proc_err);
|
||||||
let res = tx.send(networks).await;
|
if tx.send(networks).await.is_err() {
|
||||||
if res.is_err() {
|
break;
|
||||||
eprintln!("Network list channel closed");
|
}
|
||||||
|
if sub.changed().await.is_err() {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
sub.changed().await.unwrap();
|
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
Ok(Response::new(ReceiverStream::new(rx)))
|
Ok(Response::new(ReceiverStream::new(rx)))
|
||||||
|
|
@ -230,6 +402,7 @@ impl Networks for DaemonRPCServer {
|
||||||
let conn = self.get_connection()?;
|
let conn = self.get_connection()?;
|
||||||
reorder_network(&conn, request.into_inner()).map_err(proc_err)?;
|
reorder_network(&conn, request.into_inner()).map_err(proc_err)?;
|
||||||
self.notify_network_update().await?;
|
self.notify_network_update().await?;
|
||||||
|
self.reconcile_runtime().await?;
|
||||||
Ok(Response::new(Empty {}))
|
Ok(Response::new(Empty {}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -240,6 +413,7 @@ impl Networks for DaemonRPCServer {
|
||||||
let conn = self.get_connection()?;
|
let conn = self.get_connection()?;
|
||||||
delete_network(&conn, request.into_inner()).map_err(proc_err)?;
|
delete_network(&conn, request.into_inner()).map_err(proc_err)?;
|
||||||
self.notify_network_update().await?;
|
self.notify_network_update().await?;
|
||||||
|
self.reconcile_runtime().await?;
|
||||||
Ok(Response::new(Empty {}))
|
Ok(Response::new(Empty {}))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -251,6 +425,6 @@ fn proc_err(err: impl ToString) -> RspStatus {
|
||||||
fn status_rsp(state: RunState) -> TunnelStatusResponse {
|
fn status_rsp(state: RunState) -> TunnelStatusResponse {
|
||||||
TunnelStatusResponse {
|
TunnelStatusResponse {
|
||||||
state: state.to_rpc().into(),
|
state: state.to_rpc().into(),
|
||||||
start: None, // TODO: Add timestamp
|
start: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -15,12 +15,11 @@ use tokio::{
|
||||||
};
|
};
|
||||||
use tokio_stream::wrappers::UnixListenerStream;
|
use tokio_stream::wrappers::UnixListenerStream;
|
||||||
use tonic::transport::Server;
|
use tonic::transport::Server;
|
||||||
use tracing::{error, info};
|
use tracing::info;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
daemon::rpc::grpc_defs::{networks_server::NetworksServer, tunnel_server::TunnelServer},
|
daemon::rpc::grpc_defs::{networks_server::NetworksServer, tunnel_server::TunnelServer},
|
||||||
database::{get_connection, load_interface},
|
database::{get_connection, load_interface},
|
||||||
wireguard::Interface,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
pub async fn daemon_main(
|
pub async fn daemon_main(
|
||||||
|
|
@ -33,11 +32,7 @@ pub async fn daemon_main(
|
||||||
}
|
}
|
||||||
let conn = get_connection(db_path)?;
|
let conn = get_connection(db_path)?;
|
||||||
let config = load_interface(&conn, "1")?;
|
let config = load_interface(&conn, "1")?;
|
||||||
let burrow_server = DaemonRPCServer::new(
|
let burrow_server = DaemonRPCServer::new(Arc::new(RwLock::new(config)), db_path.clone())?;
|
||||||
Arc::new(RwLock::new(config.clone().try_into()?)),
|
|
||||||
Arc::new(RwLock::new(config)),
|
|
||||||
db_path.clone(),
|
|
||||||
)?;
|
|
||||||
let spp = socket_path.clone();
|
let spp = socket_path.clone();
|
||||||
let tmp = get_socket_path();
|
let tmp = get_socket_path();
|
||||||
let sock_path = spp.unwrap_or(Path::new(tmp.as_str()));
|
let sock_path = spp.unwrap_or(Path::new(tmp.as_str()));
|
||||||
|
|
|
||||||
|
|
@ -56,7 +56,7 @@ END;
|
||||||
pub fn initialize_tables(conn: &Connection) -> Result<()> {
|
pub fn initialize_tables(conn: &Connection) -> Result<()> {
|
||||||
conn.execute(CREATE_WG_INTERFACE_TABLE, [])?;
|
conn.execute(CREATE_WG_INTERFACE_TABLE, [])?;
|
||||||
conn.execute(CREATE_WG_PEER_TABLE, [])?;
|
conn.execute(CREATE_WG_PEER_TABLE, [])?;
|
||||||
conn.execute(CREATE_NETWORK_TABLE, [])?;
|
conn.execute_batch(CREATE_NETWORK_TABLE)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,22 +1,20 @@
|
||||||
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
||||||
|
pub mod tor;
|
||||||
|
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
||||||
pub mod wireguard;
|
pub mod wireguard;
|
||||||
|
|
||||||
|
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
||||||
|
mod auth;
|
||||||
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
||||||
mod daemon;
|
mod daemon;
|
||||||
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
||||||
pub mod database;
|
pub mod database;
|
||||||
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
|
||||||
mod auth;
|
|
||||||
pub(crate) mod tracing;
|
pub(crate) mod tracing;
|
||||||
|
|
||||||
#[cfg(target_vendor = "apple")]
|
#[cfg(target_vendor = "apple")]
|
||||||
pub use daemon::apple::spawn_in_process;
|
pub use daemon::apple::spawn_in_process;
|
||||||
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
||||||
pub use daemon::{
|
pub use daemon::{
|
||||||
rpc::DaemonResponse,
|
rpc::DaemonResponse, rpc::ServerInfo, DaemonClient, DaemonCommand, DaemonResponseData,
|
||||||
rpc::ServerInfo,
|
|
||||||
DaemonClient,
|
|
||||||
DaemonCommand,
|
|
||||||
DaemonResponseData,
|
|
||||||
DaemonStartOptions,
|
DaemonStartOptions,
|
||||||
};
|
};
|
||||||
|
|
|
||||||
|
|
@ -3,6 +3,8 @@ use clap::{Args, Parser, Subcommand};
|
||||||
|
|
||||||
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
||||||
mod daemon;
|
mod daemon;
|
||||||
|
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
||||||
|
mod tor;
|
||||||
pub(crate) mod tracing;
|
pub(crate) mod tracing;
|
||||||
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
|
||||||
mod wireguard;
|
mod wireguard;
|
||||||
|
|
|
||||||
125
burrow/src/tor/config.rs
Normal file
125
burrow/src/tor/config.rs
Normal file
|
|
@ -0,0 +1,125 @@
|
||||||
|
use std::{net::SocketAddr, str};
|
||||||
|
|
||||||
|
use anyhow::{Context, Result};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
|
pub struct Config {
|
||||||
|
#[serde(default)]
|
||||||
|
pub address: Vec<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub dns: Vec<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub mtu: Option<u32>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub tun_name: Option<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub arti: ArtiConfig,
|
||||||
|
#[serde(default)]
|
||||||
|
pub tcp_stack: TcpStackConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
|
pub struct ArtiConfig {
|
||||||
|
pub state_dir: String,
|
||||||
|
pub cache_dir: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for ArtiConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
state_dir: "/var/lib/burrow/arti/state".to_string(),
|
||||||
|
cache_dir: "/var/cache/burrow/arti".to_string(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
|
#[serde(tag = "kind", rename_all = "snake_case")]
|
||||||
|
pub enum TcpStackConfig {
|
||||||
|
System(SystemTcpStackConfig),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for TcpStackConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::System(SystemTcpStackConfig::default())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
|
pub struct SystemTcpStackConfig {
|
||||||
|
#[serde(default = "default_system_listen")]
|
||||||
|
pub listen: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for SystemTcpStackConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
listen: default_system_listen(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Config {
|
||||||
|
pub fn from_payload(payload: &[u8]) -> Result<Self> {
|
||||||
|
if let Ok(config) = serde_json::from_slice(payload) {
|
||||||
|
return Ok(config);
|
||||||
|
}
|
||||||
|
|
||||||
|
let payload = str::from_utf8(payload).context("tor payload must be valid UTF-8")?;
|
||||||
|
toml::from_str(payload).context("failed to parse tor payload as JSON or TOML")
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn listen_addr(&self) -> Result<SocketAddr> {
|
||||||
|
match &self.tcp_stack {
|
||||||
|
TcpStackConfig::System(config) => config
|
||||||
|
.listen
|
||||||
|
.parse()
|
||||||
|
.with_context(|| format!("invalid system tcp listen address '{}'", config.listen)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_system_listen() -> String {
|
||||||
|
"127.0.0.1:9040".to_string()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parses_json_payload() {
|
||||||
|
let payload = br#"{
|
||||||
|
"address":["100.64.0.2/32"],
|
||||||
|
"mtu":1400,
|
||||||
|
"arti":{"state_dir":"/tmp/state","cache_dir":"/tmp/cache"},
|
||||||
|
"tcp_stack":{"kind":"system","listen":"127.0.0.1:9150"}
|
||||||
|
}"#;
|
||||||
|
|
||||||
|
let config = Config::from_payload(payload).unwrap();
|
||||||
|
assert_eq!(config.address, vec!["100.64.0.2/32"]);
|
||||||
|
assert_eq!(config.listen_addr().unwrap().to_string(), "127.0.0.1:9150");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parses_toml_payload() {
|
||||||
|
let payload = r#"
|
||||||
|
address = ["100.64.0.3/32"]
|
||||||
|
mtu = 1280
|
||||||
|
tun_name = "burrow-tor"
|
||||||
|
|
||||||
|
[arti]
|
||||||
|
state_dir = "/tmp/state"
|
||||||
|
cache_dir = "/tmp/cache"
|
||||||
|
|
||||||
|
[tcp_stack]
|
||||||
|
kind = "system"
|
||||||
|
listen = "127.0.0.1:9140"
|
||||||
|
"#;
|
||||||
|
|
||||||
|
let config = Config::from_payload(payload.as_bytes()).unwrap();
|
||||||
|
assert_eq!(config.tun_name.as_deref(), Some("burrow-tor"));
|
||||||
|
assert_eq!(config.listen_addr().unwrap().to_string(), "127.0.0.1:9140");
|
||||||
|
}
|
||||||
|
}
|
||||||
6
burrow/src/tor/mod.rs
Normal file
6
burrow/src/tor/mod.rs
Normal file
|
|
@ -0,0 +1,6 @@
|
||||||
|
mod config;
|
||||||
|
mod runtime;
|
||||||
|
mod system;
|
||||||
|
|
||||||
|
pub use config::{ArtiConfig, Config, SystemTcpStackConfig, TcpStackConfig};
|
||||||
|
pub use runtime::{spawn, TorHandle};
|
||||||
116
burrow/src/tor/runtime.rs
Normal file
116
burrow/src/tor/runtime.rs
Normal file
|
|
@ -0,0 +1,116 @@
|
||||||
|
use std::{sync::Arc, time::Duration};
|
||||||
|
|
||||||
|
use anyhow::{Context, Result};
|
||||||
|
use arti_client::{config::TorClientConfigBuilder, TorClient};
|
||||||
|
use tokio::{
|
||||||
|
sync::watch,
|
||||||
|
task::{JoinError, JoinSet},
|
||||||
|
};
|
||||||
|
use tokio_util::compat::FuturesAsyncReadCompatExt;
|
||||||
|
use tracing::{debug, error, info, warn};
|
||||||
|
|
||||||
|
use super::{system::SystemTcpStackRuntime, Config, TcpStackConfig};
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct TorHandle {
|
||||||
|
shutdown: watch::Sender<bool>,
|
||||||
|
task: tokio::task::JoinHandle<()>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TorHandle {
|
||||||
|
pub async fn shutdown(self) -> Result<()> {
|
||||||
|
let _ = self.shutdown.send(true);
|
||||||
|
match self.task.await {
|
||||||
|
Ok(()) => Ok(()),
|
||||||
|
Err(err) if err.is_cancelled() => Ok(()),
|
||||||
|
Err(err) => Err(join_error(err)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn spawn(config: Config) -> Result<TorHandle> {
|
||||||
|
let builder =
|
||||||
|
TorClientConfigBuilder::from_directories(&config.arti.state_dir, &config.arti.cache_dir);
|
||||||
|
let tor_config = builder.build().context("failed to build arti config")?;
|
||||||
|
let tor_client = Arc::new(
|
||||||
|
TorClient::create_bootstrapped(tor_config)
|
||||||
|
.await
|
||||||
|
.context("failed to bootstrap arti client")?,
|
||||||
|
);
|
||||||
|
|
||||||
|
let (shutdown_tx, mut shutdown_rx) = watch::channel(false);
|
||||||
|
let task = match config.tcp_stack.clone() {
|
||||||
|
TcpStackConfig::System(system_config) => tokio::spawn(async move {
|
||||||
|
let stack = match SystemTcpStackRuntime::bind(&system_config).await {
|
||||||
|
Ok(stack) => stack,
|
||||||
|
Err(err) => {
|
||||||
|
error!(?err, "failed to bind system tcp stack listener");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
info!(
|
||||||
|
listen = %stack.local_addr(),
|
||||||
|
"system tcp stack listener bound for tor transparent proxy"
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut connections = JoinSet::new();
|
||||||
|
loop {
|
||||||
|
tokio::select! {
|
||||||
|
changed = shutdown_rx.changed() => {
|
||||||
|
match changed {
|
||||||
|
Ok(()) if *shutdown_rx.borrow() => break,
|
||||||
|
Ok(()) => continue,
|
||||||
|
Err(_) => break,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Some(res) = connections.join_next(), if !connections.is_empty() => {
|
||||||
|
match res {
|
||||||
|
Ok(Ok(())) => {}
|
||||||
|
Ok(Err(err)) => warn!(?err, "transparent proxy task failed"),
|
||||||
|
Err(err) => warn!(?err, "transparent proxy task panicked"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
accepted = stack.accept() => {
|
||||||
|
let (mut inbound, original_dst) = match accepted {
|
||||||
|
Ok(pair) => pair,
|
||||||
|
Err(err) => {
|
||||||
|
warn!(?err, "failed to accept transparent tcp connection");
|
||||||
|
tokio::time::sleep(Duration::from_millis(50)).await;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let tor_client = tor_client.clone();
|
||||||
|
connections.spawn(async move {
|
||||||
|
debug!(%original_dst, "accepted transparent tcp connection");
|
||||||
|
let tor_stream = tor_client
|
||||||
|
.connect((original_dst.ip().to_string(), original_dst.port()))
|
||||||
|
.await
|
||||||
|
.with_context(|| format!("failed to connect to {original_dst} over tor"))?;
|
||||||
|
let mut tor_stream = tor_stream.compat();
|
||||||
|
tokio::io::copy_bidirectional(&mut inbound, &mut tor_stream)
|
||||||
|
.await
|
||||||
|
.with_context(|| format!("failed to bridge tor stream for {original_dst}"))?;
|
||||||
|
Result::<()>::Ok(())
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
connections.abort_all();
|
||||||
|
while let Some(res) = connections.join_next().await {
|
||||||
|
match res {
|
||||||
|
Ok(Ok(())) => {}
|
||||||
|
Ok(Err(err)) => debug!(?err, "transparent proxy task failed during shutdown"),
|
||||||
|
Err(err) => debug!(?err, "transparent proxy task exited during shutdown"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(TorHandle { shutdown: shutdown_tx, task })
|
||||||
|
}
|
||||||
|
|
||||||
|
fn join_error(err: JoinError) -> anyhow::Error {
|
||||||
|
anyhow::anyhow!("tor runtime task failed: {err}")
|
||||||
|
}
|
||||||
856
burrow/src/tor/system.rs
Normal file
856
burrow/src/tor/system.rs
Normal file
|
|
@ -0,0 +1,856 @@
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
|
||||||
|
use anyhow::{Context, Result};
|
||||||
|
use tokio::net::{TcpListener, TcpStream};
|
||||||
|
|
||||||
|
use super::SystemTcpStackConfig;
|
||||||
|
|
||||||
|
pub struct SystemTcpStackRuntime {
|
||||||
|
listener: TcpListener,
|
||||||
|
#[cfg(target_vendor = "apple")]
|
||||||
|
flow_tracker: AppleFlowTracker,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SystemTcpStackRuntime {
|
||||||
|
pub async fn bind(config: &SystemTcpStackConfig) -> Result<Self> {
|
||||||
|
let listener = TcpListener::bind(&config.listen)
|
||||||
|
.await
|
||||||
|
.with_context(|| format!("failed to bind transparent listener on {}", config.listen))?;
|
||||||
|
#[cfg(target_vendor = "apple")]
|
||||||
|
let flow_tracker = AppleFlowTracker::new(
|
||||||
|
listener
|
||||||
|
.local_addr()
|
||||||
|
.expect("listener should always have a local address"),
|
||||||
|
)
|
||||||
|
.context("failed to open /dev/pf for transparent destination lookups")?;
|
||||||
|
Ok(Self {
|
||||||
|
listener,
|
||||||
|
#[cfg(target_vendor = "apple")]
|
||||||
|
flow_tracker,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn local_addr(&self) -> SocketAddr {
|
||||||
|
self.listener
|
||||||
|
.local_addr()
|
||||||
|
.expect("listener should always have a local address")
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn accept(&self) -> Result<(TcpStream, SocketAddr)> {
|
||||||
|
let (stream, _) = self
|
||||||
|
.listener
|
||||||
|
.accept()
|
||||||
|
.await
|
||||||
|
.context("failed to accept transparent listener connection")?;
|
||||||
|
#[cfg(target_vendor = "apple")]
|
||||||
|
let original_dst = self.flow_tracker.resolve(&stream)?;
|
||||||
|
#[cfg(not(target_vendor = "apple"))]
|
||||||
|
let original_dst = original_destination(&stream)?;
|
||||||
|
Ok((stream, original_dst))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
fn original_destination(stream: &TcpStream) -> Result<SocketAddr> {
|
||||||
|
use std::{
|
||||||
|
mem::{size_of, MaybeUninit},
|
||||||
|
os::fd::AsRawFd,
|
||||||
|
};
|
||||||
|
|
||||||
|
let level = if stream.local_addr()?.is_ipv6() {
|
||||||
|
libc::SOL_IPV6
|
||||||
|
} else {
|
||||||
|
libc::SOL_IP
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut addr = MaybeUninit::<libc::sockaddr_storage>::zeroed();
|
||||||
|
let mut len = size_of::<libc::sockaddr_storage>() as libc::socklen_t;
|
||||||
|
let rc = unsafe {
|
||||||
|
libc::getsockopt(
|
||||||
|
stream.as_raw_fd(),
|
||||||
|
level,
|
||||||
|
80,
|
||||||
|
addr.as_mut_ptr().cast(),
|
||||||
|
&mut len,
|
||||||
|
)
|
||||||
|
};
|
||||||
|
if rc != 0 {
|
||||||
|
return Err(std::io::Error::last_os_error()).context("SO_ORIGINAL_DST lookup failed");
|
||||||
|
}
|
||||||
|
|
||||||
|
socket_addr_from_storage(unsafe { &addr.assume_init() }, len as usize)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(all(not(target_os = "linux"), not(target_vendor = "apple")))]
|
||||||
|
fn original_destination(_stream: &TcpStream) -> Result<SocketAddr> {
|
||||||
|
anyhow::bail!("system tcp stack transparent destination lookup is only implemented on linux")
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(target_vendor = "apple")]
|
||||||
|
mod apple_pf {
|
||||||
|
use std::{
|
||||||
|
collections::HashMap,
|
||||||
|
fs::File,
|
||||||
|
io,
|
||||||
|
mem::zeroed,
|
||||||
|
io::Read,
|
||||||
|
net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6},
|
||||||
|
os::fd::{AsRawFd, RawFd},
|
||||||
|
time::{Duration, Instant},
|
||||||
|
};
|
||||||
|
|
||||||
|
use anyhow::{anyhow, bail, Context, Result};
|
||||||
|
use nix::{ioctl_readwrite, libc};
|
||||||
|
use parking_lot::Mutex;
|
||||||
|
use tokio::net::TcpStream;
|
||||||
|
|
||||||
|
ioctl_readwrite!(pf_natlook, b'D', 23, PfiocNatlook);
|
||||||
|
|
||||||
|
const FLOW_CACHE_LIMIT: usize = 4096;
|
||||||
|
const FLOW_CACHE_TTL: Duration = Duration::from_secs(30);
|
||||||
|
const PF_OUT: u8 = 2;
|
||||||
|
const PFLOG_RULESET_NAME_SIZE: usize = 16;
|
||||||
|
const PFLOG_DEVICE: &str = "pflog0";
|
||||||
|
const OBSERVER_WAIT_STEPS: usize = 20;
|
||||||
|
const OBSERVER_WAIT_INTERVAL: Duration = Duration::from_millis(10);
|
||||||
|
|
||||||
|
pub(super) struct AppleFlowTracker {
|
||||||
|
pf: File,
|
||||||
|
listener_addr: SocketAddr,
|
||||||
|
state: Mutex<FlowState>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AppleFlowTracker {
|
||||||
|
pub(super) fn new(listener_addr: SocketAddr) -> io::Result<Self> {
|
||||||
|
Ok(Self {
|
||||||
|
pf: File::options().read(true).write(true).open("/dev/pf")?,
|
||||||
|
listener_addr,
|
||||||
|
state: Mutex::new(FlowState {
|
||||||
|
cache: HashMap::new(),
|
||||||
|
observer: PacketObserver::new(listener_addr).ok(),
|
||||||
|
}),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn resolve(&self, stream: &TcpStream) -> Result<SocketAddr> {
|
||||||
|
let key = FlowKey::from_stream(stream)?;
|
||||||
|
if let Some(original_dst) = self.cached_destination(key) {
|
||||||
|
return Ok(original_dst);
|
||||||
|
}
|
||||||
|
|
||||||
|
match lookup_pf_original_destination(self.pf.as_raw_fd(), key.peer, key.local) {
|
||||||
|
Ok(original_dst) => {
|
||||||
|
self.remember(key, original_dst);
|
||||||
|
Ok(original_dst)
|
||||||
|
}
|
||||||
|
Err(err)
|
||||||
|
if matches!(
|
||||||
|
err.raw_os_error(),
|
||||||
|
Some(code) if code == libc::EPERM || code == libc::ENOENT
|
||||||
|
) =>
|
||||||
|
{
|
||||||
|
if let Some(original_dst) = self.wait_for_observer(key) {
|
||||||
|
return Ok(original_dst);
|
||||||
|
}
|
||||||
|
match err.raw_os_error() {
|
||||||
|
Some(code) if code == libc::EPERM => Err(anyhow!(
|
||||||
|
"PF NAT lookups are denied on this macOS build and no logged redirect flow was observed for {} -> {}",
|
||||||
|
key.peer,
|
||||||
|
key.local
|
||||||
|
)),
|
||||||
|
Some(code) if code == libc::ENOENT => Err(anyhow!(
|
||||||
|
"PF did not have a redirect state for {} -> {} and no logged redirect flow was observed; ensure outbound TCP is redirected and logged before Burrow accepts it",
|
||||||
|
key.peer,
|
||||||
|
key.local
|
||||||
|
)),
|
||||||
|
_ => unreachable!(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(err) => Err(err).context("DIOCNATLOOK failed"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn cached_destination(&self, key: FlowKey) -> Option<SocketAddr> {
|
||||||
|
let mut state = self.state.lock();
|
||||||
|
state.prune();
|
||||||
|
state.drain_observer(self.listener_addr);
|
||||||
|
state.cache.get(&key).map(|entry| entry.original_dst)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn remember(&self, key: FlowKey, original_dst: SocketAddr) {
|
||||||
|
let mut state = self.state.lock();
|
||||||
|
state.prune();
|
||||||
|
remember_flow(&mut state.cache, key, original_dst, Instant::now());
|
||||||
|
}
|
||||||
|
|
||||||
|
fn wait_for_observer(&self, key: FlowKey) -> Option<SocketAddr> {
|
||||||
|
for _ in 0..OBSERVER_WAIT_STEPS {
|
||||||
|
if let Some(original_dst) = self.cached_destination(key) {
|
||||||
|
return Some(original_dst);
|
||||||
|
}
|
||||||
|
std::thread::sleep(OBSERVER_WAIT_INTERVAL);
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct FlowState {
|
||||||
|
cache: HashMap<FlowKey, FlowEntry>,
|
||||||
|
observer: Option<PacketObserver>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FlowState {
|
||||||
|
fn prune(&mut self) {
|
||||||
|
let now = Instant::now();
|
||||||
|
self.cache.retain(|_, entry| entry.expires_at > now);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn drain_observer(&mut self, listener_addr: SocketAddr) {
|
||||||
|
let Some(mut observer) = self.observer.take() else {
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
if observer.drain(listener_addr, &mut self.cache).is_ok() {
|
||||||
|
self.observer = Some(observer);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
|
||||||
|
struct FlowKey {
|
||||||
|
peer: SocketAddr,
|
||||||
|
local: SocketAddr,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FlowKey {
|
||||||
|
fn from_stream(stream: &TcpStream) -> Result<Self> {
|
||||||
|
let peer = stream.peer_addr().context("failed to read transparent peer address")?;
|
||||||
|
let local = stream
|
||||||
|
.local_addr()
|
||||||
|
.context("failed to read transparent listener address")?;
|
||||||
|
match (peer, local) {
|
||||||
|
(SocketAddr::V4(_), SocketAddr::V4(_)) | (SocketAddr::V6(_), SocketAddr::V6(_)) => {
|
||||||
|
Ok(Self { peer, local })
|
||||||
|
}
|
||||||
|
_ => bail!("transparent socket had mismatched source/destination address families"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy, Debug)]
|
||||||
|
struct FlowEntry {
|
||||||
|
original_dst: SocketAddr,
|
||||||
|
expires_at: Instant,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn remember_flow(
|
||||||
|
cache: &mut HashMap<FlowKey, FlowEntry>,
|
||||||
|
key: FlowKey,
|
||||||
|
original_dst: SocketAddr,
|
||||||
|
now: Instant,
|
||||||
|
) {
|
||||||
|
cache.retain(|_, entry| entry.expires_at > now);
|
||||||
|
if cache.len() >= FLOW_CACHE_LIMIT {
|
||||||
|
if let Some(oldest) = cache
|
||||||
|
.iter()
|
||||||
|
.min_by_key(|(_, entry)| entry.expires_at)
|
||||||
|
.map(|(flow_key, _)| *flow_key)
|
||||||
|
{
|
||||||
|
cache.remove(&oldest);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cache.insert(
|
||||||
|
key,
|
||||||
|
FlowEntry {
|
||||||
|
original_dst,
|
||||||
|
expires_at: now + FLOW_CACHE_TTL,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn lookup_pf_original_destination(
|
||||||
|
fd: RawFd,
|
||||||
|
peer: SocketAddr,
|
||||||
|
local: SocketAddr,
|
||||||
|
) -> io::Result<SocketAddr> {
|
||||||
|
let mut request = PfiocNatlook::for_flow(peer, local)
|
||||||
|
.map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?;
|
||||||
|
let ioctl_result = unsafe { pf_natlook(fd, &mut request) };
|
||||||
|
if let Err(errno) = ioctl_result {
|
||||||
|
return Err(io::Error::from(errno));
|
||||||
|
}
|
||||||
|
request
|
||||||
|
.original_destination()
|
||||||
|
.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))
|
||||||
|
}
|
||||||
|
|
||||||
|
struct PacketObserver {
|
||||||
|
file: File,
|
||||||
|
buffer: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PacketObserver {
|
||||||
|
fn new(listener_addr: SocketAddr) -> io::Result<Self> {
|
||||||
|
if listener_addr.ip().is_unspecified() {
|
||||||
|
return Err(io::Error::new(
|
||||||
|
io::ErrorKind::InvalidInput,
|
||||||
|
"packet observer requires an explicit listener address",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let file = open_bpf_device()?;
|
||||||
|
bind_bpf_to_interface(file.as_raw_fd(), PFLOG_DEVICE)?;
|
||||||
|
set_bpf_flag(file.as_raw_fd(), libc::BIOCIMMEDIATE, 1)?;
|
||||||
|
set_bpf_flag(file.as_raw_fd(), libc::BIOCSSEESENT, 1)?;
|
||||||
|
set_nonblocking(file.as_raw_fd())?;
|
||||||
|
|
||||||
|
let mut buffer_len: libc::c_uint = 0;
|
||||||
|
ioctl_value(file.as_raw_fd(), libc::BIOCGBLEN, &mut buffer_len)?;
|
||||||
|
Ok(Self {
|
||||||
|
file,
|
||||||
|
buffer: vec![0; buffer_len as usize],
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn drain(
|
||||||
|
&mut self,
|
||||||
|
listener_addr: SocketAddr,
|
||||||
|
cache: &mut HashMap<FlowKey, FlowEntry>,
|
||||||
|
) -> io::Result<()> {
|
||||||
|
loop {
|
||||||
|
match self.file.read(&mut self.buffer) {
|
||||||
|
Ok(0) => break,
|
||||||
|
Ok(read) => self.consume(&self.buffer[..read], listener_addr, cache),
|
||||||
|
Err(err) if err.kind() == io::ErrorKind::WouldBlock => break,
|
||||||
|
Err(err) => return Err(err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn consume(
|
||||||
|
&self,
|
||||||
|
buffer: &[u8],
|
||||||
|
listener_addr: SocketAddr,
|
||||||
|
cache: &mut HashMap<FlowKey, FlowEntry>,
|
||||||
|
) {
|
||||||
|
let mut offset = 0usize;
|
||||||
|
let now = Instant::now();
|
||||||
|
while offset + std::mem::size_of::<libc::bpf_hdr>() <= buffer.len() {
|
||||||
|
let header = unsafe {
|
||||||
|
std::ptr::read_unaligned(buffer[offset..].as_ptr() as *const libc::bpf_hdr)
|
||||||
|
};
|
||||||
|
let header_len = header.bh_hdrlen as usize;
|
||||||
|
let captured_len = header.bh_caplen as usize;
|
||||||
|
let packet_start = offset + header_len;
|
||||||
|
let packet_end = packet_start + captured_len;
|
||||||
|
let next_record = offset + bpf_wordalign(header_len + captured_len);
|
||||||
|
if packet_end > buffer.len() || next_record > buffer.len() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some((peer, original_dst)) =
|
||||||
|
parse_logged_syn(&buffer[packet_start..packet_end], listener_addr)
|
||||||
|
{
|
||||||
|
remember_flow(
|
||||||
|
cache,
|
||||||
|
FlowKey {
|
||||||
|
peer,
|
||||||
|
local: listener_addr,
|
||||||
|
},
|
||||||
|
original_dst,
|
||||||
|
now,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
offset = next_record;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn open_bpf_device() -> io::Result<File> {
|
||||||
|
for index in 0..=255 {
|
||||||
|
match File::options()
|
||||||
|
.read(true)
|
||||||
|
.open(format!("/dev/bpf{index}"))
|
||||||
|
{
|
||||||
|
Ok(file) => return Ok(file),
|
||||||
|
Err(err) if err.raw_os_error() == Some(libc::EBUSY) => continue,
|
||||||
|
Err(err) => return Err(err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(io::Error::new(
|
||||||
|
io::ErrorKind::NotFound,
|
||||||
|
"no free /dev/bpf devices were available",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn bind_bpf_to_interface(fd: RawFd, ifname: &str) -> io::Result<()> {
|
||||||
|
let mut ifreq = unsafe { zeroed::<libc::ifreq>() };
|
||||||
|
let bytes = ifname.as_bytes();
|
||||||
|
let max = std::cmp::min(bytes.len(), libc::IFNAMSIZ.saturating_sub(1));
|
||||||
|
for (index, byte) in bytes.iter().take(max).enumerate() {
|
||||||
|
ifreq.ifr_name[index] = *byte as libc::c_char;
|
||||||
|
}
|
||||||
|
ioctl_value(fd, libc::BIOCSETIF, &mut ifreq)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn set_bpf_flag(fd: RawFd, request: libc::c_ulong, value: libc::c_uint) -> io::Result<()> {
|
||||||
|
let mut flag = value;
|
||||||
|
ioctl_value(fd, request, &mut flag)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn set_nonblocking(fd: RawFd) -> io::Result<()> {
|
||||||
|
let current = unsafe { libc::fcntl(fd, libc::F_GETFL) };
|
||||||
|
if current < 0 {
|
||||||
|
return Err(io::Error::last_os_error());
|
||||||
|
}
|
||||||
|
if unsafe { libc::fcntl(fd, libc::F_SETFL, current | libc::O_NONBLOCK) } != 0 {
|
||||||
|
return Err(io::Error::last_os_error());
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn ioctl_value<T>(fd: RawFd, request: libc::c_ulong, value: &mut T) -> io::Result<()> {
|
||||||
|
if unsafe { libc::ioctl(fd, request, value) } != 0 {
|
||||||
|
return Err(io::Error::last_os_error());
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_logged_syn(
|
||||||
|
record: &[u8],
|
||||||
|
listener_addr: SocketAddr,
|
||||||
|
) -> Option<(SocketAddr, SocketAddr)> {
|
||||||
|
let header = read_pflog_header(record)?;
|
||||||
|
if header.dir != PF_OUT {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let packet = record.get(header.length as usize..)?;
|
||||||
|
match header.af as i32 {
|
||||||
|
libc::AF_INET => parse_ipv4_syn(packet, listener_addr),
|
||||||
|
libc::AF_INET6 => parse_ipv6_syn(packet, listener_addr),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_ipv4_syn(packet: &[u8], listener_addr: SocketAddr) -> Option<(SocketAddr, SocketAddr)> {
|
||||||
|
if !matches!(listener_addr, SocketAddr::V4(_)) || packet.len() < 20 || packet[0] >> 4 != 4 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let header_len = usize::from(packet[0] & 0x0f) * 4;
|
||||||
|
if header_len < 20 || packet.len() < header_len + 20 || packet[9] != libc::IPPROTO_TCP as u8 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let tcp = &packet[header_len..];
|
||||||
|
let flags = tcp[13];
|
||||||
|
if flags & 0x02 == 0 || flags & 0x10 != 0 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let source_ip = Ipv4Addr::new(packet[12], packet[13], packet[14], packet[15]);
|
||||||
|
let dest_ip = Ipv4Addr::new(packet[16], packet[17], packet[18], packet[19]);
|
||||||
|
let source_port = u16::from_be_bytes([tcp[0], tcp[1]]);
|
||||||
|
let dest_port = u16::from_be_bytes([tcp[2], tcp[3]]);
|
||||||
|
Some((
|
||||||
|
SocketAddr::V4(SocketAddrV4::new(source_ip, source_port)),
|
||||||
|
SocketAddr::V4(SocketAddrV4::new(dest_ip, dest_port)),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_ipv6_syn(packet: &[u8], listener_addr: SocketAddr) -> Option<(SocketAddr, SocketAddr)> {
|
||||||
|
if !matches!(listener_addr, SocketAddr::V6(_)) || packet.len() < 40 || packet[0] >> 4 != 6 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
if packet[6] != libc::IPPROTO_TCP as u8 || packet.len() < 60 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let tcp = &packet[40..];
|
||||||
|
let flags = tcp[13];
|
||||||
|
if flags & 0x02 == 0 || flags & 0x10 != 0 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let source_ip = Ipv6Addr::from(<[u8; 16]>::try_from(&packet[8..24]).ok()?);
|
||||||
|
let dest_ip = Ipv6Addr::from(<[u8; 16]>::try_from(&packet[24..40]).ok()?);
|
||||||
|
let source_port = u16::from_be_bytes([tcp[0], tcp[1]]);
|
||||||
|
let dest_port = u16::from_be_bytes([tcp[2], tcp[3]]);
|
||||||
|
Some((
|
||||||
|
SocketAddr::V6(SocketAddrV6::new(source_ip, source_port, 0, 0)),
|
||||||
|
SocketAddr::V6(SocketAddrV6::new(dest_ip, dest_port, 0, 0)),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn read_pflog_header(record: &[u8]) -> Option<PflogHdr> {
|
||||||
|
if record.len() < std::mem::size_of::<PflogHdr>() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let header =
|
||||||
|
unsafe { std::ptr::read_unaligned(record.as_ptr() as *const PflogHdr) };
|
||||||
|
if header.length as usize > record.len() || (header.length as usize) < PFLOG_REAL_HDRLEN {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
Some(header)
|
||||||
|
}
|
||||||
|
|
||||||
|
const fn bpf_wordalign(len: usize) -> usize {
|
||||||
|
let alignment = std::mem::size_of::<i32>();
|
||||||
|
(len + (alignment - 1)) & !(alignment - 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[repr(C)]
|
||||||
|
#[derive(Clone, Copy)]
|
||||||
|
struct PfiocNatlook {
|
||||||
|
saddr: PfAddr,
|
||||||
|
daddr: PfAddr,
|
||||||
|
rsaddr: PfAddr,
|
||||||
|
rdaddr: PfAddr,
|
||||||
|
sxport: PfStateXport,
|
||||||
|
dxport: PfStateXport,
|
||||||
|
rsxport: PfStateXport,
|
||||||
|
rdxport: PfStateXport,
|
||||||
|
af: libc::sa_family_t,
|
||||||
|
proto: u8,
|
||||||
|
proto_variant: u8,
|
||||||
|
direction: u8,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PfiocNatlook {
|
||||||
|
fn for_flow(peer: SocketAddr, local: SocketAddr) -> Result<Self> {
|
||||||
|
let (saddr, sxport, source_af) = pf_endpoint(peer);
|
||||||
|
let (daddr, dxport, destination_af) = pf_endpoint(local);
|
||||||
|
if source_af != destination_af {
|
||||||
|
bail!("transparent flow key changed address family across redirect");
|
||||||
|
}
|
||||||
|
Ok(Self {
|
||||||
|
saddr,
|
||||||
|
daddr,
|
||||||
|
rsaddr: PfAddr::default(),
|
||||||
|
rdaddr: PfAddr::default(),
|
||||||
|
sxport,
|
||||||
|
dxport,
|
||||||
|
rsxport: PfStateXport::default(),
|
||||||
|
rdxport: PfStateXport::default(),
|
||||||
|
af: source_af,
|
||||||
|
proto: libc::IPPROTO_TCP as u8,
|
||||||
|
proto_variant: 0,
|
||||||
|
direction: PF_OUT,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn original_destination(&self) -> Result<SocketAddr> {
|
||||||
|
socket_addr_from_pf(self.af, self.rdaddr, self.rdxport)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn pf_endpoint(addr: SocketAddr) -> (PfAddr, PfStateXport, libc::sa_family_t) {
|
||||||
|
let port = PfStateXport {
|
||||||
|
port: u16::to_be(addr.port()),
|
||||||
|
};
|
||||||
|
match addr {
|
||||||
|
SocketAddr::V4(addr) => (
|
||||||
|
PfAddr::from_ipv4(*addr.ip()),
|
||||||
|
port,
|
||||||
|
libc::AF_INET as libc::sa_family_t,
|
||||||
|
),
|
||||||
|
SocketAddr::V6(addr) => (
|
||||||
|
PfAddr::from_ipv6(*addr.ip()),
|
||||||
|
port,
|
||||||
|
libc::AF_INET6 as libc::sa_family_t,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn socket_addr_from_pf(
|
||||||
|
af: libc::sa_family_t,
|
||||||
|
addr: PfAddr,
|
||||||
|
port: PfStateXport,
|
||||||
|
) -> Result<SocketAddr> {
|
||||||
|
match af as i32 {
|
||||||
|
libc::AF_INET => Ok(SocketAddr::V4(SocketAddrV4::new(
|
||||||
|
Ipv4Addr::from(addr.v4_octets()),
|
||||||
|
u16::from_be(unsafe { port.port }),
|
||||||
|
))),
|
||||||
|
libc::AF_INET6 => Ok(SocketAddr::V6(SocketAddrV6::new(
|
||||||
|
Ipv6Addr::from(addr.v6_octets()),
|
||||||
|
u16::from_be(unsafe { port.port }),
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
))),
|
||||||
|
family => bail!("unsupported PF address family {family}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[repr(C)]
|
||||||
|
#[derive(Clone, Copy)]
|
||||||
|
union PfAddrRepr {
|
||||||
|
v4addr: libc::in_addr,
|
||||||
|
v6addr: libc::in6_addr,
|
||||||
|
addr8: [u8; 16],
|
||||||
|
addr16: [u16; 8],
|
||||||
|
addr32: [u32; 4],
|
||||||
|
}
|
||||||
|
|
||||||
|
#[repr(C)]
|
||||||
|
#[derive(Clone, Copy)]
|
||||||
|
struct PfAddr {
|
||||||
|
pfa: PfAddrRepr,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for PfAddr {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
pfa: PfAddrRepr { addr32: [0; 4] },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PfAddr {
|
||||||
|
fn from_ipv4(ip: Ipv4Addr) -> Self {
|
||||||
|
let mut bytes = [0u8; 16];
|
||||||
|
bytes[..4].copy_from_slice(&ip.octets());
|
||||||
|
Self {
|
||||||
|
pfa: PfAddrRepr { addr8: bytes },
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn from_ipv6(ip: Ipv6Addr) -> Self {
|
||||||
|
Self {
|
||||||
|
pfa: PfAddrRepr {
|
||||||
|
addr8: ip.octets(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn v4_octets(self) -> [u8; 4] {
|
||||||
|
let bytes = unsafe { self.pfa.addr8 };
|
||||||
|
[bytes[0], bytes[1], bytes[2], bytes[3]]
|
||||||
|
}
|
||||||
|
|
||||||
|
fn v6_octets(self) -> [u8; 16] {
|
||||||
|
unsafe { self.pfa.addr8 }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[repr(C)]
|
||||||
|
#[derive(Clone, Copy)]
|
||||||
|
union PfStateXport {
|
||||||
|
port: u16,
|
||||||
|
call_id: u16,
|
||||||
|
spi: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[repr(C)]
|
||||||
|
#[derive(Clone, Copy)]
|
||||||
|
struct PflogHdr {
|
||||||
|
length: u8,
|
||||||
|
af: libc::sa_family_t,
|
||||||
|
action: u8,
|
||||||
|
reason: u8,
|
||||||
|
ifname: [libc::c_char; libc::IFNAMSIZ],
|
||||||
|
ruleset: [libc::c_char; PFLOG_RULESET_NAME_SIZE],
|
||||||
|
rulenr: u32,
|
||||||
|
subrulenr: u32,
|
||||||
|
uid: libc::uid_t,
|
||||||
|
pid: libc::pid_t,
|
||||||
|
rule_uid: libc::uid_t,
|
||||||
|
rule_pid: libc::pid_t,
|
||||||
|
dir: u8,
|
||||||
|
pad: [u8; 3],
|
||||||
|
}
|
||||||
|
|
||||||
|
const PFLOG_REAL_HDRLEN: usize = std::mem::offset_of!(PflogHdr, pad);
|
||||||
|
|
||||||
|
impl Default for PfStateXport {
|
||||||
|
fn default() -> Self {
|
||||||
|
unsafe { zeroed() }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn builds_natlook_request_from_redirected_flow() {
|
||||||
|
let request = PfiocNatlook::for_flow(
|
||||||
|
"192.0.2.10:41000".parse().unwrap(),
|
||||||
|
"127.0.0.1:9040".parse().unwrap(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(request.af as i32, libc::AF_INET);
|
||||||
|
assert_eq!(request.proto, libc::IPPROTO_TCP as u8);
|
||||||
|
assert_eq!(request.direction, PF_OUT);
|
||||||
|
assert_eq!(request.saddr.v4_octets(), [192, 0, 2, 10]);
|
||||||
|
assert_eq!(request.daddr.v4_octets(), [127, 0, 0, 1]);
|
||||||
|
assert_eq!(u16::from_be(unsafe { request.sxport.port }), 41000);
|
||||||
|
assert_eq!(u16::from_be(unsafe { request.dxport.port }), 9040);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn decodes_original_ipv6_destination() {
|
||||||
|
let mut request =
|
||||||
|
PfiocNatlook::for_flow("[::1]:41000".parse().unwrap(), "[::1]:9040".parse().unwrap())
|
||||||
|
.unwrap();
|
||||||
|
request.rdaddr = PfAddr::from_ipv6("2001:db8::42".parse().unwrap());
|
||||||
|
request.rdxport = PfStateXport {
|
||||||
|
port: u16::to_be(443),
|
||||||
|
};
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
request.original_destination().unwrap(),
|
||||||
|
"[2001:db8::42]:443".parse::<SocketAddr>().unwrap()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parses_logged_ipv4_syn() {
|
||||||
|
let mut record = Vec::new();
|
||||||
|
record.extend_from_slice(&[
|
||||||
|
PFLOG_REAL_HDRLEN as u8,
|
||||||
|
libc::AF_INET as u8,
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
]);
|
||||||
|
record.extend_from_slice(&[0; libc::IFNAMSIZ]);
|
||||||
|
record.extend_from_slice(&[0; PFLOG_RULESET_NAME_SIZE]);
|
||||||
|
record.extend_from_slice(&0u32.to_ne_bytes());
|
||||||
|
record.extend_from_slice(&0u32.to_ne_bytes());
|
||||||
|
record.extend_from_slice(&0u32.to_ne_bytes());
|
||||||
|
record.extend_from_slice(&0u32.to_ne_bytes());
|
||||||
|
record.extend_from_slice(&0u32.to_ne_bytes());
|
||||||
|
record.extend_from_slice(&0u32.to_ne_bytes());
|
||||||
|
record.push(PF_OUT);
|
||||||
|
|
||||||
|
record.extend_from_slice(&[
|
||||||
|
0x45, 0, 0, 40, 0, 0, 0, 0, 64, libc::IPPROTO_TCP as u8, 0, 0, 192, 0, 2, 10,
|
||||||
|
198, 51, 100, 42,
|
||||||
|
]);
|
||||||
|
record.extend_from_slice(&[
|
||||||
|
0x9c, 0x28, 0x01, 0xbb, 0, 0, 0, 0, 0, 0, 0, 0, 0x50, 0x02, 0x20, 0, 0, 0, 0,
|
||||||
|
0,
|
||||||
|
]);
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
parse_logged_syn(&record, "127.0.0.1:9040".parse().unwrap()),
|
||||||
|
Some((
|
||||||
|
"192.0.2.10:39976".parse().unwrap(),
|
||||||
|
"198.51.100.42:443".parse().unwrap(),
|
||||||
|
))
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parses_logged_ipv6_syn() {
|
||||||
|
let mut record = Vec::new();
|
||||||
|
record.extend_from_slice(&[
|
||||||
|
PFLOG_REAL_HDRLEN as u8,
|
||||||
|
libc::AF_INET6 as u8,
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
]);
|
||||||
|
record.extend_from_slice(&[0; libc::IFNAMSIZ]);
|
||||||
|
record.extend_from_slice(&[0; PFLOG_RULESET_NAME_SIZE]);
|
||||||
|
record.extend_from_slice(&0u32.to_ne_bytes());
|
||||||
|
record.extend_from_slice(&0u32.to_ne_bytes());
|
||||||
|
record.extend_from_slice(&0u32.to_ne_bytes());
|
||||||
|
record.extend_from_slice(&0u32.to_ne_bytes());
|
||||||
|
record.extend_from_slice(&0u32.to_ne_bytes());
|
||||||
|
record.extend_from_slice(&0u32.to_ne_bytes());
|
||||||
|
record.push(PF_OUT);
|
||||||
|
|
||||||
|
let source_ip = Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0x10).octets();
|
||||||
|
let dest_ip = Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0x42).octets();
|
||||||
|
record.extend_from_slice(&[
|
||||||
|
0x60, 0, 0, 0, 0, 20, libc::IPPROTO_TCP as u8, 64,
|
||||||
|
]);
|
||||||
|
record.extend_from_slice(&source_ip);
|
||||||
|
record.extend_from_slice(&dest_ip);
|
||||||
|
record.extend_from_slice(&[
|
||||||
|
0x9c, 0x28, 0x01, 0xbb, 0, 0, 0, 0, 0, 0, 0, 0, 0x50, 0x02, 0x20, 0, 0, 0, 0,
|
||||||
|
0,
|
||||||
|
]);
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
parse_logged_syn(&record, "[::1]:9040".parse().unwrap()),
|
||||||
|
Some((
|
||||||
|
"[2001:db8::10]:39976".parse().unwrap(),
|
||||||
|
"[2001:db8::42]:443".parse().unwrap(),
|
||||||
|
))
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(target_vendor = "apple")]
|
||||||
|
use apple_pf::AppleFlowTracker;
|
||||||
|
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
fn socket_addr_from_storage(addr: &libc::sockaddr_storage, len: usize) -> Result<SocketAddr> {
|
||||||
|
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6};
|
||||||
|
|
||||||
|
if len < std::mem::size_of::<libc::sa_family_t>() {
|
||||||
|
anyhow::bail!("socket address buffer was too short");
|
||||||
|
}
|
||||||
|
|
||||||
|
match addr.ss_family as i32 {
|
||||||
|
libc::AF_INET => {
|
||||||
|
let addr_in = unsafe { *(addr as *const _ as *const libc::sockaddr_in) };
|
||||||
|
let ip = Ipv4Addr::from(u32::from_be(addr_in.sin_addr.s_addr));
|
||||||
|
let port = u16::from_be(addr_in.sin_port);
|
||||||
|
Ok(SocketAddr::V4(SocketAddrV4::new(ip, port)))
|
||||||
|
}
|
||||||
|
libc::AF_INET6 => {
|
||||||
|
let addr_in = unsafe { *(addr as *const _ as *const libc::sockaddr_in6) };
|
||||||
|
let ip = Ipv6Addr::from(addr_in.sin6_addr.s6_addr);
|
||||||
|
let port = u16::from_be(addr_in.sin6_port);
|
||||||
|
Ok(SocketAddr::V6(SocketAddrV6::new(
|
||||||
|
ip,
|
||||||
|
port,
|
||||||
|
addr_in.sin6_flowinfo,
|
||||||
|
addr_in.sin6_scope_id,
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
family => anyhow::bail!("unsupported socket address family {family}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(all(test, target_os = "linux"))]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use std::{
|
||||||
|
mem::size_of,
|
||||||
|
net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parses_ipv4_socket_addr() {
|
||||||
|
let mut storage = unsafe { std::mem::zeroed::<libc::sockaddr_storage>() };
|
||||||
|
let addr_in = unsafe { &mut *(&mut storage as *mut _ as *mut libc::sockaddr_in) };
|
||||||
|
addr_in.sin_family = libc::AF_INET as libc::sa_family_t;
|
||||||
|
addr_in.sin_port = u16::to_be(9040);
|
||||||
|
addr_in.sin_addr = libc::in_addr {
|
||||||
|
s_addr: u32::to_be(u32::from(Ipv4Addr::new(127, 0, 0, 1))),
|
||||||
|
};
|
||||||
|
|
||||||
|
let parsed = socket_addr_from_storage(&storage, size_of::<libc::sockaddr_in>()).unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
parsed,
|
||||||
|
SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 9040))
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parses_ipv6_socket_addr() {
|
||||||
|
let mut storage = unsafe { std::mem::zeroed::<libc::sockaddr_storage>() };
|
||||||
|
let addr_in = unsafe { &mut *(&mut storage as *mut _ as *mut libc::sockaddr_in6) };
|
||||||
|
addr_in.sin6_family = libc::AF_INET6 as libc::sa_family_t;
|
||||||
|
addr_in.sin6_port = u16::to_be(9150);
|
||||||
|
addr_in.sin6_addr = libc::in6_addr {
|
||||||
|
s6_addr: Ipv6Addr::LOCALHOST.octets(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let parsed = socket_addr_from_storage(&storage, size_of::<libc::sockaddr_in6>()).unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
parsed,
|
||||||
|
SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 9150, 0, 0))
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -29,12 +29,15 @@ pub fn initialize() {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
#[cfg(target_vendor = "apple")]
|
#[cfg(target_os = "macos")]
|
||||||
let system_log = Some(tracing_oslog::OsLogger::new(
|
let system_log = Some(tracing_oslog::OsLogger::new(
|
||||||
"com.hackclub.burrow",
|
"com.hackclub.burrow",
|
||||||
"tracing",
|
"tracing",
|
||||||
));
|
));
|
||||||
|
|
||||||
|
#[cfg(all(target_vendor = "apple", not(target_os = "macos")))]
|
||||||
|
let system_log = None::<tracing_subscriber::layer::Identity>;
|
||||||
|
|
||||||
let stderr = (console::user_attended_stderr() || system_log.is_none()).then(|| {
|
let stderr = (console::user_attended_stderr() || system_log.is_none()).then(|| {
|
||||||
tracing_subscriber::fmt::layer()
|
tracing_subscriber::fmt::layer()
|
||||||
.with_level(true)
|
.with_level(true)
|
||||||
|
|
|
||||||
104
docs/FORWARDEMAIL.md
Normal file
104
docs/FORWARDEMAIL.md
Normal file
|
|
@ -0,0 +1,104 @@
|
||||||
|
# Forward Email Backups
|
||||||
|
|
||||||
|
Burrow's mail direction is hosted mail on [Forward Email](https://forwardemail.net/), with domain-owned backup retention in our own S3-compatible object storage.
|
||||||
|
|
||||||
|
This is the first mail path to operationalize for `burrow.net` and `burrow.rs`. It keeps SMTP/IMAP hosting off the first forge host while still giving Burrow control over backup retention and object ownership.
|
||||||
|
|
||||||
|
## What Forward Email Requires
|
||||||
|
|
||||||
|
Forward Email exposes custom backup storage per domain. The documented API shape is:
|
||||||
|
|
||||||
|
- `PUT /v1/domains/{domain}` with:
|
||||||
|
- `has_custom_s3=true`
|
||||||
|
- `s3_endpoint`
|
||||||
|
- `s3_access_key_id`
|
||||||
|
- `s3_secret_access_key`
|
||||||
|
- `s3_region`
|
||||||
|
- `s3_bucket`
|
||||||
|
- `POST /v1/domains/{domain}/test-s3-connection`
|
||||||
|
|
||||||
|
Forward Email also documents these operational constraints:
|
||||||
|
|
||||||
|
- the bucket must remain private
|
||||||
|
- credentials are validated with `HeadBucket`
|
||||||
|
- failed or public-bucket configurations fall back to Forward Email's default storage and notify domain administrators
|
||||||
|
- custom S3 keeps every backup version, so lifecycle expiration is our responsibility
|
||||||
|
|
||||||
|
## Burrow Secret Layout
|
||||||
|
|
||||||
|
Authoritative secrets now live in:
|
||||||
|
|
||||||
|
- `secrets/forwardemail/api-token.age`
|
||||||
|
- `secrets/forwardemail/hetzner-s3-user.age`
|
||||||
|
- `secrets/forwardemail/hetzner-s3-secret.age`
|
||||||
|
|
||||||
|
Legacy plaintext `intake/` files may still exist locally for debugging, but the
|
||||||
|
tooling now prefers the age-encrypted files above.
|
||||||
|
- Hetzner public S3 endpoint for Forward Email: `https://hel1.your-objectstorage.com`
|
||||||
|
- Hetzner object storage region: `hel1`
|
||||||
|
- Hetzner bucket used for Forward Email backups: `burrow`
|
||||||
|
|
||||||
|
## Verified Storage State
|
||||||
|
|
||||||
|
As of March 15, 2026, Burrow's Forward Email custom S3 configuration is live:
|
||||||
|
|
||||||
|
- endpoint: `https://hel1.your-objectstorage.com`
|
||||||
|
- region: `hel1`
|
||||||
|
- bucket: `burrow`
|
||||||
|
- `burrow.net` has `has_custom_s3=true`
|
||||||
|
- `burrow.rs` has `has_custom_s3=true`
|
||||||
|
- Forward Email's `/test-s3-connection` succeeded for both domains
|
||||||
|
- the `burrow` bucket enforces lifecycle expiration after `90` days
|
||||||
|
|
||||||
|
Forward Email performs bucket validation with bucket-style addressing. For Hetzner Object Storage, this means the working endpoint is the regional S3 endpoint (`https://hel1.your-objectstorage.com`), not the account alias (`https://burrow.hel1.your-objectstorage.com`). Using the account alias causes TLS hostname mismatches when the vendor prepends the bucket name.
|
||||||
|
|
||||||
|
## Helper
|
||||||
|
|
||||||
|
Use [`Tools/forwardemail-custom-s3.sh`](../Tools/forwardemail-custom-s3.sh) to configure or retest the domain setting without putting secrets on the process list.
|
||||||
|
|
||||||
|
Use [`Tools/forwardemail-hetzner-storage.py`](../Tools/forwardemail-hetzner-storage.py) to ensure the Hetzner backup bucket exists and to apply lifecycle expiry before enabling custom S3 on the Forward Email side.
|
||||||
|
|
||||||
|
Bucket bootstrap example:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
Tools/forwardemail-hetzner-storage.py \
|
||||||
|
--endpoint https://hel1.your-objectstorage.com \
|
||||||
|
--bucket burrow \
|
||||||
|
--expire-days 90
|
||||||
|
```
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
Tools/forwardemail-custom-s3.sh \
|
||||||
|
--domain burrow.net \
|
||||||
|
--api-token-file secrets/forwardemail/api-token.age \
|
||||||
|
--s3-endpoint https://hel1.your-objectstorage.com \
|
||||||
|
--s3-region hel1 \
|
||||||
|
--s3-bucket burrow \
|
||||||
|
--s3-access-key-file secrets/forwardemail/hetzner-s3-user.age \
|
||||||
|
--s3-secret-key-file secrets/forwardemail/hetzner-s3-secret.age
|
||||||
|
```
|
||||||
|
|
||||||
|
Retest an existing domain configuration without rewriting it:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
Tools/forwardemail-custom-s3.sh \
|
||||||
|
--domain burrow.net \
|
||||||
|
--api-token-file secrets/forwardemail/api-token.age \
|
||||||
|
--test-only
|
||||||
|
```
|
||||||
|
|
||||||
|
## Retention
|
||||||
|
|
||||||
|
Forward Email preserves every backup object when custom S3 is enabled. Configure lifecycle expiration on the bucket itself. A 30-day or 90-day expiry window is the baseline recommendation from the vendor docs; Burrow should choose explicitly per domain instead of letting the bucket grow without bound. The current Burrow bootstrap helper defaults to `90` days.
|
||||||
|
|
||||||
|
## Identity Direction
|
||||||
|
|
||||||
|
Hosted mail and SaaS identity are separate concerns:
|
||||||
|
|
||||||
|
- mail hosting/backups: Forward Email + Burrow-owned S3-compatible storage
|
||||||
|
- interactive identity: Authentik as the long-term IdP
|
||||||
|
- future SaaS SSO target: Linear via SAML once the workspace and plan are ready
|
||||||
|
|
||||||
|
This means the forge host does not need to become the first mail server just to give Burrow mailboxes or retention control.
|
||||||
31
docs/PROTOCOL_ROADMAP.md
Normal file
31
docs/PROTOCOL_ROADMAP.md
Normal file
|
|
@ -0,0 +1,31 @@
|
||||||
|
# Protocol Roadmap
|
||||||
|
|
||||||
|
Burrow currently has two tunnel paths in-tree:
|
||||||
|
|
||||||
|
- a WireGuard data plane
|
||||||
|
- a mesh transport built on `iroh`
|
||||||
|
|
||||||
|
What it does not have yet is a transport-neutral control plane that can honestly claim full MASQUE `CONNECT-IP` or full Tailscale-style negotiation parity. This repository now contains the beginnings of that layer:
|
||||||
|
|
||||||
|
- control-plane data structures in `burrow/src/control/mod.rs`
|
||||||
|
- local auth bootstrap and persistent node/session storage in `burrow/src/auth/server/`
|
||||||
|
- governance documents under `evolution/` for the bigger protocol work
|
||||||
|
|
||||||
|
## `CONNECT-IP`
|
||||||
|
|
||||||
|
Full RFC 9484 support requires more than packet forwarding. It needs HTTP/3 session management, Capsule handling, HTTP Datagram context identifiers, address assignment, route advertisement, and request-scope enforcement. Burrow does not implement those end to end yet.
|
||||||
|
|
||||||
|
## Tailscale-Style Negotiation
|
||||||
|
|
||||||
|
Burrow now has register/map request and response types plus persistent node records, but it does not yet implement the full Tailscale capability surface, peer delta protocol, DERP coordination, or Noise-based control transport.
|
||||||
|
|
||||||
|
## Current Direction
|
||||||
|
|
||||||
|
The intended sequence is:
|
||||||
|
|
||||||
|
1. Stabilize the control-plane data model and bootstrap auth.
|
||||||
|
2. Introduce transport-neutral route and address abstractions.
|
||||||
|
3. Add MASQUE framing and HTTP/3 transport support.
|
||||||
|
4. Expand policy, relay, and interoperability testing.
|
||||||
|
|
||||||
|
This keeps Burrow honest about what is running today while creating a clean path for the rest.
|
||||||
41
docs/TOR.md
Normal file
41
docs/TOR.md
Normal file
|
|
@ -0,0 +1,41 @@
|
||||||
|
# Tor Transport
|
||||||
|
|
||||||
|
Burrow now has a `Tor` network type that boots an in-process [Arti](https://gitlab.torproject.org/tpo/core/arti) client and exposes a transparent TCP listener for outbound stream forwarding.
|
||||||
|
|
||||||
|
The first implementation is intentionally narrow:
|
||||||
|
|
||||||
|
- `tcp_stack.kind = "system"` is the only supported TCP stack backend.
|
||||||
|
- transparent destination recovery uses Linux `SO_ORIGINAL_DST` and macOS PF lookups.
|
||||||
|
- on macOS, Burrow first tries PF `DIOCNATLOOK`, then falls back to a `pflog0` observer backed by an in-memory flow cache keyed by the redirected socket tuple.
|
||||||
|
- Burrow does not yet install firewall redirect rules for you.
|
||||||
|
- traffic reaches Arti only if the host already redirects outbound TCP flows to Burrow's local listener.
|
||||||
|
- the macOS observer fallback only works when the redirect rule is logged to `pflog0` and Burrow listens on an explicit local address such as `127.0.0.1:9040`.
|
||||||
|
- destination handling is IP-and-port based, so this does not yet capture DNS or `.onion` names before local resolution.
|
||||||
|
- Burrow still does not install loop-avoidance rules for Arti's own relay connections, so redirect rules must exempt those flows externally for now.
|
||||||
|
|
||||||
|
## Payload format
|
||||||
|
|
||||||
|
`Network.payload` can be JSON or TOML.
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"address": ["100.64.0.2/32"],
|
||||||
|
"tun_name": "burrow-tor",
|
||||||
|
"mtu": 1400,
|
||||||
|
"arti": {
|
||||||
|
"state_dir": "/var/lib/burrow/arti/state",
|
||||||
|
"cache_dir": "/var/cache/burrow/arti"
|
||||||
|
},
|
||||||
|
"tcp_stack": {
|
||||||
|
"kind": "system",
|
||||||
|
"listen": "127.0.0.1:9040"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Next steps
|
||||||
|
|
||||||
|
- teach Burrow to program and tear down redirect rules safely.
|
||||||
|
- add loop-avoidance for Arti's own relay connections before enabling automatic redirect.
|
||||||
|
- add DNS capture or hostname-aware forwarding for `.onion` and other unresolved destinations.
|
||||||
|
- add alternate pure-Rust TCP stack backends behind the same `tcp_stack` enum.
|
||||||
30
docs/WIREGUARD_LINEAGE.md
Normal file
30
docs/WIREGUARD_LINEAGE.md
Normal file
|
|
@ -0,0 +1,30 @@
|
||||||
|
# WireGuard Rust Lineage
|
||||||
|
|
||||||
|
Burrow's in-tree WireGuard engine is not a greenfield implementation. It was lifted from the Rust WireGuard lineage around Cloudflare's BoringTun, then cut down and reshaped to fit Burrow's own daemon and tunnel abstractions.
|
||||||
|
|
||||||
|
## What Was Lifted
|
||||||
|
|
||||||
|
- The repository history includes `1b39eca` (`boringtun wip`) and `28af9003` (`merge boringtun into burrow`).
|
||||||
|
- The current `burrow/src/wireguard/noise/*` files still carry the original Cloudflare copyright and SPDX headers.
|
||||||
|
- Core protocol machinery such as the Noise handshake, session state, rate limiter, and timer logic came from that imported body of work.
|
||||||
|
|
||||||
|
## What Changed in Burrow
|
||||||
|
|
||||||
|
Burrow does not embed BoringTun unchanged.
|
||||||
|
|
||||||
|
- The original device layer was replaced with Burrow-specific interface and peer control blocks in `burrow/src/wireguard/iface.rs` and `burrow/src/wireguard/pcb.rs`.
|
||||||
|
- Configuration handling was rewritten around Burrow's own INI parser and config model in `burrow/src/wireguard/config.rs`.
|
||||||
|
- The daemon now resolves the active runtime from the database-backed network list rather than from a single static WireGuard payload.
|
||||||
|
- Burrow added its own runtime switching path so WireGuard and mesh transports can share one daemon lifecycle.
|
||||||
|
|
||||||
|
## What Was Improved
|
||||||
|
|
||||||
|
The lifted code has been tightened further in-repo.
|
||||||
|
|
||||||
|
- Deprecated constant-time comparisons were replaced with `subtle`.
|
||||||
|
- Network ordering and runtime selection are now deterministic and test-covered.
|
||||||
|
- The Burrow runtime can swap between WireGuard and mesh-backed networks without restarting the daemon process itself.
|
||||||
|
|
||||||
|
## Why This Matters
|
||||||
|
|
||||||
|
This project should be explicit about lineage. Burrow benefits from proven Rust WireGuard work, but it owns the integration surface, runtime behavior, and future maintenance burden. That is why the code should be documented as lifted, modified, and improved rather than described as wholly original.
|
||||||
60
evolution/README.md
Normal file
60
evolution/README.md
Normal file
|
|
@ -0,0 +1,60 @@
|
||||||
|
# Burrow Evolution
|
||||||
|
|
||||||
|
Burrow Evolution Proposals (BEPs) are the repository's durable design record for protocol work, control-plane changes, forge infrastructure, and operational policy.
|
||||||
|
|
||||||
|
## Goals
|
||||||
|
|
||||||
|
1. Capture intent before implementation outruns the architecture.
|
||||||
|
2. Give contributors and agents enough context to work safely without re-discovering prior decisions.
|
||||||
|
3. Tie ambitious work to concrete validation, rollout, and rollback criteria.
|
||||||
|
|
||||||
|
## When a BEP is required
|
||||||
|
|
||||||
|
Open a BEP for:
|
||||||
|
|
||||||
|
- new transports or protocol families
|
||||||
|
- control-plane and identity changes
|
||||||
|
- deployment, forge, runner, or secrets changes
|
||||||
|
- data model migrations
|
||||||
|
- user-visible behavior that changes security or routing semantics
|
||||||
|
|
||||||
|
Small bug fixes and isolated refactors do not need a BEP unless they materially change one of the areas above.
|
||||||
|
|
||||||
|
## Lifecycle
|
||||||
|
|
||||||
|
1. Pitch
|
||||||
|
Capture the problem and why it matters now.
|
||||||
|
2. Draft
|
||||||
|
Copy `evolution/proposals/0000-template.md` to `evolution/proposals/BEP-XXXX-short-slug.md`.
|
||||||
|
3. Review
|
||||||
|
Collect feedback, tighten the design, and document unresolved concerns.
|
||||||
|
4. Decision
|
||||||
|
Mark the proposal `Accepted`, `Rejected`, or `Returned for Revision`.
|
||||||
|
5. Implementation
|
||||||
|
Link code changes, tests, and rollout evidence.
|
||||||
|
6. Supersession
|
||||||
|
Keep historical proposals in-tree and point forward to the replacing BEP.
|
||||||
|
|
||||||
|
## Status Values
|
||||||
|
|
||||||
|
- `Pitch`
|
||||||
|
- `Draft`
|
||||||
|
- `In Review`
|
||||||
|
- `Accepted`
|
||||||
|
- `Implemented`
|
||||||
|
- `Rejected`
|
||||||
|
- `Returned for Revision`
|
||||||
|
- `Superseded`
|
||||||
|
- `Archived`
|
||||||
|
|
||||||
|
## Layout
|
||||||
|
|
||||||
|
```text
|
||||||
|
evolution/
|
||||||
|
README.md
|
||||||
|
proposals/
|
||||||
|
0000-template.md
|
||||||
|
BEP-0001-...
|
||||||
|
```
|
||||||
|
|
||||||
|
Use ASCII Markdown. Keep metadata at the top of each proposal so tooling and future agents can parse it quickly.
|
||||||
57
evolution/proposals/0000-template.md
Normal file
57
evolution/proposals/0000-template.md
Normal file
|
|
@ -0,0 +1,57 @@
|
||||||
|
# `BEP-XXXX` - Title Case Summary
|
||||||
|
|
||||||
|
```text
|
||||||
|
Status: Draft | In Review | Accepted | Implemented | Rejected | Returned for Revision | Superseded | Archived
|
||||||
|
Proposal: BEP-XXXX
|
||||||
|
Authors: <name(s) or agent ids>
|
||||||
|
Coordinator: <name>
|
||||||
|
Reviewers: <people, bots, contributors>
|
||||||
|
Constitution Sections: <II, III, IV, etc.>
|
||||||
|
Implementation PRs: <link(s)> (optional while drafting)
|
||||||
|
Decision Date: <YYYY-MM-DD or Pending>
|
||||||
|
```
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
One or two paragraphs that state the desired outcome and why it matters.
|
||||||
|
|
||||||
|
## Motivation
|
||||||
|
|
||||||
|
- What problem exists today?
|
||||||
|
- Why should Burrow solve it now?
|
||||||
|
- Which issues, incidents, or constraints support the change?
|
||||||
|
|
||||||
|
## Detailed Design
|
||||||
|
|
||||||
|
- Architecture and boundaries
|
||||||
|
- Data model and migration plan
|
||||||
|
- Protocol or API changes
|
||||||
|
- Observability, testing, and failure handling
|
||||||
|
|
||||||
|
## Security and Operational Considerations
|
||||||
|
|
||||||
|
- Access and secret handling
|
||||||
|
- Abuse, downgrade, or supply-chain risks
|
||||||
|
- Rollback and kill-switch plans
|
||||||
|
|
||||||
|
## Contributor Playbook
|
||||||
|
|
||||||
|
Give the concrete steps, commands, checks, and evidence a contributor should produce while implementing or rolling out the change.
|
||||||
|
|
||||||
|
## Alternatives Considered
|
||||||
|
|
||||||
|
List alternatives and why they were rejected.
|
||||||
|
|
||||||
|
## Impact on Other Work
|
||||||
|
|
||||||
|
- follow-up tasks
|
||||||
|
- dependencies
|
||||||
|
- compatibility constraints
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
Record the final call, who made it, and any conditions.
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
Link relevant issues, specs, transcripts, and external research.
|
||||||
|
|
@ -0,0 +1,61 @@
|
||||||
|
# `BEP-0001` - Sovereign Forge and Governance Bootstrap
|
||||||
|
|
||||||
|
```text
|
||||||
|
Status: Draft
|
||||||
|
Proposal: BEP-0001
|
||||||
|
Authors: gpt-5.4
|
||||||
|
Coordinator: gpt-5.4
|
||||||
|
Reviewers: Pending
|
||||||
|
Constitution Sections: II, III, V
|
||||||
|
Implementation PRs: Pending
|
||||||
|
Decision Date: Pending
|
||||||
|
```
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
Burrow should own its forge, deployment logic, and operational context under `burrow.net`. This proposal establishes the repository-local governance and forge bootstrap required to move build, release, and infrastructure control out of GitHub-centric assumptions and into a self-hosted operating model.
|
||||||
|
|
||||||
|
## Motivation
|
||||||
|
|
||||||
|
- The repository currently keeps CI definitions under `.github/workflows/` but has no first-class self-hosted forge layout.
|
||||||
|
- Infrastructure changes and protocol work are already entangled; without a design record, the project risks landing irreversible operations without enough context.
|
||||||
|
- A self-hosted forge is a prerequisite for durable autonomy over source, runners, and release pipelines.
|
||||||
|
|
||||||
|
## Detailed Design
|
||||||
|
|
||||||
|
- Add a project constitution and BEP process under `evolution/`.
|
||||||
|
- Introduce a Nix flake and NixOS host/module layout for `burrow-forge`.
|
||||||
|
- Add Forgejo-native workflows under `.forgejo/workflows/` for repository-local CI.
|
||||||
|
- Bootstrap the initial forge identity around `contact@burrow.net` and an agent-owned SSH workflow.
|
||||||
|
|
||||||
|
## Security and Operational Considerations
|
||||||
|
|
||||||
|
- Initial bootstrap may read credentials from local intake, but production must converge on encrypted secret handling.
|
||||||
|
- The first forge host replacement must preserve rollback information before deleting any existing VM.
|
||||||
|
- DNS for `burrow.net` is currently pending activation; the forge rollout must not assume public reachability until nameserver cutover completes.
|
||||||
|
|
||||||
|
## Contributor Playbook
|
||||||
|
|
||||||
|
- Keep destructive host operations behind explicit verification of the current Hetzner state.
|
||||||
|
- Build and test repository-local workflows before using them for deployment.
|
||||||
|
- Record the active server id, image, IPs, and SSH path before replacement.
|
||||||
|
|
||||||
|
## Alternatives Considered
|
||||||
|
|
||||||
|
- Continue relying on GitHub Actions while separately hosting services. Rejected because it leaves source authority and CI policy split across systems.
|
||||||
|
- Stand up Forgejo without a repository-local operating model. Rejected because the repo would still be missing deployment truth.
|
||||||
|
|
||||||
|
## Impact on Other Work
|
||||||
|
|
||||||
|
- Blocks long-term migration of workflows away from GitHub.
|
||||||
|
- Provides the governance anchor for protocol and control-plane proposals.
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
Pending.
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- `CONSTITUTION.md`
|
||||||
|
- `.github/workflows/`
|
||||||
|
- `.forgejo/workflows/`
|
||||||
|
|
@ -0,0 +1,60 @@
|
||||||
|
# `BEP-0002` - Control-Plane Bootstrap and Local Auth
|
||||||
|
|
||||||
|
```text
|
||||||
|
Status: Draft
|
||||||
|
Proposal: BEP-0002
|
||||||
|
Authors: gpt-5.4
|
||||||
|
Coordinator: gpt-5.4
|
||||||
|
Reviewers: Pending
|
||||||
|
Constitution Sections: I, II, III, V
|
||||||
|
Implementation PRs: Pending
|
||||||
|
Decision Date: Pending
|
||||||
|
```
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
Burrow needs a repository-owned control-plane model instead of ad hoc network payload storage plus third-party-only auth. This proposal introduces a local username/password bootstrap for `contact@burrow.net`, plus a register/map data model shaped to support a Tailscale-style control server without claiming full parity yet.
|
||||||
|
|
||||||
|
## Motivation
|
||||||
|
|
||||||
|
- Current auth support is limited and does not provide a plain local bootstrap path for the project's own operator identity.
|
||||||
|
- The existing database stores network payloads, but not a durable model for users, nodes, sessions, or control-plane negotiation state.
|
||||||
|
- Future work on route policy, device coordination, and richer negotiation needs a real data model now.
|
||||||
|
|
||||||
|
## Detailed Design
|
||||||
|
|
||||||
|
- Add control-plane types for users, nodes, register requests, and map responses.
|
||||||
|
- Extend the auth server schema with local credentials, sessions, provider logins, and control nodes.
|
||||||
|
- Expose JSON endpoints for local login, node registration, and map retrieval.
|
||||||
|
- Seed the initial operator account from intake-backed bootstrap credentials.
|
||||||
|
|
||||||
|
## Security and Operational Considerations
|
||||||
|
|
||||||
|
- Passwords are stored with Argon2id hashes only.
|
||||||
|
- Session tokens are bearer credentials and must be treated as sensitive.
|
||||||
|
- The bootstrap credential path is a short-term path; follow-up work should move it into encrypted secret management before public deployment.
|
||||||
|
|
||||||
|
## Contributor Playbook
|
||||||
|
|
||||||
|
- Verify bootstrap account creation in an isolated test database.
|
||||||
|
- Exercise login, register, and map end to end with integration tests.
|
||||||
|
- Do not advertise protocol parity beyond the implemented request/response contract.
|
||||||
|
|
||||||
|
## Alternatives Considered
|
||||||
|
|
||||||
|
- Wait for full external identity-provider integration first. Rejected because the forge needs an operator account now.
|
||||||
|
- Keep control-plane state implicit in daemon-local configuration. Rejected because it cannot express multi-device coordination.
|
||||||
|
|
||||||
|
## Impact on Other Work
|
||||||
|
|
||||||
|
- Unblocks forge bootstrap and future device control-plane work.
|
||||||
|
- Creates the storage model needed for richer policy and transport proposals.
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
Pending.
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- `burrow/src/auth/server/`
|
||||||
|
- `burrow/src/control/`
|
||||||
|
|
@ -0,0 +1,61 @@
|
||||||
|
# `BEP-0003` - CONNECT-IP and Negotiation Roadmap
|
||||||
|
|
||||||
|
```text
|
||||||
|
Status: Draft
|
||||||
|
Proposal: BEP-0003
|
||||||
|
Authors: gpt-5.4
|
||||||
|
Coordinator: gpt-5.4
|
||||||
|
Reviewers: Pending
|
||||||
|
Constitution Sections: I, II, V
|
||||||
|
Implementation PRs: Pending
|
||||||
|
Decision Date: Pending
|
||||||
|
```
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
Burrow should grow from a WireGuard-first tunnel runner into a transport stack that can support HTTP/3 MASQUE `CONNECT-IP` and a richer node negotiation model. This proposal stages that work so Burrow can adopt the right abstractions instead of stapling QUIC-era semantics onto a WireGuard-only daemon.
|
||||||
|
|
||||||
|
## Motivation
|
||||||
|
|
||||||
|
- `CONNECT-IP` introduces HTTP/3 sessions, context identifiers, address assignment, and route advertisements that do not fit the current daemon model.
|
||||||
|
- A Tailscale-style control plane requires explicit node, endpoint, and session state rather than raw network blobs.
|
||||||
|
- The project needs a roadmap that distinguishes data-model work, control-plane work, and actual transport implementation.
|
||||||
|
|
||||||
|
## Detailed Design
|
||||||
|
|
||||||
|
- Stage 1: land control-plane types and persistent auth/session/node storage.
|
||||||
|
- Stage 2: add transport-agnostic route, address-assignment, and policy abstractions in Burrow.
|
||||||
|
- Stage 3: implement MASQUE `CONNECT-IP` framing and HTTP Datagram handling.
|
||||||
|
- Stage 4: connect the transport layer to real relay, policy, and observability paths.
|
||||||
|
|
||||||
|
## Security and Operational Considerations
|
||||||
|
|
||||||
|
- `CONNECT-IP` changes the trust boundary from WireGuard peers to HTTP/3 peers and relays; authentication, replay handling, and scope restriction must be explicit.
|
||||||
|
- Route advertisements and delegated prefixes must be validated before touching the data plane.
|
||||||
|
- Control-plane capability claims must not imply support that the transport layer does not yet implement.
|
||||||
|
|
||||||
|
## Contributor Playbook
|
||||||
|
|
||||||
|
- Keep protocol codecs independently testable before integrating them into live transports.
|
||||||
|
- Add interoperability tests for every new capsule or datagram type.
|
||||||
|
- Separate request parsing, policy validation, and packet forwarding so regressions stay localized.
|
||||||
|
|
||||||
|
## Alternatives Considered
|
||||||
|
|
||||||
|
- Implement MASQUE directly in the daemon without control-plane refactoring. Rejected because the current daemon has no transport-neutral contract for routes or prefixes.
|
||||||
|
- Treat Tailscale negotiation as a one-off compatibility shim. Rejected because Burrow needs first-class control-plane concepts either way.
|
||||||
|
|
||||||
|
## Impact on Other Work
|
||||||
|
|
||||||
|
- Depends on BEP-0002.
|
||||||
|
- Informs future relay, policy, and node coordination work.
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
Pending.
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- RFC 9484
|
||||||
|
- `burrow/src/daemon/`
|
||||||
|
- `burrow/src/control/`
|
||||||
|
|
@ -0,0 +1,68 @@
|
||||||
|
# `BEP-0004` - Hosted Mail Backups and SaaS Identity
|
||||||
|
|
||||||
|
```text
|
||||||
|
Status: Draft
|
||||||
|
Proposal: BEP-0004
|
||||||
|
Authors: gpt-5.4
|
||||||
|
Coordinator: gpt-5.4
|
||||||
|
Reviewers: Pending
|
||||||
|
Constitution Sections: II, III, V
|
||||||
|
Implementation PRs: Pending
|
||||||
|
Decision Date: Pending
|
||||||
|
```
|
||||||
|
|
||||||
|
## Summary
|
||||||
|
|
||||||
|
Burrow should start with hosted mail on Forward Email instead of self-hosting SMTP and IMAP on the first forge machine. Backup retention should still be controlled by Burrow through custom S3-compatible storage backed by Burrow-owned object storage. In parallel, Burrow should treat SaaS identity as a separate track and converge on Authentik as the long-term IdP, with Linear SAML SSO as a planned downstream integration rather than an immediate bootstrap dependency.
|
||||||
|
|
||||||
|
## Motivation
|
||||||
|
|
||||||
|
- The first forge host already carries source control, CI, and deployment bootstrap risk. Adding a self-hosted mail stack increases operational scope before the forge is stable.
|
||||||
|
- Forward Email already exposes SMTP and IMAP while allowing per-domain custom S3 backup storage, which preserves Burrow's data ownership over mailbox backups.
|
||||||
|
- The repository needs a durable decision record that separates hosted mail operations from future SaaS SSO work.
|
||||||
|
|
||||||
|
## Detailed Design
|
||||||
|
|
||||||
|
- Use Forward Email as the operational mail provider for `burrow.net` and `burrow.rs`.
|
||||||
|
- Configure custom S3-compatible storage per domain using Burrow-controlled object storage credentials.
|
||||||
|
- Keep one backup bucket per domain and enforce lifecycle expiration at the bucket layer.
|
||||||
|
- Add repository-owned tooling and documentation for applying and testing the Forward Email custom S3 configuration.
|
||||||
|
- Treat Authentik as the future identity authority for SaaS applications, but keep Linear SAML as a later rollout once the workspace and vendor prerequisites are available. Linear's current docs place SAML and SCIM behind higher-tier workspace security settings, so Burrow should treat plan availability as an explicit precondition.
|
||||||
|
|
||||||
|
## Security and Operational Considerations
|
||||||
|
|
||||||
|
- Forward Email API tokens and S3 credentials must stay in secret files and must not be passed directly on the shell command line.
|
||||||
|
- Buckets must remain private. Public bucket detection by the vendor should be treated as a hard failure, not a warning.
|
||||||
|
- Backup growth is unbounded without lifecycle rules. Retention policy is part of the rollout, not optional cleanup.
|
||||||
|
- Hosted mail reduces MTA attack surface on the forge host, but it adds third-party dependency risk; keeping backups in Burrow-owned storage limits that blast radius.
|
||||||
|
|
||||||
|
## Contributor Playbook
|
||||||
|
|
||||||
|
- Put the Forward Email API token in `intake/forwardemail_api_token.txt`.
|
||||||
|
- Use `Tools/forwardemail-custom-s3.sh` to configure `burrow.net` and `burrow.rs`.
|
||||||
|
- Run the helper again with `--test-only` after any credential rotation.
|
||||||
|
- Record the chosen endpoint, region, bucket names, and lifecycle policy alongside rollout evidence.
|
||||||
|
- Do not claim Linear SAML is live until the Authentik app, Linear workspace settings, workspace plan prerequisites, and end-to-end login flow are verified.
|
||||||
|
|
||||||
|
## Alternatives Considered
|
||||||
|
|
||||||
|
- Self-host Stalwart on the forge host immediately. Rejected for the first rollout because it expands host scope before source control and CI are stable.
|
||||||
|
- Rely on Forward Email default backup storage only. Rejected because it gives Burrow less control over retention and data location.
|
||||||
|
- Delay all SaaS identity planning until after forge cutover. Rejected because Linear and other SaaS integrations will otherwise accrete without an agreed authority.
|
||||||
|
|
||||||
|
## Impact on Other Work
|
||||||
|
|
||||||
|
- Narrows the first forge host scope.
|
||||||
|
- Creates a clean mail path for `contact@burrow.net` without requiring self-hosted SMTP and IMAP.
|
||||||
|
- Leaves Authentik and Linear SAML as explicit follow-up work instead of hidden assumptions.
|
||||||
|
|
||||||
|
## Decision
|
||||||
|
|
||||||
|
Pending.
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- `docs/FORWARDEMAIL.md`
|
||||||
|
- `Tools/forwardemail-custom-s3.sh`
|
||||||
|
- Forward Email FAQ: custom S3-compatible storage for backups
|
||||||
|
- Linear docs: SAML SSO
|
||||||
165
flake.lock
generated
Normal file
165
flake.lock
generated
Normal file
|
|
@ -0,0 +1,165 @@
|
||||||
|
{
|
||||||
|
"nodes": {
|
||||||
|
"agenix": {
|
||||||
|
"inputs": {
|
||||||
|
"darwin": "darwin",
|
||||||
|
"home-manager": "home-manager",
|
||||||
|
"nixpkgs": [
|
||||||
|
"nixpkgs"
|
||||||
|
],
|
||||||
|
"systems": "systems"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1770165109,
|
||||||
|
"narHash": "sha256-9VnK6Oqai65puVJ4WYtCTvlJeXxMzAp/69HhQuTdl/I=",
|
||||||
|
"type": "tarball",
|
||||||
|
"url": "https://codeload.github.com/ryantm/agenix/tar.gz/main"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"type": "tarball",
|
||||||
|
"url": "https://codeload.github.com/ryantm/agenix/tar.gz/main"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"darwin": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"agenix",
|
||||||
|
"nixpkgs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1744478979,
|
||||||
|
"narHash": "sha256-dyN+teG9G82G+m+PX/aSAagkC+vUv0SgUw3XkPhQodQ=",
|
||||||
|
"owner": "lnl7",
|
||||||
|
"repo": "nix-darwin",
|
||||||
|
"rev": "43975d782b418ebf4969e9ccba82466728c2851b",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "lnl7",
|
||||||
|
"ref": "master",
|
||||||
|
"repo": "nix-darwin",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"disko": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"nixpkgs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1773506317,
|
||||||
|
"narHash": "sha256-qWKbLUJpavIpvOdX1fhHYm0WGerytFHRoh9lVck6Bh0=",
|
||||||
|
"type": "tarball",
|
||||||
|
"url": "https://codeload.github.com/nix-community/disko/tar.gz/master"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"type": "tarball",
|
||||||
|
"url": "https://codeload.github.com/nix-community/disko/tar.gz/master"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-utils": {
|
||||||
|
"inputs": {
|
||||||
|
"systems": "systems_2"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1731533236,
|
||||||
|
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
|
||||||
|
"type": "tarball",
|
||||||
|
"url": "https://codeload.github.com/numtide/flake-utils/tar.gz/main"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"type": "tarball",
|
||||||
|
"url": "https://codeload.github.com/numtide/flake-utils/tar.gz/main"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"hcloud-upload-image-src": {
|
||||||
|
"flake": false,
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1766413232,
|
||||||
|
"narHash": "sha256-1u9tpzciYjB/EgBI81pg9w0kez7hHZON7+AHvfKW7k0=",
|
||||||
|
"type": "tarball",
|
||||||
|
"url": "https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"type": "tarball",
|
||||||
|
"url": "https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"home-manager": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"agenix",
|
||||||
|
"nixpkgs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1745494811,
|
||||||
|
"narHash": "sha256-YZCh2o9Ua1n9uCvrvi5pRxtuVNml8X2a03qIFfRKpFs=",
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "home-manager",
|
||||||
|
"rev": "abfad3d2958c9e6300a883bd443512c55dfeb1be",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "home-manager",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1773389992,
|
||||||
|
"narHash": "sha256-wvfdLLWJ2I9oEpDd9PfMA8osfIZicoQ5MT1jIwNs9Tk=",
|
||||||
|
"type": "tarball",
|
||||||
|
"url": "https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"type": "tarball",
|
||||||
|
"url": "https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": {
|
||||||
|
"inputs": {
|
||||||
|
"agenix": "agenix",
|
||||||
|
"disko": "disko",
|
||||||
|
"flake-utils": "flake-utils",
|
||||||
|
"hcloud-upload-image-src": "hcloud-upload-image-src",
|
||||||
|
"nixpkgs": "nixpkgs"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"systems": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1681028828,
|
||||||
|
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||||
|
"owner": "nix-systems",
|
||||||
|
"repo": "default",
|
||||||
|
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "nix-systems",
|
||||||
|
"repo": "default",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"systems_2": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1681028828,
|
||||||
|
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||||
|
"owner": "nix-systems",
|
||||||
|
"repo": "default",
|
||||||
|
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "nix-systems",
|
||||||
|
"repo": "default",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": "root",
|
||||||
|
"version": 7
|
||||||
|
}
|
||||||
201
flake.nix
Normal file
201
flake.nix
Normal file
|
|
@ -0,0 +1,201 @@
|
||||||
|
{
|
||||||
|
description = "Burrow development shell and forge host configuration";
|
||||||
|
|
||||||
|
inputs = {
|
||||||
|
nixpkgs.url = "tarball+https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable";
|
||||||
|
flake-utils.url = "tarball+https://codeload.github.com/numtide/flake-utils/tar.gz/main";
|
||||||
|
agenix = {
|
||||||
|
url = "tarball+https://codeload.github.com/ryantm/agenix/tar.gz/main";
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
|
};
|
||||||
|
disko = {
|
||||||
|
url = "tarball+https://codeload.github.com/nix-community/disko/tar.gz/master";
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
|
};
|
||||||
|
hcloud-upload-image-src = {
|
||||||
|
url = "tarball+https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0";
|
||||||
|
flake = false;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
outputs = { self, nixpkgs, flake-utils, agenix, disko, hcloud-upload-image-src }:
|
||||||
|
let
|
||||||
|
supportedSystems = [
|
||||||
|
"x86_64-linux"
|
||||||
|
"aarch64-linux"
|
||||||
|
"x86_64-darwin"
|
||||||
|
"aarch64-darwin"
|
||||||
|
];
|
||||||
|
in
|
||||||
|
(flake-utils.lib.eachSystem supportedSystems (system:
|
||||||
|
let
|
||||||
|
pkgs = import nixpkgs {
|
||||||
|
inherit system;
|
||||||
|
};
|
||||||
|
lib = pkgs.lib;
|
||||||
|
agenixPkg = agenix.packages.${system}.agenix;
|
||||||
|
commonPackages = with pkgs; [
|
||||||
|
cargo
|
||||||
|
sccache
|
||||||
|
rustc
|
||||||
|
rustfmt
|
||||||
|
clippy
|
||||||
|
protobuf
|
||||||
|
pkg-config
|
||||||
|
sqlite
|
||||||
|
git
|
||||||
|
openssh
|
||||||
|
curl
|
||||||
|
jq
|
||||||
|
nodejs_20
|
||||||
|
python3
|
||||||
|
rsync
|
||||||
|
];
|
||||||
|
nscPkg =
|
||||||
|
if pkgs.stdenv.isLinux || pkgs.stdenv.isDarwin then
|
||||||
|
let
|
||||||
|
version = "0.0.484";
|
||||||
|
osName =
|
||||||
|
if pkgs.stdenv.isLinux then
|
||||||
|
"linux"
|
||||||
|
else if pkgs.stdenv.isDarwin then
|
||||||
|
"darwin"
|
||||||
|
else
|
||||||
|
throw "nsc: unsupported host OS ${pkgs.stdenv.hostPlatform.system}";
|
||||||
|
archInfo =
|
||||||
|
if pkgs.stdenv.hostPlatform.isx86_64 then
|
||||||
|
{
|
||||||
|
arch = "amd64";
|
||||||
|
hash =
|
||||||
|
if pkgs.stdenv.isLinux then
|
||||||
|
"sha256-sT4YWSjQ7dU6/QV+vucm1ARSXf5yIcAtHoCYxbXJpRs="
|
||||||
|
else
|
||||||
|
"sha256-u0pSyUQw0IJcIipkLtm0MemD9BFO2/ZoAlBuFpfX1HI=";
|
||||||
|
}
|
||||||
|
else if pkgs.stdenv.hostPlatform.isAarch64 then
|
||||||
|
{
|
||||||
|
arch = "arm64";
|
||||||
|
hash =
|
||||||
|
if pkgs.stdenv.isLinux then
|
||||||
|
"sha256-n3nOIBjGnHdNUhfWD7QHvGOW+DdrZaNlfatj4o17NvM="
|
||||||
|
else
|
||||||
|
"sha256-8k2Jby6HCPClBaSGUrqIKP6MioVFrGD6HwAsjKZSSQA=";
|
||||||
|
}
|
||||||
|
else
|
||||||
|
throw "nsc: unsupported host platform ${pkgs.stdenv.hostPlatform.system}";
|
||||||
|
src = pkgs.fetchurl {
|
||||||
|
url = "https://github.com/namespacelabs/foundation/releases/download/v${version}/nsc_${version}_${osName}_${archInfo.arch}.tar.gz";
|
||||||
|
sha256 = archInfo.hash;
|
||||||
|
};
|
||||||
|
in
|
||||||
|
pkgs.stdenvNoCC.mkDerivation {
|
||||||
|
pname = "nsc";
|
||||||
|
inherit version src;
|
||||||
|
dontConfigure = true;
|
||||||
|
dontBuild = true;
|
||||||
|
unpackPhase = ''
|
||||||
|
tar -xzf "$src"
|
||||||
|
'';
|
||||||
|
installPhase = ''
|
||||||
|
install -d "$out/bin"
|
||||||
|
install -m 0555 nsc "$out/bin/nsc"
|
||||||
|
install -m 0555 docker-credential-nsc "$out/bin/docker-credential-nsc"
|
||||||
|
install -m 0555 bazel-credential-nsc "$out/bin/bazel-credential-nsc"
|
||||||
|
'';
|
||||||
|
}
|
||||||
|
else
|
||||||
|
null;
|
||||||
|
hcloudUploadImagePkg = pkgs.buildGoModule {
|
||||||
|
pname = "hcloud-upload-image";
|
||||||
|
version = "1.3.0";
|
||||||
|
src = hcloud-upload-image-src;
|
||||||
|
vendorHash = "sha256-IdOAUBPg0CEuHd2rdc7jOlw0XtnAhr3PVPJbnFs2+x4=";
|
||||||
|
subPackages = [ "." ];
|
||||||
|
env.GOWORK = "off";
|
||||||
|
ldflags = [
|
||||||
|
"-s"
|
||||||
|
"-w"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
forgejoNscSrc = lib.cleanSourceWith {
|
||||||
|
src = ./services/forgejo-nsc;
|
||||||
|
filter = path: type:
|
||||||
|
let
|
||||||
|
p = toString path;
|
||||||
|
name = builtins.baseNameOf path;
|
||||||
|
hasDir = dir: lib.hasInfix "/${dir}/" p || lib.hasSuffix "/${dir}" p;
|
||||||
|
in
|
||||||
|
!(hasDir ".git" || hasDir "vendor" || hasDir "node_modules" || name == "result");
|
||||||
|
};
|
||||||
|
forgejoNscDispatcher = pkgs.buildGoModule {
|
||||||
|
pname = "forgejo-nsc-dispatcher";
|
||||||
|
version = "0.1.0";
|
||||||
|
src = forgejoNscSrc;
|
||||||
|
subPackages = [ "./cmd/forgejo-nsc-dispatcher" ];
|
||||||
|
vendorHash = "sha256-Kpr+5Q7Dy4JiLuJVZbFeJAzLR7PLPYxhtJqfxMEytcs=";
|
||||||
|
};
|
||||||
|
forgejoNscAutoscaler = pkgs.buildGoModule {
|
||||||
|
pname = "forgejo-nsc-autoscaler";
|
||||||
|
version = "0.1.0";
|
||||||
|
src = forgejoNscSrc;
|
||||||
|
subPackages = [ "./cmd/forgejo-nsc-autoscaler" ];
|
||||||
|
vendorHash = "sha256-Kpr+5Q7Dy4JiLuJVZbFeJAzLR7PLPYxhtJqfxMEytcs=";
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
devShells.default = pkgs.mkShell {
|
||||||
|
packages =
|
||||||
|
commonPackages
|
||||||
|
++ [
|
||||||
|
agenixPkg
|
||||||
|
hcloudUploadImagePkg
|
||||||
|
forgejoNscDispatcher
|
||||||
|
forgejoNscAutoscaler
|
||||||
|
]
|
||||||
|
++ lib.optionals (nscPkg != null) [ nscPkg ];
|
||||||
|
};
|
||||||
|
|
||||||
|
devShells.ci = pkgs.mkShell {
|
||||||
|
packages =
|
||||||
|
commonPackages
|
||||||
|
++ [
|
||||||
|
agenixPkg
|
||||||
|
hcloudUploadImagePkg
|
||||||
|
]
|
||||||
|
++ lib.optionals (nscPkg != null) [ nscPkg ];
|
||||||
|
};
|
||||||
|
|
||||||
|
formatter = pkgs.nixpkgs-fmt;
|
||||||
|
|
||||||
|
packages =
|
||||||
|
{
|
||||||
|
agenix = agenixPkg;
|
||||||
|
hcloud-upload-image = hcloudUploadImagePkg;
|
||||||
|
forgejo-nsc-dispatcher = forgejoNscDispatcher;
|
||||||
|
forgejo-nsc-autoscaler = forgejoNscAutoscaler;
|
||||||
|
}
|
||||||
|
// lib.optionalAttrs (nscPkg != null) { nsc = nscPkg; };
|
||||||
|
}))
|
||||||
|
// {
|
||||||
|
nixosModules.burrow-forge = import ./nixos/modules/burrow-forge.nix;
|
||||||
|
nixosModules.burrow-forge-runner = import ./nixos/modules/burrow-forge-runner.nix;
|
||||||
|
nixosModules.burrow-forgejo-nsc = import ./nixos/modules/burrow-forgejo-nsc.nix;
|
||||||
|
|
||||||
|
nixosConfigurations.burrow-forge = nixpkgs.lib.nixosSystem {
|
||||||
|
system = "x86_64-linux";
|
||||||
|
specialArgs = {
|
||||||
|
inherit self;
|
||||||
|
agenixPackage = agenix.packages.x86_64-linux.agenix;
|
||||||
|
};
|
||||||
|
modules = [
|
||||||
|
agenix.nixosModules.default
|
||||||
|
disko.nixosModules.disko
|
||||||
|
./nixos/hosts/burrow-forge/default.nix
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
images = {
|
||||||
|
burrow-forge-raw = self.nixosConfigurations.burrow-forge.config.system.build.diskoImages;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
53
nixos/README.md
Normal file
53
nixos/README.md
Normal file
|
|
@ -0,0 +1,53 @@
|
||||||
|
# Burrow Forge Runbook
|
||||||
|
|
||||||
|
This directory contains the Burrow forge host definition and the Hetzner bootstrap shape for `burrow-forge`.
|
||||||
|
|
||||||
|
Mail hosting is intentionally not part of this NixOS host in the current plan. Burrow's first mail path is Forward Email with Burrow-owned custom S3 backups; see [`docs/FORWARDEMAIL.md`](../docs/FORWARDEMAIL.md).
|
||||||
|
|
||||||
|
## Files
|
||||||
|
|
||||||
|
- `hosts/burrow-forge/default.nix`: host entrypoint
|
||||||
|
- `modules/burrow-forge.nix`: Forgejo, Caddy, PostgreSQL, and admin bootstrap module
|
||||||
|
- `modules/burrow-forge-runner.nix`: Forgejo Actions runner and agent identity bootstrap
|
||||||
|
- `modules/burrow-forgejo-nsc.nix`: Namespace-backed ephemeral Forgejo runner services
|
||||||
|
- `hetzner-cloud-config.yaml`: desired Hetzner host shape
|
||||||
|
- `keys/contact_at_burrow_net.pub`: initial operator SSH public key
|
||||||
|
- `keys/agent_at_burrow_net.pub`: automation SSH public key
|
||||||
|
- `../Scripts/hetzner-forge.sh`: Hetzner inventory and replace workflow
|
||||||
|
- `../Scripts/nsc-build-and-upload-image.sh`: temporary Namespace builder -> raw image -> Hetzner snapshot
|
||||||
|
- `../Scripts/bootstrap-forge-intake.sh`: legacy intake bootstrap helper; current forge runtime secrets should live in `../secrets/forgejo/*.age`
|
||||||
|
- `../Scripts/check-forge-host.sh`: verify Forgejo, Caddy, the local runner, and optional NSC services after boot
|
||||||
|
- `../Scripts/cloudflare-upsert-a-record.sh`: upsert DNS-only Cloudflare `A` records for Burrow host cutovers
|
||||||
|
- `../Scripts/forge-deploy.sh`: remote `nixos-rebuild` entrypoint for the forge host
|
||||||
|
- `../Scripts/provision-forgejo-nsc.sh`: render Burrow Namespace dispatcher/autoscaler bootstrap inputs and ensure the default Forgejo scope exists
|
||||||
|
- `../secrets/forgejo/*.age`: authoritative encrypted forge admin password, agent SSH key, and Namespace runtime configs for the forge host
|
||||||
|
|
||||||
|
## Intended Flow
|
||||||
|
|
||||||
|
1. Build and upload the raw NixOS image with `Scripts/hetzner-forge.sh build-image` or `Scripts/nsc-build-and-upload-image.sh`.
|
||||||
|
2. Recreate `burrow-forge` from the latest labeled snapshot with `Scripts/hetzner-forge.sh recreate-from-image --yes`.
|
||||||
|
3. Encrypt the Forgejo admin password and agent SSH key into `secrets/forgejo/{admin-password,agent-ssh-key}.age`.
|
||||||
|
4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account from the agenix secret path.
|
||||||
|
5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent <agent@burrow.net>`.
|
||||||
|
6. Run `Scripts/provision-forgejo-nsc.sh` locally to refresh `secrets/forgejo/{nsc-token,nsc-dispatcher-config,nsc-autoscaler-config}.age`, then deploy with `Scripts/forge-deploy.sh` so agenix updates the live forgejo-nsc runtime paths.
|
||||||
|
7. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME.
|
||||||
|
8. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace.
|
||||||
|
9. Configure Forward Email custom S3 backups for `burrow.net` and `burrow.rs` out-of-band with `Tools/forwardemail-custom-s3.sh`.
|
||||||
|
|
||||||
|
## Current Constraints
|
||||||
|
|
||||||
|
- `burrow-forge` is live on NixOS in `hel1` at `89.167.47.21`, and `Scripts/check-forge-host.sh --expect-nsc` passes locally against that host.
|
||||||
|
- Public Burrow forge cutover completed on March 15, 2026:
|
||||||
|
- `burrow.net`, `git.burrow.net`, and `nsc-autoscaler.burrow.net` now publish public `A` records to `89.167.47.21`
|
||||||
|
- HTTP redirects to HTTPS on all three names
|
||||||
|
- `https://burrow.net` returns the root forge landing response
|
||||||
|
- `https://git.burrow.net` returns the live Forgejo front door
|
||||||
|
- `https://nsc-autoscaler.burrow.net` terminates TLS on Caddy and returns the expected application-level `404` for `/`
|
||||||
|
- The Cloudflare token now lives in `secrets/cloudflare/api-token.age`; the current token is account-scoped: `POST /accounts/<account>/tokens/verify` succeeds, while `POST /user/tokens/verify` returns `Invalid API Token`.
|
||||||
|
- `burrow.rs` still resolves publicly to a Vercel `DEPLOYMENT_NOT_FOUND` response.
|
||||||
|
- Both domains publish Forward Email MX/TXT records.
|
||||||
|
- Forward Email custom S3 is live on both domains against the Hetzner `burrow` bucket and the public regional endpoint `https://hel1.your-objectstorage.com`.
|
||||||
|
- The current Hetzner account contains both:
|
||||||
|
- the older Ubuntu bootstrap server in `hil`
|
||||||
|
- the live `burrow-forge` NixOS server in `hel1`
|
||||||
|
- The remaining forge work is follow-on product/integration work, not host bring-up, mail backup wiring, or public DNS cutover.
|
||||||
10
nixos/hetzner-cloud-config.yaml
Normal file
10
nixos/hetzner-cloud-config.yaml
Normal file
|
|
@ -0,0 +1,10 @@
|
||||||
|
name: burrow-forge
|
||||||
|
server_type: ccx23
|
||||||
|
location: hel1
|
||||||
|
image: ubuntu-24.04
|
||||||
|
ssh_keys:
|
||||||
|
- contact@burrow.net
|
||||||
|
- agent@burrow.net
|
||||||
|
labels:
|
||||||
|
project: burrow
|
||||||
|
role: forge
|
||||||
81
nixos/hosts/burrow-forge/default.nix
Normal file
81
nixos/hosts/burrow-forge/default.nix
Normal file
|
|
@ -0,0 +1,81 @@
|
||||||
|
{ config, self, ... }:
|
||||||
|
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
./hardware-configuration.nix
|
||||||
|
./disko-config.nix
|
||||||
|
self.nixosModules.burrow-forge
|
||||||
|
self.nixosModules.burrow-forge-runner
|
||||||
|
self.nixosModules.burrow-forgejo-nsc
|
||||||
|
];
|
||||||
|
|
||||||
|
system.stateVersion = "24.11";
|
||||||
|
|
||||||
|
time.timeZone = "America/Los_Angeles";
|
||||||
|
|
||||||
|
nix.settings.experimental-features = [
|
||||||
|
"nix-command"
|
||||||
|
"flakes"
|
||||||
|
];
|
||||||
|
|
||||||
|
services.burrow.forge = {
|
||||||
|
enable = true;
|
||||||
|
adminPasswordFile = config.age.secrets.forgejoAdminPassword.path;
|
||||||
|
authorizedKeys = [
|
||||||
|
(builtins.readFile ../../keys/contact_at_burrow_net.pub)
|
||||||
|
(builtins.readFile ../../keys/agent_at_burrow_net.pub)
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
services.burrow.forgeRunner = {
|
||||||
|
enable = true;
|
||||||
|
sshPrivateKeyFile = config.age.secrets.forgejoAgentSshKey.path;
|
||||||
|
};
|
||||||
|
|
||||||
|
age.secrets.forgejoAdminPassword = {
|
||||||
|
file = ../../../secrets/forgejo/admin-password.age;
|
||||||
|
mode = "0400";
|
||||||
|
owner = "forgejo";
|
||||||
|
group = "forgejo";
|
||||||
|
};
|
||||||
|
|
||||||
|
age.secrets.forgejoAgentSshKey = {
|
||||||
|
file = ../../../secrets/forgejo/agent-ssh-key.age;
|
||||||
|
mode = "0400";
|
||||||
|
owner = "root";
|
||||||
|
group = "root";
|
||||||
|
};
|
||||||
|
|
||||||
|
age.secrets.forgejoNscToken = {
|
||||||
|
file = ../../../secrets/forgejo/nsc-token.age;
|
||||||
|
mode = "0400";
|
||||||
|
owner = "forgejo-nsc";
|
||||||
|
group = "forgejo-nsc";
|
||||||
|
};
|
||||||
|
|
||||||
|
age.secrets.forgejoNscDispatcherConfig = {
|
||||||
|
file = ../../../secrets/forgejo/nsc-dispatcher-config.age;
|
||||||
|
mode = "0400";
|
||||||
|
owner = "forgejo-nsc";
|
||||||
|
group = "forgejo-nsc";
|
||||||
|
};
|
||||||
|
|
||||||
|
age.secrets.forgejoNscAutoscalerConfig = {
|
||||||
|
file = ../../../secrets/forgejo/nsc-autoscaler-config.age;
|
||||||
|
mode = "0400";
|
||||||
|
owner = "forgejo-nsc";
|
||||||
|
group = "forgejo-nsc";
|
||||||
|
};
|
||||||
|
|
||||||
|
services.burrow.forgejoNsc = {
|
||||||
|
enable = true;
|
||||||
|
nscTokenFile = config.age.secrets.forgejoNscToken.path;
|
||||||
|
dispatcher = {
|
||||||
|
configFile = config.age.secrets.forgejoNscDispatcherConfig.path;
|
||||||
|
};
|
||||||
|
autoscaler = {
|
||||||
|
enable = true;
|
||||||
|
configFile = config.age.secrets.forgejoNscAutoscalerConfig.path;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
36
nixos/hosts/burrow-forge/disko-config.nix
Normal file
36
nixos/hosts/burrow-forge/disko-config.nix
Normal file
|
|
@ -0,0 +1,36 @@
|
||||||
|
{ lib, ... }:
|
||||||
|
|
||||||
|
{
|
||||||
|
disko.devices = {
|
||||||
|
disk.main = {
|
||||||
|
type = "disk";
|
||||||
|
device = lib.mkDefault "/dev/sda";
|
||||||
|
imageName = "burrow-forge";
|
||||||
|
imageSize = "80G";
|
||||||
|
content = {
|
||||||
|
type = "gpt";
|
||||||
|
partitions = {
|
||||||
|
ESP = {
|
||||||
|
size = "512M";
|
||||||
|
type = "EF00";
|
||||||
|
content = {
|
||||||
|
type = "filesystem";
|
||||||
|
format = "vfat";
|
||||||
|
mountpoint = "/boot";
|
||||||
|
mountOptions = [ "umask=0077" ];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
root = {
|
||||||
|
size = "100%";
|
||||||
|
content = {
|
||||||
|
type = "filesystem";
|
||||||
|
format = "ext4";
|
||||||
|
mountpoint = "/";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
11
nixos/hosts/burrow-forge/hardware-configuration.nix
Normal file
11
nixos/hosts/burrow-forge/hardware-configuration.nix
Normal file
|
|
@ -0,0 +1,11 @@
|
||||||
|
{ ... }:
|
||||||
|
|
||||||
|
{
|
||||||
|
# Derived from Hetzner Cloud rescue-mode hardware inspection.
|
||||||
|
boot.initrd.availableKernelModules = [
|
||||||
|
"ahci"
|
||||||
|
"sd_mod"
|
||||||
|
"virtio_pci"
|
||||||
|
"virtio_scsi"
|
||||||
|
];
|
||||||
|
}
|
||||||
1
nixos/keys/agent_at_burrow_net.pub
Normal file
1
nixos/keys/agent_at_burrow_net.pub
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net
|
||||||
1
nixos/keys/contact_at_burrow_net.pub
Normal file
1
nixos/keys/contact_at_burrow_net.pub
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa
|
||||||
213
nixos/modules/burrow-forge-runner.nix
Normal file
213
nixos/modules/burrow-forge-runner.nix
Normal file
|
|
@ -0,0 +1,213 @@
|
||||||
|
{ config, lib, pkgs, ... }:
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.services.burrow.forgeRunner;
|
||||||
|
runnerPkg = pkgs.forgejo-runner;
|
||||||
|
stateDir = cfg.stateDir;
|
||||||
|
runnerFile = "${stateDir}/.runner";
|
||||||
|
configFile = "${stateDir}/runner.yaml";
|
||||||
|
labelsCsv = lib.concatStringsSep "," (map (label: "${label}:host") cfg.labels);
|
||||||
|
sshPrivateKeyFile = cfg.sshPrivateKeyFile or "";
|
||||||
|
in
|
||||||
|
{
|
||||||
|
options.services.burrow.forgeRunner = {
|
||||||
|
enable = lib.mkEnableOption "the Burrow Forgejo Actions runner";
|
||||||
|
|
||||||
|
instanceUrl = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "http://127.0.0.1:3000";
|
||||||
|
description = "Forgejo base URL used by the local runner for registration and job polling.";
|
||||||
|
};
|
||||||
|
|
||||||
|
labels = lib.mkOption {
|
||||||
|
type = with lib.types; listOf str;
|
||||||
|
default = [ "burrow-forge" ];
|
||||||
|
description = "Runner labels exposed to Forgejo Actions.";
|
||||||
|
};
|
||||||
|
|
||||||
|
name = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "burrow-forge-agent";
|
||||||
|
description = "Runner name shown in Forgejo.";
|
||||||
|
};
|
||||||
|
|
||||||
|
capacity = lib.mkOption {
|
||||||
|
type = lib.types.int;
|
||||||
|
default = 1;
|
||||||
|
description = "Maximum concurrent jobs on this runner.";
|
||||||
|
};
|
||||||
|
|
||||||
|
stateDir = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "/var/lib/forgejo-runner-agent";
|
||||||
|
description = "Persistent runner state directory.";
|
||||||
|
};
|
||||||
|
|
||||||
|
user = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "forgejo-runner-agent";
|
||||||
|
description = "System user that runs the Forgejo runner.";
|
||||||
|
};
|
||||||
|
|
||||||
|
group = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "forgejo-runner-agent";
|
||||||
|
description = "System group that runs the Forgejo runner.";
|
||||||
|
};
|
||||||
|
|
||||||
|
forgejoConfigFile = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "/var/lib/forgejo/custom/conf/app.ini";
|
||||||
|
description = "Forgejo app.ini path used to generate runner tokens.";
|
||||||
|
};
|
||||||
|
|
||||||
|
gitUserName = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "agent";
|
||||||
|
description = "Git commit author name for automation on the forge host.";
|
||||||
|
};
|
||||||
|
|
||||||
|
gitUserEmail = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "agent@burrow.net";
|
||||||
|
description = "Git commit author email for automation on the forge host.";
|
||||||
|
};
|
||||||
|
|
||||||
|
sshPrivateKeyFile = lib.mkOption {
|
||||||
|
type = with lib.types; nullOr str;
|
||||||
|
default = null;
|
||||||
|
description = "Optional host-local path to the agent SSH private key copied into the runner home.";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = lib.mkIf cfg.enable {
|
||||||
|
users.groups.${cfg.group} = { };
|
||||||
|
|
||||||
|
users.users.${cfg.user} = {
|
||||||
|
isSystemUser = true;
|
||||||
|
group = cfg.group;
|
||||||
|
description = "Burrow Forgejo Actions runner";
|
||||||
|
home = cfg.stateDir;
|
||||||
|
createHome = true;
|
||||||
|
shell = pkgs.bashInteractive;
|
||||||
|
};
|
||||||
|
|
||||||
|
environment.systemPackages = with pkgs; [
|
||||||
|
runnerPkg
|
||||||
|
bash
|
||||||
|
coreutils
|
||||||
|
findutils
|
||||||
|
git
|
||||||
|
git-lfs
|
||||||
|
openssh
|
||||||
|
python3
|
||||||
|
rsync
|
||||||
|
];
|
||||||
|
|
||||||
|
systemd.tmpfiles.rules = [
|
||||||
|
"d ${stateDir} 0750 ${cfg.user} ${cfg.group} - -"
|
||||||
|
];
|
||||||
|
|
||||||
|
systemd.services.burrow-forgejo-runner-bootstrap = {
|
||||||
|
description = "Bootstrap Burrow Forgejo runner registration";
|
||||||
|
after = [ "forgejo.service" "network-online.target" "systemd-tmpfiles-setup.service" ];
|
||||||
|
wants = [ "forgejo.service" "network-online.target" "systemd-tmpfiles-setup.service" ];
|
||||||
|
before = [ "burrow-forgejo-runner.service" ];
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "oneshot";
|
||||||
|
User = "root";
|
||||||
|
Group = "root";
|
||||||
|
};
|
||||||
|
script = ''
|
||||||
|
set -euo pipefail
|
||||||
|
umask 077
|
||||||
|
|
||||||
|
install -d -m 0750 -o ${cfg.user} -g ${cfg.group} ${stateDir}
|
||||||
|
cat > ${configFile} <<EOF
|
||||||
|
runner:
|
||||||
|
file: ${runnerFile}
|
||||||
|
capacity: ${toString cfg.capacity}
|
||||||
|
name: ${cfg.name}
|
||||||
|
labels:
|
||||||
|
EOF
|
||||||
|
for label in ${lib.concatStringsSep " " cfg.labels}; do
|
||||||
|
echo " - ${"$"}label:host" >> ${configFile}
|
||||||
|
done
|
||||||
|
cat >> ${configFile} <<'EOF'
|
||||||
|
cache:
|
||||||
|
enabled: false
|
||||||
|
EOF
|
||||||
|
chown ${cfg.user}:${cfg.group} ${configFile}
|
||||||
|
chmod 0640 ${configFile}
|
||||||
|
|
||||||
|
install -d -m 0700 -o ${cfg.user} -g ${cfg.group} ${stateDir}/.ssh
|
||||||
|
${pkgs.util-linux}/bin/runuser -u ${cfg.user} -- \
|
||||||
|
${pkgs.git}/bin/git config --global user.name ${lib.escapeShellArg cfg.gitUserName}
|
||||||
|
${pkgs.util-linux}/bin/runuser -u ${cfg.user} -- \
|
||||||
|
${pkgs.git}/bin/git config --global user.email ${lib.escapeShellArg cfg.gitUserEmail}
|
||||||
|
|
||||||
|
if [ -n ${lib.escapeShellArg sshPrivateKeyFile} ] && [ -s ${lib.escapeShellArg sshPrivateKeyFile} ]; then
|
||||||
|
install -m 0600 -o ${cfg.user} -g ${cfg.group} \
|
||||||
|
${lib.escapeShellArg sshPrivateKeyFile} \
|
||||||
|
${stateDir}/.ssh/id_ed25519
|
||||||
|
cat > ${stateDir}/.ssh/config <<EOF
|
||||||
|
Host *
|
||||||
|
IdentityFile ${stateDir}/.ssh/id_ed25519
|
||||||
|
IdentitiesOnly yes
|
||||||
|
StrictHostKeyChecking accept-new
|
||||||
|
EOF
|
||||||
|
chown ${cfg.user}:${cfg.group} ${stateDir}/.ssh/config
|
||||||
|
chmod 0600 ${stateDir}/.ssh/config
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -s ${runnerFile} ]; then
|
||||||
|
token="$(${pkgs.util-linux}/bin/runuser -u forgejo -- \
|
||||||
|
${config.services.forgejo.package}/bin/forgejo actions generate-runner-token --config ${cfg.forgejoConfigFile} | tr -d '\r\n')"
|
||||||
|
if [ -z "${"$"}token" ]; then
|
||||||
|
echo "[burrow-forgejo-runner] failed to generate runner token" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
${pkgs.util-linux}/bin/runuser -u ${cfg.user} -- \
|
||||||
|
${runnerPkg}/bin/forgejo-runner register \
|
||||||
|
--no-interactive \
|
||||||
|
--instance ${lib.escapeShellArg cfg.instanceUrl} \
|
||||||
|
--token "${"$"}token" \
|
||||||
|
--name ${lib.escapeShellArg cfg.name} \
|
||||||
|
--labels ${lib.escapeShellArg labelsCsv} \
|
||||||
|
--config ${configFile}
|
||||||
|
fi
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
systemd.services.burrow-forgejo-runner = {
|
||||||
|
description = "Burrow Forgejo Actions runner";
|
||||||
|
after = [ "burrow-forgejo-runner-bootstrap.service" ];
|
||||||
|
wants = [ "burrow-forgejo-runner-bootstrap.service" ];
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "simple";
|
||||||
|
User = cfg.user;
|
||||||
|
Group = cfg.group;
|
||||||
|
WorkingDirectory = stateDir;
|
||||||
|
Restart = "on-failure";
|
||||||
|
RestartSec = 2;
|
||||||
|
ExecStart = pkgs.writeShellScript "burrow-forgejo-runner" ''
|
||||||
|
set -euo pipefail
|
||||||
|
export PATH="/run/wrappers/bin:/run/current-system/sw/bin:${"$"}{PATH:-}"
|
||||||
|
tmp="$(${pkgs.coreutils}/bin/mktemp)"
|
||||||
|
set +e
|
||||||
|
${runnerPkg}/bin/forgejo-runner daemon --config ${configFile} 2>&1 | ${pkgs.coreutils}/bin/tee "${"$"}tmp"
|
||||||
|
rc="${"$"}{PIPESTATUS[0]}"
|
||||||
|
set -e
|
||||||
|
if ${pkgs.gnugrep}/bin/grep -qi "unregistered runner" "${"$"}tmp"; then
|
||||||
|
rm -f ${runnerFile}
|
||||||
|
fi
|
||||||
|
rm -f "${"$"}tmp"
|
||||||
|
exit "${"$"}rc"
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
247
nixos/modules/burrow-forge.nix
Normal file
247
nixos/modules/burrow-forge.nix
Normal file
|
|
@ -0,0 +1,247 @@
|
||||||
|
{ config, lib, pkgs, ... }:
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.services.burrow.forge;
|
||||||
|
forgejoCfg = config.services.forgejo;
|
||||||
|
forgejoExe = lib.getExe forgejoCfg.package;
|
||||||
|
forgejoWorkPath = forgejoCfg.stateDir;
|
||||||
|
forgejoCustomPath = "${forgejoWorkPath}/custom";
|
||||||
|
forgejoConfigFile = "${forgejoCustomPath}/conf/app.ini";
|
||||||
|
forgejoAdminArgs = "--config ${lib.escapeShellArg forgejoConfigFile} --work-path ${lib.escapeShellArg forgejoWorkPath} --custom-path ${lib.escapeShellArg forgejoCustomPath}";
|
||||||
|
homeRepoPath = "/${cfg.homeOwner}/${cfg.homeRepo}";
|
||||||
|
homeRepoUrl = "https://${cfg.gitDomain}${homeRepoPath}";
|
||||||
|
in
|
||||||
|
{
|
||||||
|
options.services.burrow.forge = {
|
||||||
|
enable = lib.mkEnableOption "the Burrow Forge host";
|
||||||
|
|
||||||
|
gitDomain = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "git.burrow.net";
|
||||||
|
description = "Public Forgejo domain.";
|
||||||
|
};
|
||||||
|
|
||||||
|
siteDomain = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "burrow.net";
|
||||||
|
description = "Root site domain.";
|
||||||
|
};
|
||||||
|
|
||||||
|
homeOwner = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "hackclub";
|
||||||
|
description = "Canonical Forgejo org/user for the Burrow home repository.";
|
||||||
|
};
|
||||||
|
|
||||||
|
homeRepo = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "burrow";
|
||||||
|
description = "Canonical Forgejo repository name for the Burrow home repository.";
|
||||||
|
};
|
||||||
|
|
||||||
|
contactEmail = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "contact@burrow.net";
|
||||||
|
description = "Operator contact email.";
|
||||||
|
};
|
||||||
|
|
||||||
|
nscAutoscalerDomain = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "nsc-autoscaler.burrow.net";
|
||||||
|
description = "Public webhook domain for the Forgejo Namespace autoscaler.";
|
||||||
|
};
|
||||||
|
|
||||||
|
adminUsername = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "contact";
|
||||||
|
description = "Initial Forgejo admin username.";
|
||||||
|
};
|
||||||
|
|
||||||
|
adminEmail = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "contact@burrow.net";
|
||||||
|
description = "Initial Forgejo admin email.";
|
||||||
|
};
|
||||||
|
|
||||||
|
adminPasswordFile = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
description = "Host-local path to the plaintext bootstrap password file for the initial Forgejo admin.";
|
||||||
|
};
|
||||||
|
|
||||||
|
authorizedKeys = lib.mkOption {
|
||||||
|
type = with lib.types; listOf str;
|
||||||
|
default = [ ];
|
||||||
|
description = "SSH keys allowed for root login and operational bootstrap.";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = lib.mkIf cfg.enable {
|
||||||
|
networking.hostName = "burrow-forge";
|
||||||
|
networking.useDHCP = lib.mkDefault true;
|
||||||
|
|
||||||
|
services.qemuGuest.enable = true;
|
||||||
|
|
||||||
|
boot.loader.grub = {
|
||||||
|
enable = true;
|
||||||
|
efiSupport = true;
|
||||||
|
efiInstallAsRemovable = true;
|
||||||
|
device = "nodev";
|
||||||
|
};
|
||||||
|
|
||||||
|
fileSystems."/boot".neededForBoot = true;
|
||||||
|
|
||||||
|
services.postgresql = {
|
||||||
|
enable = true;
|
||||||
|
package = pkgs.postgresql_16;
|
||||||
|
};
|
||||||
|
|
||||||
|
services.openssh = {
|
||||||
|
enable = true;
|
||||||
|
settings = {
|
||||||
|
PasswordAuthentication = false;
|
||||||
|
KbdInteractiveAuthentication = false;
|
||||||
|
PermitRootLogin = "prohibit-password";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
users.users.root.openssh.authorizedKeys.keys = cfg.authorizedKeys;
|
||||||
|
|
||||||
|
networking.firewall.allowedTCPPorts = [
|
||||||
|
22
|
||||||
|
80
|
||||||
|
443
|
||||||
|
2222
|
||||||
|
];
|
||||||
|
|
||||||
|
services.forgejo = {
|
||||||
|
enable = true;
|
||||||
|
database = {
|
||||||
|
type = "postgres";
|
||||||
|
createDatabase = true;
|
||||||
|
};
|
||||||
|
lfs.enable = true;
|
||||||
|
settings = {
|
||||||
|
server = {
|
||||||
|
DOMAIN = cfg.gitDomain;
|
||||||
|
ROOT_URL = "https://${cfg.gitDomain}/";
|
||||||
|
HTTP_PORT = 3000;
|
||||||
|
SSH_DOMAIN = cfg.gitDomain;
|
||||||
|
SSH_PORT = 2222;
|
||||||
|
START_SSH_SERVER = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
service = {
|
||||||
|
DISABLE_REGISTRATION = true;
|
||||||
|
REQUIRE_SIGNIN_VIEW = false;
|
||||||
|
DEFAULT_ALLOW_CREATE_ORGANIZATION = false;
|
||||||
|
ENABLE_NOTIFY_MAIL = false;
|
||||||
|
NO_REPLY_ADDRESS = cfg.adminEmail;
|
||||||
|
};
|
||||||
|
|
||||||
|
session = {
|
||||||
|
COOKIE_SECURE = true;
|
||||||
|
SAME_SITE = "strict";
|
||||||
|
};
|
||||||
|
|
||||||
|
openid = {
|
||||||
|
ENABLE_OPENID_SIGNIN = false;
|
||||||
|
ENABLE_OPENID_SIGNUP = false;
|
||||||
|
};
|
||||||
|
|
||||||
|
actions = {
|
||||||
|
ENABLED = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
repository = {
|
||||||
|
DEFAULT_BRANCH = "main";
|
||||||
|
ENABLE_PUSH_CREATE_USER = false;
|
||||||
|
};
|
||||||
|
|
||||||
|
ui = {
|
||||||
|
DEFAULT_THEME = "forgejo-auto";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
services.caddy = {
|
||||||
|
enable = true;
|
||||||
|
email = cfg.contactEmail;
|
||||||
|
virtualHosts =
|
||||||
|
{
|
||||||
|
"${cfg.gitDomain}".extraConfig = ''
|
||||||
|
encode gzip zstd
|
||||||
|
@root path /
|
||||||
|
redir @root ${homeRepoPath} 308
|
||||||
|
reverse_proxy 127.0.0.1:${toString config.services.forgejo.settings.server.HTTP_PORT}
|
||||||
|
'';
|
||||||
|
"${cfg.siteDomain}".extraConfig = ''
|
||||||
|
@root path /
|
||||||
|
redir @root ${homeRepoUrl} 308
|
||||||
|
respond 404
|
||||||
|
'';
|
||||||
|
}
|
||||||
|
// lib.optionalAttrs (
|
||||||
|
config.services.burrow.forgejoNsc.enable && config.services.burrow.forgejoNsc.autoscaler.enable
|
||||||
|
) {
|
||||||
|
"${cfg.nscAutoscalerDomain}".extraConfig = ''
|
||||||
|
encode gzip zstd
|
||||||
|
reverse_proxy 127.0.0.1:8090
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
systemd.services.burrow-forgejo-bootstrap = {
|
||||||
|
description = "Seed the initial Burrow Forgejo admin account";
|
||||||
|
after = [ "forgejo.service" ];
|
||||||
|
requires = [ "forgejo.service" ];
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
path = [
|
||||||
|
forgejoCfg.package
|
||||||
|
pkgs.coreutils
|
||||||
|
pkgs.gnugrep
|
||||||
|
];
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "oneshot";
|
||||||
|
User = forgejoCfg.user;
|
||||||
|
Group = forgejoCfg.group;
|
||||||
|
WorkingDirectory = forgejoCfg.stateDir;
|
||||||
|
};
|
||||||
|
script = ''
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
if [ ! -s ${lib.escapeShellArg cfg.adminPasswordFile} ]; then
|
||||||
|
echo "bootstrap password file is missing; skipping admin bootstrap" >&2
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
password="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.adminPasswordFile})"
|
||||||
|
if [ -z "$password" ]; then
|
||||||
|
echo "bootstrap password file is empty; skipping admin bootstrap" >&2
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_file="$(mktemp)"
|
||||||
|
trap 'rm -f "$log_file"' EXIT
|
||||||
|
|
||||||
|
if ! ${forgejoExe} admin user create \
|
||||||
|
${forgejoAdminArgs} \
|
||||||
|
--admin \
|
||||||
|
--username ${lib.escapeShellArg cfg.adminUsername} \
|
||||||
|
--email ${lib.escapeShellArg cfg.adminEmail} \
|
||||||
|
--password "$password" \
|
||||||
|
--must-change-password=false >"$log_file" 2>&1; then
|
||||||
|
if grep -qi "already exists" "$log_file"; then
|
||||||
|
${forgejoExe} admin user change-password \
|
||||||
|
${forgejoAdminArgs} \
|
||||||
|
--username ${lib.escapeShellArg cfg.adminUsername} \
|
||||||
|
--password "$password" \
|
||||||
|
--must-change-password=false
|
||||||
|
else
|
||||||
|
cat "$log_file" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
296
nixos/modules/burrow-forgejo-nsc.nix
Normal file
296
nixos/modules/burrow-forgejo-nsc.nix
Normal file
|
|
@ -0,0 +1,296 @@
|
||||||
|
{ config, lib, pkgs, self, ... }:
|
||||||
|
|
||||||
|
let
|
||||||
|
inherit (lib)
|
||||||
|
mkEnableOption
|
||||||
|
mkIf
|
||||||
|
mkOption
|
||||||
|
types
|
||||||
|
mkAfter
|
||||||
|
mkDefault
|
||||||
|
optional
|
||||||
|
optionalAttrs
|
||||||
|
optionalString
|
||||||
|
;
|
||||||
|
|
||||||
|
cfg = config.services.burrow.forgejoNsc;
|
||||||
|
dispatcherRuntimeConfig = "${cfg.stateDir}/dispatcher.yaml";
|
||||||
|
autoscalerRuntimeConfig = "${cfg.stateDir}/autoscaler.yaml";
|
||||||
|
|
||||||
|
pendingCheck = configPath: pkgs.writeShellScript "forgejo-nsc-check-pending" ''
|
||||||
|
set -euo pipefail
|
||||||
|
if ${pkgs.gnugrep}/bin/grep -q 'PENDING-' '${configPath}'; then
|
||||||
|
echo "forgejo-nsc config still contains placeholder values (PENDING-); update ${configPath} before starting." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
'';
|
||||||
|
|
||||||
|
nscTokenPath = "${cfg.stateDir}/nsc.token";
|
||||||
|
tokenSync = optionalString (cfg.nscTokenFile != null) ''
|
||||||
|
install -m 600 ${lib.escapeShellArg cfg.nscTokenFile} ${lib.escapeShellArg nscTokenPath}
|
||||||
|
chown ${cfg.user}:${cfg.group} ${nscTokenPath}
|
||||||
|
chmod 600 ${nscTokenPath}
|
||||||
|
'';
|
||||||
|
dispatcherConfigSync = optionalString (cfg.dispatcher.configFile != null) ''
|
||||||
|
install -m 400 ${lib.escapeShellArg cfg.dispatcher.configFile} ${lib.escapeShellArg dispatcherRuntimeConfig}
|
||||||
|
chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg dispatcherRuntimeConfig}
|
||||||
|
chmod 400 ${lib.escapeShellArg dispatcherRuntimeConfig}
|
||||||
|
'';
|
||||||
|
autoscalerConfigSync = optionalString (cfg.autoscaler.configFile != null) ''
|
||||||
|
install -m 400 ${lib.escapeShellArg cfg.autoscaler.configFile} ${lib.escapeShellArg autoscalerRuntimeConfig}
|
||||||
|
chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg autoscalerRuntimeConfig}
|
||||||
|
chmod 400 ${lib.escapeShellArg autoscalerRuntimeConfig}
|
||||||
|
'';
|
||||||
|
|
||||||
|
dispatcherEnv =
|
||||||
|
cfg.extraEnv
|
||||||
|
// optionalAttrs (cfg.nscTokenFile != null) { NSC_TOKEN_FILE = nscTokenPath; }
|
||||||
|
// optionalAttrs (cfg.nscTokenSpecFile != null) { NSC_TOKEN_SPEC_FILE = cfg.nscTokenSpecFile; }
|
||||||
|
// optionalAttrs (cfg.nscEndpoint != null) { NSC_ENDPOINT = cfg.nscEndpoint; };
|
||||||
|
in {
|
||||||
|
options.services.burrow.forgejoNsc = {
|
||||||
|
enable = mkEnableOption "Forgejo Namespace Cloud runner dispatcher";
|
||||||
|
|
||||||
|
user = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "forgejo-nsc";
|
||||||
|
description = "System user that runs the forgejo-nsc services.";
|
||||||
|
};
|
||||||
|
|
||||||
|
group = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "forgejo-nsc";
|
||||||
|
description = "System group for the forgejo-nsc services.";
|
||||||
|
};
|
||||||
|
|
||||||
|
stateDir = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "/var/lib/forgejo-nsc";
|
||||||
|
description = "State directory for the dispatcher/autoscaler.";
|
||||||
|
};
|
||||||
|
|
||||||
|
nscTokenFile = mkOption {
|
||||||
|
type = types.nullOr types.str;
|
||||||
|
default = null;
|
||||||
|
description = "Optional NSC token file (exported as NSC_TOKEN_FILE).";
|
||||||
|
};
|
||||||
|
|
||||||
|
nscTokenSpecFile = mkOption {
|
||||||
|
type = types.nullOr types.str;
|
||||||
|
default = null;
|
||||||
|
description = "Optional NSC token spec file (exported as NSC_TOKEN_SPEC_FILE).";
|
||||||
|
};
|
||||||
|
|
||||||
|
nscEndpoint = mkOption {
|
||||||
|
type = types.nullOr types.str;
|
||||||
|
default = null;
|
||||||
|
description = "Optional NSC endpoint override (exported as NSC_ENDPOINT).";
|
||||||
|
};
|
||||||
|
|
||||||
|
extraEnv = mkOption {
|
||||||
|
type = types.attrsOf types.str;
|
||||||
|
default = { };
|
||||||
|
description = "Extra environment variables injected into the services.";
|
||||||
|
};
|
||||||
|
|
||||||
|
nscPackage = mkOption {
|
||||||
|
type = types.nullOr types.package;
|
||||||
|
default = self.packages.${pkgs.stdenv.hostPlatform.system}.nsc or null;
|
||||||
|
description = "Optional nsc CLI package added to the service PATH.";
|
||||||
|
};
|
||||||
|
|
||||||
|
dispatcher = {
|
||||||
|
enable = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = true;
|
||||||
|
description = "Enable the forgejo-nsc dispatcher service.";
|
||||||
|
};
|
||||||
|
|
||||||
|
package = mkOption {
|
||||||
|
type = types.package;
|
||||||
|
default = self.packages.${pkgs.stdenv.hostPlatform.system}.forgejo-nsc-dispatcher;
|
||||||
|
description = "Package providing the forgejo-nsc dispatcher binary.";
|
||||||
|
};
|
||||||
|
|
||||||
|
configFile = mkOption {
|
||||||
|
type = types.nullOr types.str;
|
||||||
|
default = null;
|
||||||
|
description = "Host-local YAML config file for the dispatcher.";
|
||||||
|
};
|
||||||
|
|
||||||
|
allowPending = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = false;
|
||||||
|
description = "Allow placeholder values (PENDING-) in the dispatcher config.";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
autoscaler = {
|
||||||
|
enable = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = false;
|
||||||
|
description = "Enable the forgejo-nsc autoscaler service.";
|
||||||
|
};
|
||||||
|
|
||||||
|
package = mkOption {
|
||||||
|
type = types.package;
|
||||||
|
default = self.packages.${pkgs.stdenv.hostPlatform.system}.forgejo-nsc-autoscaler;
|
||||||
|
description = "Package providing the forgejo-nsc autoscaler binary.";
|
||||||
|
};
|
||||||
|
|
||||||
|
configFile = mkOption {
|
||||||
|
type = types.nullOr types.str;
|
||||||
|
default = null;
|
||||||
|
description = "Host-local YAML config file for the autoscaler.";
|
||||||
|
};
|
||||||
|
|
||||||
|
allowPending = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = false;
|
||||||
|
description = "Allow placeholder values (PENDING-) in the autoscaler config.";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
pruneRunners = {
|
||||||
|
enable = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = true;
|
||||||
|
description = "Enable periodic pruning of stale Forgejo action runners.";
|
||||||
|
};
|
||||||
|
|
||||||
|
ttlSeconds = mkOption {
|
||||||
|
type = types.ints.positive;
|
||||||
|
default = 3600;
|
||||||
|
description = "Age threshold in seconds before offline runners are marked deleted.";
|
||||||
|
};
|
||||||
|
|
||||||
|
onBootSec = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "15m";
|
||||||
|
description = "How long after boot to wait before the first prune run.";
|
||||||
|
};
|
||||||
|
|
||||||
|
onUnitActiveSec = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "1h";
|
||||||
|
description = "How often to rerun stale runner pruning.";
|
||||||
|
};
|
||||||
|
|
||||||
|
randomizedDelaySec = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "10m";
|
||||||
|
description = "Randomized delay applied to the prune timer.";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = mkIf cfg.enable {
|
||||||
|
assertions = [
|
||||||
|
{
|
||||||
|
assertion = (!cfg.dispatcher.enable) || cfg.dispatcher.configFile != null;
|
||||||
|
message = "services.burrow.forgejoNsc.dispatcher.configFile must be set when the dispatcher is enabled.";
|
||||||
|
}
|
||||||
|
{
|
||||||
|
assertion = (!cfg.autoscaler.enable) || cfg.autoscaler.configFile != null;
|
||||||
|
message = "services.burrow.forgejoNsc.autoscaler.configFile must be set when the autoscaler is enabled.";
|
||||||
|
}
|
||||||
|
];
|
||||||
|
|
||||||
|
users.groups.${cfg.group} = { };
|
||||||
|
users.users.${cfg.user} = {
|
||||||
|
uid = mkDefault 2011;
|
||||||
|
isSystemUser = true;
|
||||||
|
group = cfg.group;
|
||||||
|
description = "Forgejo Namespace Cloud runner services";
|
||||||
|
home = cfg.stateDir;
|
||||||
|
createHome = true;
|
||||||
|
shell = pkgs.bashInteractive;
|
||||||
|
};
|
||||||
|
|
||||||
|
systemd.tmpfiles.rules = mkAfter [
|
||||||
|
"d ${cfg.stateDir} 0750 ${cfg.user} ${cfg.group} - -"
|
||||||
|
];
|
||||||
|
|
||||||
|
systemd.services.forgejo-nsc-dispatcher = mkIf cfg.dispatcher.enable {
|
||||||
|
description = "Forgejo Namespace Cloud dispatcher";
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
after = [ "network-online.target" ];
|
||||||
|
wants = [ "network-online.target" ];
|
||||||
|
unitConfig.ConditionPathExists =
|
||||||
|
optional (cfg.dispatcher.configFile != null) cfg.dispatcher.configFile
|
||||||
|
++ optional (cfg.nscTokenFile != null) cfg.nscTokenFile;
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "simple";
|
||||||
|
User = cfg.user;
|
||||||
|
Group = cfg.group;
|
||||||
|
WorkingDirectory = cfg.stateDir;
|
||||||
|
ExecStart = "${cfg.dispatcher.package}/bin/forgejo-nsc-dispatcher --config ${dispatcherRuntimeConfig}";
|
||||||
|
Restart = "on-failure";
|
||||||
|
RestartSec = 5;
|
||||||
|
};
|
||||||
|
path = lib.optional (cfg.nscPackage != null) cfg.nscPackage;
|
||||||
|
environment = dispatcherEnv;
|
||||||
|
preStart = lib.concatStringsSep "\n" (lib.filter (s: s != "") [
|
||||||
|
(optionalString (!cfg.dispatcher.allowPending) (pendingCheck cfg.dispatcher.configFile))
|
||||||
|
dispatcherConfigSync
|
||||||
|
tokenSync
|
||||||
|
]);
|
||||||
|
};
|
||||||
|
|
||||||
|
systemd.services.forgejo-nsc-autoscaler = mkIf cfg.autoscaler.enable {
|
||||||
|
description = "Forgejo Namespace Cloud autoscaler";
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
after = [ "network-online.target" "forgejo-nsc-dispatcher.service" ];
|
||||||
|
wants = [ "network-online.target" ];
|
||||||
|
unitConfig.ConditionPathExists =
|
||||||
|
optional (cfg.autoscaler.configFile != null) cfg.autoscaler.configFile
|
||||||
|
++ optional (cfg.nscTokenFile != null) cfg.nscTokenFile;
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "simple";
|
||||||
|
User = cfg.user;
|
||||||
|
Group = cfg.group;
|
||||||
|
WorkingDirectory = cfg.stateDir;
|
||||||
|
ExecStart = "${cfg.autoscaler.package}/bin/forgejo-nsc-autoscaler --config ${autoscalerRuntimeConfig}";
|
||||||
|
Restart = "on-failure";
|
||||||
|
RestartSec = 5;
|
||||||
|
};
|
||||||
|
path = lib.optional (cfg.nscPackage != null) cfg.nscPackage;
|
||||||
|
environment = dispatcherEnv;
|
||||||
|
preStart = lib.concatStringsSep "\n" (lib.filter (s: s != "") [
|
||||||
|
(optionalString (!cfg.autoscaler.allowPending) (pendingCheck cfg.autoscaler.configFile))
|
||||||
|
autoscalerConfigSync
|
||||||
|
tokenSync
|
||||||
|
]);
|
||||||
|
};
|
||||||
|
|
||||||
|
systemd.services.forgejo-prune-runners = mkIf cfg.pruneRunners.enable {
|
||||||
|
description = "Prune offline Forgejo action runners";
|
||||||
|
after = [ "forgejo.service" ];
|
||||||
|
requires = [ "forgejo.service" ];
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "oneshot";
|
||||||
|
User = "forgejo";
|
||||||
|
Group = "forgejo";
|
||||||
|
};
|
||||||
|
environment = {
|
||||||
|
FORGEJO_PRUNE_DB = "1";
|
||||||
|
FORGEJO_RUNNER_TTL_SEC = toString cfg.pruneRunners.ttlSeconds;
|
||||||
|
};
|
||||||
|
path = [ pkgs.python3 pkgs.postgresql ];
|
||||||
|
script = ''
|
||||||
|
${pkgs.python3}/bin/python3 ${self}/Scripts/forgejo-prune-runners.py
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
systemd.timers.forgejo-prune-runners = mkIf cfg.pruneRunners.enable {
|
||||||
|
description = "Periodic Forgejo runner cleanup";
|
||||||
|
wantedBy = [ "timers.target" ];
|
||||||
|
timerConfig = {
|
||||||
|
OnBootSec = cfg.pruneRunners.onBootSec;
|
||||||
|
OnUnitActiveSec = cfg.pruneRunners.onUnitActiveSec;
|
||||||
|
RandomizedDelaySec = cfg.pruneRunners.randomizedDelaySec;
|
||||||
|
Unit = "forgejo-prune-runners.service";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
@ -46,6 +46,7 @@ message Network {
|
||||||
enum NetworkType {
|
enum NetworkType {
|
||||||
WireGuard = 0;
|
WireGuard = 0;
|
||||||
HackClub = 1;
|
HackClub = 1;
|
||||||
|
Tor = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
message NetworkListResponse {
|
message NetworkListResponse {
|
||||||
|
|
|
||||||
4
rust-toolchain.toml
Normal file
4
rust-toolchain.toml
Normal file
|
|
@ -0,0 +1,4 @@
|
||||||
|
[toolchain]
|
||||||
|
channel = "1.93.1"
|
||||||
|
components = ["rustfmt"]
|
||||||
|
profile = "minimal"
|
||||||
1
secrets.nix
Normal file
1
secrets.nix
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
import ./secrets/secrets.nix
|
||||||
28
secrets/README.md
Normal file
28
secrets/README.md
Normal file
|
|
@ -0,0 +1,28 @@
|
||||||
|
# Secrets
|
||||||
|
|
||||||
|
Burrow secrets live in `secrets/<name>.age` and are managed with `agenix`.
|
||||||
|
|
||||||
|
For the Forgejo Namespace Cloud runtime:
|
||||||
|
|
||||||
|
- `secrets/forgejo/admin-password.age`
|
||||||
|
- `secrets/forgejo/agent-ssh-key.age`
|
||||||
|
- `secrets/forgejo/nsc-token.age`
|
||||||
|
- `secrets/forgejo/nsc-dispatcher-config.age`
|
||||||
|
- `secrets/forgejo/nsc-autoscaler-config.age`
|
||||||
|
- `secrets/cloudflare/api-token.age`
|
||||||
|
- `secrets/hetzner/api-token.age`
|
||||||
|
- `secrets/forwardemail/api-token.age`
|
||||||
|
- `secrets/forwardemail/hetzner-s3-user.age`
|
||||||
|
- `secrets/forwardemail/hetzner-s3-secret.age`
|
||||||
|
|
||||||
|
Use:
|
||||||
|
|
||||||
|
- `make secret name=forgejo/nsc-token`
|
||||||
|
- `make secret-file name=forgejo/agent-ssh-key file=/path/to/source`
|
||||||
|
- `Scripts/provision-forgejo-nsc.sh` to refresh the Forgejo Namespace token and runtime configs in `secrets/forgejo/*.age`
|
||||||
|
- `make secret-file name=cloudflare/api-token file=/path/to/cloudflare-token.txt`
|
||||||
|
- `make secret-file name=hetzner/api-token file=/path/to/hetzner-api-token.txt`
|
||||||
|
|
||||||
|
The forge host decrypts these files at activation time and feeds the resulting
|
||||||
|
paths into `services.burrow.forge`, `services.burrow.forgeRunner`, and
|
||||||
|
`services.burrow.forgejoNsc`.
|
||||||
7
secrets/cloudflare/api-token.age
Normal file
7
secrets/cloudflare/api-token.age
Normal file
|
|
@ -0,0 +1,7 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 ux4N8Q rX5+bmtxyHNgD+xNdHkB1fKdjUlrX275DaKTIHssYyA
|
||||||
|
KwbfKHx14QXRKBIGWwJDR8+DONyCdVssh8Ti8mdajyQ
|
||||||
|
-> ssh-ed25519 IrZmAg SOG/KvURA6PrxVhtZyIbazFGNQZyp0BR4MH+YInHGB4
|
||||||
|
79pENXhtLwlCQVnqkPEzoFgrXMmTqRsfs4ULluTevWA
|
||||||
|
--- gDA64KNbgN+eGHsQbIbKvhOg1T/Nqui6I/wy2MK8VWE
|
||||||
|
û<EFBFBD>[|V{[ƒöŽ’ýö¯'E .Í{CÃǶÕö{ha
|
||||||
11
secrets/forgejo/admin-password.age
Normal file
11
secrets/forgejo/admin-password.age
Normal file
|
|
@ -0,0 +1,11 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 ux4N8Q nmGFzw38TKiVVuA9CM8wHQDVib0RddB+M/UjQnD45jk
|
||||||
|
iZNLNBlS32zR+TNfcK27T1V3w27sFKJkWfuOzHwcOL0
|
||||||
|
-> ssh-ed25519 IrZmAg Y53DC0wGX8mjaXkD3+jZn2DviO5iSXsnZDBNCBTmLgA
|
||||||
|
XLz+YXzT4fYb7q0xuZMKgv88lAd0gGKaquSMcA6Yu3c
|
||||||
|
-> ssh-ed25519 JzXUWA EDAXBKEvHccJ4KKtHjUTA+KA+wN9bBu9v+kzRTFt9AI
|
||||||
|
JNADezBCxx26+QPD2tIpz5O8cncrJwnqaYQEWY56VGY
|
||||||
|
--- RpjdftRPUGT80IMYKFDFuHkKEr1heJOvqrqYLufhc10
|
||||||
|
ûÈÂ_
|
||||||
|
F(
|
||||||
|
((0ˆ‡Õɉ·',¿€8d]d%T[MÁ¼¬KRQÿxiIf<49>0§Òæ
|
||||||
BIN
secrets/forgejo/agent-ssh-key.age
Normal file
BIN
secrets/forgejo/agent-ssh-key.age
Normal file
Binary file not shown.
BIN
secrets/forgejo/nsc-autoscaler-config.age
Normal file
BIN
secrets/forgejo/nsc-autoscaler-config.age
Normal file
Binary file not shown.
BIN
secrets/forgejo/nsc-dispatcher-config.age
Normal file
BIN
secrets/forgejo/nsc-dispatcher-config.age
Normal file
Binary file not shown.
BIN
secrets/forgejo/nsc-token.age
Normal file
BIN
secrets/forgejo/nsc-token.age
Normal file
Binary file not shown.
7
secrets/forwardemail/api-token.age
Normal file
7
secrets/forwardemail/api-token.age
Normal file
|
|
@ -0,0 +1,7 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 ux4N8Q ICuXuDsZiw1ShfUX9qjq8bCkeNdsbHWnG4e+3ZOC3jg
|
||||||
|
wswxqzQtf7jumSYB8ZeQzRBpMrBPVsUnWOYsmlDvpSs
|
||||||
|
-> ssh-ed25519 IrZmAg Xrvp/tXzXrHF1+NxgTZs9nNufyxtTq5NoYT5gaW6p1M
|
||||||
|
UWGlhZpV19CWMR9abp30vkQwZUMb/ylvInGEBlDdjjE
|
||||||
|
--- qhAaAECwhmAY4g3/e+Dz9RvL1MBQkHGWyoe1NkdTuqA
|
||||||
|
ìÑdÍéé?)¼ ñ<3ïŽ6ÜF:a•Ë<E280A2>
ųñÖ²Ä
|
||||||
7
secrets/forwardemail/hetzner-s3-secret.age
Normal file
7
secrets/forwardemail/hetzner-s3-secret.age
Normal file
|
|
@ -0,0 +1,7 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 ux4N8Q jwJzvmXUV5rCB6ku7ILLQUDInuQJL2gN+pjmX/ccXWE
|
||||||
|
q9OSyVhTuzERRRZZOCQzbwAwLOvOFIT/l9MxJ0V3UTo
|
||||||
|
-> ssh-ed25519 IrZmAg 8IutYG3CnNP9gw5fTFOaXm1Ue4i/cVs1apA88bNs9mo
|
||||||
|
daaf+6HoE3bmUEKR8/zu9jKTstVFCXqBlBxBdNVpQ90
|
||||||
|
--- gRGNkWqoh+lZWpDG7yNLd4fjoX2jCyHTWbzImzoFGko
|
||||||
|
R@+‰fu9ËÏRB‘±áÎX³2öúæ<C3BA>“[I¤<49>®
|
||||||
7
secrets/forwardemail/hetzner-s3-user.age
Normal file
7
secrets/forwardemail/hetzner-s3-user.age
Normal file
|
|
@ -0,0 +1,7 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 ux4N8Q jwyFpeVX18Q/1vnK2A1gwETTTH/QDUmW7vhCA+E/1lc
|
||||||
|
vtG1Ra+hR0cc/o9oJw7YTWMc2+JmrehzBE5QkIHQMKY
|
||||||
|
-> ssh-ed25519 IrZmAg KljcDNRlBmn7ElVfXq/E2prFHnRQD2TkQY9Vto+OQUA
|
||||||
|
T37sFc3xVrhky6e0n4KbsX18/fBqP3VjS/mNbxX6bfI
|
||||||
|
--- lvSjWGriUCYC14eI2eH9MdO2cB76Pe3gWD7pidw8Qjo
|
||||||
|
s‘&¾Ùßxö™<C3B6>*‘°4–}‰<1D>Í”z&¢F¥Å
|
||||||
7
secrets/hetzner/api-token.age
Normal file
7
secrets/hetzner/api-token.age
Normal file
|
|
@ -0,0 +1,7 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 ux4N8Q pEJA2VJkPC+NzA9yFvBrpXHD8qFMTD9iIHYSkx8P2RI
|
||||||
|
AGE1QJya77d92ERA1yQYylvZPNAJEQKoCL32BY5XBzo
|
||||||
|
-> ssh-ed25519 IrZmAg VMpoTBpNG/TAlnbJ2APwc4VMt2CX5rQwlrrihtmojFo
|
||||||
|
caOwayLgVDGPrjqLLH8hHHQ3Fy2WeRI2tf+R02HFqx0
|
||||||
|
--- Ey1DYpyA4lnVqPaabNsEuSihl4fvZ2vpSc/IRGZwYBw
|
||||||
|
¥Uï2Q÷‘âÖã*ð÷m¹¼†<C2BC>F<EFBFBD>ÒÞž|^–EVÜ"
|
||||||
19
secrets/secrets.nix
Normal file
19
secrets/secrets.nix
Normal file
|
|
@ -0,0 +1,19 @@
|
||||||
|
let
|
||||||
|
contact = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa";
|
||||||
|
agent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net";
|
||||||
|
forge = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAlkGo4lwpwIIZ0J01KjTuJuf/U/wGgy4/aKwPIUzutL root@burrow-forge";
|
||||||
|
|
||||||
|
operatorSecrets = [ contact agent ];
|
||||||
|
forgeAutomation = [ contact agent forge ];
|
||||||
|
in {
|
||||||
|
"secrets/forgejo/admin-password.age".publicKeys = forgeAutomation;
|
||||||
|
"secrets/forgejo/agent-ssh-key.age".publicKeys = forgeAutomation;
|
||||||
|
"secrets/forgejo/nsc-token.age".publicKeys = forgeAutomation;
|
||||||
|
"secrets/forgejo/nsc-dispatcher-config.age".publicKeys = forgeAutomation;
|
||||||
|
"secrets/forgejo/nsc-autoscaler-config.age".publicKeys = forgeAutomation;
|
||||||
|
"secrets/cloudflare/api-token.age".publicKeys = operatorSecrets;
|
||||||
|
"secrets/hetzner/api-token.age".publicKeys = operatorSecrets;
|
||||||
|
"secrets/forwardemail/api-token.age".publicKeys = operatorSecrets;
|
||||||
|
"secrets/forwardemail/hetzner-s3-user.age".publicKeys = operatorSecrets;
|
||||||
|
"secrets/forwardemail/hetzner-s3-secret.age".publicKeys = operatorSecrets;
|
||||||
|
}
|
||||||
203
services/forgejo-nsc/README.md
Normal file
203
services/forgejo-nsc/README.md
Normal file
|
|
@ -0,0 +1,203 @@
|
||||||
|
## forgejo-nsc-dispatcher
|
||||||
|
|
||||||
|
This service exposes a simple HTTP API that tells Namespace Cloud to start
|
||||||
|
ephemeral Forgejo Actions runners on demand. It glues together three pieces:
|
||||||
|
|
||||||
|
1. **Forgejo Actions** – the service requests a scoped registration token
|
||||||
|
for the repository/organization/instance where you want to run jobs.
|
||||||
|
2. **Namespace (`nsc`)** – the dispatcher shells out to the `nsc` CLI to create
|
||||||
|
a short‑lived environment, runs the `forgejo-runner` container inside it,
|
||||||
|
and exits after a single job (`forgejo-runner one-job`). The Namespace TTL is
|
||||||
|
the hard cap, not the typical lifetime.
|
||||||
|
3. **Your automation** – you call the service via HTTP (directly, through Caddy,
|
||||||
|
via Forgejo webhooks, etc.) whenever a new runner is needed.
|
||||||
|
|
||||||
|
### Directory layout
|
||||||
|
|
||||||
|
```
|
||||||
|
.
|
||||||
|
├── cmd/forgejo-nsc-dispatcher # main entry point
|
||||||
|
├── internal/ # service packages (config, forgejo client, nsc dispatcher, HTTP server)
|
||||||
|
├── config.example.yaml # starter config referenced by README
|
||||||
|
├── flake.nix / flake.lock # reproducible builds (Go binary + container image)
|
||||||
|
└── .forgejo/workflows # CI that runs go test/build and publishes manifests
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configuration
|
||||||
|
|
||||||
|
Copy `config.example.yaml` and update it for your Forgejo instance and Namespace
|
||||||
|
profile. The important knobs are:
|
||||||
|
|
||||||
|
- `forgejo.base_url` – HTTPS endpoint of your Forgejo server. A PAT with
|
||||||
|
`actions:runner` scope is required in `forgejo.token`.
|
||||||
|
- `forgejo.instance_url` – URL that spawned runners use to register back to Forgejo.
|
||||||
|
This must be reachable from the runner (typically the public URL like
|
||||||
|
`https://git.burrow.net`). On the forge host it commonly differs from `base_url`
|
||||||
|
(which may be `http://127.0.0.1:3000`).
|
||||||
|
- `forgejo.default_scope` – where new runners register
|
||||||
|
(`instance`, `organization`, or `repository`).
|
||||||
|
- `forgejo.default_labels` – labels applied to every spawned runner. GateForge
|
||||||
|
workflows via `runs-on: ["namespace-profile-linux-medium"]` (or other
|
||||||
|
`namespace-profile-linux-*` labels).
|
||||||
|
- `namespace.nsc_binary` – path to the `nsc` binary (the Nix container ships one
|
||||||
|
compiled from `namespacelabs/foundation` so `/app/bin/nsc` works out of the box).
|
||||||
|
- `namespace.image` – OCI image containing `forgejo-runner`.
|
||||||
|
- `namespace.machine_type` / `namespace.duration` – shape + TTL for the ephemeral
|
||||||
|
Namespace environment. The dispatcher destroys the instance after a job so the
|
||||||
|
TTL acts as a hard cap, not an idle timeout.
|
||||||
|
- macOS fallback launches still use `nsc create`. Bootstrap prefers the
|
||||||
|
Compute SSH config endpoint, and falls back to keychain-backed `nsc ssh`
|
||||||
|
only when the Compute bearer is rejected. That keeps the fast path on direct
|
||||||
|
TCP while preserving a working fallback when tenant auth drifts.
|
||||||
|
- `namespace.linux_cache_*` / `namespace.macos_cache_*` – persistent cache
|
||||||
|
volumes mounted into runners so Linux can keep `/nix` plus shared build
|
||||||
|
caches warm and macOS can reuse Rust toolchains, Xcode package caches, and
|
||||||
|
lane-local derived data. If Namespace keeps reusing an older undersized cache
|
||||||
|
volume, bump the cache tag name to force a fresh allocation at the new size.
|
||||||
|
|
||||||
|
### Running locally
|
||||||
|
|
||||||
|
```shell
|
||||||
|
# Ensure nsc is available (e.g. `go build ./foundation/cmd/nsc`)
|
||||||
|
cp config.example.yaml config.yaml
|
||||||
|
nix develop # optional dev shell with Go toolchain
|
||||||
|
go run ./cmd/forgejo-nsc-dispatcher --config config.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
API example:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
curl -X POST http://localhost:8080/api/v1/dispatch \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
-d '{
|
||||||
|
"count": 1,
|
||||||
|
"ttl": "20m",
|
||||||
|
"labels": ["namespace-profile-linux-medium"],
|
||||||
|
"scope": {"level": "repository", "owner": "example", "name": "app"}
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
### Deploying with Nix + GHCR
|
||||||
|
|
||||||
|
- `nix build .#packages.x86_64-linux.container-amd64` produces a deterministic
|
||||||
|
tarball containing the service, the `nsc` binary, BusyBox, and `forgejo-runner`.
|
||||||
|
- The included `Build Container` workflow builds both `amd64` and `arm64` images
|
||||||
|
on Namespace runners and pushes them to `ghcr.io/<owner>/<repo>`.
|
||||||
|
No Fly.io manifests are emitted – the multi‑arch manifest points only at GHCR.
|
||||||
|
|
||||||
|
### How this fits behind Caddy (last-mile networking)
|
||||||
|
|
||||||
|
The dispatcher is just an HTTP server. You can:
|
||||||
|
|
||||||
|
1. Run it anywhere that can reach Forgejo and Namespace: bare metal, Namespace
|
||||||
|
cluster, Kubernetes, Fly, etc.
|
||||||
|
2. Put Caddy (or any reverse proxy) in front to terminate TLS, do auth, or
|
||||||
|
rewrite URLs. For example:
|
||||||
|
|
||||||
|
```
|
||||||
|
forgejo-dispatcher.example.com {
|
||||||
|
reverse_proxy 127.0.0.1:8080
|
||||||
|
basicauth /api/* {
|
||||||
|
user JDJhJDE...
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The service doesn’t assume Caddy, nor does it manipulate HTTP clients
|
||||||
|
directly – it simply waits for POST requests. As long as the dispatcher can
|
||||||
|
reach Forgejo’s REST API and run the `nsc` binary, you can drop it anywhere.
|
||||||
|
|
||||||
|
### Autoscaling (webhook + poller)
|
||||||
|
|
||||||
|
If you don’t want to call `/api/v1/dispatch` manually, there’s a companion
|
||||||
|
autoscaler (`cmd/forgejo-nsc-autoscaler`) that watches Forgejo job queues and
|
||||||
|
triggers the dispatcher for you. It operates in two modes simultaneously:
|
||||||
|
|
||||||
|
1. **Polling** – every instance polls `GET /api/v1/.../actions/runners` to keep a
|
||||||
|
minimum number of idle Namespace runners per label. This continues until a
|
||||||
|
webhook is successfully processed, so the system is self-bootstrapping.
|
||||||
|
2. **Webhooks** – once Forgejo reaches the autoscaler via the `/webhook/{name}`
|
||||||
|
endpoint, the autoscaler stops polling and reacts to `workflow_job` events in
|
||||||
|
real time. Each payload is mapped to a target label set and results in a
|
||||||
|
dispatch call.
|
||||||
|
|
||||||
|
You can manage multiple Forgejo instances by listing them under `instances` in
|
||||||
|
`autoscaler.example.yaml`:
|
||||||
|
|
||||||
|
```
|
||||||
|
listen: ":8090"
|
||||||
|
dispatcher:
|
||||||
|
url: "http://dispatcher:8080"
|
||||||
|
|
||||||
|
instances:
|
||||||
|
- name: burrow
|
||||||
|
forgejo:
|
||||||
|
base_url: "https://git.burrow.net"
|
||||||
|
token: "PENDING-FORGEJO-PAT"
|
||||||
|
scope:
|
||||||
|
level: "repository"
|
||||||
|
owner: "hackclub"
|
||||||
|
name: "burrow"
|
||||||
|
disable_polling: true # webhook-only mode
|
||||||
|
poll_interval: "30s"
|
||||||
|
webhook_secret: "supersecret"
|
||||||
|
webhook:
|
||||||
|
url: "https://nsc-autoscaler.burrow.net/webhook/burrow"
|
||||||
|
content_type: "json"
|
||||||
|
events: ["workflow_job"]
|
||||||
|
active: true
|
||||||
|
targets:
|
||||||
|
- labels: ["namespace-profile-linux-medium"]
|
||||||
|
min_idle: 0 # set to 0 to scale-to-zero between jobs
|
||||||
|
ttl: "20m"
|
||||||
|
- labels: ["namespace-profile-macos-large"]
|
||||||
|
min_idle: 0
|
||||||
|
ttl: "90m"
|
||||||
|
machine_type: "6x14"
|
||||||
|
- labels: ["namespace-profile-windows-large"]
|
||||||
|
min_idle: 0
|
||||||
|
ttl: "45m"
|
||||||
|
machine_type: "windows/amd64:8x16"
|
||||||
|
```
|
||||||
|
|
||||||
|
For Burrow, use `Scripts/provision-forgejo-nsc.sh` to mint the Forgejo PAT,
|
||||||
|
generate a Namespace token from the logged-in Namespace account, and refresh
|
||||||
|
`secrets/forgejo/{nsc-token,nsc-dispatcher-config,nsc-autoscaler-config}.age`.
|
||||||
|
The token file is emitted as JSON with a long-lived `session_token` plus the
|
||||||
|
current `bearer_token`. The `nsc` CLI paths use the session-backed login flow,
|
||||||
|
while the Compute API path can consume the bearer token directly. The forge
|
||||||
|
host consumes the encrypted secrets through agenix; avoid keeping local
|
||||||
|
plaintext `intake/` copies around.
|
||||||
|
|
||||||
|
Long-lived runtime state is now sourced from age-encrypted files:
|
||||||
|
|
||||||
|
- `secrets/forgejo/admin-password.age`
|
||||||
|
- `secrets/forgejo/agent-ssh-key.age`
|
||||||
|
- `secrets/forgejo/nsc-token.age`
|
||||||
|
- `secrets/forgejo/nsc-dispatcher-config.age`
|
||||||
|
- `secrets/forgejo/nsc-autoscaler-config.age`
|
||||||
|
|
||||||
|
After refreshing the encrypted secrets, deploy the forge host so
|
||||||
|
`config.age.secrets.*` updates the live paths for `services.burrow.forge`,
|
||||||
|
`services.burrow.forgeRunner`, and `services.burrow.forgejoNsc`.
|
||||||
|
The Nix host module also installs a periodic `forgejo-prune-runners` timer that
|
||||||
|
marks stale offline runners deleted in Forgejo's database so wedged instances do
|
||||||
|
not leave the queue polluted indefinitely.
|
||||||
|
|
||||||
|
Run it next to the dispatcher:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go run ./cmd/forgejo-nsc-autoscaler --config autoscaler.yaml
|
||||||
|
# or build the binary/container via `nix build .#forgejo-nsc-autoscaler`
|
||||||
|
```
|
||||||
|
|
||||||
|
If your Forgejo build doesn’t expose the runner listing API, set
|
||||||
|
`disable_polling: true` and rely on `webhook` entries. The autoscaler will
|
||||||
|
auto-create/update the webhook (using the PAT) so that new `workflow_job` events
|
||||||
|
immediately call the dispatcher even if the service isn’t publicly reachable yet.
|
||||||
|
|
||||||
|
In Forgejo add a webhook pointing to `https://nsc-autoscaler.burrow.net/webhook/burrow`
|
||||||
|
with the shared secret (or let the autoscaler create it by specifying `webhook.url`
|
||||||
|
in config). The autoscaler continues polling until it receives the first valid
|
||||||
|
webhook (unless disabled), so you get capacity immediately even if outbound
|
||||||
|
webhooks from Forgejo aren’t yet configured.
|
||||||
34
services/forgejo-nsc/autoscaler.example.yaml
Normal file
34
services/forgejo-nsc/autoscaler.example.yaml
Normal file
|
|
@ -0,0 +1,34 @@
|
||||||
|
listen: ":8090"
|
||||||
|
dispatcher:
|
||||||
|
url: "http://localhost:8080"
|
||||||
|
|
||||||
|
instances:
|
||||||
|
- name: burrow
|
||||||
|
forgejo:
|
||||||
|
base_url: "https://git.burrow.net"
|
||||||
|
token: "PENDING-FORGEJO-PAT"
|
||||||
|
scope:
|
||||||
|
level: "repository"
|
||||||
|
owner: "hackclub"
|
||||||
|
name: "burrow"
|
||||||
|
disable_polling: true
|
||||||
|
poll_interval: "30s"
|
||||||
|
webhook_secret: "supersecret"
|
||||||
|
webhook:
|
||||||
|
url: "https://nsc-autoscaler.burrow.net/webhook/burrow"
|
||||||
|
content_type: "json"
|
||||||
|
events: ["workflow_job"]
|
||||||
|
active: true
|
||||||
|
targets:
|
||||||
|
- labels: ["namespace-profile-linux-medium"]
|
||||||
|
min_idle: 1
|
||||||
|
ttl: "20m"
|
||||||
|
machine_type: "4x8"
|
||||||
|
- labels: ["namespace-profile-macos-large"]
|
||||||
|
min_idle: 0
|
||||||
|
ttl: "90m"
|
||||||
|
machine_type: "6x14"
|
||||||
|
- labels: ["namespace-profile-windows-large"]
|
||||||
|
min_idle: 0
|
||||||
|
ttl: "45m"
|
||||||
|
machine_type: "windows/amd64:8x16"
|
||||||
46
services/forgejo-nsc/cmd/forgejo-nsc-autoscaler/main.go
Normal file
46
services/forgejo-nsc/cmd/forgejo-nsc-autoscaler/main.go
Normal file
|
|
@ -0,0 +1,46 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"flag"
|
||||||
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"namespacelabs.dev/foundation/std/tasks"
|
||||||
|
"namespacelabs.dev/foundation/std/tasks/simplelog"
|
||||||
|
|
||||||
|
"github.com/burrow/forgejo-nsc/internal/autoscaler"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
var configPath string
|
||||||
|
flag.StringVar(&configPath, "config", "autoscaler.yaml", "Path to the autoscaler config file")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo}))
|
||||||
|
|
||||||
|
cfg, err := autoscaler.LoadConfig(configPath)
|
||||||
|
if err != nil {
|
||||||
|
logger.Error("failed to load config", "error", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
service, err := autoscaler.NewService(cfg)
|
||||||
|
if err != nil {
|
||||||
|
logger.Error("failed to initialize autoscaler", "error", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
||||||
|
defer cancel()
|
||||||
|
ctx = tasks.WithSink(ctx, simplelog.NewSink(os.Stdout, 0))
|
||||||
|
|
||||||
|
if err := tasks.Action("autoscaler.run").Run(ctx, func(ctx context.Context) error {
|
||||||
|
return service.Start(ctx)
|
||||||
|
}); err != nil {
|
||||||
|
logger.Error("autoscaler exited", "error", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
106
services/forgejo-nsc/cmd/forgejo-nsc-dispatcher/main.go
Normal file
106
services/forgejo-nsc/cmd/forgejo-nsc-dispatcher/main.go
Normal file
|
|
@ -0,0 +1,106 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"flag"
|
||||||
|
"log/slog"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/burrow/forgejo-nsc/internal/app"
|
||||||
|
"github.com/burrow/forgejo-nsc/internal/config"
|
||||||
|
"github.com/burrow/forgejo-nsc/internal/forgejo"
|
||||||
|
"github.com/burrow/forgejo-nsc/internal/nsc"
|
||||||
|
"github.com/burrow/forgejo-nsc/internal/server"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
var configPath string
|
||||||
|
flag.StringVar(&configPath, "config", "config.yaml", "Path to the dispatcher config file.")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo}))
|
||||||
|
|
||||||
|
cfg, err := config.Load(configPath)
|
||||||
|
if err != nil {
|
||||||
|
logger.Error("failed to load config", "error", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
scope, err := cfg.Forgejo.DefaultScope.ToScope()
|
||||||
|
if err != nil {
|
||||||
|
logger.Error("invalid default scope", "error", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
forgejoClient, err := forgejo.NewClient(cfg.Forgejo.BaseURL, cfg.Forgejo.Token)
|
||||||
|
if err != nil {
|
||||||
|
logger.Error("failed to create forgejo client", "error", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
dispatcher, err := nsc.NewDispatcher(nsc.Options{
|
||||||
|
BinaryPath: cfg.Namespace.NSCBinary,
|
||||||
|
ComputeBaseURL: cfg.Namespace.ComputeBaseURL,
|
||||||
|
DefaultImage: cfg.Namespace.Image,
|
||||||
|
DefaultMachine: cfg.Namespace.MachineType,
|
||||||
|
MacosBaseImageID: cfg.Namespace.MacosBaseImageID,
|
||||||
|
MacosMachineArch: cfg.Namespace.MacosMachineArch,
|
||||||
|
DefaultDuration: cfg.Namespace.Duration.Duration,
|
||||||
|
WorkDir: cfg.Namespace.WorkDir,
|
||||||
|
MaxParallel: cfg.Namespace.MaxParallel,
|
||||||
|
LinuxCachePath: cfg.Namespace.LinuxCachePath,
|
||||||
|
LinuxCacheVolumes: toNSCCacheVolumes(cfg.Namespace.LinuxCacheVolumes),
|
||||||
|
MacosCachePath: cfg.Namespace.MacosCachePath,
|
||||||
|
MacosCacheVolumes: toNSCCacheVolumes(cfg.Namespace.MacosCacheVolumes),
|
||||||
|
RunnerNamePrefix: cfg.Runner.NamePrefix,
|
||||||
|
Executor: cfg.Runner.Executor,
|
||||||
|
Network: cfg.Namespace.Network,
|
||||||
|
Logger: logger,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
logger.Error("failed to create dispatcher", "error", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
service := app.NewService(app.Config{
|
||||||
|
DefaultScope: scope,
|
||||||
|
DefaultLabels: cfg.Forgejo.DefaultLabels,
|
||||||
|
InstanceURL: cfg.Forgejo.InstanceURL,
|
||||||
|
DefaultTTL: cfg.Namespace.Duration.Duration,
|
||||||
|
AllowLabels: cfg.Namespace.AllowLabels,
|
||||||
|
AllowScopes: cfg.Namespace.AllowScopes,
|
||||||
|
}, forgejoClient, dispatcher, logger)
|
||||||
|
|
||||||
|
srv := server.New(cfg.Listen, service, logger)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
logger.Info("dispatcher listening", "addr", cfg.Listen)
|
||||||
|
if err := srv.ListenAndServe(); err != nil && err != context.Canceled && err != http.ErrServerClosed {
|
||||||
|
logger.Error("server terminated", "error", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
interrupt := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(interrupt, syscall.SIGTERM, syscall.SIGINT)
|
||||||
|
<-interrupt
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
_ = srv.Shutdown(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func toNSCCacheVolumes(volumes []config.CacheVolumeConfig) []nsc.CacheVolume {
|
||||||
|
out := make([]nsc.CacheVolume, 0, len(volumes))
|
||||||
|
for _, volume := range volumes {
|
||||||
|
out = append(out, nsc.CacheVolume{
|
||||||
|
Tag: volume.Tag,
|
||||||
|
MountPoint: volume.MountPoint,
|
||||||
|
SizeGb: volume.SizeGb,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
46
services/forgejo-nsc/config.example.yaml
Normal file
46
services/forgejo-nsc/config.example.yaml
Normal file
|
|
@ -0,0 +1,46 @@
|
||||||
|
listen: ":8080"
|
||||||
|
|
||||||
|
forgejo:
|
||||||
|
base_url: "https://forgejo.example.com"
|
||||||
|
token: "${FORGEJO_PERSONAL_ACCESS_TOKEN}"
|
||||||
|
default_scope:
|
||||||
|
level: "organization"
|
||||||
|
owner: "example"
|
||||||
|
default_labels:
|
||||||
|
- namespace-profile-linux-medium
|
||||||
|
timeout: "30s"
|
||||||
|
|
||||||
|
namespace:
|
||||||
|
nsc_binary: "nsc"
|
||||||
|
compute_base_url: "https://ord4.compute.namespaceapis.com"
|
||||||
|
image: "code.forgejo.org/forgejo/runner:11"
|
||||||
|
machine_type: "4x8"
|
||||||
|
macos_base_image_id: "tahoe"
|
||||||
|
macos_machine_arch: "arm64"
|
||||||
|
duration: "30m"
|
||||||
|
workdir: "/var/lib/forgejo-runner"
|
||||||
|
max_parallel: 4
|
||||||
|
network: ""
|
||||||
|
linux_cache_path: "/var/cache/burrow"
|
||||||
|
linux_cache_volumes:
|
||||||
|
- tag: "burrow-forgejo-linux-nix-v2"
|
||||||
|
mount_point: "/nix"
|
||||||
|
size_gb: 80
|
||||||
|
- tag: "burrow-forgejo-linux-cache-v2"
|
||||||
|
mount_point: "/var/cache/burrow"
|
||||||
|
size_gb: 80
|
||||||
|
macos_cache_path: "/Users/runner/.cache/burrow"
|
||||||
|
macos_cache_volumes:
|
||||||
|
- tag: "burrow-forgejo-macos-shared-v1"
|
||||||
|
mount_point: "/Users/runner/.cache/burrow/shared"
|
||||||
|
size_gb: 80
|
||||||
|
- tag: "burrow-forgejo-macos-macos-v1"
|
||||||
|
mount_point: "/Users/runner/.cache/burrow/lane/macos"
|
||||||
|
size_gb: 80
|
||||||
|
- tag: "burrow-forgejo-macos-ios-simulator-v1"
|
||||||
|
mount_point: "/Users/runner/.cache/burrow/lane/ios-simulator"
|
||||||
|
size_gb: 80
|
||||||
|
|
||||||
|
runner:
|
||||||
|
name_prefix: "nscloud-"
|
||||||
|
executor: "shell"
|
||||||
35
services/forgejo-nsc/deploy/autoscaler.yaml
Normal file
35
services/forgejo-nsc/deploy/autoscaler.yaml
Normal file
|
|
@ -0,0 +1,35 @@
|
||||||
|
listen: "127.0.0.1:8090"
|
||||||
|
|
||||||
|
dispatcher:
|
||||||
|
url: "http://127.0.0.1:8080"
|
||||||
|
|
||||||
|
instances:
|
||||||
|
- name: burrow
|
||||||
|
forgejo:
|
||||||
|
base_url: "http://127.0.0.1:3000"
|
||||||
|
token: "PENDING-FORGEJO-PAT"
|
||||||
|
scope:
|
||||||
|
level: "repository"
|
||||||
|
owner: "hackclub"
|
||||||
|
name: "burrow"
|
||||||
|
disable_polling: false
|
||||||
|
poll_interval: "30s"
|
||||||
|
webhook_secret: "PENDING-WEBHOOK-SECRET"
|
||||||
|
webhook:
|
||||||
|
url: "https://nsc-autoscaler.burrow.net/webhook/burrow"
|
||||||
|
content_type: "json"
|
||||||
|
events: ["workflow_job"]
|
||||||
|
active: true
|
||||||
|
targets:
|
||||||
|
- labels: ["namespace-profile-linux-medium"]
|
||||||
|
min_idle: 0
|
||||||
|
ttl: "20m"
|
||||||
|
machine_type: "4x8"
|
||||||
|
- labels: ["namespace-profile-macos-large"]
|
||||||
|
min_idle: 0
|
||||||
|
ttl: "90m"
|
||||||
|
machine_type: "6x14"
|
||||||
|
- labels: ["namespace-profile-windows-large"]
|
||||||
|
min_idle: 0
|
||||||
|
ttl: "45m"
|
||||||
|
machine_type: "windows/amd64:8x16"
|
||||||
56
services/forgejo-nsc/deploy/dispatcher.yaml
Normal file
56
services/forgejo-nsc/deploy/dispatcher.yaml
Normal file
|
|
@ -0,0 +1,56 @@
|
||||||
|
listen: "127.0.0.1:8080"
|
||||||
|
|
||||||
|
forgejo:
|
||||||
|
base_url: "http://127.0.0.1:3000"
|
||||||
|
instance_url: "https://git.burrow.net"
|
||||||
|
token: "PENDING-FORGEJO-PAT"
|
||||||
|
default_scope:
|
||||||
|
level: "repository"
|
||||||
|
owner: "hackclub"
|
||||||
|
name: "burrow"
|
||||||
|
default_labels:
|
||||||
|
- namespace-profile-linux-medium
|
||||||
|
timeout: "30s"
|
||||||
|
|
||||||
|
namespace:
|
||||||
|
nsc_binary: "nsc"
|
||||||
|
compute_base_url: "https://ord4.compute.namespaceapis.com"
|
||||||
|
image: "code.forgejo.org/forgejo/runner:11"
|
||||||
|
machine_type: "4x8"
|
||||||
|
macos_base_image_id: "tahoe"
|
||||||
|
macos_machine_arch: "arm64"
|
||||||
|
duration: "30m"
|
||||||
|
workdir: "/var/lib/forgejo-runner"
|
||||||
|
max_parallel: 4
|
||||||
|
allow_labels:
|
||||||
|
- namespace-profile-linux-medium
|
||||||
|
- namespace-profile-macos-large
|
||||||
|
- namespace-profile-windows-large
|
||||||
|
allow_scopes:
|
||||||
|
- "repository:hackclub/burrow"
|
||||||
|
instance_tags:
|
||||||
|
- "burrow"
|
||||||
|
network: ""
|
||||||
|
linux_cache_path: "/var/cache/burrow"
|
||||||
|
linux_cache_volumes:
|
||||||
|
- tag: "burrow-forgejo-linux-nix-v2"
|
||||||
|
mount_point: "/nix"
|
||||||
|
size_gb: 80
|
||||||
|
- tag: "burrow-forgejo-linux-cache-v2"
|
||||||
|
mount_point: "/var/cache/burrow"
|
||||||
|
size_gb: 80
|
||||||
|
macos_cache_path: "/Users/runner/.cache/burrow"
|
||||||
|
macos_cache_volumes:
|
||||||
|
- tag: "burrow-forgejo-macos-shared-v1"
|
||||||
|
mount_point: "/Users/runner/.cache/burrow/shared"
|
||||||
|
size_gb: 80
|
||||||
|
- tag: "burrow-forgejo-macos-macos-v1"
|
||||||
|
mount_point: "/Users/runner/.cache/burrow/lane/macos"
|
||||||
|
size_gb: 80
|
||||||
|
- tag: "burrow-forgejo-macos-ios-simulator-v1"
|
||||||
|
mount_point: "/Users/runner/.cache/burrow/lane/ios-simulator"
|
||||||
|
size_gb: 80
|
||||||
|
|
||||||
|
runner:
|
||||||
|
name_prefix: "nscloud-"
|
||||||
|
executor: "shell"
|
||||||
65
services/forgejo-nsc/go.mod
Normal file
65
services/forgejo-nsc/go.mod
Normal file
|
|
@ -0,0 +1,65 @@
|
||||||
|
module github.com/burrow/forgejo-nsc
|
||||||
|
|
||||||
|
go 1.24.4
|
||||||
|
|
||||||
|
require (
|
||||||
|
buf.build/gen/go/namespace/cloud/connectrpc/go v1.19.1-20260212004106-290ae81f8d6d.2
|
||||||
|
buf.build/gen/go/namespace/cloud/protocolbuffers/go v1.36.11-20260212004106-290ae81f8d6d.1
|
||||||
|
connectrpc.com/connect v1.19.1
|
||||||
|
github.com/go-chi/chi/v5 v5.2.1
|
||||||
|
github.com/google/uuid v1.6.0
|
||||||
|
golang.org/x/crypto v0.48.0
|
||||||
|
golang.org/x/sync v0.19.0
|
||||||
|
google.golang.org/protobuf v1.36.11
|
||||||
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
|
namespacelabs.dev/foundation v0.0.478
|
||||||
|
)
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||||
|
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||||
|
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||||
|
github.com/go-logr/logr v1.4.3 // indirect
|
||||||
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
|
||||||
|
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||||
|
github.com/jxskiss/base62 v1.1.0 // indirect
|
||||||
|
github.com/kr/text v0.2.0 // indirect
|
||||||
|
github.com/magiconair/properties v1.8.6 // indirect
|
||||||
|
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||||
|
github.com/mattn/go-zglob v0.0.3 // indirect
|
||||||
|
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||||
|
github.com/morikuni/aec v1.0.0 // indirect
|
||||||
|
github.com/muesli/reflow v0.3.0 // indirect
|
||||||
|
github.com/pelletier/go-toml v1.9.5 // indirect
|
||||||
|
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||||
|
github.com/rivo/uniseg v0.4.2 // indirect
|
||||||
|
github.com/segmentio/ksuid v1.0.4 // indirect
|
||||||
|
github.com/spf13/afero v1.9.2 // indirect
|
||||||
|
github.com/spf13/cast v1.7.0 // indirect
|
||||||
|
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||||
|
github.com/spf13/pflag v1.0.7 // indirect
|
||||||
|
github.com/spf13/viper v1.14.0 // indirect
|
||||||
|
github.com/subosito/gotenv v1.4.1 // indirect
|
||||||
|
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||||
|
go.opentelemetry.io/otel v1.38.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/metric v1.38.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/trace v1.38.0 // indirect
|
||||||
|
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
|
||||||
|
golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect
|
||||||
|
golang.org/x/net v0.49.0 // indirect
|
||||||
|
golang.org/x/sys v0.41.0 // indirect
|
||||||
|
golang.org/x/term v0.40.0 // indirect
|
||||||
|
golang.org/x/text v0.34.0 // indirect
|
||||||
|
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
|
||||||
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
|
||||||
|
google.golang.org/grpc v1.76.0 // indirect
|
||||||
|
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||||
|
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||||
|
helm.sh/helm/v3 v3.18.4 // indirect
|
||||||
|
namespacelabs.dev/go-ids v0.0.0-20221124082625-9fc72ee06af7 // indirect
|
||||||
|
)
|
||||||
575
services/forgejo-nsc/go.sum
Normal file
575
services/forgejo-nsc/go.sum
Normal file
|
|
@ -0,0 +1,575 @@
|
||||||
|
buf.build/gen/go/namespace/cloud/connectrpc/go v1.19.1-20260212004106-290ae81f8d6d.2 h1:XaeFtt6yN8G5q2uYoiTjyshOyai1Q+GzwfEKlxrTzVw=
|
||||||
|
buf.build/gen/go/namespace/cloud/connectrpc/go v1.19.1-20260212004106-290ae81f8d6d.2/go.mod h1:QvCL7PUDMFotMXVUoWMeRClEEnCbh7S51xHy39mO+H4=
|
||||||
|
buf.build/gen/go/namespace/cloud/protocolbuffers/go v1.36.11-20260212004106-290ae81f8d6d.1 h1:xTgPJaOj5QNRPAA3nxW3fTz01aAOLr/6SG7C4Iqxm54=
|
||||||
|
buf.build/gen/go/namespace/cloud/protocolbuffers/go v1.36.11-20260212004106-290ae81f8d6d.1/go.mod h1:Il2wpJNQB40Yj3Rmuhg5xKJPSXaZVwij+Q30d1PNuNY=
|
||||||
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||||
|
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||||
|
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||||
|
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||||
|
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||||
|
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||||
|
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||||
|
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||||
|
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||||
|
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||||
|
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||||
|
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||||
|
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||||
|
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||||
|
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
||||||
|
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||||
|
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
|
||||||
|
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||||
|
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||||
|
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||||
|
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||||
|
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||||
|
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||||
|
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||||
|
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||||
|
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||||
|
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||||
|
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||||
|
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||||
|
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||||
|
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||||
|
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||||
|
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||||
|
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||||
|
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
|
||||||
|
connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
|
||||||
|
connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
|
||||||
|
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||||
|
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
|
||||||
|
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||||
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
|
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||||
|
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
|
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||||
|
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||||
|
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||||
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
|
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||||
|
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||||
|
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||||
|
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||||
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
|
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||||
|
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||||
|
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||||
|
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||||
|
github.com/go-chi/chi/v5 v5.2.1 h1:KOIHODQj58PmL80G2Eak4WdvUzjSJSm0vG72crDCqb8=
|
||||||
|
github.com/go-chi/chi/v5 v5.2.1/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
|
||||||
|
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||||
|
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||||
|
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||||
|
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||||
|
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||||
|
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||||
|
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||||
|
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
|
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||||
|
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||||
|
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||||
|
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||||
|
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||||
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||||
|
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||||
|
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||||
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
|
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||||
|
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
|
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
|
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||||
|
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||||
|
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
|
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||||
|
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||||
|
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||||
|
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||||
|
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
|
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
|
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||||
|
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
|
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
|
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
|
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||||
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||||
|
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||||
|
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
|
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||||
|
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||||
|
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||||
|
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||||
|
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||||
|
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||||
|
github.com/jxskiss/base62 v1.1.0 h1:A5zbF8v8WXx2xixnAKD2w+abC+sIzYJX+nxmhA6HWFw=
|
||||||
|
github.com/jxskiss/base62 v1.1.0/go.mod h1:HhWAlUXvxKThfOlZbcuFzsqwtF5TcqS9ru3y5GfjWAc=
|
||||||
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
|
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||||
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
|
github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
|
||||||
|
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||||
|
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
|
||||||
|
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||||
|
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||||
|
github.com/mattn/go-zglob v0.0.3 h1:6Ry4EYsScDyt5di4OI6xw1bYhOqfE5S33Z1OPy+d+To=
|
||||||
|
github.com/mattn/go-zglob v0.0.3/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
|
||||||
|
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||||
|
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||||
|
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||||
|
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||||
|
github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
|
||||||
|
github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
|
||||||
|
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
|
||||||
|
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||||
|
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||||
|
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||||
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||||
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
|
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||||
|
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||||
|
github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=
|
||||||
|
github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||||
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
|
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||||
|
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||||
|
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
|
||||||
|
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
|
||||||
|
github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
|
||||||
|
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
|
||||||
|
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
|
||||||
|
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||||
|
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||||
|
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||||
|
github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
|
||||||
|
github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
|
github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU=
|
||||||
|
github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As=
|
||||||
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
|
||||||
|
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||||
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
|
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||||
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||||
|
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||||
|
github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
|
||||||
|
github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
|
||||||
|
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||||
|
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||||
|
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
|
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
|
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
|
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||||
|
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||||
|
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||||
|
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
|
||||||
|
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
|
||||||
|
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4=
|
||||||
|
go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
|
||||||
|
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
|
||||||
|
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
|
||||||
|
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
|
||||||
|
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
|
||||||
|
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
|
||||||
|
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
|
||||||
|
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
|
||||||
|
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
|
||||||
|
go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
|
||||||
|
go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
|
||||||
|
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||||
|
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||||
|
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
|
||||||
|
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
|
||||||
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||||
|
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||||
|
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||||
|
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
|
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||||
|
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||||
|
golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU=
|
||||||
|
golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
|
||||||
|
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||||
|
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
|
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||||
|
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||||
|
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||||
|
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||||
|
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
|
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
|
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
|
||||||
|
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||||
|
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||||
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||||
|
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||||
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
|
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
|
||||||
|
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
|
||||||
|
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
||||||
|
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
||||||
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||||
|
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||||
|
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||||
|
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||||
|
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
||||||
|
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||||
|
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||||
|
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||||
|
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||||
|
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||||
|
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
|
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||||
|
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||||
|
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||||
|
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||||
|
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
||||||
|
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
|
||||||
|
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||||
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||||
|
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
|
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
|
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||||
|
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||||
|
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||||
|
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
|
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||||
|
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||||
|
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||||
|
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
|
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
|
||||||
|
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
|
||||||
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
|
||||||
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
|
||||||
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||||
|
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||||
|
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||||
|
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||||
|
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
|
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
|
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
|
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||||
|
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||||
|
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||||
|
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||||
|
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||||
|
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||||
|
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
|
||||||
|
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||||
|
google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
|
||||||
|
google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
|
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||||
|
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||||
|
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||||
|
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||||
|
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||||
|
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
|
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||||
|
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||||
|
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||||
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
helm.sh/helm/v3 v3.18.4 h1:pNhnHM3nAmDrxz6/UC+hfjDY4yeDATQCka2/87hkZXQ=
|
||||||
|
helm.sh/helm/v3 v3.18.4/go.mod h1:WVnwKARAw01iEdjpEkP7Ii1tT1pTPYfM1HsakFKM3LI=
|
||||||
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||||
|
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
|
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
|
namespacelabs.dev/foundation v0.0.478 h1:3xFLZcrjih7Jjey2N7faSfr6EoBCg2LMTHipq/3Hlrg=
|
||||||
|
namespacelabs.dev/foundation v0.0.478/go.mod h1:svBrTIfZK773sytmjudGkCzQWNisxcQtcWNCs+uLznI=
|
||||||
|
namespacelabs.dev/go-ids v0.0.0-20221124082625-9fc72ee06af7 h1:8NlnfPlzDSJr8TYV/qarIWwhjLd1gOXf3Jme0M/oGBM=
|
||||||
|
namespacelabs.dev/go-ids v0.0.0-20221124082625-9fc72ee06af7/go.mod h1:J+Sd+ngeffnCsaO/M7zgs2bR8Klq/ZBhS0+bbnDEH2M=
|
||||||
|
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||||
|
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||||
|
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||||
267
services/forgejo-nsc/internal/app/service.go
Normal file
267
services/forgejo-nsc/internal/app/service.go
Normal file
|
|
@ -0,0 +1,267 @@
|
||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
|
||||||
|
"github.com/burrow/forgejo-nsc/internal/forgejo"
|
||||||
|
"github.com/burrow/forgejo-nsc/internal/nsc"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Dispatcher launches ephemeral runner instances on the compute backend and
// returns the launched runner's name.
type Dispatcher interface {
	LaunchRunner(ctx context.Context, req nsc.LaunchRequest) (string, error)
}
|
||||||
|
|
||||||
|
// ForgejoClient mints runner registration tokens for a given scope.
type ForgejoClient interface {
	RegistrationToken(ctx context.Context, scope forgejo.Scope) (string, error)
}
|
||||||
|
|
||||||
|
// Service coordinates runner dispatches: it fetches Forgejo registration
// tokens and hands launch requests to the compute dispatcher, applying
// configured defaults and allowlists.
type Service struct {
	forgejo    ForgejoClient
	dispatcher Dispatcher
	logger     *slog.Logger

	defaultScope  forgejo.Scope // scope used when a request names none
	defaultLabels []string      // labels used when a request names none
	instanceURL   string        // Forgejo base URL handed to launched runners
	defaultTTL    time.Duration // runner lifetime used when a request names none

	// Allowlists stored as sets; an empty set means "allow everything".
	allowLabels map[string]struct{} // keyed by normalized label name
	allowScopes map[string]struct{} // compared against scopeKey() output
}
|
||||||
|
|
||||||
|
// Config carries the dispatch service's static settings.
type Config struct {
	DefaultScope  forgejo.Scope // fallback registration scope
	DefaultLabels []string      // fallback runner labels
	InstanceURL   string        // Forgejo base URL passed to launched runners
	DefaultTTL    time.Duration // fallback runner lifetime
	AllowLabels   []string      // permitted labels; empty allows all
	AllowScopes   []string      // permitted scope keys; empty allows all
}
|
||||||
|
|
||||||
|
func NewService(cfg Config, forgejo ForgejoClient, dispatcher Dispatcher, logger *slog.Logger) *Service {
|
||||||
|
if logger == nil {
|
||||||
|
logger = slog.Default()
|
||||||
|
}
|
||||||
|
allowLabels := make(map[string]struct{}, len(cfg.AllowLabels))
|
||||||
|
for _, label := range cfg.AllowLabels {
|
||||||
|
allowLabels[normalizeLabel(label)] = struct{}{}
|
||||||
|
}
|
||||||
|
allowScopes := make(map[string]struct{}, len(cfg.AllowScopes))
|
||||||
|
for _, scope := range cfg.AllowScopes {
|
||||||
|
allowScopes[scope] = struct{}{}
|
||||||
|
}
|
||||||
|
return &Service{
|
||||||
|
defaultScope: cfg.DefaultScope,
|
||||||
|
defaultLabels: cfg.DefaultLabels,
|
||||||
|
instanceURL: cfg.InstanceURL,
|
||||||
|
defaultTTL: cfg.DefaultTTL,
|
||||||
|
forgejo: forgejo,
|
||||||
|
dispatcher: dispatcher,
|
||||||
|
logger: logger,
|
||||||
|
allowLabels: allowLabels,
|
||||||
|
allowScopes: allowScopes,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DispatchRequest describes one dispatch call: how many runners to start and
// which (optional) overrides of the service defaults to apply.
type DispatchRequest struct {
	Count    int               // number of runners; <= 0 means 1
	Labels   []string          // runner labels; empty uses the service defaults
	Scope    *Scope            // registration scope; nil uses the service default
	TTL      time.Duration     // runner lifetime; 0 uses the service default
	Machine  string            // machine type passed through to the dispatcher
	Image    string            // runner image passed through to the dispatcher
	ExtraEnv map[string]string // extra environment passed through to the dispatcher
}

// Scope is the wire-level scope selector: a level plus owner/name where the
// level requires them.
type Scope struct {
	Level string
	Owner string
	Name  string
}

// DispatchResponse lists the runners that were started.
type DispatchResponse struct {
	Runners []RunnerHandle `json:"runners"`
}

// RunnerHandle identifies a launched runner by name.
type RunnerHandle struct {
	Name string `json:"name"`
}
|
||||||
|
|
||||||
|
func launchContext(ttl time.Duration) (context.Context, context.CancelFunc) {
|
||||||
|
if ttl <= 0 {
|
||||||
|
return context.WithTimeout(context.Background(), 2*time.Hour)
|
||||||
|
}
|
||||||
|
// Provisioning can legitimately take several minutes before the runner starts
|
||||||
|
// processing the actual Forgejo job. Keep the launch context independent from
|
||||||
|
// the caller's HTTP timeout so autoscaler/webhook requests don't kill active
|
||||||
|
// bootstraps mid-flight.
|
||||||
|
return context.WithTimeout(context.Background(), ttl+30*time.Minute)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) Dispatch(ctx context.Context, req DispatchRequest) (DispatchResponse, error) {
|
||||||
|
count := req.Count
|
||||||
|
if count <= 0 {
|
||||||
|
count = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
scope, err := s.mergeScope(req.Scope)
|
||||||
|
if err != nil {
|
||||||
|
return DispatchResponse{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
labels, err := s.mergeLabels(req.Labels)
|
||||||
|
if err != nil {
|
||||||
|
return DispatchResponse{}, err
|
||||||
|
}
|
||||||
|
if len(labels) == 0 {
|
||||||
|
return DispatchResponse{}, errors.New("no runner labels resolved")
|
||||||
|
}
|
||||||
|
|
||||||
|
ttl := req.TTL
|
||||||
|
if ttl == 0 {
|
||||||
|
ttl = s.defaultTTL
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
res := DispatchResponse{
|
||||||
|
Runners: make([]RunnerHandle, count),
|
||||||
|
}
|
||||||
|
eg, egCtx := errgroup.WithContext(ctx)
|
||||||
|
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
index := i
|
||||||
|
eg.Go(func() error {
|
||||||
|
token, err := s.forgejo.RegistrationToken(egCtx, scope)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("fetching registration token: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
launchCtx, cancel := launchContext(ttl)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
name, err := s.dispatcher.LaunchRunner(launchCtx, nsc.LaunchRequest{
|
||||||
|
Token: token,
|
||||||
|
InstanceURL: s.instanceURL,
|
||||||
|
Labels: labels,
|
||||||
|
Duration: ttl,
|
||||||
|
MachineType: req.Machine,
|
||||||
|
Image: req.Image,
|
||||||
|
ExtraEnv: req.ExtraEnv,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
res.Runners[index] = RunnerHandle{Name: name}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := eg.Wait(); err != nil {
|
||||||
|
return DispatchResponse{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) mergeScope(value *Scope) (forgejo.Scope, error) {
|
||||||
|
if value == nil {
|
||||||
|
return s.defaultScope, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
scope := forgejo.Scope{
|
||||||
|
Level: forgejo.ScopeLevel(value.Level),
|
||||||
|
Owner: value.Owner,
|
||||||
|
Name: value.Name,
|
||||||
|
}
|
||||||
|
if scope.Level == "" {
|
||||||
|
return forgejo.Scope{}, errors.New("scope level is required")
|
||||||
|
}
|
||||||
|
switch scope.Level {
|
||||||
|
case forgejo.ScopeInstance:
|
||||||
|
if !s.scopeAllowed(scope) {
|
||||||
|
return forgejo.Scope{}, fmt.Errorf("scope %q not allowed", scopeKey(scope))
|
||||||
|
}
|
||||||
|
return scope, nil
|
||||||
|
case forgejo.ScopeOrganization:
|
||||||
|
if scope.Owner == "" {
|
||||||
|
return forgejo.Scope{}, errors.New("organization scope requires owner")
|
||||||
|
}
|
||||||
|
if !s.scopeAllowed(scope) {
|
||||||
|
return forgejo.Scope{}, fmt.Errorf("scope %q not allowed", scopeKey(scope))
|
||||||
|
}
|
||||||
|
return scope, nil
|
||||||
|
case forgejo.ScopeRepository:
|
||||||
|
if scope.Owner == "" || scope.Name == "" {
|
||||||
|
return forgejo.Scope{}, errors.New("repository scope requires owner and name")
|
||||||
|
}
|
||||||
|
if !s.scopeAllowed(scope) {
|
||||||
|
return forgejo.Scope{}, fmt.Errorf("scope %q not allowed", scopeKey(scope))
|
||||||
|
}
|
||||||
|
return scope, nil
|
||||||
|
default:
|
||||||
|
return forgejo.Scope{}, fmt.Errorf("unsupported scope %q", scope.Level)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) mergeLabels(labels []string) ([]string, error) {
|
||||||
|
var resolved []string
|
||||||
|
if len(labels) == 0 {
|
||||||
|
resolved = append([]string{}, s.defaultLabels...)
|
||||||
|
} else {
|
||||||
|
resolved = labels
|
||||||
|
}
|
||||||
|
if len(s.allowLabels) == 0 {
|
||||||
|
return resolved, nil
|
||||||
|
}
|
||||||
|
for _, label := range resolved {
|
||||||
|
norm := normalizeLabel(label)
|
||||||
|
if _, ok := s.allowLabels[norm]; !ok {
|
||||||
|
return nil, fmt.Errorf("label %q not allowed", label)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return resolved, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeLabel strips surrounding whitespace and any explicit executor
// suffix ("label:host"), returning the bare label name that workflow files
// and config allowlists use. An all-whitespace label normalizes to "".
func normalizeLabel(label string) string {
	trimmed := strings.TrimSpace(label)
	// strings.Cut returns the whole string when ":" is absent, so this single
	// call covers the suffixed, plain, and empty cases alike.
	base, _, _ := strings.Cut(trimmed, ":")
	return base
}
|
||||||
|
|
||||||
|
func scopeKey(scope forgejo.Scope) string {
|
||||||
|
switch scope.Level {
|
||||||
|
case forgejo.ScopeInstance:
|
||||||
|
return "instance"
|
||||||
|
case forgejo.ScopeOrganization:
|
||||||
|
return fmt.Sprintf("organization:%s", scope.Owner)
|
||||||
|
case forgejo.ScopeRepository:
|
||||||
|
return fmt.Sprintf("repository:%s/%s", scope.Owner, scope.Name)
|
||||||
|
default:
|
||||||
|
return string(scope.Level)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) scopeAllowed(scope forgejo.Scope) bool {
|
||||||
|
if len(s.allowScopes) == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
_, ok := s.allowScopes[scopeKey(scope)]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
160
services/forgejo-nsc/internal/app/service_test.go
Normal file
160
services/forgejo-nsc/internal/app/service_test.go
Normal file
|
|
@ -0,0 +1,160 @@
|
||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/burrow/forgejo-nsc/internal/forgejo"
|
||||||
|
"github.com/burrow/forgejo-nsc/internal/nsc"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mockForgejo is a ForgejoClient test double. It records every requested
// scope and hands out tokens from a fixed list in order; once the list is
// exhausted it fails with context.Canceled as a sentinel error.
type mockForgejo struct {
	mu      sync.Mutex      // guards all fields; Dispatch calls concurrently
	tokens  []string        // canned tokens returned in order
	scopes  []forgejo.Scope // scopes observed, in call order
	err     error           // forced error for every call, if set
	counter int             // index of the next token to hand out
}

// RegistrationToken records the scope, then returns the forced error, the
// next canned token, or context.Canceled when the tokens run out.
func (m *mockForgejo) RegistrationToken(ctx context.Context, scope forgejo.Scope) (string, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.scopes = append(m.scopes, scope)
	if m.err != nil {
		return "", m.err
	}
	if m.counter >= len(m.tokens) {
		return "", context.Canceled
	}
	tok := m.tokens[m.counter]
	m.counter++
	return tok, nil
}
|
||||||
|
|
||||||
|
// mockDispatcher is a Dispatcher test double. It records launch requests and
// answers with canned runner names, falling back to "runner" when the canned
// list is shorter than the number of calls.
type mockDispatcher struct {
	mu        sync.Mutex         // guards all fields; Dispatch calls concurrently
	requests  []nsc.LaunchRequest // launch requests observed, in call order
	responses []string           // canned runner names returned in order
	err       error              // forced error for every call, if set
}

// LaunchRunner returns the forced error if set (without recording the
// request), otherwise records the request and returns the matching canned
// name or the default "runner".
func (m *mockDispatcher) LaunchRunner(ctx context.Context, req nsc.LaunchRequest) (string, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.err != nil {
		return "", m.err
	}
	m.requests = append(m.requests, req)
	idx := len(m.requests) - 1
	if idx < len(m.responses) {
		return m.responses[idx], nil
	}
	return "runner", nil
}
|
||||||
|
|
||||||
|
// TestServiceDispatchUsesDefaults verifies that an empty DispatchRequest
// falls back to the configured scope, labels, instance URL, and TTL, and
// launches exactly one runner.
func TestServiceDispatchUsesDefaults(t *testing.T) {
	forgejoMock := &mockForgejo{tokens: []string{"token"}}
	dispatcherMock := &mockDispatcher{responses: []string{"runner-default"}}

	cfg := Config{
		DefaultScope:  forgejo.Scope{Level: forgejo.ScopeInstance},
		DefaultLabels: []string{"nscloud"},
		InstanceURL:   "https://forgejo.example.com",
		DefaultTTL:    15 * time.Minute,
	}

	service := NewService(cfg, forgejoMock, dispatcherMock, nil)

	resp, err := service.Dispatch(context.Background(), DispatchRequest{})
	if err != nil {
		t.Fatalf("Dispatch returned error: %v", err)
	}
	if len(resp.Runners) != 1 || resp.Runners[0].Name != "runner-default" {
		t.Fatalf("unexpected dispatch response: %+v", resp)
	}

	// The registration token must have been requested with the default scope.
	if len(forgejoMock.scopes) != 1 || forgejoMock.scopes[0].Level != forgejo.ScopeInstance {
		t.Fatalf("expected default scope, got %+v", forgejoMock.scopes)
	}

	// Every default must have been forwarded to the dispatcher unchanged.
	if len(dispatcherMock.requests) != 1 {
		t.Fatalf("expected one dispatcher call, got %d", len(dispatcherMock.requests))
	}
	req := dispatcherMock.requests[0]
	if req.InstanceURL != cfg.InstanceURL {
		t.Fatalf("expected instance URL %s, got %s", cfg.InstanceURL, req.InstanceURL)
	}
	if got := req.Labels; len(got) != 1 || got[0] != "nscloud" {
		t.Fatalf("expected default labels, got %v", got)
	}
	if req.Duration != cfg.DefaultTTL {
		t.Fatalf("expected duration %v, got %v", cfg.DefaultTTL, req.Duration)
	}
}
|
||||||
|
|
||||||
|
// TestServiceDispatchCustomScopeAndCount verifies that explicit request
// values (count, labels, repository scope, TTL, machine, image, env) override
// the configured defaults and are forwarded to every dispatcher call.
func TestServiceDispatchCustomScopeAndCount(t *testing.T) {
	forgejoMock := &mockForgejo{tokens: []string{"token-1", "token-2"}}
	dispatcherMock := &mockDispatcher{responses: []string{"runner-1", "runner-2"}}

	cfg := Config{
		DefaultScope:  forgejo.Scope{Level: forgejo.ScopeInstance},
		DefaultLabels: []string{"default"},
		InstanceURL:   "https://forgejo.example.com",
		DefaultTTL:    10 * time.Minute,
	}

	service := NewService(cfg, forgejoMock, dispatcherMock, nil)

	reqScope := &Scope{Level: string(forgejo.ScopeRepository), Owner: "acme", Name: "repo"}
	res, err := service.Dispatch(context.Background(), DispatchRequest{
		Count:    2,
		Labels:   []string{"custom"},
		Scope:    reqScope,
		TTL:      5 * time.Minute,
		Machine:  "4x8",
		Image:    "runner:latest",
		ExtraEnv: map[string]string{"FOO": "bar"},
	})
	if err != nil {
		t.Fatalf("Dispatch returned error: %v", err)
	}
	if len(res.Runners) != 2 {
		t.Fatalf("expected two runners, got %+v", res)
	}

	// One token fetch per runner, each with the explicit repository scope.
	if len(forgejoMock.scopes) != 2 {
		t.Fatalf("expected two scope calls, got %d", len(forgejoMock.scopes))
	}
	for _, scope := range forgejoMock.scopes {
		if scope.Level != forgejo.ScopeRepository || scope.Owner != "acme" || scope.Name != "repo" {
			t.Fatalf("unexpected scope: %+v", scope)
		}
	}

	// Every launch must carry the request overrides, not the defaults.
	if len(dispatcherMock.requests) != 2 {
		t.Fatalf("expected two dispatcher calls, got %d", len(dispatcherMock.requests))
	}
	for _, call := range dispatcherMock.requests {
		if call.MachineType != "4x8" || call.Image != "runner:latest" {
			t.Fatalf("unexpected machine/image in %+v", call)
		}
		if call.Duration != 5*time.Minute {
			t.Fatalf("expected TTL to override default, got %v", call.Duration)
		}
		if call.Labels[0] != "custom" {
			t.Fatalf("expected custom labels, got %v", call.Labels)
		}
		if call.ExtraEnv["FOO"] != "bar" {
			t.Fatalf("expected env passthrough, got %v", call.ExtraEnv)
		}
	}
}
|
||||||
|
|
||||||
|
// TestServiceDispatchErrorsWithoutLabels verifies that a dispatch fails when
// neither the request nor the service config provides any runner labels.
func TestServiceDispatchErrorsWithoutLabels(t *testing.T) {
	service := NewService(Config{DefaultScope: forgejo.Scope{Level: forgejo.ScopeInstance}}, &mockForgejo{}, &mockDispatcher{}, nil)
	if _, err := service.Dispatch(context.Background(), DispatchRequest{}); err == nil {
		t.Fatalf("expected error when no labels are available")
	}
}
|
||||||
108
services/forgejo-nsc/internal/autoscaler/config.go
Normal file
108
services/forgejo-nsc/internal/autoscaler/config.go
Normal file
|
|
@ -0,0 +1,108 @@
|
||||||
|
package autoscaler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"gopkg.in/yaml.v3"
|
||||||
|
|
||||||
|
"github.com/burrow/forgejo-nsc/internal/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config is the top-level autoscaler configuration file layout.
type Config struct {
	Listen     string           `yaml:"listen"`     // HTTP listen address; defaults to ":8090"
	Dispatcher DispatcherConfig `yaml:"dispatcher"` // default dispatcher endpoint; url is required
	Instances  []InstanceConfig `yaml:"instances"`  // at least one is required
}

// DispatcherConfig points at the dispatch service HTTP API.
type DispatcherConfig struct {
	URL     string          `yaml:"url"`
	Timeout config.Duration `yaml:"timeout"` // defaults to 15s
}

// InstanceConfig configures scaling for one Forgejo instance.
type InstanceConfig struct {
	Name           string             `yaml:"name"`            // required; also the webhook route key
	Forgejo        ForgejoInstance    `yaml:"forgejo"`         // required API access
	Scope          config.ScopeConfig `yaml:"scope"`
	PollInterval   config.Duration    `yaml:"poll_interval"`   // defaults to 30s
	DisablePolling bool               `yaml:"disable_polling"` // webhook-only mode
	WebhookSecret  string             `yaml:"webhook_secret"`  // HMAC secret for inbound webhooks
	Webhook        WebhookConfig      `yaml:"webhook"`
	Dispatcher     *DispatcherConfig  `yaml:"dispatcher"`      // optional per-instance override
	Targets        []TargetConfig     `yaml:"targets"`         // at least one is required
}

// ForgejoInstance holds API access for a Forgejo deployment.
type ForgejoInstance struct {
	BaseURL string `yaml:"base_url"`
	Token   string `yaml:"token"`
}

// WebhookConfig describes the webhook to register on the instance.
type WebhookConfig struct {
	URL         string   `yaml:"url"`
	ContentType string   `yaml:"content_type"` // defaults to "json"
	Events      []string `yaml:"events"`       // defaults to ["workflow_job"]
	Active      *bool    `yaml:"active"`       // nil is treated as active
}

// TargetConfig describes one pool of runners to keep available.
type TargetConfig struct {
	Labels      []string          `yaml:"labels"`   // required
	MinIdle     int               `yaml:"min_idle"` // must be >= 0
	TTL         config.Duration   `yaml:"ttl"`
	MachineType string            `yaml:"machine_type"`
	Image       string            `yaml:"image"`
	Env         map[string]string `yaml:"env"`
}
|
||||||
|
|
||||||
|
func LoadConfig(path string) (Config, error) {
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return Config{}, err
|
||||||
|
}
|
||||||
|
var cfg Config
|
||||||
|
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
||||||
|
return Config{}, err
|
||||||
|
}
|
||||||
|
if cfg.Listen == "" {
|
||||||
|
cfg.Listen = ":8090"
|
||||||
|
}
|
||||||
|
if cfg.Dispatcher.URL == "" {
|
||||||
|
return Config{}, fmt.Errorf("dispatcher.url is required")
|
||||||
|
}
|
||||||
|
if cfg.Dispatcher.Timeout.Duration == 0 {
|
||||||
|
cfg.Dispatcher.Timeout = config.Duration{Duration: 15 * time.Second}
|
||||||
|
}
|
||||||
|
if len(cfg.Instances) == 0 {
|
||||||
|
return Config{}, fmt.Errorf("at least one instance must be configured")
|
||||||
|
}
|
||||||
|
for i := range cfg.Instances {
|
||||||
|
inst := &cfg.Instances[i]
|
||||||
|
if inst.Name == "" {
|
||||||
|
return Config{}, fmt.Errorf("instance[%d] missing name", i)
|
||||||
|
}
|
||||||
|
if inst.Forgejo.BaseURL == "" || inst.Forgejo.Token == "" {
|
||||||
|
return Config{}, fmt.Errorf("instance %s missing forgejo.base_url or token", inst.Name)
|
||||||
|
}
|
||||||
|
if inst.PollInterval.Duration == 0 {
|
||||||
|
inst.PollInterval = config.Duration{Duration: 30 * time.Second}
|
||||||
|
}
|
||||||
|
if len(inst.Webhook.Events) == 0 {
|
||||||
|
inst.Webhook.Events = []string{"workflow_job"}
|
||||||
|
}
|
||||||
|
if inst.Webhook.ContentType == "" {
|
||||||
|
inst.Webhook.ContentType = "json"
|
||||||
|
}
|
||||||
|
if len(inst.Targets) == 0 {
|
||||||
|
return Config{}, fmt.Errorf("instance %s requires at least one target", inst.Name)
|
||||||
|
}
|
||||||
|
for ti, tgt := range inst.Targets {
|
||||||
|
if len(tgt.Labels) == 0 {
|
||||||
|
return Config{}, fmt.Errorf("instance %s target[%d] missing labels", inst.Name, ti)
|
||||||
|
}
|
||||||
|
if tgt.MinIdle < 0 {
|
||||||
|
return Config{}, fmt.Errorf("instance %s target[%d] min_idle must be >= 0", inst.Name, ti)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cfg, nil
|
||||||
|
}
|
||||||
385
services/forgejo-nsc/internal/autoscaler/service.go
Normal file
385
services/forgejo-nsc/internal/autoscaler/service.go
Normal file
|
|
@ -0,0 +1,385 @@
|
||||||
|
package autoscaler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/go-chi/chi/v5"
|
||||||
|
|
||||||
|
"namespacelabs.dev/foundation/std/tasks"
|
||||||
|
|
||||||
|
"github.com/burrow/forgejo-nsc/internal/forgejo"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Service is the autoscaler entry point: it hosts the webhook/health HTTP
// endpoints and one InstanceController per configured Forgejo instance.
type Service struct {
	listen      string                         // HTTP listen address
	controllers map[string]*InstanceController // keyed by instance name (webhook route segment)
	router      chi.Router
}
|
||||||
|
|
||||||
|
// NewService builds the autoscaler from a validated Config: one controller
// per instance (with Forgejo client, dispatcher client, and desired webhook
// registration) plus the HTTP router exposing /healthz and
// /webhook/{instance}.
func NewService(cfg Config) (*Service, error) {
	controllers := make(map[string]*InstanceController)
	for _, inst := range cfg.Instances {
		scope, err := inst.Scope.ToScope()
		if err != nil {
			return nil, err
		}
		forgejoClient, err := forgejo.NewClient(inst.Forgejo.BaseURL, inst.Forgejo.Token)
		if err != nil {
			return nil, err
		}
		// Per-instance dispatcher override: used only when it names a URL,
		// and it inherits the global timeout when it leaves its own unset.
		dispCfg := cfg.Dispatcher
		if inst.Dispatcher != nil && inst.Dispatcher.URL != "" {
			dispCfg = *inst.Dispatcher
			if dispCfg.Timeout.Duration == 0 {
				dispCfg.Timeout = cfg.Dispatcher.Timeout
			}
		}
		dClient := newDispatcherClient(dispCfg.URL, dispCfg.Timeout.Duration)
		// A nil Active pointer means "active" by default.
		webhookActive := true
		if inst.Webhook.Active != nil {
			webhookActive = *inst.Webhook.Active
		}
		controller := &InstanceController{
			name:       inst.Name,
			cfg:        inst,
			scope:      scope,
			forgejo:    forgejoClient,
			dispatcher: dClient,
			webhook: forgejo.WebhookConfig{
				URL:         inst.Webhook.URL,
				ContentType: inst.Webhook.ContentType,
				Events:      inst.Webhook.Events,
				Active:      webhookActive,
			},
			secret: inst.WebhookSecret,
		}
		controllers[inst.Name] = controller
	}

	router := chi.NewRouter()
	service := &Service{
		listen:      cfg.Listen,
		controllers: controllers,
		router:      router,
	}

	router.Get("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("ok"))
	})
	router.Post("/webhook/{instance}", service.handleWebhook)

	return service, nil
}
|
||||||
|
|
||||||
|
func (s *Service) Start(ctx context.Context) error {
|
||||||
|
for _, controller := range s.controllers {
|
||||||
|
if err := controller.EnsureWebhook(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for _, controller := range s.controllers {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(c *InstanceController) {
|
||||||
|
defer wg.Done()
|
||||||
|
c.Run(ctx)
|
||||||
|
}(controller)
|
||||||
|
}
|
||||||
|
|
||||||
|
srv := &http.Server{
|
||||||
|
Addr: s.listen,
|
||||||
|
Handler: s.router,
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
<-ctx.Done()
|
||||||
|
_ = srv.Shutdown(context.Background())
|
||||||
|
}()
|
||||||
|
|
||||||
|
if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) handleWebhook(w http.ResponseWriter, r *http.Request) {
|
||||||
|
name := chi.URLParam(r, "instance")
|
||||||
|
controller, ok := s.controllers[name]
|
||||||
|
if !ok {
|
||||||
|
http.Error(w, "unknown instance", http.StatusNotFound)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
body, err := io.ReadAll(r.Body)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, "invalid body", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if controller.cfg.WebhookSecret != "" {
|
||||||
|
signature := r.Header.Get("X-Gitea-Signature")
|
||||||
|
if signature == "" {
|
||||||
|
http.Error(w, "missing signature", http.StatusUnauthorized)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !verifySignature(controller.cfg.WebhookSecret, signature, body) {
|
||||||
|
http.Error(w, "invalid signature", http.StatusUnauthorized)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var payload workflowJobPayload
|
||||||
|
if err := json.Unmarshal(body, &payload); err != nil {
|
||||||
|
http.Error(w, "bad payload", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
controller.MarkWebhookSeen()
|
||||||
|
if payload.Action == "queued" {
|
||||||
|
controller.DispatchForJob(r.Context(), payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusAccepted)
|
||||||
|
}
|
||||||
|
|
||||||
|
// workflowJobPayload is the subset of Forgejo's workflow_job webhook body
// that the autoscaler inspects.
type workflowJobPayload struct {
	// Action is the job's lifecycle event, e.g. "queued".
	Action string `json:"action"`
	// WorkflowJob carries the labels the job needs a runner for.
	WorkflowJob struct {
		Labels []string `json:"labels"`
	} `json:"workflow_job"`
}
|
||||||
|
|
||||||
|
// InstanceController drives autoscaling for one configured Forgejo
// instance: it keeps the webhook registered, polls runner/job state, and
// asks the dispatcher service for new runners when capacity is short.
type InstanceController struct {
	name       string                // instance key; appears in webhook URLs and task logs
	cfg        InstanceConfig        // per-instance settings (targets, polling, webhook secret)
	scope      forgejo.Scope         // Forgejo API scope this controller manages
	forgejo    *forgejo.Client       // Forgejo REST client
	dispatcher *dispatcherClient     // client for the runner-dispatch service
	ready      atomic.Bool           // set once a webhook delivery has been seen; presumably a readiness signal — consumer is outside this chunk
	webhook    forgejo.WebhookConfig // desired webhook registration; empty URL disables webhook management
	secret     string                // HMAC secret registered with the Forgejo webhook
}
|
||||||
|
|
||||||
|
func (c *InstanceController) EnsureWebhook(ctx context.Context) error {
|
||||||
|
if c.webhook.URL == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return tasks.Action("autoscaler.ensure-webhook").Arg("instance", c.name).Run(ctx, func(ctx context.Context) error {
|
||||||
|
return c.forgejo.EnsureWebhook(ctx, c.scope, c.webhook, c.secret)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *InstanceController) Run(ctx context.Context) {
|
||||||
|
if c.cfg.DisablePolling {
|
||||||
|
<-ctx.Done()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ticker := time.NewTicker(c.cfg.PollInterval.Duration)
|
||||||
|
defer ticker.Stop()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
case <-ticker.C:
|
||||||
|
_ = tasks.Action("autoscaler.poll").Arg("instance", c.name).Run(ctx, func(ctx context.Context) error {
|
||||||
|
return c.reconcile(ctx)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *InstanceController) reconcile(ctx context.Context) error {
|
||||||
|
runners, err := c.forgejo.ListRunners(ctx, c.scope)
|
||||||
|
if err != nil {
|
||||||
|
// Keep polling even if runner listing fails; we can still dispatch based on queued jobs.
|
||||||
|
runners = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, target := range c.cfg.Targets {
|
||||||
|
idle := countIdle(runners, target.Labels)
|
||||||
|
|
||||||
|
need := 0
|
||||||
|
if idle < target.MinIdle {
|
||||||
|
need = target.MinIdle - idle
|
||||||
|
}
|
||||||
|
|
||||||
|
jobs, jobErr := c.forgejo.ListRunJobs(ctx, c.scope, target.Labels)
|
||||||
|
if jobErr != nil {
|
||||||
|
return jobErr
|
||||||
|
}
|
||||||
|
waiting := countWaitingJobs(jobs, target.Labels)
|
||||||
|
// Scale-to-zero friendly: if anything is waiting and there are no idle runners
|
||||||
|
// for that label set, dispatch exactly one runner to unblock the queue.
|
||||||
|
if waiting > 0 && idle == 0 && need < 1 {
|
||||||
|
need = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if need <= 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := c.dispatch(ctx, target, need, "poll"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *InstanceController) dispatch(ctx context.Context, target TargetConfig, count int, reason string) error {
|
||||||
|
if count <= 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
req := dispatcherRequest{
|
||||||
|
Count: count,
|
||||||
|
Labels: target.Labels,
|
||||||
|
}
|
||||||
|
if target.TTL.Duration > 0 {
|
||||||
|
req.TTL = target.TTL.Duration.String()
|
||||||
|
}
|
||||||
|
if target.MachineType != "" {
|
||||||
|
req.MachineType = target.MachineType
|
||||||
|
}
|
||||||
|
if target.Image != "" {
|
||||||
|
req.Image = target.Image
|
||||||
|
}
|
||||||
|
if len(target.Env) > 0 {
|
||||||
|
req.Env = target.Env
|
||||||
|
}
|
||||||
|
return tasks.Action("autoscaler.dispatch").Arg("instance", c.name).Arg("reason", reason).Arg("labels", strings.Join(target.Labels, ",")).Run(ctx, func(ctx context.Context) error {
|
||||||
|
return c.dispatcher.Dispatch(ctx, req)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *InstanceController) DispatchForJob(ctx context.Context, payload workflowJobPayload) {
|
||||||
|
action := strings.ToLower(payload.Action)
|
||||||
|
if action != "queued" && action != "waiting" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
jobLabels := payload.WorkflowJob.Labels
|
||||||
|
for _, target := range c.cfg.Targets {
|
||||||
|
if labelsMatch(jobLabels, target.Labels) {
|
||||||
|
_ = c.dispatch(ctx, target, 1, "webhook")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarkWebhookSeen records that at least one webhook delivery has reached
// this controller. The flag is presumably read elsewhere as a health or
// readiness signal — the consumer is outside this file chunk.
func (c *InstanceController) MarkWebhookSeen() {
	c.ready.Store(true)
}
|
||||||
|
|
||||||
|
func countIdle(runners []forgejo.Runner, labels []string) int {
|
||||||
|
count := 0
|
||||||
|
for _, runner := range runners {
|
||||||
|
if strings.ToLower(runner.Status) != "online" || runner.Busy {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if labelsMatch(extractLabels(runner.Labels), labels) {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
func countWaitingJobs(jobs []forgejo.RunJob, labels []string) int {
|
||||||
|
count := 0
|
||||||
|
for _, job := range jobs {
|
||||||
|
if status := strings.ToLower(job.Status); status != "waiting" && status != "queued" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if labelsMatch(job.RunsOn, labels) {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractLabels(src []forgejo.RunnerLabel) []string {
|
||||||
|
result := make([]string, 0, len(src))
|
||||||
|
for _, lbl := range src {
|
||||||
|
result = append(result, lbl.Name)
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// labelsMatch reports whether every label in want is present in have.
// An empty want set matches anything.
func labelsMatch(have, want []string) bool {
	for _, required := range want {
		found := false
		for _, label := range have {
			if label == required {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// verifySignature checks an HMAC-SHA256 webhook signature against body.
// signature may be either a bare hex digest or prefixed ("sha256=<hex>");
// the comparison is constant-time via hmac.Equal.
func verifySignature(secret, signature string, body []byte) bool {
	if _, digest, ok := strings.Cut(signature, "="); ok {
		signature = digest
	}
	h := hmac.New(sha256.New, []byte(secret))
	h.Write(body)
	want := hex.EncodeToString(h.Sum(nil))
	return hmac.Equal([]byte(want), []byte(signature))
}
|
||||||
|
|
||||||
|
// dispatcherClient is a thin HTTP client for the runner-dispatch service.
type dispatcherClient struct {
	url    string       // dispatcher base URL; a trailing slash is tolerated
	client *http.Client // HTTP client carrying the request timeout
}
|
||||||
|
|
||||||
|
// dispatcherRequest is the JSON body posted to the dispatcher's
// /api/v1/dispatch endpoint.
type dispatcherRequest struct {
	Count       int               `json:"count"`                  // number of runners to start
	Labels      []string          `json:"labels"`                 // labels the runners register with
	TTL         string            `json:"ttl,omitempty"`          // runner lifetime as a Go duration string
	MachineType string            `json:"machine_type,omitempty"` // optional machine-type override
	Image       string            `json:"image,omitempty"`        // optional runner-image override
	Env         map[string]string `json:"env,omitempty"`          // extra environment for the runner
}
|
||||||
|
|
||||||
|
func newDispatcherClient(url string, timeout time.Duration) *dispatcherClient {
|
||||||
|
if timeout == 0 {
|
||||||
|
timeout = 30 * time.Second
|
||||||
|
}
|
||||||
|
return &dispatcherClient{
|
||||||
|
url: url,
|
||||||
|
client: &http.Client{
|
||||||
|
Timeout: timeout,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *dispatcherClient) Dispatch(ctx context.Context, req dispatcherRequest) error {
|
||||||
|
body, _ := json.Marshal(req)
|
||||||
|
endpoint := strings.TrimSuffix(d.url, "/") + "/api/v1/dispatch"
|
||||||
|
httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
httpReq.Header.Set("Content-Type", "application/json")
|
||||||
|
resp, err := d.client.Do(httpReq)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if resp.StatusCode >= 300 {
|
||||||
|
return fmt.Errorf("dispatcher returned %s", resp.Status)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
245
services/forgejo-nsc/internal/config/config.go
Normal file
245
services/forgejo-nsc/internal/config/config.go
Normal file
|
|
@ -0,0 +1,245 @@
|
||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"gopkg.in/yaml.v3"
|
||||||
|
|
||||||
|
"github.com/burrow/forgejo-nsc/internal/forgejo"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Duration wraps time.Duration to support YAML unmarshalling from strings.
|
||||||
|
// Duration wraps time.Duration to support YAML unmarshalling from duration
// strings (e.g. "15m") or bare integers (interpreted as seconds).
type Duration struct {
	time.Duration
}
|
||||||
|
|
||||||
|
// UnmarshalYAML implements yaml.v3 unmarshalling for Duration.
|
||||||
|
func (d *Duration) UnmarshalYAML(value *yaml.Node) error {
|
||||||
|
switch value.Tag {
|
||||||
|
case "!!int":
|
||||||
|
var seconds int64
|
||||||
|
if err := value.Decode(&seconds); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
d.Duration = time.Duration(seconds) * time.Second
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
parsed, err := time.ParseDuration(value.Value)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
d.Duration = parsed
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalYAML implements yaml.v3 marshalling.
|
||||||
|
// MarshalYAML implements yaml.v3 marshalling, rendering the duration in
// Go's string form (e.g. "30m0s").
func (d Duration) MarshalYAML() (any, error) {
	return d.Duration.String(), nil
}
|
||||||
|
|
||||||
|
// Config is the root configuration for the forgejo-nsc service.
type Config struct {
	Listen    string          `yaml:"listen"`    // HTTP listen address; defaults to ":8080"
	Forgejo   ForgejoConfig   `yaml:"forgejo"`   // Forgejo connection settings
	Namespace NamespaceConfig `yaml:"namespace"` // Namespace Cloud compute settings
	Runner    RunnerConfig    `yaml:"runner"`    // spawned-runner naming/executor settings
}
|
||||||
|
|
||||||
|
// CacheVolumeConfig describes one persistent cache volume attached to
// spawned runner instances.
type CacheVolumeConfig struct {
	Tag        string `yaml:"tag"`         // stable identifier for the volume
	MountPoint string `yaml:"mount_point"` // mount path inside the runner
	SizeGb     int64  `yaml:"size_gb"`     // volume size; must be positive
}
|
||||||
|
|
||||||
|
// ForgejoConfig holds the connection and scope settings for talking to the
// Forgejo instance.
type ForgejoConfig struct {
	// BaseURL is the API URL this service uses to reach Forgejo.
	BaseURL string `yaml:"base_url"`
	// InstanceURL is the URL runners should use when registering with Forgejo.
	// This must be reachable from the spawned runner (e.g. the public URL like
	// https://git.burrow.net), and may differ from BaseURL (which can be a local
	// loopback URL on the forge host).
	InstanceURL string `yaml:"instance_url"`
	// Token is the Forgejo access token used for API calls.
	Token string `yaml:"token"`
	// DefaultScope selects where runners/webhooks are managed by default.
	DefaultScope ScopeConfig `yaml:"default_scope"`
	// DefaultLabels are labels applied to runners when none are specified.
	DefaultLabels []string `yaml:"default_labels"`
	// Timeout bounds individual API requests; defaults to 30s.
	Timeout Duration `yaml:"timeout"`
	// ExtraHeaders is kept as a raw YAML node; its interpretation happens
	// elsewhere — not visible in this file.
	ExtraHeaders yaml.Node `yaml:"extra_headers"`
}
|
||||||
|
|
||||||
|
// ScopeConfig selects the Forgejo scope ("instance", "organization", or
// "repository") under which runners and webhooks are managed.
type ScopeConfig struct {
	Level string `yaml:"level"`           // one of instance/organization/repository (case-insensitive)
	Owner string `yaml:"owner,omitempty"` // required for organization and repository levels
	Name  string `yaml:"name,omitempty"`  // required for repository level
}
|
||||||
|
|
||||||
|
// NamespaceConfig controls how runner instances are launched on Namespace
// Cloud (via the nsc CLI for Linux, and the Compute API for macOS).
type NamespaceConfig struct {
	// NSCBinary is the path to the nsc CLI; defaults to "nsc".
	NSCBinary string `yaml:"nsc_binary"`
	// ComputeBaseURL is the Namespace Cloud Compute API endpoint (Connect RPC base URL).
	// This is used for macOS runners, since NSC "run" is container-based (Linux-only).
	// Example: "https://ord4.compute.namespaceapis.com"
	ComputeBaseURL string `yaml:"compute_base_url"`
	// Image is the container image for Linux runners.
	Image string `yaml:"image"`
	// MachineType selects the default instance size.
	MachineType string `yaml:"machine_type"`
	// MacosBaseImageID selects which macOS base image to use (e.g. "tahoe").
	MacosBaseImageID string `yaml:"macos_base_image_id"`
	// MacosMachineArch is the architecture used for macOS instances (typically "arm64").
	MacosMachineArch string `yaml:"macos_machine_arch"`
	// Duration is the instance lifetime; defaults to 30m.
	Duration Duration `yaml:"duration"`
	// WorkDir is the working directory inside the instance.
	WorkDir string `yaml:"workdir"`
	// MaxParallel caps concurrent instances; defaults to 4.
	MaxParallel int64 `yaml:"max_parallel"`
	// Environment lists extra environment entries for the runner.
	Environment []string `yaml:"environment"`
	// AllowLabels / AllowScopes restrict what may be dispatched; enforcement
	// happens outside this file.
	AllowLabels []string `yaml:"allow_labels"`
	AllowScopes []string `yaml:"allow_scopes"`
	// Network selects the instance network.
	Network string `yaml:"network"`
	// InstanceTags are attached to created instances.
	InstanceTags []string `yaml:"instance_tags"`
	// Linux/Macos cache settings; defaulted by Validate when left empty.
	LinuxCachePath    string              `yaml:"linux_cache_path"`
	LinuxCacheVolumes []CacheVolumeConfig `yaml:"linux_cache_volumes"`
	MacosCachePath    string              `yaml:"macos_cache_path"`
	MacosCacheVolumes []CacheVolumeConfig `yaml:"macos_cache_volumes"`
}
|
||||||
|
|
||||||
|
// RunnerConfig controls how spawned runners register themselves.
type RunnerConfig struct {
	NamePrefix string `yaml:"name_prefix"` // prefix for generated runner names; defaults to "nscloud-"
	Executor   string `yaml:"executor"`    // forgejo-runner executor; defaults to "shell"
}
|
||||||
|
|
||||||
|
func Load(path string) (*Config, error) {
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var cfg Config
|
||||||
|
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cfg.Validate(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &cfg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate checks required fields and fills in defaults. It mutates the
// receiver, so call it once after unmarshalling (Load does this).
func (c *Config) Validate() error {
	// Service-level defaults.
	if c.Listen == "" {
		c.Listen = ":8080"
	}
	if c.Runner.NamePrefix == "" {
		c.Runner.NamePrefix = "nscloud-"
	}
	if c.Runner.Executor == "" {
		c.Runner.Executor = "shell"
	}

	// Forgejo connection settings: base URL and token are mandatory.
	if c.Forgejo.BaseURL == "" {
		return errors.New("forgejo.base_url is required")
	}
	if c.Forgejo.InstanceURL == "" {
		// Backwards-compatible default: assume runners can reach the same URL.
		c.Forgejo.InstanceURL = c.Forgejo.BaseURL
	}
	if c.Forgejo.Token == "" {
		return errors.New("forgejo.token is required")
	}
	if c.Forgejo.Timeout.Duration == 0 {
		c.Forgejo.Timeout.Duration = 30 * time.Second
	}
	// Reject structurally invalid scope combinations early; the converted
	// scope itself is discarded here.
	if _, err := c.Forgejo.DefaultScope.ToScope(); err != nil {
		return err
	}

	// Namespace Cloud defaults.
	if c.Namespace.NSCBinary == "" {
		c.Namespace.NSCBinary = "nsc"
	}
	if c.Namespace.Image == "" {
		c.Namespace.Image = "code.forgejo.org/forgejo/runner:11"
	}
	if c.Namespace.MacosBaseImageID == "" {
		c.Namespace.MacosBaseImageID = "tahoe"
	}
	if c.Namespace.MacosMachineArch == "" {
		c.Namespace.MacosMachineArch = "arm64"
	}
	if c.Namespace.Duration.Duration == 0 {
		c.Namespace.Duration.Duration = 30 * time.Minute
	}
	if c.Namespace.MaxParallel <= 0 {
		c.Namespace.MaxParallel = 4
	}

	// Linux cache defaults. Note the volume defaults reference
	// LinuxCachePath, so the path must be defaulted first.
	if c.Namespace.LinuxCachePath == "" {
		c.Namespace.LinuxCachePath = "/var/cache/burrow"
	}
	if len(c.Namespace.LinuxCacheVolumes) == 0 {
		c.Namespace.LinuxCacheVolumes = []CacheVolumeConfig{
			{
				Tag:        "burrow-forgejo-linux-nix-v2",
				MountPoint: "/nix",
				SizeGb:     80,
			},
			{
				Tag:        "burrow-forgejo-linux-cache-v2",
				MountPoint: c.Namespace.LinuxCachePath,
				SizeGb:     80,
			},
		}
	}

	// macOS cache defaults, same ordering constraint as above.
	if c.Namespace.MacosCachePath == "" {
		c.Namespace.MacosCachePath = "/Users/runner/.cache/burrow"
	}
	if len(c.Namespace.MacosCacheVolumes) == 0 {
		c.Namespace.MacosCacheVolumes = []CacheVolumeConfig{
			{
				Tag:        "burrow-forgejo-macos-shared-v1",
				MountPoint: c.Namespace.MacosCachePath + "/shared",
				SizeGb:     80,
			},
			{
				Tag:        "burrow-forgejo-macos-macos-v1",
				MountPoint: c.Namespace.MacosCachePath + "/lane/macos",
				SizeGb:     80,
			},
			{
				Tag:        "burrow-forgejo-macos-ios-simulator-v1",
				MountPoint: c.Namespace.MacosCachePath + "/lane/ios-simulator",
				SizeGb:     80,
			},
		}
	}

	// Every cache volume — defaulted or user-supplied — must be fully
	// specified. The append-append builds a fresh combined slice so the
	// originals are not modified.
	for _, volume := range append(append([]CacheVolumeConfig{}, c.Namespace.LinuxCacheVolumes...), c.Namespace.MacosCacheVolumes...) {
		if strings.TrimSpace(volume.Tag) == "" {
			return errors.New("namespace cache volume tag is required")
		}
		if strings.TrimSpace(volume.MountPoint) == "" {
			return fmt.Errorf("namespace cache volume %q mount_point is required", volume.Tag)
		}
		if volume.SizeGb <= 0 {
			return fmt.Errorf("namespace cache volume %q size_gb must be positive", volume.Tag)
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
func (s ScopeConfig) ToScope() (forgejo.Scope, error) {
|
||||||
|
level := forgejo.ScopeLevel(strings.ToLower(s.Level))
|
||||||
|
switch level {
|
||||||
|
case forgejo.ScopeInstance:
|
||||||
|
return forgejo.Scope{Level: level}, nil
|
||||||
|
case forgejo.ScopeOrganization:
|
||||||
|
if s.Owner == "" {
|
||||||
|
return forgejo.Scope{}, errors.New("forgejo default scope requires owner for organization level")
|
||||||
|
}
|
||||||
|
return forgejo.Scope{Level: level, Owner: s.Owner}, nil
|
||||||
|
case forgejo.ScopeRepository:
|
||||||
|
if s.Owner == "" || s.Name == "" {
|
||||||
|
return forgejo.Scope{}, errors.New("forgejo default scope requires owner and name for repository level")
|
||||||
|
}
|
||||||
|
return forgejo.Scope{Level: level, Owner: s.Owner, Name: s.Name}, nil
|
||||||
|
default:
|
||||||
|
return forgejo.Scope{}, fmt.Errorf("unknown scope level %q", s.Level)
|
||||||
|
}
|
||||||
|
}
|
||||||
41
services/forgejo-nsc/internal/config/config_test.go
Normal file
41
services/forgejo-nsc/internal/config/config_test.go
Normal file
|
|
@ -0,0 +1,41 @@
|
||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestLoadConfig exercises Load end-to-end: writing a YAML file, parsing
// it, and checking that both plain fields and Duration values survive.
func TestLoadConfig(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "config.yaml")
	// NOTE(review): the original literal's indentation was lost in transit;
	// reconstructed with standard 2-space YAML nesting — verify against the
	// repository copy.
	content := `
listen: ":9090"
forgejo:
  base_url: https://forgejo.test
  token: abc
  default_scope:
    level: instance
namespace:
  nsc_binary: /usr/bin/nsc
  image: ghcr.io/forgejo/runner:3
  duration: 15m
runner:
  name_prefix: custom-
`
	if err := os.WriteFile(path, []byte(content), 0o600); err != nil {
		t.Fatal(err)
	}

	cfg, err := Load(path)
	if err != nil {
		t.Fatalf("Load() error = %v", err)
	}
	if cfg.Listen != ":9090" {
		t.Fatalf("unexpected listen addr: %s", cfg.Listen)
	}
	// "15m" must round-trip through the custom Duration unmarshaller.
	if cfg.Namespace.Duration.Duration != 15*time.Minute {
		t.Fatalf("duration parsing failed: %s", cfg.Namespace.Duration.Duration)
	}
}
|
||||||
454
services/forgejo-nsc/internal/forgejo/client.go
Normal file
454
services/forgejo-nsc/internal/forgejo/client.go
Normal file
|
|
@ -0,0 +1,454 @@
|
||||||
|
package forgejo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ScopeLevel identifies which Forgejo API namespace a Scope addresses.
type ScopeLevel string

const (
	ScopeInstance     ScopeLevel = "instance"
	ScopeOrganization ScopeLevel = "organization"
	ScopeRepository   ScopeLevel = "repository"
)

// Scope addresses a Forgejo instance, organization, or repository.
// Owner is required for organization/repository levels; Name only for
// repository level.
type Scope struct {
	Level ScopeLevel
	Owner string
	Name  string
}
|
||||||
|
|
||||||
|
// Client is a minimal Forgejo REST API client covering the features this
// service needs: runner listing, job listing, registration tokens, and
// webhook management.
type Client struct {
	baseURL *url.URL     // parsed API base URL
	token   string       // access token, sent as "token <value>"
	client  *http.Client // HTTP client; replaceable via WithHTTPClient
}
|
||||||
|
|
||||||
|
// Runner mirrors the fields of Forgejo's action-runner API object that the
// autoscaler reads.
type Runner struct {
	ID     int64         `json:"id"`
	Name   string        `json:"name"`
	Status string        `json:"status"` // e.g. "online"
	Busy   bool          `json:"busy"`
	Labels []RunnerLabel `json:"labels"`
}

// RunnerLabel is a single label attached to a runner.
type RunnerLabel struct {
	Name string `json:"name"`
}

// RunJob mirrors a Forgejo action job as returned by the jobs API.
type RunJob struct {
	ID     int64    `json:"id"`
	Name   string   `json:"name"`
	RunsOn []string `json:"runs_on"` // labels the job requires
	Status string   `json:"status"`  // e.g. "waiting", "queued"
	TaskID int64    `json:"task_id"`
}

// WebhookConfig describes the webhook this client should ensure exists.
type WebhookConfig struct {
	URL         string   // delivery URL; empty disables webhook management
	ContentType string   // hook payload content type
	Events      []string // subscribed event names
	Active      bool     // whether the hook is enabled
}
|
||||||
|
|
||||||
|
// Option customises a Client during construction.
type Option func(*Client)

// WithHTTPClient overrides the default HTTP client. A nil argument is
// ignored, keeping the default.
func WithHTTPClient(httpClient *http.Client) Option {
	return func(c *Client) {
		if httpClient != nil {
			c.client = httpClient
		}
	}
}
|
||||||
|
|
||||||
|
func NewClient(rawURL, token string, opts ...Option) (*Client, error) {
|
||||||
|
if rawURL == "" {
|
||||||
|
return nil, errors.New("forgejo base URL is required")
|
||||||
|
}
|
||||||
|
|
||||||
|
u, err := url.Parse(rawURL)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
client := &Client{
|
||||||
|
baseURL: u,
|
||||||
|
token: strings.TrimSpace(token),
|
||||||
|
client: &http.Client{
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(client)
|
||||||
|
}
|
||||||
|
|
||||||
|
if client.token == "" {
|
||||||
|
return nil, errors.New("forgejo token is required")
|
||||||
|
}
|
||||||
|
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// registrationTokenResponse is the JSON body returned by Forgejo's
// registration-token endpoints.
type registrationTokenResponse struct {
	Token string    `json:"token"`
	TTL   time.Time `json:"expires_at"` // expiry reported by Forgejo; currently unused here
}
|
||||||
|
|
||||||
|
func (c *Client) RegistrationToken(ctx context.Context, scope Scope) (string, error) {
|
||||||
|
endpoint, err := c.registrationEndpoint(scope)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", fmt.Sprintf("token %s", c.token))
|
||||||
|
req.Header.Set("Accept", "application/json")
|
||||||
|
|
||||||
|
resp, err := c.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode >= 400 {
|
||||||
|
return "", fmt.Errorf("forgejo returned %s", resp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var decoded registrationTokenResponse
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&decoded); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if decoded.Token == "" {
|
||||||
|
return "", errors.New("forgejo response missing token")
|
||||||
|
}
|
||||||
|
|
||||||
|
return decoded.Token, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) ListRunners(ctx context.Context, scope Scope) ([]Runner, error) {
|
||||||
|
endpoint, err := c.runnersEndpoint(scope)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", fmt.Sprintf("token %s", c.token))
|
||||||
|
req.Header.Set("Accept", "application/json")
|
||||||
|
|
||||||
|
resp, err := c.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode >= 400 {
|
||||||
|
return nil, fmt.Errorf("forgejo returned %s", resp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var decoded []Runner
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&decoded); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return decoded, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) ListRunJobs(ctx context.Context, scope Scope, labels []string) ([]RunJob, error) {
|
||||||
|
endpoint, err := c.runJobsEndpoint(scope)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(labels) > 0 {
|
||||||
|
query := req.URL.Query()
|
||||||
|
query.Set("labels", strings.Join(labels, ","))
|
||||||
|
req.URL.RawQuery = query.Encode()
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", fmt.Sprintf("token %s", c.token))
|
||||||
|
req.Header.Set("Accept", "application/json")
|
||||||
|
|
||||||
|
resp, err := c.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode >= 400 {
|
||||||
|
return nil, fmt.Errorf("forgejo returned %s", resp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var decoded []RunJob
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&decoded); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if decoded == nil {
|
||||||
|
decoded = []RunJob{}
|
||||||
|
}
|
||||||
|
return decoded, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) EnsureWebhook(ctx context.Context, scope Scope, cfg WebhookConfig, secret string) error {
|
||||||
|
if cfg.URL == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
hooks, err := c.listWebhooks(ctx, scope)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hook := range hooks {
|
||||||
|
if strings.EqualFold(hook.Config.URL, cfg.URL) {
|
||||||
|
return c.updateWebhook(ctx, scope, hook.ID, cfg, secret)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.createWebhook(ctx, scope, cfg, secret)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) registrationEndpoint(scope Scope) (string, error) {
|
||||||
|
var segments []string
|
||||||
|
switch scope.Level {
|
||||||
|
case ScopeRepository:
|
||||||
|
if scope.Owner == "" || scope.Name == "" {
|
||||||
|
return "", errors.New("repository scope requires owner and name")
|
||||||
|
}
|
||||||
|
segments = []string{"api", "v1", "repos", scope.Owner, scope.Name, "actions", "runners", "registration-token"}
|
||||||
|
case ScopeOrganization:
|
||||||
|
if scope.Owner == "" {
|
||||||
|
return "", errors.New("organization scope requires owner")
|
||||||
|
}
|
||||||
|
segments = []string{"api", "v1", "orgs", scope.Owner, "actions", "runners", "registration-token"}
|
||||||
|
case ScopeInstance:
|
||||||
|
segments = []string{"api", "v1", "admin", "actions", "runners", "registration-token"}
|
||||||
|
default:
|
||||||
|
return "", fmt.Errorf("unsupported scope level %q", scope.Level)
|
||||||
|
}
|
||||||
|
|
||||||
|
clone := *c.baseURL
|
||||||
|
clone.Path = path.Join(append([]string{clone.Path}, segments...)...)
|
||||||
|
return clone.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// webhook is the subset of Forgejo's hook API object needed to match
// existing hooks by delivery URL.
type webhook struct {
	ID     int64                `json:"id"`
	Config webhookConfigPayload `json:"config"`
}

// webhookConfigPayload mirrors the "config" object of a Forgejo hook.
type webhookConfigPayload struct {
	URL         string `json:"url"`
	ContentType string `json:"content_type"`
}
|
||||||
|
|
||||||
|
func (c *Client) listWebhooks(ctx context.Context, scope Scope) ([]webhook, error) {
|
||||||
|
endpoint, err := c.webhooksEndpoint(scope)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", fmt.Sprintf("token %s", c.token))
|
||||||
|
req.Header.Set("Accept", "application/json")
|
||||||
|
|
||||||
|
resp, err := c.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode >= 400 {
|
||||||
|
return nil, fmt.Errorf("forgejo returned %s", resp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var hooks []webhook
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&hooks); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return hooks, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) createWebhook(ctx context.Context, scope Scope, cfg WebhookConfig, secret string) error {
|
||||||
|
payload := webhookRequestPayload{
|
||||||
|
Type: "gitea",
|
||||||
|
Config: map[string]string{
|
||||||
|
"url": cfg.URL,
|
||||||
|
"content_type": cfg.ContentType,
|
||||||
|
"secret": secret,
|
||||||
|
"insecure_ssl": "0",
|
||||||
|
},
|
||||||
|
Events: cfg.Events,
|
||||||
|
Active: cfg.Active,
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := json.Marshal(payload)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
endpoint, err := c.webhooksEndpoint(scope)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", fmt.Sprintf("token %s", c.token))
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp, err := c.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode >= 400 {
|
||||||
|
return fmt.Errorf("forgejo returned %s", resp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) updateWebhook(ctx context.Context, scope Scope, id int64, cfg WebhookConfig, secret string) error {
|
||||||
|
payload := webhookRequestPayload{
|
||||||
|
Type: "gitea",
|
||||||
|
Config: map[string]string{
|
||||||
|
"url": cfg.URL,
|
||||||
|
"content_type": cfg.ContentType,
|
||||||
|
"secret": secret,
|
||||||
|
"insecure_ssl": "0",
|
||||||
|
},
|
||||||
|
Events: cfg.Events,
|
||||||
|
Active: cfg.Active,
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := json.Marshal(payload)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
endpoint, err := c.webhooksEndpoint(scope)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodPatch, fmt.Sprintf("%s/%d", endpoint, id), bytes.NewReader(body))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req.Header.Set("Authorization", fmt.Sprintf("token %s", c.token))
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
resp, err := c.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode >= 400 {
|
||||||
|
return fmt.Errorf("forgejo returned %s", resp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) webhooksEndpoint(scope Scope) (string, error) {
|
||||||
|
var segments []string
|
||||||
|
switch scope.Level {
|
||||||
|
case ScopeRepository:
|
||||||
|
if scope.Owner == "" || scope.Name == "" {
|
||||||
|
return "", errors.New("repository scope requires owner and name")
|
||||||
|
}
|
||||||
|
segments = []string{"api", "v1", "repos", scope.Owner, scope.Name, "hooks"}
|
||||||
|
case ScopeOrganization:
|
||||||
|
if scope.Owner == "" {
|
||||||
|
return "", errors.New("organization scope requires owner")
|
||||||
|
}
|
||||||
|
segments = []string{"api", "v1", "orgs", scope.Owner, "hooks"}
|
||||||
|
default:
|
||||||
|
return "", fmt.Errorf("webhook management not supported for scope level %q", scope.Level)
|
||||||
|
}
|
||||||
|
|
||||||
|
clone := *c.baseURL
|
||||||
|
clone.Path = path.Join(append([]string{clone.Path}, segments...)...)
|
||||||
|
return clone.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// webhookRequestPayload is the JSON body sent to the Forgejo webhook API
// when creating or updating a hook.
type webhookRequestPayload struct {
	// Type is the hook kind (e.g. "forgejo"); serialized as "type".
	Type string `json:"type"`
	// Config carries string settings such as "url", "content_type",
	// and "insecure_ssl".
	Config map[string]string `json:"config"`
	// Events lists the event names that trigger the hook.
	Events []string `json:"events"`
	// Active toggles whether the hook fires at all.
	Active bool `json:"active"`
}
|
||||||
|
|
||||||
|
func (c *Client) runnersEndpoint(scope Scope) (string, error) {
|
||||||
|
var segments []string
|
||||||
|
switch scope.Level {
|
||||||
|
case ScopeRepository:
|
||||||
|
if scope.Owner == "" || scope.Name == "" {
|
||||||
|
return "", errors.New("repository scope requires owner and name")
|
||||||
|
}
|
||||||
|
segments = []string{"api", "v1", "repos", scope.Owner, scope.Name, "actions", "runners"}
|
||||||
|
case ScopeOrganization:
|
||||||
|
if scope.Owner == "" {
|
||||||
|
return "", errors.New("organization scope requires owner")
|
||||||
|
}
|
||||||
|
segments = []string{"api", "v1", "orgs", scope.Owner, "actions", "runners"}
|
||||||
|
case ScopeInstance:
|
||||||
|
segments = []string{"api", "v1", "actions", "runners"}
|
||||||
|
default:
|
||||||
|
return "", fmt.Errorf("unsupported scope level %q", scope.Level)
|
||||||
|
}
|
||||||
|
|
||||||
|
clone := *c.baseURL
|
||||||
|
clone.Path = path.Join(append([]string{clone.Path}, segments...)...)
|
||||||
|
return clone.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) runJobsEndpoint(scope Scope) (string, error) {
|
||||||
|
var segments []string
|
||||||
|
switch scope.Level {
|
||||||
|
case ScopeRepository:
|
||||||
|
if scope.Owner == "" || scope.Name == "" {
|
||||||
|
return "", errors.New("repository scope requires owner and name")
|
||||||
|
}
|
||||||
|
segments = []string{"api", "v1", "repos", scope.Owner, scope.Name, "actions", "runners", "jobs"}
|
||||||
|
case ScopeOrganization:
|
||||||
|
if scope.Owner == "" {
|
||||||
|
return "", errors.New("organization scope requires owner")
|
||||||
|
}
|
||||||
|
segments = []string{"api", "v1", "orgs", scope.Owner, "actions", "runners", "jobs"}
|
||||||
|
default:
|
||||||
|
return "", fmt.Errorf("run jobs not supported for scope level %q", scope.Level)
|
||||||
|
}
|
||||||
|
|
||||||
|
clone := *c.baseURL
|
||||||
|
clone.Path = path.Join(append([]string{clone.Path}, segments...)...)
|
||||||
|
return clone.String(), nil
|
||||||
|
}
|
||||||
508
services/forgejo-nsc/internal/nsc/dispatcher.go
Normal file
508
services/forgejo-nsc/internal/nsc/dispatcher.go
Normal file
|
|
@ -0,0 +1,508 @@
|
||||||
|
package nsc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log/slog"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"golang.org/x/sync/semaphore"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Options configures a Dispatcher. BinaryPath and DefaultImage are required;
// every other field has a sensible default applied by NewDispatcher.
type Options struct {
	// BinaryPath is the path to the `nsc` CLI executable. Required.
	BinaryPath string
	// DefaultImage is the container image used for Linux runners. Required.
	DefaultImage string
	// DefaultMachine is the default `nsc` machine type (e.g. "4x8").
	DefaultMachine string
	// DefaultDuration caps a runner's lifetime when the request omits one.
	DefaultDuration time.Duration
	// WorkDir, when set, is exported to runners as FORGEJO_RUNNER_WORKDIR.
	WorkDir string
	// MaxParallel bounds concurrently launched runners (semaphore weight).
	MaxParallel int64
	// RunnerNamePrefix prefixes generated runner names (default "nscloud-").
	RunnerNamePrefix string
	// Executor is passed through as FORGEJO_RUNNER_EXEC (default "shell").
	Executor string
	// Network, when set, is forwarded to `nsc run --network`.
	Network string
	// ComputeBaseURL is the Namespace Compute API endpoint for macOS runners.
	ComputeBaseURL string
	// MacosBaseImageID names the macOS base image/preset (default "tahoe"),
	// or an explicit selector list such as "macos.version=26.x".
	MacosBaseImageID string
	// MacosMachineArch is the macOS machine architecture (default "arm64").
	MacosMachineArch string
	// LinuxCachePath is the default NSC_CACHE_PATH inside Linux runners.
	LinuxCachePath string
	// LinuxCacheVolumes are cache volumes attached to Linux runners.
	LinuxCacheVolumes []CacheVolume
	// MacosCachePath is the default NSC_CACHE_PATH inside macOS runners.
	MacosCachePath string
	// MacosCacheVolumes are cache volumes attached to macOS runners.
	MacosCacheVolumes []CacheVolume
	// Logger receives structured dispatch logs; nil discards all output.
	Logger *slog.Logger
}
|
||||||
|
|
||||||
|
// CacheVolume describes a persistent cache volume mounted into a runner.
// Entries with an empty Tag/MountPoint or non-positive SizeGb are skipped.
type CacheVolume struct {
	// Tag identifies the cache volume on the Namespace side.
	Tag string
	// MountPoint is the path where the volume appears inside the runner.
	MountPoint string
	// SizeGb is the volume size in gigabytes; must be > 0 to take effect.
	SizeGb int64
}
|
||||||
|
|
||||||
|
// LaunchRequest describes a single ephemeral Forgejo runner to launch.
type LaunchRequest struct {
	// Token is the Forgejo runner registration token. Required.
	Token string
	// InstanceURL is the Forgejo instance the runner registers with. Required.
	InstanceURL string
	// Labels are the runner labels; they also select the platform
	// (macOS/Windows labels are detected by prefix).
	Labels []string
	// Duration caps the runner lifetime; zero falls back to the dispatcher
	// default.
	Duration time.Duration
	// MachineType overrides the dispatcher's default machine shape.
	MachineType string
	// Image overrides the dispatcher's default runner image (Linux path).
	Image string
	// ExtraEnv adds/overrides environment variables inside the runner.
	ExtraEnv map[string]string
}
|
||||||
|
|
||||||
|
// Dispatcher launches ephemeral Forgejo runners on Namespace compute,
// bounding concurrency with a weighted semaphore. Construct with
// NewDispatcher; the zero value is not usable.
type Dispatcher struct {
	opts Options             // validated, defaulted configuration
	sem  *semaphore.Weighted // limits concurrent LaunchRunner calls
	log  *slog.Logger        // never nil after NewDispatcher
}
|
||||||
|
|
||||||
|
func NewDispatcher(opts Options) (*Dispatcher, error) {
|
||||||
|
if opts.BinaryPath == "" {
|
||||||
|
return nil, errors.New("nsc binary path is required")
|
||||||
|
}
|
||||||
|
if opts.DefaultImage == "" {
|
||||||
|
return nil, errors.New("default Namespace runner image is required")
|
||||||
|
}
|
||||||
|
if opts.RunnerNamePrefix == "" {
|
||||||
|
opts.RunnerNamePrefix = "nscloud-"
|
||||||
|
}
|
||||||
|
if opts.Executor == "" {
|
||||||
|
opts.Executor = "shell"
|
||||||
|
}
|
||||||
|
if opts.MacosBaseImageID == "" {
|
||||||
|
opts.MacosBaseImageID = "tahoe"
|
||||||
|
}
|
||||||
|
if opts.MacosMachineArch == "" {
|
||||||
|
opts.MacosMachineArch = "arm64"
|
||||||
|
}
|
||||||
|
if opts.MaxParallel <= 0 {
|
||||||
|
opts.MaxParallel = 4
|
||||||
|
}
|
||||||
|
if opts.DefaultDuration == 0 {
|
||||||
|
opts.DefaultDuration = 30 * time.Minute
|
||||||
|
}
|
||||||
|
if opts.LinuxCachePath == "" {
|
||||||
|
opts.LinuxCachePath = "/var/cache/burrow"
|
||||||
|
}
|
||||||
|
if opts.MacosCachePath == "" {
|
||||||
|
opts.MacosCachePath = "/Users/runner/.cache/burrow"
|
||||||
|
}
|
||||||
|
logger := opts.Logger
|
||||||
|
if logger == nil {
|
||||||
|
logger = slog.New(slog.NewTextHandler(io.Discard, nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Dispatcher{
|
||||||
|
opts: opts,
|
||||||
|
sem: semaphore.NewWeighted(opts.MaxParallel),
|
||||||
|
log: logger,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LaunchRunner provisions one ephemeral Forgejo runner for req and blocks
// until the underlying compute instance has finished (or the context is
// cancelled). It returns the generated runner name.
//
// Platform routing: Windows labels go through WinRM, macOS labels through the
// Compute API (with an `nsc create`+SSH fallback), and everything else runs
// the Linux `nsc run` bootstrap path below. Concurrency is bounded by the
// dispatcher's semaphore for the full duration of the launch.
func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (string, error) {
	if req.Token == "" {
		return "", errors.New("registration token is required")
	}
	if req.InstanceURL == "" {
		return "", errors.New("forgejo instance url is required")
	}
	// Acquire a launch slot; this blocks until capacity frees up or ctx ends.
	if err := d.sem.Acquire(ctx, 1); err != nil {
		return "", err
	}
	defer d.sem.Release(1)

	runnerName := d.generateName()
	duration := req.Duration
	if duration == 0 {
		duration = d.opts.DefaultDuration
	}
	// Request values win over dispatcher defaults; blanks fall through.
	machineType := choose(req.MachineType, d.opts.DefaultMachine)
	image := choose(req.Image, d.opts.DefaultImage)
	if req.ExtraEnv == nil {
		req.ExtraEnv = make(map[string]string)
	}

	// Windows runners take a separate WinRM-based provisioning path.
	if hasWindowsLabel(req.Labels) {
		if err := d.launchWindowsRunnerViaWinRM(ctx, runnerName, req, duration, machineType); err != nil {
			return "", err
		}
		return runnerName, nil
	}

	if hasMacOSLabel(req.Labels) {
		if _, ok := req.ExtraEnv["NSC_CACHE_PATH"]; !ok {
			req.ExtraEnv["NSC_CACHE_PATH"] = d.opts.MacosCachePath
		}
		// Compute macOS shapes differ from the Linux "run" defaults. If the request
		// didn't specify a machine type, ensure we pick a macOS-valid default.
		if machineType == "" || machineType == d.opts.DefaultMachine {
			machineType = "6x14"
		}

		// Prefer the Compute API path because it uses the service token (NSC_TOKEN_FILE)
		// and does not require an interactive `nsc login` session.
		if err := d.launchMacOSRunner(ctx, runnerName, req, duration, machineType); err != nil {
			d.log.Warn("macos compute launch failed; falling back to nsc create+ssh", "runner", runnerName, "err", err)
			if err := d.launchMacOSRunnerViaNSC(ctx, runnerName, req, duration, machineType); err != nil {
				return "", err
			}
		}
		return runnerName, nil
	}

	// Linux path from here on.
	if _, ok := req.ExtraEnv["NSC_CACHE_PATH"]; !ok {
		req.ExtraEnv["NSC_CACHE_PATH"] = d.opts.LinuxCachePath
	}

	// Environment handed to the bootstrap script; ExtraEnv may override the
	// FORGEJO_* values intentionally.
	env := map[string]string{
		"FORGEJO_INSTANCE_URL":  req.InstanceURL,
		"FORGEJO_RUNNER_TOKEN":  req.Token,
		"FORGEJO_RUNNER_NAME":   runnerName,
		"FORGEJO_RUNNER_LABELS": strings.Join(req.Labels, ","),
		"FORGEJO_RUNNER_EXEC":   d.opts.Executor,
	}
	for k, v := range req.ExtraEnv {
		env[k] = v
	}

	script := d.bootstrapScript()
	args := []string{
		"run",
		"--wait",
		"--output",
		"json",
		"--duration", duration.String(),
		"--image", image,
		"--name", runnerName,
		"--user", "root",
	}
	if machineType != "" {
		args = append(args, "--machine_type", machineType)
	}
	if d.opts.Network != "" {
		args = append(args, "--network", d.opts.Network)
	}
	args = appendVolumeArgs(args, d.opts.LinuxCacheVolumes)
	// Empty values are dropped so the CLI never sees "-e KEY=".
	for key, value := range env {
		if value == "" {
			continue
		}
		args = append(args, "-e", fmt.Sprintf("%s=%s", key, value))
	}
	if d.opts.WorkDir != "" {
		args = append(args, "-e", fmt.Sprintf("FORGEJO_RUNNER_WORKDIR=%s", d.opts.WorkDir))
	}

	args = append(args, "--", "/bin/sh", "-c", script)

	cmd := exec.CommandContext(ctx, d.opts.BinaryPath, args...)
	// The Linux `nsc run` path uses the CLI auth flow. Keep using the service
	// account's refreshed Namespace login session instead of forcing the
	// short-lived NSC_TOKEN_FILE bearer token into CLI requests.
	cmd.Env = nscCLIEnv()
	var buf bytes.Buffer
	cmd.Stdout = &buf
	cmd.Stderr = &buf

	start := time.Now()
	d.log.Info("launching Namespace runner",
		"runner", runnerName,
		"machine_type", machineType,
		"image", image,
	)
	err := cmd.Run()
	if err != nil {
		return "", fmt.Errorf("nsc run failed: %w\n%s", err, buf.String())
	}

	if output := strings.TrimSpace(buf.String()); output != "" {
		d.log.Info("runner output", "runner", runnerName, "output", output)
	}

	d.log.Info("runner completed",
		"runner", runnerName,
		"duration", time.Since(start),
	)

	// Best-effort cleanup: if we can extract an instance ID from the CLI
	// output, wait (detached from ctx, bounded by duration) for it to stop
	// and then destroy it so nothing keeps billing.
	if instanceID := parseInstanceID(buf.String()); instanceID != "" {
		waitCtx, cancel := context.WithTimeout(context.Background(), duration)
		defer cancel()
		stopped := d.waitForInstanceStop(waitCtx, runnerName, instanceID, duration)
		if !stopped {
			d.log.Warn("runner did not stop before timeout", "runner", runnerName, "instance", instanceID)
		}
		d.destroyInstance(waitCtx, runnerName, instanceID)
	}

	return runnerName, nil
}
|
||||||
|
|
||||||
|
func (d *Dispatcher) generateName() string {
|
||||||
|
id := strings.ReplaceAll(uuid.NewString(), "-", "")
|
||||||
|
return d.opts.RunnerNamePrefix + id[:12]
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseInstanceID(output string) string {
|
||||||
|
if jsonBlob := extractJSON(output); jsonBlob != "" {
|
||||||
|
var payload struct {
|
||||||
|
ClusterID string `json:"cluster_id"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal([]byte(jsonBlob), &payload); err == nil && payload.ClusterID != "" {
|
||||||
|
return payload.ClusterID
|
||||||
|
}
|
||||||
|
}
|
||||||
|
const marker = "ID:"
|
||||||
|
idx := strings.Index(output, marker)
|
||||||
|
if idx == -1 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
rest := strings.TrimSpace(output[idx+len(marker):])
|
||||||
|
if rest == "" {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
fields := strings.Fields(rest)
|
||||||
|
if len(fields) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return fields[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// extractJSON returns the widest substring of output that starts at the first
// '[' or '{' and ends at the last ']' or '}'. It is a heuristic for pulling a
// JSON document out of mixed CLI output; it returns "" when no such span
// exists.
func extractJSON(output string) string {
	s := strings.TrimSpace(output)
	if s == "" {
		return ""
	}
	first := strings.IndexAny(s, "[{")
	last := strings.LastIndexAny(s, "]}")
	if first == -1 || last == -1 || last < first {
		return ""
	}
	return s[first : last+1]
}
|
||||||
|
|
||||||
|
// describeResponse is one entry of `nsc describe --output json` output.
type describeResponse struct {
	// Resource names the described resource.
	Resource string `json:"resource"`
	// PerResource maps resource keys to their status details.
	PerResource map[string]describeTarget `json:"per_resource"`
}
|
||||||
|
|
||||||
|
// describeTarget is the per-resource status block of an `nsc describe` entry.
type describeTarget struct {
	// Tombstone is non-empty once the resource has been torn down.
	Tombstone string `json:"tombstone"`
	// Container lists the container states for this resource.
	Container []describeContainer `json:"container"`
}
|
||||||
|
|
||||||
|
// describeContainer is one container's state within an `nsc describe` entry.
type describeContainer struct {
	// Status is the container state string (e.g. "stopped").
	Status string `json:"status"`
	// TerminatedAt is non-empty once the container has terminated.
	TerminatedAt string `json:"terminated_at"`
}
|
||||||
|
|
||||||
|
func instanceStopped(output string) bool {
|
||||||
|
jsonBlob := extractJSON(output)
|
||||||
|
if jsonBlob == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
var payload []describeResponse
|
||||||
|
if err := json.Unmarshal([]byte(jsonBlob), &payload); err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(payload) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, entry := range payload {
|
||||||
|
for _, target := range entry.PerResource {
|
||||||
|
if target.Tombstone != "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if len(target.Container) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, container := range target.Container {
|
||||||
|
if container.Status != "stopped" && container.TerminatedAt == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Dispatcher) waitForInstanceStop(ctx context.Context, runnerName, instanceID string, timeout time.Duration) bool {
|
||||||
|
if timeout <= 0 {
|
||||||
|
timeout = d.opts.DefaultDuration
|
||||||
|
}
|
||||||
|
deadline := time.Now().Add(timeout)
|
||||||
|
ticker := time.NewTicker(10 * time.Second)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
stopped, err := d.checkInstanceStopped(ctx, instanceID)
|
||||||
|
if err != nil {
|
||||||
|
d.log.Warn("runner stop check failed", "runner", runnerName, "instance", instanceID, "err", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if stopped {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if time.Now().After(deadline) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return false
|
||||||
|
case <-ticker.C:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Dispatcher) checkInstanceStopped(ctx context.Context, instanceID string) (bool, error) {
|
||||||
|
cmd := exec.CommandContext(ctx, d.opts.BinaryPath, "describe", "--output", "json", instanceID)
|
||||||
|
var buf bytes.Buffer
|
||||||
|
cmd.Stdout = &buf
|
||||||
|
cmd.Stderr = &buf
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
output := strings.ToLower(buf.String())
|
||||||
|
if strings.Contains(output, "destroyed") || strings.Contains(output, "not found") {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, fmt.Errorf("nsc describe failed: %w\n%s", err, strings.TrimSpace(buf.String()))
|
||||||
|
}
|
||||||
|
return instanceStopped(buf.String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Dispatcher) destroyInstance(ctx context.Context, runnerName, instanceID string) {
|
||||||
|
cmd := exec.CommandContext(ctx, d.opts.BinaryPath, "destroy", "--force", instanceID)
|
||||||
|
var buf bytes.Buffer
|
||||||
|
cmd.Stdout = &buf
|
||||||
|
cmd.Stderr = &buf
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
d.log.Warn("runner destroy failed", "runner", runnerName, "instance", instanceID, "err", err, "output", strings.TrimSpace(buf.String()))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if output := strings.TrimSpace(buf.String()); output != "" {
|
||||||
|
d.log.Info("runner destroyed", "runner", runnerName, "instance", instanceID, "output", output)
|
||||||
|
} else {
|
||||||
|
d.log.Info("runner destroyed", "runner", runnerName, "instance", instanceID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// choose returns the first argument that is not blank (empty or whitespace
// only), preserving its original (untrimmed) form, or "" if none qualifies.
func choose(values ...string) string {
	for _, candidate := range values {
		if strings.TrimSpace(candidate) == "" {
			continue
		}
		return candidate
	}
	return ""
}
|
||||||
|
|
||||||
|
func appendVolumeArgs(args []string, volumes []CacheVolume) []string {
|
||||||
|
for _, volume := range volumes {
|
||||||
|
if strings.TrimSpace(volume.Tag) == "" || strings.TrimSpace(volume.MountPoint) == "" || volume.SizeGb <= 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
args = append(args, "--volume", fmt.Sprintf("cache:%s:%s:%d", volume.Tag, volume.MountPoint, volume.SizeGb))
|
||||||
|
}
|
||||||
|
return args
|
||||||
|
}
|
||||||
|
|
||||||
|
// bootstrapScript builds the /bin/sh script executed inside a Linux runner
// instance: it installs node/sudo/curl/xz/nix via apk if missing, sources the
// nix profile, writes a runner.yaml, registers the forgejo-runner against
// ${FORGEJO_INSTANCE_URL}, and then runs it in one-job or daemon mode.
// The script is assembled from three fixed fragments plus a label loop.
func (d *Dispatcher) bootstrapScript() string {
	var builder strings.Builder
	// Fragment 1: environment setup, tool installation, nix activation, and
	// the runner.yaml header. NOTE(review): the heredoc delimiter is quoted
	// ('EOF'), so ${FORGEJO_RUNNER_NAME} is written literally into
	// runner.yaml rather than expanded — presumably the `--name` flag passed
	// to `forgejo-runner register` below takes precedence; confirm.
	builder.WriteString(`set -euo pipefail
export HOME=/root
export USER=root
mkdir -p "${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}"
cd "${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}"

if ! command -v node >/dev/null 2>&1; then
  apk add --no-cache nodejs npm >/dev/null
fi
if ! command -v sudo >/dev/null 2>&1; then
  apk add --no-cache sudo bash >/dev/null
fi
if ! command -v curl >/dev/null 2>&1; then
  apk add --no-cache curl >/dev/null
fi
if ! command -v xz >/dev/null 2>&1; then
  apk add --no-cache xz >/dev/null
fi
if ! command -v nix >/dev/null 2>&1; then
  apk add --no-cache nix >/dev/null
fi
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
if [ -f /etc/profile.d/nix.sh ]; then
  # shellcheck disable=SC1091
  . /etc/profile.d/nix.sh
fi
if [ -f /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]; then
  # shellcheck disable=SC1091
  . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
fi
export PATH="/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}"
export NIX_CONFIG="experimental-features = nix-command flakes
accept-flake-config = true"
node --version >/dev/null
nix --version >/dev/null

cat > runner.yaml <<'EOF'
log:
  level: info
runner:
  file: .runner
  capacity: 1
  name: ${FORGEJO_RUNNER_NAME}
  labels:
EOF
`)
	// Fragment 2: normalize the executor ("shell" -> "host"), expand the
	// comma-separated label list, default each bare label's executor, append
	// each label to runner.yaml, and collect them into $resolved_labels.
	// NOTE(review): ${FORGEJO_RUNNER_LABELS//,/ } is a bash-style pattern
	// substitution; this relies on the instance's /bin/sh supporting it —
	// confirm against the runner image's shell.
	builder.WriteString(`runner_exec="${FORGEJO_RUNNER_EXEC:-host}"
if [ "$runner_exec" = "shell" ]; then
  runner_exec="host"
fi

resolved_labels=""
for label in ${FORGEJO_RUNNER_LABELS//,/ } ; do
  if [ -z "${label}" ]; then
    continue
  fi
  case "${label}" in
    *:*) resolved="${label}" ;;
    *) resolved="${label}:${runner_exec}" ;;
  esac
  echo "  - ${resolved}" >> runner.yaml
  if [ -z "${resolved_labels}" ]; then
    resolved_labels="${resolved}"
  else
    resolved_labels="${resolved_labels},${resolved}"
  fi
done
`)
	// Fragment 3: finish runner.yaml (cache disabled), register, then run in
	// the mode selected by FORGEJO_RUNNER_MODE (default one-job).
	builder.WriteString(`cat >> runner.yaml <<'EOF'
cache:
  enabled: false
EOF

forgejo-runner register \
  --no-interactive \
  --instance "${FORGEJO_INSTANCE_URL}" \
  --token "${FORGEJO_RUNNER_TOKEN}" \
  --name "${FORGEJO_RUNNER_NAME}" \
  --labels "${resolved_labels}" \
  --config runner.yaml

runner_mode="${FORGEJO_RUNNER_MODE:-one-job}"
case "$runner_mode" in
  one-job)
    forgejo-runner one-job --config runner.yaml
    ;;
  daemon)
    forgejo-runner daemon --config runner.yaml
    ;;
  *)
    echo "Unknown FORGEJO_RUNNER_MODE: ${runner_mode}" >&2
    exit 1
    ;;
esac
`)
	return builder.String()
}
|
||||||
738
services/forgejo-nsc/internal/nsc/macos.go
Normal file
738
services/forgejo-nsc/internal/nsc/macos.go
Normal file
|
|
@ -0,0 +1,738 @@
|
||||||
|
package nsc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
computev1betaconnect "buf.build/gen/go/namespace/cloud/connectrpc/go/proto/namespace/cloud/compute/v1beta/computev1betaconnect"
|
||||||
|
computev1beta "buf.build/gen/go/namespace/cloud/protocolbuffers/go/proto/namespace/cloud/compute/v1beta"
|
||||||
|
stdlib "buf.build/gen/go/namespace/cloud/protocolbuffers/go/proto/namespace/stdlib"
|
||||||
|
"connectrpc.com/connect"
|
||||||
|
"golang.org/x/crypto/ssh"
|
||||||
|
"google.golang.org/protobuf/types/known/timestamppb"
|
||||||
|
)
|
||||||
|
|
||||||
|
// hasMacOSLabel reports whether any label (after trimming whitespace) starts
// with the "namespace-profile-macos-" prefix, which routes the launch to the
// macOS provisioning path.
func hasMacOSLabel(labels []string) bool {
	for _, raw := range labels {
		if strings.HasPrefix(strings.TrimSpace(raw), "namespace-profile-macos-") {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// lockedBuffer is a bytes.Buffer guarded by a mutex so multiple goroutines
// can write to it concurrently. Must not be copied after first use.
type lockedBuffer struct {
	mu sync.Mutex   // guards b
	b  bytes.Buffer // accumulated output
}
|
||||||
|
|
||||||
|
// Write appends p to the buffer under the mutex, satisfying io.Writer for
// concurrent producers.
func (lb *lockedBuffer) Write(p []byte) (int, error) {
	lb.mu.Lock()
	defer lb.mu.Unlock()
	return lb.b.Write(p)
}
|
||||||
|
|
||||||
|
// Len returns the current number of buffered bytes under the mutex.
func (lb *lockedBuffer) Len() int {
	lb.mu.Lock()
	defer lb.mu.Unlock()
	return lb.b.Len()
}
|
||||||
|
|
||||||
|
// String returns a snapshot of the buffered contents under the mutex.
func (lb *lockedBuffer) String() string {
	lb.mu.Lock()
	defer lb.mu.Unlock()
	return lb.b.String()
}
|
||||||
|
|
||||||
|
func macosSupportDiskSelectors(baseImageID string) []*stdlib.Label {
|
||||||
|
id := strings.TrimSpace(baseImageID)
|
||||||
|
if id == "" {
|
||||||
|
id = "tahoe"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allow specifying selectors directly, e.g. "macos.version=26.x,image.with=xcode-26".
|
||||||
|
if strings.Contains(id, "=") {
|
||||||
|
var out []*stdlib.Label
|
||||||
|
for _, part := range strings.Split(id, ",") {
|
||||||
|
part = strings.TrimSpace(part)
|
||||||
|
if part == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name, value, ok := strings.Cut(part, "=")
|
||||||
|
name = strings.TrimSpace(name)
|
||||||
|
value = strings.TrimSpace(value)
|
||||||
|
if !ok || name == "" || value == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
out = append(out, &stdlib.Label{Name: name, Value: value})
|
||||||
|
}
|
||||||
|
if len(out) > 0 {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Human-friendly presets used by burrow config.
|
||||||
|
switch strings.ToLower(id) {
|
||||||
|
case "sonoma", "macos-14", "macos14", "14":
|
||||||
|
return []*stdlib.Label{{Name: "macos.version", Value: "14.x"}}
|
||||||
|
case "sequoia", "macos-15", "macos15", "15":
|
||||||
|
return []*stdlib.Label{{Name: "macos.version", Value: "15.x"}}
|
||||||
|
case "tahoe", "macos-26", "macos26", "26":
|
||||||
|
// Constrain to the Xcode 26 support disk explicitly, since Apple builds
|
||||||
|
// depend on Xcode being present and Compute currently errors if it can't
|
||||||
|
// resolve a support disk selection.
|
||||||
|
return []*stdlib.Label{{Name: "macos.version", Value: "26.x"}, {Name: "image.with", Value: "xcode-26"}}
|
||||||
|
default:
|
||||||
|
return []*stdlib.Label{{Name: "macos.version", Value: "26.x"}}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// macosComputeBaseImageID canonicalizes a configured macOS base-image ID for
// the Compute API. Blank input defaults to "tahoe"; a selector list (contains
// '=') yields "" because no single image ID can be inferred from it; known
// aliases map to their canonical names; anything else passes through as-is.
func macosComputeBaseImageID(baseImageID string) string {
	id := strings.TrimSpace(baseImageID)
	if id == "" {
		return "tahoe"
	}
	if strings.Contains(id, "=") {
		// Raw selectors: no canonical base image can be derived.
		return ""
	}
	switch strings.ToLower(id) {
	case "sonoma", "macos-14", "macos14", "14":
		return "sonoma"
	case "sequoia", "macos-15", "macos15", "15":
		return "sequoia"
	case "tahoe", "macos-26", "macos26", "26":
		return "tahoe"
	default:
		return id
	}
}
|
||||||
|
|
||||||
|
// macosWorkDir maps the dispatcher's configured work directory to a path
// valid on macOS. Blank input and the Linux default /var/lib/forgejo-runner
// become /tmp/forgejo-runner; any other value is used unchanged.
func macosWorkDir(workdir string) string {
	dir := strings.TrimSpace(workdir)
	if dir == "" || dir == "/var/lib/forgejo-runner" {
		return "/tmp/forgejo-runner"
	}
	return dir
}
|
||||||
|
|
||||||
|
// nscBearerTokenFile is the JSON on-host token format written by burrow:
// {"bearer_token":"..."}.
type nscBearerTokenFile struct {
	BearerToken string `json:"bearer_token"`
}
|
||||||
|
|
||||||
|
func readNSCBearerToken() (string, error) {
|
||||||
|
path := os.Getenv("NSC_TOKEN_FILE")
|
||||||
|
if path == "" {
|
||||||
|
return "", errors.New("NSC_TOKEN_FILE is required for macos runners")
|
||||||
|
}
|
||||||
|
raw, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("read NSC_TOKEN_FILE: %w", err)
|
||||||
|
}
|
||||||
|
trimmed := strings.TrimSpace(string(raw))
|
||||||
|
if trimmed == "" {
|
||||||
|
return "", errors.New("NSC_TOKEN_FILE is empty")
|
||||||
|
}
|
||||||
|
// Support the on-host format used by burrow: {"bearer_token":"..."}.
|
||||||
|
var parsed nscBearerTokenFile
|
||||||
|
if err := json.Unmarshal([]byte(trimmed), &parsed); err == nil && parsed.BearerToken != "" {
|
||||||
|
return parsed.BearerToken, nil
|
||||||
|
}
|
||||||
|
// Fallback: allow a raw bearer token.
|
||||||
|
return trimmed, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseMachineTypeCPUxMemGB parses a "CPUxMemoryGB" machine type such as
// "12x28" into a vCPU count and a memory size in megabytes. Any malformed
// input yields a descriptive error.
func parseMachineTypeCPUxMemGB(machineType string) (vcpu int32, memoryMB int32, err error) {
	pieces := strings.Split(machineType, "x")
	if len(pieces) != 2 {
		return 0, 0, fmt.Errorf("invalid machine_type %q: expected CPUxMemoryGB (e.g. 12x28)", machineType)
	}
	cpu, cpuErr := strconv.ParseInt(pieces[0], 10, 32)
	if cpuErr != nil {
		return 0, 0, fmt.Errorf("invalid machine_type %q: cpu: %w", machineType, cpuErr)
	}
	memGB, memErr := strconv.ParseInt(pieces[1], 10, 32)
	if memErr != nil {
		return 0, 0, fmt.Errorf("invalid machine_type %q: memory: %w", machineType, memErr)
	}
	// Memory is specified in GB; the Compute API wants MB.
	return int32(cpu), int32(memGB * 1024), nil
}
|
||||||
|
|
||||||
|
// launchMacOSRunner provisions a macOS runner through the Namespace Compute
// API (authenticated with the NSC_TOKEN_FILE bearer token), waits for the
// runner to finish within ttl, and then destroys the instance. It returns
// the wait error, if any; destruction is always attempted.
func (d *Dispatcher) launchMacOSRunner(ctx context.Context, runnerName string, req LaunchRequest, ttl time.Duration, machineType string) error {
	if machineType == "" {
		return errors.New("machine_type is required for macos runners")
	}
	vcpu, memoryMB, err := parseMachineTypeCPUxMemGB(machineType)
	if err != nil {
		return err
	}
	bearer, err := readNSCBearerToken()
	if err != nil {
		return err
	}

	// Per-launch Connect client; the 60s timeout covers individual RPCs.
	httpClient := &http.Client{Timeout: 60 * time.Second}
	client := computev1betaconnect.NewComputeServiceClient(httpClient, d.opts.ComputeBaseURL)

	workdir := macosWorkDir(d.opts.WorkDir)

	// Environment for the bootstrap application; ExtraEnv may override the
	// FORGEJO_* values, and NSC_CACHE_PATH gets the macOS default if unset.
	env := map[string]string{
		"FORGEJO_INSTANCE_URL":   req.InstanceURL,
		"FORGEJO_RUNNER_TOKEN":   req.Token,
		"FORGEJO_RUNNER_NAME":    runnerName,
		"FORGEJO_RUNNER_LABELS":  strings.Join(req.Labels, ","),
		"FORGEJO_RUNNER_EXEC":    d.opts.Executor,
		"FORGEJO_RUNNER_WORKDIR": workdir,
	}
	for k, v := range req.ExtraEnv {
		env[k] = v
	}
	if _, ok := env["NSC_CACHE_PATH"]; !ok {
		env["NSC_CACHE_PATH"] = d.opts.MacosCachePath
	}

	// The instance self-terminates at this deadline even if we crash.
	deadline := timestamppb.New(time.Now().Add(ttl))

	createReq := &computev1beta.CreateInstanceRequest{
		Shape: &computev1beta.InstanceShape{
			VirtualCpu:      vcpu,
			MemoryMegabytes: memoryMB,
			MachineArch:     d.opts.MacosMachineArch,
			Os:              "macos",
			// Namespace macOS compute requires selectors to pick the base image
			// ("support disk"), otherwise instance creation fails.
			Selectors: macosSupportDiskSelectors(d.opts.MacosBaseImageID),
		},
		DocumentedPurpose: fmt.Sprintf("burrow forgejo runner %s", runnerName),
		Deadline:          deadline,
		Labels: []*stdlib.Label{
			{Name: "nsc.source", Value: "forgejo-nsc"},
			{Name: "burrow.service", Value: "forgejo-runner"},
			{Name: "burrow.runner", Value: runnerName},
		},
		Applications: []*computev1beta.ApplicationRequest{
			{
				Name:         "forgejo-runner",
				Command:      "/bin/bash",
				Args:         []string{"-lc", macosBootstrapScript()},
				Environment:  env,
				WorkloadType: computev1beta.ApplicationRequest_JOB,
			},
		},
	}
	// Experimental features are only attached when actually populated.
	experimental := &computev1beta.CreateInstanceRequest_ExperimentalFeatures{}
	if imageID := macosComputeBaseImageID(d.opts.MacosBaseImageID); imageID != "" {
		experimental.MacosBaseImageId = imageID
	}
	if volumes := computeCacheVolumeRequests(d.opts.MacosCacheVolumes); len(volumes) > 0 {
		experimental.Volumes = volumes
	}
	if experimental.MacosBaseImageId != "" || len(experimental.Volumes) > 0 {
		createReq.Experimental = experimental
	}

	d.log.Info("launching Namespace macos runner",
		"runner", runnerName,
		"compute_base_url", d.opts.ComputeBaseURL,
		"macos_base_image_id", d.opts.MacosBaseImageID,
		"shape", fmt.Sprintf("%dx%d", vcpu, memoryMB/1024),
		"arch", d.opts.MacosMachineArch,
	)

	reqCreate := connect.NewRequest(createReq)
	reqCreate.Header().Set("Authorization", "Bearer "+bearer)
	resp, err := client.CreateInstance(ctx, reqCreate)
	if err != nil {
		return fmt.Errorf("compute create instance failed: %w", err)
	}
	if resp.Msg == nil || resp.Msg.Metadata == nil {
		return errors.New("compute create instance returned no metadata")
	}
	instanceID := resp.Msg.Metadata.InstanceId

	// Wait for the runner to finish, then destroy the instance regardless of
	// outcome. Destruction uses a fresh context so cleanup still happens when
	// ctx has already been cancelled.
	waitErr := d.waitForMacOSRunnerStop(ctx, client, bearer, runnerName, instanceID, ttl)
	d.destroyComputeInstance(context.Background(), client, bearer, runnerName, instanceID)
	return waitErr
}
|
||||||
|
|
||||||
|
func (d *Dispatcher) runMacOSComputeSSHScript(ctx context.Context, runnerName, instanceID, script string) error {
|
||||||
|
bearer, err := readNSCBearerToken()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
httpClient := &http.Client{Timeout: 60 * time.Second}
|
||||||
|
client := computev1betaconnect.NewComputeServiceClient(httpClient, d.opts.ComputeBaseURL)
|
||||||
|
|
||||||
|
getReq := connect.NewRequest(&computev1beta.GetSSHConfigRequest{
|
||||||
|
InstanceId: instanceID,
|
||||||
|
// TargetContainer is optional. Keep it empty to run commands in the default instance environment.
|
||||||
|
})
|
||||||
|
getReq.Header().Set("Authorization", "Bearer "+bearer)
|
||||||
|
|
||||||
|
resp, err := client.GetSSHConfig(ctx, getReq)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("compute get ssh config failed: %w", err)
|
||||||
|
}
|
||||||
|
if resp.Msg == nil {
|
||||||
|
return errors.New("compute get ssh config returned empty response")
|
||||||
|
}
|
||||||
|
if resp.Msg.Endpoint == "" {
|
||||||
|
return errors.New("compute get ssh config returned empty endpoint")
|
||||||
|
}
|
||||||
|
if len(resp.Msg.SshPrivateKey) == 0 {
|
||||||
|
return errors.New("compute get ssh config returned empty ssh private key")
|
||||||
|
}
|
||||||
|
if strings.TrimSpace(resp.Msg.Username) == "" {
|
||||||
|
return errors.New("compute get ssh config returned empty username")
|
||||||
|
}
|
||||||
|
|
||||||
|
signer, err := ssh.ParsePrivateKey(resp.Msg.SshPrivateKey)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("parse ssh private key: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
addr := fmt.Sprintf("%s:22", resp.Msg.Endpoint)
|
||||||
|
conn, err := net.Dial("tcp", addr)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("dial ssh endpoint: %w", err)
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
|
sshCfg := &ssh.ClientConfig{
|
||||||
|
User: resp.Msg.Username,
|
||||||
|
Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)},
|
||||||
|
HostKeyCallback: ssh.InsecureIgnoreHostKey(), // Endpoint is short-lived and key is delivered out-of-band.
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshCfg)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("ssh client conn: %w", err)
|
||||||
|
}
|
||||||
|
clientSSH := ssh.NewClient(c, chans, reqs)
|
||||||
|
defer clientSSH.Close()
|
||||||
|
|
||||||
|
session, err := clientSSH.NewSession()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("ssh new session: %w", err)
|
||||||
|
}
|
||||||
|
defer session.Close()
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
session.Stdout = &buf
|
||||||
|
session.Stderr = &buf
|
||||||
|
session.Stdin = strings.NewReader(script)
|
||||||
|
|
||||||
|
// Feed the bootstrap script via stdin so we don't need to quote/escape it.
|
||||||
|
//
|
||||||
|
// Note: Some SSH servers do not reliably parse exec strings with arguments.
|
||||||
|
// Running bare `/bin/bash` still reads from stdin and avoids argument parsing.
|
||||||
|
if err := session.Run("/bin/bash"); err != nil {
|
||||||
|
outRaw := buf.String()
|
||||||
|
out := strings.TrimSpace(outRaw)
|
||||||
|
|
||||||
|
// Some SSH servers reject exec requests and only allow interactive shells,
|
||||||
|
// and others will "succeed" but still interpret stdin under the default
|
||||||
|
// login shell (showing the zsh banner / prompts).
|
||||||
|
//
|
||||||
|
// In those cases, retry via Shell() with a PTY.
|
||||||
|
exitStatus := 0
|
||||||
|
exitErr, isExitErr := err.(*ssh.ExitError)
|
||||||
|
if isExitErr {
|
||||||
|
exitStatus = exitErr.ExitStatus()
|
||||||
|
}
|
||||||
|
|
||||||
|
looksInteractive := strings.Contains(outRaw, "The default interactive shell is now zsh") ||
|
||||||
|
strings.Contains(outRaw, " runner$ ") ||
|
||||||
|
strings.Contains(outRaw, "bash-3.2$")
|
||||||
|
shouldFallback := !isExitErr || looksInteractive
|
||||||
|
|
||||||
|
if shouldFallback {
|
||||||
|
d.log.Warn("compute ssh exec bootstrap failed; retrying via interactive shell",
|
||||||
|
"runner", runnerName,
|
||||||
|
"instance", instanceID,
|
||||||
|
"exit_status", exitStatus,
|
||||||
|
)
|
||||||
|
|
||||||
|
session2, err2 := clientSSH.NewSession()
|
||||||
|
if err2 != nil {
|
||||||
|
return fmt.Errorf("ssh new session (fallback): %w", err2)
|
||||||
|
}
|
||||||
|
defer session2.Close()
|
||||||
|
|
||||||
|
// bytes.Buffer isn't safe for concurrent writes + reads; the SSH session
|
||||||
|
// writes from background goroutines. Wrap it so we can poll for a prompt
|
||||||
|
// before sending commands.
|
||||||
|
lb := &lockedBuffer{}
|
||||||
|
session2.Stdout = lb
|
||||||
|
session2.Stderr = lb
|
||||||
|
|
||||||
|
in, err2 := session2.StdinPipe()
|
||||||
|
if err2 != nil {
|
||||||
|
return fmt.Errorf("ssh stdin pipe (fallback): %w", err2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Request a PTY to match interactive semantics even when the caller
|
||||||
|
// doesn't have a local terminal.
|
||||||
|
_ = session2.RequestPty("xterm", 24, 80, nil)
|
||||||
|
|
||||||
|
if err2 := session2.Shell(); err2 != nil {
|
||||||
|
return fmt.Errorf("ssh shell (fallback): %w", err2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait briefly for the prompt/banner so the first command isn't dropped.
|
||||||
|
// We also emit a sentinel `echo` to verify the TTY is live.
|
||||||
|
deadline := time.Now().Add(3 * time.Second)
|
||||||
|
for time.Now().Before(deadline) {
|
||||||
|
n := lb.Len()
|
||||||
|
if n > 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stream the script then exit. Prefer LF line endings; macOS shells and
|
||||||
|
// PTYs can treat CRLF as literal CR characters (breaking heredoc
|
||||||
|
// delimiters and quoting).
|
||||||
|
writeTTY := func(s string) {
|
||||||
|
if s == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s = strings.ReplaceAll(s, "\r\n", "\n")
|
||||||
|
_, _ = io.WriteString(in, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
scriptTTY := strings.ReplaceAll(script, "\r\n", "\n")
|
||||||
|
|
||||||
|
// Cut down noise in logs and reduce the chance of ZSH line-editing
|
||||||
|
// behavior corrupting long inputs.
|
||||||
|
writeTTY("stty -echo 2>/dev/null || true\n")
|
||||||
|
writeTTY("echo BURROW_BOOTSTRAP_TTY_OK\n")
|
||||||
|
|
||||||
|
// Avoid heredocs for the script itself (PTY newline handling is fragile).
|
||||||
|
// Instead, stream base64 in short chunks to a file, then decode and run it.
|
||||||
|
enc := base64.StdEncoding.EncodeToString([]byte(scriptTTY))
|
||||||
|
idSafe := strings.ReplaceAll(instanceID, "-", "_")
|
||||||
|
b64Path := "/tmp/burrow-bootstrap-" + idSafe + ".b64"
|
||||||
|
shPath := "/tmp/burrow-bootstrap-" + idSafe + ".sh"
|
||||||
|
|
||||||
|
writeTTY("rm -f " + b64Path + " " + shPath + "\n")
|
||||||
|
writeTTY(": > " + b64Path + "\n")
|
||||||
|
|
||||||
|
const chunkSize = 80
|
||||||
|
for i := 0; i < len(enc); i += chunkSize {
|
||||||
|
j := i + chunkSize
|
||||||
|
if j > len(enc) {
|
||||||
|
j = len(enc)
|
||||||
|
}
|
||||||
|
chunk := enc[i:j]
|
||||||
|
// Base64 chunks contain only [A-Za-z0-9+/=], which are safe to pass
|
||||||
|
// unquoted. Avoid quotes entirely so a truncated line can't leave
|
||||||
|
// the remote shell in a multi-line continuation state.
|
||||||
|
writeTTY("printf %s " + chunk + " >> " + b64Path + "\n")
|
||||||
|
time.Sleep(5 * time.Millisecond)
|
||||||
|
}
|
||||||
|
|
||||||
|
// macOS uses `base64 -D` (BSD), some environments use `-d` (GNU).
|
||||||
|
writeTTY("base64 -D " + b64Path + " > " + shPath + " 2>/dev/null || base64 -d " + b64Path + " > " + shPath + "\n")
|
||||||
|
writeTTY("/bin/bash " + shPath + "\n")
|
||||||
|
writeTTY("exit\n")
|
||||||
|
_ = in.Close()
|
||||||
|
|
||||||
|
if err2 := session2.Wait(); err2 != nil {
|
||||||
|
out2 := strings.TrimSpace(lb.String())
|
||||||
|
if len(out2) > 16*1024 {
|
||||||
|
out2 = out2[len(out2)-16*1024:]
|
||||||
|
}
|
||||||
|
return fmt.Errorf("compute ssh runner bootstrap failed (shell fallback): %w\n%s", err2, out2)
|
||||||
|
}
|
||||||
|
|
||||||
|
d.log.Info("macos runner bootstrap completed via compute ssh shell", "runner", runnerName, "instance", instanceID)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(out) > 16*1024 {
|
||||||
|
out = out[len(out)-16*1024:]
|
||||||
|
}
|
||||||
|
return fmt.Errorf("compute ssh runner bootstrap failed: %w\n%s", err, out)
|
||||||
|
}
|
||||||
|
|
||||||
|
d.log.Info("macos runner bootstrap completed via compute ssh", "runner", runnerName, "instance", instanceID)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Dispatcher) waitForMacOSRunnerStop(ctx context.Context, client computev1betaconnect.ComputeServiceClient, bearer, runnerName, instanceID string, ttl time.Duration) error {
|
||||||
|
if ttl <= 0 {
|
||||||
|
ttl = d.opts.DefaultDuration
|
||||||
|
}
|
||||||
|
deadline := time.Now().Add(ttl)
|
||||||
|
ticker := time.NewTicker(15 * time.Second)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
stopped, err := d.checkComputeInstanceStopped(ctx, client, bearer, instanceID)
|
||||||
|
if err != nil {
|
||||||
|
d.log.Warn("macos runner stop check failed", "runner", runnerName, "instance", instanceID, "err", err)
|
||||||
|
} else if stopped {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if time.Now().After(deadline) {
|
||||||
|
return fmt.Errorf("macos runner exceeded ttl (%s) without stopping", ttl)
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
case <-ticker.C:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Dispatcher) checkComputeInstanceStopped(ctx context.Context, client computev1betaconnect.ComputeServiceClient, bearer, instanceID string) (bool, error) {
|
||||||
|
describeReq := connect.NewRequest(&computev1beta.DescribeInstanceRequest{InstanceId: instanceID})
|
||||||
|
describeReq.Header().Set("Authorization", "Bearer "+bearer)
|
||||||
|
resp, err := client.DescribeInstance(ctx, describeReq)
|
||||||
|
if err != nil {
|
||||||
|
// NotFound means the instance is already gone.
|
||||||
|
if connect.CodeOf(err) == connect.CodeNotFound {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
if resp.Msg == nil || resp.Msg.Metadata == nil {
|
||||||
|
return false, errors.New("describe instance returned no metadata")
|
||||||
|
}
|
||||||
|
switch resp.Msg.Metadata.Status {
|
||||||
|
case computev1beta.InstanceMetadata_DESTROYED:
|
||||||
|
return true, nil
|
||||||
|
case computev1beta.InstanceMetadata_ERROR:
|
||||||
|
// Best-effort include shutdown reasons; do not include unbounded output.
|
||||||
|
var b strings.Builder
|
||||||
|
for _, reason := range resp.Msg.ShutdownReasons {
|
||||||
|
if reason == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if b.Len() > 0 {
|
||||||
|
b.WriteString("; ")
|
||||||
|
}
|
||||||
|
b.WriteString(reason.String())
|
||||||
|
if b.Len() > 1024 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
msg := strings.TrimSpace(b.String())
|
||||||
|
if msg == "" {
|
||||||
|
msg = "unknown shutdown reason"
|
||||||
|
}
|
||||||
|
return true, fmt.Errorf("instance entered error state: %s", msg)
|
||||||
|
default:
|
||||||
|
if resp.Msg.Metadata.DestroyedAt != nil {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Dispatcher) destroyComputeInstance(ctx context.Context, client computev1betaconnect.ComputeServiceClient, bearer, runnerName, instanceID string) {
|
||||||
|
if ctx == nil {
|
||||||
|
ctx = context.Background()
|
||||||
|
}
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
destroyReq := connect.NewRequest(&computev1beta.DestroyInstanceRequest{InstanceId: instanceID})
|
||||||
|
destroyReq.Header().Set("Authorization", "Bearer "+bearer)
|
||||||
|
if _, err := client.DestroyInstance(ctx, destroyReq); err != nil {
|
||||||
|
if connect.CodeOf(err) == connect.CodeNotFound {
|
||||||
|
d.log.Info("macos runner destroyed", "runner", runnerName, "instance", instanceID, "status", "not_found")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.log.Warn("macos runner destroy failed", "runner", runnerName, "instance", instanceID, "err", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.log.Info("macos runner destroyed", "runner", runnerName, "instance", instanceID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func computeCacheVolumeRequests(volumes []CacheVolume) []*computev1beta.VolumeRequest {
|
||||||
|
var out []*computev1beta.VolumeRequest
|
||||||
|
for _, volume := range volumes {
|
||||||
|
if strings.TrimSpace(volume.Tag) == "" || strings.TrimSpace(volume.MountPoint) == "" || volume.SizeGb <= 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
out = append(out, &computev1beta.VolumeRequest{
|
||||||
|
MountPoint: volume.MountPoint,
|
||||||
|
Tag: volume.Tag,
|
||||||
|
SizeMb: volume.SizeGb * 1024,
|
||||||
|
PersistencyKind: computev1beta.VolumeRequest_CACHE,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// macosBootstrapScript returns the shell script that bootstraps a Forgejo
// runner on a fresh macOS VM base image: it prepares shared cache
// directories, builds (or reuses) the forgejo-runner binary, registers the
// runner, and executes exactly one job. The script is self-contained and is
// returned as a constant string; the caller prepends environment exports.
func macosBootstrapScript() string {
	return `set -euo pipefail

workdir="${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}"
mkdir -p "${workdir}"
cd "${workdir}"
if ! mkdir -p "/Users/runner/.cache/act" 2>/dev/null; then
  sudo install -d -m 0775 -o "$(id -un)" -g "$(id -gn)" /Users/runner/.cache /Users/runner/.cache/act
fi

export PATH="/usr/local/bin:/opt/homebrew/bin:/usr/bin:/bin:/usr/sbin:/sbin:${PATH}"
cache_base="${NSC_CACHE_PATH:-$HOME/.cache/burrow}"
cache_root="${NSC_SHARED_CACHE_PATH:-${cache_base}/shared}"
cache_owner="$(id -un)"
cache_group="$(id -gn)"
if ! install -d -m 0775 -o "${cache_owner}" -g "${cache_group}" \
  "${cache_root}" \
  "${cache_root}/bin" \
  "${cache_root}/downloads" \
  "${cache_root}/go/path" \
  "${cache_root}/go/mod" \
  "${cache_root}/go/build" \
  "${cache_root}/homebrew" 2>/dev/null; then
  sudo install -d -m 0775 -o "${cache_owner}" -g "${cache_group}" \
    "${cache_root}" \
    "${cache_root}/bin" \
    "${cache_root}/downloads" \
    "${cache_root}/go/path" \
    "${cache_root}/go/mod" \
    "${cache_root}/go/build" \
    "${cache_root}/homebrew"
fi
export HOMEBREW_CACHE="${cache_root}/homebrew"
export GOPATH="${cache_root}/go/path"
export GOMODCACHE="${cache_root}/go/mod"
export GOCACHE="${cache_root}/go/build"

if ! command -v curl >/dev/null 2>&1; then
  echo "curl is required" >&2
  exit 1
fi

# Apple build workflows do not require Nix just to bootstrap the Forgejo runner.
# If Nix is already present on the base image, keep it on PATH; otherwise leave
# installation to the job itself.
if [[ -f /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]]; then
  # shellcheck disable=SC1091
  . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
  export PATH="/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}"
fi

mkdir -p bin
export PATH="${PWD}/bin:${PATH}"

# Keep the ad-hoc macOS bootstrap on the same Forgejo runner major line as the
# Linux runner image. Forgejo runner 11.x is currently published as v11.3.1.
runner_version="v11.3.1"
runner_src_tgz="forgejo-runner-${runner_version}.tar.gz"
runner_src_tgz_path="${cache_root}/downloads/${runner_src_tgz}"
runner_src_url="https://code.forgejo.org/forgejo/runner/archive/${runner_version}.tar.gz"
runner_src_dir="forgejo-runner-src"
runner_bin_cache="${cache_root}/bin/forgejo-runner-${runner_version}"

if [[ ! -x "${runner_bin_cache}" ]]; then
  rm -rf "${runner_src_dir}"
  mkdir -p "${runner_src_dir}"
  if [[ ! -f "${runner_src_tgz_path}" ]]; then
    curl -fsSL "${runner_src_url}" -o "${runner_src_tgz_path}"
  fi
  tar -xzf "${runner_src_tgz_path}" -C "${runner_src_dir}" --strip-components=1

  toolchain="$(grep -E '^toolchain ' "${runner_src_dir}/go.mod" | awk '{print $2}' | head -n 1 || true)"
  if [ -z "${toolchain}" ]; then
    toolchain="go1.25.7"
  fi

  if ! command -v go >/dev/null 2>&1; then
    go_tgz="${toolchain}.darwin-arm64.tar.gz"
    go_url="https://go.dev/dl/${go_tgz}"
    go_tgz_path="${cache_root}/downloads/${go_tgz}"
    if [[ ! -f "${go_tgz_path}" ]]; then
      curl -fsSL "${go_url}" -o "${go_tgz_path}"
    fi
    tar -xzf "${go_tgz_path}"
    export GOROOT="${PWD}/go"
    export PATH="${GOROOT}/bin:${PATH}"
  fi

  mkdir -p "${GOPATH}" "${GOMODCACHE}" "${GOCACHE}"

  (cd "${runner_src_dir}" && go build -o "${runner_bin_cache}" .)
  chmod +x "${runner_bin_cache}"
fi

ln -sf "${runner_bin_cache}" "${workdir}/bin/forgejo-runner"

cat > runner.yaml <<'EOF'
log:
  level: info
runner:
  file: .runner
  capacity: 1
  name: ${FORGEJO_RUNNER_NAME}
  labels:
EOF

runner_exec="${FORGEJO_RUNNER_EXEC:-host}"
if [ "$runner_exec" = "shell" ]; then
  runner_exec="host"
fi

resolved_labels=""
for label in ${FORGEJO_RUNNER_LABELS//,/ } ; do
  if [ -z "${label}" ]; then
    continue
  fi
  case "${label}" in
    *:*) resolved="${label}" ;;
    *) resolved="${label}:${runner_exec}" ;;
  esac
  echo "    - ${resolved}" >> runner.yaml
  if [ -z "${resolved_labels}" ]; then
    resolved_labels="${resolved}"
  else
    resolved_labels="${resolved_labels},${resolved}"
  fi
done

cat >> runner.yaml <<'EOF'
cache:
  enabled: false
EOF

forgejo-runner register \
  --no-interactive \
  --instance "${FORGEJO_INSTANCE_URL}" \
  --token "${FORGEJO_RUNNER_TOKEN}" \
  --name "${FORGEJO_RUNNER_NAME}" \
  --labels "${resolved_labels}" \
  --config runner.yaml

forgejo-runner one-job --config runner.yaml
`
}
|
||||||
484
services/forgejo-nsc/internal/nsc/macos_nsc.go
Normal file
484
services/forgejo-nsc/internal/nsc/macos_nsc.go
Normal file
|
|
@ -0,0 +1,484 @@
|
||||||
|
package nsc
|
||||||
|
|
||||||
|
import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net/url"
	"os"
	"os/exec"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"connectrpc.com/connect"
)
|
||||||
|
|
||||||
|
// nscCLIEnv returns a copy of the current process environment with the
// NSC_TOKEN_FILE entry removed, so invocations of the nsc CLI do not pick up
// the dispatcher's token file. Order of the remaining entries is preserved.
func nscCLIEnv() []string {
	var filtered []string
	for _, kv := range os.Environ() {
		if strings.HasPrefix(kv, "NSC_TOKEN_FILE=") {
			continue
		}
		filtered = append(filtered, kv)
	}
	return filtered
}
|
||||||
|
|
||||||
|
func normalizeMacOSNSCMachineType(machineType string) (normalized string, changed bool, err error) {
|
||||||
|
vcpu, memoryMB, err := parseMachineTypeCPUxMemGB(machineType)
|
||||||
|
if err != nil {
|
||||||
|
return "", false, err
|
||||||
|
}
|
||||||
|
memGB := memoryMB / 1024
|
||||||
|
if memGB <= 0 || vcpu <= 0 {
|
||||||
|
return "", false, fmt.Errorf("invalid machine_type %q after parse: vcpu=%d memGB=%d", machineType, vcpu, memGB)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NSC CLI (and the underlying InstanceService) enforce discrete cpu/mem sets
|
||||||
|
// for macOS. Normalize requested values by rounding up to the closest allowed
|
||||||
|
// values to keep provisioning stable even when configs drift.
|
||||||
|
//
|
||||||
|
// Observed allowed sets from Namespace API error output for macos/arm64:
|
||||||
|
// cpu: [4 6 8 12]
|
||||||
|
// mem: [7 14 28 56] (GB)
|
||||||
|
allowedCPU := []int32{4, 6, 8, 12}
|
||||||
|
allowedMemGB := []int32{7, 14, 28, 56}
|
||||||
|
|
||||||
|
roundUp := func(v int32, allowed []int32) (int32, bool) {
|
||||||
|
for _, a := range allowed {
|
||||||
|
if v <= a {
|
||||||
|
return a, a != v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Clamp to max if above all allowed values.
|
||||||
|
return allowed[len(allowed)-1], true
|
||||||
|
}
|
||||||
|
|
||||||
|
newCPU, cpuChanged := roundUp(vcpu, allowedCPU)
|
||||||
|
newMemGB, memChanged := roundUp(memGB, allowedMemGB)
|
||||||
|
|
||||||
|
normalized = fmt.Sprintf("%dx%d", newCPU, newMemGB)
|
||||||
|
changed = cpuChanged || memChanged
|
||||||
|
return normalized, changed, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// macosNSCSSHOutcome describes how the nsc-ssh bootstrap path finished.
type macosNSCSSHOutcome int

const (
	// macosNSCSSHCompleted: the bootstrap script ran to completion over nsc ssh.
	macosNSCSSHCompleted macosNSCSSHOutcome = iota
	// macosNSCSSHHandoff: the runner was handed off and the instance should be
	// left running until its TTL (see launchMacOSRunnerViaNSC).
	macosNSCSSHHandoff
)
|
||||||
|
|
||||||
|
func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName string, req LaunchRequest, ttl time.Duration, machineType string) error {
|
||||||
|
if machineType == "" {
|
||||||
|
return errors.New("machine_type is required for macos runners")
|
||||||
|
}
|
||||||
|
|
||||||
|
selectors := macosSelectorsArg(d.opts.MacosBaseImageID)
|
||||||
|
if selectors == "" {
|
||||||
|
return errors.New("macos selectors resolved empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
normalizedMachineType := machineType
|
||||||
|
if n, changed, err := normalizeMacOSNSCMachineType(machineType); err != nil {
|
||||||
|
return err
|
||||||
|
} else if changed {
|
||||||
|
normalizedMachineType = n
|
||||||
|
}
|
||||||
|
|
||||||
|
// If capacity is constrained for the requested (large) shape, try a small
|
||||||
|
// set of progressively smaller shapes before failing the dispatch request.
|
||||||
|
// This keeps macOS builds flowing even when large runners are scarce.
|
||||||
|
candidates := []string{normalizedMachineType, "8x28", "6x14", "4x7"}
|
||||||
|
seen := map[string]struct{}{}
|
||||||
|
var uniq []string
|
||||||
|
for _, c := range candidates {
|
||||||
|
c = strings.TrimSpace(c)
|
||||||
|
if c == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok := seen[c]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
seen[c] = struct{}{}
|
||||||
|
uniq = append(uniq, c)
|
||||||
|
}
|
||||||
|
candidates = uniq
|
||||||
|
|
||||||
|
type attemptCfg struct {
|
||||||
|
waitTimeout time.Duration
|
||||||
|
createTimeout time.Duration
|
||||||
|
}
|
||||||
|
attempts := []attemptCfg{
|
||||||
|
{waitTimeout: 6 * time.Minute, createTimeout: 8 * time.Minute},
|
||||||
|
{waitTimeout: 4 * time.Minute, createTimeout: 6 * time.Minute},
|
||||||
|
{waitTimeout: 3 * time.Minute, createTimeout: 5 * time.Minute},
|
||||||
|
}
|
||||||
|
|
||||||
|
createInstance := func(mt string, a attemptCfg) (instanceID string, out string, err error) {
|
||||||
|
tmpDir, err := os.MkdirTemp("", "forgejo-nsc-macos-*")
|
||||||
|
if err != nil {
|
||||||
|
return "", "", fmt.Errorf("mktemp: %w", err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
|
metaPath := filepath.Join(tmpDir, "create.json")
|
||||||
|
cidPath := filepath.Join(tmpDir, "create.cid")
|
||||||
|
|
||||||
|
arch := strings.TrimSpace(d.opts.MacosMachineArch)
|
||||||
|
if arch == "" {
|
||||||
|
arch = "arm64"
|
||||||
|
}
|
||||||
|
// Namespace CLI requires the "os/arch:" prefix to create a macOS instance.
|
||||||
|
// Without it, `nsc create` defaults to Linux even if selectors include macos.*.
|
||||||
|
machineType := fmt.Sprintf("macos/%s:%s", arch, mt)
|
||||||
|
|
||||||
|
args := []string{
|
||||||
|
"create",
|
||||||
|
"--duration", ttl.String(),
|
||||||
|
"--machine_type", machineType,
|
||||||
|
"--selectors", selectors,
|
||||||
|
"--bare",
|
||||||
|
"--cidfile", cidPath,
|
||||||
|
"--log_actions",
|
||||||
|
"--purpose", fmt.Sprintf("burrow forgejo runner %s", runnerName),
|
||||||
|
// Prefer plain output for debuggability (progress, capacity errors, etc).
|
||||||
|
"--output", "plain",
|
||||||
|
"--output_json_to", metaPath,
|
||||||
|
// macOS instances can take a while to become ready.
|
||||||
|
"--wait_timeout", a.waitTimeout.String(),
|
||||||
|
}
|
||||||
|
args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL)
|
||||||
|
args = appendVolumeArgs(args, d.opts.MacosCacheVolumes)
|
||||||
|
|
||||||
|
createCtx, cancel := context.WithTimeout(ctx, a.createTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
cmd := exec.CommandContext(createCtx, d.opts.BinaryPath, args...)
|
||||||
|
cmd.Env = nscCLIEnv()
|
||||||
|
var buf bytes.Buffer
|
||||||
|
cmd.Stdout = &buf
|
||||||
|
cmd.Stderr = &buf
|
||||||
|
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
// Best-effort cleanup: if the instance ID was written before the command failed
|
||||||
|
// (or before we timed it out), attempt to destroy it to avoid idling machines.
|
||||||
|
if instanceID := strings.TrimSpace(mustReadFile(cidPath)); instanceID != "" {
|
||||||
|
d.destroyNSCInstance(context.Background(), runnerName, instanceID)
|
||||||
|
}
|
||||||
|
if errors.Is(createCtx.Err(), context.DeadlineExceeded) {
|
||||||
|
return "", buf.String(), fmt.Errorf("nsc create timed out after %s", a.createTimeout)
|
||||||
|
}
|
||||||
|
return "", buf.String(), fmt.Errorf("nsc create failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
instanceID, err = readNSCCreateInstanceID(metaPath)
|
||||||
|
if err != nil {
|
||||||
|
return "", buf.String(), fmt.Errorf("nsc create output parse failed: %w", err)
|
||||||
|
}
|
||||||
|
if instanceID == "" {
|
||||||
|
return "", buf.String(), fmt.Errorf("nsc create returned empty instance id")
|
||||||
|
}
|
||||||
|
return instanceID, buf.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
instanceID string
|
||||||
|
lastOut string
|
||||||
|
lastErr error
|
||||||
|
)
|
||||||
|
for i, mt := range candidates {
|
||||||
|
a := attempts[i]
|
||||||
|
if i >= len(attempts) {
|
||||||
|
a = attempts[len(attempts)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
d.log.Info("launching Namespace macos runner via nsc",
|
||||||
|
"runner", runnerName,
|
||||||
|
"attempt", i+1,
|
||||||
|
"machine_type", mt,
|
||||||
|
"requested_machine_type", machineType,
|
||||||
|
"selectors", selectors,
|
||||||
|
)
|
||||||
|
|
||||||
|
id, out, err := createInstance(mt, a)
|
||||||
|
lastOut = out
|
||||||
|
lastErr = err
|
||||||
|
if err != nil {
|
||||||
|
// Timeouts are treated as retryable (capacity constrained).
|
||||||
|
if strings.Contains(err.Error(), "timed out") || strings.Contains(strings.ToLower(out), "capacity") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return fmt.Errorf("%w\n%s", err, out)
|
||||||
|
}
|
||||||
|
instanceID = id
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if instanceID == "" {
|
||||||
|
if lastErr != nil {
|
||||||
|
return fmt.Errorf("%w\n%s", lastErr, lastOut)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("nsc create failed without producing an instance id\n%s", lastOut)
|
||||||
|
}
|
||||||
|
|
||||||
|
destroyOnReturn := true
|
||||||
|
defer func() {
|
||||||
|
if destroyOnReturn {
|
||||||
|
d.destroyNSCInstance(context.Background(), runnerName, instanceID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
script := macosBootstrapWrapperScript(runnerName, req, d.opts.Executor, d.opts.WorkDir)
|
||||||
|
// Use the Compute SSH config endpoint (direct TCP) instead of `nsc ssh`, which
|
||||||
|
// relies on a websocket-based SSH proxy that is less reliable under the
|
||||||
|
// revokable tenant token flow used by the dispatcher.
|
||||||
|
if err := d.runMacOSComputeSSHScript(ctx, runnerName, instanceID, script); err != nil {
|
||||||
|
if shouldFallbackToNSCSSH(err) {
|
||||||
|
d.log.Warn("compute ssh bootstrap failed; falling back to nsc ssh",
|
||||||
|
"runner", runnerName,
|
||||||
|
"instance", instanceID,
|
||||||
|
"err", err,
|
||||||
|
)
|
||||||
|
outcome, sshErr := d.runMacOSNSCSSHScript(ctx, runnerName, instanceID, script)
|
||||||
|
if sshErr != nil {
|
||||||
|
return sshErr
|
||||||
|
}
|
||||||
|
if outcome == macosNSCSSHHandoff {
|
||||||
|
destroyOnReturn = false
|
||||||
|
d.log.Info("leaving macos nsc instance running until TTL after runner handoff",
|
||||||
|
"runner", runnerName,
|
||||||
|
"instance", instanceID,
|
||||||
|
"ttl", ttl.String(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustReadFile reads the file at path, returning its contents as a string,
// or the empty string on any read error (best-effort helper; errors are
// deliberately swallowed).
func mustReadFile(path string) string {
	data, readErr := os.ReadFile(path)
	if readErr != nil {
		return ""
	}
	return string(data)
}
|
||||||
|
|
||||||
|
// macosSelectorsArg maps a configured macOS base image identifier onto an nsc
// selectors string. Raw selector expressions (anything containing "=") pass
// through unchanged; well-known version aliases map to their macos.version
// selector; anything else falls back to macOS 26. A blank id defaults to
// "tahoe".
func macosSelectorsArg(baseImageID string) string {
	sel := strings.TrimSpace(baseImageID)
	if sel == "" {
		sel = "tahoe"
	}
	// Allow passing selectors directly via config, e.g. "macos.version=26.x,image.with=xcode-26".
	if strings.Contains(sel, "=") {
		return sel
	}

	switch strings.ToLower(sel) {
	case "sonoma", "macos-14", "macos14", "14":
		return "macos.version=14.x"
	case "sequoia", "macos-15", "macos15", "15":
		return "macos.version=15.x"
	case "tahoe", "macos-26", "macos26", "26":
		return "macos.version=26.x,image.with=xcode-26"
	}
	return "macos.version=26.x"
}
|
||||||
|
|
||||||
|
// nscCreateMetadata mirrors the JSON document `nsc create --output_json_to`
// writes. Different nsc versions have used different field names for the
// instance identifier, so all three are captured; readNSCCreateInstanceID
// picks the first non-empty one in this order.
type nscCreateMetadata struct {
	InstanceID string `json:"instance_id"`
	ClusterID  string `json:"cluster_id"`
	ID         string `json:"id"`
}
|
||||||
|
|
||||||
|
func readNSCCreateInstanceID(path string) (string, error) {
|
||||||
|
raw, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("read %s: %w", path, err)
|
||||||
|
}
|
||||||
|
var meta nscCreateMetadata
|
||||||
|
if err := json.Unmarshal(raw, &meta); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if meta.InstanceID != "" {
|
||||||
|
return meta.InstanceID, nil
|
||||||
|
}
|
||||||
|
if meta.ClusterID != "" {
|
||||||
|
return meta.ClusterID, nil
|
||||||
|
}
|
||||||
|
if meta.ID != "" {
|
||||||
|
return meta.ID, nil
|
||||||
|
}
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Dispatcher) destroyNSCInstance(ctx context.Context, runnerName, instanceID string) {
|
||||||
|
if ctx == nil {
|
||||||
|
ctx = context.Background()
|
||||||
|
}
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
args := []string{"destroy", "--force", instanceID}
|
||||||
|
args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL)
|
||||||
|
cmd := exec.CommandContext(ctx, d.opts.BinaryPath, args...)
|
||||||
|
cmd.Env = nscCLIEnv()
|
||||||
|
var buf bytes.Buffer
|
||||||
|
cmd.Stdout = &buf
|
||||||
|
cmd.Stderr = &buf
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
d.log.Warn("nsc destroy failed", "runner", runnerName, "instance", instanceID, "err", err, "output", strings.TrimSpace(buf.String()))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.log.Info("nsc instance destroyed", "runner", runnerName, "instance", instanceID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// macosBootstrapWrapperScript builds the shell script fed to the macOS
// instance over stdin: it exports the runner configuration (instance URL,
// token, name, labels, executor, workdir, plus any ExtraEnv entries) and
// then appends the shared macOS bootstrap script. Values travel via stdin so
// secrets never appear in the `nsc ssh` argv.
// NOTE(review): env is a map, so the emitted export order is
// nondeterministic across calls — harmless for execution, but scripts are
// not byte-stable; confirm nothing hashes or diffs them.
func macosBootstrapWrapperScript(runnerName string, req LaunchRequest, executor, workdir string) string {
	workdir = macosWorkDir(workdir)

	// Pass all values via stdin script so secrets do not appear in the nsc ssh argv.
	env := map[string]string{
		"FORGEJO_INSTANCE_URL":   req.InstanceURL,
		"FORGEJO_RUNNER_TOKEN":   req.Token,
		"FORGEJO_RUNNER_NAME":    runnerName,
		"FORGEJO_RUNNER_LABELS":  strings.Join(req.Labels, ","),
		"FORGEJO_RUNNER_EXEC":    executor,
		"FORGEJO_RUNNER_WORKDIR": workdir,
	}
	// ExtraEnv may override the defaults above by using the same key.
	for k, v := range req.ExtraEnv {
		env[k] = v
	}

	var b strings.Builder
	b.WriteString("set -euo pipefail\n")
	for k, v := range env {
		// Skip blank keys, which would produce invalid export statements.
		if strings.TrimSpace(k) == "" {
			continue
		}
		// Single-quote shell escaping: safe for arbitrary tokens.
		b.WriteString("export ")
		b.WriteString(k)
		b.WriteString("=")
		b.WriteString(shellSingleQuote(v))
		b.WriteString("\n")
	}
	b.WriteString("\n")
	b.WriteString(macosBootstrapScript())
	return b.String()
}
|
||||||
|
|
||||||
|
// shellSingleQuote wraps value in single quotes for safe interpolation into
// a POSIX shell script. Embedded single quotes are escaped by closing the
// quoted region, emitting an escaped quote, and reopening: ' -> '\''.
//
// Bug fix: the previous replacement `'\"'\"'` inserted backslash-escaped
// double quotes, so any value containing a single quote (e.g. a token or
// extra-env value) was corrupted into `\"` sequences instead of a literal
// single quote. The standard '\'' escape is used instead.
func shellSingleQuote(value string) string {
	return "'" + strings.ReplaceAll(value, "'", `'\''`) + "'"
}
|
||||||
|
|
||||||
|
func shouldFallbackToNSCSSH(err error) bool {
|
||||||
|
if err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
switch connect.CodeOf(err) {
|
||||||
|
case connect.CodeUnauthenticated, connect.CodePermissionDenied, connect.CodeUnimplemented:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
errText := strings.ToLower(err.Error())
|
||||||
|
return strings.Contains(errText, "compute get ssh config failed") &&
|
||||||
|
(strings.Contains(errText, "unauthenticated") ||
|
||||||
|
strings.Contains(errText, "permission_denied") ||
|
||||||
|
strings.Contains(errText, "permission denied") ||
|
||||||
|
strings.Contains(errText, "unimplemented"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// runMacOSNSCSSHScript pipes the bootstrap script into `nsc ssh <instance>
// /bin/bash` and classifies the outcome. A clean exit returns
// macosNSCSSHCompleted; an error that matches the known benign
// disconnect-after-handoff pattern (see nscSSHBootstrapLikelySucceeded)
// returns macosNSCSSHHandoff with a nil error; everything else is a failure.
func (d *Dispatcher) runMacOSNSCSSHScript(ctx context.Context, runnerName, instanceID, script string) (macosNSCSSHOutcome, error) {
	// Hard cap on the whole ssh session, including the bootstrap itself.
	sshCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
	defer cancel()

	// --disable-pty keeps output machine-readable and stdin scriptable.
	args := []string{"ssh", "--disable-pty", instanceID, "/bin/bash"}
	args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL)

	cmd := exec.CommandContext(sshCtx, d.opts.BinaryPath, args...)
	cmd.Env = nscCLIEnv()
	// The script arrives on stdin so secrets stay out of argv.
	cmd.Stdin = strings.NewReader(script)

	// Combined capture: used both for error reporting and for the
	// handoff-success heuristic below.
	var buf bytes.Buffer
	cmd.Stdout = &buf
	cmd.Stderr = &buf

	if err := cmd.Run(); err != nil {
		if errors.Is(sshCtx.Err(), context.DeadlineExceeded) {
			return macosNSCSSHCompleted, fmt.Errorf("nsc ssh timed out after %s\n%s", 5*time.Minute, strings.TrimSpace(buf.String()))
		}
		// The runner can take over the session and drop the ssh channel
		// without an exit status; if the output shows registration and a
		// started job, treat the bootstrap as successful.
		if nscSSHBootstrapLikelySucceeded(err, buf.String()) {
			d.log.Warn("nsc ssh exited after runner handoff; treating bootstrap as successful",
				"runner", runnerName,
				"instance", instanceID,
				"err", err,
			)
			d.log.Info("macos runner bootstrap completed via nsc ssh", "runner", runnerName, "instance", instanceID)
			return macosNSCSSHHandoff, nil
		}
		return macosNSCSSHCompleted, fmt.Errorf("nsc ssh runner bootstrap failed: %w\n%s", err, strings.TrimSpace(buf.String()))
	}

	d.log.Info("macos runner bootstrap completed via nsc ssh", "runner", runnerName, "instance", instanceID)
	return macosNSCSSHCompleted, nil
}
|
||||||
|
|
||||||
|
func nscSSHBootstrapLikelySucceeded(err error, output string) bool {
|
||||||
|
if err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
errText := strings.ToLower(err.Error())
|
||||||
|
if !strings.Contains(errText, "remote command exited without exit status or exit signal") {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
output = strings.ToLower(output)
|
||||||
|
return strings.Contains(output, "runner registered successfully") &&
|
||||||
|
strings.Contains(output, "starting job") &&
|
||||||
|
strings.Contains(output, "task ")
|
||||||
|
}
|
||||||
|
|
||||||
|
// prependNSCRegionArgs prefixes args with a `--region` flag. The region
// comes from NSC_REGION when set, otherwise is derived from the compute
// base URL hostname, and finally falls back to "ord4".
func prependNSCRegionArgs(args []string, computeBaseURL string) []string {
	region := strings.TrimSpace(os.Getenv("NSC_REGION"))
	if region == "" {
		region = regionFromComputeBaseURL(computeBaseURL)
	}
	if region == "" {
		// Default to the burrow region used for other Namespace integrations.
		region = "ord4"
	}
	return append([]string{"--region", region}, args...)
}

// regionFromComputeBaseURL extracts the region prefix from a compute
// endpoint such as https://ord4.compute.namespaceapis.com (-> "ord4").
// It returns "" for empty or unparsable URLs and for hosts that do not
// look like Namespace compute endpoints.
func regionFromComputeBaseURL(raw string) string {
	raw = strings.TrimSpace(raw)
	if raw == "" {
		return ""
	}

	parsed, err := url.Parse(raw)
	if err != nil {
		return ""
	}
	host := parsed.Hostname()
	if host == "" {
		return ""
	}

	// ord4.compute.namespaceapis.com -> ord4
	if !strings.HasSuffix(host, ".compute.namespaceapis.com") && !strings.Contains(host, ".compute.") {
		return ""
	}
	first, _, _ := strings.Cut(host, ".")
	return first
}
|
||||||
69
services/forgejo-nsc/internal/nsc/macos_nsc_test.go
Normal file
69
services/forgejo-nsc/internal/nsc/macos_nsc_test.go
Normal file
|
|
@ -0,0 +1,69 @@
|
||||||
|
package nsc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNormalizeMacOSNSCMachineTypeRoundsUp(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
got, changed, err := normalizeMacOSNSCMachineType("5x10")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("normalizeMacOSNSCMachineType: %v", err)
|
||||||
|
}
|
||||||
|
if !changed {
|
||||||
|
t.Fatal("expected machine type to be normalized")
|
||||||
|
}
|
||||||
|
if got != "6x14" {
|
||||||
|
t.Fatalf("expected 6x14, got %q", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNormalizeMacOSNSCMachineTypeKeepsAllowedShape(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
got, changed, err := normalizeMacOSNSCMachineType("6x14")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("normalizeMacOSNSCMachineType: %v", err)
|
||||||
|
}
|
||||||
|
if changed {
|
||||||
|
t.Fatal("expected allowed machine type to remain unchanged")
|
||||||
|
}
|
||||||
|
if got != "6x14" {
|
||||||
|
t.Fatalf("expected 6x14, got %q", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestShouldFallbackToNSCSSHFallbackForComputeAuthErrors(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
err := errors.New("compute get ssh config failed: unauthenticated: invalid tenant credentials")
|
||||||
|
if !shouldFallbackToNSCSSH(err) {
|
||||||
|
t.Fatal("expected compute auth error to fall back to nsc ssh")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestShouldFallbackToNSCSSHRejectsOtherErrors(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
err := errors.New("compute ssh runner bootstrap failed: exit status 1")
|
||||||
|
if shouldFallbackToNSCSSH(err) {
|
||||||
|
t.Fatal("expected unrelated bootstrap errors to remain fatal")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNSCSSHBootstrapLikelySucceeded(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
err := errors.New("wait: remote command exited without exit status or exit signal")
|
||||||
|
output := `
|
||||||
|
level=info msg="Runner registered successfully."
|
||||||
|
time="2026-03-19T11:29:49Z" level=info msg="Starting job"
|
||||||
|
time="2026-03-19T11:29:50Z" level=info msg="task 124 repo is hackclub/burrow"
|
||||||
|
`
|
||||||
|
|
||||||
|
if !nscSSHBootstrapLikelySucceeded(err, output) {
|
||||||
|
t.Fatal("expected handoff success heuristic to match")
|
||||||
|
}
|
||||||
|
}
|
||||||
59
services/forgejo-nsc/internal/nsc/windows.go
Normal file
59
services/forgejo-nsc/internal/nsc/windows.go
Normal file
|
|
@ -0,0 +1,59 @@
|
||||||
|
package nsc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// windowsDefaultMachineType is the machine shape used when neither an
// explicit machine type nor a recognized size label selects one.
const windowsDefaultMachineType = "windows/amd64:8x16"

// cpuMemShapePattern matches bare "<cpu>x<mem>" shapes such as "4x8".
var cpuMemShapePattern = regexp.MustCompile(`^\d+x\d+$`)
|
||||||
|
|
||||||
|
// hasWindowsLabel reports whether any label selects a Namespace Windows
// profile. Labels may carry a ":suffix" (e.g. ":host"), which is stripped
// before the prefix check; blank labels are ignored.
func hasWindowsLabel(labels []string) bool {
	for _, raw := range labels {
		trimmed := strings.TrimSpace(raw)
		if trimmed == "" {
			continue
		}
		// strings.Cut returns the whole string as "before" when there is
		// no ":" separator, so this covers both label forms.
		prefix, _, _ := strings.Cut(trimmed, ":")
		if strings.HasPrefix(prefix, "namespace-profile-windows-") {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
func normalizeWindowsMachineType(machineType string, labels []string) string {
|
||||||
|
mt := strings.TrimSpace(machineType)
|
||||||
|
if strings.HasPrefix(mt, "windows/") {
|
||||||
|
return mt
|
||||||
|
}
|
||||||
|
if cpuMemShapePattern.MatchString(mt) {
|
||||||
|
return "windows/amd64:" + mt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Label-derived defaults: keep a simple shape ladder for explicit profile sizes.
|
||||||
|
for _, label := range labels {
|
||||||
|
base := strings.TrimSpace(label)
|
||||||
|
if before, _, ok := strings.Cut(base, ":"); ok {
|
||||||
|
base = before
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case strings.HasPrefix(base, "namespace-profile-windows-small"):
|
||||||
|
return "windows/amd64:2x4"
|
||||||
|
case strings.HasPrefix(base, "namespace-profile-windows-medium"):
|
||||||
|
return "windows/amd64:4x8"
|
||||||
|
case strings.HasPrefix(base, "namespace-profile-windows-large"):
|
||||||
|
return windowsDefaultMachineType
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return windowsDefaultMachineType
|
||||||
|
}
|
||||||
|
|
||||||
|
// powershellSingleQuote wraps value in a PowerShell single-quoted string
// literal. Per PowerShell escaping rules the only character needing escape
// inside single quotes is the quote itself, doubled: ' -> ''.
func powershellSingleQuote(value string) string {
	var b strings.Builder
	b.WriteByte('\'')
	b.WriteString(strings.ReplaceAll(value, "'", "''"))
	b.WriteByte('\'')
	return b.String()
}
|
||||||
98
services/forgejo-nsc/internal/nsc/windows_test.go
Normal file
98
services/forgejo-nsc/internal/nsc/windows_test.go
Normal file
|
|
@ -0,0 +1,98 @@
|
||||||
|
package nsc
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestHasWindowsLabel(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
labels []string
|
||||||
|
want bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "namespace windows label",
|
||||||
|
labels: []string{"namespace-profile-windows-large"},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "namespace windows label with host suffix",
|
||||||
|
labels: []string{"namespace-profile-windows-large:host"},
|
||||||
|
want: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "non namespace windows-like label",
|
||||||
|
labels: []string{"burrow-winrunner:host"},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "macos label",
|
||||||
|
labels: []string{"namespace-profile-macos-large"},
|
||||||
|
want: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
tc := tc
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
got := hasWindowsLabel(tc.labels)
|
||||||
|
if got != tc.want {
|
||||||
|
t.Fatalf("hasWindowsLabel(%v) = %v, want %v", tc.labels, got, tc.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNormalizeWindowsMachineType(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
machine string
|
||||||
|
labels []string
|
||||||
|
wantPrefix string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "explicit windows machine type keeps value",
|
||||||
|
machine: "windows/amd64:8x16",
|
||||||
|
labels: []string{"namespace-profile-windows-large"},
|
||||||
|
wantPrefix: "windows/amd64:8x16",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "shape only is normalized",
|
||||||
|
machine: "4x8",
|
||||||
|
labels: []string{"namespace-profile-windows-large"},
|
||||||
|
wantPrefix: "windows/amd64:4x8",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "large label default",
|
||||||
|
machine: "",
|
||||||
|
labels: []string{"namespace-profile-windows-large"},
|
||||||
|
wantPrefix: "windows/amd64:8x16",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "medium label default",
|
||||||
|
machine: "",
|
||||||
|
labels: []string{"namespace-profile-windows-medium"},
|
||||||
|
wantPrefix: "windows/amd64:4x8",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "fallback default",
|
||||||
|
machine: "",
|
||||||
|
labels: []string{"namespace-profile-windows-custom"},
|
||||||
|
wantPrefix: "windows/amd64:8x16",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
tc := tc
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
got := normalizeWindowsMachineType(tc.machine, tc.labels)
|
||||||
|
if got != tc.wantPrefix {
|
||||||
|
t.Fatalf("normalizeWindowsMachineType(%q, %v) = %q, want %q", tc.machine, tc.labels, got, tc.wantPrefix)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
499
services/forgejo-nsc/internal/nsc/windows_winrm.go
Normal file
499
services/forgejo-nsc/internal/nsc/windows_winrm.go
Normal file
|
|
@ -0,0 +1,499 @@
|
||||||
|
package nsc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// windowsProxyOutput models the JSON emitted by `nsc instance proxy -o json`
// for an RDP service: the local proxy endpoint plus the instance's RDP
// username/password, which double as the WinRM credentials here.
type windowsProxyOutput struct {
	Endpoint string `json:"endpoint"`
	RDP      struct {
		Credentials struct {
			Username string `json:"username"`
			Password string `json:"password"`
		} `json:"credentials"`
	} `json:"rdp"`
}
|
||||||
|
|
||||||
|
// launchWindowsRunnerViaWinRM builds the PowerShell bootstrap script for the
// requested runner and delegates instance creation plus script execution to
// launchWindowsScriptViaWinRM.
func (d *Dispatcher) launchWindowsRunnerViaWinRM(ctx context.Context, runnerName string, req LaunchRequest, ttl time.Duration, machineType string) error {
	script := windowsBootstrapScript(runnerName, req, d.opts.Executor, d.opts.WorkDir)
	return d.launchWindowsScriptViaWinRM(ctx, runnerName, ttl, machineType, req.Labels, script)
}
|
||||||
|
|
||||||
|
// launchWindowsScriptViaWinRM provisions a Windows Namespace instance and
// runs the given PowerShell script on it over WinRM. Steps: normalize the
// machine type, create the instance, resolve RDP credentials, verify the
// winrm service exists, port-forward WinRM locally, then execute the script.
// NOTE(review): the deferred destroy tears the instance down as soon as this
// function returns, including on success — this assumes the WinRM script run
// blocks until the runner's work is finished; confirm against
// runWindowsWinRMPowerShell's blocking behavior.
func (d *Dispatcher) launchWindowsScriptViaWinRM(ctx context.Context, runnerName string, ttl time.Duration, machineType string, labels []string, script string) error {
	// Non-positive TTLs fall back to the dispatcher-wide default duration.
	if ttl <= 0 {
		ttl = d.opts.DefaultDuration
	}

	mt := normalizeWindowsMachineType(machineType, labels)
	instanceID, createOutput, err := d.createWindowsInstance(ctx, runnerName, ttl, mt)
	if err != nil {
		return fmt.Errorf("windows create failed: %w\n%s", err, createOutput)
	}
	// Background context so cleanup still runs if ctx is already canceled.
	defer d.destroyNSCInstance(context.Background(), runnerName, instanceID)

	username, password, err := d.resolveWindowsCredentials(ctx, instanceID)
	if err != nil {
		return err
	}

	// Fail fast with a clear message when the image is RDP-only.
	if err := d.probeWindowsWinRMService(ctx, instanceID); err != nil {
		return err
	}

	endpoint, stopForward, err := d.startWindowsWinRMPortForward(ctx, instanceID)
	if err != nil {
		return err
	}
	defer stopForward()

	if err := d.runWindowsWinRMPowerShell(ctx, endpoint, username, password, script); err != nil {
		return err
	}

	return nil
}
|
||||||
|
|
||||||
|
// createWindowsInstance runs `nsc create` for a Windows machine and returns
// the new instance ID plus the CLI's combined output (returned even on error
// so callers can include it in their messages). The CLI writes the instance
// ID to both a JSON metadata file and a cidfile; the cidfile is used to
// destroy a partially-created instance when the create command itself fails.
func (d *Dispatcher) createWindowsInstance(ctx context.Context, runnerName string, ttl time.Duration, machineType string) (instanceID string, output string, err error) {
	tmpDir, err := os.MkdirTemp("", "forgejo-nsc-windows-*")
	if err != nil {
		return "", "", fmt.Errorf("mktemp: %w", err)
	}
	defer os.RemoveAll(tmpDir)

	metaPath := filepath.Join(tmpDir, "create.json")
	cidPath := filepath.Join(tmpDir, "create.cid")

	args := []string{
		"create",
		"--duration", ttl.String(),
		"--machine_type", machineType,
		// cidfile records the instance ID even if create later fails.
		"--cidfile", cidPath,
		"--purpose", fmt.Sprintf("burrow forgejo runner %s", runnerName),
		"--output", "plain",
		"--output_json_to", metaPath,
		"--wait_timeout", "6m",
	}
	args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL)

	// 8m outer budget: the CLI's own --wait_timeout is 6m, plus slack.
	createCtx, cancel := context.WithTimeout(ctx, 8*time.Minute)
	defer cancel()

	cmd := exec.CommandContext(createCtx, d.opts.BinaryPath, args...)
	var buf bytes.Buffer
	cmd.Stdout = &buf
	cmd.Stderr = &buf

	if err := cmd.Run(); err != nil {
		// If an instance was allocated before the failure, tear it down so
		// it does not leak for its full TTL.
		if created := strings.TrimSpace(mustReadFile(cidPath)); created != "" {
			d.destroyNSCInstance(context.Background(), runnerName, created)
		}
		if errors.Is(createCtx.Err(), context.DeadlineExceeded) {
			return "", buf.String(), fmt.Errorf("nsc create timed out after %s", 8*time.Minute)
		}
		return "", buf.String(), fmt.Errorf("nsc create failed: %w", err)
	}

	instanceID, err = readNSCCreateInstanceID(metaPath)
	if err != nil {
		return "", buf.String(), fmt.Errorf("nsc create output parse failed: %w", err)
	}
	if instanceID == "" {
		return "", buf.String(), errors.New("nsc create returned empty instance id")
	}
	return instanceID, buf.String(), nil
}
|
||||||
|
|
||||||
|
// resolveWindowsCredentials starts `nsc instance proxy -s rdp -o json` and
// polls its stdout (written to a temp file) until the JSON payload yields a
// non-empty RDP username and password, for up to 45s. The proxy process is
// killed once credentials are found or the poll window ends. The RDP
// credentials are reused for WinRM authentication.
func (d *Dispatcher) resolveWindowsCredentials(ctx context.Context, instanceID string) (username string, password string, err error) {
	tmpDir, err := os.MkdirTemp("", "forgejo-nsc-winproxy-*")
	if err != nil {
		return "", "", fmt.Errorf("mktemp: %w", err)
	}
	defer os.RemoveAll(tmpDir)

	// The proxy keeps running while we read its output file; routing stdout
	// through a file lets us poll partial output without pipe buffering.
	outPath := filepath.Join(tmpDir, "proxy.json")
	outFile, err := os.Create(outPath)
	if err != nil {
		return "", "", fmt.Errorf("create proxy output file: %w", err)
	}
	defer outFile.Close()

	var stderr bytes.Buffer
	args := []string{"instance", "proxy", instanceID, "-s", "rdp", "-o", "json"}
	args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL)

	// 90s outer bound on the proxy process itself (poll loop is 45s).
	proxyCtx, cancel := context.WithTimeout(ctx, 90*time.Second)
	defer cancel()

	cmd := exec.CommandContext(proxyCtx, d.opts.BinaryPath, args...)
	cmd.Stdout = outFile
	cmd.Stderr = &stderr

	if err := cmd.Start(); err != nil {
		return "", "", fmt.Errorf("start nsc instance proxy: %w", err)
	}

	// Reap in the background; waitDone's close orders the waitErr read.
	waitDone := make(chan struct{})
	var waitErr error
	go func() {
		waitErr = cmd.Wait()
		close(waitDone)
	}()

	var payload windowsProxyOutput
	deadline := time.Now().Add(45 * time.Second)
	for time.Now().Before(deadline) {
		// Output may contain non-JSON banner text; extract the JSON blob.
		raw, _ := os.ReadFile(outPath)
		jsonBlob := extractJSON(string(raw))
		if jsonBlob != "" {
			if err := json.Unmarshal([]byte(jsonBlob), &payload); err == nil {
				username = strings.TrimSpace(payload.RDP.Credentials.Username)
				password = strings.TrimSpace(payload.RDP.Credentials.Password)
				if username != "" && password != "" {
					break
				}
			}
		}
		// If the proxy died with an error before producing credentials,
		// surface that instead of polling until the deadline.
		select {
		case <-waitDone:
			if waitErr != nil {
				return "", "", fmt.Errorf("nsc instance proxy exited before credentials were available: %w\n%s", waitErr, stderr.String())
			}
		default:
		}
		time.Sleep(1 * time.Second)
	}

	// The proxy is long-running; kill it now that polling is over.
	if cmd.Process != nil {
		_ = cmd.Process.Kill()
	}
	<-waitDone

	if username == "" || password == "" {
		raw, _ := os.ReadFile(outPath)
		return "", "", fmt.Errorf("failed to resolve windows credentials from nsc instance proxy output\nstdout=%s\nstderr=%s", strings.TrimSpace(string(raw)), strings.TrimSpace(stderr.String()))
	}
	return username, password, nil
}
|
||||||
|
|
||||||
|
// probeWindowsWinRMService runs a one-shot `nsc instance proxy -s winrm
// --once` to confirm the instance exposes a winrm service before attempting
// bootstrap. A parsed endpoint in the output means the service exists; a
// "does not have service" message means the image is RDP-only; other
// failures (timeout, CLI error, no output) are reported distinctly.
func (d *Dispatcher) probeWindowsWinRMService(ctx context.Context, instanceID string) error {
	args := []string{"instance", "proxy", instanceID, "-s", "winrm", "-o", "json", "--once"}
	args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL)

	probeCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
	defer cancel()

	cmd := exec.CommandContext(probeCtx, d.opts.BinaryPath, args...)
	var out bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &out

	// The run error is classified below only after checking the output,
	// since a usable endpoint in the output means the probe succeeded.
	err := cmd.Run()
	raw := strings.TrimSpace(out.String())
	if endpoint, ok := parseProxyEndpoint(raw); ok && endpoint != "" {
		return nil
	}

	if indicatesMissingProxyService(raw, "winrm") {
		return fmt.Errorf("namespace windows non-interactive channel unavailable: instance does not expose winrm service (rdp-only)\n%s", raw)
	}

	if errors.Is(probeCtx.Err(), context.DeadlineExceeded) {
		return fmt.Errorf("timed out probing Namespace winrm service before bootstrap\n%s", raw)
	}

	if err != nil {
		return fmt.Errorf("nsc winrm service probe failed: %w\n%s", err, raw)
	}
	return fmt.Errorf("nsc winrm service probe did not yield endpoint output\n%s", raw)
}
|
||||||
|
|
||||||
|
func parseProxyEndpoint(raw string) (string, bool) {
|
||||||
|
jsonBlob := extractJSON(raw)
|
||||||
|
if jsonBlob == "" {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
var payload struct {
|
||||||
|
Endpoint string `json:"endpoint"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal([]byte(jsonBlob), &payload); err != nil {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
endpoint := strings.TrimSpace(payload.Endpoint)
|
||||||
|
if endpoint == "" {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
return endpoint, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// indicatesMissingProxyService reports whether raw proxy output contains the
// nsc error emitted when the instance lacks the named service, e.g.
// `does not have service "winrm"`. A blank service name never matches.
func indicatesMissingProxyService(raw string, service string) bool {
	name := strings.TrimSpace(service)
	if name == "" {
		return false
	}
	return strings.Contains(raw, fmt.Sprintf("does not have service %q", name))
}
|
||||||
|
|
||||||
|
// startWindowsWinRMPortForward launches `nsc instance port-forward` for the
// guest WinRM port (5985) and waits up to 45s for the CLI to print its
// "Listening on <addr>" banner. On success it returns the local endpoint and
// a stop func that cancels the forward, kills the process, and waits for it
// to exit; the caller must invoke stop. All failure paths kill and reap the
// child before returning.
func (d *Dispatcher) startWindowsWinRMPortForward(ctx context.Context, instanceID string) (endpoint string, stop func(), err error) {
	args := []string{"instance", "port-forward", instanceID, "--target_port", "5985"}
	args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL)

	forwardCtx, cancel := context.WithCancel(ctx)
	cmd := exec.CommandContext(forwardCtx, d.opts.BinaryPath, args...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		cancel()
		return "", nil, fmt.Errorf("port-forward stdout pipe: %w", err)
	}
	var stderr bytes.Buffer
	cmd.Stderr = &stderr

	if err := cmd.Start(); err != nil {
		cancel()
		return "", nil, fmt.Errorf("start nsc port-forward: %w", err)
	}

	// Reap in the background; the close of waitDone happens-before any
	// read of waitErr in the select below.
	waitDone := make(chan struct{})
	var waitErr error
	go func() {
		waitErr = cmd.Wait()
		close(waitDone)
	}()

	// Scan stdout for the listen banner. The scanner goroutine exits when
	// it finds the banner or when the killed process closes the pipe.
	endpointCh := make(chan string, 1)
	scanErrCh := make(chan error, 1)
	go func() {
		scanner := bufio.NewScanner(stdout)
		for scanner.Scan() {
			line := strings.TrimSpace(scanner.Text())
			if strings.HasPrefix(line, "Listening on ") {
				endpointCh <- strings.TrimSpace(strings.TrimPrefix(line, "Listening on "))
				return
			}
		}
		if err := scanner.Err(); err != nil {
			scanErrCh <- err
		}
	}()

	select {
	case endpoint = <-endpointCh:
		// Success: hand the caller a stop func that tears everything down.
		stop = func() {
			cancel()
			if cmd.Process != nil {
				_ = cmd.Process.Kill()
			}
			<-waitDone
		}
		return endpoint, stop, nil
	case err := <-scanErrCh:
		cancel()
		if cmd.Process != nil {
			_ = cmd.Process.Kill()
		}
		<-waitDone
		return "", nil, fmt.Errorf("failed reading port-forward output: %w", err)
	case <-waitDone:
		// Process exited before printing a banner.
		cancel()
		if waitErr != nil {
			return "", nil, fmt.Errorf("nsc port-forward exited early: %w\n%s", waitErr, stderr.String())
		}
		return "", nil, fmt.Errorf("nsc port-forward exited without endpoint\n%s", stderr.String())
	case <-time.After(45 * time.Second):
		cancel()
		if cmd.Process != nil {
			_ = cmd.Process.Kill()
		}
		<-waitDone
		return "", nil, fmt.Errorf("timed out waiting for WinRM port-forward endpoint\n%s", stderr.String())
	case <-ctx.Done():
		cancel()
		if cmd.Process != nil {
			_ = cmd.Process.Kill()
		}
		<-waitDone
		return "", nil, ctx.Err()
	}
}
|
||||||
|
|
||||||
|
// runWindowsWinRMPowerShell executes script as PowerShell on the instance
// reachable at endpoint (host:port of the local WinRM port-forward) using
// NTLM auth. Lacking a native Go WinRM client, it shells out to a python3
// virtualenv with pywinrm. Secrets and the script travel via environment
// variables (the script base64-encoded) so they never appear in argv.
func (d *Dispatcher) runWindowsWinRMPowerShell(ctx context.Context, endpoint, username, password, script string) error {
	pythonPath, err := exec.LookPath("python3")
	if err != nil {
		return fmt.Errorf("python3 is required for windows WinRM bootstrap: %w", err)
	}

	workdir := strings.TrimSpace(d.opts.WorkDir)
	if workdir == "" {
		workdir = "/tmp/forgejo-runner"
	}
	if err := os.MkdirAll(workdir, 0o755); err != nil {
		return fmt.Errorf("create workdir %s: %w", workdir, err)
	}

	// Reuse a per-workdir virtualenv across launches; create it only when
	// its python binary is missing.
	venvPath := filepath.Join(workdir, ".winrm-venv")
	venvPython := filepath.Join(venvPath, "bin", "python")
	if _, err := os.Stat(venvPython); err != nil {
		cmd := exec.CommandContext(ctx, pythonPath, "-m", "venv", venvPath)
		var out bytes.Buffer
		cmd.Stdout = &out
		cmd.Stderr = &out
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("create python venv for winrm failed: %w\n%s", err, out.String())
		}
	}

	// Install pywinrm into the venv only when it is not importable yet.
	ensurePyWinRM := `
import importlib.util, subprocess, sys
if importlib.util.find_spec("winrm") is None:
    subprocess.check_call([sys.executable, "-m", "pip", "install", "--quiet", "pywinrm"])
`
	ensureCmd := exec.CommandContext(ctx, venvPython, "-c", ensurePyWinRM)
	var ensureOut bytes.Buffer
	ensureCmd.Stdout = &ensureOut
	ensureCmd.Stderr = &ensureOut
	if err := ensureCmd.Run(); err != nil {
		return fmt.Errorf("install pywinrm failed: %w\n%s", err, ensureOut.String())
	}

	// The Python driver retries the WinRM connection for up to 300s while
	// the guest finishes booting, then runs the PowerShell script once and
	// exits with its status code (or 111 on connectivity timeout).
	// NOTE(review): traceback.print_exception(exc, file=...) with a single
	// exception argument requires Python >= 3.10 — confirm host version.
	runScript := `
import base64, os, sys, time, traceback, winrm

endpoint = os.environ["WINRM_ENDPOINT"]
user = os.environ["WINRM_USER"]
password = os.environ["WINRM_PASS"]
script = base64.b64decode(os.environ["WINRM_SCRIPT_B64"]).decode("utf-8")

deadline = time.time() + 300.0
last_err = None

while time.time() < deadline:
    try:
        session = winrm.Session(f"http://{endpoint}/wsman", auth=(user, password), transport="ntlm")
        result = session.run_ps(script)
        sys.stdout.write(result.std_out.decode("utf-8", errors="replace"))
        sys.stderr.write(result.std_err.decode("utf-8", errors="replace"))
        print(f"winrm_exit={result.status_code}")
        sys.exit(result.status_code)
    except Exception as err:
        last_err = err
        time.sleep(5.0)

sys.stderr.write("timed out waiting for WinRM connectivity after 300s\\n")
if last_err is not None:
    traceback.print_exception(last_err, file=sys.stderr)
sys.exit(111)
`
	runCmd := exec.CommandContext(ctx, venvPython, "-c", runScript)
	// Credentials and script are passed via env, never argv.
	runCmd.Env = append(os.Environ(),
		"WINRM_ENDPOINT="+endpoint,
		"WINRM_USER="+username,
		"WINRM_PASS="+password,
		"WINRM_SCRIPT_B64="+base64.StdEncoding.EncodeToString([]byte(script)),
	)
	var runOut bytes.Buffer
	runCmd.Stdout = &runOut
	runCmd.Stderr = &runOut
	if err := runCmd.Run(); err != nil {
		return fmt.Errorf("windows winrm bootstrap command failed: %w\n%s", err, runOut.String())
	}
	return nil
}
|
||||||
|
|
||||||
|
// windowsBootstrapScript renders the PowerShell script a fresh Windows
// instance executes (over WinRM) to bootstrap a Forgejo runner: download the
// pinned forgejo-runner release, write a runner.yaml, register against the
// instance, then run exactly one job ("one-job") and exit.
//
// runnerName identifies the runner; req carries the registration token,
// instance URL and labels; executor selects the label suffix ("" or "shell"
// normalize to "host"); workdir is the root directory on the Windows host
// (defaults to C:\burrow\forgejo-runner when blank).
func windowsBootstrapScript(runnerName string, req LaunchRequest, executor, workdir string) string {
	if strings.TrimSpace(workdir) == "" {
		workdir = `C:\burrow\forgejo-runner`
	}

	// Normalize the executor: the runner's native name for a shell executor
	// is "host", so both "" and "shell" map there.
	runnerExec := strings.TrimSpace(executor)
	if runnerExec == "" || runnerExec == "shell" {
		runnerExec = "host"
	}

	// Sanitize the runner name so it is safe as a single Windows path
	// component, then give each runner its own working directory.
	safeName := strings.NewReplacer(`\`, "-", ":", "-", "/", "-", " ", "-").Replace(runnerName)
	workRoot := strings.TrimRight(workdir, `\`) + `\` + safeName

	var b strings.Builder
	b.WriteString("$ErrorActionPreference = 'Stop'\n")
	b.WriteString("$ProgressPreference = 'SilentlyContinue'\n")
	// Older Windows images default to pre-TLS1.2 protocols; force TLS 1.2 so
	// the release download does not fail the handshake.
	b.WriteString("[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12\n")
	// Dynamic values are injected as single-quoted PowerShell literals via
	// powershellSingleQuote, preventing interpolation/injection through the
	// name, token, URL or labels.
	b.WriteString("$runnerName = " + powershellSingleQuote(runnerName) + "\n")
	b.WriteString("$runnerToken = " + powershellSingleQuote(req.Token) + "\n")
	b.WriteString("$instanceURL = " + powershellSingleQuote(req.InstanceURL) + "\n")
	b.WriteString("$labelsCsv = " + powershellSingleQuote(strings.Join(req.Labels, ",")) + "\n")
	b.WriteString("$runnerExec = " + powershellSingleQuote(runnerExec) + "\n")
	b.WriteString("$workRoot = " + powershellSingleQuote(workRoot) + "\n")
	// NOTE(review): inside the here-string below, the label lines produced by
	// "  - $_" must be indented at least as deep as the "labels:" key for the
	// generated runner.yaml to parse — confirm indentation survives any edit.
	b.WriteString(`
New-Item -Path $workRoot -ItemType Directory -Force | Out-Null
Set-Location $workRoot

$runnerVersion = "12.6.4"
$zipUrl = "https://code.forgejo.org/forgejo/runner/releases/download/v${runnerVersion}/forgejo-runner-${runnerVersion}-windows-amd64.zip"
$zipPath = Join-Path $workRoot "forgejo-runner.zip"
$extractDir = Join-Path $workRoot "forgejo-runner"

if (Test-Path $extractDir) {
    Remove-Item -Path $extractDir -Recurse -Force
}

Invoke-WebRequest -Uri $zipUrl -OutFile $zipPath
Expand-Archive -Path $zipPath -DestinationPath $extractDir -Force

$runnerExe = Join-Path $extractDir "forgejo-runner.exe"
if (-not (Test-Path $runnerExe)) {
    throw "Missing forgejo-runner.exe after extract: $runnerExe"
}

$labels = @()
foreach ($label in ($labelsCsv -split ",")) {
    $trimmed = $label.Trim()
    if ([string]::IsNullOrWhiteSpace($trimmed)) { continue }
    if ($trimmed.Contains(":")) {
        $labels += $trimmed
    } else {
        $labels += ("{0}:{1}" -f $trimmed, $runnerExec)
    }
}
if ($labels.Count -eq 0) {
    throw "No runner labels resolved for windows bootstrap"
}

$labelLines = ($labels | ForEach-Object { "  - $_" }) -join [Environment]::NewLine
$configPath = Join-Path $workRoot "runner.yaml"
$runnerYaml = @"
log:
  level: info
runner:
  file: .runner
  capacity: 1
  name: $runnerName
  labels:
$labelLines
cache:
  enabled: false
"@
Set-Content -Path $configPath -Value $runnerYaml -Encoding UTF8

$labelsArg = ($labels -join ",")
& $runnerExe register --no-interactive --instance $instanceURL --token $runnerToken --name $runnerName --labels $labelsArg --config $configPath
if ($LASTEXITCODE -ne 0) {
    throw ("forgejo-runner register failed: {0}" -f $LASTEXITCODE)
}

& $runnerExe one-job --config $configPath
if ($LASTEXITCODE -ne 0) {
    throw ("forgejo-runner one-job failed: {0}" -f $LASTEXITCODE)
}
`)
	return b.String()
}
|
||||||
|
|
@ -0,0 +1,59 @@
|
||||||
|
package nsc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWindowsWinRMScriptRoundTrip(t *testing.T) {
|
||||||
|
if os.Getenv("NSC_WINDOWS_E2E") != "1" {
|
||||||
|
t.Skip("set NSC_WINDOWS_E2E=1 to run Namespace Windows integration test")
|
||||||
|
}
|
||||||
|
|
||||||
|
nscBinary, err := exec.LookPath("nsc")
|
||||||
|
if err != nil {
|
||||||
|
t.Skipf("nsc not found in PATH: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
authCheck := exec.Command(nscBinary, "auth", "check-login")
|
||||||
|
if out, err := authCheck.CombinedOutput(); err != nil {
|
||||||
|
t.Skipf("nsc auth check-login failed: %v (%s)", err, strings.TrimSpace(string(out)))
|
||||||
|
}
|
||||||
|
|
||||||
|
machineType := strings.TrimSpace(os.Getenv("NSC_WINDOWS_E2E_MACHINE_TYPE"))
|
||||||
|
if machineType == "" {
|
||||||
|
machineType = "windows/amd64:4x8"
|
||||||
|
}
|
||||||
|
|
||||||
|
dispatcher, err := NewDispatcher(Options{
|
||||||
|
BinaryPath: nscBinary,
|
||||||
|
DefaultImage: "code.forgejo.org/forgejo/runner:11",
|
||||||
|
DefaultMachine: machineType,
|
||||||
|
DefaultDuration: 20 * time.Minute,
|
||||||
|
MaxParallel: 1,
|
||||||
|
WorkDir: t.TempDir(),
|
||||||
|
ComputeBaseURL: strings.TrimSpace(os.Getenv("NSC_COMPUTE_BASE_URL")),
|
||||||
|
Logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("NewDispatcher() error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
script := "Write-Output ('winrm-ok:' + $env:COMPUTERNAME)"
|
||||||
|
labels := []string{"namespace-profile-windows-medium"}
|
||||||
|
if err := dispatcher.launchWindowsScriptViaWinRM(ctx, "nsc-winrm-itest", 20*time.Minute, machineType, labels, script); err != nil {
|
||||||
|
if strings.Contains(err.Error(), "does not expose winrm service (rdp-only)") {
|
||||||
|
t.Skipf("namespace windows control channel is rdp-only: %v", err)
|
||||||
|
}
|
||||||
|
t.Fatalf("launchWindowsScriptViaWinRM() error: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
65
services/forgejo-nsc/internal/nsc/windows_winrm_test.go
Normal file
65
services/forgejo-nsc/internal/nsc/windows_winrm_test.go
Normal file
|
|
@ -0,0 +1,65 @@
|
||||||
|
package nsc
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestParseProxyEndpoint(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
raw string
|
||||||
|
want string
|
||||||
|
wantOK bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "plain json payload",
|
||||||
|
raw: `{"endpoint":"127.0.0.1:61234"}`,
|
||||||
|
want: "127.0.0.1:61234",
|
||||||
|
wantOK: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "json wrapped with extra output",
|
||||||
|
raw: `Connected.
|
||||||
|
{"endpoint":"127.0.0.1:61235","rdp":{"credentials":{"username":"runneradmin","password":"runneradmin"}}}`,
|
||||||
|
want: "127.0.0.1:61235",
|
||||||
|
wantOK: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "missing endpoint field",
|
||||||
|
raw: `{"rdp":{"credentials":{"username":"runneradmin"}}}`,
|
||||||
|
wantOK: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "non-json output",
|
||||||
|
raw: `Failed: instance does not have service "winrm"`,
|
||||||
|
wantOK: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tests {
|
||||||
|
tc := tc
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
got, ok := parseProxyEndpoint(tc.raw)
|
||||||
|
if ok != tc.wantOK {
|
||||||
|
t.Fatalf("parseProxyEndpoint(%q) ok=%v, want %v", tc.raw, ok, tc.wantOK)
|
||||||
|
}
|
||||||
|
if got != tc.want {
|
||||||
|
t.Fatalf("parseProxyEndpoint(%q) endpoint=%q, want %q", tc.raw, got, tc.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIndicatesMissingProxyService(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
raw := `Failed: instance does not have service "winrm"`
|
||||||
|
if !indicatesMissingProxyService(raw, "winrm") {
|
||||||
|
t.Fatalf("indicatesMissingProxyService should return true for missing winrm message")
|
||||||
|
}
|
||||||
|
if indicatesMissingProxyService(raw, "ssh") {
|
||||||
|
t.Fatalf("indicatesMissingProxyService should be false when service name does not match")
|
||||||
|
}
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue