Add Forgejo namespace workflow stack
Some checks are pending
Build Rust / Cargo Test (push) Waiting to run
Build Site / Next.js Build (push) Waiting to run

This commit is contained in:
Conrad Kramer 2026-03-18 02:49:55 -07:00
parent 482fd5d085
commit 865b676c99
68 changed files with 9709 additions and 11 deletions

View file

@ -1,6 +1,3 @@
[target.'cfg(unix)']
runner = "sudo -E"
[alias] # command aliases
rr = "run --release"
bb = "build --release"

View file

@ -0,0 +1,97 @@
# Forgejo workflow: build the Apple app for macOS and the iOS Simulator on a
# Namespace macOS runner profile, with Rust cross-targets per platform.
name: Build Apple
on:
  pull_request:
    branches:
      - "**"
  workflow_dispatch:
# One in-flight run per PR (or per SHA for manual dispatch); new pushes cancel
# the previous run.
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}
  cancel-in-progress: true
jobs:
  build:
    name: Build App (${{ matrix.platform }})
    runs-on: namespace-profile-macos-large
    strategy:
      # Let the other platform finish even if one fails.
      fail-fast: false
      matrix:
        include:
          - platform: macOS
            destination: platform=macOS
            rust-targets: x86_64-apple-darwin,aarch64-apple-darwin
          - platform: iOS Simulator
            destination: platform=iOS Simulator,name=iPhone 17 Pro
            rust-targets: aarch64-apple-ios-sim,x86_64-apple-ios
    env:
      CARGO_INCREMENTAL: 0
      RUST_BACKTRACE: short
    steps:
      - name: Checkout
        uses: https://code.forgejo.org/actions/checkout@v4
        with:
          token: ${{ github.token }}
          fetch-depth: 0
          submodules: recursive
      # Probe a list of known Xcode install paths, falling back to whatever
      # xcode-select reports; exports DEVELOPER_DIR for later steps.
      - name: Select Xcode
        shell: bash
        run: |
          set -euo pipefail
          candidates=(
            "/Applications/Xcode_26.1.app/Contents/Developer"
            "/Applications/Xcode_26_1.app/Contents/Developer"
            "/Applications/Xcode.app/Contents/Developer"
            "/Applications/Xcode/Xcode.app/Contents/Developer"
          )
          selected=""
          for candidate in "${candidates[@]}"; do
            if [[ -d "$candidate" ]]; then
              selected="$candidate"
              break
            fi
          done
          if [[ -z "$selected" ]] && command -v xcode-select >/dev/null 2>&1; then
            selected="$(xcode-select -p)"
          fi
          if [[ -z "$selected" ]]; then
            echo "::error ::Unable to locate an Xcode toolchain" >&2
            exit 1
          fi
          echo "DEVELOPER_DIR=$selected" >> "$GITHUB_ENV"
          DEVELOPER_DIR="$selected" /usr/bin/xcodebuild -version || true
      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
        with:
          toolchain: 1.85.0
          targets: ${{ matrix.rust-targets }}
      - name: Install Protobuf
        shell: bash
        run: |
          set -euo pipefail
          if ! command -v protoc >/dev/null 2>&1; then
            brew install protobuf
          fi
      # Unsigned CI build; code signing is disabled entirely.
      - name: Build
        shell: bash
        working-directory: Apple
        run: |
          set -euo pipefail
          xcodebuild build \
            -project Burrow.xcodeproj \
            -scheme App \
            -destination '${{ matrix.destination }}' \
            -skipPackagePluginValidation \
            -skipMacroValidation \
            -onlyUsePackageVersionsFromResolvedFile \
            -clonedSourcePackagesDirPath SourcePackages \
            -packageCachePath "$PWD/PackageCache" \
            -derivedDataPath "$PWD/DerivedData" \
            CODE_SIGNING_ALLOWED=NO \
            CODE_SIGNING_REQUIRED=NO \
            CODE_SIGN_IDENTITY="" \
            DEVELOPMENT_TEAM=""

View file

@ -0,0 +1,31 @@
# Forgejo workflow: run the Rust test suite on the self-hosted forge runner.
name: Build Rust
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - "**"
  workflow_dispatch:
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  rust:
    name: Cargo Test
    runs-on: [self-hosted, linux, x86_64, burrow-forge]
    steps:
      - name: Checkout
        uses: https://code.forgejo.org/actions/checkout@v4
        with:
          token: ${{ github.token }}
          fetch-depth: 0
      # Tests run inside the flake's CI dev shell so the toolchain is pinned
      # by flake.nix rather than the runner image.
      - name: Test
        shell: bash
        run: |
          set -euo pipefail
          nix develop .#ci -c cargo test --workspace --all-features

View file

@ -0,0 +1,31 @@
# Forgejo workflow: build the Next.js site on the self-hosted forge runner.
name: Build Site
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - "**"
  workflow_dispatch:
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  site:
    name: Next.js Build
    runs-on: [self-hosted, linux, x86_64, burrow-forge]
    steps:
      - name: Checkout
        uses: https://code.forgejo.org/actions/checkout@v4
        with:
          token: ${{ github.token }}
          fetch-depth: 0
      # npm toolchain comes from the flake's CI dev shell; the site lives in
      # the site/ subdirectory.
      - name: Build
        shell: bash
        run: |
          set -euo pipefail
          nix develop .#ci -c bash -lc 'cd site && npm install && npm run build'

1
.gitignore vendored
View file

@ -14,4 +14,5 @@ target/
tmp/
*.db
*.sqlite3
*.sock

38
CONSTITUTION.md Normal file
View file

@ -0,0 +1,38 @@
# Burrow Constitution
1. Mission
Burrow exists to build a proper VPN: fast, inspectable, deployable on infrastructure the project controls, and legible enough that future contributors can extend it without guesswork.
2. Commitments
- Protocol work must favor correctness over novelty. Burrow does not claim support for a transport or control-plane feature until the wire format, state handling, and recovery behavior are implemented and tested.
- Security is a design constraint, not a cleanup phase. Key material, bootstrap credentials, control-plane tokens, and routing policy must have explicit storage and rotation paths.
- Performance matters. Burrow should avoid needless copies, hidden blocking, and ad hoc process graphs that make packet forwarding or control-plane convergence harder to reason about.
- Source, infrastructure, and release logic live in the repository. If the forge cannot be rebuilt from the tree, the work is incomplete.
- Non-trivial changes require a Burrow Evolution Proposal. Durable rationale belongs in the repository, not only in chat.
3. Infrastructure
Burrow controls its own forge, runners, deployment automation, and edge configuration for `burrow.net` and `burrow.rs`.
- Dedicated compute is preferred over SaaS dependencies when the dependency would hold release, source, or identity authority.
- Secrets may be bootstrapped from local intake for initial bring-up, but long-lived operation must converge on encrypted, versioned secret handling.
- Production access must be attributable. Automation identities, SSH keys, and service accounts must be named and documented.
4. Contributors
- Read this constitution before drafting product, protocol, or infrastructure changes.
- Capture intent, testing expectations, and rollback procedures in proposals.
- Prefer reversible migrations. If a change is destructive, document the preconditions and teardown plan first.
- Security-sensitive work requires explicit reviewer attention, even when the implementation is performed by an agent.
5. Governance
- Burrow Evolution Proposals (BEPs) are the primary design record for architectural, protocol, forge, and deployment changes.
- Accepted proposals are authoritative until superseded.
- Constitutional changes require a dedicated proposal that quotes the affected text and records the decision.
6. Origin
Burrow started as a firewall-burrowing client and now carries its own transport, daemon, mesh, and control-plane work. This constitution exists so the project can finish that evolution coherently.

View file

@ -1,21 +1,23 @@
tun := $(shell ifconfig -l | sed 's/ /\n/g' | grep utun | tail -n 1)
cargo_console := RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features
cargo_norm := RUST_BACKTRACE=1 RUST_LOG=debug cargo run
cargo_console := env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features --
cargo_norm := env RUST_BACKTRACE=1 RUST_LOG=debug cargo run --
sudo_cargo_console := sudo -E env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features --
sudo_cargo_norm := sudo -E env RUST_BACKTRACE=1 RUST_LOG=debug cargo run --
check:
@cargo check
build:
@cargo run build
@cargo build
daemon-console:
@$(cargo_console) daemon
@$(sudo_cargo_console) daemon
daemon:
@$(cargo_norm) daemon
@$(sudo_cargo_norm) daemon
start:
@$(cargo_norm) start
@$(sudo_cargo_norm) start
stop:
@$(cargo_norm) stop

View file

@ -5,10 +5,19 @@
Burrow is an open source tool for burrowing through firewalls, built by teenagers at [Hack Club](https://hackclub.com/).
`burrow` provides a simple command-line tool to open virtual interfaces and direct traffic through them.
Routine verification now runs unprivileged with `cargo test --workspace --all-features`; only tunnel startup needs elevation.
The repository now carries its own design and deployment record:
- [Constitution](./CONSTITUTION.md)
- [Burrow Evolution](./evolution/README.md)
- [WireGuard Rust Lineage](./docs/WIREGUARD_LINEAGE.md)
- [Protocol Roadmap](./docs/PROTOCOL_ROADMAP.md)
- [Forward Email Runbook](./docs/FORWARDEMAIL.md)
## Contributing
Burrow is fully open source, you can fork the repo and start contributing easily. For more information and in-depth discussions, visit the `#burrow` channel on the [Hack Club Slack](https://hackclub.com/slack/), here you can ask for help and talk with other people interested in burrow! Checkout [GETTING_STARTED.md](./docs/GETTING_STARTED.md) for build instructions and [GTK_APP.md](./docs/GTK_APP.md) for the Linux app.
Burrow is fully open source; you can fork the repo and start contributing easily. For more information and in-depth discussions, visit the `#burrow` channel on the [Hack Club Slack](https://hackclub.com/slack/), where you can ask for help and talk with other people interested in Burrow. Check out [GETTING_STARTED.md](./docs/GETTING_STARTED.md) for build instructions and [GTK_APP.md](./docs/GTK_APP.md) for the Linux app. Forge and deployment scaffolding live in [`flake.nix`](./flake.nix), [`nixos/`](./nixos), and [`.forgejo/workflows/`](./.forgejo/workflows/). Hosted mail backup operations live in [`docs/FORWARDEMAIL.md`](./docs/FORWARDEMAIL.md) and [`Tools/forwardemail-custom-s3.sh`](./Tools/forwardemail-custom-s3.sh).
The project structure is divided in the following folders:

95
Scripts/_burrow-flake.sh Executable file
View file

@ -0,0 +1,95 @@
#!/usr/bin/env bash
# Abort the script with a diagnostic unless the named command is on PATH.
burrow_require_cmd() {
  local cmd="$1"
  command -v "${cmd}" >/dev/null 2>&1 && return 0
  echo "missing required command: ${cmd}" >&2
  exit 1
}
# Remove every temporary flake copy registered in BURROW_FLAKE_TMPDIRS;
# a no-op when the array is empty.
burrow_cleanup_flake_tmpdirs() {
  [[ "${#BURROW_FLAKE_TMPDIRS[@]}" -gt 0 ]] || return 0
  rm -rf "${BURROW_FLAKE_TMPDIRS[@]}"
}
# Normalize a flake input into a usable flake reference, printed on stdout.
# Explicit flake URL schemes pass through untouched. Anything else is treated
# as a local directory and copied (minus VCS metadata and build residue) into
# a fresh tmpdir under ~/.cache/burrow, so nix evaluates a stable tree that is
# independent of the working checkout's dirty state.
# Side effect: appends the tmpdir to BURROW_FLAKE_TMPDIRS so the caller's
# cleanup trap (burrow_cleanup_flake_tmpdirs) can remove it later.
burrow_prepare_flake_ref() {
  local input="${1:-.}"
  case "${input}" in
    path:*|git+*|github:*|tarball+*|http://*|https://*)
      printf '%s\n' "${input}"
      return 0
      ;;
  esac
  local resolved
  resolved="$(cd "${input}" && pwd)"
  local cache_root="${HOME}/.cache/burrow"
  mkdir -p "${cache_root}"
  local copy_root
  copy_root="$(mktemp -d "${cache_root}/flake-XXXXXX")"
  mkdir -p "${copy_root}/repo"
  # Excludes keep the copy small and stop local build outputs from changing
  # the flake's source hash between invocations.
  rsync -a \
    --delete \
    --exclude '.git' \
    --exclude '.direnv' \
    --exclude 'result' \
    --exclude 'burrow.sock' \
    --exclude 'node_modules' \
    --exclude 'target' \
    --exclude 'build' \
    "${resolved}/" "${copy_root}/repo/"
  BURROW_FLAKE_TMPDIRS+=("${copy_root}")
  printf 'path:%s/repo\n' "${copy_root}"
}
# Resolve a nix store path to the concrete disk-image file inside it and print
# that path. A plain file is returned as-is; a directory is searched for the
# first (lexicographically sorted) *.raw/*.img artifact, compressed or not.
# Exits 1 with a diagnostic when no artifact can be located.
burrow_resolve_image_artifact() {
  local store_path="$1"
  if [[ -f "${store_path}" ]]; then
    printf '%s\n' "${store_path}"
    return 0
  fi
  if [[ -d "${store_path}" ]]; then
    local candidate
    # Sorting makes the pick deterministic when multiple artifacts exist.
    candidate="$(
      find "${store_path}" -type f \
        \( -name '*.raw' -o -name '*.raw.*' -o -name '*.img' -o -name '*.img.*' \) \
        | sort \
        | head -n1
    )"
    if [[ -n "${candidate}" ]]; then
      printf '%s\n' "${candidate}"
      return 0
    fi
  fi
  echo "unable to locate disk image artifact under ${store_path}" >&2
  exit 1
}
# Map an artifact filename extension to the compression name used downstream
# by the image uploader; prints an empty line for uncompressed artifacts.
burrow_detect_compression() {
  local artifact="$1"
  if [[ "${artifact}" == *.bz2 ]]; then
    printf 'bz2\n'
  elif [[ "${artifact}" == *.xz ]]; then
    printf 'xz\n'
  elif [[ "${artifact}" == *.zst || "${artifact}" == *.zstd ]]; then
    printf 'zstd\n'
  else
    printf '\n'
  fi
}

View file

@ -0,0 +1,113 @@
#!/usr/bin/env bash
# Copy the minimum Burrow forge bootstrap secrets (Forgejo admin password,
# runner agent SSH key) onto the target host under /var/lib/burrow/intake,
# with the ownership/permissions the NixOS services expect.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
usage() {
  cat <<'EOF'
Usage: Scripts/bootstrap-forge-intake.sh [options]
Copy the minimum Burrow forge bootstrap secrets onto the target host under
/var/lib/burrow/intake with the ownership expected by the NixOS services.
Options:
  --host <user@host>       SSH target (default: root@git.burrow.net)
  --ssh-key <path>         SSH private key used to reach the host
                           (default: intake/agent_at_burrow_net_ed25519)
  --password-file <path>   Forgejo admin bootstrap password file
                           (default: intake/forgejo_pass_contact_at_burrow_net.txt)
  --agent-key-file <path>  Agent SSH private key copied for runner bootstrap
                           (default: intake/agent_at_burrow_net_ed25519)
  --no-verify              Skip remote ls/stat verification after install
  -h, --help               Show this help text
EOF
}
# Defaults are overridable via environment, then via flags below.
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
PASSWORD_FILE="${BURROW_FORGE_PASSWORD_FILE:-${REPO_ROOT}/intake/forgejo_pass_contact_at_burrow_net.txt}"
AGENT_KEY_FILE="${BURROW_FORGE_AGENT_KEY_FILE:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
VERIFY=1
while [[ $# -gt 0 ]]; do
  case "$1" in
    --host)
      HOST="${2:?missing value for --host}"
      shift 2
      ;;
    --ssh-key)
      SSH_KEY="${2:?missing value for --ssh-key}"
      shift 2
      ;;
    --password-file)
      PASSWORD_FILE="${2:?missing value for --password-file}"
      shift 2
      ;;
    --agent-key-file)
      AGENT_KEY_FILE="${2:?missing value for --agent-key-file}"
      shift 2
      ;;
    --no-verify)
      VERIFY=0
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done
mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"
# Fail fast if any input secret is missing or empty before touching the host.
for path in "${SSH_KEY}" "${PASSWORD_FILE}" "${AGENT_KEY_FILE}"; do
  if [[ ! -s "${path}" ]]; then
    echo "required file missing or empty: ${path}" >&2
    exit 1
  fi
done
# Pinned known-hosts file plus accept-new: first contact records the host key,
# later runs fail on a key change.
ssh_opts=(
  -i "${SSH_KEY}"
  -o IdentitiesOnly=yes
  -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}"
  -o StrictHostKeyChecking=accept-new
)
# Stage files in a remote tmpdir, then install them atomically with the right
# owner/mode; the tmpdir is removed by the EXIT trap even on failure.
remote_tmp="$(ssh "${ssh_opts[@]}" "${HOST}" "mktemp -d")"
cleanup() {
  if [[ -n "${remote_tmp:-}" ]]; then
    ssh "${ssh_opts[@]}" "${HOST}" "rm -rf '${remote_tmp}'" >/dev/null 2>&1 || true
  fi
}
trap cleanup EXIT
scp "${ssh_opts[@]}" \
  "${PASSWORD_FILE}" \
  "${AGENT_KEY_FILE}" \
  "${HOST}:${remote_tmp}/"
# NOTE: ${remote_tmp} and the $(basename ...) substitutions expand LOCALLY
# (double-quoted command string) before the remote shell runs it.
ssh "${ssh_opts[@]}" "${HOST}" "
  set -euo pipefail
  install -d -m 0755 /var/lib/burrow/intake
  install -m 0400 -o forgejo -g forgejo '${remote_tmp}/$(basename "${PASSWORD_FILE}")' /var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt
  install -m 0400 -o root -g root '${remote_tmp}/$(basename "${AGENT_KEY_FILE}")' /var/lib/burrow/intake/agent_at_burrow_net_ed25519
"
if [[ "${VERIFY}" -eq 1 ]]; then
  ssh "${ssh_opts[@]}" "${HOST}" "
    set -euo pipefail
    ls -l \
      /var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt \
      /var/lib/burrow/intake/agent_at_burrow_net_ed25519
  "
fi
echo "Burrow forge bootstrap intake sync complete (host=${HOST})."

143
Scripts/check-forge-host.sh Executable file
View file

@ -0,0 +1,143 @@
#!/usr/bin/env bash
# Post-boot verification pass for the Burrow forge host: streams a check
# script over SSH that inspects systemd unit health and probes the local
# HTTP endpoints. Exits non-zero if a required service is unhealthy.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
usage() {
  cat <<'EOF'
Usage: Scripts/check-forge-host.sh [options]
Run a post-boot verification pass against the Burrow forge host.
Options:
  --host <user@host>  SSH target (default: root@git.burrow.net)
  --ssh-key <path>    SSH private key (default: intake/agent_at_burrow_net_ed25519)
  --expect-nsc        Fail if forgejo-nsc services are not active
  -h, --help          Show this help text
EOF
}
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
EXPECT_NSC=0
while [[ $# -gt 0 ]]; do
  case "$1" in
    --host)
      HOST="${2:?missing value for --host}"
      shift 2
      ;;
    --ssh-key)
      SSH_KEY="${2:?missing value for --ssh-key}"
      shift 2
      ;;
    --expect-nsc)
      EXPECT_NSC=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done
mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"
if [[ ! -f "${SSH_KEY}" ]]; then
  echo "forge SSH key not found: ${SSH_KEY}" >&2
  exit 1
fi
# EXPECT_NSC rides along as an environment assignment prefixed to the remote
# command; the verification script itself arrives on stdin via the heredoc.
# The quoted 'EOF' delimiter means nothing below expands locally.
ssh \
  -i "${SSH_KEY}" \
  -o IdentitiesOnly=yes \
  -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \
  -o StrictHostKeyChecking=accept-new \
  "${HOST}" \
  EXPECT_NSC="${EXPECT_NSC}" \
  'bash -s' <<'EOF'
set -euo pipefail
base_services=(
  forgejo.service
  caddy.service
  burrow-forgejo-bootstrap.service
  burrow-forgejo-runner-bootstrap.service
  burrow-forgejo-runner.service
)
nsc_services=(
  forgejo-nsc-dispatcher.service
  forgejo-nsc-autoscaler.service
)
show_service() {
  local service="$1"
  systemctl show \
    --no-pager \
    --property Id \
    --property LoadState \
    --property UnitFileState \
    --property ActiveState \
    --property SubState \
    --property Result \
    "${service}"
}
service_is_healthy() {
  local service="$1"
  local active_state
  local result
  local unit_type
  active_state="$(systemctl show --property ActiveState --value "${service}")"
  result="$(systemctl show --property Result --value "${service}")"
  unit_type="$(systemctl show --property Type --value "${service}")"
  if [[ "${active_state}" == "active" ]]; then
    return 0
  fi
  if [[ "${unit_type}" == "oneshot" && "${active_state}" == "inactive" && "${result}" == "success" ]]; then
    return 0
  fi
  return 1
}
for service in "${base_services[@]}"; do
  echo "== ${service} =="
  show_service "${service}"
  if ! service_is_healthy "${service}"; then
    echo "required service is not active: ${service}" >&2
    exit 1
  fi
done
for service in "${nsc_services[@]}"; do
  echo "== ${service} =="
  show_service "${service}" || true
  if [[ "${EXPECT_NSC}" == "1" && "$(systemctl is-active "${service}" 2>/dev/null || true)" != "active" ]]; then
    echo "required NSC service is not active: ${service}" >&2
    exit 1
  fi
done
echo "== intake =="
ls -l /var/lib/burrow/intake || true
if command -v curl >/dev/null 2>&1; then
  echo "== http-local =="
  curl -fsS -o /dev/null -w 'forgejo_login %{http_code}\n' http://127.0.0.1:3000/user/login
  curl -fsS -o /dev/null -H 'Host: burrow.net' -w 'burrow_root %{http_code}\n' http://127.0.0.1/
  curl -fsS -o /dev/null -H 'Host: git.burrow.net' -w 'git_login %{http_code}\n' http://127.0.0.1/user/login
fi
EOF

View file

@ -0,0 +1,165 @@
#!/usr/bin/env bash
# Upsert a Cloudflare A record via the v4 API. The token is read from a file
# and passed only in a request header, so it never appears on the process
# argument list.
set -euo pipefail
usage() {
  cat <<'EOF'
Usage: Scripts/cloudflare-upsert-a-record.sh --zone <zone> --name <fqdn> --ipv4 <address> [options]
Upsert a DNS-only or proxied Cloudflare A record without putting the API token on
the process list.
Options:
  --zone <zone>           Cloudflare zone name, for example burrow.net
  --name <fqdn>           Fully-qualified DNS record name
  --ipv4 <address>        IPv4 address for the A record
  --token-file <path>     Cloudflare API token file
                          default: intake/cloudflare-token.txt
  --ttl <seconds|auto>    Record TTL, or auto
                          default: auto
  --proxied <true|false>  Whether to proxy through Cloudflare
                          default: false
  -h, --help              Show this help
EOF
}
ZONE_NAME=""
RECORD_NAME=""
IPV4=""
TOKEN_FILE="intake/cloudflare-token.txt"
TTL_VALUE="auto"
PROXIED="false"
while [[ $# -gt 0 ]]; do
  case "$1" in
    --zone)
      ZONE_NAME="${2:?missing value for --zone}"
      shift 2
      ;;
    --name)
      RECORD_NAME="${2:?missing value for --name}"
      shift 2
      ;;
    --ipv4)
      IPV4="${2:?missing value for --ipv4}"
      shift 2
      ;;
    --token-file)
      TOKEN_FILE="${2:?missing value for --token-file}"
      shift 2
      ;;
    --ttl)
      TTL_VALUE="${2:?missing value for --ttl}"
      shift 2
      ;;
    --proxied)
      PROXIED="${2:?missing value for --proxied}"
      shift 2
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "Unknown argument: $1" >&2
      usage >&2
      exit 2
      ;;
  esac
done
if [[ -z "${ZONE_NAME}" || -z "${RECORD_NAME}" || -z "${IPV4}" ]]; then
  usage >&2
  exit 2
fi
if [[ ! -f "${TOKEN_FILE}" ]]; then
  echo "Cloudflare token file not found: ${TOKEN_FILE}" >&2
  exit 1
fi
# Shape check only: validates dotted-quad form, not per-octet range (999.0.0.1
# would pass). The Cloudflare API rejects out-of-range octets anyway.
if [[ ! "${IPV4}" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
  echo "Invalid IPv4 address: ${IPV4}" >&2
  exit 1
fi
case "${PROXIED}" in
  true|false)
    ;;
  *)
    echo "--proxied must be true or false" >&2
    exit 1
    ;;
esac
# Cloudflare encodes "automatic TTL" as the literal value 1.
case "${TTL_VALUE}" in
  auto)
    TTL_JSON=1
    ;;
  ''|*[!0-9]*)
    echo "--ttl must be a number of seconds or auto" >&2
    exit 1
    ;;
  *)
    TTL_JSON="${TTL_VALUE}"
    ;;
esac
TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")"
if [[ -z "${TOKEN}" ]]; then
  echo "Cloudflare token file is empty: ${TOKEN_FILE}" >&2
  exit 1
fi
# Small wrapper around the Cloudflare v4 API; body is optional third argument.
cf_api() {
  local method="$1"
  local path="$2"
  local body="${3-}"
  if [[ -n "${body}" ]]; then
    curl -fsS -X "${method}" \
      -H "Authorization: Bearer ${TOKEN}" \
      -H "Content-Type: application/json" \
      --data "${body}" \
      "https://api.cloudflare.com/client/v4${path}"
  else
    curl -fsS -X "${method}" \
      -H "Authorization: Bearer ${TOKEN}" \
      -H "Content-Type: application/json" \
      "https://api.cloudflare.com/client/v4${path}"
  fi
}
zone_lookup="$(cf_api GET "/zones?name=${ZONE_NAME}&status=active")"
zone_id="$(jq -r '.result[0].id // empty' <<<"${zone_lookup}")"
if [[ -z "${zone_id}" ]]; then
  echo "Active Cloudflare zone not found: ${ZONE_NAME}" >&2
  exit 1
fi
# Build the JSON payload with jq so quoting/typing is handled safely
# (proxied and ttl are passed as raw JSON values, not strings).
payload="$(jq -cn \
  --arg type "A" \
  --arg name "${RECORD_NAME}" \
  --arg content "${IPV4}" \
  --argjson proxied "${PROXIED}" \
  --argjson ttl "${TTL_JSON}" \
  '{type: $type, name: $name, content: $content, proxied: $proxied, ttl: $ttl}')"
# Upsert: PUT over the existing record if one matches type+name, else POST.
record_lookup="$(cf_api GET "/zones/${zone_id}/dns_records?type=A&name=${RECORD_NAME}")"
record_id="$(jq -r '.result[0].id // empty' <<<"${record_lookup}")"
if [[ -n "${record_id}" ]]; then
  result="$(cf_api PUT "/zones/${zone_id}/dns_records/${record_id}" "${payload}")"
  action="updated"
else
  result="$(cf_api POST "/zones/${zone_id}/dns_records" "${payload}")"
  action="created"
fi
# Report success, or surface the API's error list via jq's halt_error.
jq -r --arg action "${action}" '
  if .success != true then
    .errors | tostring | halt_error(1)
  else
    "Cloudflare DNS " + $action + ": " + .result.name + " -> " + .result.content +
    " (proxied=" + (.result.proxied | tostring) + ", ttl=" + (.result.ttl | tostring) + ")"
  end
' <<<"${result}"

100
Scripts/forge-deploy.sh Executable file
View file

@ -0,0 +1,100 @@
#!/usr/bin/env bash
# Standardized remote deploy for the Burrow forge host: runs nixos-rebuild
# (test or switch) against a clean copy of the local flake, building on and
# deploying to the forge host itself.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# shellcheck source=Scripts/_burrow-flake.sh
source "${SCRIPT_DIR}/_burrow-flake.sh"
usage() {
  cat <<'EOF'
Usage: Scripts/forge-deploy.sh [--test|--switch] [--flake-attr <attr>] [--allow-dirty]
Standardized remote deploy path for the Burrow forge host.
Defaults:
  --switch
  --flake-attr burrow-forge
Environment:
  BURROW_FORGE_HOST    root@git.burrow.net
  BURROW_FORGE_SSH_KEY intake/agent_at_burrow_net_ed25519
EOF
}
MODE="switch"
FLAKE_ATTR="burrow-forge"
ALLOW_DIRTY=0
# Populated by burrow_prepare_flake_ref; cleaned on exit.
BURROW_FLAKE_TMPDIRS=()
cleanup() {
  burrow_cleanup_flake_tmpdirs
}
trap cleanup EXIT
while [[ $# -gt 0 ]]; do
  case "$1" in
    --test)
      MODE="test"
      shift
      ;;
    --switch)
      MODE="switch"
      shift
      ;;
    --flake-attr)
      FLAKE_ATTR="${2:?missing value for --flake-attr}"
      shift 2
      ;;
    --allow-dirty)
      ALLOW_DIRTY=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "Unknown argument: $1" >&2
      usage >&2
      exit 2
      ;;
  esac
done
REPO_ROOT="$(git rev-parse --show-toplevel)"
cd "${REPO_ROOT}"
# Deploys come from committed state unless explicitly overridden.
if [[ ${ALLOW_DIRTY} -ne 1 ]] && [[ -n "$(git status --short)" ]]; then
  echo "Refusing to deploy from a dirty checkout. Commit first, or pass --allow-dirty for incident-only work." >&2
  exit 1
fi
FORGE_HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
FORGE_SSH_KEY="${BURROW_FORGE_SSH_KEY:-}"
# Key resolution order: env var, repo intake/, then ~/.ssh.
if [[ -z "${FORGE_SSH_KEY}" ]]; then
  if [[ -f "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" ]]; then
    FORGE_SSH_KEY="${REPO_ROOT}/intake/agent_at_burrow_net_ed25519"
  else
    FORGE_SSH_KEY="${HOME}/.ssh/agent_at_burrow_net_ed25519"
  fi
fi
if [[ ! -f "${FORGE_SSH_KEY}" ]]; then
  echo "Forge SSH key not found at ${FORGE_SSH_KEY}." >&2
  echo "Set BURROW_FORGE_SSH_KEY or place the agent key in intake/." >&2
  exit 1
fi
FORGE_KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
mkdir -p "$(dirname "${FORGE_KNOWN_HOSTS_FILE}")"
# nixos-rebuild forwards these SSH options to every remote invocation.
export NIX_SSHOPTS="-i ${FORGE_SSH_KEY} -o IdentitiesOnly=yes -o UserKnownHostsFile=${FORGE_KNOWN_HOSTS_FILE} -o StrictHostKeyChecking=accept-new"
flake_ref="$(burrow_prepare_flake_ref "${REPO_ROOT}")"
# Build on the forge host and activate there; nixos-rebuild itself comes from
# nixpkgs via `nix shell` so the local machine needs no NixOS tooling.
nix --extra-experimental-features "nix-command flakes" shell nixpkgs#nixos-rebuild -c \
  nixos-rebuild "${MODE}" \
  --flake "${flake_ref}#${FLAKE_ATTR}" \
  --build-host "${FORGE_HOST}" \
  --target-host "${FORGE_HOST}"

View file

@ -0,0 +1,327 @@
#!/usr/bin/env bash
# Build a raw Burrow NixOS image (or take a prebuilt artifact) and upload it
# into Hetzner Cloud as a labeled snapshot, then print the snapshot ID.
# Snapshots are labeled with the config name and a stable output hash so
# repeated runs can be deduplicated with --no-update.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# shellcheck source=Scripts/_burrow-flake.sh
source "${SCRIPT_DIR}/_burrow-flake.sh"
DEFAULT_CONFIG="burrow-forge"
DEFAULT_FLAKE="."
DEFAULT_LOCATION="hel1"
DEFAULT_ARCHITECTURE="x86"
DEFAULT_TOKEN_FILE="${REPO_ROOT}/intake/hetzner-api-token.txt"
# Each setting: environment override first, then built-in default; flags below
# override both.
CONFIG="${HCLOUD_IMAGE_CONFIG:-${DEFAULT_CONFIG}}"
FLAKE="${HCLOUD_IMAGE_FLAKE:-${DEFAULT_FLAKE}}"
LOCATION="${HCLOUD_IMAGE_LOCATION:-${DEFAULT_LOCATION}}"
ARCHITECTURE="${HCLOUD_IMAGE_ARCHITECTURE:-${DEFAULT_ARCHITECTURE}}"
TOKEN_FILE="${HCLOUD_TOKEN_FILE:-${DEFAULT_TOKEN_FILE}}"
DESCRIPTION="${HCLOUD_IMAGE_DESCRIPTION:-}"
UPLOAD_SERVER_TYPE="${HCLOUD_IMAGE_UPLOAD_SERVER_TYPE:-}"
UPLOAD_VERBOSE="${HCLOUD_IMAGE_UPLOAD_VERBOSE:-0}"
ARTIFACT_PATH_INPUT=""
OUTPUT_HASH=""
NO_UPDATE=0
BUILDER_SPEC="${HCLOUD_IMAGE_BUILDER_SPEC:-}"
EXTRA_LABELS=()
NIX_BUILD_FLAGS=()
BURROW_FLAKE_TMPDIRS=()
LOCAL_STORE_DIR=""
usage() {
  cat <<'EOF'
Usage: Scripts/hcloud-upload-nixos-image.sh [options]
Build a raw Burrow NixOS image and upload it into Hetzner Cloud as a snapshot.
Options:
  --config <name>           images.<name>-raw output to build (default: burrow-forge)
  --flake <path>            Flake path to build from (default: .)
  --location <code>         Hetzner location for the temporary upload server (default: hel1)
  --architecture <x86|arm>  CPU architecture of the image (default: x86)
  --server-type <name>      Hetzner server type for the temporary upload server
  --token-file <path>       Hetzner API token file (default: intake/hetzner-api-token.txt)
  --artifact-path <path>    Prebuilt raw image artifact to upload directly
  --output-hash <hash>      Stable hash label for --artifact-path uploads
  --builder-spec <string>   Complete builders string passed to nix build
  --description <text>      Description for the resulting snapshot
  --upload-verbose <n>      Pass -v N times to hcloud-upload-image
  --label key=value         Extra Hetzner image label (repeatable)
  --nix-flag <arg>          Extra argument passed to nix build (repeatable)
  --no-update               Reuse an existing snapshot with the same config/output hash
  -h, --help                Show this help text
EOF
}
while [[ $# -gt 0 ]]; do
  case "$1" in
    --config)
      CONFIG="${2:?missing value for --config}"
      shift 2
      ;;
    --flake)
      FLAKE="${2:?missing value for --flake}"
      shift 2
      ;;
    --location)
      LOCATION="${2:?missing value for --location}"
      shift 2
      ;;
    --architecture)
      ARCHITECTURE="${2:?missing value for --architecture}"
      shift 2
      ;;
    --server-type)
      UPLOAD_SERVER_TYPE="${2:?missing value for --server-type}"
      shift 2
      ;;
    --token-file)
      TOKEN_FILE="${2:?missing value for --token-file}"
      shift 2
      ;;
    --artifact-path)
      ARTIFACT_PATH_INPUT="${2:?missing value for --artifact-path}"
      shift 2
      ;;
    --output-hash)
      OUTPUT_HASH="${2:?missing value for --output-hash}"
      shift 2
      ;;
    --builder-spec)
      BUILDER_SPEC="${2:?missing value for --builder-spec}"
      shift 2
      ;;
    --description)
      DESCRIPTION="${2:?missing value for --description}"
      shift 2
      ;;
    --upload-verbose)
      UPLOAD_VERBOSE="${2:?missing value for --upload-verbose}"
      shift 2
      ;;
    --label)
      EXTRA_LABELS+=("${2:?missing value for --label}")
      shift 2
      ;;
    --nix-flag)
      NIX_BUILD_FLAGS+=("${2:?missing value for --nix-flag}")
      shift 2
      ;;
    --no-update)
      NO_UPDATE=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done
cleanup() {
  burrow_cleanup_flake_tmpdirs
  if [[ -n "${LOCAL_STORE_DIR}" && -d "${LOCAL_STORE_DIR}" ]]; then
    rm -rf "${LOCAL_STORE_DIR}" >/dev/null 2>&1 || true
  fi
}
trap cleanup EXIT
burrow_require_cmd nix
burrow_require_cmd curl
burrow_require_cmd python3
burrow_require_cmd rsync
if [[ ! -f "${TOKEN_FILE}" ]]; then
  echo "Hetzner API token file not found: ${TOKEN_FILE}" >&2
  exit 1
fi
HCLOUD_TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")"
if [[ -z "${HCLOUD_TOKEN}" ]]; then
  echo "Hetzner API token file is empty: ${TOKEN_FILE}" >&2
  exit 1
fi
flake_ref="$(burrow_prepare_flake_ref "${FLAKE}")"
if [[ -z "${DESCRIPTION}" ]]; then
  DESCRIPTION="Burrow ${CONFIG} $(date -u +%Y-%m-%dT%H:%M:%SZ)"
fi
printf 'Building raw image for %s from %s\n' "${CONFIG}" "${flake_ref}" >&2
# When building remotely via --builder-spec (and no explicit NIX_BUILD_STORE),
# use a throwaway local store so the remote build result doesn't need to land
# in the system store.
if [[ -z "${ARTIFACT_PATH_INPUT}" && -n "${BUILDER_SPEC}" && -z "${NIX_BUILD_STORE:-}" ]]; then
  mkdir -p "${HOME}/.cache/burrow"
  LOCAL_STORE_DIR="$(mktemp -d "${HOME}/.cache/burrow/local-store-XXXXXX")"
fi
artifact_path=""
compression=""
output_hash="${OUTPUT_HASH}"
if [[ -n "${ARTIFACT_PATH_INPUT}" ]]; then
  # Direct-upload path: take the prebuilt artifact as-is; derive the label
  # hash from its sha256 unless one was supplied.
  artifact_path="${ARTIFACT_PATH_INPUT}"
  if [[ ! -f "${artifact_path}" ]]; then
    echo "artifact path does not exist: ${artifact_path}" >&2
    exit 1
  fi
  compression="$(burrow_detect_compression "${artifact_path}")"
  if [[ -z "${output_hash}" ]]; then
    if command -v sha256sum >/dev/null 2>&1; then
      output_hash="$(sha256sum "${artifact_path}" | awk '{print $1}')"
    else
      output_hash="$(shasum -a 256 "${artifact_path}" | awk '{print $1}')"
    fi
  fi
else
  # Build path: nix build the images.<config>-raw flake output and locate the
  # disk image inside the resulting store path.
  nix_build_cmd=(
    nix
    --extra-experimental-features
    "nix-command flakes"
    build
    "${flake_ref}#images.${CONFIG}-raw"
    --no-link
    --print-out-paths
  )
  if [[ -n "${BUILDER_SPEC}" ]]; then
    nix_build_cmd+=(--builders "${BUILDER_SPEC}")
  fi
  if [[ -n "${NIX_BUILD_STORE:-}" ]]; then
    nix_build_cmd+=(--store "${NIX_BUILD_STORE}")
  elif [[ -n "${LOCAL_STORE_DIR}" ]]; then
    nix_build_cmd+=(--store "${LOCAL_STORE_DIR}")
  fi
  if [[ "${#NIX_BUILD_FLAGS[@]}" -gt 0 ]]; then
    nix_build_cmd+=("${NIX_BUILD_FLAGS[@]}")
  fi
  build_output=""
  if ! build_output="$("${nix_build_cmd[@]}" 2>&1)"; then
    printf '%s\n' "${build_output}" >&2
    exit 1
  fi
  # stderr is mixed into build_output; the store path is the last line.
  store_path="$(printf '%s\n' "${build_output}" | tail -n1)"
  if [[ -z "${store_path}" ]]; then
    echo "nix build did not return a store path" >&2
    printf '%s\n' "${build_output}" >&2
    exit 1
  fi
  artifact_path="$(burrow_resolve_image_artifact "${store_path}")"
  compression="$(burrow_detect_compression "${artifact_path}")"
  # The nix hash prefix of the store path basename is a stable identity for
  # this build output.
  output_hash="$(basename "${store_path}")"
  output_hash="${output_hash%%-*}"
fi
label_args=(
  "burrow.nixos-config=${CONFIG}"
  "burrow.nixos-output-hash=${output_hash}"
)
if [[ "${#EXTRA_LABELS[@]}" -gt 0 ]]; then
  label_args+=("${EXTRA_LABELS[@]}")
fi
label_csv="$(IFS=,; printf '%s' "${label_args[*]}")"
# Query the Hetzner API (via python3 stdlib, no hcloud CLI dependency) for the
# newest snapshot matching this config + output hash; prints its ID if found.
find_existing_image() {
  HCLOUD_TOKEN="${HCLOUD_TOKEN}" \
  BURROW_LABEL_SELECTOR="burrow.nixos-config=${CONFIG},burrow.nixos-output-hash=${output_hash}" \
  python3 - <<'PY'
import json
import os
import sys
import urllib.parse
import urllib.request
selector = urllib.parse.quote(os.environ["BURROW_LABEL_SELECTOR"], safe=",=")
req = urllib.request.Request(
    f"https://api.hetzner.cloud/v1/images?type=snapshot&label_selector={selector}",
    headers={"Authorization": f"Bearer {os.environ['HCLOUD_TOKEN']}"},
)
with urllib.request.urlopen(req, timeout=30) as resp:
    data = json.load(resp)
images = sorted(data.get("images", []), key=lambda item: item.get("created") or "")
if images:
    print(images[-1]["id"])
PY
}
if [[ "${NO_UPDATE}" -eq 1 ]]; then
  existing_id="$(find_existing_image || true)"
  if [[ -n "${existing_id}" ]]; then
    printf 'Reusing existing Hetzner snapshot %s for %s\n' "${existing_id}" "${CONFIG}" >&2
    printf '%s\n' "${existing_id}"
    exit 0
  fi
fi
# Resolve the hcloud-upload-image binary: explicit override, else build it
# from the flake.
uploader_bin="${HCLOUD_UPLOAD_IMAGE_BIN:-}"
if [[ -z "${uploader_bin}" ]]; then
  uploader_build_output="$(
    nix --extra-experimental-features "nix-command flakes" build \
      "${flake_ref}#hcloud-upload-image" \
      --no-link \
      --print-out-paths 2>&1
  )" || {
    printf '%s\n' "${uploader_build_output}" >&2
    exit 1
  }
  uploader_bin="$(printf '%s\n' "${uploader_build_output}" | tail -n1)/bin/hcloud-upload-image"
fi
if [[ ! -x "${uploader_bin}" ]]; then
  echo "unable to resolve an executable hcloud-upload-image binary; set HCLOUD_UPLOAD_IMAGE_BIN explicitly" >&2
  exit 1
fi
upload_cmd=(
  "${uploader_bin}"
)
if [[ "${UPLOAD_VERBOSE}" =~ ^[0-9]+$ ]] && [[ "${UPLOAD_VERBOSE}" -gt 0 ]]; then
  for _ in $(seq 1 "${UPLOAD_VERBOSE}"); do
    upload_cmd+=(-v)
  done
fi
upload_cmd+=(
  upload
  --image-path "${artifact_path}"
  --location "${LOCATION}"
  --description "${DESCRIPTION}"
  --labels "${label_csv}"
)
# An explicit server type wins; otherwise let the uploader pick by architecture.
if [[ -n "${UPLOAD_SERVER_TYPE}" ]]; then
  upload_cmd+=(--server-type "${UPLOAD_SERVER_TYPE}")
else
  upload_cmd+=(--architecture "${ARCHITECTURE}")
fi
if [[ -n "${compression}" ]]; then
  upload_cmd+=(--compression "${compression}")
fi
printf 'Uploading %s to Hetzner Cloud via %s\n' "${artifact_path}" "${uploader_bin}" >&2
HCLOUD_TOKEN="${HCLOUD_TOKEN}" "${upload_cmd[@]}" >&2
# Poll (up to 24 x 5s = 2 minutes) until the labeled snapshot is visible, then
# print its ID as the script's sole stdout output.
image_id=""
for _ in $(seq 1 24); do
  image_id="$(find_existing_image || true)"
  if [[ -n "${image_id}" ]]; then
    break
  fi
  sleep 5
done
if [[ -z "${image_id}" ]]; then
  echo "failed to locate uploaded Hetzner snapshot after upload completed" >&2
  exit 1
fi
printf '%s\n' "${image_id}"

284
Scripts/hetzner-forge.sh Executable file
View file

@ -0,0 +1,284 @@
#!/usr/bin/env bash
# hetzner-forge.sh — manage the Burrow forge server and its Hetzner snapshot
# lifecycle (show/create/delete/recreate, plus image build/boot-from-image).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# Print CLI help; the quoted heredoc keeps the text literal.
usage() {
cat <<'EOF'
Usage: Scripts/hetzner-forge.sh [show|create|delete|recreate|build-image|create-from-image|recreate-from-image] [options]
Manage the Burrow forge server and its Hetzner snapshot lifecycle.
Defaults:
action: show
server-name: burrow-forge
server-type: ccx23
location: hel1
image: ubuntu-24.04
ssh keys: contact@burrow.net,agent@burrow.net
Options:
--server-name <name> Server name to manage.
--server-type <type> Hetzner server type.
--location <code> Hetzner location.
--image <name|id> Image used at create time.
--config <name> Burrow image config name for snapshot lookup/build (default: burrow-forge).
--ssh-key <name> SSH key name to attach. Repeatable.
--token-file <path> Hetzner API token file.
--flake <path> Flake path used by image-build actions (default: .)
--upload-location <code> Hetzner location used for image upload (default: same as --location)
--yes Required for delete and recreate.
-h, --help Show this help text.
Environment:
HCLOUD_TOKEN_FILE Defaults to intake/hetzner-api-token.txt
EOF
}
# Defaults; every value is overridable via the flags parsed below.
ACTION="show"
SERVER_NAME="burrow-forge"
SERVER_TYPE="ccx23"
LOCATION="hel1"
IMAGE="ubuntu-24.04"
CONFIG="burrow-forge"
FLAKE="."
UPLOAD_LOCATION=""
TOKEN_FILE="${HCLOUD_TOKEN_FILE:-intake/hetzner-api-token.txt}"
YES=0
SSH_KEYS=("contact@burrow.net" "agent@burrow.net")
# An optional leading positional word selects the action; anything else is
# left for the flag parser (which rejects unknown arguments).
if [[ $# -gt 0 ]]; then
  case "$1" in
    show|create|delete|recreate|build-image|create-from-image|recreate-from-image)
      ACTION="$1"
      shift
      ;;
  esac
fi
while [[ $# -gt 0 ]]; do
  case "$1" in
    --server-name)
      SERVER_NAME="${2:?missing value for --server-name}"
      shift 2
      ;;
    --server-type)
      SERVER_TYPE="${2:?missing value for --server-type}"
      shift 2
      ;;
    --location)
      LOCATION="${2:?missing value for --location}"
      shift 2
      ;;
    --image)
      IMAGE="${2:?missing value for --image}"
      shift 2
      ;;
    --config)
      CONFIG="${2:?missing value for --config}"
      shift 2
      ;;
    --ssh-key)
      # Repeatable: each occurrence appends another key name.
      SSH_KEYS+=("${2:?missing value for --ssh-key}")
      shift 2
      ;;
    --token-file)
      TOKEN_FILE="${2:?missing value for --token-file}"
      shift 2
      ;;
    --flake)
      FLAKE="${2:?missing value for --flake}"
      shift 2
      ;;
    --upload-location)
      UPLOAD_LOCATION="${2:?missing value for --upload-location}"
      shift 2
      ;;
    --yes)
      YES=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "Unknown argument: $1" >&2
      usage >&2
      exit 2
      ;;
  esac
done
# Preflight: the API token must exist before we attempt anything.
if [[ ! -f "${TOKEN_FILE}" ]]; then
  echo "Hetzner API token file not found: ${TOKEN_FILE}" >&2
  exit 1
fi
# Image uploads default to the same region as the server itself.
if [[ -z "${UPLOAD_LOCATION}" ]]; then
  UPLOAD_LOCATION="${LOCATION}"
fi
# Destructive actions require explicit confirmation via --yes.
if [[ "${ACTION}" == "delete" || "${ACTION}" == "recreate" || "${ACTION}" == "recreate-from-image" ]] && [[ ${YES} -ne 1 ]]; then
  echo "--yes is required for ${ACTION}" >&2
  exit 1
fi
# Print the id of the newest Hetzner snapshot labelled
# burrow.nixos-config=<CONFIG>; prints nothing when none exists. The token
# and config travel via the environment so they never appear on a command
# line, and the quoted heredoc keeps the Python literal.
latest_snapshot_id() {
  HCLOUD_TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")" \
  BURROW_CONFIG="${CONFIG}" \
  python3 - <<'PY'
import json
import os
import urllib.parse
import urllib.request
selector = urllib.parse.quote(f"burrow.nixos-config={os.environ['BURROW_CONFIG']}", safe=",=")
req = urllib.request.Request(
    f"https://api.hetzner.cloud/v1/images?type=snapshot&label_selector={selector}",
    headers={"Authorization": f"Bearer {os.environ['HCLOUD_TOKEN']}"},
)
with urllib.request.urlopen(req, timeout=30) as resp:
    data = json.load(resp)
images = sorted(data.get("images", []), key=lambda item: item.get("created") or "")
if images:
    print(images[-1]["id"])
PY
}
if [[ "${ACTION}" == "build-image" ]]; then
exec "${SCRIPT_DIR}/nsc-build-and-upload-image.sh" \
--config "${CONFIG}" \
--flake "${FLAKE}" \
--location "${UPLOAD_LOCATION}" \
--upload-server-type "${SERVER_TYPE}" \
--token-file "${TOKEN_FILE}"
fi
if [[ "${ACTION}" == "create-from-image" || "${ACTION}" == "recreate-from-image" ]]; then
if [[ "${IMAGE}" == "ubuntu-24.04" ]]; then
IMAGE="$(latest_snapshot_id)"
fi
if [[ -z "${IMAGE}" ]]; then
echo "No Burrow snapshot found for config ${CONFIG}. Run build-image first." >&2
exit 1
fi
if [[ "${ACTION}" == "create-from-image" ]]; then
ACTION="create"
else
ACTION="recreate"
fi
fi
ssh_keys_csv=""
for key in "${SSH_KEYS[@]}"; do
if [[ -n "${ssh_keys_csv}" ]]; then
ssh_keys_csv+=","
fi
ssh_keys_csv+="${key}"
done
# Hand all parameters to the inline Python below through the environment,
# avoiding any shell quoting of user-supplied values inside the heredoc.
export BURROW_HCLOUD_ACTION="${ACTION}"
export BURROW_HCLOUD_SERVER_NAME="${SERVER_NAME}"
export BURROW_HCLOUD_SERVER_TYPE="${SERVER_TYPE}"
export BURROW_HCLOUD_LOCATION="${LOCATION}"
export BURROW_HCLOUD_IMAGE="${IMAGE}"
export BURROW_HCLOUD_TOKEN_FILE="${TOKEN_FILE}"
export BURROW_HCLOUD_SSH_KEYS="${ssh_keys_csv}"
# Drive the Hetzner Cloud API with stdlib-only Python. The original used the
# third-party `requests` package, but this script only guarantees python3 is
# present, and every other inline Python helper here already uses urllib —
# urlopen raises HTTPError on non-2xx, matching raise_for_status().
python3 - <<'PY'
import json
import os
import sys
import urllib.parse
import urllib.request
from pathlib import Path
base = "https://api.hetzner.cloud/v1"
action = os.environ["BURROW_HCLOUD_ACTION"]
server_name = os.environ["BURROW_HCLOUD_SERVER_NAME"]
server_type = os.environ["BURROW_HCLOUD_SERVER_TYPE"]
location = os.environ["BURROW_HCLOUD_LOCATION"]
image = os.environ["BURROW_HCLOUD_IMAGE"]
token = Path(os.environ["BURROW_HCLOUD_TOKEN_FILE"]).read_text(encoding="utf-8").strip()
ssh_keys = [key for key in os.environ["BURROW_HCLOUD_SSH_KEYS"].split(",") if key]
def request(method: str, path: str, params=None, payload=None):
    """Issue one API call; returns the decoded JSON body (None when empty).

    urllib.request.urlopen raises HTTPError for 4xx/5xx responses, giving the
    same fail-fast behavior as requests' raise_for_status().
    """
    url = f"{base}{path}"
    if params:
        url = f"{url}?{urllib.parse.urlencode(params)}"
    headers = {"Authorization": f"Bearer {token}"}
    data = None
    if payload is not None:
        headers["Content-Type"] = "application/json"
        data = json.dumps(payload).encode("utf-8")
    req = urllib.request.Request(url, data=data, headers=headers, method=method)
    with urllib.request.urlopen(req, timeout=30) as resp:
        body = resp.read()
    return json.loads(body) if body else None
def find_server():
    """Return the server object whose name matches exactly, or None."""
    data = request("GET", "/servers", params={"name": server_name}) or {}
    for server in data.get("servers", []):
        if server.get("name") == server_name:
            return server
    return None
def summarize(server):
    """Print a compact JSON summary of one server to stdout."""
    ipv4 = (((server.get("public_net") or {}).get("ipv4")) or {}).get("ip")
    image_name = ((server.get("image") or {}).get("name")) or ""
    summary = {
        "id": server.get("id"),
        "name": server.get("name"),
        "status": server.get("status"),
        "server_type": ((server.get("server_type") or {}).get("name")),
        "location": ((server.get("location") or {}).get("name")),
        "image": image_name,
        "ipv4": ipv4,
        "created": server.get("created"),
    }
    print(json.dumps(summary, indent=2))
server = find_server()
if action == "show":
    if server is None:
        print(json.dumps({"name": server_name, "present": False}, indent=2))
    else:
        summarize(server)
    sys.exit(0)
if action == "delete":
    if server is None:
        # Deleting an absent server is reported, not treated as an error.
        print(json.dumps({"name": server_name, "deleted": False, "reason": "not found"}, indent=2))
        sys.exit(0)
    request("DELETE", f"/servers/{server['id']}")
    print(json.dumps({"name": server_name, "deleted": True, "id": server["id"]}, indent=2))
    sys.exit(0)
if action == "recreate" and server is not None:
    # Recreate is delete-then-create under the same name.
    request("DELETE", f"/servers/{server['id']}")
    server = None
if action in {"create", "recreate"}:
    if server is not None:
        # Already present: just report it instead of erroring on a conflict.
        summarize(server)
        sys.exit(0)
    payload = {
        "name": server_name,
        "server_type": server_type,
        "location": location,
        "image": image,
        "ssh_keys": ssh_keys,
        "labels": {
            "project": "burrow",
            "role": "forge",
        },
    }
    created = request("POST", "/servers", payload=payload)
    summarize(created["server"])
    sys.exit(0)
raise SystemExit(f"unsupported action: {action}")
PY

View file

@ -0,0 +1,542 @@
#!/usr/bin/env bash
# Build the Burrow raw disk image on a temporary Namespace builder and upload
# the result to Hetzner Cloud as a labelled snapshot.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# shellcheck source=Scripts/_burrow-flake.sh
source "${SCRIPT_DIR}/_burrow-flake.sh"
# Tunables: each may be preset via the environment and overridden by flags.
CONFIG="${HCLOUD_IMAGE_CONFIG:-burrow-forge}"
FLAKE="${HCLOUD_IMAGE_FLAKE:-.}"
LOCATION="${HCLOUD_IMAGE_LOCATION:-hel1}"
TOKEN_FILE="${HCLOUD_TOKEN_FILE:-${REPO_ROOT}/intake/hetzner-api-token.txt}"
NSC_SSH_HOST="${NSC_SSH_HOST:-ssh.ord2.namespace.so}"
NSC_MACHINE_TYPE="${NSC_MACHINE_TYPE:-linux/amd64:32x64}"
NSC_BUILDER_DURATION="${NSC_BUILDER_DURATION:-4h}"
NSC_BUILDER_JOBS="${NSC_BUILDER_JOBS:-32}"
NSC_BUILDER_FEATURES="${NSC_BUILDER_FEATURES:-kvm,big-parallel}"
NSC_BIN="${NSC_BIN:-}"
REMOTE_COMPRESSION="${HCLOUD_IMAGE_REMOTE_COMPRESSION:-auto}"
UPLOAD_SERVER_TYPE="${HCLOUD_IMAGE_UPLOAD_SERVER_TYPE:-}"
KEEP_TMPDIR="${HCLOUD_IMAGE_KEEP_TMPDIR:-0}"
NO_UPDATE=0
NIX_BUILD_FLAGS=()
EXTRA_LABELS=()
# Tracked by the sourced flake helpers; cleaned up in cleanup().
BURROW_FLAKE_TMPDIRS=()
# Set once the Namespace builder exists so cleanup() can destroy it.
BUILDER_ID=""
# Print CLI help; the quoted heredoc keeps the text literal.
usage() {
cat <<'EOF'
Usage: Scripts/nsc-build-and-upload-image.sh [options]
Create a temporary Namespace Linux builder, build the Burrow raw image on it,
and upload the resulting artifact to Hetzner Cloud.
Options:
--config <name> images.<name>-raw output to build (default: burrow-forge)
--flake <path> Flake path to build from (default: .)
--location <code> Hetzner upload location (default: hel1)
--token-file <path> Hetzner API token file (default: intake/hetzner-api-token.txt)
--machine-type <type> Namespace machine type (default: linux/amd64:32x64)
--ssh-host <host> Namespace SSH endpoint (default: ssh.ord2.namespace.so)
--duration <ttl> Namespace builder lifetime (default: 4h)
--builder-jobs <n> Nix builder job count advertised to the local client
--builder-features <s> Comma-separated Nix system features (default: "kvm,big-parallel")
--remote-compression <mode>
Compress raw/image artifacts on the Namespace builder
before copy-back. Modes: auto, none, xz, zstd
(default: auto)
--upload-server-type <name>
Hetzner server type for the temporary upload host
--label key=value Extra Hetzner snapshot label (repeatable)
--nix-flag <arg> Extra argument passed to nix build (repeatable)
--no-update Reuse an existing snapshot with the same config/output hash
-h, --help Show this help text
EOF
}
# Flag parsing; ${2:?...} aborts with a clear message on a missing value.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --config)
      CONFIG="${2:?missing value for --config}"
      shift 2
      ;;
    --flake)
      FLAKE="${2:?missing value for --flake}"
      shift 2
      ;;
    --location)
      LOCATION="${2:?missing value for --location}"
      shift 2
      ;;
    --token-file)
      TOKEN_FILE="${2:?missing value for --token-file}"
      shift 2
      ;;
    --machine-type)
      NSC_MACHINE_TYPE="${2:?missing value for --machine-type}"
      shift 2
      ;;
    --ssh-host)
      NSC_SSH_HOST="${2:?missing value for --ssh-host}"
      shift 2
      ;;
    --duration)
      NSC_BUILDER_DURATION="${2:?missing value for --duration}"
      shift 2
      ;;
    --builder-jobs)
      NSC_BUILDER_JOBS="${2:?missing value for --builder-jobs}"
      shift 2
      ;;
    --builder-features)
      NSC_BUILDER_FEATURES="${2:?missing value for --builder-features}"
      shift 2
      ;;
    --remote-compression)
      REMOTE_COMPRESSION="${2:?missing value for --remote-compression}"
      shift 2
      ;;
    --upload-server-type)
      UPLOAD_SERVER_TYPE="${2:?missing value for --upload-server-type}"
      shift 2
      ;;
    --label)
      # Repeatable: collected and forwarded to the upload script later.
      EXTRA_LABELS+=("${2:?missing value for --label}")
      shift 2
      ;;
    --nix-flag)
      # Repeatable: forwarded verbatim to the remote `nix build`.
      NIX_BUILD_FLAGS+=("${2:?missing value for --nix-flag}")
      shift 2
      ;;
    --no-update)
      NO_UPDATE=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done
# Best-effort teardown on exit: release the Namespace builder first, then
# local temporary state. HCLOUD_IMAGE_KEEP_TMPDIR=1 preserves the scratch dir.
cleanup() {
    if [[ -n "${BUILDER_ID}" ]] && [[ -n "${NSC_BIN}" ]]; then
        "${NSC_BIN}" destroy "${BUILDER_ID}" --force >/dev/null 2>&1 || true
    fi
    burrow_cleanup_flake_tmpdirs
    if [[ "${KEEP_TMPDIR}" == "1" ]]; then
        return 0
    fi
    if [[ -n "${TMPDIR_BURROW_NSC:-}" && -d "${TMPDIR_BURROW_NSC}" ]]; then
        rm -rf "${TMPDIR_BURROW_NSC}"
    fi
}
trap cleanup EXIT
# Fail fast if any required tool is missing.
burrow_require_cmd nix
burrow_require_cmd curl
burrow_require_cmd python3
burrow_require_cmd ssh
burrow_require_cmd ssh-keygen
burrow_require_cmd ssh-keyscan
burrow_require_cmd tar
flake_ref="$(burrow_prepare_flake_ref "${FLAKE}")"
# Resolve the nsc CLI: build it from the flake unless NSC_BIN points at one.
if [[ -z "${NSC_BIN}" ]]; then
  nsc_build_output="$(
    nix --extra-experimental-features "nix-command flakes" build \
      "${flake_ref}#nsc" \
      --no-link \
      --print-out-paths 2>&1
  )" || {
    printf '%s\n' "${nsc_build_output}" >&2
    exit 1
  }
  # The store path is the last line of the captured build output.
  NSC_BIN="$(printf '%s\n' "${nsc_build_output}" | tail -n1)/bin/nsc"
fi
if [[ ! -x "${NSC_BIN}" ]]; then
  echo "unable to resolve an executable nsc binary; set NSC_BIN explicitly" >&2
  exit 1
fi
# Bootstrap ~/.ns/session from NSC_SESSION when no session file exists yet.
if [[ -n "${NSC_SESSION:-}" && ! -f "${HOME}/.ns/session" ]]; then
  mkdir -p "${HOME}/.ns"
  printf '%s\n' "${NSC_SESSION}" > "${HOME}/.ns/session"
  chmod 600 "${HOME}/.ns/session"
fi
"${NSC_BIN}" auth check-login --duration 20m >/dev/null
"${NSC_BIN}" version >/dev/null || true
# Scratch space for the throwaway SSH identity and the builder id file.
TMPDIR_BURROW_NSC="$(mktemp -d "${HOME}/.cache/burrow/nsc-XXXXXX")"
ssh_key="${TMPDIR_BURROW_NSC}/builder"
known_hosts="${TMPDIR_BURROW_NSC}/known_hosts"
id_file="${TMPDIR_BURROW_NSC}/builder.id"
ssh-keygen -q -t ed25519 -N "" -f "${ssh_key}"
# Pin the SSH endpoint's host key up front so every later connection can run
# with StrictHostKeyChecking=yes.
ssh-keyscan -H "${NSC_SSH_HOST}" > "${known_hosts}"
# Shared argv prefix for all SSH calls to the builder.
ssh_base=(
  ssh
  -i "${ssh_key}"
  -o UserKnownHostsFile="${known_hosts}"
  -o StrictHostKeyChecking=yes
)
# Poll the freshly created builder until it accepts a no-op SSH command.
# Up to 30 attempts, 5 seconds apart (~2.5 minutes); returns 1 on timeout.
wait_for_ssh() {
    local instance_id="$1"
    local attempt
    for attempt in $(seq 1 30); do
        "${ssh_base[@]}" -q "${instance_id}@${NSC_SSH_HOST}" true >/dev/null 2>&1 && return 0
        sleep 5
    done
    return 1
}
# Prepare the builder over SSH: install Nix via the Determinate installer if
# absent, write /etc/nix/nix.conf (trusted users, cache.nixos.org
# substituter), start nix-daemon, and wait up to 120s for its socket. The
# heredoc is quoted, so every expansion happens on the remote host.
configure_builder() {
  local instance_id="$1"
  "${ssh_base[@]}" "${instance_id}@${NSC_SSH_HOST}" <<'EOF'
set -euo pipefail
if ! command -v nix >/dev/null 2>&1; then
curl -fsSL https://install.determinate.systems/nix | sh -s -- install linux --determinate --init none --no-confirm
fi
if [ -e /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]; then
. /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
fi
mkdir -p /etc/nix
cat <<CFG >/etc/nix/nix.conf
build-users-group =
trusted-users = root $USER
auto-optimise-store = true
substituters = https://cache.nixos.org
builders-use-substitutes = true
CFG
mkdir -p /nix/var/nix/daemon-socket
if ! pgrep -x nix-daemon >/dev/null 2>&1; then
nohup nix-daemon >/dev/null 2>&1 </dev/null &
fi
for _ in $(seq 1 120); do
if [ -S /nix/var/nix/daemon-socket/socket ]; then
exit 0
fi
if ! pgrep -x nix-daemon >/dev/null 2>&1; then
nohup nix-daemon >/dev/null 2>&1 </dev/null &
fi
sleep 1
done
echo "nix-daemon socket never appeared" >&2
exit 1
EOF
}
printf 'Creating temporary Namespace builder (%s)\n' "${NSC_MACHINE_TYPE}" >&2
# Create a bare instance with the throwaway public key; its id lands in
# id_file via --output_to.
"${NSC_BIN}" create \
  --bare \
  --machine_type "${NSC_MACHINE_TYPE}" \
  --ssh_key "${ssh_key}.pub" \
  --duration "${NSC_BUILDER_DURATION}" \
  --label "burrow=true" \
  --label "purpose=hetzner-image-build" \
  --output_to "${id_file}" \
  >/dev/null
BUILDER_ID="$(tr -d '\r\n' < "${id_file}")"
if [[ -z "${BUILDER_ID}" ]]; then
  echo "nsc create did not return a builder id" >&2
  exit 1
fi
printf 'Waiting for Namespace builder %s\n' "${BUILDER_ID}" >&2
wait_for_ssh "${BUILDER_ID}"
configure_builder "${BUILDER_ID}" >&2
# Remote working paths for the synced flake and the captured build output.
remote_root="burrow-image-build-${BUILDER_ID}"
remote_flake_path="./${remote_root}"
local_flake_dir="${flake_ref#path:}"
remote_build_stdout="/tmp/burrow-image-build-${BUILDER_ID}.stdout"
remote_build_stderr="/tmp/burrow-image-build-${BUILDER_ID}.stderr"
printf 'Syncing flake to Namespace builder %s\n' "${BUILDER_ID}" >&2
# Stream the flake as a tar pipe over SSH (no scp/rsync needed remotely).
tar -C "${local_flake_dir}" -cf - . \
  | "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" "rm -rf '${remote_root}' && mkdir -p '${remote_root}' && tar -C '${remote_root}' -xf -"
# Run `nix build` for images.<CONFIG>-raw on the builder. Parameters travel
# as environment variables; any NIX_BUILD_FLAGS become the remote script's
# positional arguments (after `bash -s --`) and are appended to the nix call.
# stdout/stderr are captured to remote files so the resulting store path can
# be read back afterwards; stderr is only relayed on failure.
run_remote_build() {
  local remote_cmd=(
    env
    "CONFIG=${CONFIG}"
    "REMOTE_FLAKE_PATH=${remote_flake_path}"
    "REMOTE_BUILD_STDOUT=${remote_build_stdout}"
    "REMOTE_BUILD_STDERR=${remote_build_stderr}"
    bash
    -s
    --
  )
  if [[ "${#NIX_BUILD_FLAGS[@]}" -gt 0 ]]; then
    remote_cmd+=("${NIX_BUILD_FLAGS[@]}")
  fi
  "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" "${remote_cmd[@]}" <<'EOF'
set -euo pipefail
config="${CONFIG}"
remote_flake_path="${REMOTE_FLAKE_PATH}"
remote_build_stdout="${REMOTE_BUILD_STDOUT}"
remote_build_stderr="${REMOTE_BUILD_STDERR}"
nix_build_cmd=(
nix
--extra-experimental-features
"nix-command flakes"
build
"path:${remote_flake_path}#images.${config}-raw"
--no-link
--print-out-paths
)
if [[ "$#" -gt 0 ]]; then
nix_build_cmd+=("$@")
fi
rm -f "${remote_build_stdout}" "${remote_build_stderr}"
if ! "${nix_build_cmd[@]}" >"${remote_build_stdout}" 2>"${remote_build_stderr}"; then
cat "${remote_build_stderr}" >&2
exit 1
fi
EOF
}
# Read back the store path produced by run_remote_build (the last line of the
# captured remote stdout); relays the captured stderr when stdout is empty.
resolve_remote_store_path() {
  "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
    env "REMOTE_BUILD_STDOUT=${remote_build_stdout}" "REMOTE_BUILD_STDERR=${remote_build_stderr}" bash -s <<'EOF'
set -euo pipefail
remote_build_stdout="${REMOTE_BUILD_STDOUT}"
remote_build_stderr="${REMOTE_BUILD_STDERR}"
if [[ ! -s "${remote_build_stdout}" ]]; then
echo "remote build stdout file is missing or empty: ${remote_build_stdout}" >&2
if [[ -s "${remote_build_stderr}" ]]; then
cat "${remote_build_stderr}" >&2
fi
exit 1
fi
tail -n1 "${remote_build_stdout}"
EOF
}
# Locate the disk image file for a store path: directories are searched for
# the first .raw/.img artifact (optionally with a compression suffix); a
# plain file is used as-is.
resolve_remote_artifact_path() {
  local store_path="$1"
  "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
    env "REMOTE_STORE_PATH=${store_path}" bash -s <<'EOF'
set -euo pipefail
store_path="${REMOTE_STORE_PATH}"
artifact_path="${store_path}"
if [[ -d "${artifact_path}" ]]; then
artifact_path="$(find "${artifact_path}" -type f \( -name '*.raw' -o -name '*.raw.*' -o -name '*.img' -o -name '*.img.*' \) | sort | head -n1)"
fi
if [[ -z "${artifact_path}" || ! -f "${artifact_path}" ]]; then
echo "unable to locate image artifact under ${store_path}" >&2
exit 1
fi
printf '%s\n' "${artifact_path}"
EOF
}
# Decide how the artifact travels back and print one tab-separated line:
# "<local filename>\t<compression>". Already-compressed artifacts pass
# through untouched; otherwise the requested mode applies ("auto" prefers
# zstd, then xz, then none, based on what the builder has installed). Must
# stay in sync with stream_remote_artifact, which repeats this selection.
plan_remote_artifact_transfer() {
  local artifact_path="$1"
  local compression_mode="$2"
  "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
    env "REMOTE_ARTIFACT_PATH=${artifact_path}" "REMOTE_COMPRESSION=${compression_mode}" bash -s <<'EOF'
set -euo pipefail
artifact_path="${REMOTE_ARTIFACT_PATH}"
compression_mode="${REMOTE_COMPRESSION}"
case "${artifact_path}" in
*.bz2)
printf '%s\tbz2\n' "$(basename "${artifact_path}")"
exit 0
;;
*.xz)
printf '%s\txz\n' "$(basename "${artifact_path}")"
exit 0
;;
*.zst|*.zstd)
printf '%s\tzstd\n' "$(basename "${artifact_path}")"
exit 0
;;
esac
select_compression() {
case "${compression_mode}" in
auto)
if command -v zstd >/dev/null 2>&1; then
printf 'zstd\n'
return 0
fi
if command -v xz >/dev/null 2>&1; then
printf 'xz\n'
return 0
fi
printf 'none\n'
;;
none|xz|zstd)
printf '%s\n' "${compression_mode}"
;;
*)
echo "unsupported remote compression mode: ${compression_mode}" >&2
exit 1
;;
esac
}
mode="$(select_compression)"
case "${mode}" in
none)
printf '%s\tnone\n' "$(basename "${artifact_path}")"
;;
zstd)
printf '%s.zst\tzstd\n' "$(basename "${artifact_path}")"
;;
xz)
printf '%s.xz\txz\n' "$(basename "${artifact_path}")"
;;
esac
EOF
}
# Stream the artifact from the builder into a local file (the redirection to
# $destination happens locally, on the ssh command's stdout). Applies the
# same compression decision as plan_remote_artifact_transfer — the
# select_compression logic is intentionally duplicated in both remote
# scripts, so keep the two in sync.
stream_remote_artifact() {
  local artifact_path="$1"
  local compression_mode="$2"
  local destination="$3"
  "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
    env "REMOTE_ARTIFACT_PATH=${artifact_path}" "REMOTE_COMPRESSION=${compression_mode}" bash -s <<'EOF' > "${destination}"
set -euo pipefail
artifact_path="${REMOTE_ARTIFACT_PATH}"
compression_mode="${REMOTE_COMPRESSION}"
case "${artifact_path}" in
*.bz2|*.xz|*.zst|*.zstd)
cat "${artifact_path}"
exit 0
;;
esac
select_compression() {
case "${compression_mode}" in
auto)
if command -v zstd >/dev/null 2>&1; then
printf 'zstd\n'
return 0
fi
if command -v xz >/dev/null 2>&1; then
printf 'xz\n'
return 0
fi
printf 'none\n'
;;
none|xz|zstd)
printf '%s\n' "${compression_mode}"
;;
*)
echo "unsupported remote compression mode: ${compression_mode}" >&2
exit 1
;;
esac
}
mode="$(select_compression)"
case "${mode}" in
none)
cat "${artifact_path}"
;;
zstd)
if ! command -v zstd >/dev/null 2>&1; then
echo "zstd requested but not available on Namespace builder" >&2
exit 1
fi
zstd -T0 -19 -c "${artifact_path}"
;;
xz)
if ! command -v xz >/dev/null 2>&1; then
echo "xz requested but not available on Namespace builder" >&2
exit 1
fi
xz -T0 -c "${artifact_path}"
;;
esac
EOF
}
printf 'Building raw image on Namespace builder %s\n' "${BUILDER_ID}" >&2
run_remote_build
remote_store_path="$(resolve_remote_store_path)"
if [[ -z "${remote_store_path}" ]]; then
  echo "remote build did not return a store path" >&2
  exit 1
fi
remote_artifact_path="$(resolve_remote_artifact_path "${remote_store_path}")"
if [[ -z "${remote_artifact_path}" ]]; then
  echo "remote build did not return an artifact path" >&2
  exit 1
fi
# The transfer plan is one tab-separated "name<TAB>compression" line.
transfer_plan="$(plan_remote_artifact_transfer "${remote_artifact_path}" "${REMOTE_COMPRESSION}")"
local_artifact_name="$(printf '%s\n' "${transfer_plan}" | cut -f1)"
transfer_compression="$(printf '%s\n' "${transfer_plan}" | cut -f2)"
if [[ -z "${local_artifact_name}" || -z "${transfer_compression}" ]]; then
  echo "unable to determine artifact transfer plan for ${remote_artifact_path}" >&2
  exit 1
fi
# Nix output hash = store path basename up to the first dash; used to tag the
# uploaded snapshot so --no-update runs can find and reuse it.
output_hash="$(basename "${remote_store_path}")"
output_hash="${output_hash%%-*}"
local_artifact="${TMPDIR_BURROW_NSC}/${local_artifact_name}"
printf 'Streaming built artifact back from Namespace builder %s (%s)\n' "${BUILDER_ID}" "${transfer_compression}" >&2
stream_remote_artifact "${remote_artifact_path}" "${REMOTE_COMPRESSION}" "${local_artifact}"
# Delegate the actual Hetzner upload to the sibling script.
cmd=(
  "${SCRIPT_DIR}/hcloud-upload-nixos-image.sh"
  --config "${CONFIG}"
  --flake "${FLAKE}"
  --location "${LOCATION}"
  --token-file "${TOKEN_FILE}"
  --artifact-path "${local_artifact}"
  --output-hash "${output_hash}"
)
if [[ -n "${UPLOAD_SERVER_TYPE}" ]]; then
  cmd+=(--server-type "${UPLOAD_SERVER_TYPE}")
fi
if [[ "${NO_UPDATE}" -eq 1 ]]; then
  cmd+=(--no-update)
fi
if [[ "${#EXTRA_LABELS[@]}" -gt 0 ]]; then
  for label in "${EXTRA_LABELS[@]}"; do
    cmd+=(--label "${label}")
  done
fi
"${cmd[@]}"
237
Scripts/provision-forgejo-nsc.sh Executable file
View file

@ -0,0 +1,237 @@
#!/usr/bin/env bash
# Generate the forgejo-nsc runtime inputs in intake/ (Namespace token,
# dispatcher/autoscaler configs) and mint a Forgejo PAT on the forge host.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# shellcheck source=Scripts/_burrow-flake.sh
source "${SCRIPT_DIR}/_burrow-flake.sh"
# Print CLI help; the quoted heredoc keeps the text literal.
usage() {
cat <<'EOF'
Usage: Scripts/provision-forgejo-nsc.sh [options]
Generate Burrow forgejo-nsc runtime inputs in intake/ and optionally refresh the
Namespace token from the currently logged-in namespace account.
Options:
--host <user@host> SSH target used to mint the Forgejo PAT.
Default: root@git.burrow.net
--ssh-key <path> SSH private key for the forge host.
Default: intake/agent_at_burrow_net_ed25519
--nsc-bin <path> Override the nsc binary.
--no-refresh-token Reuse intake/forgejo_nsc_token.txt if it already exists.
--token-name <name> Forgejo PAT name prefix (default: forgejo-nsc)
--contact-user <name> Forgejo username used for PAT creation (default: contact)
--scope-owner <name> Forgejo org/user owner for the default NSC scope (default: hackclub)
--scope-name <name> Forgejo repository name for the default NSC scope (default: burrow)
-h, --help Show this help text.
EOF
}
# Defaults; each may be preset via the environment and overridden by flags.
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
NSC_BIN="${NSC_BIN:-}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
REFRESH_TOKEN=1
TOKEN_NAME_PREFIX="${FORGEJO_PAT_NAME:-forgejo-nsc}"
CONTACT_USER="${FORGEJO_CONTACT_USER:-contact}"
SCOPE_OWNER="${FORGEJO_SCOPE_OWNER:-hackclub}"
SCOPE_NAME="${FORGEJO_SCOPE_NAME:-burrow}"
# Tracked by the sourced flake helpers; removed by cleanup() on exit.
BURROW_FLAKE_TMPDIRS=()
cleanup() {
  burrow_cleanup_flake_tmpdirs
}
trap cleanup EXIT
# Flag parsing; ${2:?...} aborts with a clear message on a missing value.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --host)
      HOST="${2:?missing value for --host}"
      shift 2
      ;;
    --ssh-key)
      SSH_KEY="${2:?missing value for --ssh-key}"
      shift 2
      ;;
    --nsc-bin)
      NSC_BIN="${2:?missing value for --nsc-bin}"
      shift 2
      ;;
    --no-refresh-token)
      REFRESH_TOKEN=0
      shift
      ;;
    --token-name)
      TOKEN_NAME_PREFIX="${2:?missing value for --token-name}"
      shift 2
      ;;
    --contact-user)
      CONTACT_USER="${2:?missing value for --contact-user}"
      shift 2
      ;;
    --scope-owner)
      SCOPE_OWNER="${2:?missing value for --scope-owner}"
      shift 2
      ;;
    --scope-name)
      SCOPE_NAME="${2:?missing value for --scope-name}"
      shift 2
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done
mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"
burrow_require_cmd nix
burrow_require_cmd ssh
burrow_require_cmd python3
if [[ ! -f "${SSH_KEY}" ]]; then
echo "forge SSH key not found: ${SSH_KEY}" >&2
exit 1
fi
mkdir -p "${REPO_ROOT}/intake"
chmod 700 "${REPO_ROOT}/intake"
flake_ref="$(burrow_prepare_flake_ref "${REPO_ROOT}")"
if [[ -z "${NSC_BIN}" ]]; then
if command -v nsc >/dev/null 2>&1; then
NSC_BIN="$(command -v nsc)"
else
nsc_build_output="$(
nix --extra-experimental-features "nix-command flakes" build \
"${flake_ref}#nsc" \
--no-link \
--print-out-paths 2>&1
)" || {
printf '%s\n' "${nsc_build_output}" >&2
exit 1
}
NSC_BIN="$(printf '%s\n' "${nsc_build_output}" | tail -n1)/bin/nsc"
fi
fi
if [[ ! -x "${NSC_BIN}" ]]; then
echo "unable to resolve an executable nsc binary; set NSC_BIN explicitly" >&2
exit 1
fi
token_file="${REPO_ROOT}/intake/forgejo_nsc_token.txt"
dispatcher_out="${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml"
autoscaler_out="${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml"
dispatcher_src="${REPO_ROOT}/services/forgejo-nsc/deploy/dispatcher.yaml"
autoscaler_src="${REPO_ROOT}/services/forgejo-nsc/deploy/autoscaler.yaml"
if [[ "${REFRESH_TOKEN}" -eq 1 || ! -s "${token_file}" ]]; then
"${NSC_BIN}" auth check-login --duration 20m >/dev/null
"${NSC_BIN}" auth generate-dev-token --output_to "${token_file}" >/dev/null
chmod 600 "${token_file}"
fi
webhook_secret="$(python3 - <<'PY'
import secrets
print(secrets.token_hex(32))
PY
)"
token_name="${TOKEN_NAME_PREFIX}-$(date -u +%Y%m%dT%H%M%SZ)"
forgejo_pat="$(
ssh \
-i "${SSH_KEY}" \
-o IdentitiesOnly=yes \
-o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \
-o StrictHostKeyChecking=accept-new \
"${HOST}" \
"set -euo pipefail; forgejo_bin=\$(systemctl show -p ExecStart forgejo.service --value | sed -E 's/^\\{ path=([^ ;]+).*/\\1/'); sudo -u forgejo \"\${forgejo_bin}\" --config /var/lib/forgejo/custom/conf/app.ini --custom-path /var/lib/forgejo/custom --work-path /var/lib/forgejo admin user generate-access-token --username '${CONTACT_USER}' --scopes all --raw --token-name '${token_name}'" \
| tr -d '\r\n'
)"
if [[ -z "${forgejo_pat}" ]]; then
echo "failed to mint Forgejo PAT on ${HOST}" >&2
exit 1
fi
ssh \
-i "${SSH_KEY}" \
-o IdentitiesOnly=yes \
-o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \
-o StrictHostKeyChecking=accept-new \
"${HOST}" \
'bash -s' <<EOF
set -euo pipefail
base_url='http://127.0.0.1:3000'
token='${forgejo_pat}'
scope_owner='${SCOPE_OWNER}'
scope_name='${SCOPE_NAME}'
api() {
curl -sS -o /tmp/forgejo-provision-response.json -w '%{http_code}' \
-H "Authorization: token \${token}" \
-H 'Content-Type: application/json' \
"\$@"
}
org_code="\$(api "\${base_url}/api/v1/orgs/\${scope_owner}")"
if [[ "\${org_code}" == "404" ]]; then
cat >/tmp/forgejo-provision-org.json <<JSON
{"username":"${SCOPE_OWNER}","full_name":"${SCOPE_OWNER}","visibility":"public"}
JSON
org_code="\$(api -X POST --data @/tmp/forgejo-provision-org.json "\${base_url}/api/v1/orgs")"
if [[ "\${org_code}" != "201" ]]; then
echo "failed to create Forgejo org ${SCOPE_OWNER} (HTTP \${org_code})" >&2
cat /tmp/forgejo-provision-response.json >&2
exit 1
fi
fi
repo_code="\$(api "\${base_url}/api/v1/repos/\${scope_owner}/\${scope_name}")"
if [[ "\${repo_code}" == "404" ]]; then
cat >/tmp/forgejo-provision-repo.json <<JSON
{"name":"${SCOPE_NAME}","description":"Burrow forge bootstrap repository","private":false,"default_branch":"main","auto_init":false}
JSON
repo_code="\$(api -X POST --data @/tmp/forgejo-provision-repo.json "\${base_url}/api/v1/orgs/\${scope_owner}/repos")"
if [[ "\${repo_code}" != "201" ]]; then
echo "failed to create Forgejo repo ${SCOPE_OWNER}/${SCOPE_NAME} (HTTP \${repo_code})" >&2
cat /tmp/forgejo-provision-response.json >&2
exit 1
fi
fi
EOF
FORGEJO_PAT="${forgejo_pat}" \
WEBHOOK_SECRET="${webhook_secret}" \
DISPATCHER_SRC="${dispatcher_src}" \
AUTOSCALER_SRC="${autoscaler_src}" \
DISPATCHER_OUT="${dispatcher_out}" \
AUTOSCALER_OUT="${autoscaler_out}" \
python3 - <<'PY'
import os
from pathlib import Path
def render(src: str, dst: str) -> None:
text = Path(src).read_text(encoding="utf-8")
text = text.replace("PENDING-FORGEJO-PAT", os.environ["FORGEJO_PAT"])
text = text.replace("PENDING-WEBHOOK-SECRET", os.environ["WEBHOOK_SECRET"])
Path(dst).write_text(text, encoding="utf-8")
render(os.environ["DISPATCHER_SRC"], os.environ["DISPATCHER_OUT"])
render(os.environ["AUTOSCALER_SRC"], os.environ["AUTOSCALER_OUT"])
PY
chmod 600 "${dispatcher_out}" "${autoscaler_out}"
echo "Rendered intake/forgejo_nsc_token.txt, intake/forgejo_nsc_dispatcher.yaml, and intake/forgejo_nsc_autoscaler.yaml."
echo "Minted Forgejo PAT ${token_name} for ${CONTACT_USER} on ${HOST}."

View file

@ -0,0 +1,132 @@
#!/usr/bin/env bash
# Push the rendered forgejo-nsc runtime inputs from intake/ onto the forge
# host and (unless --no-restart) bounce the dispatcher/autoscaler units.
set -euo pipefail
# Print CLI help; the quoted heredoc keeps the text literal.
usage() {
cat <<'EOF'
Usage: Scripts/sync-forgejo-nsc-config.sh [options]
Copy Burrow forgejo-nsc runtime inputs from intake/ onto the forge host and
restart the dispatcher/autoscaler units.
Options:
--host <user@host> SSH target (default: root@git.burrow.net)
--ssh-key <path> SSH private key (default: intake/agent_at_burrow_net_ed25519)
--rotate-pat Re-render the intake files before syncing.
--no-restart Copy files only.
-h, --help Show this help text.
EOF
}
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# Defaults; each may be preset via the environment and overridden by flags.
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
ROTATE_PAT=0
NO_RESTART=0
# Flag parsing; ${2:?...} aborts with a clear message on a missing value.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --host)
      HOST="${2:?missing value for --host}"
      shift 2
      ;;
    --ssh-key)
      SSH_KEY="${2:?missing value for --ssh-key}"
      shift 2
      ;;
    --rotate-pat)
      ROTATE_PAT=1
      shift
      ;;
    --no-restart)
      NO_RESTART=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done
mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"
# Abort with a message on stderr unless the named command is on PATH.
burrow_require_cmd() {
    command -v "$1" >/dev/null 2>&1 && return 0
    echo "missing required command: $1" >&2
    exit 1
}
burrow_require_cmd ssh
burrow_require_cmd scp
if [[ ! -f "${SSH_KEY}" ]]; then
  echo "forge SSH key not found: ${SSH_KEY}" >&2
  exit 1
fi
# --rotate-pat re-renders the intake files via the provisioning script first.
if [[ "${ROTATE_PAT}" -eq 1 ]]; then
  "${SCRIPT_DIR}/provision-forgejo-nsc.sh" --host "${HOST}" --ssh-key "${SSH_KEY}"
fi
token_file="${REPO_ROOT}/intake/forgejo_nsc_token.txt"
dispatcher_file="${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml"
autoscaler_file="${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml"
# All three inputs must exist and be non-empty before touching the host.
for path in "${token_file}" "${dispatcher_file}" "${autoscaler_file}"; do
  if [[ ! -s "${path}" ]]; then
    echo "required runtime input missing or empty: ${path}" >&2
    exit 1
  fi
done
# Shared SSH/scp option set for the forge host.
ssh_opts=(
  -i "${SSH_KEY}"
  -o IdentitiesOnly=yes
  -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}"
  -o StrictHostKeyChecking=accept-new
)
# Stage files in a remote mktemp dir; removed on exit whatever happens.
remote_tmp="$(ssh "${ssh_opts[@]}" "${HOST}" "mktemp -d")"
cleanup() {
  if [[ -n "${remote_tmp:-}" ]]; then
    ssh "${ssh_opts[@]}" "${HOST}" "rm -rf '${remote_tmp}'" >/dev/null 2>&1 || true
  fi
}
trap cleanup EXIT
scp "${ssh_opts[@]}" \
  "${token_file}" \
  "${dispatcher_file}" \
  "${autoscaler_file}" \
  "${HOST}:${remote_tmp}/"
# Install into place owned by forgejo-nsc with mode 0400. The $(basename ...)
# substitutions expand locally, inside the double-quoted remote command.
ssh "${ssh_opts[@]}" "${HOST}" "
set -euo pipefail
install -d -m 0755 /var/lib/burrow/intake
install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${token_file}")' /var/lib/burrow/intake/forgejo_nsc_token.txt
install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${dispatcher_file}")' /var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml
install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${autoscaler_file}")' /var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml
"
# Restart both units and surface their status plus the installed files.
if [[ "${NO_RESTART}" -eq 0 ]]; then
  ssh "${ssh_opts[@]}" "${HOST}" "
set -euo pipefail
systemctl restart forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service
systemctl is-active forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service
ls -l \
/var/lib/burrow/intake/forgejo_nsc_token.txt \
/var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml \
/var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml
"
fi
echo "forgejo-nsc runtime sync complete (host=${HOST}, restarted=$((1 - NO_RESTART)))."

171
Tools/forwardemail-custom-s3.sh Executable file
View file

@ -0,0 +1,171 @@
#!/usr/bin/env bash
# Configure (or re-test) Forward Email's per-domain custom S3 backup storage.
set -euo pipefail
# Tighten default permissions: the curl config created below carries secrets.
umask 077

# Print CLI usage to stdout.
usage() {
  cat <<'EOF'
Usage:
  Tools/forwardemail-custom-s3.sh \
    --domain burrow.net \
    --api-token-file intake/forwardemail_api_token.txt \
    --s3-endpoint https://<endpoint> \
    --s3-region <region> \
    --s3-bucket <bucket> \
    --s3-access-key-file intake/hetzner-s3-user.txt \
    --s3-secret-key-file intake/hetzner-s3-secret.txt

Options:
  --domain <domain>            Forward Email domain to update.
  --api-token-file <path>      File containing the Forward Email API token.
  --s3-endpoint <url>          S3-compatible endpoint URL.
  --s3-region <region>         S3 region string expected by Forward Email.
  --s3-bucket <name>           Bucket used for alias backup uploads.
  --s3-access-key-file <path>  File containing the S3 access key id.
  --s3-secret-key-file <path>  File containing the S3 secret access key.
  --test-only                  Skip the update call and only test the saved connection.
  --help                       Show this help text.

Notes:
  - Secrets are passed to curl through a temporary config file to avoid putting
    them in the process list.
  - By default the script updates the domain settings and then calls
    /test-s3-connection.
  - For Hetzner Object Storage, use the regional S3 endpoint such as
    https://hel1.your-objectstorage.com, not an account alias endpoint.
EOF
}
# Report a fatal error on stderr and terminate the script.
fail() { printf 'error: %s\n' "$*" >&2; exit 1; }
# Abort via fail() unless the argument names an existing regular file.
require_file() {
  local candidate="$1"
  if [[ ! -f "$candidate" ]]; then
    fail "missing file: $candidate"
  fi
}
# Emit the contents of a secret file with all CR/LF characters removed.
# Aborts if the result is empty so a blank file cannot silently pass.
read_secret() {
  local secret_path="$1" secret
  secret="$(tr -d '\r\n' < "$secret_path")"
  if [[ -z "$secret" ]]; then
    fail "empty secret file: $secret_path"
  fi
  printf '%s' "$secret"
}
# Option state: empty/false until set by the flags parsed below.
domain=""
api_token_file=""
s3_endpoint=""
s3_region=""
s3_bucket=""
s3_access_key_file=""
s3_secret_key_file=""
test_only=false

# Parse command-line flags. "${2:-}" keeps a missing value from tripping
# `set -u`; the emptiness checks after the loop catch it instead.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --domain)
      domain="${2:-}"
      shift 2
      ;;
    --api-token-file)
      api_token_file="${2:-}"
      shift 2
      ;;
    --s3-endpoint)
      s3_endpoint="${2:-}"
      shift 2
      ;;
    --s3-region)
      s3_region="${2:-}"
      shift 2
      ;;
    --s3-bucket)
      s3_bucket="${2:-}"
      shift 2
      ;;
    --s3-access-key-file)
      s3_access_key_file="${2:-}"
      shift 2
      ;;
    --s3-secret-key-file)
      s3_secret_key_file="${2:-}"
      shift 2
      ;;
    --test-only)
      test_only=true
      shift
      ;;
    --help|-h)
      usage
      exit 0
      ;;
    *)
      fail "unknown argument: $1"
      ;;
  esac
done

# Validate arguments. The S3 settings are only required when the script will
# actually rewrite the domain configuration (i.e. not in --test-only mode).
[[ -n "$domain" ]] || fail "--domain is required"
[[ -n "$api_token_file" ]] || fail "--api-token-file is required"
[[ -n "$s3_endpoint" || "$test_only" == true ]] || fail "--s3-endpoint is required unless --test-only is set"
[[ -n "$s3_region" || "$test_only" == true ]] || fail "--s3-region is required unless --test-only is set"
[[ -n "$s3_bucket" || "$test_only" == true ]] || fail "--s3-bucket is required unless --test-only is set"
[[ -n "$s3_access_key_file" || "$test_only" == true ]] || fail "--s3-access-key-file is required unless --test-only is set"
[[ -n "$s3_secret_key_file" || "$test_only" == true ]] || fail "--s3-secret-key-file is required unless --test-only is set"

require_file "$api_token_file"
api_token="$(read_secret "$api_token_file")"

if [[ "$test_only" == false ]]; then
  require_file "$s3_access_key_file"
  require_file "$s3_secret_key_file"
  s3_access_key_id="$(read_secret "$s3_access_key_file")"
  s3_secret_access_key="$(read_secret "$s3_secret_key_file")"
  # Sanity-check the endpoint scheme before sending it to the vendor API.
  case "$s3_endpoint" in
    http://*|https://*)
      ;;
    *)
      fail "--s3-endpoint must start with http:// or https://"
      ;;
  esac
fi

# Secrets travel to curl via a config file (never argv) so they stay out of
# the process list; the trap removes the file on exit. umask 077 above makes
# mktemp's file private.
curl_config="$(mktemp)"
trap 'rm -f "$curl_config"' EXIT

if [[ "$test_only" == false ]]; then
  # PUT the domain's custom-S3 settings to the Forward Email API.
  cat >"$curl_config" <<EOF
silent
show-error
fail-with-body
url = "https://api.forwardemail.net/v1/domains/${domain}"
request = "PUT"
user = "${api_token}:"
data = "has_custom_s3=true"
data-urlencode = "s3_endpoint=${s3_endpoint}"
data-urlencode = "s3_access_key_id=${s3_access_key_id}"
data-urlencode = "s3_secret_access_key=${s3_secret_access_key}"
data-urlencode = "s3_region=${s3_region}"
data-urlencode = "s3_bucket=${s3_bucket}"
EOF
  printf 'Configuring Forward Email custom S3 for %s\n' "$domain" >&2
  curl --config "$curl_config"
  printf '\n' >&2
fi

# Always finish by asking Forward Email to test the saved S3 connection.
cat >"$curl_config" <<EOF
silent
show-error
fail-with-body
url = "https://api.forwardemail.net/v1/domains/${domain}/test-s3-connection"
request = "POST"
user = "${api_token}:"
EOF
printf 'Testing Forward Email custom S3 for %s\n' "$domain" >&2
curl --config "$curl_config"
printf '\n' >&2

View file

@ -0,0 +1,261 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import datetime as dt
import hashlib
import hmac
import sys
import textwrap
from pathlib import Path
from urllib.parse import urlencode, urlparse
import requests
def read_secret(path: str) -> str:
    """Return the whitespace-stripped contents of a secret file.

    Exits with an error message when the file is empty after stripping,
    so a blank credential file cannot slip through.
    """
    raw = Path(path).read_text(encoding="utf-8")
    secret = raw.strip()
    if not secret:
        raise SystemExit(f"error: empty secret file: {path}")
    return secret
def sign(key: bytes, msg: str) -> bytes:
    """HMAC-SHA256 of ``msg`` (UTF-8) keyed by ``key``.

    One step of the SigV4 signing-key derivation chain.
    """
    mac = hmac.new(key, msg.encode("utf-8"), hashlib.sha256)
    return mac.digest()
def request(
    *,
    method: str,
    endpoint: str,
    region: str,
    access_key: str,
    secret_key: str,
    bucket: str,
    query: dict[str, str] | None = None,
    body: bytes = b"",
    content_type: str | None = None,
) -> requests.Response:
    """Send one SigV4-signed, path-style S3 request and return the response.

    Implements the AWS Signature Version 4 flow by hand (no AWS SDK
    dependency). Callers inspect ``status_code``/``text`` themselves; no
    status handling happens here.
    """
    parsed = urlparse(endpoint)
    if parsed.scheme != "https":
        raise SystemExit("error: endpoint must use https")
    host = parsed.netloc
    # Path-style addressing: the bucket rides on the URI, not the hostname.
    canonical_uri = f"/{bucket}"
    query = query or {}
    # NOTE(review): urlencode defaults to quote_plus, which encodes spaces as
    # '+' instead of the '%20' SigV4 canonicalization expects. Harmless for
    # the keys used here (e.g. "lifecycle"), but confirm before passing query
    # values containing spaces.
    canonical_querystring = urlencode(sorted(query.items()), doseq=True, safe="~")
    now = dt.datetime.now(dt.timezone.utc)
    amz_date = now.strftime("%Y%m%dT%H%M%SZ")
    date_stamp = now.strftime("%Y%m%d")
    payload_hash = hashlib.sha256(body).hexdigest()
    headers = {
        "host": host,
        "x-amz-content-sha256": payload_hash,
        "x-amz-date": amz_date,
    }
    if content_type:
        headers["content-type"] = content_type
    # Canonical headers must be lowercase, sorted, and newline-terminated.
    signed_headers = ";".join(sorted(headers.keys()))
    canonical_headers = "".join(f"{name}:{headers[name]}\n" for name in sorted(headers.keys()))
    canonical_request = "\n".join(
        [
            method,
            canonical_uri,
            canonical_querystring,
            canonical_headers,
            signed_headers,
            payload_hash,
        ]
    )
    algorithm = "AWS4-HMAC-SHA256"
    credential_scope = f"{date_stamp}/{region}/s3/aws4_request"
    string_to_sign = "\n".join(
        [
            algorithm,
            amz_date,
            credential_scope,
            hashlib.sha256(canonical_request.encode("utf-8")).hexdigest(),
        ]
    )
    # Derive the signing key: secret -> date -> region -> service -> request.
    k_date = sign(("AWS4" + secret_key).encode("utf-8"), date_stamp)
    k_region = sign(k_date, region)
    k_service = sign(k_region, "s3")
    signing_key = sign(k_service, "aws4_request")
    signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
    auth_header = (
        f"{algorithm} Credential={access_key}/{credential_scope}, "
        f"SignedHeaders={signed_headers}, Signature={signature}"
    )
    url = f"{parsed.scheme}://{host}{canonical_uri}"
    if canonical_querystring:
        url = f"{url}?{canonical_querystring}"
    response = requests.request(
        method,
        url,
        headers={**headers, "Authorization": auth_header},
        data=body,
        timeout=30,
    )
    return response
def ensure_bucket(args: argparse.Namespace, bucket: str) -> None:
    """Create ``bucket`` if it does not already exist.

    A HEAD probe decides: 200 means the bucket exists (no-op), 404 means it
    should be created, and anything else is treated as a fatal error.
    """
    common = dict(
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
    )
    probe = request(method="HEAD", **common)
    if probe.status_code == 200:
        print(f"{bucket}: exists")
        return
    if probe.status_code != 404:
        raise SystemExit(f"error: HEAD {bucket} returned {probe.status_code}: {probe.text[:200]}")
    payload = textwrap.dedent(
        f"""\
        <?xml version="1.0" encoding="UTF-8"?>
        <CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        <LocationConstraint>{args.region}</LocationConstraint>
        </CreateBucketConfiguration>
        """
    ).encode("utf-8")
    created = request(
        method="PUT",
        body=payload,
        content_type="application/xml",
        **common,
    )
    if created.status_code not in (200, 204):
        raise SystemExit(f"error: PUT {bucket} returned {created.status_code}: {created.text[:200]}")
    print(f"{bucket}: created")
def put_lifecycle(args: argparse.Namespace, bucket: str) -> None:
    """Install a bucket-wide lifecycle rule expiring objects after ``args.expire_days`` days."""
    rule_xml = textwrap.dedent(
        f"""\
        <?xml version="1.0" encoding="UTF-8"?>
        <LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        <Rule>
        <ID>expire-forwardemail-backups-after-{args.expire_days}-days</ID>
        <Status>Enabled</Status>
        <Filter>
        <Prefix></Prefix>
        </Filter>
        <Expiration>
        <Days>{args.expire_days}</Days>
        </Expiration>
        </Rule>
        </LifecycleConfiguration>
        """
    ).encode("utf-8")
    # The "?lifecycle" subresource selects the lifecycle-configuration API.
    resp = request(
        method="PUT",
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
        query={"lifecycle": ""},
        body=rule_xml,
        content_type="application/xml",
    )
    if resp.status_code not in (200, 204):
        raise SystemExit(
            f"error: PUT lifecycle for {bucket} returned {resp.status_code}: {resp.text[:200]}"
        )
    print(f"{bucket}: lifecycle set to {args.expire_days} days")
def get_lifecycle(args: argparse.Namespace, bucket: str) -> None:
    """Fetch and print the bucket's current lifecycle configuration XML."""
    resp = request(
        method="GET",
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
        query={"lifecycle": ""},
    )
    if resp.status_code != 200:
        raise SystemExit(
            f"error: GET lifecycle for {bucket} returned {resp.status_code}: {resp.text[:200]}"
        )
    print(f"=== {bucket} lifecycle ===")
    print(resp.text.strip())
def parse_args() -> argparse.Namespace:
    """Build and evaluate the CLI for the bucket-provisioning helper."""
    cli = argparse.ArgumentParser(
        description="Provision Hetzner object-storage buckets for Forward Email backups."
    )
    cli.add_argument(
        "--endpoint",
        default="https://hel1.your-objectstorage.com",
        help="Public S3-compatible endpoint URL. For Hetzner, use the regional endpoint, not the account alias.",
    )
    cli.add_argument("--region", default="hel1", help="S3 region.")
    cli.add_argument(
        "--access-key-file",
        default="intake/hetzner-s3-user.txt",
        help="File containing the S3 access key id.",
    )
    cli.add_argument(
        "--secret-key-file",
        default="intake/hetzner-s3-secret.txt",
        help="File containing the S3 secret key.",
    )
    # action="append" collects repeated --bucket flags into a list.
    cli.add_argument(
        "--bucket",
        action="append",
        required=True,
        help="Bucket to provision. Repeat for multiple buckets.",
    )
    cli.add_argument(
        "--expire-days",
        type=int,
        default=90,
        help="Lifecycle expiry window in days.",
    )
    cli.add_argument(
        "--verify-only",
        action="store_true",
        help="Skip create/update and only read the current lifecycle.",
    )
    return cli.parse_args()
def main() -> None:
    """Entry point: load credentials, then provision or inspect each bucket."""
    args = parse_args()
    # Stash the decoded secrets on the namespace so helpers take one argument.
    args.access_key = read_secret(args.access_key_file)
    args.secret_key = read_secret(args.secret_key_file)
    for bucket in args.bucket:
        if args.verify_only:
            get_lifecycle(args, bucket)
        else:
            ensure_bucket(args, bucket)
            put_lifecycle(args, bucket)
            get_lifecycle(args, bucket)
if __name__ == "__main__":
    try:
        main()
    # Convert network-level failures into a clean exit message (no traceback).
    except requests.RequestException as err:
        raise SystemExit(f"error: request failed: {err}") from err

101
docs/FORWARDEMAIL.md Normal file
View file

@ -0,0 +1,101 @@
# Forward Email Backups
Burrow's mail direction is hosted mail on [Forward Email](https://forwardemail.net/), with domain-owned backup retention in our own S3-compatible object storage.
This is the first mail path to operationalize for `burrow.net` and `burrow.rs`. It keeps SMTP/IMAP hosting off the first forge host while still giving Burrow control over backup retention and object ownership.
## What Forward Email Requires
Forward Email exposes custom backup storage per domain. The documented API shape is:
- `PUT /v1/domains/{domain}` with:
- `has_custom_s3=true`
- `s3_endpoint`
- `s3_access_key_id`
- `s3_secret_access_key`
- `s3_region`
- `s3_bucket`
- `POST /v1/domains/{domain}/test-s3-connection`
Forward Email also documents these operational constraints:
- the bucket must remain private
- credentials are validated with `HeadBucket`
- failed or public-bucket configurations fall back to Forward Email's default storage and notify domain administrators
- custom S3 keeps every backup version, so lifecycle expiration is our responsibility
## Burrow Secret Layout
Present in `intake/` today:
- `intake/forwardemail_api_token.txt`
- `intake/hetzner-s3-user.txt`
- `intake/hetzner-s3-secret.txt`
- Hetzner public S3 endpoint for Forward Email: `https://hel1.your-objectstorage.com`
- Hetzner object storage region: `hel1`
- Hetzner bucket used for Forward Email backups: `burrow`
## Verified Storage State
As of March 15, 2026, Burrow's Forward Email custom S3 configuration is live:
- endpoint: `https://hel1.your-objectstorage.com`
- region: `hel1`
- bucket: `burrow`
- `burrow.net` has `has_custom_s3=true`
- `burrow.rs` has `has_custom_s3=true`
- Forward Email's `/test-s3-connection` succeeded for both domains
- the `burrow` bucket enforces lifecycle expiration after `90` days
Forward Email performs bucket validation with bucket-style addressing. For Hetzner Object Storage, this means the working endpoint is the regional S3 endpoint (`https://hel1.your-objectstorage.com`), not the account alias (`https://burrow.hel1.your-objectstorage.com`). Using the account alias causes TLS hostname mismatches when the vendor prepends the bucket name.
## Helper
Use [`Tools/forwardemail-custom-s3.sh`](../Tools/forwardemail-custom-s3.sh) to configure or retest the domain setting without putting secrets on the process list.
Use [`Tools/forwardemail-hetzner-storage.py`](../Tools/forwardemail-hetzner-storage.py) to ensure the Hetzner backup bucket exists and to apply lifecycle expiry before enabling custom S3 on the Forward Email side.
Bucket bootstrap example:
```sh
Tools/forwardemail-hetzner-storage.py \
--endpoint https://hel1.your-objectstorage.com \
--bucket burrow \
--expire-days 90
```
Example:
```sh
Tools/forwardemail-custom-s3.sh \
--domain burrow.net \
--api-token-file intake/forwardemail_api_token.txt \
--s3-endpoint https://hel1.your-objectstorage.com \
--s3-region hel1 \
--s3-bucket burrow \
--s3-access-key-file intake/hetzner-s3-user.txt \
--s3-secret-key-file intake/hetzner-s3-secret.txt
```
Retest an existing domain configuration without rewriting it:
```sh
Tools/forwardemail-custom-s3.sh \
--domain burrow.net \
--api-token-file intake/forwardemail_api_token.txt \
--test-only
```
## Retention
Forward Email preserves every backup object when custom S3 is enabled. Configure lifecycle expiration on the bucket itself. A 30-day or 90-day expiry window is the baseline recommendation from the vendor docs; Burrow should choose explicitly per domain instead of letting the bucket grow without bound. The current Burrow bootstrap helper defaults to `90` days.
## Identity Direction
Hosted mail and SaaS identity are separate concerns:
- mail hosting/backups: Forward Email + Burrow-owned S3-compatible storage
- interactive identity: Authentik as the long-term IdP
- future SaaS SSO target: Linear via SAML once the workspace and plan are ready
This means the forge host does not need to become the first mail server just to give Burrow mailboxes or retention control.

31
docs/PROTOCOL_ROADMAP.md Normal file
View file

@ -0,0 +1,31 @@
# Protocol Roadmap
Burrow currently has two tunnel paths in-tree:
- a WireGuard data plane
- a mesh transport built on `iroh`
What it does not have yet is a transport-neutral control plane that can honestly claim full MASQUE `CONNECT-IP` or full Tailscale-style negotiation parity. This repository now contains the beginnings of that layer:
- control-plane data structures in `burrow/src/control/mod.rs`
- local auth bootstrap and persistent node/session storage in `burrow/src/auth/server/`
- governance documents under `evolution/` for the bigger protocol work
## `CONNECT-IP`
Full RFC 9484 support requires more than packet forwarding. It needs HTTP/3 session management, Capsule handling, HTTP Datagram context identifiers, address assignment, route advertisement, and request-scope enforcement. Burrow does not implement those end to end yet.
## Tailscale-Style Negotiation
Burrow now has register/map request and response types plus persistent node records, but it does not yet implement the full Tailscale capability surface, peer delta protocol, DERP coordination, or Noise-based control transport.
## Current Direction
The intended sequence is:
1. Stabilize the control-plane data model and bootstrap auth.
2. Introduce transport-neutral route and address abstractions.
3. Add MASQUE framing and HTTP/3 transport support.
4. Expand policy, relay, and interoperability testing.
This keeps Burrow honest about what is running today while creating a clean path for the rest.

30
docs/WIREGUARD_LINEAGE.md Normal file
View file

@ -0,0 +1,30 @@
# WireGuard Rust Lineage
Burrow's in-tree WireGuard engine is not a greenfield implementation. It was lifted from the Rust WireGuard lineage around Cloudflare's BoringTun, then cut down and reshaped to fit Burrow's own daemon and tunnel abstractions.
## What Was Lifted
- The repository history includes `1b39eca` (`boringtun wip`) and `28af9003` (`merge boringtun into burrow`).
- The current `burrow/src/wireguard/noise/*` files still carry the original Cloudflare copyright and SPDX headers.
- Core protocol machinery such as the Noise handshake, session state, rate limiter, and timer logic came from that imported body of work.
## What Changed in Burrow
Burrow does not embed BoringTun unchanged.
- The original device layer was replaced with Burrow-specific interface and peer control blocks in `burrow/src/wireguard/iface.rs` and `burrow/src/wireguard/pcb.rs`.
- Configuration handling was rewritten around Burrow's own INI parser and config model in `burrow/src/wireguard/config.rs`.
- The daemon now resolves the active runtime from the database-backed network list rather than from a single static WireGuard payload.
- Burrow added its own runtime switching path so WireGuard and mesh transports can share one daemon lifecycle.
## What Was Improved
The lifted code has been tightened further in-repo.
- Deprecated constant-time comparisons were replaced with `subtle`.
- Network ordering and runtime selection are now deterministic and test-covered.
- The Burrow runtime can swap between WireGuard and mesh-backed networks without restarting the daemon process itself.
## Why This Matters
This project should be explicit about lineage. Burrow benefits from proven Rust WireGuard work, but it owns the integration surface, runtime behavior, and future maintenance burden. That is why the code should be documented as lifted, modified, and improved rather than described as wholly original.

60
evolution/README.md Normal file
View file

@ -0,0 +1,60 @@
# Burrow Evolution
Burrow Evolution Proposals (BEPs) are the repository's durable design record for protocol work, control-plane changes, forge infrastructure, and operational policy.
## Goals
1. Capture intent before implementation outruns the architecture.
2. Give contributors and agents enough context to work safely without re-discovering prior decisions.
3. Tie ambitious work to concrete validation, rollout, and rollback criteria.
## When a BEP is required
Open a BEP for:
- new transports or protocol families
- control-plane and identity changes
- deployment, forge, runner, or secrets changes
- data model migrations
- user-visible behavior that changes security or routing semantics
Small bug fixes and isolated refactors do not need a BEP unless they materially change one of the areas above.
## Lifecycle
1. Pitch
Capture the problem and why it matters now.
2. Draft
Copy `evolution/proposals/0000-template.md` to `evolution/proposals/BEP-XXXX-short-slug.md`.
3. Review
Collect feedback, tighten the design, and document unresolved concerns.
4. Decision
Mark the proposal `Accepted`, `Rejected`, or `Returned for Revision`.
5. Implementation
Link code changes, tests, and rollout evidence.
6. Supersession
Keep historical proposals in-tree and point forward to the replacing BEP.
## Status Values
- `Pitch`
- `Draft`
- `In Review`
- `Accepted`
- `Implemented`
- `Rejected`
- `Returned for Revision`
- `Superseded`
- `Archived`
## Layout
```text
evolution/
README.md
proposals/
0000-template.md
BEP-0001-...
```
Use ASCII Markdown. Keep metadata at the top of each proposal so tooling and future agents can parse it quickly.

View file

@ -0,0 +1,57 @@
# `BEP-XXXX` - Title Case Summary
```text
Status: Draft | In Review | Accepted | Implemented | Rejected | Returned for Revision | Superseded | Archived
Proposal: BEP-XXXX
Authors: <name(s) or agent ids>
Coordinator: <name>
Reviewers: <people, bots, contributors>
Constitution Sections: <II, III, IV, etc.>
Implementation PRs: <link(s)> (optional while drafting)
Decision Date: <YYYY-MM-DD or Pending>
```
## Summary
One or two paragraphs that state the desired outcome and why it matters.
## Motivation
- What problem exists today?
- Why should Burrow solve it now?
- Which issues, incidents, or constraints support the change?
## Detailed Design
- Architecture and boundaries
- Data model and migration plan
- Protocol or API changes
- Observability, testing, and failure handling
## Security and Operational Considerations
- Access and secret handling
- Abuse, downgrade, or supply-chain risks
- Rollback and kill-switch plans
## Contributor Playbook
Give the concrete steps, commands, checks, and evidence a contributor should produce while implementing or rolling out the change.
## Alternatives Considered
List alternatives and why they were rejected.
## Impact on Other Work
- follow-up tasks
- dependencies
- compatibility constraints
## Decision
Record the final call, who made it, and any conditions.
## References
Link relevant issues, specs, transcripts, and external research.

View file

@ -0,0 +1,61 @@
# `BEP-0001` - Sovereign Forge and Governance Bootstrap
```text
Status: Draft
Proposal: BEP-0001
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: II, III, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should own its forge, deployment logic, and operational context under `burrow.net`. This proposal establishes the repository-local governance and forge bootstrap required to move build, release, and infrastructure control out of GitHub-centric assumptions and into a self-hosted operating model.
## Motivation
- The repository currently keeps CI definitions under `.github/workflows/` but has no first-class self-hosted forge layout.
- Infrastructure changes and protocol work are already entangled; without a design record, the project risks landing irreversible operations without enough context.
- A self-hosted forge is a prerequisite for durable autonomy over source, runners, and release pipelines.
## Detailed Design
- Add a project constitution and BEP process under `evolution/`.
- Introduce a Nix flake and NixOS host/module layout for `burrow-forge`.
- Add Forgejo-native workflows under `.forgejo/workflows/` for repository-local CI.
- Bootstrap the initial forge identity around `contact@burrow.net` and an agent-owned SSH workflow.
## Security and Operational Considerations
- Initial bootstrap may read credentials from local intake, but production must converge on encrypted secret handling.
- The first forge host replacement must preserve rollback information before deleting any existing VM.
- DNS for `burrow.net` is currently pending activation; the forge rollout must not assume public reachability until nameserver cutover completes.
## Contributor Playbook
- Keep destructive host operations behind explicit verification of the current Hetzner state.
- Build and test repository-local workflows before using them for deployment.
- Record the active server id, image, IPs, and SSH path before replacement.
## Alternatives Considered
- Continue relying on GitHub Actions while separately hosting services. Rejected because it leaves source authority and CI policy split across systems.
- Stand up Forgejo without a repository-local operating model. Rejected because the repo would still be missing deployment truth.
## Impact on Other Work
- Blocks long-term migration of workflows away from GitHub.
- Provides the governance anchor for protocol and control-plane proposals.
## Decision
Pending.
## References
- `CONSTITUTION.md`
- `.github/workflows/`
- `.forgejo/workflows/`

View file

@ -0,0 +1,60 @@
# `BEP-0002` - Control-Plane Bootstrap and Local Auth
```text
Status: Draft
Proposal: BEP-0002
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: I, II, III, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow needs a repository-owned control-plane model instead of ad hoc network payload storage plus third-party-only auth. This proposal introduces a local username/password bootstrap for `contact@burrow.net`, plus a register/map data model shaped to support a Tailscale-style control server without claiming full parity yet.
## Motivation
- Current auth support is limited and does not provide a plain local bootstrap path for the project's own operator identity.
- The existing database stores network payloads, but not a durable model for users, nodes, sessions, or control-plane negotiation state.
- Future work on route policy, device coordination, and richer negotiation needs a real data model now.
## Detailed Design
- Add control-plane types for users, nodes, register requests, and map responses.
- Extend the auth server schema with local credentials, sessions, provider logins, and control nodes.
- Expose JSON endpoints for local login, node registration, and map retrieval.
- Seed the initial operator account from intake-backed bootstrap credentials.
## Security and Operational Considerations
- Passwords are stored with Argon2id hashes only.
- Session tokens are bearer credentials and must be treated as sensitive.
- The bootstrap credential path is a short-term path; follow-up work should move it into encrypted secret management before public deployment.
## Contributor Playbook
- Verify bootstrap account creation in an isolated test database.
- Exercise login, register, and map end to end with integration tests.
- Do not advertise protocol parity beyond the implemented request/response contract.
## Alternatives Considered
- Wait for full external identity-provider integration first. Rejected because the forge needs an operator account now.
- Keep control-plane state implicit in daemon-local configuration. Rejected because it cannot express multi-device coordination.
## Impact on Other Work
- Unblocks forge bootstrap and future device control-plane work.
- Creates the storage model needed for richer policy and transport proposals.
## Decision
Pending.
## References
- `burrow/src/auth/server/`
- `burrow/src/control/`

View file

@ -0,0 +1,61 @@
# `BEP-0003` - CONNECT-IP and Negotiation Roadmap
```text
Status: Draft
Proposal: BEP-0003
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: I, II, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should grow from a WireGuard-first tunnel runner into a transport stack that can support HTTP/3 MASQUE `CONNECT-IP` and a richer node negotiation model. This proposal stages that work so Burrow can adopt the right abstractions instead of stapling QUIC-era semantics onto a WireGuard-only daemon.
## Motivation
- `CONNECT-IP` introduces HTTP/3 sessions, context identifiers, address assignment, and route advertisements that do not fit the current daemon model.
- A Tailscale-style control plane requires explicit node, endpoint, and session state rather than raw network blobs.
- The project needs a roadmap that distinguishes data-model work, control-plane work, and actual transport implementation.
## Detailed Design
- Stage 1: land control-plane types and persistent auth/session/node storage.
- Stage 2: add transport-agnostic route, address-assignment, and policy abstractions in Burrow.
- Stage 3: implement MASQUE `CONNECT-IP` framing and HTTP Datagram handling.
- Stage 4: connect the transport layer to real relay, policy, and observability paths.
## Security and Operational Considerations
- `CONNECT-IP` changes the trust boundary from WireGuard peers to HTTP/3 peers and relays; authentication, replay handling, and scope restriction must be explicit.
- Route advertisements and delegated prefixes must be validated before touching the data plane.
- Control-plane capability claims must not imply support that the transport layer does not yet implement.
## Contributor Playbook
- Keep protocol codecs independently testable before integrating them into live transports.
- Add interoperability tests for every new capsule or datagram type.
- Separate request parsing, policy validation, and packet forwarding so regressions stay localized.
## Alternatives Considered
- Implement MASQUE directly in the daemon without control-plane refactoring. Rejected because the current daemon has no transport-neutral contract for routes or prefixes.
- Treat Tailscale negotiation as a one-off compatibility shim. Rejected because Burrow needs first-class control-plane concepts either way.
## Impact on Other Work
- Depends on BEP-0002.
- Informs future relay, policy, and node coordination work.
## Decision
Pending.
## References
- RFC 9484
- `burrow/src/daemon/`
- `burrow/src/control/`

View file

@ -0,0 +1,68 @@
# `BEP-0004` - Hosted Mail Backups and SaaS Identity
```text
Status: Draft
Proposal: BEP-0004
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: II, III, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should start with hosted mail on Forward Email instead of self-hosting SMTP and IMAP on the first forge machine. Backup retention should still be controlled by Burrow through custom S3-compatible storage backed by Burrow-owned object storage. In parallel, Burrow should treat SaaS identity as a separate track and converge on Authentik as the long-term IdP, with Linear SAML SSO as a planned downstream integration rather than an immediate bootstrap dependency.
## Motivation
- The first forge host already carries source control, CI, and deployment bootstrap risk. Adding a self-hosted mail stack increases operational scope before the forge is stable.
- Forward Email already exposes SMTP and IMAP while allowing per-domain custom S3 backup storage, which preserves Burrow's data ownership over mailbox backups.
- The repository needs a durable decision record that separates hosted mail operations from future SaaS SSO work.
## Detailed Design
- Use Forward Email as the operational mail provider for `burrow.net` and `burrow.rs`.
- Configure custom S3-compatible storage per domain using Burrow-controlled object storage credentials.
- Keep one backup bucket per domain and enforce lifecycle expiration at the bucket layer.
- Add repository-owned tooling and documentation for applying and testing the Forward Email custom S3 configuration.
- Treat Authentik as the future identity authority for SaaS applications, but keep Linear SAML as a later rollout once the workspace and vendor prerequisites are available. Linear's current docs place SAML and SCIM behind higher-tier workspace security settings, so Burrow should treat plan availability as an explicit precondition.
## Security and Operational Considerations
- Forward Email API tokens and S3 credentials must stay in secret files and must not be passed directly on the shell command line.
- Buckets must remain private. Public bucket detection by the vendor should be treated as a hard failure, not a warning.
- Backup growth is unbounded without lifecycle rules. Retention policy is part of the rollout, not optional cleanup.
- Hosted mail reduces MTA attack surface on the forge host, but it adds third-party dependency risk; keeping backups in Burrow-owned storage limits that blast radius.
## Contributor Playbook
- Put the Forward Email API token in `intake/forwardemail_api_token.txt`.
- Use `Tools/forwardemail-custom-s3.sh` to configure `burrow.net` and `burrow.rs`.
- Run the helper again with `--test-only` after any credential rotation.
- Record the chosen endpoint, region, bucket names, and lifecycle policy alongside rollout evidence.
- Do not claim Linear SAML is live until the Authentik app, Linear workspace settings, workspace plan prerequisites, and end-to-end login flow are verified.
## Alternatives Considered
- Self-host Stalwart on the forge host immediately. Rejected for the first rollout because it expands host scope before source control and CI are stable.
- Rely on Forward Email default backup storage only. Rejected because it gives Burrow less control over retention and data location.
- Delay all SaaS identity planning until after forge cutover. Rejected because Linear and other SaaS integrations will otherwise accrete without an agreed authority.
## Impact on Other Work
- Narrows the first forge host scope.
- Creates a clean mail path for `contact@burrow.net` without requiring self-hosted SMTP and IMAP.
- Leaves Authentik and Linear SAML as explicit follow-up work instead of hidden assumptions.
## Decision
Pending.
## References
- `docs/FORWARDEMAIL.md`
- `Tools/forwardemail-custom-s3.sh`
- Forward Email FAQ: custom S3-compatible storage for backups
- Linear docs: SAML SSO

86
flake.lock generated Normal file
View file

@ -0,0 +1,86 @@
{
"nodes": {
"disko": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1773506317,
"narHash": "sha256-qWKbLUJpavIpvOdX1fhHYm0WGerytFHRoh9lVck6Bh0=",
"type": "tarball",
"url": "https://codeload.github.com/nix-community/disko/tar.gz/master"
},
"original": {
"type": "tarball",
"url": "https://codeload.github.com/nix-community/disko/tar.gz/master"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"type": "tarball",
"url": "https://codeload.github.com/numtide/flake-utils/tar.gz/main"
},
"original": {
"type": "tarball",
"url": "https://codeload.github.com/numtide/flake-utils/tar.gz/main"
}
},
"hcloud-upload-image-src": {
"flake": false,
"locked": {
"lastModified": 1766413232,
"narHash": "sha256-1u9tpzciYjB/EgBI81pg9w0kez7hHZON7+AHvfKW7k0=",
"type": "tarball",
"url": "https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0"
},
"original": {
"type": "tarball",
"url": "https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1773389992,
"narHash": "sha256-wvfdLLWJ2I9oEpDd9PfMA8osfIZicoQ5MT1jIwNs9Tk=",
"type": "tarball",
"url": "https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable"
},
"original": {
"type": "tarball",
"url": "https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable"
}
},
"root": {
"inputs": {
"disko": "disko",
"flake-utils": "flake-utils",
"hcloud-upload-image-src": "hcloud-upload-image-src",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

190
flake.nix Normal file
View file

@ -0,0 +1,190 @@
{
  # Flake entrypoint for the Burrow workspace: per-system dev shells and
  # packages, plus the NixOS modules / host configuration for burrow-forge.
  description = "Burrow development shell and forge host configuration";
  inputs = {
    # All inputs are pinned as codeload tarball URLs rather than the github:
    # scheme; flake.lock records the narHash for each.
    nixpkgs.url = "tarball+https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable";
    flake-utils.url = "tarball+https://codeload.github.com/numtide/flake-utils/tar.gz/main";
    disko = {
      url = "tarball+https://codeload.github.com/nix-community/disko/tar.gz/master";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    # Source-only input (flake = false); packaged below with buildGoModule.
    hcloud-upload-image-src = {
      url = "tarball+https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0";
      flake = false;
    };
  };
  outputs = { self, nixpkgs, flake-utils, disko, hcloud-upload-image-src }:
    let
      supportedSystems = [
        "x86_64-linux"
        "aarch64-linux"
        "x86_64-darwin"
        "aarch64-darwin"
      ];
    in
    (flake-utils.lib.eachSystem supportedSystems (system:
      let
        pkgs = import nixpkgs {
          inherit system;
        };
        lib = pkgs.lib;
        # Toolchain shared by the interactive and CI dev shells.
        commonPackages = with pkgs; [
          cargo
          rustc
          rustfmt
          clippy
          protobuf
          pkg-config
          sqlite
          git
          openssh
          curl
          jq
          nodejs_20
          python3
          rsync
        ];
        # Prebuilt Namespace CLI (nsc), fetched per OS/arch from the upstream
        # release tarball with pinned hashes. Evaluates to null on host
        # platforms that are neither Linux nor Darwin.
        nscPkg =
          if pkgs.stdenv.isLinux || pkgs.stdenv.isDarwin then
            let
              version = "0.0.452";
              osName =
                if pkgs.stdenv.isLinux then
                  "linux"
                else if pkgs.stdenv.isDarwin then
                  "darwin"
                else
                  throw "nsc: unsupported host OS ${pkgs.stdenv.hostPlatform.system}";
              # Release artifact name and fixed-output hash per (OS, arch).
              archInfo =
                if pkgs.stdenv.hostPlatform.isx86_64 then
                  {
                    arch = "amd64";
                    hash =
                      if pkgs.stdenv.isLinux then
                        "sha256-FBqOJ0UQWTv2r4HWMHrR/aqFzDa0ej/mS8dSoaCe6fY="
                      else
                        "sha256-3fRKWO0SCCa5PEym5yCB7dtyEx3xSxXSHfJYz8B+/4M=";
                  }
                else if pkgs.stdenv.hostPlatform.isAarch64 then
                  {
                    arch = "arm64";
                    hash =
                      if pkgs.stdenv.isLinux then
                        "sha256-A6twO8Ievbu7Gi5Hqon4ug5rCGOm/uHhlCya3px6+io="
                      else
                        "sha256-n363xLaGhy+a6lw2F+WicQYGXnGYnqRW8aTQCSppwcw=";
                  }
                else
                  throw "nsc: unsupported host platform ${pkgs.stdenv.hostPlatform.system}";
              src = pkgs.fetchurl {
                url = "https://github.com/namespacelabs/foundation/releases/download/v${version}/nsc_${version}_${osName}_${archInfo.arch}.tar.gz";
                sha256 = archInfo.hash;
              };
            in
            # Repackage the upstream tarball as-is; no compilation happens.
            pkgs.stdenvNoCC.mkDerivation {
              pname = "nsc";
              inherit version src;
              dontConfigure = true;
              dontBuild = true;
              unpackPhase = ''
                tar -xzf "$src"
              '';
              # Ship the CLI plus its Docker/Bazel credential helper binaries.
              installPhase = ''
                install -d "$out/bin"
                install -m 0555 nsc "$out/bin/nsc"
                install -m 0555 docker-credential-nsc "$out/bin/docker-credential-nsc"
                install -m 0555 bazel-credential-nsc "$out/bin/bazel-credential-nsc"
              '';
            }
          else
            null;
        # Hetzner image-upload helper built from the pinned v1.3.0 source input.
        hcloudUploadImagePkg = pkgs.buildGoModule {
          pname = "hcloud-upload-image";
          version = "1.3.0";
          src = hcloud-upload-image-src;
          vendorHash = "sha256-IdOAUBPg0CEuHd2rdc7jOlw0XtnAhr3PVPJbnFs2+x4=";
          subPackages = [ "." ];
          env.GOWORK = "off";
          ldflags = [
            "-s"
            "-w"
          ];
        };
        # In-repo source for the dispatcher/autoscaler, with VCS, vendored, and
        # build-output directories filtered out of the store path.
        forgejoNscSrc = lib.cleanSourceWith {
          src = ./services/forgejo-nsc;
          filter = path: type:
            let
              p = toString path;
              name = builtins.baseNameOf path;
              hasDir = dir: lib.hasInfix "/${dir}/" p || lib.hasSuffix "/${dir}" p;
            in
            !(hasDir ".git" || hasDir "vendor" || hasDir "node_modules" || name == "result");
        };
        # Both binaries build from the same module, hence the shared vendorHash.
        forgejoNscDispatcher = pkgs.buildGoModule {
          pname = "forgejo-nsc-dispatcher";
          version = "0.1.0";
          src = forgejoNscSrc;
          subPackages = [ "./cmd/forgejo-nsc-dispatcher" ];
          vendorHash = "sha256-Kpr+5Q7Dy4JiLuJVZbFeJAzLR7PLPYxhtJqfxMEytcs=";
        };
        forgejoNscAutoscaler = pkgs.buildGoModule {
          pname = "forgejo-nsc-autoscaler";
          version = "0.1.0";
          src = forgejoNscSrc;
          subPackages = [ "./cmd/forgejo-nsc-autoscaler" ];
          vendorHash = "sha256-Kpr+5Q7Dy4JiLuJVZbFeJAzLR7PLPYxhtJqfxMEytcs=";
        };
      in
      {
        # Interactive shell: common toolchain plus the forge/Namespace helpers.
        devShells.default = pkgs.mkShell {
          packages =
            commonPackages
            ++ [
              hcloudUploadImagePkg
              forgejoNscDispatcher
              forgejoNscAutoscaler
            ]
            ++ lib.optionals (nscPkg != null) [ nscPkg ];
        };
        # CI shell: same toolchain, without the dispatcher/autoscaler binaries.
        devShells.ci = pkgs.mkShell {
          packages =
            commonPackages
            ++ [
              hcloudUploadImagePkg
            ]
            ++ lib.optionals (nscPkg != null) [ nscPkg ];
        };
        formatter = pkgs.nixpkgs-fmt;
        packages =
          {
            hcloud-upload-image = hcloudUploadImagePkg;
            forgejo-nsc-dispatcher = forgejoNscDispatcher;
            forgejo-nsc-autoscaler = forgejoNscAutoscaler;
          }
          # nsc is only exposed on platforms where the prebuilt CLI exists.
          // lib.optionalAttrs (nscPkg != null) { nsc = nscPkg; };
      }))
    # Non-per-system outputs: NixOS modules, the forge host, and disk images.
    // {
      nixosModules.burrow-forge = import ./nixos/modules/burrow-forge.nix;
      nixosModules.burrow-forge-runner = import ./nixos/modules/burrow-forge-runner.nix;
      nixosModules.burrow-forgejo-nsc = import ./nixos/modules/burrow-forgejo-nsc.nix;
      nixosConfigurations.burrow-forge = nixpkgs.lib.nixosSystem {
        system = "x86_64-linux";
        # Expose the flake itself so host modules can reference self.nixosModules.
        specialArgs = {
          inherit self;
        };
        modules = [
          disko.nixosModules.disko
          ./nixos/hosts/burrow-forge/default.nix
        ];
      };
      images = {
        # Raw disk image produced by disko for upload to Hetzner.
        burrow-forge-raw = self.nixosConfigurations.burrow-forge.config.system.build.diskoImages;
      };
    };
}

53
nixos/README.md Normal file
View file

@ -0,0 +1,53 @@
# Burrow Forge Runbook
This directory contains the Burrow forge host definition and the Hetzner bootstrap shape for `burrow-forge`.
Mail hosting is intentionally not part of this NixOS host in the current plan. Burrow's first mail path is Forward Email with Burrow-owned custom S3 backups; see [`docs/FORWARDEMAIL.md`](../docs/FORWARDEMAIL.md).
## Files
- `hosts/burrow-forge/default.nix`: host entrypoint
- `modules/burrow-forge.nix`: Forgejo, Caddy, PostgreSQL, and admin bootstrap module
- `modules/burrow-forge-runner.nix`: Forgejo Actions runner and agent identity bootstrap
- `modules/burrow-forgejo-nsc.nix`: Namespace-backed ephemeral Forgejo runner services
- `hetzner-cloud-config.yaml`: desired Hetzner host shape
- `keys/contact_at_burrow_net.pub`: initial operator SSH public key
- `keys/agent_at_burrow_net.pub`: automation SSH public key
- `../Scripts/hetzner-forge.sh`: Hetzner inventory and replace workflow
- `../Scripts/nsc-build-and-upload-image.sh`: temporary Namespace builder -> raw image -> Hetzner snapshot
- `../Scripts/bootstrap-forge-intake.sh`: copy the Forgejo bootstrap password and agent SSH key into `/var/lib/burrow/intake/`
- `../Scripts/check-forge-host.sh`: verify Forgejo, Caddy, the local runner, and optional NSC services after boot
- `../Scripts/cloudflare-upsert-a-record.sh`: upsert DNS-only Cloudflare `A` records for Burrow host cutovers
- `../Scripts/forge-deploy.sh`: remote `nixos-rebuild` entrypoint for the forge host
- `../Scripts/provision-forgejo-nsc.sh`: render Burrow Namespace dispatcher/autoscaler runtime inputs and ensure the default Forgejo scope exists
- `../Scripts/sync-forgejo-nsc-config.sh`: copy intake-backed dispatcher/autoscaler inputs to the host
## Intended Flow
1. Build and upload the raw NixOS image with `Scripts/hetzner-forge.sh build-image` or `Scripts/nsc-build-and-upload-image.sh`.
2. Recreate `burrow-forge` from the latest labeled snapshot with `Scripts/hetzner-forge.sh recreate-from-image --yes`.
3. Run `Scripts/bootstrap-forge-intake.sh` to place the Forgejo bootstrap password file and automation SSH key under `/var/lib/burrow/intake/`.
4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account.
5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent <agent@burrow.net>`.
6. Run `Scripts/provision-forgejo-nsc.sh` locally, then `Scripts/sync-forgejo-nsc-config.sh` to place the Namespace dispatcher/autoscaler runtime inputs under `/var/lib/burrow/intake/`.
7. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME.
8. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace.
9. Configure Forward Email custom S3 backups for `burrow.net` and `burrow.rs` out-of-band with `Tools/forwardemail-custom-s3.sh`.
## Current Constraints
- `burrow-forge` is live on NixOS in `hel1` at `89.167.47.21`, and `Scripts/check-forge-host.sh --expect-nsc` passes locally against that host.
- Public Burrow forge cutover completed on March 15, 2026:
- `burrow.net`, `git.burrow.net`, and `nsc-autoscaler.burrow.net` now publish public `A` records to `89.167.47.21`
- HTTP redirects to HTTPS on all three names
- `https://burrow.net` returns the root forge landing response
- `https://git.burrow.net` returns the live Forgejo front door
- `https://nsc-autoscaler.burrow.net` terminates TLS on Caddy and returns the expected application-level `404` for `/`
- The Cloudflare token currently in `intake/cloudflare-token.txt` is an account-scoped token: `POST /accounts/<account>/tokens/verify` succeeds, while `POST /user/tokens/verify` returns `Invalid API Token`.
- `burrow.rs` still resolves publicly to a Vercel `DEPLOYMENT_NOT_FOUND` response.
- Both domains publish Forward Email MX/TXT records.
- Forward Email custom S3 is live on both domains against the Hetzner `burrow` bucket and the public regional endpoint `https://hel1.your-objectstorage.com`.
- The current Hetzner account contains both:
- the older Ubuntu bootstrap server in `hil`
- the live `burrow-forge` NixOS server in `hel1`
- The remaining forge work is follow-on product/integration work, not host bring-up, mail backup wiring, or public DNS cutover.

View file

@ -0,0 +1,10 @@
# Desired Hetzner Cloud server shape for the Burrow forge host.
name: burrow-forge
server_type: ccx23
location: hel1
# Bootstrap OS image; the runbook later recreates the host from a labeled
# NixOS snapshot (see nixos/README.md "Intended Flow").
image: ubuntu-24.04
# Hetzner-registered SSH keys injected at create time.
ssh_keys:
  - contact@burrow.net
  - agent@burrow.net
# Labels used by Scripts/hetzner-forge.sh to find/replace this server.
labels:
  project: burrow
  role: forge

View file

@ -0,0 +1,46 @@
{ self, ... }:
{
  # Host entrypoint for burrow-forge: disk layout, hardware modules, and the
  # Burrow service modules exported from the flake (via specialArgs.self).
  imports = [
    ./hardware-configuration.nix
    ./disko-config.nix
    self.nixosModules.burrow-forge
    self.nixosModules.burrow-forge-runner
    self.nixosModules.burrow-forgejo-nsc
  ];
  system.stateVersion = "24.11";
  time.timeZone = "America/Los_Angeles";
  nix.settings.experimental-features = [
    "nix-command"
    "flakes"
  ];
  # All secret-bearing inputs live under /var/lib/burrow/intake/ and are
  # copied there out-of-band (Scripts/bootstrap-forge-intake.sh and
  # Scripts/sync-forgejo-nsc-config.sh per the runbook).
  services.burrow.forge = {
    enable = true;
    adminPasswordFile = "/var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt";
    # Operator and automation public keys baked into the image.
    authorizedKeys = [
      (builtins.readFile ../../keys/contact_at_burrow_net.pub)
      (builtins.readFile ../../keys/agent_at_burrow_net.pub)
    ];
  };
  services.burrow.forgeRunner = {
    enable = true;
    sshPrivateKeyFile = "/var/lib/burrow/intake/agent_at_burrow_net_ed25519";
  };
  services.burrow.forgejoNsc = {
    enable = true;
    nscTokenFile = "/var/lib/burrow/intake/forgejo_nsc_token.txt";
    dispatcher = {
      configFile = "/var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml";
    };
    autoscaler = {
      enable = true;
      configFile = "/var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml";
    };
  };
}

View file

@ -0,0 +1,36 @@
{ lib, ... }:
{
  # Disko layout for the forge host: single GPT disk with an EFI system
  # partition and an ext4 root. imageName/imageSize drive the raw image build
  # exposed by the flake as images.burrow-forge-raw.
  disko.devices = {
    disk.main = {
      type = "disk";
      # mkDefault so a host with a different device path can override it.
      device = lib.mkDefault "/dev/sda";
      imageName = "burrow-forge";
      imageSize = "80G";
      content = {
        type = "gpt";
        partitions = {
          ESP = {
            size = "512M";
            type = "EF00";
            content = {
              type = "filesystem";
              format = "vfat";
              mountpoint = "/boot";
              # Keep the FAT boot partition unreadable to non-root users.
              mountOptions = [ "umask=0077" ];
            };
          };
          root = {
            # Remaining disk becomes the root filesystem.
            size = "100%";
            content = {
              type = "filesystem";
              format = "ext4";
              mountpoint = "/";
            };
          };
        };
      };
    };
  };
}

View file

@ -0,0 +1,11 @@
{ ... }:
{
  # Derived from Hetzner Cloud rescue-mode hardware inspection.
  # Initrd modules needed to find the boot disk on this VM: SATA/SCSI disk
  # support plus virtio PCI/SCSI (presumably the paravirtualized devices the
  # Hetzner hypervisor exposes — confirm against the live host).
  boot.initrd.availableKernelModules = [
    "ahci"
    "sd_mod"
    "virtio_pci"
    "virtio_scsi"
  ];
}

View file

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net

View file

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa

View file

@ -0,0 +1,213 @@
{ config, lib, pkgs, ... }:
let
  cfg = config.services.burrow.forgeRunner;
  runnerPkg = pkgs.forgejo-runner;
  stateDir = cfg.stateDir;
  # Registration state written by `forgejo-runner register`.
  runnerFile = "${stateDir}/.runner";
  configFile = "${stateDir}/runner.yaml";
  # Labels in "name:host" form, comma-joined for the register CLI.
  labelsCsv = lib.concatStringsSep "," (map (label: "${label}:host") cfg.labels);
  # FIX: the option attribute always exists (default null), so the previous
  # `cfg.sshPrivateKeyFile or ""` never fell back and evaluated to null; it
  # only worked because escapeShellArg coerces null to "". Make the
  # normalization explicit.
  sshPrivateKeyFile = if cfg.sshPrivateKeyFile == null then "" else cfg.sshPrivateKeyFile;
in
{
  # Self-hosted Forgejo Actions runner on the forge host, plus a bootstrap
  # unit that registers it against the local Forgejo and seeds the automation
  # Git identity and SSH key.
  options.services.burrow.forgeRunner = {
    enable = lib.mkEnableOption "the Burrow Forgejo Actions runner";
    instanceUrl = lib.mkOption {
      type = lib.types.str;
      default = "http://127.0.0.1:3000";
      description = "Forgejo base URL used by the local runner for registration and job polling.";
    };
    labels = lib.mkOption {
      type = with lib.types; listOf str;
      default = [ "burrow-forge" ];
      description = "Runner labels exposed to Forgejo Actions.";
    };
    name = lib.mkOption {
      type = lib.types.str;
      default = "burrow-forge-agent";
      description = "Runner name shown in Forgejo.";
    };
    capacity = lib.mkOption {
      type = lib.types.int;
      default = 1;
      description = "Maximum concurrent jobs on this runner.";
    };
    stateDir = lib.mkOption {
      type = lib.types.str;
      default = "/var/lib/forgejo-runner-agent";
      description = "Persistent runner state directory.";
    };
    user = lib.mkOption {
      type = lib.types.str;
      default = "forgejo-runner-agent";
      description = "System user that runs the Forgejo runner.";
    };
    group = lib.mkOption {
      type = lib.types.str;
      default = "forgejo-runner-agent";
      description = "System group that runs the Forgejo runner.";
    };
    forgejoConfigFile = lib.mkOption {
      type = lib.types.str;
      default = "/var/lib/forgejo/custom/conf/app.ini";
      description = "Forgejo app.ini path used to generate runner tokens.";
    };
    gitUserName = lib.mkOption {
      type = lib.types.str;
      default = "agent";
      description = "Git commit author name for automation on the forge host.";
    };
    gitUserEmail = lib.mkOption {
      type = lib.types.str;
      default = "agent@burrow.net";
      description = "Git commit author email for automation on the forge host.";
    };
    sshPrivateKeyFile = lib.mkOption {
      type = with lib.types; nullOr str;
      default = null;
      description = "Optional host-local path to the agent SSH private key copied into the runner home.";
    };
  };
  config = lib.mkIf cfg.enable {
    users.groups.${cfg.group} = { };
    users.users.${cfg.user} = {
      isSystemUser = true;
      group = cfg.group;
      description = "Burrow Forgejo Actions runner";
      home = cfg.stateDir;
      createHome = true;
      shell = pkgs.bashInteractive;
    };
    # Tools jobs commonly need when executing directly on the host.
    environment.systemPackages = with pkgs; [
      runnerPkg
      bash
      coreutils
      findutils
      git
      git-lfs
      openssh
      python3
      rsync
    ];
    systemd.tmpfiles.rules = [
      "d ${stateDir} 0750 ${cfg.user} ${cfg.group} - -"
    ];
    # One-shot bootstrap: writes runner.yaml, seeds the Git identity and SSH
    # key, and registers the runner (once) using a token generated via the
    # Forgejo CLI. Runs as root so it can chown into the runner account.
    systemd.services.burrow-forgejo-runner-bootstrap = {
      description = "Bootstrap Burrow Forgejo runner registration";
      after = [ "forgejo.service" "network-online.target" "systemd-tmpfiles-setup.service" ];
      wants = [ "forgejo.service" "network-online.target" "systemd-tmpfiles-setup.service" ];
      before = [ "burrow-forgejo-runner.service" ];
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        Type = "oneshot";
        User = "root";
        Group = "root";
      };
      script = ''
        set -euo pipefail
        umask 077
        install -d -m 0750 -o ${cfg.user} -g ${cfg.group} ${stateDir}
        cat > ${configFile} <<EOF
        runner:
          file: ${runnerFile}
          capacity: ${toString cfg.capacity}
          name: ${cfg.name}
          labels:
        EOF
        for label in ${lib.concatStringsSep " " cfg.labels}; do
          echo " - ${"$"}label:host" >> ${configFile}
        done
        cat >> ${configFile} <<'EOF'
        cache:
          enabled: false
        EOF
        chown ${cfg.user}:${cfg.group} ${configFile}
        chmod 0640 ${configFile}
        install -d -m 0700 -o ${cfg.user} -g ${cfg.group} ${stateDir}/.ssh
        ${pkgs.util-linux}/bin/runuser -u ${cfg.user} -- \
          ${pkgs.git}/bin/git config --global user.name ${lib.escapeShellArg cfg.gitUserName}
        ${pkgs.util-linux}/bin/runuser -u ${cfg.user} -- \
          ${pkgs.git}/bin/git config --global user.email ${lib.escapeShellArg cfg.gitUserEmail}
        if [ -n ${lib.escapeShellArg sshPrivateKeyFile} ] && [ -s ${lib.escapeShellArg sshPrivateKeyFile} ]; then
          install -m 0600 -o ${cfg.user} -g ${cfg.group} \
            ${lib.escapeShellArg sshPrivateKeyFile} \
            ${stateDir}/.ssh/id_ed25519
          cat > ${stateDir}/.ssh/config <<EOF
        Host *
          IdentityFile ${stateDir}/.ssh/id_ed25519
          IdentitiesOnly yes
          StrictHostKeyChecking accept-new
        EOF
          chown ${cfg.user}:${cfg.group} ${stateDir}/.ssh/config
          chmod 0600 ${stateDir}/.ssh/config
        fi
        if [ ! -s ${runnerFile} ]; then
          token="$(${pkgs.util-linux}/bin/runuser -u forgejo -- \
            ${config.services.forgejo.package}/bin/forgejo actions generate-runner-token --config ${cfg.forgejoConfigFile} | tr -d '\r\n')"
          if [ -z "${"$"}token" ]; then
            echo "[burrow-forgejo-runner] failed to generate runner token" >&2
            exit 1
          fi
          ${pkgs.util-linux}/bin/runuser -u ${cfg.user} -- \
            ${runnerPkg}/bin/forgejo-runner register \
              --no-interactive \
              --instance ${lib.escapeShellArg cfg.instanceUrl} \
              --token "${"$"}token" \
              --name ${lib.escapeShellArg cfg.name} \
              --labels ${lib.escapeShellArg labelsCsv} \
              --config ${configFile}
        fi
      '';
    };
    # Long-running daemon. If the daemon reports an unregistered runner, the
    # stale .runner file is removed so the next bootstrap re-registers.
    systemd.services.burrow-forgejo-runner = {
      description = "Burrow Forgejo Actions runner";
      after = [ "burrow-forgejo-runner-bootstrap.service" ];
      wants = [ "burrow-forgejo-runner-bootstrap.service" ];
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        Type = "simple";
        User = cfg.user;
        Group = cfg.group;
        WorkingDirectory = stateDir;
        Restart = "on-failure";
        RestartSec = 2;
        ExecStart = pkgs.writeShellScript "burrow-forgejo-runner" ''
          set -euo pipefail
          export PATH="/run/wrappers/bin:/run/current-system/sw/bin:${"$"}{PATH:-}"
          tmp="$(${pkgs.coreutils}/bin/mktemp)"
          set +e
          ${runnerPkg}/bin/forgejo-runner daemon --config ${configFile} 2>&1 | ${pkgs.coreutils}/bin/tee "${"$"}tmp"
          rc="${"$"}{PIPESTATUS[0]}"
          set -e
          if ${pkgs.gnugrep}/bin/grep -qi "unregistered runner" "${"$"}tmp"; then
            rm -f ${runnerFile}
          fi
          rm -f "${"$"}tmp"
          exit "${"$"}rc"
        '';
      };
    };
  };
}

View file

@ -0,0 +1,247 @@
{ config, lib, pkgs, ... }:
let
  cfg = config.services.burrow.forge;
  forgejoCfg = config.services.forgejo;
  forgejoExe = lib.getExe forgejoCfg.package;
  forgejoWorkPath = forgejoCfg.stateDir;
  forgejoCustomPath = "${forgejoWorkPath}/custom";
  forgejoConfigFile = "${forgejoCustomPath}/conf/app.ini";
  # Flags shared by every `forgejo admin ...` invocation in the bootstrap unit.
  forgejoAdminArgs = "--config ${lib.escapeShellArg forgejoConfigFile} --work-path ${lib.escapeShellArg forgejoWorkPath} --custom-path ${lib.escapeShellArg forgejoCustomPath}";
  # Canonical home repository path/URL used as the redirect target for "/".
  homeRepoPath = "/${cfg.homeOwner}/${cfg.homeRepo}";
  homeRepoUrl = "https://${cfg.gitDomain}${homeRepoPath}";
in
{
  # Forge host core: Forgejo + PostgreSQL behind Caddy, SSH access policy,
  # and a one-shot unit that seeds/rotates the initial admin account.
  options.services.burrow.forge = {
    enable = lib.mkEnableOption "the Burrow Forge host";
    gitDomain = lib.mkOption {
      type = lib.types.str;
      default = "git.burrow.net";
      description = "Public Forgejo domain.";
    };
    siteDomain = lib.mkOption {
      type = lib.types.str;
      default = "burrow.net";
      description = "Root site domain.";
    };
    homeOwner = lib.mkOption {
      type = lib.types.str;
      default = "hackclub";
      description = "Canonical Forgejo org/user for the Burrow home repository.";
    };
    homeRepo = lib.mkOption {
      type = lib.types.str;
      default = "burrow";
      description = "Canonical Forgejo repository name for the Burrow home repository.";
    };
    contactEmail = lib.mkOption {
      type = lib.types.str;
      default = "contact@burrow.net";
      description = "Operator contact email.";
    };
    nscAutoscalerDomain = lib.mkOption {
      type = lib.types.str;
      default = "nsc-autoscaler.burrow.net";
      description = "Public webhook domain for the Forgejo Namespace autoscaler.";
    };
    adminUsername = lib.mkOption {
      type = lib.types.str;
      default = "contact";
      description = "Initial Forgejo admin username.";
    };
    adminEmail = lib.mkOption {
      type = lib.types.str;
      default = "contact@burrow.net";
      description = "Initial Forgejo admin email.";
    };
    adminPasswordFile = lib.mkOption {
      type = lib.types.str;
      description = "Host-local path to the plaintext bootstrap password file for the initial Forgejo admin.";
    };
    authorizedKeys = lib.mkOption {
      type = with lib.types; listOf str;
      default = [ ];
      description = "SSH keys allowed for root login and operational bootstrap.";
    };
  };
  config = lib.mkIf cfg.enable {
    networking.hostName = "burrow-forge";
    networking.useDHCP = lib.mkDefault true;
    services.qemuGuest.enable = true;
    # Removable-media EFI install (GRUB on nodev) so the image boots without
    # firmware-registered boot entries.
    boot.loader.grub = {
      enable = true;
      efiSupport = true;
      efiInstallAsRemovable = true;
      device = "nodev";
    };
    fileSystems."/boot".neededForBoot = true;
    services.postgresql = {
      enable = true;
      package = pkgs.postgresql_16;
    };
    # Key-only SSH; root allowed with keys only (prohibit-password).
    services.openssh = {
      enable = true;
      settings = {
        PasswordAuthentication = false;
        KbdInteractiveAuthentication = false;
        PermitRootLogin = "prohibit-password";
      };
    };
    users.users.root.openssh.authorizedKeys.keys = cfg.authorizedKeys;
    # 22 system sshd, 80/443 Caddy, 2222 Forgejo's built-in SSH server.
    networking.firewall.allowedTCPPorts = [
      22
      80
      443
      2222
    ];
    services.forgejo = {
      enable = true;
      database = {
        type = "postgres";
        createDatabase = true;
      };
      lfs.enable = true;
      settings = {
        server = {
          DOMAIN = cfg.gitDomain;
          ROOT_URL = "https://${cfg.gitDomain}/";
          HTTP_PORT = 3000;
          SSH_DOMAIN = cfg.gitDomain;
          # Forgejo runs its own SSH server on 2222, separate from sshd on 22.
          SSH_PORT = 2222;
          START_SSH_SERVER = true;
        };
        # Closed registration; anonymous read access stays on.
        service = {
          DISABLE_REGISTRATION = true;
          REQUIRE_SIGNIN_VIEW = false;
          DEFAULT_ALLOW_CREATE_ORGANIZATION = false;
          ENABLE_NOTIFY_MAIL = false;
          NO_REPLY_ADDRESS = cfg.adminEmail;
        };
        session = {
          COOKIE_SECURE = true;
          SAME_SITE = "strict";
        };
        openid = {
          ENABLE_OPENID_SIGNIN = false;
          ENABLE_OPENID_SIGNUP = false;
        };
        actions = {
          ENABLED = true;
        };
        repository = {
          DEFAULT_BRANCH = "main";
          ENABLE_PUSH_CREATE_USER = false;
        };
        ui = {
          DEFAULT_THEME = "forgejo-auto";
        };
      };
    };
    # Caddy terminates TLS for all public names. The git domain redirects "/"
    # to the home repo and proxies everything to Forgejo; the root site
    # redirects "/" to the home repo URL and 404s everything else.
    services.caddy = {
      enable = true;
      email = cfg.contactEmail;
      virtualHosts =
        {
          "${cfg.gitDomain}".extraConfig = ''
            encode gzip zstd
            @root path /
            redir @root ${homeRepoPath} 308
            reverse_proxy 127.0.0.1:${toString config.services.forgejo.settings.server.HTTP_PORT}
          '';
          "${cfg.siteDomain}".extraConfig = ''
            @root path /
            redir @root ${homeRepoUrl} 308
            respond 404
          '';
        }
        # Autoscaler webhook vhost only exists when both forgejoNsc and its
        # autoscaler are enabled; proxies to the autoscaler on 8090.
        // lib.optionalAttrs (
          config.services.burrow.forgejoNsc.enable && config.services.burrow.forgejoNsc.autoscaler.enable
        ) {
          "${cfg.nscAutoscalerDomain}".extraConfig = ''
            encode gzip zstd
            reverse_proxy 127.0.0.1:8090
          '';
        };
    };
    # One-shot admin bootstrap: creates the initial admin from the intake
    # password file, or rotates the password if the user already exists.
    # Exits 0 (skips) when the password file is missing or empty.
    systemd.services.burrow-forgejo-bootstrap = {
      description = "Seed the initial Burrow Forgejo admin account";
      after = [ "forgejo.service" ];
      requires = [ "forgejo.service" ];
      wantedBy = [ "multi-user.target" ];
      path = [
        forgejoCfg.package
        pkgs.coreutils
        pkgs.gnugrep
      ];
      serviceConfig = {
        Type = "oneshot";
        User = forgejoCfg.user;
        Group = forgejoCfg.group;
        WorkingDirectory = forgejoCfg.stateDir;
      };
      script = ''
        set -euo pipefail
        if [ ! -s ${lib.escapeShellArg cfg.adminPasswordFile} ]; then
          echo "bootstrap password file is missing; skipping admin bootstrap" >&2
          exit 0
        fi
        password="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.adminPasswordFile})"
        if [ -z "$password" ]; then
          echo "bootstrap password file is empty; skipping admin bootstrap" >&2
          exit 0
        fi
        log_file="$(mktemp)"
        trap 'rm -f "$log_file"' EXIT
        if ! ${forgejoExe} admin user create \
          ${forgejoAdminArgs} \
          --admin \
          --username ${lib.escapeShellArg cfg.adminUsername} \
          --email ${lib.escapeShellArg cfg.adminEmail} \
          --password "$password" \
          --must-change-password=false >"$log_file" 2>&1; then
          if grep -qi "already exists" "$log_file"; then
            ${forgejoExe} admin user change-password \
              ${forgejoAdminArgs} \
              --username ${lib.escapeShellArg cfg.adminUsername} \
              --password "$password" \
              --must-change-password=false
          else
            cat "$log_file" >&2
            exit 1
          fi
        fi
      '';
    };
  };
}

View file

@ -0,0 +1,234 @@
{ config, lib, pkgs, self, ... }:
let
  inherit (lib)
    mkEnableOption
    mkIf
    mkOption
    types
    mkAfter
    mkDefault
    optional
    optionalAttrs
    optionalString
    ;
  cfg = config.services.burrow.forgejoNsc;
  # Runtime copies of the intake configs/token, synced into the state dir by
  # each service's preStart so the services never read intake paths directly.
  dispatcherRuntimeConfig = "${cfg.stateDir}/dispatcher.yaml";
  autoscalerRuntimeConfig = "${cfg.stateDir}/autoscaler.yaml";
  # Guard script: refuse to start while the intake config still contains
  # "PENDING-" placeholder values (unless allowPending is set).
  pendingCheck = configPath: pkgs.writeShellScript "forgejo-nsc-check-pending" ''
    set -euo pipefail
    if ${pkgs.gnugrep}/bin/grep -q 'PENDING-' '${configPath}'; then
      echo "forgejo-nsc config still contains placeholder values (PENDING-); update ${configPath} before starting." >&2
      exit 1
    fi
  '';
  nscTokenPath = "${cfg.stateDir}/nsc.token";
  # Each sync snippet is empty when the corresponding source path is unset.
  tokenSync = optionalString (cfg.nscTokenFile != null) ''
    install -m 600 ${lib.escapeShellArg cfg.nscTokenFile} ${lib.escapeShellArg nscTokenPath}
    chown ${cfg.user}:${cfg.group} ${nscTokenPath}
    chmod 600 ${nscTokenPath}
  '';
  dispatcherConfigSync = optionalString (cfg.dispatcher.configFile != null) ''
    install -m 400 ${lib.escapeShellArg cfg.dispatcher.configFile} ${lib.escapeShellArg dispatcherRuntimeConfig}
    chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg dispatcherRuntimeConfig}
    chmod 400 ${lib.escapeShellArg dispatcherRuntimeConfig}
  '';
  autoscalerConfigSync = optionalString (cfg.autoscaler.configFile != null) ''
    install -m 400 ${lib.escapeShellArg cfg.autoscaler.configFile} ${lib.escapeShellArg autoscalerRuntimeConfig}
    chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg autoscalerRuntimeConfig}
    chmod 400 ${lib.escapeShellArg autoscalerRuntimeConfig}
  '';
  # Environment shared by both services (the autoscaler reuses this set).
  dispatcherEnv =
    cfg.extraEnv
    // optionalAttrs (cfg.nscTokenFile != null) { NSC_TOKEN_FILE = nscTokenPath; }
    // optionalAttrs (cfg.nscTokenSpecFile != null) { NSC_TOKEN_SPEC_FILE = cfg.nscTokenSpecFile; }
    // optionalAttrs (cfg.nscEndpoint != null) { NSC_ENDPOINT = cfg.nscEndpoint; };
in {
  # Namespace-backed ephemeral Forgejo runner services: a dispatcher (on by
  # default) and an optional autoscaler, both built from the flake's packages.
  options.services.burrow.forgejoNsc = {
    enable = mkEnableOption "Forgejo Namespace Cloud runner dispatcher";
    user = mkOption {
      type = types.str;
      default = "forgejo-nsc";
      description = "System user that runs the forgejo-nsc services.";
    };
    group = mkOption {
      type = types.str;
      default = "forgejo-nsc";
      description = "System group for the forgejo-nsc services.";
    };
    stateDir = mkOption {
      type = types.str;
      default = "/var/lib/forgejo-nsc";
      description = "State directory for the dispatcher/autoscaler.";
    };
    nscTokenFile = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Optional NSC token file (exported as NSC_TOKEN_FILE).";
    };
    nscTokenSpecFile = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Optional NSC token spec file (exported as NSC_TOKEN_SPEC_FILE).";
    };
    nscEndpoint = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Optional NSC endpoint override (exported as NSC_ENDPOINT).";
    };
    extraEnv = mkOption {
      type = types.attrsOf types.str;
      default = { };
      description = "Extra environment variables injected into the services.";
    };
    nscPackage = mkOption {
      type = types.nullOr types.package;
      default = self.packages.${pkgs.stdenv.hostPlatform.system}.nsc or null;
      description = "Optional nsc CLI package added to the service PATH.";
    };
    dispatcher = {
      enable = mkOption {
        type = types.bool;
        default = true;
        description = "Enable the forgejo-nsc dispatcher service.";
      };
      package = mkOption {
        type = types.package;
        default = self.packages.${pkgs.stdenv.hostPlatform.system}.forgejo-nsc-dispatcher;
        description = "Package providing the forgejo-nsc dispatcher binary.";
      };
      configFile = mkOption {
        type = types.nullOr types.str;
        default = null;
        description = "Host-local YAML config file for the dispatcher.";
      };
      allowPending = mkOption {
        type = types.bool;
        default = false;
        description = "Allow placeholder values (PENDING-) in the dispatcher config.";
      };
    };
    autoscaler = {
      enable = mkOption {
        type = types.bool;
        default = false;
        description = "Enable the forgejo-nsc autoscaler service.";
      };
      package = mkOption {
        type = types.package;
        default = self.packages.${pkgs.stdenv.hostPlatform.system}.forgejo-nsc-autoscaler;
        description = "Package providing the forgejo-nsc autoscaler binary.";
      };
      configFile = mkOption {
        type = types.nullOr types.str;
        default = null;
        description = "Host-local YAML config file for the autoscaler.";
      };
      allowPending = mkOption {
        type = types.bool;
        default = false;
        description = "Allow placeholder values (PENDING-) in the autoscaler config.";
      };
    };
  };
  config = mkIf cfg.enable {
    assertions = [
      {
        assertion = (!cfg.dispatcher.enable) || cfg.dispatcher.configFile != null;
        message = "services.burrow.forgejoNsc.dispatcher.configFile must be set when the dispatcher is enabled.";
      }
      {
        assertion = (!cfg.autoscaler.enable) || cfg.autoscaler.configFile != null;
        message = "services.burrow.forgejoNsc.autoscaler.configFile must be set when the autoscaler is enabled.";
      }
    ];
    users.groups.${cfg.group} = { };
    users.users.${cfg.user} = {
      # Fixed uid (overridable) so state-file ownership survives rebuilds.
      uid = mkDefault 2011;
      isSystemUser = true;
      group = cfg.group;
      description = "Forgejo Namespace Cloud runner services";
      home = cfg.stateDir;
      createHome = true;
      shell = pkgs.bashInteractive;
    };
    systemd.tmpfiles.rules = mkAfter [
      "d ${cfg.stateDir} 0750 ${cfg.user} ${cfg.group} - -"
    ];
    systemd.services.forgejo-nsc-dispatcher = mkIf cfg.dispatcher.enable {
      description = "Forgejo Namespace Cloud dispatcher";
      wantedBy = [ "multi-user.target" ];
      after = [ "network-online.target" ];
      wants = [ "network-online.target" ];
      # Stay stopped (without failing) until the intake files exist on disk.
      unitConfig.ConditionPathExists =
        optional (cfg.dispatcher.configFile != null) cfg.dispatcher.configFile
        ++ optional (cfg.nscTokenFile != null) cfg.nscTokenFile;
      serviceConfig = {
        Type = "simple";
        User = cfg.user;
        Group = cfg.group;
        WorkingDirectory = cfg.stateDir;
        ExecStart = "${cfg.dispatcher.package}/bin/forgejo-nsc-dispatcher --config ${dispatcherRuntimeConfig}";
        Restart = "on-failure";
        RestartSec = 5;
      };
      path = lib.optional (cfg.nscPackage != null) cfg.nscPackage;
      environment = dispatcherEnv;
      # preStart: placeholder check (unless allowed), then sync config + token.
      preStart = lib.concatStringsSep "\n" (lib.filter (s: s != "") [
        (optionalString (!cfg.dispatcher.allowPending) (pendingCheck cfg.dispatcher.configFile))
        dispatcherConfigSync
        tokenSync
      ]);
    };
    systemd.services.forgejo-nsc-autoscaler = mkIf cfg.autoscaler.enable {
      description = "Forgejo Namespace Cloud autoscaler";
      wantedBy = [ "multi-user.target" ];
      after = [ "network-online.target" "forgejo-nsc-dispatcher.service" ];
      wants = [ "network-online.target" ];
      unitConfig.ConditionPathExists =
        optional (cfg.autoscaler.configFile != null) cfg.autoscaler.configFile
        ++ optional (cfg.nscTokenFile != null) cfg.nscTokenFile;
      serviceConfig = {
        Type = "simple";
        User = cfg.user;
        Group = cfg.group;
        WorkingDirectory = cfg.stateDir;
        ExecStart = "${cfg.autoscaler.package}/bin/forgejo-nsc-autoscaler --config ${autoscalerRuntimeConfig}";
        Restart = "on-failure";
        RestartSec = 5;
      };
      path = lib.optional (cfg.nscPackage != null) cfg.nscPackage;
      # Intentionally reuses the dispatcher's environment set (same NSC vars).
      environment = dispatcherEnv;
      preStart = lib.concatStringsSep "\n" (lib.filter (s: s != "") [
        (optionalString (!cfg.autoscaler.allowPending) (pendingCheck cfg.autoscaler.configFile))
        autoscalerConfigSync
        tokenSync
      ]);
    };
  };
}

4
rust-toolchain.toml Normal file
View file

@ -0,0 +1,4 @@
# Pinned Rust toolchain for this workspace, consumed by rustup.
# Minimal profile keeps the component set small; rustfmt is added explicitly.
[toolchain]
channel = "1.85.0"
components = ["rustfmt"]
profile = "minimal"

View file

@ -0,0 +1,183 @@
## forgejo-nsc-dispatcher
This service exposes a simple HTTP API that tells Namespace Cloud to start
ephemeral Forgejo Actions runners on demand. It glues together three pieces:
1. **Forgejo Actions** — the service requests a scoped registration token
for the repository/organization/instance where you want to run jobs.
2. **Namespace (`nsc`)** — the dispatcher shells out to the `nsc` CLI to create
a short-lived environment, runs the `forgejo-runner` container inside it,
and exits after a single job (`forgejo-runner one-job`). The Namespace TTL is
the hard cap, not the typical lifetime.
3. **Your automation** — you call the service via HTTP (directly, through Caddy,
via Forgejo webhooks, etc.) whenever a new runner is needed.
### Directory layout
```
.
├── cmd/forgejo-nsc-dispatcher # main entry point
├── internal/ # service packages (config, forgejo client, nsc dispatcher, HTTP server)
├── config.example.yaml # starter config referenced by README
├── flake.nix / flake.lock # reproducible builds (Go binary + container image)
└── .forgejo/workflows # CI that runs go test/build and publishes manifests
```
### Configuration
Copy `config.example.yaml` and update it for your Forgejo instance and Namespace
profile. The important knobs are:
- `forgejo.base_url` — HTTPS endpoint of your Forgejo server. A PAT with
`actions:runner` scope is required in `forgejo.token`.
- `forgejo.instance_url` — URL that spawned runners use to register back to Forgejo.
This must be reachable from the runner (typically the public URL like
`https://git.burrow.net`). On the forge host it commonly differs from `base_url`
(which may be `http://127.0.0.1:3000`).
- `forgejo.default_scope` — where new runners register
(`instance`, `organization`, or `repository`).
- `forgejo.default_labels` — labels applied to every spawned runner. Gate Forgejo
workflows via `runs-on: ["namespace-profile-linux-medium"]` (or other
`namespace-profile-linux-*` labels).
- `namespace.nsc_binary` — path to the `nsc` binary (the Nix container ships one
compiled from `namespacelabs/foundation` so `/app/bin/nsc` works out of the box).
- `namespace.image` — OCI image containing `forgejo-runner`.
- `namespace.machine_type` / `namespace.duration` — shape + TTL for the ephemeral
Namespace environment. The dispatcher destroys the instance after a job so the
TTL acts as a hard cap, not an idle timeout.
### Running locally
```shell
# Ensure nsc is available (e.g. `go build ./foundation/cmd/nsc`)
cp config.example.yaml config.yaml
nix develop # optional dev shell with Go toolchain
go run ./cmd/forgejo-nsc-dispatcher --config config.yaml
```
API example:
```shell
curl -X POST http://localhost:8080/api/v1/dispatch \
-H 'Content-Type: application/json' \
-d '{
"count": 1,
"ttl": "20m",
"labels": ["namespace-profile-linux-medium"],
"scope": {"level": "repository", "owner": "example", "name": "app"}
}'
```
### Deploying with Nix + GHCR
- `nix build .#packages.x86_64-linux.container-amd64` produces a deterministic
tarball containing the service, the `nsc` binary, BusyBox, and `forgejo-runner`.
- The included `Build Container` workflow builds both `amd64` and `arm64` images
on Namespace runners and pushes them to `ghcr.io/<owner>/<repo>`.
No Fly.io manifests are emitted — the multiarch manifest points only at GHCR.
### How this fits behind Caddy (last-mile networking)
The dispatcher is just an HTTP server. You can:
1. Run it anywhere that can reach Forgejo and Namespace: bare metal, Namespace
cluster, Kubernetes, Fly, etc.
2. Put Caddy (or any reverse proxy) in front to terminate TLS, do auth, or
rewrite URLs. For example:
```
forgejo-dispatcher.example.com {
reverse_proxy 127.0.0.1:8080
basicauth /api/* {
user JDJhJDE...
}
}
```
The service doesn't assume Caddy, nor does it manipulate HTTP clients
directly — it simply waits for POST requests. As long as the dispatcher can
reach Forgejo's REST API and run the `nsc` binary, you can drop it anywhere.
### Autoscaling (webhook + poller)
If you don't want to call `/api/v1/dispatch` manually, there's a companion
autoscaler (`cmd/forgejo-nsc-autoscaler`) that watches Forgejo job queues and
triggers the dispatcher for you. It operates in two modes simultaneously:
1. **Polling** — every instance polls `GET /api/v1/.../actions/runners` to keep a
minimum number of idle Namespace runners per label. This continues until a
webhook is successfully processed, so the system is self-bootstrapping.
2. **Webhooks** — once Forgejo reaches the autoscaler via the `/webhook/{name}`
endpoint, the autoscaler stops polling and reacts to `workflow_job` events in
real time. Each payload is mapped to a target label set and results in a
dispatch call.
You can manage multiple Forgejo instances by listing them under `instances` in
`autoscaler.example.yaml`:
```
listen: ":8090"
dispatcher:
url: "http://dispatcher:8080"
instances:
- name: burrow
forgejo:
base_url: "https://git.burrow.net"
token: "PENDING-FORGEJO-PAT"
scope:
level: "repository"
owner: "hackclub"
name: "burrow"
disable_polling: true # webhook-only mode
poll_interval: "30s"
webhook_secret: "supersecret"
webhook:
url: "https://nsc-autoscaler.burrow.net/webhook/burrow"
content_type: "json"
events: ["workflow_job"]
active: true
targets:
- labels: ["namespace-profile-linux-medium"]
min_idle: 0 # set to 0 to scale-to-zero between jobs
ttl: "20m"
- labels: ["namespace-profile-macos-large"]
min_idle: 0
ttl: "90m"
machine_type: "12x28"
- labels: ["namespace-profile-windows-large"]
min_idle: 0
ttl: "45m"
machine_type: "windows/amd64:8x16"
```
For Burrow, use `Scripts/provision-forgejo-nsc.sh` to mint the Forgejo PAT,
generate a Namespace token from the logged-in namespace account, and render the
dispatcher/autoscaler configs into `intake/forgejo_nsc_{dispatcher,autoscaler}.yaml`
plus `intake/forgejo_nsc_token.txt`.
For ongoing operations, use `Scripts/sync-forgejo-nsc-config.sh`:
- `Scripts/sync-forgejo-nsc-config.sh` copies the intake-backed configs and
Namespace token onto `/var/lib/burrow/intake/` on the forge host, reapplies
file ownership for `forgejo-nsc`, and restarts the dispatcher/autoscaler.
- `Scripts/sync-forgejo-nsc-config.sh --rotate-pat` additionally mints a new
Forgejo PAT on the Burrow forge host and refreshes the local intake files.
Run it next to the dispatcher:
```bash
go run ./cmd/forgejo-nsc-autoscaler --config autoscaler.yaml
# or build the binary/container via `nix build .#forgejo-nsc-autoscaler`
```
If your Forgejo build doesn't expose the runner listing API, set
`disable_polling: true` and rely on `webhook` entries. The autoscaler will
auto-create/update the webhook (using the PAT) so that new `workflow_job` events
immediately call the dispatcher even if the service isn't publicly reachable yet.
In Forgejo add a webhook pointing to `https://nsc-autoscaler.burrow.net/webhook/burrow`
with the shared secret (or let the autoscaler create it by specifying `webhook.url`
in config). The autoscaler continues polling until it receives the first valid
webhook (unless disabled), so you get capacity immediately even if outbound
webhooks from Forgejo aren't yet configured.

View file

@ -0,0 +1,34 @@
# Example autoscaler configuration — copy and adjust for your deployment.
# (Indentation was lost in this view; see autoscaler docs for the nesting.)
# HTTP listen address for the webhook/health endpoints.
listen: ":8090"
dispatcher:
# Base URL of the forgejo-nsc-dispatcher this autoscaler calls.
url: "http://localhost:8080"
# One entry per Forgejo instance to watch.
instances:
- name: burrow
forgejo:
base_url: "https://git.burrow.net"
# Forgejo PAT; placeholder to be replaced before use.
token: "PENDING-FORGEJO-PAT"
scope:
level: "repository"
owner: "hackclub"
name: "burrow"
# true = webhook-only mode; the runner-listing API is never polled.
disable_polling: true
poll_interval: "30s"
webhook_secret: "supersecret"
webhook:
url: "https://nsc-autoscaler.burrow.net/webhook/burrow"
content_type: "json"
events: ["workflow_job"]
active: true
# Per-label scaling targets; min_idle: 0 scales to zero between jobs.
targets:
- labels: ["namespace-profile-linux-medium"]
min_idle: 1
ttl: "20m"
machine_type: "4x8"
- labels: ["namespace-profile-macos-large"]
min_idle: 0
ttl: "90m"
machine_type: "12x28"
- labels: ["namespace-profile-windows-large"]
min_idle: 0
ttl: "45m"
machine_type: "windows/amd64:8x16"

View file

@ -0,0 +1,46 @@
package main
import (
"context"
"flag"
"log/slog"
"os"
"os/signal"
"syscall"
"namespacelabs.dev/foundation/std/tasks"
"namespacelabs.dev/foundation/std/tasks/simplelog"
"github.com/burrow/forgejo-nsc/internal/autoscaler"
)
// main is the forgejo-nsc-autoscaler entry point: it loads the config named
// by --config, builds the autoscaler service, and runs it inside a foundation
// task until SIGINT/SIGTERM cancels the context.
func main() {
	configFlag := flag.String("config", "autoscaler.yaml", "Path to the autoscaler config file")
	flag.Parse()

	log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo}))

	cfg, err := autoscaler.LoadConfig(*configFlag)
	if err != nil {
		log.Error("failed to load config", "error", err)
		os.Exit(1)
	}

	svc, err := autoscaler.NewService(cfg)
	if err != nil {
		log.Error("failed to initialize autoscaler", "error", err)
		os.Exit(1)
	}

	// Cancel the context on SIGINT/SIGTERM so Start can shut down cleanly.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	// Route foundation task output to stdout.
	ctx = tasks.WithSink(ctx, simplelog.NewSink(os.Stdout, 0))

	runErr := tasks.Action("autoscaler.run").Run(ctx, func(ctx context.Context) error {
		return svc.Start(ctx)
	})
	if runErr != nil {
		log.Error("autoscaler exited", "error", runErr)
		os.Exit(1)
	}
}

View file

@ -0,0 +1,90 @@
package main
import (
"context"
"flag"
"log/slog"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"github.com/burrow/forgejo-nsc/internal/app"
"github.com/burrow/forgejo-nsc/internal/config"
"github.com/burrow/forgejo-nsc/internal/forgejo"
"github.com/burrow/forgejo-nsc/internal/nsc"
"github.com/burrow/forgejo-nsc/internal/server"
)
// main is the forgejo-nsc-dispatcher entry point. It wires together the
// config, the Forgejo API client, the nsc dispatcher, and the HTTP server,
// then blocks until SIGINT/SIGTERM and shuts the server down with a
// 15-second grace period.
func main() {
	var configPath string
	flag.StringVar(&configPath, "config", "config.yaml", "Path to the dispatcher config file.")
	flag.Parse()

	logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo}))

	cfg, err := config.Load(configPath)
	if err != nil {
		logger.Error("failed to load config", "error", err)
		os.Exit(1)
	}

	// Resolve the scope new runners register under by default.
	scope, err := cfg.Forgejo.DefaultScope.ToScope()
	if err != nil {
		logger.Error("invalid default scope", "error", err)
		os.Exit(1)
	}

	forgejoClient, err := forgejo.NewClient(cfg.Forgejo.BaseURL, cfg.Forgejo.Token)
	if err != nil {
		logger.Error("failed to create forgejo client", "error", err)
		os.Exit(1)
	}

	// The dispatcher shells out to the nsc CLI to create ephemeral
	// Namespace environments and run forgejo-runner inside them.
	dispatcher, err := nsc.NewDispatcher(nsc.Options{
		BinaryPath:       cfg.Namespace.NSCBinary,
		ComputeBaseURL:   cfg.Namespace.ComputeBaseURL,
		DefaultImage:     cfg.Namespace.Image,
		DefaultMachine:   cfg.Namespace.MachineType,
		MacosBaseImageID: cfg.Namespace.MacosBaseImageID,
		MacosMachineArch: cfg.Namespace.MacosMachineArch,
		DefaultDuration:  cfg.Namespace.Duration.Duration,
		WorkDir:          cfg.Namespace.WorkDir,
		MaxParallel:      cfg.Namespace.MaxParallel,
		RunnerNamePrefix: cfg.Runner.NamePrefix,
		Executor:         cfg.Runner.Executor,
		Network:          cfg.Namespace.Network,
		Logger:           logger,
	})
	if err != nil {
		logger.Error("failed to create dispatcher", "error", err)
		os.Exit(1)
	}

	service := app.NewService(app.Config{
		DefaultScope:  scope,
		DefaultLabels: cfg.Forgejo.DefaultLabels,
		InstanceURL:   cfg.Forgejo.InstanceURL,
		DefaultTTL:    cfg.Namespace.Duration.Duration,
		AllowLabels:   cfg.Namespace.AllowLabels,
		AllowScopes:   cfg.Namespace.AllowScopes,
	}, forgejoClient, dispatcher, logger)

	srv := server.New(cfg.Listen, service, logger)
	go func() {
		logger.Info("dispatcher listening", "addr", cfg.Listen)
		// http.ErrServerClosed is the expected result of Shutdown below;
		// anything else is a real failure. (The previous comparison against
		// context.Canceled was dead code — ListenAndServe never returns it.)
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			logger.Error("server terminated", "error", err)
		}
	}()

	// Block until SIGINT/SIGTERM, then give in-flight requests 15s to drain.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, syscall.SIGTERM, syscall.SIGINT)
	<-interrupt

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		logger.Error("graceful shutdown failed", "error", err)
	}
}

View file

@ -0,0 +1,27 @@
# Example dispatcher configuration — copy to config.yaml and edit.
# (Indentation was lost in this view; see the README for the nesting.)
listen: ":8080"
forgejo:
# Forgejo REST endpoint plus a PAT (actions:runner scope) used to mint
# runner registration tokens.
base_url: "https://forgejo.example.com"
token: "${FORGEJO_PERSONAL_ACCESS_TOKEN}"
# Where new runners register by default: instance, organization, or repository.
default_scope:
level: "organization"
owner: "example"
default_labels:
- namespace-profile-linux-medium
timeout: "30s"
namespace:
# Path to the nsc CLI inside the deployment artifact.
nsc_binary: "/app/bin/nsc"
compute_base_url: "https://ord4.compute.namespaceapis.com"
# OCI image containing forgejo-runner.
image: "ghcr.io/forgejo/runner:3"
machine_type: "8x16"
macos_base_image_id: "tahoe"
macos_machine_arch: "arm64"
# TTL for the ephemeral environment — a hard cap, not an idle timeout.
duration: "30m"
workdir: "/var/lib/forgejo-runner"
max_parallel: 4
network: ""
runner:
name_prefix: "nscloud-"
executor: "shell"

View file

@ -0,0 +1,35 @@
# Intake-rendered autoscaler config for the Burrow forge host.
# PENDING-* placeholders are filled in by Scripts/provision-forgejo-nsc.sh.
# (Indentation was lost in this view; see autoscaler.example.yaml.)
listen: "127.0.0.1:8090"
dispatcher:
url: "http://127.0.0.1:8080"
instances:
- name: burrow
forgejo:
# Dispatcher runs on the forge host, so it talks to Forgejo locally.
base_url: "http://127.0.0.1:3000"
token: "PENDING-FORGEJO-PAT"
scope:
level: "repository"
owner: "hackclub"
name: "burrow"
# Polling stays on until the first valid webhook is processed.
disable_polling: false
poll_interval: "30s"
webhook_secret: "PENDING-WEBHOOK-SECRET"
webhook:
url: "https://nsc-autoscaler.burrow.net/webhook/burrow"
content_type: "json"
events: ["workflow_job"]
active: true
# Per-label scaling targets; min_idle: 0 scales to zero between jobs.
targets:
- labels: ["namespace-profile-linux-medium"]
min_idle: 0
ttl: "20m"
machine_type: "4x8"
- labels: ["namespace-profile-macos-large"]
min_idle: 0
ttl: "90m"
machine_type: "12x28"
- labels: ["namespace-profile-windows-large"]
min_idle: 0
ttl: "45m"
machine_type: "windows/amd64:8x16"

View file

@ -0,0 +1,37 @@
# Intake-rendered dispatcher config for the Burrow forge host.
# PENDING-* placeholders are filled in by Scripts/provision-forgejo-nsc.sh.
# (Indentation was lost in this view; see config.example.yaml.)
listen: "127.0.0.1:8080"
forgejo:
# Local API endpoint on the forge host; instance_url is the public URL
# that spawned runners use to register back.
base_url: "http://127.0.0.1:3000"
instance_url: "https://git.burrow.net"
token: "PENDING-FORGEJO-PAT"
default_scope:
level: "repository"
owner: "hackclub"
name: "burrow"
default_labels:
- namespace-profile-linux-medium
timeout: "30s"
namespace:
# nsc is installed system-wide via the NixOS module.
nsc_binary: "/run/current-system/sw/bin/nsc"
compute_base_url: "https://ord4.compute.namespaceapis.com"
image: "code.forgejo.org/forgejo/runner:11"
machine_type: "4x8"
macos_base_image_id: "tahoe"
macos_machine_arch: "arm64"
# TTL for the ephemeral environment — a hard cap, not an idle timeout.
duration: "30m"
workdir: "/var/lib/forgejo-runner"
max_parallel: 4
# Restrict which labels and scopes dispatch requests may use.
allow_labels:
- namespace-profile-linux-medium
- namespace-profile-macos-large
- namespace-profile-windows-large
allow_scopes:
- "repository:hackclub/burrow"
instance_tags:
- "burrow"
network: ""
runner:
name_prefix: "nscloud-"
executor: "shell"

View file

@ -0,0 +1,65 @@
module github.com/burrow/forgejo-nsc
go 1.24.4
require (
buf.build/gen/go/namespace/cloud/connectrpc/go v1.19.1-20260212004106-290ae81f8d6d.2
buf.build/gen/go/namespace/cloud/protocolbuffers/go v1.36.11-20260212004106-290ae81f8d6d.1
connectrpc.com/connect v1.19.1
github.com/go-chi/chi/v5 v5.2.1
github.com/google/uuid v1.6.0
golang.org/x/crypto v0.48.0
golang.org/x/sync v0.19.0
google.golang.org/protobuf v1.36.11
gopkg.in/yaml.v3 v3.0.1
namespacelabs.dev/foundation v0.0.478
)
require (
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/jxskiss/base62 v1.1.0 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mattn/go-zglob v0.0.3 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/muesli/reflow v0.3.0 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/rivo/uniseg v0.4.2 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/spf13/afero v1.9.2 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.7 // indirect
github.com/spf13/viper v1.14.0 // indirect
github.com/subosito/gotenv v1.4.1 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/otel v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
go.opentelemetry.io/otel/metric v1.38.0 // indirect
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
go.opentelemetry.io/otel/trace v1.38.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect
golang.org/x/net v0.49.0 // indirect
golang.org/x/sys v0.41.0 // indirect
golang.org/x/term v0.40.0 // indirect
golang.org/x/text v0.34.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
google.golang.org/grpc v1.76.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
helm.sh/helm/v3 v3.18.4 // indirect
namespacelabs.dev/go-ids v0.0.0-20221124082625-9fc72ee06af7 // indirect
)

575
services/forgejo-nsc/go.sum Normal file
View file

@ -0,0 +1,575 @@
buf.build/gen/go/namespace/cloud/connectrpc/go v1.19.1-20260212004106-290ae81f8d6d.2 h1:XaeFtt6yN8G5q2uYoiTjyshOyai1Q+GzwfEKlxrTzVw=
buf.build/gen/go/namespace/cloud/connectrpc/go v1.19.1-20260212004106-290ae81f8d6d.2/go.mod h1:QvCL7PUDMFotMXVUoWMeRClEEnCbh7S51xHy39mO+H4=
buf.build/gen/go/namespace/cloud/protocolbuffers/go v1.36.11-20260212004106-290ae81f8d6d.1 h1:xTgPJaOj5QNRPAA3nxW3fTz01aAOLr/6SG7C4Iqxm54=
buf.build/gen/go/namespace/cloud/protocolbuffers/go v1.36.11-20260212004106-290ae81f8d6d.1/go.mod h1:Il2wpJNQB40Yj3Rmuhg5xKJPSXaZVwij+Q30d1PNuNY=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/go-chi/chi/v5 v5.2.1 h1:KOIHODQj58PmL80G2Eak4WdvUzjSJSm0vG72crDCqb8=
github.com/go-chi/chi/v5 v5.2.1/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jxskiss/base62 v1.1.0 h1:A5zbF8v8WXx2xixnAKD2w+abC+sIzYJX+nxmhA6HWFw=
github.com/jxskiss/base62 v1.1.0/go.mod h1:HhWAlUXvxKThfOlZbcuFzsqwtF5TcqS9ru3y5GfjWAc=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-zglob v0.0.3 h1:6Ry4EYsScDyt5di4OI6xw1bYhOqfE5S33Z1OPy+d+To=
github.com/mattn/go-zglob v0.0.3/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=
github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU=
github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4=
go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU=
golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
helm.sh/helm/v3 v3.18.4 h1:pNhnHM3nAmDrxz6/UC+hfjDY4yeDATQCka2/87hkZXQ=
helm.sh/helm/v3 v3.18.4/go.mod h1:WVnwKARAw01iEdjpEkP7Ii1tT1pTPYfM1HsakFKM3LI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
namespacelabs.dev/foundation v0.0.478 h1:3xFLZcrjih7Jjey2N7faSfr6EoBCg2LMTHipq/3Hlrg=
namespacelabs.dev/foundation v0.0.478/go.mod h1:svBrTIfZK773sytmjudGkCzQWNisxcQtcWNCs+uLznI=
namespacelabs.dev/go-ids v0.0.0-20221124082625-9fc72ee06af7 h1:8NlnfPlzDSJr8TYV/qarIWwhjLd1gOXf3Jme0M/oGBM=
namespacelabs.dev/go-ids v0.0.0-20221124082625-9fc72ee06af7/go.mod h1:J+Sd+ngeffnCsaO/M7zgs2bR8Klq/ZBhS0+bbnDEH2M=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

View file

@ -0,0 +1,253 @@
package app
import (
"context"
"errors"
"fmt"
"log/slog"
"strings"
"time"
"golang.org/x/sync/errgroup"
"github.com/burrow/forgejo-nsc/internal/forgejo"
"github.com/burrow/forgejo-nsc/internal/nsc"
)
// Dispatcher launches runner instances on the execution backend.
// Implementations return the name of the launched runner on success.
type Dispatcher interface {
	LaunchRunner(ctx context.Context, req nsc.LaunchRequest) (string, error)
}
// ForgejoClient is the slice of the Forgejo API this service needs:
// minting runner registration tokens for a given scope.
type ForgejoClient interface {
	RegistrationToken(ctx context.Context, scope forgejo.Scope) (string, error)
}
// Service coordinates runner dispatch: it resolves request values against
// configured defaults, enforces the label and scope allowlists, fetches a
// registration token per runner, and launches runners via the dispatcher.
type Service struct {
	forgejo       ForgejoClient  // issues registration tokens
	dispatcher    Dispatcher     // launches runner instances
	logger        *slog.Logger   // never nil after NewService
	defaultScope  forgejo.Scope  // used when a request carries no scope
	defaultLabels []string       // used when a request carries no labels
	instanceURL   string         // Forgejo URL handed to launched runners
	defaultTTL    time.Duration  // runner lifetime when a request carries none
	allowLabels   map[string]struct{} // normalized label allowlist; empty = allow all
	allowScopes   map[string]struct{} // scopeKey-format allowlist; empty = allow all
}
// Config carries the static configuration used to build a Service.
type Config struct {
	DefaultScope  forgejo.Scope // scope used when a request supplies none
	DefaultLabels []string      // labels used when a request supplies none
	InstanceURL   string        // Forgejo base URL passed to launched runners
	DefaultTTL    time.Duration // runner lifetime used when a request supplies none
	AllowLabels   []string      // optional label allowlist (empty = allow all)
	AllowScopes   []string      // optional allowlist of scopeKey strings (empty = allow all)
}
// NewService wires a dispatch Service from its configuration and
// collaborators. A nil logger falls back to slog.Default().
func NewService(cfg Config, forgejo ForgejoClient, dispatcher Dispatcher, logger *slog.Logger) *Service {
	if logger == nil {
		logger = slog.Default()
	}

	// Pre-build the allowlists as sets for O(1) membership checks. Labels
	// are normalized the same way incoming request labels will be.
	labelSet := make(map[string]struct{}, len(cfg.AllowLabels))
	for _, raw := range cfg.AllowLabels {
		labelSet[normalizeLabel(raw)] = struct{}{}
	}
	scopeSet := make(map[string]struct{}, len(cfg.AllowScopes))
	for _, raw := range cfg.AllowScopes {
		scopeSet[raw] = struct{}{}
	}

	svc := &Service{
		forgejo:       forgejo,
		dispatcher:    dispatcher,
		logger:        logger,
		defaultScope:  cfg.DefaultScope,
		defaultLabels: cfg.DefaultLabels,
		instanceURL:   cfg.InstanceURL,
		defaultTTL:    cfg.DefaultTTL,
		allowLabels:   labelSet,
		allowScopes:   scopeSet,
	}
	return svc
}
// DispatchRequest describes one dispatch call. Zero-valued fields fall
// back to the Service defaults.
type DispatchRequest struct {
	Count    int               // number of runners to launch; <= 0 means 1
	Labels   []string          // runner labels; empty uses the configured defaults
	Scope    *Scope            // registration scope; nil uses the configured default
	TTL      time.Duration     // runner lifetime; 0 uses the configured default
	Machine  string            // machine type passed through to the launcher
	Image    string            // runner image passed through to the launcher
	ExtraEnv map[string]string // extra environment passed through to the launcher
}
// Scope is the wire representation of a registration scope. Level is
// converted to a forgejo.ScopeLevel; Owner is required for organization
// scopes, and Owner and Name are both required for repository scopes.
type Scope struct {
	Level string // scope level; parsed as forgejo.ScopeLevel
	Owner string // organization or repository owner
	Name  string // repository name
}
// DispatchResponse lists the runners launched by a Dispatch call, in the
// order their launch slots were requested.
type DispatchResponse struct {
	Runners []RunnerHandle `json:"runners"`
}

// RunnerHandle identifies a single launched runner by name.
type RunnerHandle struct {
	Name string `json:"name"`
}
// Dispatch resolves the request against the service defaults and launches
// the requested number of runners concurrently. It returns a handle for
// every runner, or the first error encountered — in which case no partial
// results are reported and outstanding launches are cancelled.
func (s *Service) Dispatch(ctx context.Context, req DispatchRequest) (DispatchResponse, error) {
	n := req.Count
	if n <= 0 {
		n = 1
	}

	scope, err := s.mergeScope(req.Scope)
	if err != nil {
		return DispatchResponse{}, err
	}

	labels, err := s.mergeLabels(req.Labels)
	if err != nil {
		return DispatchResponse{}, err
	}
	if len(labels) == 0 {
		return DispatchResponse{}, errors.New("no runner labels resolved")
	}

	ttl := req.TTL
	if ttl == 0 {
		ttl = s.defaultTTL
	}

	// Cancel any still-running launches as soon as we return.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	handles := make([]RunnerHandle, n)
	g, gctx := errgroup.WithContext(ctx)
	for i := 0; i < n; i++ {
		i := i // per-iteration copy (pre-Go 1.22 loop-variable semantics)
		g.Go(func() error {
			token, err := s.forgejo.RegistrationToken(gctx, scope)
			if err != nil {
				return fmt.Errorf("fetching registration token: %w", err)
			}
			name, err := s.dispatcher.LaunchRunner(gctx, nsc.LaunchRequest{
				Token:       token,
				InstanceURL: s.instanceURL,
				Labels:      labels,
				Duration:    ttl,
				MachineType: req.Machine,
				Image:       req.Image,
				ExtraEnv:    req.ExtraEnv,
			})
			if err != nil {
				return err
			}
			// Each goroutine writes a distinct index, so no lock is needed.
			handles[i] = RunnerHandle{Name: name}
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return DispatchResponse{}, err
	}
	return DispatchResponse{Runners: handles}, nil
}
// mergeScope validates a caller-supplied scope (falling back to the
// configured default when nil) and enforces the scope allowlist.
func (s *Service) mergeScope(value *Scope) (forgejo.Scope, error) {
	if value == nil {
		return s.defaultScope, nil
	}

	scope := forgejo.Scope{
		Level: forgejo.ScopeLevel(value.Level),
		Owner: value.Owner,
		Name:  value.Name,
	}
	if scope.Level == "" {
		return forgejo.Scope{}, errors.New("scope level is required")
	}

	// Per-level structural validation; the allowlist check is shared below.
	switch scope.Level {
	case forgejo.ScopeInstance:
		// No additional fields required.
	case forgejo.ScopeOrganization:
		if scope.Owner == "" {
			return forgejo.Scope{}, errors.New("organization scope requires owner")
		}
	case forgejo.ScopeRepository:
		if scope.Owner == "" || scope.Name == "" {
			return forgejo.Scope{}, errors.New("repository scope requires owner and name")
		}
	default:
		return forgejo.Scope{}, fmt.Errorf("unsupported scope %q", scope.Level)
	}

	if !s.scopeAllowed(scope) {
		return forgejo.Scope{}, fmt.Errorf("scope %q not allowed", scopeKey(scope))
	}
	return scope, nil
}
// mergeLabels resolves the effective runner labels for a request. An empty
// input falls back to the configured defaults. When a label allowlist is
// configured, every resolved label must appear in it; labels are compared
// by their normalized base name (any ":executor" suffix is ignored).
//
// The returned slice is always a fresh copy. The previous version cloned
// only the defaults branch and aliased a caller-supplied slice, so a caller
// mutating its request slice after Dispatch returned could alter labels in
// launch requests still in flight; cloning both paths closes that hole.
func (s *Service) mergeLabels(labels []string) ([]string, error) {
	source := labels
	if len(source) == 0 {
		source = s.defaultLabels
	}
	// Defensive copy at the API boundary: isolate the result from both the
	// caller's slice and the shared service defaults.
	resolved := append([]string{}, source...)

	if len(s.allowLabels) == 0 {
		return resolved, nil
	}
	for _, label := range resolved {
		if _, ok := s.allowLabels[normalizeLabel(label)]; !ok {
			return nil, fmt.Errorf("label %q not allowed", label)
		}
	}
	return resolved, nil
}
// normalizeLabel canonicalizes a label for allowlist comparison: it trims
// surrounding whitespace and drops any explicit executor suffix
// ("label:host"), since workflows and config allowlists typically deal in
// base label names. When no colon is present the trimmed label is returned
// unchanged (strings.Cut yields the whole input as "before" on no match).
func normalizeLabel(label string) string {
	base, _, _ := strings.Cut(strings.TrimSpace(label), ":")
	return base
}
// scopeKey renders a scope as the canonical string form used by the
// AllowScopes allowlist: "instance", "organization:<owner>", or
// "repository:<owner>/<name>". Unknown levels fall back to the raw level
// string.
func scopeKey(scope forgejo.Scope) string {
	switch scope.Level {
	case forgejo.ScopeInstance:
		return "instance"
	case forgejo.ScopeOrganization:
		return "organization:" + scope.Owner
	case forgejo.ScopeRepository:
		return "repository:" + scope.Owner + "/" + scope.Name
	default:
		return string(scope.Level)
	}
}
// scopeAllowed reports whether scope passes the configured scope allowlist.
// An empty allowlist permits every scope.
func (s *Service) scopeAllowed(scope forgejo.Scope) bool {
	if len(s.allowScopes) == 0 {
		return true
	}
	_, ok := s.allowScopes[scopeKey(scope)]
	return ok
}

View file

@ -0,0 +1,160 @@
package app
import (
"context"
"sync"
"testing"
"time"
"github.com/burrow/forgejo-nsc/internal/forgejo"
"github.com/burrow/forgejo-nsc/internal/nsc"
)
// mockForgejo is a test double for ForgejoClient. It hands out tokens from
// a fixed list (or a fixed error) and records every scope it is asked for.
// All fields are guarded by mu so it is safe under concurrent Dispatch use.
type mockForgejo struct {
	mu      sync.Mutex
	tokens  []string        // tokens returned in order; exhausting them yields context.Canceled
	scopes  []forgejo.Scope // scopes observed, recorded even when err is set
	err     error           // when non-nil, returned on every call
	counter int             // index of the next token to hand out
}
// RegistrationToken implements ForgejoClient. It records the requested
// scope first — even on error — so tests can assert on the scopes seen,
// then returns the configured error, the next token in sequence, or
// context.Canceled once the token list is exhausted.
func (m *mockForgejo) RegistrationToken(ctx context.Context, scope forgejo.Scope) (string, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.scopes = append(m.scopes, scope)
	if m.err != nil {
		return "", m.err
	}
	// Running out of tokens indicates a misconfigured test; surface an
	// error instead of panicking on the slice index below.
	if m.counter >= len(m.tokens) {
		return "", context.Canceled
	}
	tok := m.tokens[m.counter]
	m.counter++
	return tok, nil
}
// mockDispatcher is a test double for Dispatcher. It records successful
// launch requests and replies with pre-canned runner names, falling back
// to "runner" once the canned responses run out. mu guards all fields for
// concurrent Dispatch use.
type mockDispatcher struct {
	mu        sync.Mutex
	requests  []nsc.LaunchRequest // recorded calls, in arrival order (not recorded when err is set)
	responses []string            // canned names returned by call position
	err       error               // when non-nil, returned on every call
}
// LaunchRunner implements Dispatcher. When err is configured it is
// returned without recording the request; otherwise the request is
// recorded and the canned response for its position (or the fallback
// name "runner") is returned.
func (m *mockDispatcher) LaunchRunner(ctx context.Context, req nsc.LaunchRequest) (string, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	if m.err != nil {
		return "", m.err
	}

	m.requests = append(m.requests, req)
	if pos := len(m.requests) - 1; pos < len(m.responses) {
		return m.responses[pos], nil
	}
	return "runner", nil
}
// TestServiceDispatchUsesDefaults verifies that a zero-valued
// DispatchRequest falls back to every configured default: one runner, the
// default scope, labels, instance URL, and TTL.
func TestServiceDispatchUsesDefaults(t *testing.T) {
	forgejoMock := &mockForgejo{tokens: []string{"token"}}
	dispatcherMock := &mockDispatcher{responses: []string{"runner-default"}}
	cfg := Config{
		DefaultScope: forgejo.Scope{Level: forgejo.ScopeInstance},
		DefaultLabels: []string{"nscloud"},
		InstanceURL: "https://forgejo.example.com",
		DefaultTTL: 15 * time.Minute,
	}
	service := NewService(cfg, forgejoMock, dispatcherMock, nil)
	// Empty request: everything should be filled in from cfg.
	resp, err := service.Dispatch(context.Background(), DispatchRequest{})
	if err != nil {
		t.Fatalf("Dispatch returned error: %v", err)
	}
	if len(resp.Runners) != 1 || resp.Runners[0].Name != "runner-default" {
		t.Fatalf("unexpected dispatch response: %+v", resp)
	}
	// The token must have been requested with the default (instance) scope.
	if len(forgejoMock.scopes) != 1 || forgejoMock.scopes[0].Level != forgejo.ScopeInstance {
		t.Fatalf("expected default scope, got %+v", forgejoMock.scopes)
	}
	if len(dispatcherMock.requests) != 1 {
		t.Fatalf("expected one dispatcher call, got %d", len(dispatcherMock.requests))
	}
	// The launch request must carry the configured URL, labels, and TTL.
	req := dispatcherMock.requests[0]
	if req.InstanceURL != cfg.InstanceURL {
		t.Fatalf("expected instance URL %s, got %s", cfg.InstanceURL, req.InstanceURL)
	}
	if got := req.Labels; len(got) != 1 || got[0] != "nscloud" {
		t.Fatalf("expected default labels, got %v", got)
	}
	if req.Duration != cfg.DefaultTTL {
		t.Fatalf("expected duration %v, got %v", cfg.DefaultTTL, req.Duration)
	}
}
// TestServiceDispatchCustomScopeAndCount verifies that explicit request
// fields (count, labels, scope, TTL, machine, image, env) override every
// config default, and that one token + one launch happens per runner.
func TestServiceDispatchCustomScopeAndCount(t *testing.T) {
	forgejoMock := &mockForgejo{tokens: []string{"token-1", "token-2"}}
	dispatcherMock := &mockDispatcher{responses: []string{"runner-1", "runner-2"}}
	cfg := Config{
		DefaultScope:  forgejo.Scope{Level: forgejo.ScopeInstance},
		DefaultLabels: []string{"default"},
		InstanceURL:   "https://forgejo.example.com",
		DefaultTTL:    10 * time.Minute,
	}
	service := NewService(cfg, forgejoMock, dispatcherMock, nil)
	reqScope := &Scope{Level: string(forgejo.ScopeRepository), Owner: "acme", Name: "repo"}
	res, err := service.Dispatch(context.Background(), DispatchRequest{
		Count:    2,
		Labels:   []string{"custom"},
		Scope:    reqScope,
		TTL:      5 * time.Minute,
		Machine:  "4x8",
		Image:    "runner:latest",
		ExtraEnv: map[string]string{"FOO": "bar"},
	})
	if err != nil {
		t.Fatalf("Dispatch returned error: %v", err)
	}
	if len(res.Runners) != 2 {
		t.Fatalf("expected two runners, got %+v", res)
	}
	// A registration token is fetched per runner, at the request's scope.
	if len(forgejoMock.scopes) != 2 {
		t.Fatalf("expected two scope calls, got %d", len(forgejoMock.scopes))
	}
	for _, scope := range forgejoMock.scopes {
		if scope.Level != forgejo.ScopeRepository || scope.Owner != "acme" || scope.Name != "repo" {
			t.Fatalf("unexpected scope: %+v", scope)
		}
	}
	if len(dispatcherMock.requests) != 2 {
		t.Fatalf("expected two dispatcher calls, got %d", len(dispatcherMock.requests))
	}
	for _, call := range dispatcherMock.requests {
		if call.MachineType != "4x8" || call.Image != "runner:latest" {
			t.Fatalf("unexpected machine/image in %+v", call)
		}
		if call.Duration != 5*time.Minute {
			t.Fatalf("expected TTL to override default, got %v", call.Duration)
		}
		if call.Labels[0] != "custom" {
			t.Fatalf("expected custom labels, got %v", call.Labels)
		}
		if call.ExtraEnv["FOO"] != "bar" {
			t.Fatalf("expected env passthrough, got %v", call.ExtraEnv)
		}
	}
}
func TestServiceDispatchErrorsWithoutLabels(t *testing.T) {
service := NewService(Config{DefaultScope: forgejo.Scope{Level: forgejo.ScopeInstance}}, &mockForgejo{}, &mockDispatcher{}, nil)
if _, err := service.Dispatch(context.Background(), DispatchRequest{}); err == nil {
t.Fatalf("expected error when no labels are available")
}
}

View file

@ -0,0 +1,108 @@
package autoscaler
import (
"fmt"
"os"
"time"
"gopkg.in/yaml.v3"
"github.com/burrow/forgejo-nsc/internal/config"
)
// Config is the top-level autoscaler configuration.
type Config struct {
	Listen     string           `yaml:"listen"`     // HTTP listen address; defaults to ":8090"
	Dispatcher DispatcherConfig `yaml:"dispatcher"` // global dispatcher endpoint (required)
	Instances  []InstanceConfig `yaml:"instances"`  // Forgejo instances to autoscale (at least one)
}

// DispatcherConfig points at the runner dispatcher service.
type DispatcherConfig struct {
	URL     string          `yaml:"url"`
	Timeout config.Duration `yaml:"timeout"` // per-request timeout; defaults to 15s
}

// InstanceConfig describes one Forgejo instance and its scaling targets.
type InstanceConfig struct {
	Name           string             `yaml:"name"`
	Forgejo        ForgejoInstance    `yaml:"forgejo"`
	Scope          config.ScopeConfig `yaml:"scope"`
	PollInterval   config.Duration    `yaml:"poll_interval"`   // defaults to 30s
	DisablePolling bool               `yaml:"disable_polling"` // webhook-only mode
	WebhookSecret  string             `yaml:"webhook_secret"`
	Webhook        WebhookConfig      `yaml:"webhook"`
	Dispatcher     *DispatcherConfig  `yaml:"dispatcher"` // optional per-instance override
	Targets        []TargetConfig     `yaml:"targets"`
}

// ForgejoInstance holds the API endpoint and token for one Forgejo server.
type ForgejoInstance struct {
	BaseURL string `yaml:"base_url"`
	Token   string `yaml:"token"`
}

// WebhookConfig describes the webhook the autoscaler registers on Forgejo.
type WebhookConfig struct {
	URL         string   `yaml:"url"`
	ContentType string   `yaml:"content_type"` // defaults to "json"
	Events      []string `yaml:"events"`       // defaults to ["workflow_job"]
	Active      *bool    `yaml:"active"`       // nil means active
}

// TargetConfig is one label set to keep scaled, plus runner launch options.
type TargetConfig struct {
	Labels      []string          `yaml:"labels"`
	MinIdle     int               `yaml:"min_idle"`
	TTL         config.Duration   `yaml:"ttl"`
	MachineType string            `yaml:"machine_type"`
	Image       string            `yaml:"image"`
	Env         map[string]string `yaml:"env"`
}
// LoadConfig reads the autoscaler YAML config at path, fills in defaults
// (listen address, dispatcher timeout, poll interval, webhook events and
// content type) and validates that every instance and target is complete.
func LoadConfig(path string) (Config, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return Config{}, err
	}
	var cfg Config
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		return Config{}, err
	}
	if cfg.Listen == "" {
		cfg.Listen = ":8090"
	}
	if cfg.Dispatcher.URL == "" {
		return Config{}, fmt.Errorf("dispatcher.url is required")
	}
	if cfg.Dispatcher.Timeout.Duration == 0 {
		cfg.Dispatcher.Timeout = config.Duration{Duration: 15 * time.Second}
	}
	if len(cfg.Instances) == 0 {
		return Config{}, fmt.Errorf("at least one instance must be configured")
	}
	for i := range cfg.Instances {
		// Take a pointer so defaults written here stick in cfg.
		inst := &cfg.Instances[i]
		if inst.Name == "" {
			return Config{}, fmt.Errorf("instance[%d] missing name", i)
		}
		if inst.Forgejo.BaseURL == "" || inst.Forgejo.Token == "" {
			return Config{}, fmt.Errorf("instance %s missing forgejo.base_url or token", inst.Name)
		}
		if inst.PollInterval.Duration == 0 {
			inst.PollInterval = config.Duration{Duration: 30 * time.Second}
		}
		if len(inst.Webhook.Events) == 0 {
			inst.Webhook.Events = []string{"workflow_job"}
		}
		if inst.Webhook.ContentType == "" {
			inst.Webhook.ContentType = "json"
		}
		if len(inst.Targets) == 0 {
			return Config{}, fmt.Errorf("instance %s requires at least one target", inst.Name)
		}
		for ti, tgt := range inst.Targets {
			if len(tgt.Labels) == 0 {
				return Config{}, fmt.Errorf("instance %s target[%d] missing labels", inst.Name, ti)
			}
			if tgt.MinIdle < 0 {
				return Config{}, fmt.Errorf("instance %s target[%d] min_idle must be >= 0", inst.Name, ti)
			}
		}
	}
	return cfg, nil
}

View file

@ -0,0 +1,385 @@
package autoscaler
import (
"bytes"
"context"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/go-chi/chi/v5"
"namespacelabs.dev/foundation/std/tasks"
"github.com/burrow/forgejo-nsc/internal/forgejo"
)
// Service is the autoscaler entry point: it owns the HTTP router and one
// controller per configured Forgejo instance, keyed by instance name.
type Service struct {
	listen      string
	controllers map[string]*InstanceController
	router      chi.Router
}
// NewService builds the autoscaler: one InstanceController per configured
// Forgejo instance plus an HTTP router exposing /healthz and the
// per-instance webhook endpoint at /webhook/{instance}.
func NewService(cfg Config) (*Service, error) {
	controllers := make(map[string]*InstanceController)
	for _, inst := range cfg.Instances {
		scope, err := inst.Scope.ToScope()
		if err != nil {
			return nil, err
		}
		forgejoClient, err := forgejo.NewClient(inst.Forgejo.BaseURL, inst.Forgejo.Token)
		if err != nil {
			return nil, err
		}
		// Per-instance dispatcher override; inherit the global timeout when
		// the override omits one.
		dispCfg := cfg.Dispatcher
		if inst.Dispatcher != nil && inst.Dispatcher.URL != "" {
			dispCfg = *inst.Dispatcher
			if dispCfg.Timeout.Duration == 0 {
				dispCfg.Timeout = cfg.Dispatcher.Timeout
			}
		}
		dClient := newDispatcherClient(dispCfg.URL, dispCfg.Timeout.Duration)
		// Webhooks default to active unless explicitly disabled in config.
		webhookActive := true
		if inst.Webhook.Active != nil {
			webhookActive = *inst.Webhook.Active
		}
		controller := &InstanceController{
			name:       inst.Name,
			cfg:        inst,
			scope:      scope,
			forgejo:    forgejoClient,
			dispatcher: dClient,
			webhook: forgejo.WebhookConfig{
				URL:         inst.Webhook.URL,
				ContentType: inst.Webhook.ContentType,
				Events:      inst.Webhook.Events,
				Active:      webhookActive,
			},
			secret: inst.WebhookSecret,
		}
		controllers[inst.Name] = controller
	}
	router := chi.NewRouter()
	service := &Service{
		listen:      cfg.Listen,
		controllers: controllers,
		router:      router,
	}
	router.Get("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("ok"))
	})
	router.Post("/webhook/{instance}", service.handleWebhook)
	return service, nil
}
// Start registers webhooks, launches one polling goroutine per instance,
// and serves the HTTP API until ctx is cancelled. It blocks; the server is
// shut down and all pollers joined before it returns.
func (s *Service) Start(ctx context.Context) error {
	for _, controller := range s.controllers {
		if err := controller.EnsureWebhook(ctx); err != nil {
			return err
		}
	}
	var wg sync.WaitGroup
	for _, controller := range s.controllers {
		wg.Add(1)
		go func(c *InstanceController) {
			defer wg.Done()
			c.Run(ctx)
		}(controller)
	}
	srv := &http.Server{
		Addr:    s.listen,
		Handler: s.router,
	}
	// Shut the server down on cancellation so ListenAndServe returns
	// http.ErrServerClosed instead of blocking forever.
	go func() {
		<-ctx.Done()
		_ = srv.Shutdown(context.Background())
	}()
	if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
		return err
	}
	wg.Wait()
	return nil
}
// handleWebhook receives Forgejo webhook deliveries at /webhook/{instance},
// verifies the optional HMAC signature, and dispatches a runner when a
// workflow job was queued. Responds 202 on accepted deliveries.
func (s *Service) handleWebhook(w http.ResponseWriter, r *http.Request) {
	name := chi.URLParam(r, "instance")
	controller, ok := s.controllers[name]
	if !ok {
		http.Error(w, "unknown instance", http.StatusNotFound)
		return
	}
	// Cap the payload size so a misbehaving or malicious sender cannot
	// exhaust memory; workflow_job payloads are far below 1 MiB.
	body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, 1<<20))
	if err != nil {
		http.Error(w, "invalid body", http.StatusBadRequest)
		return
	}
	if controller.cfg.WebhookSecret != "" {
		signature := r.Header.Get("X-Gitea-Signature")
		if signature == "" {
			http.Error(w, "missing signature", http.StatusUnauthorized)
			return
		}
		if !verifySignature(controller.cfg.WebhookSecret, signature, body) {
			http.Error(w, "invalid signature", http.StatusUnauthorized)
			return
		}
	}
	var payload workflowJobPayload
	if err := json.Unmarshal(body, &payload); err != nil {
		http.Error(w, "bad payload", http.StatusBadRequest)
		return
	}
	controller.MarkWebhookSeen()
	if payload.Action == "queued" {
		controller.DispatchForJob(r.Context(), payload)
	}
	w.WriteHeader(http.StatusAccepted)
}
// workflowJobPayload is the subset of a Forgejo workflow_job webhook body
// that the autoscaler inspects: the action and the job's requested labels.
type workflowJobPayload struct {
	Action      string `json:"action"`
	WorkflowJob struct {
		Labels []string `json:"labels"`
	} `json:"workflow_job"`
}
// InstanceController reconciles one Forgejo instance against its configured
// scaling targets, via polling and/or webhook-triggered dispatch.
type InstanceController struct {
	name       string
	cfg        InstanceConfig
	scope      forgejo.Scope
	forgejo    *forgejo.Client
	dispatcher *dispatcherClient
	ready      atomic.Bool // set once a webhook delivery has been seen
	webhook    forgejo.WebhookConfig
	secret     string
}
// EnsureWebhook registers (or updates) this instance's webhook on Forgejo.
// It is a no-op when no webhook URL is configured.
func (c *InstanceController) EnsureWebhook(ctx context.Context) error {
	if c.webhook.URL == "" {
		return nil
	}
	return tasks.Action("autoscaler.ensure-webhook").Arg("instance", c.name).Run(ctx, func(ctx context.Context) error {
		return c.forgejo.EnsureWebhook(ctx, c.scope, c.webhook, c.secret)
	})
}
// Run polls the instance on the configured interval until ctx is cancelled.
// With polling disabled it simply blocks, leaving webhooks as the only
// trigger. Reconcile errors are swallowed here (surfaced via the task action).
func (c *InstanceController) Run(ctx context.Context) {
	if c.cfg.DisablePolling {
		<-ctx.Done()
		return
	}
	ticker := time.NewTicker(c.cfg.PollInterval.Duration)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			_ = tasks.Action("autoscaler.poll").Arg("instance", c.name).Run(ctx, func(ctx context.Context) error {
				return c.reconcile(ctx)
			})
		}
	}
}
// reconcile performs one polling pass: for every target it compares the
// idle-runner count against min_idle and the queued-job backlog, then asks
// the dispatcher for however many runners are missing.
func (c *InstanceController) reconcile(ctx context.Context) error {
	runners, err := c.forgejo.ListRunners(ctx, c.scope)
	if err != nil {
		// Keep polling even if runner listing fails; we can still dispatch based on queued jobs.
		runners = nil
	}
	for _, target := range c.cfg.Targets {
		idle := countIdle(runners, target.Labels)
		need := 0
		if idle < target.MinIdle {
			need = target.MinIdle - idle
		}
		jobs, jobErr := c.forgejo.ListRunJobs(ctx, c.scope, target.Labels)
		if jobErr != nil {
			return jobErr
		}
		waiting := countWaitingJobs(jobs, target.Labels)
		// Scale-to-zero friendly: if anything is waiting and there are no idle runners
		// for that label set, dispatch exactly one runner to unblock the queue.
		if waiting > 0 && idle == 0 && need < 1 {
			need = 1
		}
		if need <= 0 {
			continue
		}
		if err := c.dispatch(ctx, target, need, "poll"); err != nil {
			return err
		}
	}
	return nil
}
// dispatch asks the dispatcher service for count runners matching target.
// Optional fields (ttl, machine type, image, env) are only sent when set so
// the dispatcher's own defaults apply otherwise. reason is for tracing only.
func (c *InstanceController) dispatch(ctx context.Context, target TargetConfig, count int, reason string) error {
	if count <= 0 {
		return nil
	}
	req := dispatcherRequest{
		Count:  count,
		Labels: target.Labels,
	}
	if target.TTL.Duration > 0 {
		req.TTL = target.TTL.Duration.String()
	}
	if target.MachineType != "" {
		req.MachineType = target.MachineType
	}
	if target.Image != "" {
		req.Image = target.Image
	}
	if len(target.Env) > 0 {
		req.Env = target.Env
	}
	return tasks.Action("autoscaler.dispatch").Arg("instance", c.name).Arg("reason", reason).Arg("labels", strings.Join(target.Labels, ",")).Run(ctx, func(ctx context.Context) error {
		return c.dispatcher.Dispatch(ctx, req)
	})
}
// DispatchForJob launches one runner for a freshly queued workflow job when
// some configured target's label set is satisfied by the job's labels. Only
// the first matching target dispatches; other actions are ignored, and any
// dispatch error is deliberately dropped (polling will catch up).
func (c *InstanceController) DispatchForJob(ctx context.Context, payload workflowJobPayload) {
	switch strings.ToLower(payload.Action) {
	case "queued", "waiting":
		// fall through to matching
	default:
		return
	}
	have := payload.WorkflowJob.Labels
	for _, tgt := range c.cfg.Targets {
		if !labelsMatch(have, tgt.Labels) {
			continue
		}
		_ = c.dispatch(ctx, tgt, 1, "webhook")
		return
	}
}
// MarkWebhookSeen records that at least one webhook delivery has arrived.
func (c *InstanceController) MarkWebhookSeen() {
	c.ready.Store(true)
}
// countIdle reports how many runners are online, not busy, and carry every
// label in labels.
func countIdle(runners []forgejo.Runner, labels []string) int {
	idle := 0
	for i := range runners {
		r := &runners[i]
		unavailable := r.Busy || strings.ToLower(r.Status) != "online"
		if unavailable {
			continue
		}
		if labelsMatch(extractLabels(r.Labels), labels) {
			idle++
		}
	}
	return idle
}
// countWaitingJobs reports how many jobs are still pending ("waiting" or
// "queued", case-insensitively) and whose runs_on labels are all in labels.
func countWaitingJobs(jobs []forgejo.RunJob, labels []string) int {
	pending := 0
	for i := range jobs {
		job := &jobs[i]
		switch strings.ToLower(job.Status) {
		case "waiting", "queued":
			if labelsMatch(job.RunsOn, labels) {
				pending++
			}
		}
	}
	return pending
}
// extractLabels flattens runner label records into their bare names,
// preserving order.
func extractLabels(src []forgejo.RunnerLabel) []string {
	names := make([]string, len(src))
	for i, lbl := range src {
		names[i] = lbl.Name
	}
	return names
}
// labelsMatch reports whether every label in want is present in have.
// An empty want matches anything (vacuous truth); comparison is exact.
func labelsMatch(have, want []string) bool {
	present := make(map[string]struct{}, len(have))
	for _, label := range have {
		present[label] = struct{}{}
	}
	for _, label := range want {
		if _, found := present[label]; !found {
			return false
		}
	}
	return true
}
// verifySignature checks a Gitea/Forgejo-style webhook signature: the hex
// HMAC-SHA256 of body keyed by secret, optionally prefixed with "sha256=".
// The supplied hex is decoded before comparison, so matching is
// case-insensitive and malformed hex is rejected outright; the final
// comparison is constant time via hmac.Equal.
func verifySignature(secret, signature string, body []byte) bool {
	// Strip an algorithm prefix such as "sha256=..." if present.
	if parts := strings.SplitN(signature, "=", 2); len(parts) == 2 {
		signature = parts[1]
	}
	provided, err := hex.DecodeString(signature)
	if err != nil {
		return false
	}
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write(body)
	return hmac.Equal(mac.Sum(nil), provided)
}
// dispatcherClient is a thin HTTP client for the runner dispatcher service.
type dispatcherClient struct {
	url    string
	client *http.Client
}

// dispatcherRequest is the JSON body for POST /api/v1/dispatch.
type dispatcherRequest struct {
	Count       int               `json:"count"`
	Labels      []string          `json:"labels"`
	TTL         string            `json:"ttl,omitempty"` // Go duration string, e.g. "15m0s"
	MachineType string            `json:"machine_type,omitempty"`
	Image       string            `json:"image,omitempty"`
	Env         map[string]string `json:"env,omitempty"`
}
// newDispatcherClient builds a dispatcher API client for the given base URL.
// A zero timeout falls back to 30 seconds.
func newDispatcherClient(url string, timeout time.Duration) *dispatcherClient {
	const fallback = 30 * time.Second
	if timeout == 0 {
		timeout = fallback
	}
	httpClient := &http.Client{Timeout: timeout}
	return &dispatcherClient{url: url, client: httpClient}
}
// Dispatch POSTs req as JSON to the dispatcher's /api/v1/dispatch endpoint.
// Any status >= 300 is reported as an error; the response body content is
// otherwise ignored.
func (d *dispatcherClient) Dispatch(ctx context.Context, req dispatcherRequest) error {
	body, err := json.Marshal(req)
	if err != nil {
		return fmt.Errorf("encoding dispatch request: %w", err)
	}
	endpoint := strings.TrimSuffix(d.url, "/") + "/api/v1/dispatch"
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body))
	if err != nil {
		return err
	}
	httpReq.Header.Set("Content-Type", "application/json")
	resp, err := d.client.Do(httpReq)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Drain the body so the transport can reuse the underlying connection.
	_, _ = io.Copy(io.Discard, resp.Body)
	if resp.StatusCode >= 300 {
		return fmt.Errorf("dispatcher returned %s", resp.Status)
	}
	return nil
}

View file

@ -0,0 +1,185 @@
package config
import (
"errors"
"fmt"
"os"
"strings"
"time"
"gopkg.in/yaml.v3"
"github.com/burrow/forgejo-nsc/internal/forgejo"
)
// Duration wraps time.Duration to support YAML unmarshalling from strings
// (e.g. "15m") or bare integers (seconds). The zero value is 0s.
type Duration struct {
	time.Duration
}
// UnmarshalYAML implements yaml.v3 unmarshalling for Duration. Bare YAML
// integers are interpreted as seconds; every other node must parse with
// time.ParseDuration (e.g. "15m", "1h30m").
func (d *Duration) UnmarshalYAML(value *yaml.Node) error {
	switch value.Tag {
	case "!!int":
		var seconds int64
		if err := value.Decode(&seconds); err != nil {
			return err
		}
		d.Duration = time.Duration(seconds) * time.Second
		return nil
	default:
		parsed, err := time.ParseDuration(value.Value)
		if err != nil {
			return err
		}
		d.Duration = parsed
		return nil
	}
}
// MarshalYAML implements yaml.v3 marshalling; durations serialize to their
// canonical string form (e.g. "15m0s").
func (d Duration) MarshalYAML() (any, error) {
	return d.Duration.String(), nil
}
// Config is the dispatcher service configuration.
type Config struct {
	Listen    string          `yaml:"listen"` // HTTP listen address; defaults to ":8080"
	Forgejo   ForgejoConfig   `yaml:"forgejo"`
	Namespace NamespaceConfig `yaml:"namespace"`
	Runner    RunnerConfig    `yaml:"runner"`
}

// ForgejoConfig identifies the Forgejo server and default runner scope.
type ForgejoConfig struct {
	BaseURL string `yaml:"base_url"`
	// InstanceURL is the URL runners should use when registering with Forgejo.
	// This must be reachable from the spawned runner (e.g. the public URL like
	// https://git.burrow.net), and may differ from BaseURL (which can be a local
	// loopback URL on the forge host).
	InstanceURL   string      `yaml:"instance_url"`
	Token         string      `yaml:"token"`
	DefaultScope  ScopeConfig `yaml:"default_scope"`
	DefaultLabels []string    `yaml:"default_labels"`
	Timeout       Duration    `yaml:"timeout"` // defaults to 30s
	ExtraHeaders  yaml.Node   `yaml:"extra_headers"`
}

// ScopeConfig is the YAML form of a runner scope; see ToScope for the
// validation rules per level.
type ScopeConfig struct {
	Level string `yaml:"level"`
	Owner string `yaml:"owner,omitempty"`
	Name  string `yaml:"name,omitempty"`
}

// NamespaceConfig controls how runner instances are launched on Namespace.
type NamespaceConfig struct {
	NSCBinary string `yaml:"nsc_binary"` // defaults to "nsc"
	// ComputeBaseURL is the Namespace Cloud Compute API endpoint (Connect RPC base URL).
	// This is used for macOS runners, since NSC "run" is container-based (Linux-only).
	// Example: "https://ord4.compute.namespaceapis.com"
	ComputeBaseURL string `yaml:"compute_base_url"`
	Image          string `yaml:"image"` // defaults to the Forgejo runner image
	MachineType    string `yaml:"machine_type"`
	// MacosBaseImageID selects which macOS base image to use (e.g. "tahoe").
	MacosBaseImageID string `yaml:"macos_base_image_id"`
	// MacosMachineArch is the architecture used for macOS instances (typically "arm64").
	MacosMachineArch string   `yaml:"macos_machine_arch"`
	Duration         Duration `yaml:"duration"` // instance lifetime; defaults to 30m
	WorkDir          string   `yaml:"workdir"`
	MaxParallel      int64    `yaml:"max_parallel"` // defaults to 4
	Environment      []string `yaml:"environment"`
	AllowLabels      []string `yaml:"allow_labels"`
	AllowScopes      []string `yaml:"allow_scopes"`
	Network          string   `yaml:"network"`
	InstanceTags     []string `yaml:"instance_tags"`
}

// RunnerConfig controls how spawned runners register themselves.
type RunnerConfig struct {
	NamePrefix string `yaml:"name_prefix"` // defaults to "nscloud-"
	Executor   string `yaml:"executor"`    // defaults to "shell"
}
// Load reads the YAML config at path, applies defaults, and validates it.
func Load(path string) (*Config, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	cfg := new(Config)
	if err := yaml.Unmarshal(raw, cfg); err != nil {
		return nil, err
	}
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}
// Validate applies defaults and checks required fields. It mutates the
// receiver (defaults are written back), so call it before using the config.
func (c *Config) Validate() error {
	if c.Listen == "" {
		c.Listen = ":8080"
	}
	if c.Runner.NamePrefix == "" {
		c.Runner.NamePrefix = "nscloud-"
	}
	if c.Runner.Executor == "" {
		c.Runner.Executor = "shell"
	}
	if c.Forgejo.BaseURL == "" {
		return errors.New("forgejo.base_url is required")
	}
	if c.Forgejo.InstanceURL == "" {
		// Backwards-compatible default: assume runners can reach the same URL.
		c.Forgejo.InstanceURL = c.Forgejo.BaseURL
	}
	if c.Forgejo.Token == "" {
		return errors.New("forgejo.token is required")
	}
	if c.Forgejo.Timeout.Duration == 0 {
		c.Forgejo.Timeout.Duration = 30 * time.Second
	}
	// Surface scope config errors early rather than at first dispatch.
	if _, err := c.Forgejo.DefaultScope.ToScope(); err != nil {
		return err
	}
	if c.Namespace.NSCBinary == "" {
		c.Namespace.NSCBinary = "nsc"
	}
	if c.Namespace.Image == "" {
		c.Namespace.Image = "code.forgejo.org/forgejo/runner:11"
	}
	if c.Namespace.MacosBaseImageID == "" {
		c.Namespace.MacosBaseImageID = "tahoe"
	}
	if c.Namespace.MacosMachineArch == "" {
		c.Namespace.MacosMachineArch = "arm64"
	}
	if c.Namespace.Duration.Duration == 0 {
		c.Namespace.Duration.Duration = 30 * time.Minute
	}
	if c.Namespace.MaxParallel <= 0 {
		c.Namespace.MaxParallel = 4
	}
	return nil
}
// ToScope converts the YAML scope into a forgejo.Scope, enforcing the
// owner/name fields each level requires. Level matching is case-insensitive.
func (s ScopeConfig) ToScope() (forgejo.Scope, error) {
	level := forgejo.ScopeLevel(strings.ToLower(s.Level))
	switch level {
	case forgejo.ScopeInstance:
		return forgejo.Scope{Level: level}, nil
	case forgejo.ScopeOrganization:
		if s.Owner == "" {
			return forgejo.Scope{}, errors.New("forgejo default scope requires owner for organization level")
		}
		return forgejo.Scope{Level: level, Owner: s.Owner}, nil
	case forgejo.ScopeRepository:
		if s.Owner == "" || s.Name == "" {
			return forgejo.Scope{}, errors.New("forgejo default scope requires owner and name for repository level")
		}
		return forgejo.Scope{Level: level, Owner: s.Owner, Name: s.Name}, nil
	default:
		return forgejo.Scope{}, fmt.Errorf("unknown scope level %q", s.Level)
	}
}

View file

@ -0,0 +1,41 @@
package config
import (
"os"
"path/filepath"
"testing"
"time"
)
// TestLoadConfig round-trips a minimal YAML config through Load and checks
// that explicit values survive and durations parse from strings.
func TestLoadConfig(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "config.yaml")
	content := `
listen: ":9090"
forgejo:
  base_url: https://forgejo.test
  token: abc
  default_scope:
    level: instance
namespace:
  nsc_binary: /usr/bin/nsc
  image: ghcr.io/forgejo/runner:3
  duration: 15m
runner:
  name_prefix: custom-
`
	if err := os.WriteFile(path, []byte(content), 0o600); err != nil {
		t.Fatal(err)
	}
	cfg, err := Load(path)
	if err != nil {
		t.Fatalf("Load() error = %v", err)
	}
	if cfg.Listen != ":9090" {
		t.Fatalf("unexpected listen addr: %s", cfg.Listen)
	}
	if cfg.Namespace.Duration.Duration != 15*time.Minute {
		t.Fatalf("duration parsing failed: %s", cfg.Namespace.Duration.Duration)
	}
}

View file

@ -0,0 +1,454 @@
package forgejo
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"path"
"strings"
"time"
)
// ScopeLevel selects which Forgejo API namespace runner operations target.
type ScopeLevel string

const (
	ScopeInstance     ScopeLevel = "instance"
	ScopeOrganization ScopeLevel = "organization"
	ScopeRepository   ScopeLevel = "repository"
)

// Scope identifies an instance, an organization (Owner), or a repository
// (Owner/Name) on a Forgejo server.
type Scope struct {
	Level ScopeLevel
	Owner string
	Name  string
}
// Client is a minimal Forgejo REST API client authenticated by token.
type Client struct {
	baseURL *url.URL
	token   string
	client  *http.Client
}

// Runner mirrors Forgejo's action runner resource.
type Runner struct {
	ID     int64         `json:"id"`
	Name   string        `json:"name"`
	Status string        `json:"status"`
	Busy   bool          `json:"busy"`
	Labels []RunnerLabel `json:"labels"`
}

// RunnerLabel is a single label attached to a runner.
type RunnerLabel struct {
	Name string `json:"name"`
}

// RunJob mirrors Forgejo's action job resource.
type RunJob struct {
	ID     int64    `json:"id"`
	Name   string   `json:"name"`
	RunsOn []string `json:"runs_on"`
	Status string   `json:"status"`
	TaskID int64    `json:"task_id"`
}

// WebhookConfig describes the webhook EnsureWebhook creates or updates.
type WebhookConfig struct {
	URL         string
	ContentType string
	Events      []string
	Active      bool
}
// Option customizes a Client at construction time.
type Option func(*Client)

// WithHTTPClient overrides the default HTTP client; a nil client is ignored.
func WithHTTPClient(httpClient *http.Client) Option {
	return func(c *Client) {
		if httpClient != nil {
			c.client = httpClient
		}
	}
}
// NewClient constructs a Forgejo API client for rawURL authenticated with
// token (whitespace-trimmed). Options may replace the underlying HTTP
// client; the default applies a 30-second timeout.
func NewClient(rawURL, token string, opts ...Option) (*Client, error) {
	if rawURL == "" {
		return nil, errors.New("forgejo base URL is required")
	}
	parsed, err := url.Parse(rawURL)
	if err != nil {
		return nil, err
	}
	c := &Client{
		baseURL: parsed,
		token:   strings.TrimSpace(token),
		client:  &http.Client{Timeout: 30 * time.Second},
	}
	for _, apply := range opts {
		apply(c)
	}
	// Validate after options: the token requirement is independent of them.
	if c.token == "" {
		return nil, errors.New("forgejo token is required")
	}
	return c, nil
}
// registrationTokenResponse mirrors Forgejo's registration token payload.
// Note: the TTL field decodes the "expires_at" timestamp.
type registrationTokenResponse struct {
	Token string    `json:"token"`
	TTL   time.Time `json:"expires_at"`
}

// RegistrationToken fetches a runner registration token for scope.
func (c *Client) RegistrationToken(ctx context.Context, scope Scope) (string, error) {
	endpoint, err := c.registrationEndpoint(scope)
	if err != nil {
		return "", err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Authorization", fmt.Sprintf("token %s", c.token))
	req.Header.Set("Accept", "application/json")
	resp, err := c.client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return "", fmt.Errorf("forgejo returned %s", resp.Status)
	}
	var decoded registrationTokenResponse
	if err := json.NewDecoder(resp.Body).Decode(&decoded); err != nil {
		return "", err
	}
	// Guard against a 2xx with an empty body/token.
	if decoded.Token == "" {
		return "", errors.New("forgejo response missing token")
	}
	return decoded.Token, nil
}
// ListRunners returns the runners registered at scope.
// NOTE(review): only a single response page is fetched — confirm against
// Forgejo's pagination defaults for large fleets.
func (c *Client) ListRunners(ctx context.Context, scope Scope) ([]Runner, error) {
	endpoint, err := c.runnersEndpoint(scope)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", fmt.Sprintf("token %s", c.token))
	req.Header.Set("Accept", "application/json")
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return nil, fmt.Errorf("forgejo returned %s", resp.Status)
	}
	var decoded []Runner
	if err := json.NewDecoder(resp.Body).Decode(&decoded); err != nil {
		return nil, err
	}
	return decoded, nil
}
// ListRunJobs returns action jobs at scope, optionally filtered by labels
// (sent as a comma-joined "labels" query parameter). A JSON null response
// is normalized to an empty, non-nil slice.
func (c *Client) ListRunJobs(ctx context.Context, scope Scope, labels []string) ([]RunJob, error) {
	endpoint, err := c.runJobsEndpoint(scope)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, err
	}
	if len(labels) > 0 {
		query := req.URL.Query()
		query.Set("labels", strings.Join(labels, ","))
		req.URL.RawQuery = query.Encode()
	}
	req.Header.Set("Authorization", fmt.Sprintf("token %s", c.token))
	req.Header.Set("Accept", "application/json")
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return nil, fmt.Errorf("forgejo returned %s", resp.Status)
	}
	var decoded []RunJob
	if err := json.NewDecoder(resp.Body).Decode(&decoded); err != nil {
		return nil, err
	}
	if decoded == nil {
		decoded = []RunJob{}
	}
	return decoded, nil
}
// EnsureWebhook idempotently installs a webhook at scope: an existing hook
// whose URL matches cfg.URL (case-insensitively) is updated, otherwise a
// new one is created. A no-op when cfg.URL is empty.
func (c *Client) EnsureWebhook(ctx context.Context, scope Scope, cfg WebhookConfig, secret string) error {
	if cfg.URL == "" {
		return nil
	}
	hooks, err := c.listWebhooks(ctx, scope)
	if err != nil {
		return err
	}
	for _, hook := range hooks {
		if strings.EqualFold(hook.Config.URL, cfg.URL) {
			return c.updateWebhook(ctx, scope, hook.ID, cfg, secret)
		}
	}
	return c.createWebhook(ctx, scope, cfg, secret)
}
// registrationEndpoint builds the registration-token URL for scope.
// NOTE(review): Owner/Name are joined without path-escaping — assumes they
// come from trusted config; confirm before ever feeding in user input.
func (c *Client) registrationEndpoint(scope Scope) (string, error) {
	var segments []string
	switch scope.Level {
	case ScopeRepository:
		if scope.Owner == "" || scope.Name == "" {
			return "", errors.New("repository scope requires owner and name")
		}
		segments = []string{"api", "v1", "repos", scope.Owner, scope.Name, "actions", "runners", "registration-token"}
	case ScopeOrganization:
		if scope.Owner == "" {
			return "", errors.New("organization scope requires owner")
		}
		segments = []string{"api", "v1", "orgs", scope.Owner, "actions", "runners", "registration-token"}
	case ScopeInstance:
		segments = []string{"api", "v1", "admin", "actions", "runners", "registration-token"}
	default:
		return "", fmt.Errorf("unsupported scope level %q", scope.Level)
	}
	// Preserve any base path on the configured URL.
	clone := *c.baseURL
	clone.Path = path.Join(append([]string{clone.Path}, segments...)...)
	return clone.String(), nil
}
// webhook is the subset of Forgejo's hook resource needed to match by URL.
type webhook struct {
	ID     int64                `json:"id"`
	Config webhookConfigPayload `json:"config"`
}

// webhookConfigPayload is the nested "config" object of a hook.
type webhookConfigPayload struct {
	URL         string `json:"url"`
	ContentType string `json:"content_type"`
}
// listWebhooks fetches the hooks registered at scope.
func (c *Client) listWebhooks(ctx context.Context, scope Scope) ([]webhook, error) {
	endpoint, err := c.webhooksEndpoint(scope)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", fmt.Sprintf("token %s", c.token))
	req.Header.Set("Accept", "application/json")
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return nil, fmt.Errorf("forgejo returned %s", resp.Status)
	}
	var hooks []webhook
	if err := json.NewDecoder(resp.Body).Decode(&hooks); err != nil {
		return nil, err
	}
	return hooks, nil
}
// createWebhook registers a new "gitea"-type hook at scope with the given
// delivery config and HMAC secret.
func (c *Client) createWebhook(ctx context.Context, scope Scope, cfg WebhookConfig, secret string) error {
	payload := webhookRequestPayload{
		Type: "gitea",
		Config: map[string]string{
			"url":          cfg.URL,
			"content_type": cfg.ContentType,
			"secret":       secret,
			"insecure_ssl": "0",
		},
		Events: cfg.Events,
		Active: cfg.Active,
	}
	body, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	endpoint, err := c.webhooksEndpoint(scope)
	if err != nil {
		return err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", fmt.Sprintf("token %s", c.token))
	req.Header.Set("Content-Type", "application/json")
	resp, err := c.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("forgejo returned %s", resp.Status)
	}
	return nil
}
// updateWebhook PATCHes the hook with the given id at scope so its config,
// events, secret, and active flag match cfg.
func (c *Client) updateWebhook(ctx context.Context, scope Scope, id int64, cfg WebhookConfig, secret string) error {
	payload := webhookRequestPayload{
		Type: "gitea",
		Config: map[string]string{
			"url":          cfg.URL,
			"content_type": cfg.ContentType,
			"secret":       secret,
			"insecure_ssl": "0",
		},
		Events: cfg.Events,
		Active: cfg.Active,
	}
	body, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	endpoint, err := c.webhooksEndpoint(scope)
	if err != nil {
		return err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPatch, fmt.Sprintf("%s/%d", endpoint, id), bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", fmt.Sprintf("token %s", c.token))
	req.Header.Set("Content-Type", "application/json")
	resp, err := c.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("forgejo returned %s", resp.Status)
	}
	return nil
}
// webhooksEndpoint builds the hooks URL for scope. Instance-level hook
// management is not supported here.
func (c *Client) webhooksEndpoint(scope Scope) (string, error) {
	var segments []string
	switch scope.Level {
	case ScopeRepository:
		if scope.Owner == "" || scope.Name == "" {
			return "", errors.New("repository scope requires owner and name")
		}
		segments = []string{"api", "v1", "repos", scope.Owner, scope.Name, "hooks"}
	case ScopeOrganization:
		if scope.Owner == "" {
			return "", errors.New("organization scope requires owner")
		}
		segments = []string{"api", "v1", "orgs", scope.Owner, "hooks"}
	default:
		return "", fmt.Errorf("webhook management not supported for scope level %q", scope.Level)
	}
	clone := *c.baseURL
	clone.Path = path.Join(append([]string{clone.Path}, segments...)...)
	return clone.String(), nil
}
// webhookRequestPayload is the request body for hook create/update calls.
type webhookRequestPayload struct {
	Type   string            `json:"type"`
	Config map[string]string `json:"config"`
	Events []string          `json:"events"`
	Active bool              `json:"active"`
}
// runnersEndpoint builds the runners listing URL for scope.
func (c *Client) runnersEndpoint(scope Scope) (string, error) {
	var segments []string
	switch scope.Level {
	case ScopeRepository:
		if scope.Owner == "" || scope.Name == "" {
			return "", errors.New("repository scope requires owner and name")
		}
		segments = []string{"api", "v1", "repos", scope.Owner, scope.Name, "actions", "runners"}
	case ScopeOrganization:
		if scope.Owner == "" {
			return "", errors.New("organization scope requires owner")
		}
		segments = []string{"api", "v1", "orgs", scope.Owner, "actions", "runners"}
	case ScopeInstance:
		segments = []string{"api", "v1", "actions", "runners"}
	default:
		return "", fmt.Errorf("unsupported scope level %q", scope.Level)
	}
	clone := *c.baseURL
	clone.Path = path.Join(append([]string{clone.Path}, segments...)...)
	return clone.String(), nil
}
// runJobsEndpoint builds the runner jobs URL for scope. The instance level
// is not supported by this endpoint.
func (c *Client) runJobsEndpoint(scope Scope) (string, error) {
	var segments []string
	switch scope.Level {
	case ScopeRepository:
		if scope.Owner == "" || scope.Name == "" {
			return "", errors.New("repository scope requires owner and name")
		}
		segments = []string{"api", "v1", "repos", scope.Owner, scope.Name, "actions", "runners", "jobs"}
	case ScopeOrganization:
		if scope.Owner == "" {
			return "", errors.New("organization scope requires owner")
		}
		segments = []string{"api", "v1", "orgs", scope.Owner, "actions", "runners", "jobs"}
	default:
		return "", fmt.Errorf("run jobs not supported for scope level %q", scope.Level)
	}
	clone := *c.baseURL
	clone.Path = path.Join(append([]string{clone.Path}, segments...)...)
	return clone.String(), nil
}

View file

@ -0,0 +1,460 @@
package nsc
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log/slog"
"os/exec"
"strings"
"time"
"github.com/google/uuid"
"golang.org/x/sync/semaphore"
)
// Options configures a Dispatcher. Zero values fall back to defaults in
// NewDispatcher (name prefix "nscloud-", executor "shell", macOS image id
// "tahoe", arch "arm64", 4 parallel launches, 30m duration).
type Options struct {
	BinaryPath       string        // path to the nsc binary (required)
	DefaultImage     string        // runner container image (required)
	DefaultMachine   string
	DefaultDuration  time.Duration
	WorkDir          string
	MaxParallel      int64 // concurrent launch limit
	RunnerNamePrefix string
	Executor         string
	Network          string
	ComputeBaseURL   string // Namespace Compute API endpoint (used for macOS)
	MacosBaseImageID string
	MacosMachineArch string
	Logger           *slog.Logger // nil means discard
}

// LaunchRequest describes one runner to launch against a Forgejo instance.
type LaunchRequest struct {
	Token       string // Forgejo registration token (required)
	InstanceURL string // Forgejo URL reachable from the runner (required)
	Labels      []string
	Duration    time.Duration
	MachineType string
	Image       string
	ExtraEnv    map[string]string
}

// Dispatcher launches Namespace instances, bounding concurrency with a
// weighted semaphore.
type Dispatcher struct {
	opts Options
	sem  *semaphore.Weighted
	log  *slog.Logger
}
// NewDispatcher validates opts, applies defaults, and returns a Dispatcher.
// BinaryPath and DefaultImage are required; a nil Logger discards output.
func NewDispatcher(opts Options) (*Dispatcher, error) {
	if opts.BinaryPath == "" {
		return nil, errors.New("nsc binary path is required")
	}
	if opts.DefaultImage == "" {
		return nil, errors.New("default Namespace runner image is required")
	}
	if opts.RunnerNamePrefix == "" {
		opts.RunnerNamePrefix = "nscloud-"
	}
	if opts.Executor == "" {
		opts.Executor = "shell"
	}
	if opts.MacosBaseImageID == "" {
		opts.MacosBaseImageID = "tahoe"
	}
	if opts.MacosMachineArch == "" {
		opts.MacosMachineArch = "arm64"
	}
	if opts.MaxParallel <= 0 {
		opts.MaxParallel = 4
	}
	if opts.DefaultDuration == 0 {
		opts.DefaultDuration = 30 * time.Minute
	}
	logger := opts.Logger
	if logger == nil {
		// Discard rather than nil-check at every call site.
		logger = slog.New(slog.NewTextHandler(io.Discard, nil))
	}
	return &Dispatcher{
		opts: opts,
		sem:  semaphore.NewWeighted(opts.MaxParallel),
		log:  logger,
	}, nil
}
// LaunchRunner provisions one ephemeral Namespace instance, bootstraps a
// Forgejo runner on it, and blocks until the work finishes (or the duration
// elapses). It returns the generated runner name.
//
// Routing is label-driven: Windows labels dispatch via WinRM, macOS labels
// via the Compute API (falling back to `nsc create` + ssh), and everything
// else via `nsc run` with the Alpine bootstrap script from bootstrapScript.
func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (string, error) {
	if req.Token == "" {
		return "", errors.New("registration token is required")
	}
	if req.InstanceURL == "" {
		return "", errors.New("forgejo instance url is required")
	}
	// Bound concurrent launches to Options.MaxParallel; Acquire blocks until
	// a slot frees or ctx is cancelled.
	if err := d.sem.Acquire(ctx, 1); err != nil {
		return "", err
	}
	defer d.sem.Release(1)
	runnerName := d.generateName()
	duration := req.Duration
	if duration == 0 {
		duration = d.opts.DefaultDuration
	}
	// Fall back to dispatcher-wide defaults when the request leaves these unset.
	machineType := choose(req.MachineType, d.opts.DefaultMachine)
	image := choose(req.Image, d.opts.DefaultImage)
	if hasWindowsLabel(req.Labels) {
		if err := d.launchWindowsRunnerViaWinRM(ctx, runnerName, req, duration, machineType); err != nil {
			return "", err
		}
		return runnerName, nil
	}
	if hasMacOSLabel(req.Labels) {
		// Compute macOS shapes differ from the Linux "run" defaults. If the request
		// didn't specify a machine type, ensure we pick a macOS-valid default.
		if machineType == "" || machineType == d.opts.DefaultMachine {
			machineType = "12x28"
		}
		// Prefer the Compute API path because it uses the service token (NSC_TOKEN_FILE)
		// and does not require an interactive `nsc login` session.
		if err := d.launchMacOSRunner(ctx, runnerName, req, duration, machineType); err != nil {
			d.log.Warn("macos compute launch failed; falling back to nsc create+ssh", "runner", runnerName, "err", err)
			if err := d.launchMacOSRunnerViaNSC(ctx, runnerName, req, duration, machineType); err != nil {
				return "", err
			}
		}
		return runnerName, nil
	}
	// Linux path: environment consumed by bootstrapScript inside the instance.
	env := map[string]string{
		"FORGEJO_INSTANCE_URL":  req.InstanceURL,
		"FORGEJO_RUNNER_TOKEN":  req.Token,
		"FORGEJO_RUNNER_NAME":   runnerName,
		"FORGEJO_RUNNER_LABELS": strings.Join(req.Labels, ","),
		"FORGEJO_RUNNER_EXEC":   d.opts.Executor,
	}
	// Request-supplied entries may override the defaults above.
	for k, v := range req.ExtraEnv {
		env[k] = v
	}
	if _, ok := env["NSC_CACHE_PATH"]; !ok {
		// NOTE(review): defaults the Namespace cache path to the Nix store —
		// presumably to persist /nix/store across runs; confirm with infra.
		env["NSC_CACHE_PATH"] = "/nix/store"
	}
	script := d.bootstrapScript()
	args := []string{
		"run",
		"--wait", // block until the instance's workload finishes
		"--output",
		"json",
		"--duration", duration.String(),
		"--image", image,
		"--name", runnerName,
		"--user", "root",
	}
	if machineType != "" {
		args = append(args, "--machine_type", machineType)
	}
	if d.opts.Network != "" {
		args = append(args, "--network", d.opts.Network)
	}
	// Skip empty values so we never pass "-e KEY=" to nsc.
	for key, value := range env {
		if value == "" {
			continue
		}
		args = append(args, "-e", fmt.Sprintf("%s=%s", key, value))
	}
	if d.opts.WorkDir != "" {
		args = append(args, "-e", fmt.Sprintf("FORGEJO_RUNNER_WORKDIR=%s", d.opts.WorkDir))
	}
	args = append(args, "--", "/bin/sh", "-c", script)
	cmd := exec.CommandContext(ctx, d.opts.BinaryPath, args...)
	var buf bytes.Buffer
	cmd.Stdout = &buf
	cmd.Stderr = &buf
	start := time.Now()
	d.log.Info("launching Namespace runner",
		"runner", runnerName,
		"machine_type", machineType,
		"image", image,
	)
	err := cmd.Run()
	if err != nil {
		return "", fmt.Errorf("nsc run failed: %w\n%s", err, buf.String())
	}
	if output := strings.TrimSpace(buf.String()); output != "" {
		d.log.Info("runner output", "runner", runnerName, "output", output)
	}
	d.log.Info("runner completed",
		"runner", runnerName,
		"duration", time.Since(start),
	)
	// Best-effort teardown: if the instance ID can be recovered from the run
	// output, wait (up to the instance duration) for it to stop and then
	// destroy it so nothing keeps running after the job.
	if instanceID := parseInstanceID(buf.String()); instanceID != "" {
		// Deliberately detached from ctx so cleanup still runs even if the
		// caller cancels after the job finished.
		waitCtx, cancel := context.WithTimeout(context.Background(), duration)
		defer cancel()
		stopped := d.waitForInstanceStop(waitCtx, runnerName, instanceID, duration)
		if !stopped {
			d.log.Warn("runner did not stop before timeout", "runner", runnerName, "instance", instanceID)
		}
		d.destroyInstance(waitCtx, runnerName, instanceID)
	}
	return runnerName, nil
}
// generateName returns a fresh runner name: the configured prefix followed
// by the first 12 hex characters of a random UUID.
func (d *Dispatcher) generateName() string {
	suffix := strings.ReplaceAll(uuid.NewString(), "-", "")[:12]
	return d.opts.RunnerNamePrefix + suffix
}
// parseInstanceID extracts a Namespace instance ID from `nsc run` output.
// It first tries the JSON payload ({"cluster_id": ...}); when that fails it
// scans for a plain-text "ID:" marker and returns the first
// whitespace-delimited token after it. Returns "" when no ID can be found.
func parseInstanceID(output string) string {
	if blob := extractJSON(output); blob != "" {
		var payload struct {
			ClusterID string `json:"cluster_id"`
		}
		if json.Unmarshal([]byte(blob), &payload) == nil && payload.ClusterID != "" {
			return payload.ClusterID
		}
	}
	// Plain-text fallback: everything after the first "ID:" marker.
	_, after, found := strings.Cut(output, "ID:")
	if !found {
		return ""
	}
	tokens := strings.Fields(after)
	if len(tokens) == 0 {
		return ""
	}
	return tokens[0]
}

// extractJSON returns the widest substring of output starting at the first
// '[' or '{' and ending at the last ']' or '}', or "" when no such span
// exists. It does not validate that the span is well-formed JSON.
func extractJSON(output string) string {
	s := strings.TrimSpace(output)
	if s == "" {
		return ""
	}
	first := strings.IndexAny(s, "[{")
	last := strings.LastIndexAny(s, "]}")
	if first == -1 || last == -1 || last < first {
		return ""
	}
	return s[first : last+1]
}
// describeResponse mirrors one entry of `nsc describe --output json`.
type describeResponse struct {
	Resource    string                    `json:"resource"`
	PerResource map[string]describeTarget `json:"per_resource"`
}

// describeTarget is the state of a single resource within a describeResponse.
type describeTarget struct {
	Tombstone string              `json:"tombstone"` // non-empty once the resource was destroyed
	Container []describeContainer `json:"container"`
}

// describeContainer is the per-container status within a describeTarget.
type describeContainer struct {
	Status       string `json:"status"`        // e.g. "stopped" (see instanceStopped)
	TerminatedAt string `json:"terminated_at"` // non-empty once the container exited
}
// instanceStopped reports whether `nsc describe` output shows the instance as
// stopped. Any tombstoned resource short-circuits to true; otherwise every
// container must be "stopped" or carry a termination timestamp. Targets with
// no containers are treated as stopped. Malformed or empty output reports
// false (still running).
func instanceStopped(output string) bool {
	blob := extractJSON(output)
	if blob == "" {
		return false
	}
	var entries []describeResponse
	if err := json.Unmarshal([]byte(blob), &entries); err != nil || len(entries) == 0 {
		return false
	}
	for _, entry := range entries {
		for _, target := range entry.PerResource {
			if target.Tombstone != "" {
				return true
			}
			for _, c := range target.Container {
				if c.Status != "stopped" && c.TerminatedAt == "" {
					return false
				}
			}
		}
	}
	return true
}
// waitForInstanceStop polls `nsc describe` every 10 seconds until the
// instance reports stopped, the timeout elapses, or ctx is cancelled. A
// describe failure aborts the wait immediately. It returns true only when the
// instance was actually observed stopped.
func (d *Dispatcher) waitForInstanceStop(ctx context.Context, runnerName, instanceID string, timeout time.Duration) bool {
	if timeout <= 0 {
		timeout = d.opts.DefaultDuration
	}
	deadline := time.Now().Add(timeout)
	tick := time.NewTicker(10 * time.Second)
	defer tick.Stop()
	for {
		switch stopped, err := d.checkInstanceStopped(ctx, instanceID); {
		case err != nil:
			d.log.Warn("runner stop check failed", "runner", runnerName, "instance", instanceID, "err", err)
			return false
		case stopped:
			return true
		}
		if time.Now().After(deadline) {
			return false
		}
		select {
		case <-ctx.Done():
			return false
		case <-tick.C:
		}
	}
}
// checkInstanceStopped runs `nsc describe` once and reports whether the
// instance is stopped. A command failure whose output mentions "destroyed" or
// "not found" counts as stopped rather than an error.
func (d *Dispatcher) checkInstanceStopped(ctx context.Context, instanceID string) (bool, error) {
	var out bytes.Buffer
	cmd := exec.CommandContext(ctx, d.opts.BinaryPath, "describe", "--output", "json", instanceID)
	cmd.Stdout = &out
	cmd.Stderr = &out
	err := cmd.Run()
	if err == nil {
		return instanceStopped(out.String()), nil
	}
	lowered := strings.ToLower(out.String())
	if strings.Contains(lowered, "destroyed") || strings.Contains(lowered, "not found") {
		// Already gone: nothing left to wait for.
		return true, nil
	}
	return false, fmt.Errorf("nsc describe failed: %w\n%s", err, strings.TrimSpace(out.String()))
}
// destroyInstance force-destroys the instance via `nsc destroy --force` and
// logs the outcome. Failures are logged but never propagated — this is
// best-effort cleanup.
func (d *Dispatcher) destroyInstance(ctx context.Context, runnerName, instanceID string) {
	var out bytes.Buffer
	cmd := exec.CommandContext(ctx, d.opts.BinaryPath, "destroy", "--force", instanceID)
	cmd.Stdout = &out
	cmd.Stderr = &out
	if err := cmd.Run(); err != nil {
		d.log.Warn("runner destroy failed", "runner", runnerName, "instance", instanceID, "err", err, "output", strings.TrimSpace(out.String()))
		return
	}
	// Include the command output only when there is any.
	attrs := []any{"runner", runnerName, "instance", instanceID}
	if trimmed := strings.TrimSpace(out.String()); trimmed != "" {
		attrs = append(attrs, "output", trimmed)
	}
	d.log.Info("runner destroyed", attrs...)
}
// choose returns the first argument that is non-empty after trimming
// whitespace (the original, untrimmed value is returned), or "" when every
// argument is blank.
func choose(values ...string) string {
	for _, candidate := range values {
		if strings.TrimSpace(candidate) == "" {
			continue
		}
		return candidate
	}
	return ""
}
// bootstrapScript returns the POSIX shell script piped to `/bin/sh -c` on a
// fresh Linux instance. It installs missing tools via apk (so the base image
// is assumed Alpine-like), writes a forgejo-runner config (runner.yaml),
// registers against ${FORGEJO_INSTANCE_URL}, then runs either a single job or
// a daemon depending on ${FORGEJO_RUNNER_MODE} (default: one-job).
//
// The script is assembled in three literal segments so the shell
// label-resolution loop sits between the two heredocs that write runner.yaml.
func (d *Dispatcher) bootstrapScript() string {
	var builder strings.Builder
	// Segment 1: strict mode, workdir, tool bootstrap (node/sudo/curl/xz via
	// apk), and the static head of runner.yaml.
	builder.WriteString(`set -euo pipefail
mkdir -p "${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}"
cd "${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}"
if ! command -v node >/dev/null 2>&1; then
apk add --no-cache nodejs npm >/dev/null
fi
if ! command -v sudo >/dev/null 2>&1; then
apk add --no-cache sudo bash >/dev/null
fi
if ! command -v curl >/dev/null 2>&1; then
apk add --no-cache curl >/dev/null
fi
if ! command -v xz >/dev/null 2>&1; then
apk add --no-cache xz >/dev/null
fi
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
node --version >/dev/null
cat > runner.yaml <<'EOF'
log:
level: info
runner:
file: .runner
capacity: 1
name: ${FORGEJO_RUNNER_NAME}
labels:
EOF
`)
	// Segment 2: normalize the executor ("shell" is treated as "host"), then
	// expand FORGEJO_RUNNER_LABELS into runner.yaml entries plus a
	// comma-joined list for `forgejo-runner register --labels`.
	builder.WriteString(`runner_exec="${FORGEJO_RUNNER_EXEC:-host}"
if [ "$runner_exec" = "shell" ]; then
runner_exec="host"
fi
resolved_labels=""
for label in ${FORGEJO_RUNNER_LABELS//,/ } ; do
if [ -z "${label}" ]; then
continue
fi
case "${label}" in
*:*) resolved="${label}" ;;
*)
if [ "$runner_exec" = "host" ]; then
resolved="${label}:host"
else
resolved="${label}:${runner_exec}"
fi
;;
esac
echo " - ${resolved}" >> runner.yaml
if [ -z "${resolved_labels}" ]; then
resolved_labels="${resolved}"
else
resolved_labels="${resolved_labels},${resolved}"
fi
done
`)
	// Segment 3: finish runner.yaml (cache disabled), register, and run in
	// the selected mode.
	builder.WriteString(`cat >> runner.yaml <<'EOF'
cache:
enabled: false
EOF
forgejo-runner register \
--no-interactive \
--instance "${FORGEJO_INSTANCE_URL}" \
--token "${FORGEJO_RUNNER_TOKEN}" \
--name "${FORGEJO_RUNNER_NAME}" \
--labels "${resolved_labels}" \
--config runner.yaml
runner_mode="${FORGEJO_RUNNER_MODE:-one-job}"
case "$runner_mode" in
one-job)
forgejo-runner one-job --config runner.yaml
;;
daemon)
forgejo-runner daemon --config runner.yaml
;;
*)
echo "Unknown FORGEJO_RUNNER_MODE: ${runner_mode}" >&2
exit 1
;;
esac
`)
	return builder.String()
}

View file

@ -0,0 +1,708 @@
package nsc
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
computev1betaconnect "buf.build/gen/go/namespace/cloud/connectrpc/go/proto/namespace/cloud/compute/v1beta/computev1betaconnect"
computev1beta "buf.build/gen/go/namespace/cloud/protocolbuffers/go/proto/namespace/cloud/compute/v1beta"
stdlib "buf.build/gen/go/namespace/cloud/protocolbuffers/go/proto/namespace/stdlib"
"connectrpc.com/connect"
"golang.org/x/crypto/ssh"
"google.golang.org/protobuf/types/known/timestamppb"
)
// hasMacOSLabel reports whether any label (after trimming whitespace) starts
// with the "namespace-profile-macos-" prefix used by macOS runner profiles.
func hasMacOSLabel(labels []string) bool {
	const prefix = "namespace-profile-macos-"
	for _, raw := range labels {
		if strings.HasPrefix(strings.TrimSpace(raw), prefix) {
			return true
		}
	}
	return false
}
// lockedBuffer is a bytes.Buffer guarded by a mutex so it can safely receive
// concurrent writes while being polled for length/contents from another
// goroutine.
type lockedBuffer struct {
	mu sync.Mutex
	b  bytes.Buffer
}

// Write appends p to the buffer under the lock.
func (lb *lockedBuffer) Write(p []byte) (int, error) {
	lb.mu.Lock()
	n, err := lb.b.Write(p)
	lb.mu.Unlock()
	return n, err
}

// Len reports the number of buffered bytes under the lock.
func (lb *lockedBuffer) Len() int {
	lb.mu.Lock()
	n := lb.b.Len()
	lb.mu.Unlock()
	return n
}

// String returns a copy of the buffered contents under the lock.
func (lb *lockedBuffer) String() string {
	lb.mu.Lock()
	s := lb.b.String()
	lb.mu.Unlock()
	return s
}
// macosSupportDiskSelectors maps a configured base-image ID to the Compute
// API selectors that pick a macOS "support disk". It accepts either explicit
// selectors (e.g. "macos.version=26.x,image.with=xcode-26") or a
// human-friendly preset name used by burrow config; unknown presets fall back
// to macOS 26.
func macosSupportDiskSelectors(baseImageID string) []*stdlib.Label {
	id := strings.TrimSpace(baseImageID)
	if id == "" {
		id = "tahoe"
	}
	// Explicit selector syntax: parse "name=value" pairs, skipping malformed
	// entries. An entirely malformed list falls through to the presets.
	if strings.Contains(id, "=") {
		var labels []*stdlib.Label
		for _, raw := range strings.Split(id, ",") {
			name, value, ok := strings.Cut(strings.TrimSpace(raw), "=")
			name, value = strings.TrimSpace(name), strings.TrimSpace(value)
			if ok && name != "" && value != "" {
				labels = append(labels, &stdlib.Label{Name: name, Value: value})
			}
		}
		if len(labels) > 0 {
			return labels
		}
	}
	// Human-friendly presets used by burrow config.
	switch strings.ToLower(id) {
	case "sonoma", "macos-14", "macos14", "14":
		return []*stdlib.Label{{Name: "macos.version", Value: "14.x"}}
	case "sequoia", "macos-15", "macos15", "15":
		return []*stdlib.Label{{Name: "macos.version", Value: "15.x"}}
	case "tahoe", "macos-26", "macos26", "26":
		// Constrain to the Xcode 26 support disk explicitly, since Apple
		// builds depend on Xcode being present and Compute currently errors
		// if it can't resolve a support disk selection.
		return []*stdlib.Label{
			{Name: "macos.version", Value: "26.x"},
			{Name: "image.with", Value: "xcode-26"},
		}
	default:
		return []*stdlib.Label{{Name: "macos.version", Value: "26.x"}}
	}
}
// macosComputeBaseImageID canonicalizes a configured base-image ID for the
// Compute API. Raw selector strings (containing '=') yield "" because no
// single base image ID can safely be inferred from them; unrecognized names
// pass through unchanged (trimmed).
func macosComputeBaseImageID(baseImageID string) string {
	id := strings.TrimSpace(baseImageID)
	switch {
	case id == "":
		return "tahoe"
	case strings.Contains(id, "="):
		// Selectors were given directly; no canonical image ID exists.
		return ""
	}
	switch strings.ToLower(id) {
	case "sonoma", "macos-14", "macos14", "14":
		return "sonoma"
	case "sequoia", "macos-15", "macos15", "15":
		return "sequoia"
	case "tahoe", "macos-26", "macos26", "26":
		return "tahoe"
	}
	return id
}
// nscBearerTokenFile is the JSON envelope burrow writes for NSC_TOKEN_FILE:
// {"bearer_token":"..."}.
type nscBearerTokenFile struct {
	BearerToken string `json:"bearer_token"`
}

// readNSCBearerToken loads the Namespace API bearer token from the file named
// by NSC_TOKEN_FILE. The file may be either the burrow JSON envelope or a raw
// token; surrounding whitespace is ignored.
func readNSCBearerToken() (string, error) {
	path := os.Getenv("NSC_TOKEN_FILE")
	if path == "" {
		return "", errors.New("NSC_TOKEN_FILE is required for macos runners")
	}
	raw, err := os.ReadFile(path)
	if err != nil {
		return "", fmt.Errorf("read NSC_TOKEN_FILE: %w", err)
	}
	token := strings.TrimSpace(string(raw))
	if token == "" {
		return "", errors.New("NSC_TOKEN_FILE is empty")
	}
	// Prefer the JSON envelope when it parses and carries a token; otherwise
	// treat the whole (trimmed) file as the bearer token itself.
	var envelope nscBearerTokenFile
	if json.Unmarshal([]byte(token), &envelope) == nil && envelope.BearerToken != "" {
		return envelope.BearerToken, nil
	}
	return token, nil
}
// parseMachineTypeCPUxMemGB parses a "CPUxMemoryGB" shape string such as
// "12x28" into a vCPU count and a memory size in megabytes. Any string that
// does not contain exactly one 'x'-separated pair of integers is rejected.
func parseMachineTypeCPUxMemGB(machineType string) (vcpu int32, memoryMB int32, err error) {
	cpuPart, memPart, ok := strings.Cut(machineType, "x")
	if !ok || strings.Contains(memPart, "x") {
		return 0, 0, fmt.Errorf("invalid machine_type %q: expected CPUxMemoryGB (e.g. 12x28)", machineType)
	}
	cpu, err := strconv.ParseInt(cpuPart, 10, 32)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid machine_type %q: cpu: %w", machineType, err)
	}
	memGB, err := strconv.ParseInt(memPart, 10, 32)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid machine_type %q: memory: %w", machineType, err)
	}
	return int32(cpu), int32(memGB * 1024), nil
}
// launchMacOSRunner provisions a macOS instance through the Namespace Compute
// API using the service bearer token from NSC_TOKEN_FILE, runs the macOS
// bootstrap script as the instance's JOB application, waits (bounded by ttl)
// for the instance to stop, and finally destroys it.
//
// machineType must be "CPUxMemGB" (e.g. "12x28"); ttl is used both as the
// instance deadline and the stop-wait budget.
func (d *Dispatcher) launchMacOSRunner(ctx context.Context, runnerName string, req LaunchRequest, ttl time.Duration, machineType string) error {
	if machineType == "" {
		return errors.New("machine_type is required for macos runners")
	}
	vcpu, memoryMB, err := parseMachineTypeCPUxMemGB(machineType)
	if err != nil {
		return err
	}
	bearer, err := readNSCBearerToken()
	if err != nil {
		return err
	}
	httpClient := &http.Client{Timeout: 60 * time.Second}
	client := computev1betaconnect.NewComputeServiceClient(httpClient, d.opts.ComputeBaseURL)
	workdir := d.opts.WorkDir
	if strings.TrimSpace(workdir) == "" {
		workdir = "/tmp/forgejo-runner"
	}
	// Environment consumed by macosBootstrapScript inside the VM; ExtraEnv
	// entries below may override any of these.
	env := map[string]string{
		"FORGEJO_INSTANCE_URL":   req.InstanceURL,
		"FORGEJO_RUNNER_TOKEN":   req.Token,
		"FORGEJO_RUNNER_NAME":    runnerName,
		"FORGEJO_RUNNER_LABELS":  strings.Join(req.Labels, ","),
		"FORGEJO_RUNNER_EXEC":    d.opts.Executor,
		"FORGEJO_RUNNER_WORKDIR": workdir,
	}
	for k, v := range req.ExtraEnv {
		env[k] = v
	}
	// Best-effort caching: workflows call Scripts/nscloud-cache.sh, which is a
	// no-op unless NSC_CACHE_PATH is set. This may still be skipped if spacectl
	// lacks credentials, but setting the path is harmless and keeps behavior
	// consistent across macOS / Linux runners.
	if _, ok := env["NSC_CACHE_PATH"]; !ok {
		env["NSC_CACHE_PATH"] = "/Users/runner/.cache/nscloud"
	}
	// Hard deadline: the API side enforces destruction at this timestamp even
	// if this process disappears before cleanup.
	deadline := timestamppb.New(time.Now().Add(ttl))
	createReq := &computev1beta.CreateInstanceRequest{
		Shape: &computev1beta.InstanceShape{
			VirtualCpu:      vcpu,
			MemoryMegabytes: memoryMB,
			MachineArch:     d.opts.MacosMachineArch,
			Os:              "macos",
			// Namespace macOS compute requires selectors to pick the base image
			// ("support disk"), otherwise instance creation fails.
			Selectors: macosSupportDiskSelectors(d.opts.MacosBaseImageID),
		},
		DocumentedPurpose: fmt.Sprintf("burrow forgejo runner %s", runnerName),
		Deadline:          deadline,
		// Labels tag the instance for later identification/cleanup tooling.
		Labels: []*stdlib.Label{
			{Name: "nsc.source", Value: "forgejo-nsc"},
			{Name: "burrow.service", Value: "forgejo-runner"},
			{Name: "burrow.runner", Value: runnerName},
		},
		Applications: []*computev1beta.ApplicationRequest{
			{
				Name:         "forgejo-runner",
				Command:      "/bin/bash",
				Args:         []string{"-lc", macosBootstrapScript()},
				Environment:  env,
				WorkloadType: computev1beta.ApplicationRequest_JOB,
			},
		},
	}
	if imageID := macosComputeBaseImageID(d.opts.MacosBaseImageID); imageID != "" {
		createReq.Experimental = &computev1beta.CreateInstanceRequest_ExperimentalFeatures{
			MacosBaseImageId: imageID,
		}
	}
	d.log.Info("launching Namespace macos runner",
		"runner", runnerName,
		"compute_base_url", d.opts.ComputeBaseURL,
		"macos_base_image_id", d.opts.MacosBaseImageID,
		"shape", fmt.Sprintf("%dx%d", vcpu, memoryMB/1024),
		"arch", d.opts.MacosMachineArch,
	)
	reqCreate := connect.NewRequest(createReq)
	reqCreate.Header().Set("Authorization", "Bearer "+bearer)
	resp, err := client.CreateInstance(ctx, reqCreate)
	if err != nil {
		return fmt.Errorf("compute create instance failed: %w", err)
	}
	if resp.Msg == nil || resp.Msg.Metadata == nil {
		return errors.New("compute create instance returned no metadata")
	}
	instanceID := resp.Msg.Metadata.InstanceId
	// Wait for the instance to stop (the workload is a JOB, so the instance
	// is expected to stop when it exits — see waitForMacOSRunnerStop), then
	// always tear the instance down. Cleanup runs on a fresh context so it
	// still happens if ctx was cancelled during the wait.
	waitErr := d.waitForMacOSRunnerStop(ctx, client, bearer, runnerName, instanceID, ttl)
	d.destroyComputeInstance(context.Background(), client, bearer, runnerName, instanceID)
	return waitErr
}
// runMacOSComputeSSHScript executes script on a Compute instance over SSH.
// It fetches ephemeral SSH credentials from the Compute API (GetSSHConfig),
// feeds the script to a bare `/bin/bash` via stdin, and — when the remote
// side rejects exec requests or drops into an interactive login shell —
// falls back to an interactive PTY session that streams the script as base64
// chunks into a temp file and then runs it.
func (d *Dispatcher) runMacOSComputeSSHScript(ctx context.Context, runnerName, instanceID, script string) error {
	bearer, err := readNSCBearerToken()
	if err != nil {
		return err
	}
	httpClient := &http.Client{Timeout: 60 * time.Second}
	client := computev1betaconnect.NewComputeServiceClient(httpClient, d.opts.ComputeBaseURL)
	getReq := connect.NewRequest(&computev1beta.GetSSHConfigRequest{
		InstanceId: instanceID,
		// TargetContainer is optional. Keep it empty to run commands in the default instance environment.
	})
	getReq.Header().Set("Authorization", "Bearer "+bearer)
	resp, err := client.GetSSHConfig(ctx, getReq)
	if err != nil {
		return fmt.Errorf("compute get ssh config failed: %w", err)
	}
	// Validate every field we rely on before dialing.
	if resp.Msg == nil {
		return errors.New("compute get ssh config returned empty response")
	}
	if resp.Msg.Endpoint == "" {
		return errors.New("compute get ssh config returned empty endpoint")
	}
	if len(resp.Msg.SshPrivateKey) == 0 {
		return errors.New("compute get ssh config returned empty ssh private key")
	}
	if strings.TrimSpace(resp.Msg.Username) == "" {
		return errors.New("compute get ssh config returned empty username")
	}
	signer, err := ssh.ParsePrivateKey(resp.Msg.SshPrivateKey)
	if err != nil {
		return fmt.Errorf("parse ssh private key: %w", err)
	}
	addr := fmt.Sprintf("%s:22", resp.Msg.Endpoint)
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return fmt.Errorf("dial ssh endpoint: %w", err)
	}
	defer conn.Close()
	sshCfg := &ssh.ClientConfig{
		User:            resp.Msg.Username,
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // Endpoint is short-lived and key is delivered out-of-band.
		Timeout:         30 * time.Second,
	}
	c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshCfg)
	if err != nil {
		return fmt.Errorf("ssh client conn: %w", err)
	}
	clientSSH := ssh.NewClient(c, chans, reqs)
	defer clientSSH.Close()
	session, err := clientSSH.NewSession()
	if err != nil {
		return fmt.Errorf("ssh new session: %w", err)
	}
	defer session.Close()
	var buf bytes.Buffer
	session.Stdout = &buf
	session.Stderr = &buf
	session.Stdin = strings.NewReader(script)
	// Feed the bootstrap script via stdin so we don't need to quote/escape it.
	//
	// Note: Some SSH servers do not reliably parse exec strings with arguments.
	// Running bare `/bin/bash` still reads from stdin and avoids argument parsing.
	if err := session.Run("/bin/bash"); err != nil {
		outRaw := buf.String()
		out := strings.TrimSpace(outRaw)
		// Some SSH servers reject exec requests and only allow interactive shells,
		// and others will "succeed" but still interpret stdin under the default
		// login shell (showing the zsh banner / prompts).
		//
		// In those cases, retry via Shell() with a PTY.
		exitStatus := 0
		exitErr, isExitErr := err.(*ssh.ExitError)
		if isExitErr {
			exitStatus = exitErr.ExitStatus()
		}
		// Heuristics for "the exec actually landed in an interactive shell".
		looksInteractive := strings.Contains(outRaw, "The default interactive shell is now zsh") ||
			strings.Contains(outRaw, " runner$ ") ||
			strings.Contains(outRaw, "bash-3.2$")
		shouldFallback := !isExitErr || looksInteractive
		if shouldFallback {
			d.log.Warn("compute ssh exec bootstrap failed; retrying via interactive shell",
				"runner", runnerName,
				"instance", instanceID,
				"exit_status", exitStatus,
			)
			session2, err2 := clientSSH.NewSession()
			if err2 != nil {
				return fmt.Errorf("ssh new session (fallback): %w", err2)
			}
			defer session2.Close()
			// bytes.Buffer isn't safe for concurrent writes + reads; the SSH session
			// writes from background goroutines. Wrap it so we can poll for a prompt
			// before sending commands.
			lb := &lockedBuffer{}
			session2.Stdout = lb
			session2.Stderr = lb
			in, err2 := session2.StdinPipe()
			if err2 != nil {
				return fmt.Errorf("ssh stdin pipe (fallback): %w", err2)
			}
			// Request a PTY to match interactive semantics even when the caller
			// doesn't have a local terminal.
			_ = session2.RequestPty("xterm", 24, 80, nil)
			if err2 := session2.Shell(); err2 != nil {
				return fmt.Errorf("ssh shell (fallback): %w", err2)
			}
			// Wait briefly for the prompt/banner so the first command isn't dropped.
			// We also emit a sentinel `echo` to verify the TTY is live.
			deadline := time.Now().Add(3 * time.Second)
			for time.Now().Before(deadline) {
				n := lb.Len()
				if n > 0 {
					break
				}
				time.Sleep(50 * time.Millisecond)
			}
			// Stream the script then exit. Prefer LF line endings; macOS shells and
			// PTYs can treat CRLF as literal CR characters (breaking heredoc
			// delimiters and quoting).
			writeTTY := func(s string) {
				if s == "" {
					return
				}
				s = strings.ReplaceAll(s, "\r\n", "\n")
				_, _ = io.WriteString(in, s)
			}
			scriptTTY := strings.ReplaceAll(script, "\r\n", "\n")
			// Cut down noise in logs and reduce the chance of ZSH line-editing
			// behavior corrupting long inputs.
			writeTTY("stty -echo 2>/dev/null || true\n")
			writeTTY("echo BURROW_BOOTSTRAP_TTY_OK\n")
			// Avoid heredocs for the script itself (PTY newline handling is fragile).
			// Instead, stream base64 in short chunks to a file, then decode and run it.
			enc := base64.StdEncoding.EncodeToString([]byte(scriptTTY))
			idSafe := strings.ReplaceAll(instanceID, "-", "_")
			b64Path := "/tmp/burrow-bootstrap-" + idSafe + ".b64"
			shPath := "/tmp/burrow-bootstrap-" + idSafe + ".sh"
			writeTTY("rm -f " + b64Path + " " + shPath + "\n")
			writeTTY(": > " + b64Path + "\n")
			const chunkSize = 80
			for i := 0; i < len(enc); i += chunkSize {
				j := i + chunkSize
				if j > len(enc) {
					j = len(enc)
				}
				chunk := enc[i:j]
				// Base64 chunks contain only [A-Za-z0-9+/=], which are safe to pass
				// unquoted. Avoid quotes entirely so a truncated line can't leave
				// the remote shell in a multi-line continuation state.
				writeTTY("printf %s " + chunk + " >> " + b64Path + "\n")
				time.Sleep(5 * time.Millisecond)
			}
			// macOS uses `base64 -D` (BSD), some environments use `-d` (GNU).
			writeTTY("base64 -D " + b64Path + " > " + shPath + " 2>/dev/null || base64 -d " + b64Path + " > " + shPath + "\n")
			writeTTY("/bin/bash " + shPath + "\n")
			writeTTY("exit\n")
			_ = in.Close()
			if err2 := session2.Wait(); err2 != nil {
				// Keep at most the trailing 16 KiB of output in the error.
				out2 := strings.TrimSpace(lb.String())
				if len(out2) > 16*1024 {
					out2 = out2[len(out2)-16*1024:]
				}
				return fmt.Errorf("compute ssh runner bootstrap failed (shell fallback): %w\n%s", err2, out2)
			}
			d.log.Info("macos runner bootstrap completed via compute ssh shell", "runner", runnerName, "instance", instanceID)
			return nil
		}
		// Keep at most the trailing 16 KiB of output in the error.
		if len(out) > 16*1024 {
			out = out[len(out)-16*1024:]
		}
		return fmt.Errorf("compute ssh runner bootstrap failed: %w\n%s", err, out)
	}
	d.log.Info("macos runner bootstrap completed via compute ssh", "runner", runnerName, "instance", instanceID)
	return nil
}
// waitForMacOSRunnerStop polls the Compute API every 15 seconds until the
// instance stops, the ttl elapses, or ctx is cancelled. Unlike the nsc-CLI
// wait path, transient describe failures are logged and retried rather than
// aborting the wait.
func (d *Dispatcher) waitForMacOSRunnerStop(ctx context.Context, client computev1betaconnect.ComputeServiceClient, bearer, runnerName, instanceID string, ttl time.Duration) error {
	if ttl <= 0 {
		ttl = d.opts.DefaultDuration
	}
	deadline := time.Now().Add(ttl)
	tick := time.NewTicker(15 * time.Second)
	defer tick.Stop()
	for {
		stopped, err := d.checkComputeInstanceStopped(ctx, client, bearer, instanceID)
		switch {
		case err != nil:
			d.log.Warn("macos runner stop check failed", "runner", runnerName, "instance", instanceID, "err", err)
		case stopped:
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("macos runner exceeded ttl (%s) without stopping", ttl)
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-tick.C:
		}
	}
}
// checkComputeInstanceStopped performs a single DescribeInstance call and
// reports whether the instance has stopped. NotFound and DESTROYED count as
// stopped; ERROR also counts as stopped but returns an error summarizing the
// reported shutdown reasons (truncated to keep messages bounded). Any other
// status is stopped only when DestroyedAt is set.
func (d *Dispatcher) checkComputeInstanceStopped(ctx context.Context, client computev1betaconnect.ComputeServiceClient, bearer, instanceID string) (bool, error) {
	describeReq := connect.NewRequest(&computev1beta.DescribeInstanceRequest{InstanceId: instanceID})
	describeReq.Header().Set("Authorization", "Bearer "+bearer)
	resp, err := client.DescribeInstance(ctx, describeReq)
	if err != nil {
		// NotFound means the instance is already gone.
		if connect.CodeOf(err) == connect.CodeNotFound {
			return true, nil
		}
		return false, err
	}
	if resp.Msg == nil || resp.Msg.Metadata == nil {
		return false, errors.New("describe instance returned no metadata")
	}
	switch resp.Msg.Metadata.Status {
	case computev1beta.InstanceMetadata_DESTROYED:
		return true, nil
	case computev1beta.InstanceMetadata_ERROR:
		// Best-effort include shutdown reasons; do not include unbounded output.
		var b strings.Builder
		for _, reason := range resp.Msg.ShutdownReasons {
			if reason == nil {
				continue
			}
			if b.Len() > 0 {
				b.WriteString("; ")
			}
			b.WriteString(reason.String())
			// Stop accumulating once the summary exceeds ~1 KiB.
			if b.Len() > 1024 {
				break
			}
		}
		msg := strings.TrimSpace(b.String())
		if msg == "" {
			msg = "unknown shutdown reason"
		}
		return true, fmt.Errorf("instance entered error state: %s", msg)
	default:
		// Any other status with a destruction timestamp is also terminal.
		if resp.Msg.Metadata.DestroyedAt != nil {
			return true, nil
		}
		return false, nil
	}
}
// destroyComputeInstance best-effort destroys a Compute instance, bounding
// the API call to 30 seconds. NotFound counts as already destroyed; any other
// failure is only logged, never propagated.
func (d *Dispatcher) destroyComputeInstance(ctx context.Context, client computev1betaconnect.ComputeServiceClient, bearer, runnerName, instanceID string) {
	if ctx == nil {
		ctx = context.Background()
	}
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	destroyReq := connect.NewRequest(&computev1beta.DestroyInstanceRequest{InstanceId: instanceID})
	destroyReq.Header().Set("Authorization", "Bearer "+bearer)
	_, err := client.DestroyInstance(ctx, destroyReq)
	switch {
	case err == nil:
		d.log.Info("macos runner destroyed", "runner", runnerName, "instance", instanceID)
	case connect.CodeOf(err) == connect.CodeNotFound:
		d.log.Info("macos runner destroyed", "runner", runnerName, "instance", instanceID, "status", "not_found")
	default:
		d.log.Warn("macos runner destroy failed", "runner", runnerName, "instance", instanceID, "err", err)
	}
}
// macosBootstrapScript returns the bash script run (via `/bin/bash -lc`) as
// the JOB application on a fresh macOS VM. It installs Nix (Determinate
// Systems installer) when missing, enables flakes, builds the forgejo-runner
// binary from source (downloading a Go toolchain if needed), writes
// runner.yaml, registers against ${FORGEJO_INSTANCE_URL}, and runs exactly
// one job before exiting — which ends the JOB and lets the instance stop.
func macosBootstrapScript() string {
	// Keep this script self-contained: it runs on a fresh macOS VM base image.
	var b strings.Builder
	b.WriteString(`set -euo pipefail
workdir="${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}"
mkdir -p "${workdir}"
cd "${workdir}"
export PATH="/usr/local/bin:/opt/homebrew/bin:/usr/bin:/bin:/usr/sbin:/sbin:${PATH}"
if ! command -v curl >/dev/null 2>&1; then
echo "curl is required" >&2
exit 1
fi
if ! command -v nix >/dev/null 2>&1; then
echo "Installing nix (Determinate Systems installer)..."
installer="/tmp/nix-installer.$$"
curl -fsSL -o "${installer}" https://install.determinate.systems/nix
chmod +x "${installer}"
if command -v sudo >/dev/null 2>&1; then
if sudo -n true 2>/dev/null; then
sudo -n sh "${installer}" install --no-confirm
else
sudo sh "${installer}" install --no-confirm
fi
else
sh "${installer}" install --no-confirm
fi
rm -f "${installer}"
fi
if [[ -f /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]]; then
# shellcheck disable=SC1091
. /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
fi
export PATH="/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}"
# Flake builds need nix-command + flakes enabled. Workflows may layer additional
# config, but ensure a sane default exists.
mkdir -p "${XDG_CONFIG_HOME:-$HOME/.config}/nix"
cat > "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" <<'EOF'
experimental-features = nix-command flakes
sandbox = true
fallback = true
substituters = https://cache.nixos.org
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
EOF
mkdir -p bin
export PATH="${PWD}/bin:${PATH}"
runner_version="v12.6.4"
runner_src_tgz="forgejo-runner-${runner_version}.tar.gz"
runner_src_url="https://code.forgejo.org/forgejo/runner/archive/${runner_version}.tar.gz"
runner_src_dir="forgejo-runner-src"
if ! command -v forgejo-runner >/dev/null 2>&1; then
rm -rf "${runner_src_dir}"
mkdir -p "${runner_src_dir}"
curl -fsSL "${runner_src_url}" -o "${runner_src_tgz}"
tar -xzf "${runner_src_tgz}" -C "${runner_src_dir}" --strip-components=1
toolchain="$(grep -E '^toolchain ' "${runner_src_dir}/go.mod" | awk '{print $2}' | head -n 1 || true)"
if [ -z "${toolchain}" ]; then
toolchain="go1.25.7"
fi
if ! command -v go >/dev/null 2>&1; then
go_tgz="${toolchain}.darwin-arm64.tar.gz"
go_url="https://go.dev/dl/${go_tgz}"
curl -fsSL "${go_url}" -o "${go_tgz}"
tar -xzf "${go_tgz}"
export GOROOT="${PWD}/go"
export PATH="${GOROOT}/bin:${PATH}"
fi
export GOPATH="${PWD}/.gopath"
export GOMODCACHE="${PWD}/.gomodcache"
export GOCACHE="${PWD}/.gocache"
mkdir -p "${GOPATH}" "${GOMODCACHE}" "${GOCACHE}"
(cd "${runner_src_dir}" && go build -o "${workdir}/bin/forgejo-runner" .)
chmod +x "${workdir}/bin/forgejo-runner"
fi
cat > runner.yaml <<'EOF'
log:
level: info
runner:
file: .runner
capacity: 1
name: ${FORGEJO_RUNNER_NAME}
labels:
EOF
runner_exec="${FORGEJO_RUNNER_EXEC:-host}"
if [ "$runner_exec" = "shell" ]; then
runner_exec="host"
fi
resolved_labels=""
for label in ${FORGEJO_RUNNER_LABELS//,/ } ; do
if [ -z "${label}" ]; then
continue
fi
case "${label}" in
*:*) resolved="${label}" ;;
*)
resolved="${label}:host"
;;
esac
echo " - ${resolved}" >> runner.yaml
if [ -z "${resolved_labels}" ]; then
resolved_labels="${resolved}"
else
resolved_labels="${resolved_labels},${resolved}"
fi
done
cat >> runner.yaml <<'EOF'
cache:
enabled: false
EOF
forgejo-runner register \
--no-interactive \
--instance "${FORGEJO_INSTANCE_URL}" \
--token "${FORGEJO_RUNNER_TOKEN}" \
--name "${FORGEJO_RUNNER_NAME}" \
--labels "${resolved_labels}" \
--config runner.yaml
forgejo-runner one-job --config runner.yaml
`)
	return b.String()
}

View file

@ -0,0 +1,373 @@
package nsc
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
// normalizeMacOSNSCMachineType rounds a requested "CPUxMemGB" shape up to the
// nearest shape Namespace accepts for macOS via the NSC CLI (cpu: 4/6/8/12,
// mem GB: 7/14/28/56, as observed in Namespace API error output for
// macos/arm64); requests above the maximum are clamped to it. changed reports
// whether the normalized shape differs from the request.
func normalizeMacOSNSCMachineType(machineType string) (normalized string, changed bool, err error) {
	vcpu, memoryMB, err := parseMachineTypeCPUxMemGB(machineType)
	if err != nil {
		return "", false, err
	}
	memGB := memoryMB / 1024
	if vcpu <= 0 || memGB <= 0 {
		return "", false, fmt.Errorf("invalid machine_type %q after parse: vcpu=%d memGB=%d", machineType, vcpu, memGB)
	}
	// snap rounds v up to the first allowed value, clamping to the maximum
	// when v exceeds every entry; the bool reports whether v was adjusted.
	snap := func(v int32, allowed []int32) (int32, bool) {
		for _, a := range allowed {
			if v <= a {
				return a, a != v
			}
		}
		return allowed[len(allowed)-1], true
	}
	cpu, cpuChanged := snap(vcpu, []int32{4, 6, 8, 12})
	mem, memChanged := snap(memGB, []int32{7, 14, 28, 56})
	return fmt.Sprintf("%dx%d", cpu, mem), cpuChanged || memChanged, nil
}
// launchMacOSRunnerViaNSC provisions a macOS instance through the Namespace
// `nsc` CLI and bootstraps a Forgejo runner on it over SSH.
//
// The requested machine type is normalized to an allowed macOS shape; when
// capacity for the (large) requested shape is constrained, a short ladder of
// progressively smaller fallback shapes is tried before giving up. The
// created instance is always destroyed on return, success or failure.
func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName string, req LaunchRequest, ttl time.Duration, machineType string) error {
	if machineType == "" {
		return errors.New("machine_type is required for macos runners")
	}
	if strings.TrimSpace(os.Getenv("NSC_TOKEN_FILE")) == "" {
		// The Burrow forge host feeds NSC_TOKEN_FILE from the intake-backed runtime token.
		return errors.New("NSC_TOKEN_FILE is required for macos runners")
	}
	selectors := macosSelectorsArg(d.opts.MacosBaseImageID)
	if selectors == "" {
		return errors.New("macos selectors resolved empty")
	}
	normalizedMachineType := machineType
	if n, changed, err := normalizeMacOSNSCMachineType(machineType); err != nil {
		return err
	} else if changed {
		normalizedMachineType = n
	}
	// If capacity is constrained for the requested (large) shape, try a small
	// set of progressively smaller shapes before failing the dispatch request.
	// This keeps macOS builds flowing even when large runners are scarce.
	candidates := []string{normalizedMachineType, "8x28", "6x14", "4x7"}
	seen := map[string]struct{}{}
	var uniq []string
	for _, c := range candidates {
		c = strings.TrimSpace(c)
		if c == "" {
			continue
		}
		if _, ok := seen[c]; ok {
			continue
		}
		seen[c] = struct{}{}
		uniq = append(uniq, c)
	}
	candidates = uniq
	// Each attempt gets its own wait/create budget; later (fallback) attempts
	// receive shorter budgets so one scarce shape cannot consume the whole
	// dispatch window.
	type attemptCfg struct {
		waitTimeout   time.Duration
		createTimeout time.Duration
	}
	attempts := []attemptCfg{
		{waitTimeout: 6 * time.Minute, createTimeout: 8 * time.Minute},
		{waitTimeout: 4 * time.Minute, createTimeout: 6 * time.Minute},
		{waitTimeout: 3 * time.Minute, createTimeout: 5 * time.Minute},
	}
	// createInstance runs a single `nsc create` for machine type mt and
	// returns the new instance ID plus the CLI's combined output.
	createInstance := func(mt string, a attemptCfg) (instanceID string, out string, err error) {
		tmpDir, err := os.MkdirTemp("", "forgejo-nsc-macos-*")
		if err != nil {
			return "", "", fmt.Errorf("mktemp: %w", err)
		}
		defer os.RemoveAll(tmpDir)
		metaPath := filepath.Join(tmpDir, "create.json")
		cidPath := filepath.Join(tmpDir, "create.cid")
		arch := strings.TrimSpace(d.opts.MacosMachineArch)
		if arch == "" {
			arch = "arm64"
		}
		// Namespace CLI requires the "os/arch:" prefix to create a macOS instance.
		// Without it, `nsc create` defaults to Linux even if selectors include macos.*.
		machineType := fmt.Sprintf("macos/%s:%s", arch, mt)
		args := []string{
			"create",
			"--duration", ttl.String(),
			"--machine_type", machineType,
			"--selectors", selectors,
			"--bare",
			"--cidfile", cidPath,
			"--log_actions",
			"--purpose", fmt.Sprintf("burrow forgejo runner %s", runnerName),
			// Prefer plain output for debuggability (progress, capacity errors, etc).
			"--output", "plain",
			"--output_json_to", metaPath,
			// macOS instances can take a while to become ready.
			"--wait_timeout", a.waitTimeout.String(),
		}
		args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL)
		createCtx, cancel := context.WithTimeout(ctx, a.createTimeout)
		defer cancel()
		cmd := exec.CommandContext(createCtx, d.opts.BinaryPath, args...)
		var buf bytes.Buffer
		cmd.Stdout = &buf
		cmd.Stderr = &buf
		if err := cmd.Run(); err != nil {
			// Best-effort cleanup: if the instance ID was written before the command failed
			// (or before we timed it out), attempt to destroy it to avoid idling machines.
			if created := strings.TrimSpace(mustReadFile(cidPath)); created != "" {
				d.destroyNSCInstance(context.Background(), runnerName, created)
			}
			if errors.Is(createCtx.Err(), context.DeadlineExceeded) {
				return "", buf.String(), fmt.Errorf("nsc create timed out after %s", a.createTimeout)
			}
			return "", buf.String(), fmt.Errorf("nsc create failed: %w", err)
		}
		instanceID, err = readNSCCreateInstanceID(metaPath)
		if err != nil {
			return "", buf.String(), fmt.Errorf("nsc create output parse failed: %w", err)
		}
		if instanceID == "" {
			return "", buf.String(), fmt.Errorf("nsc create returned empty instance id")
		}
		return instanceID, buf.String(), nil
	}
	var (
		instanceID string
		lastOut    string
		lastErr    error
	)
	for i, mt := range candidates {
		// candidates can be longer than attempts (requested shape plus three
		// fallbacks after dedup); clamp to the last attempt config instead of
		// indexing past the end of the slice. The previous code indexed
		// attempts[i] before the bounds check and panicked on the 4th attempt.
		a := attempts[len(attempts)-1]
		if i < len(attempts) {
			a = attempts[i]
		}
		d.log.Info("launching Namespace macos runner via nsc",
			"runner", runnerName,
			"attempt", i+1,
			"machine_type", mt,
			"requested_machine_type", machineType,
			"selectors", selectors,
		)
		id, out, err := createInstance(mt, a)
		lastOut = out
		lastErr = err
		if err != nil {
			// Timeouts are treated as retryable (capacity constrained).
			if strings.Contains(err.Error(), "timed out") || strings.Contains(strings.ToLower(out), "capacity") {
				continue
			}
			return fmt.Errorf("%w\n%s", err, out)
		}
		instanceID = id
		break
	}
	if instanceID == "" {
		if lastErr != nil {
			return fmt.Errorf("%w\n%s", lastErr, lastOut)
		}
		return fmt.Errorf("nsc create failed without producing an instance id\n%s", lastOut)
	}
	// Always attempt cleanup even if the runner fails.
	defer d.destroyNSCInstance(context.Background(), runnerName, instanceID)
	script := macosBootstrapWrapperScript(runnerName, req, d.opts.Executor, d.opts.WorkDir)
	// Use the Compute SSH config endpoint (direct TCP) instead of `nsc ssh`, which
	// relies on a websocket-based SSH proxy that is not supported by the
	// revokable tenant token we run the dispatcher with.
	return d.runMacOSComputeSSHScript(ctx, runnerName, instanceID, script)
}
// mustReadFile returns the contents of path, or the empty string when the
// file cannot be read. Intended for best-effort reads such as cidfiles.
func mustReadFile(path string) string {
	data, readErr := os.ReadFile(path)
	if readErr != nil {
		return ""
	}
	return string(data)
}
// macosSelectorsArg maps a configured base-image identifier to the Namespace
// selector string used by `nsc create`. An identifier that already contains
// "=" is treated as a raw selector expression and passed through unchanged;
// an empty identifier defaults to "tahoe".
func macosSelectorsArg(baseImageID string) string {
	name := strings.TrimSpace(baseImageID)
	if name == "" {
		name = "tahoe"
	}
	if strings.Contains(name, "=") {
		// Raw selectors, e.g. "macos.version=26.x,image.with=xcode-26".
		return name
	}
	switch strings.ToLower(name) {
	case "sonoma", "macos-14", "macos14", "14":
		return "macos.version=14.x"
	case "sequoia", "macos-15", "macos15", "15":
		return "macos.version=15.x"
	case "tahoe", "macos-26", "macos26", "26":
		return "macos.version=26.x,image.with=xcode-26"
	}
	return "macos.version=26.x"
}
// nscCreateMetadata mirrors the JSON metadata file written by
// `nsc create --output_json_to`; different CLI versions populate different
// identifier fields.
type nscCreateMetadata struct {
	InstanceID string `json:"instance_id"`
	ClusterID  string `json:"cluster_id"`
	ID         string `json:"id"`
}

// readNSCCreateInstanceID extracts the created instance's identifier from the
// metadata file at path, trying each known field name in preference order.
// It returns ("", nil) when the file parses but carries no identifier.
func readNSCCreateInstanceID(path string) (string, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return "", fmt.Errorf("read %s: %w", path, err)
	}
	var meta nscCreateMetadata
	if err := json.Unmarshal(data, &meta); err != nil {
		return "", err
	}
	for _, candidate := range []string{meta.InstanceID, meta.ClusterID, meta.ID} {
		if candidate != "" {
			return candidate, nil
		}
	}
	return "", nil
}
// destroyNSCInstance force-destroys a Namespace instance via `nsc destroy`.
// Failures are logged rather than returned: destruction is best-effort
// cleanup and must never fail the surrounding dispatch path.
func (d *Dispatcher) destroyNSCInstance(ctx context.Context, runnerName, instanceID string) {
	if ctx == nil {
		ctx = context.Background()
	}
	destroyCtx, cancel := context.WithTimeout(ctx, 2*time.Minute)
	defer cancel()
	cmdArgs := prependNSCRegionArgs([]string{"destroy", "--force", instanceID}, d.opts.ComputeBaseURL)
	cmd := exec.CommandContext(destroyCtx, d.opts.BinaryPath, cmdArgs...)
	var combined bytes.Buffer
	cmd.Stdout = &combined
	cmd.Stderr = &combined
	if err := cmd.Run(); err != nil {
		d.log.Warn("nsc destroy failed", "runner", runnerName, "instance", instanceID, "err", err, "output", strings.TrimSpace(combined.String()))
		return
	}
	d.log.Info("nsc instance destroyed", "runner", runnerName, "instance", instanceID)
}
// macosBootstrapWrapperScript renders the shell script piped to the macOS
// instance over SSH: it exports the runner configuration (plus any extra env)
// and then appends the shared macOS bootstrap script. Values travel via stdin
// rather than argv so secrets never show up in the remote process list.
func macosBootstrapWrapperScript(runnerName string, req LaunchRequest, executor, workdir string) string {
	if strings.TrimSpace(workdir) == "" {
		workdir = "/tmp/forgejo-runner"
	}
	exports := map[string]string{
		"FORGEJO_INSTANCE_URL":   req.InstanceURL,
		"FORGEJO_RUNNER_TOKEN":   req.Token,
		"FORGEJO_RUNNER_NAME":    runnerName,
		"FORGEJO_RUNNER_LABELS":  strings.Join(req.Labels, ","),
		"FORGEJO_RUNNER_EXEC":    executor,
		"FORGEJO_RUNNER_WORKDIR": workdir,
	}
	for key, val := range req.ExtraEnv {
		exports[key] = val
	}
	var sb strings.Builder
	sb.WriteString("set -euo pipefail\n")
	// NOTE: map iteration order is random, so the export order varies from
	// run to run; harmless here since exports are order-independent.
	for key, val := range exports {
		if strings.TrimSpace(key) == "" {
			continue
		}
		// Single-quote shell escaping keeps arbitrary token values safe.
		sb.WriteString("export ")
		sb.WriteString(key)
		sb.WriteString("=")
		sb.WriteString(shellSingleQuote(val))
		sb.WriteString("\n")
	}
	sb.WriteString("\n")
	sb.WriteString(macosBootstrapScript())
	return sb.String()
}
// shellSingleQuote wraps value in single quotes so it can be safely embedded
// in a POSIX shell script. Embedded single quotes are escaped by closing the
// quoted region, emitting an escaped quote, and reopening: ' -> '\''.
//
// The previous replacement string ('\"'\"') was malformed: it injected
// literal backslash and double-quote characters into the value instead of a
// single quote, corrupting any token containing an apostrophe.
func shellSingleQuote(value string) string {
	return "'" + strings.ReplaceAll(value, "'", `'\''`) + "'"
}
// prependNSCRegionArgs prefixes args with an explicit `--region` flag. The
// region comes from NSC_REGION when set, otherwise it is derived from the
// Compute base URL, and finally falls back to the default burrow region.
func prependNSCRegionArgs(args []string, computeBaseURL string) []string {
	region := strings.TrimSpace(os.Getenv("NSC_REGION"))
	if region == "" {
		region = regionFromComputeBaseURL(computeBaseURL)
	}
	if region == "" {
		// Default to the burrow region used for other Namespace integrations.
		region = "ord4"
	}
	out := make([]string, 0, len(args)+2)
	out = append(out, "--region", region)
	return append(out, args...)
}
// regionFromComputeBaseURL extracts the region label from a Namespace Compute
// base URL, e.g. "https://ord4.compute.namespaceapis.com" -> "ord4". It
// returns "" when the URL is empty, unparseable, or not a compute endpoint.
func regionFromComputeBaseURL(raw string) string {
	trimmed := strings.TrimSpace(raw)
	if trimmed == "" {
		return ""
	}
	parsed, err := url.Parse(trimmed)
	if err != nil {
		return ""
	}
	host := parsed.Hostname()
	if host == "" {
		return ""
	}
	isCompute := strings.HasSuffix(host, ".compute.namespaceapis.com") || strings.Contains(host, ".compute.")
	if !isCompute {
		return ""
	}
	// ord4.compute.namespaceapis.com -> ord4
	return strings.Split(host, ".")[0]
}

View file

@ -0,0 +1,59 @@
package nsc
import (
"regexp"
"strings"
)
// windowsDefaultMachineType is the shape used when neither the request nor
// the runner labels pin a specific Windows machine type.
const windowsDefaultMachineType = "windows/amd64:8x16"

// cpuMemShapePattern matches bare "<cpu>x<mem>" shapes such as "8x16".
var cpuMemShapePattern = regexp.MustCompile(`^\d+x\d+$`)
// hasWindowsLabel reports whether any runner label selects a Namespace
// Windows profile. Labels may carry a ":<executor>" suffix, which is ignored
// for the prefix match.
func hasWindowsLabel(labels []string) bool {
	for _, raw := range labels {
		trimmed := strings.TrimSpace(raw)
		if trimmed == "" {
			continue
		}
		name, _, _ := strings.Cut(trimmed, ":")
		if strings.HasPrefix(name, "namespace-profile-windows-") {
			return true
		}
	}
	return false
}
// normalizeWindowsMachineType resolves the machine type for a Windows runner:
// an explicit "windows/..." type is kept as-is, a bare "<cpu>x<mem>" shape is
// given the windows/amd64 prefix, and otherwise the profile-size labels
// choose a default from a simple shape ladder.
func normalizeWindowsMachineType(machineType string, labels []string) string {
	mt := strings.TrimSpace(machineType)
	switch {
	case strings.HasPrefix(mt, "windows/"):
		return mt
	case cpuMemShapePattern.MatchString(mt):
		return "windows/amd64:" + mt
	}
	// Label-derived defaults: keep a simple shape ladder for explicit profile sizes.
	for _, raw := range labels {
		name, _, _ := strings.Cut(strings.TrimSpace(raw), ":")
		switch {
		case strings.HasPrefix(name, "namespace-profile-windows-small"):
			return "windows/amd64:2x4"
		case strings.HasPrefix(name, "namespace-profile-windows-medium"):
			return "windows/amd64:4x8"
		case strings.HasPrefix(name, "namespace-profile-windows-large"):
			return windowsDefaultMachineType
		}
	}
	return windowsDefaultMachineType
}
// powershellSingleQuote wraps value in PowerShell single quotes; the only
// escape required is doubling embedded single quotes (' -> '').
func powershellSingleQuote(value string) string {
	escaped := strings.ReplaceAll(value, "'", "''")
	return "'" + escaped + "'"
}

View file

@ -0,0 +1,98 @@
package nsc
import "testing"
// TestHasWindowsLabel covers prefix matching, executor suffixes, and labels
// that look similar but must not match.
func TestHasWindowsLabel(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name   string
		labels []string
		want   bool
	}{
		{name: "namespace windows label", labels: []string{"namespace-profile-windows-large"}, want: true},
		{name: "namespace windows label with host suffix", labels: []string{"namespace-profile-windows-large:host"}, want: true},
		{name: "non namespace windows-like label", labels: []string{"burrow-winrunner:host"}, want: false},
		{name: "macos label", labels: []string{"namespace-profile-macos-large"}, want: false},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			if got := hasWindowsLabel(tt.labels); got != tt.want {
				t.Fatalf("hasWindowsLabel(%v) = %v, want %v", tt.labels, got, tt.want)
			}
		})
	}
}
// TestNormalizeWindowsMachineType covers explicit machine types, bare shapes,
// and label-derived defaults including the fallback.
func TestNormalizeWindowsMachineType(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name       string
		machine    string
		labels     []string
		wantPrefix string
	}{
		{name: "explicit windows machine type keeps value", machine: "windows/amd64:8x16", labels: []string{"namespace-profile-windows-large"}, wantPrefix: "windows/amd64:8x16"},
		{name: "shape only is normalized", machine: "4x8", labels: []string{"namespace-profile-windows-large"}, wantPrefix: "windows/amd64:4x8"},
		{name: "large label default", machine: "", labels: []string{"namespace-profile-windows-large"}, wantPrefix: "windows/amd64:8x16"},
		{name: "medium label default", machine: "", labels: []string{"namespace-profile-windows-medium"}, wantPrefix: "windows/amd64:4x8"},
		{name: "fallback default", machine: "", labels: []string{"namespace-profile-windows-custom"}, wantPrefix: "windows/amd64:8x16"},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			if got := normalizeWindowsMachineType(tt.machine, tt.labels); got != tt.wantPrefix {
				t.Fatalf("normalizeWindowsMachineType(%q, %v) = %q, want %q", tt.machine, tt.labels, got, tt.wantPrefix)
			}
		})
	}
}

View file

@ -0,0 +1,499 @@
package nsc
import (
"bufio"
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
// windowsProxyOutput models the JSON emitted by `nsc instance proxy -o json`
// for a Windows instance: the local proxy endpoint plus the RDP credentials
// (which this package reuses as the WinRM login).
type windowsProxyOutput struct {
	Endpoint string `json:"endpoint"`
	RDP      struct {
		Credentials struct {
			Username string `json:"username"`
			Password string `json:"password"`
		} `json:"credentials"`
	} `json:"rdp"`
}
// launchWindowsRunnerViaWinRM renders the Windows bootstrap script for the
// runner request and executes it on a fresh Namespace Windows instance over
// WinRM.
func (d *Dispatcher) launchWindowsRunnerViaWinRM(ctx context.Context, runnerName string, req LaunchRequest, ttl time.Duration, machineType string) error {
	script := windowsBootstrapScript(runnerName, req, d.opts.Executor, d.opts.WorkDir)
	return d.launchWindowsScriptViaWinRM(ctx, runnerName, ttl, machineType, req.Labels, script)
}
// launchWindowsScriptViaWinRM provisions a Namespace Windows instance, then
// runs the given PowerShell script on it through a local WinRM port-forward
// using the instance's RDP credentials. The instance is destroyed on return,
// whether or not the script succeeded.
func (d *Dispatcher) launchWindowsScriptViaWinRM(ctx context.Context, runnerName string, ttl time.Duration, machineType string, labels []string, script string) error {
	if ttl <= 0 {
		ttl = d.opts.DefaultDuration
	}
	mt := normalizeWindowsMachineType(machineType, labels)
	instanceID, createOutput, err := d.createWindowsInstance(ctx, runnerName, ttl, mt)
	if err != nil {
		return fmt.Errorf("windows create failed: %w\n%s", err, createOutput)
	}
	// Always attempt cleanup, even when the bootstrap below fails.
	defer d.destroyNSCInstance(context.Background(), runnerName, instanceID)
	// The RDP credentials double as the WinRM login.
	username, password, err := d.resolveWindowsCredentials(ctx, instanceID)
	if err != nil {
		return err
	}
	// Fail fast when the image is RDP-only and exposes no WinRM service.
	if err := d.probeWindowsWinRMService(ctx, instanceID); err != nil {
		return err
	}
	endpoint, stopForward, err := d.startWindowsWinRMPortForward(ctx, instanceID)
	if err != nil {
		return err
	}
	defer stopForward()
	if err := d.runWindowsWinRMPowerShell(ctx, endpoint, username, password, script); err != nil {
		return err
	}
	return nil
}
// createWindowsInstance runs `nsc create` for a Windows machine shape and
// returns the new instance ID together with the CLI's combined output (kept
// for error reporting by the caller). An instance that was created before a
// failure or timeout is destroyed best-effort via its cidfile.
func (d *Dispatcher) createWindowsInstance(ctx context.Context, runnerName string, ttl time.Duration, machineType string) (instanceID string, output string, err error) {
	tmpDir, err := os.MkdirTemp("", "forgejo-nsc-windows-*")
	if err != nil {
		return "", "", fmt.Errorf("mktemp: %w", err)
	}
	defer os.RemoveAll(tmpDir)
	metaPath := filepath.Join(tmpDir, "create.json")
	cidPath := filepath.Join(tmpDir, "create.cid")
	args := []string{
		"create",
		"--duration", ttl.String(),
		"--machine_type", machineType,
		"--cidfile", cidPath,
		"--purpose", fmt.Sprintf("burrow forgejo runner %s", runnerName),
		"--output", "plain",
		"--output_json_to", metaPath,
		"--wait_timeout", "6m",
	}
	args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL)
	// Create timeout exceeds the CLI wait timeout so the CLI gets a chance to
	// report its own error before we kill it.
	createCtx, cancel := context.WithTimeout(ctx, 8*time.Minute)
	defer cancel()
	cmd := exec.CommandContext(createCtx, d.opts.BinaryPath, args...)
	var buf bytes.Buffer
	cmd.Stdout = &buf
	cmd.Stderr = &buf
	if err := cmd.Run(); err != nil {
		// The cidfile may have been written before the failure; destroy the
		// orphan so it does not idle until its duration expires.
		if created := strings.TrimSpace(mustReadFile(cidPath)); created != "" {
			d.destroyNSCInstance(context.Background(), runnerName, created)
		}
		if errors.Is(createCtx.Err(), context.DeadlineExceeded) {
			return "", buf.String(), fmt.Errorf("nsc create timed out after %s", 8*time.Minute)
		}
		return "", buf.String(), fmt.Errorf("nsc create failed: %w", err)
	}
	instanceID, err = readNSCCreateInstanceID(metaPath)
	if err != nil {
		return "", buf.String(), fmt.Errorf("nsc create output parse failed: %w", err)
	}
	if instanceID == "" {
		return "", buf.String(), errors.New("nsc create returned empty instance id")
	}
	return instanceID, buf.String(), nil
}
// resolveWindowsCredentials starts `nsc instance proxy -s rdp -o json` and
// polls its JSON output file until the RDP username/password appear. Only the
// credential metadata is needed (not the tunnel), so the proxy process is
// killed once the credentials have been read or the polling budget expires.
func (d *Dispatcher) resolveWindowsCredentials(ctx context.Context, instanceID string) (username string, password string, err error) {
	tmpDir, err := os.MkdirTemp("", "forgejo-nsc-winproxy-*")
	if err != nil {
		return "", "", fmt.Errorf("mktemp: %w", err)
	}
	defer os.RemoveAll(tmpDir)
	outPath := filepath.Join(tmpDir, "proxy.json")
	outFile, err := os.Create(outPath)
	if err != nil {
		return "", "", fmt.Errorf("create proxy output file: %w", err)
	}
	defer outFile.Close()
	var stderr bytes.Buffer
	args := []string{"instance", "proxy", instanceID, "-s", "rdp", "-o", "json"}
	args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL)
	proxyCtx, cancel := context.WithTimeout(ctx, 90*time.Second)
	defer cancel()
	cmd := exec.CommandContext(proxyCtx, d.opts.BinaryPath, args...)
	cmd.Stdout = outFile
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		return "", "", fmt.Errorf("start nsc instance proxy: %w", err)
	}
	// Reap the process in the background; waitDone's close also synchronizes
	// reads of waitErr.
	waitDone := make(chan struct{})
	var waitErr error
	go func() {
		waitErr = cmd.Wait()
		close(waitDone)
	}()
	var payload windowsProxyOutput
	// Poll the output file once per second until credentials show up or the
	// 45s budget runs out; the proxy keeps running while we read.
	deadline := time.Now().Add(45 * time.Second)
	for time.Now().Before(deadline) {
		raw, _ := os.ReadFile(outPath)
		jsonBlob := extractJSON(string(raw))
		if jsonBlob != "" {
			if err := json.Unmarshal([]byte(jsonBlob), &payload); err == nil {
				username = strings.TrimSpace(payload.RDP.Credentials.Username)
				password = strings.TrimSpace(payload.RDP.Credentials.Password)
				if username != "" && password != "" {
					break
				}
			}
		}
		// If the proxy has already exited with an error, surface it instead
		// of polling until the deadline.
		select {
		case <-waitDone:
			if waitErr != nil {
				return "", "", fmt.Errorf("nsc instance proxy exited before credentials were available: %w\n%s", waitErr, stderr.String())
			}
		default:
		}
		time.Sleep(1 * time.Second)
	}
	// The proxy is no longer needed once credentials are known (or we gave up).
	if cmd.Process != nil {
		_ = cmd.Process.Kill()
	}
	<-waitDone
	if username == "" || password == "" {
		raw, _ := os.ReadFile(outPath)
		return "", "", fmt.Errorf("failed to resolve windows credentials from nsc instance proxy output\nstdout=%s\nstderr=%s", strings.TrimSpace(string(raw)), strings.TrimSpace(stderr.String()))
	}
	return username, password, nil
}
// probeWindowsWinRMService checks, via a one-shot `nsc instance proxy`,
// whether the instance exposes a WinRM service before investing in a full
// port-forward and bootstrap. RDP-only images produce a distinct error so
// callers can treat them as non-retryable.
func (d *Dispatcher) probeWindowsWinRMService(ctx context.Context, instanceID string) error {
	args := []string{"instance", "proxy", instanceID, "-s", "winrm", "-o", "json", "--once"}
	args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL)
	probeCtx, cancel := context.WithTimeout(ctx, 15*time.Second)
	defer cancel()
	cmd := exec.CommandContext(probeCtx, d.opts.BinaryPath, args...)
	var out bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &out
	err := cmd.Run()
	raw := strings.TrimSpace(out.String())
	// Any endpoint in the output means the service exists; the probe's exit
	// status is irrelevant in that case.
	if endpoint, ok := parseProxyEndpoint(raw); ok && endpoint != "" {
		return nil
	}
	if indicatesMissingProxyService(raw, "winrm") {
		return fmt.Errorf("namespace windows non-interactive channel unavailable: instance does not expose winrm service (rdp-only)\n%s", raw)
	}
	if errors.Is(probeCtx.Err(), context.DeadlineExceeded) {
		return fmt.Errorf("timed out probing Namespace winrm service before bootstrap\n%s", raw)
	}
	if err != nil {
		return fmt.Errorf("nsc winrm service probe failed: %w\n%s", err, raw)
	}
	return fmt.Errorf("nsc winrm service probe did not yield endpoint output\n%s", raw)
}
// parseProxyEndpoint pulls the "endpoint" field out of (possibly noisy)
// `nsc instance proxy` output. The bool result reports whether a non-empty
// endpoint was found.
func parseProxyEndpoint(raw string) (string, bool) {
	blob := extractJSON(raw)
	if blob == "" {
		return "", false
	}
	var parsed struct {
		Endpoint string `json:"endpoint"`
	}
	if json.Unmarshal([]byte(blob), &parsed) != nil {
		return "", false
	}
	if ep := strings.TrimSpace(parsed.Endpoint); ep != "" {
		return ep, true
	}
	return "", false
}
// indicatesMissingProxyService reports whether raw proxy output contains the
// nsc error text for an instance that does not expose the named service.
func indicatesMissingProxyService(raw string, service string) bool {
	svc := strings.TrimSpace(service)
	if svc == "" {
		return false
	}
	return strings.Contains(raw, fmt.Sprintf("does not have service %q", svc))
}
// startWindowsWinRMPortForward launches `nsc instance port-forward` to expose
// the instance's WinRM port (5985) locally and waits for the "Listening on"
// line announcing the local endpoint. On success it returns the endpoint and
// a stop func that tears the tunnel down; callers must invoke stop.
func (d *Dispatcher) startWindowsWinRMPortForward(ctx context.Context, instanceID string) (endpoint string, stop func(), err error) {
	args := []string{"instance", "port-forward", instanceID, "--target_port", "5985"}
	args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL)
	forwardCtx, cancel := context.WithCancel(ctx)
	cmd := exec.CommandContext(forwardCtx, d.opts.BinaryPath, args...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		cancel()
		return "", nil, fmt.Errorf("port-forward stdout pipe: %w", err)
	}
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		cancel()
		return "", nil, fmt.Errorf("start nsc port-forward: %w", err)
	}
	// Reap the process in the background; waitDone's close also synchronizes
	// reads of waitErr.
	waitDone := make(chan struct{})
	var waitErr error
	go func() {
		waitErr = cmd.Wait()
		close(waitDone)
	}()
	// Scan stdout for the announcement line; both channels are buffered so
	// the scanner goroutine can exit even if no one is receiving anymore.
	endpointCh := make(chan string, 1)
	scanErrCh := make(chan error, 1)
	go func() {
		scanner := bufio.NewScanner(stdout)
		for scanner.Scan() {
			line := strings.TrimSpace(scanner.Text())
			if strings.HasPrefix(line, "Listening on ") {
				endpointCh <- strings.TrimSpace(strings.TrimPrefix(line, "Listening on "))
				return
			}
		}
		if err := scanner.Err(); err != nil {
			scanErrCh <- err
		}
	}()
	// First outcome wins: endpoint announced, scan error, process exit,
	// 45s startup timeout, or caller cancellation.
	select {
	case endpoint = <-endpointCh:
		stop = func() {
			cancel()
			if cmd.Process != nil {
				_ = cmd.Process.Kill()
			}
			<-waitDone
		}
		return endpoint, stop, nil
	case err := <-scanErrCh:
		cancel()
		if cmd.Process != nil {
			_ = cmd.Process.Kill()
		}
		<-waitDone
		return "", nil, fmt.Errorf("failed reading port-forward output: %w", err)
	case <-waitDone:
		cancel()
		if waitErr != nil {
			return "", nil, fmt.Errorf("nsc port-forward exited early: %w\n%s", waitErr, stderr.String())
		}
		return "", nil, fmt.Errorf("nsc port-forward exited without endpoint\n%s", stderr.String())
	case <-time.After(45 * time.Second):
		cancel()
		if cmd.Process != nil {
			_ = cmd.Process.Kill()
		}
		<-waitDone
		return "", nil, fmt.Errorf("timed out waiting for WinRM port-forward endpoint\n%s", stderr.String())
	case <-ctx.Done():
		cancel()
		if cmd.Process != nil {
			_ = cmd.Process.Kill()
		}
		<-waitDone
		return "", nil, ctx.Err()
	}
}
// runWindowsWinRMPowerShell executes the given PowerShell script on the
// instance through the forwarded WinRM endpoint. It shells out to a small
// pywinrm driver (installed into a per-workdir virtualenv) for NTLM-capable
// WinRM transport; credentials and the script travel via environment
// variables so they never appear in argv.
//
// NOTE(review): indentation inside the embedded Python literals was
// reconstructed from a whitespace-mangled source — verify against the
// original file.
func (d *Dispatcher) runWindowsWinRMPowerShell(ctx context.Context, endpoint, username, password, script string) error {
	pythonPath, err := exec.LookPath("python3")
	if err != nil {
		return fmt.Errorf("python3 is required for windows WinRM bootstrap: %w", err)
	}
	workdir := strings.TrimSpace(d.opts.WorkDir)
	if workdir == "" {
		workdir = "/tmp/forgejo-runner"
	}
	if err := os.MkdirAll(workdir, 0o755); err != nil {
		return fmt.Errorf("create workdir %s: %w", workdir, err)
	}
	// Reuse the venv across runs: only create it when its python is missing.
	venvPath := filepath.Join(workdir, ".winrm-venv")
	venvPython := filepath.Join(venvPath, "bin", "python")
	if _, err := os.Stat(venvPython); err != nil {
		cmd := exec.CommandContext(ctx, pythonPath, "-m", "venv", venvPath)
		var out bytes.Buffer
		cmd.Stdout = &out
		cmd.Stderr = &out
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("create python venv for winrm failed: %w\n%s", err, out.String())
		}
	}
	// Install pywinrm only when the module is not already importable.
	ensurePyWinRM := `
import importlib.util, subprocess, sys
if importlib.util.find_spec("winrm") is None:
    subprocess.check_call([sys.executable, "-m", "pip", "install", "--quiet", "pywinrm"])
`
	ensureCmd := exec.CommandContext(ctx, venvPython, "-c", ensurePyWinRM)
	var ensureOut bytes.Buffer
	ensureCmd.Stdout = &ensureOut
	ensureCmd.Stderr = &ensureOut
	if err := ensureCmd.Run(); err != nil {
		return fmt.Errorf("install pywinrm failed: %w\n%s", err, ensureOut.String())
	}
	// The driver retries for up to 300s until WinRM accepts connections, then
	// relays the remote exit code via sys.exit. sys.exit inside the try block
	// is not swallowed: SystemExit is not an Exception subclass.
	// NOTE(review): `\\n` in the timeout message writes a literal backslash-n
	// to stderr (raw Go string -> Python "\\n"); cosmetic only.
	runScript := `
import base64, os, sys, time, traceback, winrm

endpoint = os.environ["WINRM_ENDPOINT"]
user = os.environ["WINRM_USER"]
password = os.environ["WINRM_PASS"]
script = base64.b64decode(os.environ["WINRM_SCRIPT_B64"]).decode("utf-8")
deadline = time.time() + 300.0
last_err = None
while time.time() < deadline:
    try:
        session = winrm.Session(f"http://{endpoint}/wsman", auth=(user, password), transport="ntlm")
        result = session.run_ps(script)
        sys.stdout.write(result.std_out.decode("utf-8", errors="replace"))
        sys.stderr.write(result.std_err.decode("utf-8", errors="replace"))
        print(f"winrm_exit={result.status_code}")
        sys.exit(result.status_code)
    except Exception as err:
        last_err = err
        time.sleep(5.0)
sys.stderr.write("timed out waiting for WinRM connectivity after 300s\\n")
if last_err is not None:
    traceback.print_exception(last_err, file=sys.stderr)
sys.exit(111)
`
	runCmd := exec.CommandContext(ctx, venvPython, "-c", runScript)
	runCmd.Env = append(os.Environ(),
		"WINRM_ENDPOINT="+endpoint,
		"WINRM_USER="+username,
		"WINRM_PASS="+password,
		"WINRM_SCRIPT_B64="+base64.StdEncoding.EncodeToString([]byte(script)),
	)
	var runOut bytes.Buffer
	runCmd.Stdout = &runOut
	runCmd.Stderr = &runOut
	if err := runCmd.Run(); err != nil {
		return fmt.Errorf("windows winrm bootstrap command failed: %w\n%s", err, runOut.String())
	}
	return nil
}
// windowsBootstrapScript renders the PowerShell bootstrap that downloads the
// Forgejo runner release, writes a minimal runner config, registers the
// runner, and executes exactly one job. Values are injected via single-quoted
// PowerShell strings so tokens cannot break out of the script.
//
// NOTE(review): indentation inside the here-string YAML was reconstructed
// from a whitespace-mangled source; the label lines are emitted with a
// one-space indent, so the nested keys must use matching one-space nesting —
// verify against the original file.
func windowsBootstrapScript(runnerName string, req LaunchRequest, executor, workdir string) string {
	if strings.TrimSpace(workdir) == "" {
		workdir = `C:\burrow\forgejo-runner`
	}
	// "shell" (the macOS/Linux executor) maps to "host" on Windows.
	runnerExec := strings.TrimSpace(executor)
	if runnerExec == "" || runnerExec == "shell" {
		runnerExec = "host"
	}
	// Sanitize the runner name into a filesystem-safe directory component.
	safeName := strings.NewReplacer(`\`, "-", ":", "-", "/", "-", " ", "-").Replace(runnerName)
	workRoot := strings.TrimRight(workdir, `\`) + `\` + safeName
	var b strings.Builder
	b.WriteString("$ErrorActionPreference = 'Stop'\n")
	b.WriteString("$ProgressPreference = 'SilentlyContinue'\n")
	b.WriteString("[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12\n")
	b.WriteString("$runnerName = " + powershellSingleQuote(runnerName) + "\n")
	b.WriteString("$runnerToken = " + powershellSingleQuote(req.Token) + "\n")
	b.WriteString("$instanceURL = " + powershellSingleQuote(req.InstanceURL) + "\n")
	b.WriteString("$labelsCsv = " + powershellSingleQuote(strings.Join(req.Labels, ",")) + "\n")
	b.WriteString("$runnerExec = " + powershellSingleQuote(runnerExec) + "\n")
	b.WriteString("$workRoot = " + powershellSingleQuote(workRoot) + "\n")
	b.WriteString(`
New-Item -Path $workRoot -ItemType Directory -Force | Out-Null
Set-Location $workRoot
$runnerVersion = "12.6.4"
$zipUrl = "https://code.forgejo.org/forgejo/runner/releases/download/v${runnerVersion}/forgejo-runner-${runnerVersion}-windows-amd64.zip"
$zipPath = Join-Path $workRoot "forgejo-runner.zip"
$extractDir = Join-Path $workRoot "forgejo-runner"
if (Test-Path $extractDir) {
    Remove-Item -Path $extractDir -Recurse -Force
}
Invoke-WebRequest -Uri $zipUrl -OutFile $zipPath
Expand-Archive -Path $zipPath -DestinationPath $extractDir -Force
$runnerExe = Join-Path $extractDir "forgejo-runner.exe"
if (-not (Test-Path $runnerExe)) {
    throw "Missing forgejo-runner.exe after extract: $runnerExe"
}
$labels = @()
foreach ($label in ($labelsCsv -split ",")) {
    $trimmed = $label.Trim()
    if ([string]::IsNullOrWhiteSpace($trimmed)) { continue }
    if ($trimmed.Contains(":")) {
        $labels += $trimmed
    } else {
        $labels += ("{0}:{1}" -f $trimmed, $runnerExec)
    }
}
if ($labels.Count -eq 0) {
    throw "No runner labels resolved for windows bootstrap"
}
$labelLines = ($labels | ForEach-Object { " - $_" }) -join [Environment]::NewLine
$configPath = Join-Path $workRoot "runner.yaml"
$runnerYaml = @"
log:
 level: info
runner:
 file: .runner
 capacity: 1
 name: $runnerName
 labels:
$labelLines
cache:
 enabled: false
"@
Set-Content -Path $configPath -Value $runnerYaml -Encoding UTF8
$labelsArg = ($labels -join ",")
& $runnerExe register --no-interactive --instance $instanceURL --token $runnerToken --name $runnerName --labels $labelsArg --config $configPath
if ($LASTEXITCODE -ne 0) {
    throw ("forgejo-runner register failed: {0}" -f $LASTEXITCODE)
}
& $runnerExe one-job --config $configPath
if ($LASTEXITCODE -ne 0) {
    throw ("forgejo-runner one-job failed: {0}" -f $LASTEXITCODE)
}
`)
	return b.String()
}

View file

@ -0,0 +1,59 @@
package nsc
import (
"context"
"io"
"log/slog"
"os"
"os/exec"
"strings"
"testing"
"time"
)
// TestWindowsWinRMScriptRoundTrip is an opt-in end-to-end test: it provisions
// a real Namespace Windows instance and runs a trivial PowerShell command on
// it via WinRM. Enable with NSC_WINDOWS_E2E=1; requires a logged-in `nsc`
// CLI on PATH. RDP-only images skip rather than fail.
func TestWindowsWinRMScriptRoundTrip(t *testing.T) {
	if os.Getenv("NSC_WINDOWS_E2E") != "1" {
		t.Skip("set NSC_WINDOWS_E2E=1 to run Namespace Windows integration test")
	}
	nscBinary, err := exec.LookPath("nsc")
	if err != nil {
		t.Skipf("nsc not found in PATH: %v", err)
	}
	authCheck := exec.Command(nscBinary, "auth", "check-login")
	if out, err := authCheck.CombinedOutput(); err != nil {
		t.Skipf("nsc auth check-login failed: %v (%s)", err, strings.TrimSpace(string(out)))
	}
	// Machine type can be overridden for capacity-constrained regions.
	machineType := strings.TrimSpace(os.Getenv("NSC_WINDOWS_E2E_MACHINE_TYPE"))
	if machineType == "" {
		machineType = "windows/amd64:4x8"
	}
	dispatcher, err := NewDispatcher(Options{
		BinaryPath:      nscBinary,
		DefaultImage:    "code.forgejo.org/forgejo/runner:11",
		DefaultMachine:  machineType,
		DefaultDuration: 20 * time.Minute,
		MaxParallel:     1,
		WorkDir:         t.TempDir(),
		ComputeBaseURL:  strings.TrimSpace(os.Getenv("NSC_COMPUTE_BASE_URL")),
		Logger:          slog.New(slog.NewTextHandler(io.Discard, nil)),
	})
	if err != nil {
		t.Fatalf("NewDispatcher() error: %v", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Minute)
	defer cancel()
	script := "Write-Output ('winrm-ok:' + $env:COMPUTERNAME)"
	labels := []string{"namespace-profile-windows-medium"}
	if err := dispatcher.launchWindowsScriptViaWinRM(ctx, "nsc-winrm-itest", 20*time.Minute, machineType, labels, script); err != nil {
		if strings.Contains(err.Error(), "does not expose winrm service (rdp-only)") {
			t.Skipf("namespace windows control channel is rdp-only: %v", err)
		}
		t.Fatalf("launchWindowsScriptViaWinRM() error: %v", err)
	}
}

View file

@ -0,0 +1,65 @@
package nsc
import "testing"
// TestParseProxyEndpoint covers clean JSON, JSON preceded by CLI noise, a
// payload without an endpoint, and non-JSON error output.
func TestParseProxyEndpoint(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name   string
		raw    string
		want   string
		wantOK bool
	}{
		{
			name:   "plain json payload",
			raw:    `{"endpoint":"127.0.0.1:61234"}`,
			want:   "127.0.0.1:61234",
			wantOK: true,
		},
		{
			name: "json wrapped with extra output",
			raw: `Connected.
{"endpoint":"127.0.0.1:61235","rdp":{"credentials":{"username":"runneradmin","password":"runneradmin"}}}`,
			want:   "127.0.0.1:61235",
			wantOK: true,
		},
		{
			name:   "missing endpoint field",
			raw:    `{"rdp":{"credentials":{"username":"runneradmin"}}}`,
			wantOK: false,
		},
		{
			name:   "non-json output",
			raw:    `Failed: instance does not have service "winrm"`,
			wantOK: false,
		},
	}
	for _, tc := range tests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			got, ok := parseProxyEndpoint(tc.raw)
			if ok != tc.wantOK {
				t.Fatalf("parseProxyEndpoint(%q) ok=%v, want %v", tc.raw, ok, tc.wantOK)
			}
			if got != tc.want {
				t.Fatalf("parseProxyEndpoint(%q) endpoint=%q, want %q", tc.raw, got, tc.want)
			}
		})
	}
}
// TestIndicatesMissingProxyService checks matching against the exact nsc
// error text, including a service-name mismatch.
func TestIndicatesMissingProxyService(t *testing.T) {
	t.Parallel()
	raw := `Failed: instance does not have service "winrm"`
	if got := indicatesMissingProxyService(raw, "winrm"); !got {
		t.Fatalf("indicatesMissingProxyService should return true for missing winrm message")
	}
	if got := indicatesMissingProxyService(raw, "ssh"); got {
		t.Fatalf("indicatesMissingProxyService should be false when service name does not match")
	}
}

View file

@ -0,0 +1,151 @@
package server
import (
"context"
"encoding/json"
"errors"
"log/slog"
"net/http"
"time"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/burrow/forgejo-nsc/internal/app"
)
// Server is the HTTP front end of the dispatcher service.
type Server struct {
	httpServer *http.Server // configured address, router, and timeouts
	app        *app.Service // business logic behind the dispatch endpoint
	log        *slog.Logger // request/error logging
}
// New builds the HTTP server: a chi router with standard middleware
// (request IDs, real-IP resolution, logging, panic recovery), the health and
// dispatch endpoints, and conservative connection timeouts. A nil logger
// falls back to slog.Default().
func New(listen string, svc *app.Service, logger *slog.Logger) *Server {
	if logger == nil {
		logger = slog.Default()
	}
	router := chi.NewRouter()
	router.Use(middleware.RequestID)
	router.Use(middleware.RealIP)
	router.Use(middleware.Logger)
	router.Use(middleware.Recoverer)
	s := &Server{
		app: svc,
		log: logger,
		httpServer: &http.Server{
			Addr:        listen,
			Handler:     router,
			ReadTimeout: 30 * time.Second,
			// Dispatch requests can legitimately run for the duration of a build.
			// A short WriteTimeout will kill the request context mid-provisioning.
			WriteTimeout: 2 * time.Hour,
			IdleTimeout:  60 * time.Second,
		},
	}
	router.Get("/healthz", s.handleHealthz)
	router.Post("/api/v1/dispatch", s.handleDispatch)
	return s
}
// ListenAndServe starts serving HTTP on the configured address; it blocks
// until the server stops.
func (s *Server) ListenAndServe() error {
	return s.httpServer.ListenAndServe()
}

// Shutdown gracefully stops the server, honoring ctx as the drain deadline.
func (s *Server) Shutdown(ctx context.Context) error {
	return s.httpServer.Shutdown(ctx)
}

// Handler exposes the underlying HTTP handler for tests.
func (s *Server) Handler() http.Handler {
	return s.httpServer.Handler
}
// dispatchRequest is the JSON body accepted by POST /api/v1/dispatch.
type dispatchRequest struct {
	Count   int               `json:"count"`        // number of runners to launch
	Labels  []string          `json:"labels"`       // runner labels to advertise
	Scope   *dispatchScope    `json:"scope"`        // optional registration scope; nil uses the default
	TTL     string            `json:"ttl"`          // Go duration string; empty means the default TTL
	Machine string            `json:"machine_type"` // machine shape override
	Image   string            `json:"image"`        // runner image override
	Env     map[string]string `json:"env"`          // extra environment passed to the runner
}

// dispatchScope selects where the runner registration token is scoped
// (e.g. instance- or organization-level; mirrors app.Scope).
type dispatchScope struct {
	Level string `json:"level"`
	Owner string `json:"owner"`
	Name  string `json:"name"`
}
// handleDispatch decodes a dispatch request, translates it into the app-layer
// request type, and runs the dispatch synchronously, writing the result (or
// an error envelope) as JSON.
func (s *Server) handleDispatch(w http.ResponseWriter, r *http.Request) {
	var payload dispatchRequest
	if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
		s.writeError(w, http.StatusBadRequest, err)
		return
	}
	// An empty TTL parses to zero, which downstream treats as "use default".
	duration, err := parseDuration(payload.TTL)
	if err != nil {
		s.writeError(w, http.StatusBadRequest, err)
		return
	}
	var scope *app.Scope
	if payload.Scope != nil {
		scope = &app.Scope{
			Level: payload.Scope.Level,
			Owner: payload.Scope.Owner,
			Name:  payload.Scope.Name,
		}
	}
	resp, err := s.app.Dispatch(r.Context(), app.DispatchRequest{
		Count:    payload.Count,
		Labels:   payload.Labels,
		Scope:    scope,
		TTL:      duration,
		Machine:  payload.Machine,
		Image:    payload.Image,
		ExtraEnv: payload.Env,
	})
	if err != nil {
		s.writeError(w, http.StatusInternalServerError, err)
		return
	}
	s.writeJSON(w, http.StatusOK, resp)
}
// parseDuration parses an optional TTL string. An empty value yields zero
// (interpreted downstream as "use the default"); a parseable but
// non-positive duration is rejected.
func parseDuration(value string) (time.Duration, error) {
	if value == "" {
		return 0, nil
	}
	parsed, err := time.ParseDuration(value)
	switch {
	case err != nil:
		return 0, err
	case parsed <= 0:
		return 0, errors.New("ttl must be positive")
	}
	return parsed, nil
}
func (s *Server) handleHealthz(w http.ResponseWriter, _ *http.Request) {
s.writeJSON(w, http.StatusOK, map[string]string{"status": "ok"})
}
func (s *Server) writeError(w http.ResponseWriter, code int, err error) {
s.log.Error("request failed", "err", err, "status", code)
s.writeJSON(w, code, map[string]string{
"error": err.Error(),
})
}
// writeJSON serializes payload as JSON with the given status code. The
// Content-Type header is set before WriteHeader, since headers cannot be
// changed once the status line is committed.
func (s *Server) writeJSON(w http.ResponseWriter, code int, payload any) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	// The status has already been written, so an encode failure cannot be
	// reported to the client; the error is deliberately dropped.
	_ = json.NewEncoder(w).Encode(payload)
}

View file

@ -0,0 +1,111 @@
package server
import (
"bytes"
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
"github.com/burrow/forgejo-nsc/internal/app"
"github.com/burrow/forgejo-nsc/internal/forgejo"
"github.com/burrow/forgejo-nsc/internal/nsc"
)
// serverForgejoMock is a test double for the Forgejo client: it records every
// scope passed to RegistrationToken and always returns the canned token.
// mu guards scopes against concurrent handler invocations.
type serverForgejoMock struct {
	mu     sync.Mutex
	token  string
	scopes []forgejo.Scope
}
// RegistrationToken records the requested scope and hands back the canned
// token; it never fails.
func (m *serverForgejoMock) RegistrationToken(ctx context.Context, scope forgejo.Scope) (string, error) {
	m.mu.Lock()
	m.scopes = append(m.scopes, scope)
	tok := m.token
	m.mu.Unlock()
	return tok, nil
}
// serverDispatcherMock is a test double for the runner dispatcher: it records
// every launch request and returns result (or "runner" when result is empty).
// mu guards requests against concurrent handler invocations.
type serverDispatcherMock struct {
	mu       sync.Mutex
	requests []nsc.LaunchRequest
	result   string
}
// LaunchRunner records the launch request and returns the configured runner
// name, falling back to "runner" when none was set; it never fails.
func (m *serverDispatcherMock) LaunchRunner(ctx context.Context, req nsc.LaunchRequest) (string, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.requests = append(m.requests, req)
	name := m.result
	if name == "" {
		name = "runner"
	}
	return name, nil
}
// TestDispatchEndpoint drives the dispatch API through a real HTTP round trip
// (httptest server + http.Post) and verifies that the JSON payload is
// faithfully translated into the Forgejo registration call and the dispatcher
// launch request.
func TestDispatchEndpoint(t *testing.T) {
	forgejoMock := &serverForgejoMock{token: "token"}
	dispatcherMock := &serverDispatcherMock{result: "runner-http"}
	// Defaults that the request below is expected to override.
	cfg := app.Config{
		DefaultScope:  forgejo.Scope{Level: forgejo.ScopeInstance},
		DefaultLabels: []string{"fallback"},
		InstanceURL:   "https://forgejo.example.com",
		DefaultTTL:    30 * time.Minute,
	}
	service := app.NewService(cfg, forgejoMock, dispatcherMock, nil)
	srv := New(":0", service, nil)
	// Serve the real handler so routing is exercised, not just the function.
	ts := httptest.NewServer(srv.Handler())
	defer ts.Close()
	// Request overrides every configured default: scope, ttl, labels,
	// machine type, image, and env.
	body := map[string]any{
		"count":        1,
		"ttl":          "45m",
		"labels":       []string{"nscloud-arm"},
		"scope":        map[string]string{"level": string(forgejo.ScopeOrganization), "owner": "acme"},
		"machine_type": "8x16",
		"image":        "runner:http",
		"env":          map[string]string{"FOO": "bar"},
	}
	payload, _ := json.Marshal(body)
	resp, err := http.Post(ts.URL+"/api/v1/dispatch", "application/json", bytes.NewReader(payload))
	if err != nil {
		t.Fatalf("POST failed: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("expected 200 OK, got %d", resp.StatusCode)
	}
	var decoded app.DispatchResponse
	if err := json.NewDecoder(resp.Body).Decode(&decoded); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// One runner, named by the dispatcher mock's canned result.
	if len(decoded.Runners) != 1 || decoded.Runners[0].Name != "runner-http" {
		t.Fatalf("unexpected response: %+v", decoded)
	}
	// The request's organization scope must win over the instance default.
	if len(forgejoMock.scopes) != 1 || forgejoMock.scopes[0].Level != forgejo.ScopeOrganization {
		t.Fatalf("expected organization scope, got %+v", forgejoMock.scopes)
	}
	if len(dispatcherMock.requests) != 1 {
		t.Fatalf("expected dispatcher call")
	}
	call := dispatcherMock.requests[0]
	// 45m from the request overrides the 30m DefaultTTL.
	if call.Duration != 45*time.Minute {
		t.Fatalf("expected ttl override, got %v", call.Duration)
	}
	if call.Labels[0] != "nscloud-arm" {
		t.Fatalf("expected labels passthrough, got %v", call.Labels)
	}
	if call.ExtraEnv["FOO"] != "bar" {
		t.Fatalf("expected env passthrough")
	}
}