Add Forgejo namespace workflow stack
This commit is contained in:
parent
482fd5d085
commit
865b676c99
68 changed files with 9709 additions and 11 deletions
95
Scripts/_burrow-flake.sh
Executable file
95
Scripts/_burrow-flake.sh
Executable file
|
|
@ -0,0 +1,95 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Abort the calling script when a required executable is not on PATH.
# $1 - command name to probe. Exits 1 with a diagnostic on stderr.
burrow_require_cmd() {
  command -v "$1" >/dev/null 2>&1 && return 0
  echo "missing required command: $1" >&2
  exit 1
}
|
||||
|
||||
# Delete every temporary flake copy registered in BURROW_FLAKE_TMPDIRS.
# No-op when the array is empty (callers declare it before sourcing us).
burrow_cleanup_flake_tmpdirs() {
  local registered="${#BURROW_FLAKE_TMPDIRS[@]}"
  if [[ "${registered}" -gt 0 ]]; then
    rm -rf "${BURROW_FLAKE_TMPDIRS[@]}"
  fi
}
|
||||
|
||||
# Turn a local path (default ".") into a clean `path:` flake reference.
# Already-qualified flake URLs are passed through untouched. Local paths are
# copied (minus VCS/build noise) into ~/.cache/burrow so nix sees a stable,
# git-independent tree. The temp copy is registered in BURROW_FLAKE_TMPDIRS
# for later removal by burrow_cleanup_flake_tmpdirs.
# NOTE(review): assumes the caller has declared BURROW_FLAKE_TMPDIRS=() —
# appending to an unset array errors under `set -u` on bash < 4.4; confirm
# every sourcing script initializes it.
burrow_prepare_flake_ref() {
  local input="${1:-.}"

  # Pass through anything that is already a flake URL, not a filesystem path.
  case "${input}" in
    path:*|git+*|github:*|tarball+*|http://*|https://*)
      printf '%s\n' "${input}"
      return 0
      ;;
  esac

  # Canonicalize the path; fails (and aborts under callers' set -e) if absent.
  local resolved
  resolved="$(cd "${input}" && pwd)"

  local cache_root="${HOME}/.cache/burrow"
  mkdir -p "${cache_root}"

  local copy_root
  copy_root="$(mktemp -d "${cache_root}/flake-XXXXXX")"
  mkdir -p "${copy_root}/repo"

  # Mirror the tree, excluding VCS data and common build/runtime artifacts.
  rsync -a \
    --delete \
    --exclude '.git' \
    --exclude '.direnv' \
    --exclude 'result' \
    --exclude 'burrow.sock' \
    --exclude 'node_modules' \
    --exclude 'target' \
    --exclude 'build' \
    "${resolved}/" "${copy_root}/repo/"

  # Register for cleanup, then emit the flake reference for the caller.
  BURROW_FLAKE_TMPDIRS+=("${copy_root}")
  printf 'path:%s/repo\n' "${copy_root}"
}
|
||||
|
||||
# Resolve the on-disk image artifact for a nix build result.
# $1 - a store path: either the artifact file itself, or a directory that
#      contains a *.raw/*.img (optionally compressed) file somewhere inside.
# Prints the artifact path on stdout; exits 1 when nothing matches.
burrow_resolve_image_artifact() {
  local root="$1"
  local artifact=""

  if [[ -f "${root}" ]]; then
    # A plain file is taken as-is, whatever its name.
    artifact="${root}"
  elif [[ -d "${root}" ]]; then
    # Pick the lexically-first matching file for deterministic output.
    artifact="$(
      find "${root}" -type f \
        \( -name '*.raw' -o -name '*.raw.*' -o -name '*.img' -o -name '*.img.*' \) \
        | sort \
        | head -n1
    )"
  fi

  if [[ -n "${artifact}" ]]; then
    printf '%s\n' "${artifact}"
    return 0
  fi

  echo "unable to locate disk image artifact under ${root}" >&2
  exit 1
}
|
||||
|
||||
# Map an artifact filename to the compression name expected downstream
# (bz2 / xz / zstd). Prints an empty line for uncompressed artifacts.
burrow_detect_compression() {
  local image_file="$1"
  local kind=""

  case "${image_file}" in
    *.bz2) kind='bz2' ;;
    *.xz) kind='xz' ;;
    *.zst|*.zstd) kind='zstd' ;;
  esac

  printf '%s\n' "${kind}"
}
|
||||
113
Scripts/bootstrap-forge-intake.sh
Normal file
113
Scripts/bootstrap-forge-intake.sh
Normal file
|
|
@ -0,0 +1,113 @@
|
|||
#!/usr/bin/env bash
# Copy the minimal Burrow forge bootstrap secrets onto the target host under
# /var/lib/burrow/intake, with the ownership the NixOS services expect.
set -euo pipefail

# Resolve the repository root relative to this script's own location.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"

# Print CLI help to stdout (quoted heredoc: no variable expansion).
usage() {
  cat <<'EOF'
Usage: Scripts/bootstrap-forge-intake.sh [options]

Copy the minimum Burrow forge bootstrap secrets onto the target host under
/var/lib/burrow/intake with the ownership expected by the NixOS services.

Options:
  --host <user@host>       SSH target (default: root@git.burrow.net)
  --ssh-key <path>         SSH private key used to reach the host
                           (default: intake/agent_at_burrow_net_ed25519)
  --password-file <path>   Forgejo admin bootstrap password file
                           (default: intake/forgejo_pass_contact_at_burrow_net.txt)
  --agent-key-file <path>  Agent SSH private key copied for runner bootstrap
                           (default: intake/agent_at_burrow_net_ed25519)
  --no-verify              Skip remote ls/stat verification after install
  -h, --help               Show this help text
EOF
}
|
||||
|
||||
# Defaults; each can be overridden via environment or the CLI flags below.
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
PASSWORD_FILE="${BURROW_FORGE_PASSWORD_FILE:-${REPO_ROOT}/intake/forgejo_pass_contact_at_burrow_net.txt}"
AGENT_KEY_FILE="${BURROW_FORGE_AGENT_KEY_FILE:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
VERIFY=1

# Manual long-option parsing; "${2:?...}" aborts with a message when a flag
# that takes a value is given none.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --host)
      HOST="${2:?missing value for --host}"
      shift 2
      ;;
    --ssh-key)
      SSH_KEY="${2:?missing value for --ssh-key}"
      shift 2
      ;;
    --password-file)
      PASSWORD_FILE="${2:?missing value for --password-file}"
      shift 2
      ;;
    --agent-key-file)
      AGENT_KEY_FILE="${2:?missing value for --agent-key-file}"
      shift 2
      ;;
    --no-verify)
      VERIFY=0
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done

mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"

# All three local inputs must exist and be non-empty before touching the host.
for path in "${SSH_KEY}" "${PASSWORD_FILE}" "${AGENT_KEY_FILE}"; do
  if [[ ! -s "${path}" ]]; then
    echo "required file missing or empty: ${path}" >&2
    exit 1
  fi
done

# Shared options for every ssh/scp call: pinned identity plus a dedicated
# known_hosts file; accept-new trusts the host key on first contact only.
ssh_opts=(
  -i "${SSH_KEY}"
  -o IdentitiesOnly=yes
  -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}"
  -o StrictHostKeyChecking=accept-new
)

# Stage files in a remote temp dir; the EXIT trap removes it on any path.
remote_tmp="$(ssh "${ssh_opts[@]}" "${HOST}" "mktemp -d")"
cleanup() {
  if [[ -n "${remote_tmp:-}" ]]; then
    ssh "${ssh_opts[@]}" "${HOST}" "rm -rf '${remote_tmp}'" >/dev/null 2>&1 || true
  fi
}
trap cleanup EXIT

scp "${ssh_opts[@]}" \
  "${PASSWORD_FILE}" \
  "${AGENT_KEY_FILE}" \
  "${HOST}:${remote_tmp}/"

# install(1) sets mode and ownership in one step. The $(basename ...) calls
# expand locally, before the command string is sent to the remote shell.
ssh "${ssh_opts[@]}" "${HOST}" "
  set -euo pipefail
  install -d -m 0755 /var/lib/burrow/intake
  install -m 0400 -o forgejo -g forgejo '${remote_tmp}/$(basename "${PASSWORD_FILE}")' /var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt
  install -m 0400 -o root -g root '${remote_tmp}/$(basename "${AGENT_KEY_FILE}")' /var/lib/burrow/intake/agent_at_burrow_net_ed25519
"

# Optional post-install listing so the operator can eyeball mode/ownership.
if [[ "${VERIFY}" -eq 1 ]]; then
  ssh "${ssh_opts[@]}" "${HOST}" "
    set -euo pipefail
    ls -l \
      /var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt \
      /var/lib/burrow/intake/agent_at_burrow_net_ed25519
  "
fi

echo "Burrow forge bootstrap intake sync complete (host=${HOST})."
|
||||
143
Scripts/check-forge-host.sh
Executable file
143
Scripts/check-forge-host.sh
Executable file
|
|
@ -0,0 +1,143 @@
|
|||
#!/usr/bin/env bash
# Post-boot verification pass for the Burrow forge host, executed over SSH.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"

# Print CLI help to stdout (quoted heredoc: no variable expansion).
usage() {
  cat <<'EOF'
Usage: Scripts/check-forge-host.sh [options]

Run a post-boot verification pass against the Burrow forge host.

Options:
  --host <user@host>  SSH target (default: root@git.burrow.net)
  --ssh-key <path>    SSH private key (default: intake/agent_at_burrow_net_ed25519)
  --expect-nsc        Fail if forgejo-nsc services are not active
  -h, --help          Show this help text
EOF
}
|
||||
|
||||
# Defaults, overridable via environment or the flags parsed below.
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
EXPECT_NSC=0

# Manual long-option parsing; unknown flags exit 64 (EX_USAGE).
while [[ $# -gt 0 ]]; do
  case "$1" in
    --host)
      HOST="${2:?missing value for --host}"
      shift 2
      ;;
    --ssh-key)
      SSH_KEY="${2:?missing value for --ssh-key}"
      shift 2
      ;;
    --expect-nsc)
      EXPECT_NSC=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done

mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"

# The key must exist locally before we attempt the connection.
if [[ ! -f "${SSH_KEY}" ]]; then
  echo "forge SSH key not found: ${SSH_KEY}" >&2
  exit 1
fi
|
||||
|
||||
# Run the whole verification script on the remote host. EXPECT_NSC is passed
# as an env assignment prefixed to the remote `bash -s` command; the quoted
# 'EOF' heredoc ships the script verbatim (no local expansion).
ssh \
  -i "${SSH_KEY}" \
  -o IdentitiesOnly=yes \
  -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \
  -o StrictHostKeyChecking=accept-new \
  "${HOST}" \
  EXPECT_NSC="${EXPECT_NSC}" \
  'bash -s' <<'EOF'
set -euo pipefail

# Services that must always be healthy on the forge host.
base_services=(
  forgejo.service
  caddy.service
  burrow-forgejo-bootstrap.service
  burrow-forgejo-runner-bootstrap.service
  burrow-forgejo-runner.service
)

# NSC services are only mandatory when EXPECT_NSC=1.
nsc_services=(
  forgejo-nsc-dispatcher.service
  forgejo-nsc-autoscaler.service
)

# Dump the key state properties of one systemd unit for the log.
show_service() {
  local service="$1"
  systemctl show \
    --no-pager \
    --property Id \
    --property LoadState \
    --property UnitFileState \
    --property ActiveState \
    --property SubState \
    --property Result \
    "${service}"
}

# A unit counts as healthy when it is active, or when it is a oneshot that
# already ran to successful completion (inactive + Result=success).
service_is_healthy() {
  local service="$1"
  local active_state
  local result
  local unit_type

  active_state="$(systemctl show --property ActiveState --value "${service}")"
  result="$(systemctl show --property Result --value "${service}")"
  unit_type="$(systemctl show --property Type --value "${service}")"

  if [[ "${active_state}" == "active" ]]; then
    return 0
  fi

  if [[ "${unit_type}" == "oneshot" && "${active_state}" == "inactive" && "${result}" == "success" ]]; then
    return 0
  fi

  return 1
}

for service in "${base_services[@]}"; do
  echo "== ${service} =="
  show_service "${service}"
  if ! service_is_healthy "${service}"; then
    echo "required service is not active: ${service}" >&2
    exit 1
  fi
done

# NSC units are always displayed, but only enforced under EXPECT_NSC=1.
for service in "${nsc_services[@]}"; do
  echo "== ${service} =="
  show_service "${service}" || true
  if [[ "${EXPECT_NSC}" == "1" && "$(systemctl is-active "${service}" 2>/dev/null || true)" != "active" ]]; then
    echo "required NSC service is not active: ${service}" >&2
    exit 1
  fi
done

# Informational: list the bootstrap secrets dropped by the intake script.
echo "== intake =="
ls -l /var/lib/burrow/intake || true

# Local HTTP smoke tests (direct Forgejo port plus the two Caddy vhosts).
if command -v curl >/dev/null 2>&1; then
  echo "== http-local =="
  curl -fsS -o /dev/null -w 'forgejo_login %{http_code}\n' http://127.0.0.1:3000/user/login
  curl -fsS -o /dev/null -H 'Host: burrow.net' -w 'burrow_root %{http_code}\n' http://127.0.0.1/
  curl -fsS -o /dev/null -H 'Host: git.burrow.net' -w 'git_login %{http_code}\n' http://127.0.0.1/user/login
fi
EOF
|
||||
165
Scripts/cloudflare-upsert-a-record.sh
Executable file
165
Scripts/cloudflare-upsert-a-record.sh
Executable file
|
|
@ -0,0 +1,165 @@
|
|||
#!/usr/bin/env bash
# Upsert a Cloudflare A record. The API token is read from a file and only
# ever passed via an HTTP header, so it never appears on the process list.
set -euo pipefail

# Print CLI help to stdout (quoted heredoc: no variable expansion).
usage() {
  cat <<'EOF'
Usage: Scripts/cloudflare-upsert-a-record.sh --zone <zone> --name <fqdn> --ipv4 <address> [options]

Upsert a DNS-only or proxied Cloudflare A record without putting the API token on
the process list.

Options:
  --zone <zone>           Cloudflare zone name, for example burrow.net
  --name <fqdn>           Fully-qualified DNS record name
  --ipv4 <address>        IPv4 address for the A record
  --token-file <path>     Cloudflare API token file
                          default: intake/cloudflare-token.txt
  --ttl <seconds|auto>    Record TTL, or auto
                          default: auto
  --proxied <true|false>  Whether to proxy through Cloudflare
                          default: false
  -h, --help              Show this help
EOF
}
|
||||
|
||||
# Required arguments start empty; the rest have usable defaults.
ZONE_NAME=""
RECORD_NAME=""
IPV4=""
TOKEN_FILE="intake/cloudflare-token.txt"
TTL_VALUE="auto"
PROXIED="false"

# Manual long-option parsing; unknown flags exit 2 with usage on stderr.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --zone)
      ZONE_NAME="${2:?missing value for --zone}"
      shift 2
      ;;
    --name)
      RECORD_NAME="${2:?missing value for --name}"
      shift 2
      ;;
    --ipv4)
      IPV4="${2:?missing value for --ipv4}"
      shift 2
      ;;
    --token-file)
      TOKEN_FILE="${2:?missing value for --token-file}"
      shift 2
      ;;
    --ttl)
      TTL_VALUE="${2:?missing value for --ttl}"
      shift 2
      ;;
    --proxied)
      PROXIED="${2:?missing value for --proxied}"
      shift 2
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "Unknown argument: $1" >&2
      usage >&2
      exit 2
      ;;
  esac
done

# Zone, record name and address are mandatory.
if [[ -z "${ZONE_NAME}" || -z "${RECORD_NAME}" || -z "${IPV4}" ]]; then
  usage >&2
  exit 2
fi

if [[ ! -f "${TOKEN_FILE}" ]]; then
  echo "Cloudflare token file not found: ${TOKEN_FILE}" >&2
  exit 1
fi

# Shape check only: 1-3 digit dotted quads. NOTE(review): this accepts
# out-of-range octets like 999.1.1.1 — the API would reject those anyway.
if [[ ! "${IPV4}" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
  echo "Invalid IPv4 address: ${IPV4}" >&2
  exit 1
fi

case "${PROXIED}" in
  true|false)
    ;;
  *)
    echo "--proxied must be true or false" >&2
    exit 1
    ;;
esac

# Cloudflare encodes "automatic TTL" as the literal value 1.
case "${TTL_VALUE}" in
  auto)
    TTL_JSON=1
    ;;
  ''|*[!0-9]*)
    echo "--ttl must be a number of seconds or auto" >&2
    exit 1
    ;;
  *)
    TTL_JSON="${TTL_VALUE}"
    ;;
esac

# Strip CR/LF so a trailing newline in the token file cannot break the header.
TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")"
if [[ -z "${TOKEN}" ]]; then
  echo "Cloudflare token file is empty: ${TOKEN_FILE}" >&2
  exit 1
fi
|
||||
|
||||
# Thin wrapper over the Cloudflare v4 REST API.
# $1 - HTTP method, $2 - path under /client/v4, $3 - optional JSON body.
# Uses the TOKEN read earlier; passes it only as a header, never in argv paths.
cf_api() {
  local method="$1"
  local path="$2"
  local body="${3-}"

  local -a curl_args=(
    -fsS
    -X "${method}"
    -H "Authorization: Bearer ${TOKEN}"
    -H "Content-Type: application/json"
  )
  if [[ -n "${body}" ]]; then
    curl_args+=(--data "${body}")
  fi

  curl "${curl_args[@]}" "https://api.cloudflare.com/client/v4${path}"
}
|
||||
|
||||
# Resolve the zone ID for the requested zone name (active zones only).
zone_lookup="$(cf_api GET "/zones?name=${ZONE_NAME}&status=active")"
zone_id="$(jq -r '.result[0].id // empty' <<<"${zone_lookup}")"

if [[ -z "${zone_id}" ]]; then
  echo "Active Cloudflare zone not found: ${ZONE_NAME}" >&2
  exit 1
fi

# Build the record payload with jq so every value is properly JSON-escaped.
payload="$(jq -cn \
  --arg type "A" \
  --arg name "${RECORD_NAME}" \
  --arg content "${IPV4}" \
  --argjson proxied "${PROXIED}" \
  --argjson ttl "${TTL_JSON}" \
  '{type: $type, name: $name, content: $content, proxied: $proxied, ttl: $ttl}')"

# Look for an existing A record with the same name to decide update vs create.
record_lookup="$(cf_api GET "/zones/${zone_id}/dns_records?type=A&name=${RECORD_NAME}")"
record_id="$(jq -r '.result[0].id // empty' <<<"${record_lookup}")"

if [[ -n "${record_id}" ]]; then
  result="$(cf_api PUT "/zones/${zone_id}/dns_records/${record_id}" "${payload}")"
  action="updated"
else
  result="$(cf_api POST "/zones/${zone_id}/dns_records" "${payload}")"
  action="created"
fi

# Report the outcome; halt_error(1) makes jq exit non-zero on API failure.
jq -r --arg action "${action}" '
  if .success != true then
    .errors | tostring | halt_error(1)
  else
    "Cloudflare DNS " + $action + ": " + .result.name + " -> " + .result.content +
    " (proxied=" + (.result.proxied | tostring) + ", ttl=" + (.result.ttl | tostring) + ")"
  end
' <<<"${result}"
|
||||
100
Scripts/forge-deploy.sh
Executable file
100
Scripts/forge-deploy.sh
Executable file
|
|
@ -0,0 +1,100 @@
|
|||
#!/usr/bin/env bash
# Standardized remote deploy path for the Burrow forge host via nixos-rebuild.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"

# Shared flake helpers: burrow_prepare_flake_ref, burrow_cleanup_flake_tmpdirs.
# shellcheck source=Scripts/_burrow-flake.sh
source "${SCRIPT_DIR}/_burrow-flake.sh"

# Print CLI help to stdout (quoted heredoc: no variable expansion).
usage() {
  cat <<'EOF'
Usage: Scripts/forge-deploy.sh [--test|--switch] [--flake-attr <attr>] [--allow-dirty]

Standardized remote deploy path for the Burrow forge host.

Defaults:
  --switch
  --flake-attr burrow-forge

Environment:
  BURROW_FORGE_HOST     root@git.burrow.net
  BURROW_FORGE_SSH_KEY  intake/agent_at_burrow_net_ed25519
EOF
}
|
||||
|
||||
# Defaults; MODE maps straight onto the nixos-rebuild subcommand.
MODE="switch"
FLAKE_ATTR="burrow-forge"
ALLOW_DIRTY=0
# Declared here so the sourced helpers can append temp dirs to it.
BURROW_FLAKE_TMPDIRS=()

# Remove any temp flake copies on every exit path.
cleanup() {
  burrow_cleanup_flake_tmpdirs
}
trap cleanup EXIT

# Manual long-option parsing; unknown flags exit 2 with usage on stderr.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --test)
      MODE="test"
      shift
      ;;
    --switch)
      MODE="switch"
      shift
      ;;
    --flake-attr)
      FLAKE_ATTR="${2:?missing value for --flake-attr}"
      shift 2
      ;;
    --allow-dirty)
      ALLOW_DIRTY=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "Unknown argument: $1" >&2
      usage >&2
      exit 2
      ;;
  esac
done

REPO_ROOT="$(git rev-parse --show-toplevel)"
cd "${REPO_ROOT}"

# Deploys must come from a committed tree unless explicitly overridden.
if [[ ${ALLOW_DIRTY} -ne 1 ]] && [[ -n "$(git status --short)" ]]; then
  echo "Refusing to deploy from a dirty checkout. Commit first, or pass --allow-dirty for incident-only work." >&2
  exit 1
fi

FORGE_HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
FORGE_SSH_KEY="${BURROW_FORGE_SSH_KEY:-}"

# Key resolution order: env override, then the repo intake copy, then ~/.ssh.
if [[ -z "${FORGE_SSH_KEY}" ]]; then
  if [[ -f "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" ]]; then
    FORGE_SSH_KEY="${REPO_ROOT}/intake/agent_at_burrow_net_ed25519"
  else
    FORGE_SSH_KEY="${HOME}/.ssh/agent_at_burrow_net_ed25519"
  fi
fi

if [[ ! -f "${FORGE_SSH_KEY}" ]]; then
  echo "Forge SSH key not found at ${FORGE_SSH_KEY}." >&2
  echo "Set BURROW_FORGE_SSH_KEY or place the agent key in intake/." >&2
  exit 1
fi

FORGE_KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
mkdir -p "$(dirname "${FORGE_KNOWN_HOSTS_FILE}")"

# nixos-rebuild reads NIX_SSHOPTS for its ssh invocations.
# NOTE(review): the value is whitespace-joined, so key/known_hosts paths
# containing spaces would break here — confirm paths stay space-free.
export NIX_SSHOPTS="-i ${FORGE_SSH_KEY} -o IdentitiesOnly=yes -o UserKnownHostsFile=${FORGE_KNOWN_HOSTS_FILE} -o StrictHostKeyChecking=accept-new"
flake_ref="$(burrow_prepare_flake_ref "${REPO_ROOT}")"

# Build on the forge host itself and activate there in one step.
nix --extra-experimental-features "nix-command flakes" shell nixpkgs#nixos-rebuild -c \
  nixos-rebuild "${MODE}" \
    --flake "${flake_ref}#${FLAKE_ATTR}" \
    --build-host "${FORGE_HOST}" \
    --target-host "${FORGE_HOST}"
|
||||
327
Scripts/hcloud-upload-nixos-image.sh
Executable file
327
Scripts/hcloud-upload-nixos-image.sh
Executable file
|
|
@ -0,0 +1,327 @@
|
|||
#!/usr/bin/env bash
# Build a raw Burrow NixOS image and upload it to Hetzner Cloud as a snapshot.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"

# Shared flake/image helpers (burrow_require_cmd, burrow_prepare_flake_ref,
# burrow_resolve_image_artifact, burrow_detect_compression, cleanup).
# shellcheck source=Scripts/_burrow-flake.sh
source "${SCRIPT_DIR}/_burrow-flake.sh"

DEFAULT_CONFIG="burrow-forge"
DEFAULT_FLAKE="."
DEFAULT_LOCATION="hel1"
DEFAULT_ARCHITECTURE="x86"
DEFAULT_TOKEN_FILE="${REPO_ROOT}/intake/hetzner-api-token.txt"

# Every knob can come from the environment; CLI flags below override both.
CONFIG="${HCLOUD_IMAGE_CONFIG:-${DEFAULT_CONFIG}}"
FLAKE="${HCLOUD_IMAGE_FLAKE:-${DEFAULT_FLAKE}}"
LOCATION="${HCLOUD_IMAGE_LOCATION:-${DEFAULT_LOCATION}}"
ARCHITECTURE="${HCLOUD_IMAGE_ARCHITECTURE:-${DEFAULT_ARCHITECTURE}}"
TOKEN_FILE="${HCLOUD_TOKEN_FILE:-${DEFAULT_TOKEN_FILE}}"
DESCRIPTION="${HCLOUD_IMAGE_DESCRIPTION:-}"
UPLOAD_SERVER_TYPE="${HCLOUD_IMAGE_UPLOAD_SERVER_TYPE:-}"
UPLOAD_VERBOSE="${HCLOUD_IMAGE_UPLOAD_VERBOSE:-0}"
ARTIFACT_PATH_INPUT=""
OUTPUT_HASH=""
NO_UPDATE=0
BUILDER_SPEC="${HCLOUD_IMAGE_BUILDER_SPEC:-}"
EXTRA_LABELS=()
NIX_BUILD_FLAGS=()
# Declared for the sourced helpers, which append temp dirs to it.
BURROW_FLAKE_TMPDIRS=()
LOCAL_STORE_DIR=""

# Print CLI help to stdout (quoted heredoc: no variable expansion).
usage() {
  cat <<'EOF'
Usage: Scripts/hcloud-upload-nixos-image.sh [options]

Build a raw Burrow NixOS image and upload it into Hetzner Cloud as a snapshot.

Options:
  --config <name>           images.<name>-raw output to build (default: burrow-forge)
  --flake <path>            Flake path to build from (default: .)
  --location <code>         Hetzner location for the temporary upload server (default: hel1)
  --architecture <x86|arm>  CPU architecture of the image (default: x86)
  --server-type <name>      Hetzner server type for the temporary upload server
  --token-file <path>       Hetzner API token file (default: intake/hetzner-api-token.txt)
  --artifact-path <path>    Prebuilt raw image artifact to upload directly
  --output-hash <hash>      Stable hash label for --artifact-path uploads
  --builder-spec <string>   Complete builders string passed to nix build
  --description <text>      Description for the resulting snapshot
  --upload-verbose <n>      Pass -v N times to hcloud-upload-image
  --label key=value         Extra Hetzner image label (repeatable)
  --nix-flag <arg>          Extra argument passed to nix build (repeatable)
  --no-update               Reuse an existing snapshot with the same config/output hash
  -h, --help                Show this help text
EOF
}
|
||||
|
||||
# Manual long-option parsing; unknown flags exit 64 (EX_USAGE).
while [[ $# -gt 0 ]]; do
  case "$1" in
    --config)
      CONFIG="${2:?missing value for --config}"
      shift 2
      ;;
    --flake)
      FLAKE="${2:?missing value for --flake}"
      shift 2
      ;;
    --location)
      LOCATION="${2:?missing value for --location}"
      shift 2
      ;;
    --architecture)
      ARCHITECTURE="${2:?missing value for --architecture}"
      shift 2
      ;;
    --server-type)
      UPLOAD_SERVER_TYPE="${2:?missing value for --server-type}"
      shift 2
      ;;
    --token-file)
      TOKEN_FILE="${2:?missing value for --token-file}"
      shift 2
      ;;
    --artifact-path)
      ARTIFACT_PATH_INPUT="${2:?missing value for --artifact-path}"
      shift 2
      ;;
    --output-hash)
      OUTPUT_HASH="${2:?missing value for --output-hash}"
      shift 2
      ;;
    --builder-spec)
      BUILDER_SPEC="${2:?missing value for --builder-spec}"
      shift 2
      ;;
    --description)
      DESCRIPTION="${2:?missing value for --description}"
      shift 2
      ;;
    --upload-verbose)
      UPLOAD_VERBOSE="${2:?missing value for --upload-verbose}"
      shift 2
      ;;
    --label)
      # Repeatable: labels accumulate.
      EXTRA_LABELS+=("${2:?missing value for --label}")
      shift 2
      ;;
    --nix-flag)
      # Repeatable: extra nix build arguments accumulate.
      NIX_BUILD_FLAGS+=("${2:?missing value for --nix-flag}")
      shift 2
      ;;
    --no-update)
      NO_UPDATE=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done
|
||||
|
||||
# Remove temp flake copies and the optional throwaway nix store on any exit.
cleanup() {
  burrow_cleanup_flake_tmpdirs
  if [[ -n "${LOCAL_STORE_DIR}" && -d "${LOCAL_STORE_DIR}" ]]; then
    rm -rf "${LOCAL_STORE_DIR}" >/dev/null 2>&1 || true
  fi
}
trap cleanup EXIT

# Hard requirements; each aborts with a clear message when missing.
burrow_require_cmd nix
burrow_require_cmd curl
burrow_require_cmd python3
burrow_require_cmd rsync

if [[ ! -f "${TOKEN_FILE}" ]]; then
  echo "Hetzner API token file not found: ${TOKEN_FILE}" >&2
  exit 1
fi

# Strip CR/LF so a trailing newline cannot corrupt the auth header.
HCLOUD_TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")"
if [[ -z "${HCLOUD_TOKEN}" ]]; then
  echo "Hetzner API token file is empty: ${TOKEN_FILE}" >&2
  exit 1
fi

flake_ref="$(burrow_prepare_flake_ref "${FLAKE}")"

if [[ -z "${DESCRIPTION}" ]]; then
  DESCRIPTION="Burrow ${CONFIG} $(date -u +%Y-%m-%dT%H:%M:%SZ)"
fi

printf 'Building raw image for %s from %s\n' "${CONFIG}" "${flake_ref}" >&2

# When delegating to remote builders without an explicit store, use a local
# throwaway store so the build result does not pollute the system store.
if [[ -z "${ARTIFACT_PATH_INPUT}" && -n "${BUILDER_SPEC}" && -z "${NIX_BUILD_STORE:-}" ]]; then
  mkdir -p "${HOME}/.cache/burrow"
  LOCAL_STORE_DIR="$(mktemp -d "${HOME}/.cache/burrow/local-store-XXXXXX")"
fi

artifact_path=""
compression=""
output_hash="${OUTPUT_HASH}"
if [[ -n "${ARTIFACT_PATH_INPUT}" ]]; then
  # Direct-upload path: validate the prebuilt artifact and derive a hash
  # label (sha256sum on Linux, shasum fallback on macOS/BSD).
  artifact_path="${ARTIFACT_PATH_INPUT}"
  if [[ ! -f "${artifact_path}" ]]; then
    echo "artifact path does not exist: ${artifact_path}" >&2
    exit 1
  fi
  compression="$(burrow_detect_compression "${artifact_path}")"
  if [[ -z "${output_hash}" ]]; then
    if command -v sha256sum >/dev/null 2>&1; then
      output_hash="$(sha256sum "${artifact_path}" | awk '{print $1}')"
    else
      output_hash="$(shasum -a 256 "${artifact_path}" | awk '{print $1}')"
    fi
  fi
else
  # Build path: assemble the nix build invocation as an array.
  nix_build_cmd=(
    nix
    --extra-experimental-features
    "nix-command flakes"
    build
    "${flake_ref}#images.${CONFIG}-raw"
    --no-link
    --print-out-paths
  )

  if [[ -n "${BUILDER_SPEC}" ]]; then
    nix_build_cmd+=(--builders "${BUILDER_SPEC}")
  fi
  if [[ -n "${NIX_BUILD_STORE:-}" ]]; then
    nix_build_cmd+=(--store "${NIX_BUILD_STORE}")
  elif [[ -n "${LOCAL_STORE_DIR}" ]]; then
    nix_build_cmd+=(--store "${LOCAL_STORE_DIR}")
  fi

  if [[ "${#NIX_BUILD_FLAGS[@]}" -gt 0 ]]; then
    nix_build_cmd+=("${NIX_BUILD_FLAGS[@]}")
  fi

  # Capture combined output; on failure, replay it to stderr for diagnosis.
  build_output=""
  if ! build_output="$("${nix_build_cmd[@]}" 2>&1)"; then
    printf '%s\n' "${build_output}" >&2
    exit 1
  fi

  # --print-out-paths emits the store path last (after any build chatter).
  store_path="$(printf '%s\n' "${build_output}" | tail -n1)"
  if [[ -z "${store_path}" ]]; then
    echo "nix build did not return a store path" >&2
    printf '%s\n' "${build_output}" >&2
    exit 1
  fi

  artifact_path="$(burrow_resolve_image_artifact "${store_path}")"
  compression="$(burrow_detect_compression "${artifact_path}")"
  # The store-path hash prefix (before the first '-') is the stable label.
  output_hash="$(basename "${store_path}")"
  output_hash="${output_hash%%-*}"
fi

# Labels attached to the snapshot; later looked up to find/reuse images.
label_args=(
  "burrow.nixos-config=${CONFIG}"
  "burrow.nixos-output-hash=${output_hash}"
)
if [[ "${#EXTRA_LABELS[@]}" -gt 0 ]]; then
  label_args+=("${EXTRA_LABELS[@]}")
fi
label_csv="$(IFS=,; printf '%s' "${label_args[*]}")"

# Query the Hetzner API for the newest snapshot carrying our label pair.
# Token and selector travel via environment, never on the argv.
find_existing_image() {
  HCLOUD_TOKEN="${HCLOUD_TOKEN}" \
  BURROW_LABEL_SELECTOR="burrow.nixos-config=${CONFIG},burrow.nixos-output-hash=${output_hash}" \
  python3 - <<'PY'
import json
import os
import sys
import urllib.parse
import urllib.request

selector = urllib.parse.quote(os.environ["BURROW_LABEL_SELECTOR"], safe=",=")
req = urllib.request.Request(
    f"https://api.hetzner.cloud/v1/images?type=snapshot&label_selector={selector}",
    headers={"Authorization": f"Bearer {os.environ['HCLOUD_TOKEN']}"},
)
with urllib.request.urlopen(req, timeout=30) as resp:
    data = json.load(resp)

images = sorted(data.get("images", []), key=lambda item: item.get("created") or "")
if images:
    print(images[-1]["id"])
PY
}

# --no-update: short-circuit when a matching snapshot already exists.
if [[ "${NO_UPDATE}" -eq 1 ]]; then
  existing_id="$(find_existing_image || true)"
  if [[ -n "${existing_id}" ]]; then
    printf 'Reusing existing Hetzner snapshot %s for %s\n' "${existing_id}" "${CONFIG}" >&2
    printf '%s\n' "${existing_id}"
    exit 0
  fi
fi

# Resolve the uploader binary: explicit override, else build it via the flake.
uploader_bin="${HCLOUD_UPLOAD_IMAGE_BIN:-}"
if [[ -z "${uploader_bin}" ]]; then
  uploader_build_output="$(
    nix --extra-experimental-features "nix-command flakes" build \
      "${flake_ref}#hcloud-upload-image" \
      --no-link \
      --print-out-paths 2>&1
  )" || {
    printf '%s\n' "${uploader_build_output}" >&2
    exit 1
  }
  uploader_bin="$(printf '%s\n' "${uploader_build_output}" | tail -n1)/bin/hcloud-upload-image"
fi

if [[ ! -x "${uploader_bin}" ]]; then
  echo "unable to resolve an executable hcloud-upload-image binary; set HCLOUD_UPLOAD_IMAGE_BIN explicitly" >&2
  exit 1
fi

# Assemble the upload command as an array; -v is repeated UPLOAD_VERBOSE times.
upload_cmd=(
  "${uploader_bin}"
)
if [[ "${UPLOAD_VERBOSE}" =~ ^[0-9]+$ ]] && [[ "${UPLOAD_VERBOSE}" -gt 0 ]]; then
  for _ in $(seq 1 "${UPLOAD_VERBOSE}"); do
    upload_cmd+=(-v)
  done
fi
upload_cmd+=(
  upload
  --image-path "${artifact_path}"
  --location "${LOCATION}"
  --description "${DESCRIPTION}"
  --labels "${label_csv}"
)
# An explicit server type wins; otherwise select by CPU architecture.
if [[ -n "${UPLOAD_SERVER_TYPE}" ]]; then
  upload_cmd+=(--server-type "${UPLOAD_SERVER_TYPE}")
else
  upload_cmd+=(--architecture "${ARCHITECTURE}")
fi
if [[ -n "${compression}" ]]; then
  upload_cmd+=(--compression "${compression}")
fi

printf 'Uploading %s to Hetzner Cloud via %s\n' "${artifact_path}" "${uploader_bin}" >&2
# Token passed via environment for this one invocation only.
HCLOUD_TOKEN="${HCLOUD_TOKEN}" "${upload_cmd[@]}" >&2

# Poll (up to 24 x 5s = 2 minutes) for the snapshot to become visible.
image_id=""
for _ in $(seq 1 24); do
  image_id="$(find_existing_image || true)"
  if [[ -n "${image_id}" ]]; then
    break
  fi
  sleep 5
done

if [[ -z "${image_id}" ]]; then
  echo "failed to locate uploaded Hetzner snapshot after upload completed" >&2
  exit 1
fi

# The snapshot ID is the script's sole stdout output, for easy capture.
printf '%s\n' "${image_id}"
|
||||
284
Scripts/hetzner-forge.sh
Executable file
284
Scripts/hetzner-forge.sh
Executable file
|
|
@ -0,0 +1,284 @@
|
|||
#!/usr/bin/env bash
# Manage the Burrow forge server and its Hetzner snapshot lifecycle.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"

# Print CLI help to stdout (quoted heredoc: no variable expansion).
usage() {
  cat <<'EOF'
Usage: Scripts/hetzner-forge.sh [show|create|delete|recreate|build-image|create-from-image|recreate-from-image] [options]

Manage the Burrow forge server and its Hetzner snapshot lifecycle.

Defaults:
  action:      show
  server-name: burrow-forge
  server-type: ccx23
  location:    hel1
  image:       ubuntu-24.04
  ssh keys:    contact@burrow.net,agent@burrow.net

Options:
  --server-name <name>      Server name to manage.
  --server-type <type>      Hetzner server type.
  --location <code>         Hetzner location.
  --image <name|id>         Image used at create time.
  --config <name>           Burrow image config name for snapshot lookup/build (default: burrow-forge).
  --ssh-key <name>          SSH key name to attach. Repeatable.
  --token-file <path>       Hetzner API token file.
  --flake <path>            Flake path used by image-build actions (default: .)
  --upload-location <code>  Hetzner location used for image upload (default: same as --location)
  --yes                     Required for delete and recreate.
  -h, --help                Show this help text.

Environment:
  HCLOUD_TOKEN_FILE  Defaults to intake/hetzner-api-token.txt
EOF
}
|
||||
|
||||
# Defaults; the optional leading positional argument selects the action.
ACTION="show"
SERVER_NAME="burrow-forge"
SERVER_TYPE="ccx23"
LOCATION="hel1"
IMAGE="ubuntu-24.04"
CONFIG="burrow-forge"
FLAKE="."
UPLOAD_LOCATION=""
TOKEN_FILE="${HCLOUD_TOKEN_FILE:-intake/hetzner-api-token.txt}"
YES=0
SSH_KEYS=("contact@burrow.net" "agent@burrow.net")

# Consume the action word only if it is a recognized verb; anything else
# falls through to the flag parser (and errors there if unknown).
if [[ $# -gt 0 ]]; then
  case "$1" in
    show|create|delete|recreate|build-image|create-from-image|recreate-from-image)
      ACTION="$1"
      shift
      ;;
  esac
fi

# Manual long-option parsing; unknown flags exit 2 with usage on stderr.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --server-name)
      SERVER_NAME="${2:?missing value for --server-name}"
      shift 2
      ;;
    --server-type)
      SERVER_TYPE="${2:?missing value for --server-type}"
      shift 2
      ;;
    --location)
      LOCATION="${2:?missing value for --location}"
      shift 2
      ;;
    --image)
      IMAGE="${2:?missing value for --image}"
      shift 2
      ;;
    --config)
      CONFIG="${2:?missing value for --config}"
      shift 2
      ;;
    --ssh-key)
      # Repeatable: appends to the default key list rather than replacing it.
      SSH_KEYS+=("${2:?missing value for --ssh-key}")
      shift 2
      ;;
    --token-file)
      TOKEN_FILE="${2:?missing value for --token-file}"
      shift 2
      ;;
    --flake)
      FLAKE="${2:?missing value for --flake}"
      shift 2
      ;;
    --upload-location)
      UPLOAD_LOCATION="${2:?missing value for --upload-location}"
      shift 2
      ;;
    --yes)
      YES=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "Unknown argument: $1" >&2
      usage >&2
      exit 2
      ;;
  esac
done

if [[ ! -f "${TOKEN_FILE}" ]]; then
  echo "Hetzner API token file not found: ${TOKEN_FILE}" >&2
  exit 1
fi

# Image uploads default to the same location as the server itself.
if [[ -z "${UPLOAD_LOCATION}" ]]; then
  UPLOAD_LOCATION="${LOCATION}"
fi

# Destructive actions demand an explicit --yes acknowledgement.
if [[ "${ACTION}" == "delete" || "${ACTION}" == "recreate" || "${ACTION}" == "recreate-from-image" ]] && [[ ${YES} -ne 1 ]]; then
  echo "--yes is required for ${ACTION}" >&2
  exit 1
fi
|
||||
|
||||
# Print the ID of the newest Hetzner snapshot labeled for ${CONFIG}, or
# nothing when none exists. Token and config travel via environment so the
# token never appears on the process list.
latest_snapshot_id() {
  HCLOUD_TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")" \
  BURROW_CONFIG="${CONFIG}" \
  python3 - <<'PY'
import json
import os
import urllib.parse
import urllib.request

selector = urllib.parse.quote(f"burrow.nixos-config={os.environ['BURROW_CONFIG']}", safe=",=")
req = urllib.request.Request(
    f"https://api.hetzner.cloud/v1/images?type=snapshot&label_selector={selector}",
    headers={"Authorization": f"Bearer {os.environ['HCLOUD_TOKEN']}"},
)
with urllib.request.urlopen(req, timeout=30) as resp:
    data = json.load(resp)
# Newest by creation timestamp wins; missing timestamps sort first.
images = sorted(data.get("images", []), key=lambda item: item.get("created") or "")
if images:
    print(images[-1]["id"])
PY
}
|
||||
|
||||
# build-image delegates entirely to the Namespace builder pipeline.
if [[ "${ACTION}" == "build-image" ]]; then
  exec "${SCRIPT_DIR}/nsc-build-and-upload-image.sh" \
    --config "${CONFIG}" \
    --flake "${FLAKE}" \
    --location "${UPLOAD_LOCATION}" \
    --upload-server-type "${SERVER_TYPE}" \
    --token-file "${TOKEN_FILE}"
fi

# The *-from-image actions resolve the newest Burrow snapshot (unless the
# caller pinned --image to something other than the default) and then fall
# through to the plain create/recreate paths.
case "${ACTION}" in
  create-from-image|recreate-from-image)
    if [[ "${IMAGE}" == "ubuntu-24.04" ]]; then
      IMAGE="$(latest_snapshot_id)"
    fi
    if [[ -z "${IMAGE}" ]]; then
      echo "No Burrow snapshot found for config ${CONFIG}. Run build-image first." >&2
      exit 1
    fi
    # create-from-image -> create, recreate-from-image -> recreate.
    ACTION="${ACTION%-from-image}"
    ;;
esac
|
||||
|
||||
# Join the collected SSH key names/ids into a comma-separated list for the
# API payload. The expansion is guarded by a length check so an empty array
# is safe even under `set -u` on bash < 4.4, where "${SSH_KEYS[@]}" on an
# empty array triggers an "unbound variable" error.
ssh_keys_csv=""
if [[ "${#SSH_KEYS[@]}" -gt 0 ]]; then
  ssh_keys_csv="$(IFS=','; printf '%s' "${SSH_KEYS[*]}")"
fi
|
||||
|
||||
# Hand the resolved settings to the inline Python driver via the environment.
export BURROW_HCLOUD_ACTION="${ACTION}"
export BURROW_HCLOUD_SERVER_NAME="${SERVER_NAME}"
export BURROW_HCLOUD_SERVER_TYPE="${SERVER_TYPE}"
export BURROW_HCLOUD_LOCATION="${LOCATION}"
export BURROW_HCLOUD_IMAGE="${IMAGE}"
export BURROW_HCLOUD_TOKEN_FILE="${TOKEN_FILE}"
export BURROW_HCLOUD_SSH_KEYS="${ssh_keys_csv}"

# Hetzner Cloud REST driver for show/create/delete/recreate. Uses only the
# standard library (urllib) instead of the third-party `requests` package so
# it has no extra Python dependencies, consistent with latest_snapshot_id()
# above. urllib raises HTTPError on non-2xx responses, matching the previous
# raise_for_status() behaviour.
python3 - <<'PY'
import json
import os
import sys
import urllib.parse
import urllib.request
from pathlib import Path

BASE = "https://api.hetzner.cloud/v1"
action = os.environ["BURROW_HCLOUD_ACTION"]
server_name = os.environ["BURROW_HCLOUD_SERVER_NAME"]
server_type = os.environ["BURROW_HCLOUD_SERVER_TYPE"]
location = os.environ["BURROW_HCLOUD_LOCATION"]
image = os.environ["BURROW_HCLOUD_IMAGE"]
token = Path(os.environ["BURROW_HCLOUD_TOKEN_FILE"]).read_text(encoding="utf-8").strip()
ssh_keys = [key for key in os.environ["BURROW_HCLOUD_SSH_KEYS"].split(",") if key]


def request(method, path, params=None, payload=None):
    """Issue one API call; raises urllib.error.HTTPError on non-2xx."""
    url = f"{BASE}{path}"
    if params:
        url = f"{url}?{urllib.parse.urlencode(params)}"
    body = json.dumps(payload).encode("utf-8") if payload is not None else None
    req = urllib.request.Request(
        url,
        data=body,
        method=method,
        headers={
            "Authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        },
    )
    with urllib.request.urlopen(req, timeout=30) as resp:
        raw = resp.read()
    return json.loads(raw) if raw else {}


def find_server():
    """Return the server dict whose name matches exactly, or None."""
    data = request("GET", "/servers", params={"name": server_name})
    for server in data.get("servers", []):
        if server.get("name") == server_name:
            return server
    return None


def summarize(server):
    """Print a compact JSON summary of the interesting server fields."""
    ipv4 = (((server.get("public_net") or {}).get("ipv4")) or {}).get("ip")
    image_name = ((server.get("image") or {}).get("name")) or ""
    summary = {
        "id": server.get("id"),
        "name": server.get("name"),
        "status": server.get("status"),
        "server_type": ((server.get("server_type") or {}).get("name")),
        "location": ((server.get("location") or {}).get("name")),
        "image": image_name,
        "ipv4": ipv4,
        "created": server.get("created"),
    }
    print(json.dumps(summary, indent=2))


server = find_server()

if action == "show":
    if server is None:
        print(json.dumps({"name": server_name, "present": False}, indent=2))
    else:
        summarize(server)
    sys.exit(0)

if action == "delete":
    if server is None:
        print(json.dumps({"name": server_name, "deleted": False, "reason": "not found"}, indent=2))
        sys.exit(0)
    request("DELETE", f"/servers/{server['id']}")
    print(json.dumps({"name": server_name, "deleted": True, "id": server["id"]}, indent=2))
    sys.exit(0)

# recreate = delete the existing server (if any), then create below.
if action == "recreate" and server is not None:
    request("DELETE", f"/servers/{server['id']}")
    server = None

if action in {"create", "recreate"}:
    if server is not None:
        # Idempotent create: the server already exists, just report it.
        summarize(server)
        sys.exit(0)

    payload = {
        "name": server_name,
        "server_type": server_type,
        "location": location,
        "image": image,
        "ssh_keys": ssh_keys,
        "labels": {
            "project": "burrow",
            "role": "forge",
        },
    }
    created = request("POST", "/servers", payload=payload)["server"]
    summarize(created)
    sys.exit(0)

raise SystemExit(f"unsupported action: {action}")
PY
|
||||
542
Scripts/nsc-build-and-upload-image.sh
Executable file
542
Scripts/nsc-build-and-upload-image.sh
Executable file
|
|
@ -0,0 +1,542 @@
|
|||
#!/usr/bin/env bash
# Build the Burrow raw image on a temporary Namespace builder and upload the
# artifact to Hetzner Cloud. See usage() below for all options.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"

# shellcheck source=Scripts/_burrow-flake.sh
source "${SCRIPT_DIR}/_burrow-flake.sh"

# Build inputs / Hetzner upload settings (all overridable via environment).
CONFIG="${HCLOUD_IMAGE_CONFIG:-burrow-forge}"
FLAKE="${HCLOUD_IMAGE_FLAKE:-.}"
LOCATION="${HCLOUD_IMAGE_LOCATION:-hel1}"
TOKEN_FILE="${HCLOUD_TOKEN_FILE:-${REPO_ROOT}/intake/hetzner-api-token.txt}"

# Namespace builder settings.
NSC_SSH_HOST="${NSC_SSH_HOST:-ssh.ord2.namespace.so}"
NSC_MACHINE_TYPE="${NSC_MACHINE_TYPE:-linux/amd64:32x64}"
NSC_BUILDER_DURATION="${NSC_BUILDER_DURATION:-4h}"
NSC_BUILDER_JOBS="${NSC_BUILDER_JOBS:-32}"
NSC_BUILDER_FEATURES="${NSC_BUILDER_FEATURES:-kvm,big-parallel}"
NSC_BIN="${NSC_BIN:-}"

# Artifact transfer / upload tuning.
REMOTE_COMPRESSION="${HCLOUD_IMAGE_REMOTE_COMPRESSION:-auto}"
UPLOAD_SERVER_TYPE="${HCLOUD_IMAGE_UPLOAD_SERVER_TYPE:-}"
KEEP_TMPDIR="${HCLOUD_IMAGE_KEEP_TMPDIR:-0}"

# Mutable state filled in by option parsing and at runtime.
NO_UPDATE=0
NIX_BUILD_FLAGS=()
EXTRA_LABELS=()
BURROW_FLAKE_TMPDIRS=()
BUILDER_ID=""
|
||||
# Print the CLI help text to stdout.
usage() {
  cat <<'USAGE'
Usage: Scripts/nsc-build-and-upload-image.sh [options]

Create a temporary Namespace Linux builder, build the Burrow raw image on it,
and upload the resulting artifact to Hetzner Cloud.

Options:
  --config <name>        images.<name>-raw output to build (default: burrow-forge)
  --flake <path>         Flake path to build from (default: .)
  --location <code>      Hetzner upload location (default: hel1)
  --token-file <path>    Hetzner API token file (default: intake/hetzner-api-token.txt)
  --machine-type <type>  Namespace machine type (default: linux/amd64:32x64)
  --ssh-host <host>      Namespace SSH endpoint (default: ssh.ord2.namespace.so)
  --duration <ttl>       Namespace builder lifetime (default: 4h)
  --builder-jobs <n>     Nix builder job count advertised to the local client
  --builder-features <s> Comma-separated Nix system features (default: "kvm,big-parallel")
  --remote-compression <mode>
                         Compress raw/image artifacts on the Namespace builder
                         before copy-back. Modes: auto, none, xz, zstd
                         (default: auto)
  --upload-server-type <name>
                         Hetzner server type for the temporary upload host
  --label key=value      Extra Hetzner snapshot label (repeatable)
  --nix-flag <arg>       Extra argument passed to nix build (repeatable)
  --no-update            Reuse an existing snapshot with the same config/output hash
  -h, --help             Show this help text
USAGE
}
|
||||
|
||||
# Flag parsing; ${2:?...} aborts with a diagnostic when a value is missing.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --config)             CONFIG="${2:?missing value for --config}"; shift 2 ;;
    --flake)              FLAKE="${2:?missing value for --flake}"; shift 2 ;;
    --location)           LOCATION="${2:?missing value for --location}"; shift 2 ;;
    --token-file)         TOKEN_FILE="${2:?missing value for --token-file}"; shift 2 ;;
    --machine-type)       NSC_MACHINE_TYPE="${2:?missing value for --machine-type}"; shift 2 ;;
    --ssh-host)           NSC_SSH_HOST="${2:?missing value for --ssh-host}"; shift 2 ;;
    --duration)           NSC_BUILDER_DURATION="${2:?missing value for --duration}"; shift 2 ;;
    --builder-jobs)       NSC_BUILDER_JOBS="${2:?missing value for --builder-jobs}"; shift 2 ;;
    --builder-features)   NSC_BUILDER_FEATURES="${2:?missing value for --builder-features}"; shift 2 ;;
    --remote-compression) REMOTE_COMPRESSION="${2:?missing value for --remote-compression}"; shift 2 ;;
    --upload-server-type) UPLOAD_SERVER_TYPE="${2:?missing value for --upload-server-type}"; shift 2 ;;
    --label)              EXTRA_LABELS+=("${2:?missing value for --label}"); shift 2 ;;
    --nix-flag)           NIX_BUILD_FLAGS+=("${2:?missing value for --nix-flag}"); shift 2 ;;
    --no-update)          NO_UPDATE=1; shift ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done
|
||||
|
||||
# Best-effort teardown run on every exit path: destroy the Namespace builder
# (when one was created), remove flake working copies, and delete the scratch
# directory unless the caller asked to keep it for debugging.
cleanup() {
  if [[ -n "${BUILDER_ID}" && -n "${NSC_BIN}" ]]; then
    "${NSC_BIN}" destroy "${BUILDER_ID}" --force >/dev/null 2>&1 || true
  fi
  burrow_cleanup_flake_tmpdirs
  if [[ "${KEEP_TMPDIR}" != "1" && -n "${TMPDIR_BURROW_NSC:-}" && -d "${TMPDIR_BURROW_NSC}" ]]; then
    rm -rf "${TMPDIR_BURROW_NSC}"
  fi
}
trap cleanup EXIT
|
||||
|
||||
# Fail fast when any required tool is missing.
burrow_require_cmd nix
burrow_require_cmd curl
burrow_require_cmd python3
burrow_require_cmd ssh
burrow_require_cmd ssh-keygen
burrow_require_cmd ssh-keyscan
burrow_require_cmd tar

flake_ref="$(burrow_prepare_flake_ref "${FLAKE}")"

# Resolve the nsc binary: build it from the flake unless NSC_BIN was given.
if [[ -z "${NSC_BIN}" ]]; then
  nsc_build_output="$(
    nix --extra-experimental-features "nix-command flakes" build \
      "${flake_ref}#nsc" \
      --no-link \
      --print-out-paths 2>&1
  )" || {
    printf '%s\n' "${nsc_build_output}" >&2
    exit 1
  }
  # The store path is the last line of nix's combined output.
  NSC_BIN="$(printf '%s\n' "${nsc_build_output}" | tail -n1)/bin/nsc"
fi

if [[ ! -x "${NSC_BIN}" ]]; then
  echo "unable to resolve an executable nsc binary; set NSC_BIN explicitly" >&2
  exit 1
fi

# Materialize a session file from NSC_SESSION (CI use) when none exists yet.
if [[ -n "${NSC_SESSION:-}" && ! -f "${HOME}/.ns/session" ]]; then
  mkdir -p "${HOME}/.ns"
  printf '%s\n' "${NSC_SESSION}" > "${HOME}/.ns/session"
  chmod 600 "${HOME}/.ns/session"
fi

"${NSC_BIN}" auth check-login --duration 20m >/dev/null
"${NSC_BIN}" version >/dev/null || true

# Scratch space for the builder SSH identity and copied-back artifacts.
TMPDIR_BURROW_NSC="$(mktemp -d "${HOME}/.cache/burrow/nsc-XXXXXX")"
ssh_key="${TMPDIR_BURROW_NSC}/builder"
known_hosts="${TMPDIR_BURROW_NSC}/known_hosts"
id_file="${TMPDIR_BURROW_NSC}/builder.id"

# Throwaway keypair plus a pinned known_hosts entry for the SSH endpoint.
ssh-keygen -q -t ed25519 -N "" -f "${ssh_key}"
ssh-keyscan -H "${NSC_SSH_HOST}" > "${known_hosts}"

# Common SSH invocation prefix used for every call to the builder.
ssh_base=(
  ssh
  -i "${ssh_key}"
  -o UserKnownHostsFile="${known_hosts}"
  -o StrictHostKeyChecking=yes
)
|
||||
|
||||
# Poll the builder over SSH until it accepts a no-op command.
# Retries every 5 seconds for up to 30 attempts; non-zero on timeout.
wait_for_ssh() {
  local instance_id="$1"
  local attempt
  for (( attempt = 0; attempt < 30; attempt++ )); do
    if "${ssh_base[@]}" -q "${instance_id}@${NSC_SSH_HOST}" true >/dev/null 2>&1; then
      return 0
    fi
    sleep 5
  done
  return 1
}
|
||||
|
||||
# Install Nix (Determinate installer) on the fresh builder and make sure a
# nix-daemon is running with a usable socket before any build starts.
configure_builder() {
  local instance_id="$1"
  "${ssh_base[@]}" "${instance_id}@${NSC_SSH_HOST}" <<'EOF'
set -euo pipefail

# Bootstrap Nix only when the image does not already ship it.
if ! command -v nix >/dev/null 2>&1; then
  curl -fsSL https://install.determinate.systems/nix | sh -s -- install linux --determinate --init none --no-confirm
fi

if [ -e /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]; then
  . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
fi

mkdir -p /etc/nix
cat <<CFG >/etc/nix/nix.conf
build-users-group =
trusted-users = root $USER
auto-optimise-store = true
substituters = https://cache.nixos.org
builders-use-substitutes = true
CFG

mkdir -p /nix/var/nix/daemon-socket

# No init supervises nix-daemon here (installed with --init none), so start
# it by hand and keep restarting until the socket appears or we give up.
if ! pgrep -x nix-daemon >/dev/null 2>&1; then
  nohup nix-daemon >/dev/null 2>&1 </dev/null &
fi

for _ in $(seq 1 120); do
  if [ -S /nix/var/nix/daemon-socket/socket ]; then
    exit 0
  fi
  if ! pgrep -x nix-daemon >/dev/null 2>&1; then
    nohup nix-daemon >/dev/null 2>&1 </dev/null &
  fi
  sleep 1
done

echo "nix-daemon socket never appeared" >&2
exit 1
EOF
}
|
||||
|
||||
# Create the throwaway builder instance and record its id for cleanup.
printf 'Creating temporary Namespace builder (%s)\n' "${NSC_MACHINE_TYPE}" >&2
"${NSC_BIN}" create \
  --bare \
  --machine_type "${NSC_MACHINE_TYPE}" \
  --ssh_key "${ssh_key}.pub" \
  --duration "${NSC_BUILDER_DURATION}" \
  --label "burrow=true" \
  --label "purpose=hetzner-image-build" \
  --output_to "${id_file}" \
  >/dev/null

BUILDER_ID="$(tr -d '\r\n' < "${id_file}")"
if [[ -z "${BUILDER_ID}" ]]; then
  echo "nsc create did not return a builder id" >&2
  exit 1
fi

printf 'Waiting for Namespace builder %s\n' "${BUILDER_ID}" >&2
wait_for_ssh "${BUILDER_ID}"
configure_builder "${BUILDER_ID}" >&2

# Remote paths used during the build; all namespaced by the builder id.
remote_root="burrow-image-build-${BUILDER_ID}"
remote_flake_path="./${remote_root}"
local_flake_dir="${flake_ref#path:}"
remote_build_stdout="/tmp/burrow-image-build-${BUILDER_ID}.stdout"
remote_build_stderr="/tmp/burrow-image-build-${BUILDER_ID}.stderr"

# Ship the flake working copy to the builder as a tar stream over SSH.
printf 'Syncing flake to Namespace builder %s\n' "${BUILDER_ID}" >&2
tar -C "${local_flake_dir}" -cf - . \
  | "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" "rm -rf '${remote_root}' && mkdir -p '${remote_root}' && tar -C '${remote_root}' -xf -"
|
||||
|
||||
# Run `nix build` for the raw image on the builder. stdout/stderr are captured
# into remote files so the store path can be recovered by a later SSH call
# even though this invocation only streams diagnostics.
run_remote_build() {
  local remote_cmd=(
    env
    "CONFIG=${CONFIG}"
    "REMOTE_FLAKE_PATH=${remote_flake_path}"
    "REMOTE_BUILD_STDOUT=${remote_build_stdout}"
    "REMOTE_BUILD_STDERR=${remote_build_stderr}"
    bash -s --
  )
  # Extra nix flags travel as positional args into the remote script.
  if [[ "${#NIX_BUILD_FLAGS[@]}" -gt 0 ]]; then
    remote_cmd+=("${NIX_BUILD_FLAGS[@]}")
  fi

  "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" "${remote_cmd[@]}" <<'EOF'
set -euo pipefail

config="${CONFIG}"
remote_flake_path="${REMOTE_FLAKE_PATH}"
remote_build_stdout="${REMOTE_BUILD_STDOUT}"
remote_build_stderr="${REMOTE_BUILD_STDERR}"

nix_build_cmd=(
  nix
  --extra-experimental-features "nix-command flakes"
  build
  "path:${remote_flake_path}#images.${config}-raw"
  --no-link
  --print-out-paths
)
if [[ "$#" -gt 0 ]]; then
  nix_build_cmd+=("$@")
fi

rm -f "${remote_build_stdout}" "${remote_build_stderr}"
if ! "${nix_build_cmd[@]}" >"${remote_build_stdout}" 2>"${remote_build_stderr}"; then
  cat "${remote_build_stderr}" >&2
  exit 1
fi
EOF
}
|
||||
|
||||
# Read back the store path produced by run_remote_build (last line of the
# captured remote stdout). Fails loudly, echoing the remote stderr, when the
# build never wrote a result.
resolve_remote_store_path() {
  "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
    env "REMOTE_BUILD_STDOUT=${remote_build_stdout}" "REMOTE_BUILD_STDERR=${remote_build_stderr}" bash -s <<'EOF'
set -euo pipefail

remote_build_stdout="${REMOTE_BUILD_STDOUT}"
remote_build_stderr="${REMOTE_BUILD_STDERR}"

if [[ ! -s "${remote_build_stdout}" ]]; then
  echo "remote build stdout file is missing or empty: ${remote_build_stdout}" >&2
  if [[ -s "${remote_build_stderr}" ]]; then
    cat "${remote_build_stderr}" >&2
  fi
  exit 1
fi

tail -n1 "${remote_build_stdout}"
EOF
}

# Map a store path to the actual image file inside it. A file path passes
# through; a directory is searched for the first *.raw/*.img artifact
# (including compressed variants) in sorted order.
resolve_remote_artifact_path() {
  local store_path="$1"
  "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
    env "REMOTE_STORE_PATH=${store_path}" bash -s <<'EOF'
set -euo pipefail

store_path="${REMOTE_STORE_PATH}"
artifact_path="${store_path}"
if [[ -d "${artifact_path}" ]]; then
  artifact_path="$(find "${artifact_path}" -type f \( -name '*.raw' -o -name '*.raw.*' -o -name '*.img' -o -name '*.img.*' \) | sort | head -n1)"
fi
if [[ -z "${artifact_path}" || ! -f "${artifact_path}" ]]; then
  echo "unable to locate image artifact under ${store_path}" >&2
  exit 1
fi

printf '%s\n' "${artifact_path}"
EOF
}
|
||||
|
||||
# Decide how the artifact travels home. Prints "<local filename>\t<codec>".
# Artifacts that are already compressed pass through with their native codec;
# otherwise the requested --remote-compression mode picks zstd, xz, or none
# based on what the builder has installed.
plan_remote_artifact_transfer() {
  local artifact_path="$1"
  local compression_mode="$2"

  "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
    env "REMOTE_ARTIFACT_PATH=${artifact_path}" "REMOTE_COMPRESSION=${compression_mode}" bash -s <<'EOF'
set -euo pipefail

artifact_path="${REMOTE_ARTIFACT_PATH}"
compression_mode="${REMOTE_COMPRESSION}"

# Pre-compressed artifacts keep their filename and codec unchanged.
case "${artifact_path}" in
  *.bz2)
    printf '%s\tbz2\n' "$(basename "${artifact_path}")"
    exit 0
    ;;
  *.xz)
    printf '%s\txz\n' "$(basename "${artifact_path}")"
    exit 0
    ;;
  *.zst|*.zstd)
    printf '%s\tzstd\n' "$(basename "${artifact_path}")"
    exit 0
    ;;
esac

# Resolve "auto" to the best available codec; validate explicit modes.
select_compression() {
  case "${compression_mode}" in
    auto)
      if command -v zstd >/dev/null 2>&1; then
        printf 'zstd\n'
        return 0
      fi
      if command -v xz >/dev/null 2>&1; then
        printf 'xz\n'
        return 0
      fi
      printf 'none\n'
      ;;
    none|xz|zstd)
      printf '%s\n' "${compression_mode}"
      ;;
    *)
      echo "unsupported remote compression mode: ${compression_mode}" >&2
      exit 1
      ;;
  esac
}

mode="$(select_compression)"
case "${mode}" in
  none)
    printf '%s\tnone\n' "$(basename "${artifact_path}")"
    ;;
  zstd)
    printf '%s.zst\tzstd\n' "$(basename "${artifact_path}")"
    ;;
  xz)
    printf '%s.xz\txz\n' "$(basename "${artifact_path}")"
    ;;
esac
EOF
}
|
||||
|
||||
# Stream the (optionally compressed) artifact bytes from the builder into a
# local file. Mirrors plan_remote_artifact_transfer's codec selection so the
# local filename's extension matches the bytes actually written.
stream_remote_artifact() {
  local artifact_path="$1"
  local compression_mode="$2"
  local destination="$3"

  "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
    env "REMOTE_ARTIFACT_PATH=${artifact_path}" "REMOTE_COMPRESSION=${compression_mode}" bash -s <<'EOF' > "${destination}"
set -euo pipefail

artifact_path="${REMOTE_ARTIFACT_PATH}"
compression_mode="${REMOTE_COMPRESSION}"

# Already compressed: ship the bytes untouched.
case "${artifact_path}" in
  *.bz2|*.xz|*.zst|*.zstd)
    cat "${artifact_path}"
    exit 0
    ;;
esac

# Same selection logic as plan_remote_artifact_transfer (kept duplicated
# because each heredoc runs as an independent remote script).
select_compression() {
  case "${compression_mode}" in
    auto)
      if command -v zstd >/dev/null 2>&1; then
        printf 'zstd\n'
        return 0
      fi
      if command -v xz >/dev/null 2>&1; then
        printf 'xz\n'
        return 0
      fi
      printf 'none\n'
      ;;
    none|xz|zstd)
      printf '%s\n' "${compression_mode}"
      ;;
    *)
      echo "unsupported remote compression mode: ${compression_mode}" >&2
      exit 1
      ;;
  esac
}

mode="$(select_compression)"
case "${mode}" in
  none)
    cat "${artifact_path}"
    ;;
  zstd)
    if ! command -v zstd >/dev/null 2>&1; then
      echo "zstd requested but not available on Namespace builder" >&2
      exit 1
    fi
    zstd -T0 -19 -c "${artifact_path}"
    ;;
  xz)
    if ! command -v xz >/dev/null 2>&1; then
      echo "xz requested but not available on Namespace builder" >&2
      exit 1
    fi
    xz -T0 -c "${artifact_path}"
    ;;
esac
EOF
}
|
||||
|
||||
# --- main flow: build remotely, copy the artifact back, upload to Hetzner ---

printf 'Building raw image on Namespace builder %s\n' "${BUILDER_ID}" >&2
run_remote_build

remote_store_path="$(resolve_remote_store_path)"
if [[ -z "${remote_store_path}" ]]; then
  echo "remote build did not return a store path" >&2
  exit 1
fi

remote_artifact_path="$(resolve_remote_artifact_path "${remote_store_path}")"
if [[ -z "${remote_artifact_path}" ]]; then
  echo "remote build did not return an artifact path" >&2
  exit 1
fi

# Plan lines are "<filename>\t<codec>"; split on the tab.
transfer_plan="$(plan_remote_artifact_transfer "${remote_artifact_path}" "${REMOTE_COMPRESSION}")"
local_artifact_name="$(printf '%s\n' "${transfer_plan}" | cut -f1)"
transfer_compression="$(printf '%s\n' "${transfer_plan}" | cut -f2)"
if [[ -z "${local_artifact_name}" || -z "${transfer_compression}" ]]; then
  echo "unable to determine artifact transfer plan for ${remote_artifact_path}" >&2
  exit 1
fi

# The nix output hash is the store basename up to the first dash.
output_hash="$(basename "${remote_store_path}")"
output_hash="${output_hash%%-*}"
local_artifact="${TMPDIR_BURROW_NSC}/${local_artifact_name}"

printf 'Streaming built artifact back from Namespace builder %s (%s)\n' "${BUILDER_ID}" "${transfer_compression}" >&2
stream_remote_artifact "${remote_artifact_path}" "${REMOTE_COMPRESSION}" "${local_artifact}"

# Hand off to the Hetzner upload script with the optional extras appended.
cmd=(
  "${SCRIPT_DIR}/hcloud-upload-nixos-image.sh"
  --config "${CONFIG}"
  --flake "${FLAKE}"
  --location "${LOCATION}"
  --token-file "${TOKEN_FILE}"
  --artifact-path "${local_artifact}"
  --output-hash "${output_hash}"
)

if [[ -n "${UPLOAD_SERVER_TYPE}" ]]; then
  cmd+=(--server-type "${UPLOAD_SERVER_TYPE}")
fi

if [[ "${NO_UPDATE}" -eq 1 ]]; then
  cmd+=(--no-update)
fi
if [[ "${#EXTRA_LABELS[@]}" -gt 0 ]]; then
  for label in "${EXTRA_LABELS[@]}"; do
    cmd+=(--label "${label}")
  done
fi

"${cmd[@]}"
|
||||
237
Scripts/provision-forgejo-nsc.sh
Executable file
237
Scripts/provision-forgejo-nsc.sh
Executable file
|
|
@ -0,0 +1,237 @@
|
|||
#!/usr/bin/env bash
# Generate Burrow forgejo-nsc runtime inputs under intake/. See usage().
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"

# shellcheck source=Scripts/_burrow-flake.sh
source "${SCRIPT_DIR}/_burrow-flake.sh"

# Print the CLI help text to stdout.
usage() {
  cat <<'USAGE'
Usage: Scripts/provision-forgejo-nsc.sh [options]

Generate Burrow forgejo-nsc runtime inputs in intake/ and optionally refresh the
Namespace token from the currently logged-in namespace account.

Options:
  --host <user@host>     SSH target used to mint the Forgejo PAT.
                         Default: root@git.burrow.net
  --ssh-key <path>       SSH private key for the forge host.
                         Default: intake/agent_at_burrow_net_ed25519
  --nsc-bin <path>       Override the nsc binary.
  --no-refresh-token     Reuse intake/forgejo_nsc_token.txt if it already exists.
  --token-name <name>    Forgejo PAT name prefix (default: forgejo-nsc)
  --contact-user <name>  Forgejo username used for PAT creation (default: contact)
  --scope-owner <name>   Forgejo org/user owner for the default NSC scope (default: hackclub)
  --scope-name <name>    Forgejo repository name for the default NSC scope (default: burrow)
  -h, --help             Show this help text.
USAGE
}

# Defaults, overridable via environment and the flags above.
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
NSC_BIN="${NSC_BIN:-}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
REFRESH_TOKEN=1
TOKEN_NAME_PREFIX="${FORGEJO_PAT_NAME:-forgejo-nsc}"
CONTACT_USER="${FORGEJO_CONTACT_USER:-contact}"
SCOPE_OWNER="${FORGEJO_SCOPE_OWNER:-hackclub}"
SCOPE_NAME="${FORGEJO_SCOPE_NAME:-burrow}"
BURROW_FLAKE_TMPDIRS=()
|
||||
# Remove any flake working copies created by burrow_prepare_flake_ref.
cleanup() {
  burrow_cleanup_flake_tmpdirs
}
trap cleanup EXIT

# Flag parsing; ${2:?...} aborts with a diagnostic when a value is missing.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --host)             HOST="${2:?missing value for --host}"; shift 2 ;;
    --ssh-key)          SSH_KEY="${2:?missing value for --ssh-key}"; shift 2 ;;
    --nsc-bin)          NSC_BIN="${2:?missing value for --nsc-bin}"; shift 2 ;;
    --no-refresh-token) REFRESH_TOKEN=0; shift ;;
    --token-name)       TOKEN_NAME_PREFIX="${2:?missing value for --token-name}"; shift 2 ;;
    --contact-user)     CONTACT_USER="${2:?missing value for --contact-user}"; shift 2 ;;
    --scope-owner)      SCOPE_OWNER="${2:?missing value for --scope-owner}"; shift 2 ;;
    --scope-name)       SCOPE_NAME="${2:?missing value for --scope-name}"; shift 2 ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done
|
||||
|
||||
mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"

# Fail fast on missing tools and inputs.
burrow_require_cmd nix
burrow_require_cmd ssh
burrow_require_cmd python3

if [[ ! -f "${SSH_KEY}" ]]; then
  echo "forge SSH key not found: ${SSH_KEY}" >&2
  exit 1
fi

# intake/ holds secrets; keep it owner-only.
mkdir -p "${REPO_ROOT}/intake"
chmod 700 "${REPO_ROOT}/intake"

flake_ref="$(burrow_prepare_flake_ref "${REPO_ROOT}")"

# Resolve nsc: explicit override > PATH > build from the flake.
if [[ -z "${NSC_BIN}" ]]; then
  if command -v nsc >/dev/null 2>&1; then
    NSC_BIN="$(command -v nsc)"
  else
    nsc_build_output="$(
      nix --extra-experimental-features "nix-command flakes" build \
        "${flake_ref}#nsc" \
        --no-link \
        --print-out-paths 2>&1
    )" || {
      printf '%s\n' "${nsc_build_output}" >&2
      exit 1
    }
    # The store path is the last line of nix's combined output.
    NSC_BIN="$(printf '%s\n' "${nsc_build_output}" | tail -n1)/bin/nsc"
  fi
fi

if [[ ! -x "${NSC_BIN}" ]]; then
  echo "unable to resolve an executable nsc binary; set NSC_BIN explicitly" >&2
  exit 1
fi

# Input/output files for the rendered runtime configuration.
token_file="${REPO_ROOT}/intake/forgejo_nsc_token.txt"
dispatcher_out="${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml"
autoscaler_out="${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml"
dispatcher_src="${REPO_ROOT}/services/forgejo-nsc/deploy/dispatcher.yaml"
autoscaler_src="${REPO_ROOT}/services/forgejo-nsc/deploy/autoscaler.yaml"

# Mint a fresh Namespace dev token unless the caller opted to reuse one.
if [[ "${REFRESH_TOKEN}" -eq 1 || ! -s "${token_file}" ]]; then
  "${NSC_BIN}" auth check-login --duration 20m >/dev/null
  "${NSC_BIN}" auth generate-dev-token --output_to "${token_file}" >/dev/null
  chmod 600 "${token_file}"
fi

# Random shared secret for webhook signature verification.
webhook_secret="$(python3 - <<'PY'
import secrets
print(secrets.token_hex(32))
PY
)"

# PAT names must be unique; suffix with a UTC timestamp.
token_name="${TOKEN_NAME_PREFIX}-$(date -u +%Y%m%dT%H%M%SZ)"
|
||||
# Mint a Forgejo PAT on the forge host by invoking the forgejo CLI as the
# service user (binary path recovered from the systemd unit's ExecStart).
forgejo_pat="$(
  ssh \
    -i "${SSH_KEY}" \
    -o IdentitiesOnly=yes \
    -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \
    -o StrictHostKeyChecking=accept-new \
    "${HOST}" \
    "set -euo pipefail; forgejo_bin=\$(systemctl show -p ExecStart forgejo.service --value | sed -E 's/^\\{ path=([^ ;]+).*/\\1/'); sudo -u forgejo \"\${forgejo_bin}\" --config /var/lib/forgejo/custom/conf/app.ini --custom-path /var/lib/forgejo/custom --work-path /var/lib/forgejo admin user generate-access-token --username '${CONTACT_USER}' --scopes all --raw --token-name '${token_name}'" \
    | tr -d '\r\n'
)"

if [[ -z "${forgejo_pat}" ]]; then
  echo "failed to mint Forgejo PAT on ${HOST}" >&2
  exit 1
fi

# Ensure the scope org and repo exist on the forge (idempotent: 404 -> create).
# The heredoc is intentionally unquoted so local values interpolate; remote
# expansions are escaped with a backslash.
ssh \
  -i "${SSH_KEY}" \
  -o IdentitiesOnly=yes \
  -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \
  -o StrictHostKeyChecking=accept-new \
  "${HOST}" \
  'bash -s' <<EOF
set -euo pipefail

base_url='http://127.0.0.1:3000'
token='${forgejo_pat}'
scope_owner='${SCOPE_OWNER}'
scope_name='${SCOPE_NAME}'

api() {
  curl -sS -o /tmp/forgejo-provision-response.json -w '%{http_code}' \
    -H "Authorization: token \${token}" \
    -H 'Content-Type: application/json' \
    "\$@"
}

org_code="\$(api "\${base_url}/api/v1/orgs/\${scope_owner}")"
if [[ "\${org_code}" == "404" ]]; then
  cat >/tmp/forgejo-provision-org.json <<JSON
{"username":"${SCOPE_OWNER}","full_name":"${SCOPE_OWNER}","visibility":"public"}
JSON
  org_code="\$(api -X POST --data @/tmp/forgejo-provision-org.json "\${base_url}/api/v1/orgs")"
  if [[ "\${org_code}" != "201" ]]; then
    echo "failed to create Forgejo org ${SCOPE_OWNER} (HTTP \${org_code})" >&2
    cat /tmp/forgejo-provision-response.json >&2
    exit 1
  fi
fi

repo_code="\$(api "\${base_url}/api/v1/repos/\${scope_owner}/\${scope_name}")"
if [[ "\${repo_code}" == "404" ]]; then
  cat >/tmp/forgejo-provision-repo.json <<JSON
{"name":"${SCOPE_NAME}","description":"Burrow forge bootstrap repository","private":false,"default_branch":"main","auto_init":false}
JSON
  repo_code="\$(api -X POST --data @/tmp/forgejo-provision-repo.json "\${base_url}/api/v1/orgs/\${scope_owner}/repos")"
  if [[ "\${repo_code}" != "201" ]]; then
    echo "failed to create Forgejo repo ${SCOPE_OWNER}/${SCOPE_NAME} (HTTP \${repo_code})" >&2
    cat /tmp/forgejo-provision-response.json >&2
    exit 1
  fi
fi
EOF

# Render the deploy templates, substituting the freshly minted secrets.
# Values travel via the environment so they never hit a command line.
FORGEJO_PAT="${forgejo_pat}" \
WEBHOOK_SECRET="${webhook_secret}" \
DISPATCHER_SRC="${dispatcher_src}" \
AUTOSCALER_SRC="${autoscaler_src}" \
DISPATCHER_OUT="${dispatcher_out}" \
AUTOSCALER_OUT="${autoscaler_out}" \
python3 - <<'PY'
import os
from pathlib import Path


def render(src: str, dst: str) -> None:
    """Copy src to dst with the PENDING-* placeholders filled in."""
    text = Path(src).read_text(encoding="utf-8")
    text = text.replace("PENDING-FORGEJO-PAT", os.environ["FORGEJO_PAT"])
    text = text.replace("PENDING-WEBHOOK-SECRET", os.environ["WEBHOOK_SECRET"])
    Path(dst).write_text(text, encoding="utf-8")


render(os.environ["DISPATCHER_SRC"], os.environ["DISPATCHER_OUT"])
render(os.environ["AUTOSCALER_SRC"], os.environ["AUTOSCALER_OUT"])
PY

# The rendered files embed secrets; restrict them to the owner.
chmod 600 "${dispatcher_out}" "${autoscaler_out}"

echo "Rendered intake/forgejo_nsc_token.txt, intake/forgejo_nsc_dispatcher.yaml, and intake/forgejo_nsc_autoscaler.yaml."
echo "Minted Forgejo PAT ${token_name} for ${CONTACT_USER} on ${HOST}."
|
||||
132
Scripts/sync-forgejo-nsc-config.sh
Executable file
132
Scripts/sync-forgejo-nsc-config.sh
Executable file
|
|
@ -0,0 +1,132 @@
|
|||
#!/usr/bin/env bash
#
# sync-forgejo-nsc-config.sh — copy the Burrow forgejo-nsc runtime inputs
# (PAT token file plus dispatcher/autoscaler YAML) from intake/ onto the
# forge host and restart the consuming systemd units. Run usage() / --help
# for the option summary.
#
# Abort on any command failure, on unset variables, and on failures in any
# stage of a pipeline.
set -euo pipefail
# Print the command-line help text for this script on stdout.
usage() {
  printf '%s\n' \
    'Usage: Scripts/sync-forgejo-nsc-config.sh [options]' \
    '' \
    'Copy Burrow forgejo-nsc runtime inputs from intake/ onto the forge host and' \
    'restart the dispatcher/autoscaler units.' \
    '' \
    'Options:' \
    '  --host <user@host>   SSH target (default: root@git.burrow.net)' \
    '  --ssh-key <path>     SSH private key (default: intake/agent_at_burrow_net_ed25519)' \
    '  --rotate-pat         Re-render the intake files before syncing.' \
    '  --no-restart         Copy files only.' \
    '  -h, --help           Show this help text.'
}
||||
|
||||
# Resolve the script's own directory and, from it, the repository root.
script_parent="$(dirname "$0")"
SCRIPT_DIR="$(cd "${script_parent}" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"

# Connection settings; each may be overridden via the environment here or
# via --host / --ssh-key below.
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"

# Behaviour toggles flipped by --rotate-pat / --no-restart.
ROTATE_PAT=0
NO_RESTART=0
||||
# Parse command-line flags. Options taking a value use ${2:?...} so a
# missing argument aborts with a diagnostic; unknown options exit 64
# (EX_USAGE).
while [[ $# -gt 0 ]]; do
  case "$1" in
    --host)       HOST="${2:?missing value for --host}";       shift 2 ;;
    --ssh-key)    SSH_KEY="${2:?missing value for --ssh-key}"; shift 2 ;;
    --rotate-pat) ROTATE_PAT=1;                                shift ;;
    --no-restart) NO_RESTART=1;                                shift ;;
    -h|--help)    usage; exit 0 ;;
    *)
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done

# The forge known_hosts file lives under ~/.cache/burrow by default; make
# sure its parent directory exists before ssh tries to write to it.
mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"
||||
# Abort the script with a diagnostic on stderr when a required executable
# cannot be found on PATH.
burrow_require_cmd() {
  local tool="$1"
  command -v "${tool}" >/dev/null 2>&1 && return 0
  echo "missing required command: ${tool}" >&2
  exit 1
}
||||
|
||||
# Fail fast when the SSH tooling or the forge private key is unavailable.
for required_tool in ssh scp; do
  burrow_require_cmd "${required_tool}"
done

if [[ ! -f "${SSH_KEY}" ]]; then
  echo "forge SSH key not found: ${SSH_KEY}" >&2
  exit 1
fi
||||
|
||||
# With --rotate-pat, re-run the provisioning script first so the intake
# files below are freshly rendered before we copy them out.
if (( ROTATE_PAT == 1 )); then
  "${SCRIPT_DIR}/provision-forgejo-nsc.sh" --host "${HOST}" --ssh-key "${SSH_KEY}"
fi

# Runtime inputs expected under intake/; all three must exist and be
# non-empty before anything is shipped to the host.
token_file="${REPO_ROOT}/intake/forgejo_nsc_token.txt"
dispatcher_file="${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml"
autoscaler_file="${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml"

for required_input in "${token_file}" "${dispatcher_file}" "${autoscaler_file}"; do
  [[ -s "${required_input}" ]] && continue
  echo "required runtime input missing or empty: ${required_input}" >&2
  exit 1
done
||||
|
||||
# Shared SSH options: pin the identity file and keep the forge host key in
# a dedicated known_hosts file. accept-new is trust-on-first-use: the key
# is recorded on first contact but a later mismatch still fails.
ssh_opts=(
  -i "${SSH_KEY}"
  -o IdentitiesOnly=yes
  -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}"
  -o StrictHostKeyChecking=accept-new
)

# Create a throwaway staging directory on the remote host for the upload.
remote_tmp="$(ssh "${ssh_opts[@]}" "${HOST}" "mktemp -d")"
cleanup() {
  # Best-effort removal of the remote staging dir on any exit path; errors
  # are deliberately swallowed so cleanup never masks the real exit status.
  if [[ -n "${remote_tmp:-}" ]]; then
    ssh "${ssh_opts[@]}" "${HOST}" "rm -rf '${remote_tmp}'" >/dev/null 2>&1 || true
  fi
}
trap cleanup EXIT
||||
|
||||
# Upload all three runtime inputs into the remote staging directory.
scp "${ssh_opts[@]}" \
  "${token_file}" \
  "${dispatcher_file}" \
  "${autoscaler_file}" \
  "${HOST}:${remote_tmp}/"

# Install the staged files into /var/lib/burrow/intake, owner-read-only.
# Quoting note: ${remote_tmp} and the $(basename ...) substitutions expand
# LOCALLY before the command string is sent; the inner single quotes only
# protect the already-expanded paths on the remote shell.
# NOTE(review): assumes the forgejo-nsc user/group already exist on the
# host — install -o/-g fails otherwise.
ssh "${ssh_opts[@]}" "${HOST}" "
  set -euo pipefail
  install -d -m 0755 /var/lib/burrow/intake
  install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${token_file}")' /var/lib/burrow/intake/forgejo_nsc_token.txt
  install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${dispatcher_file}")' /var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml
  install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${autoscaler_file}")' /var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml
"
|
||||
|
||||
# Unless --no-restart was given, bounce both units so they pick up the new
# credentials, then surface their active state and the installed file
# metadata for the operator. is-active exits non-zero for inactive units,
# which (with the remote set -e) propagates as a failure here.
if [[ "${NO_RESTART}" -eq 0 ]]; then
  ssh "${ssh_opts[@]}" "${HOST}" "
    set -euo pipefail
    systemctl restart forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service
    systemctl is-active forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service
    ls -l \
      /var/lib/burrow/intake/forgejo_nsc_token.txt \
      /var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml \
      /var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml
  "
fi

# restarted=1 unless --no-restart was passed (NO_RESTART flips the bit).
echo "forgejo-nsc runtime sync complete (host=${HOST}, restarted=$((1 - NO_RESTART)))."
|
||||
Loading…
Add table
Add a link
Reference in a new issue