Wire runner caches and forge secrets through agenix
Some checks failed
Build Rust / Cargo Test (push) Waiting to run
Build Site / Next.js Build (push) Waiting to run
Build Apple / Build App (iOS Simulator) (push) Failing after 14s
Build Apple / Build App (macOS) (push) Failing after 13s

This commit is contained in:
Conrad Kramer 2026-03-19 00:04:27 -07:00
parent afc3e79eb0
commit ed247b2f5e
20 changed files with 299 additions and 64 deletions

View file

@ -22,14 +22,17 @@ jobs:
matrix: matrix:
include: include:
- platform: macOS - platform: macOS
cache-id: macos
destination: platform=macOS destination: platform=macOS
rust-targets: x86_64-apple-darwin,aarch64-apple-darwin rust-targets: x86_64-apple-darwin,aarch64-apple-darwin
- platform: iOS Simulator - platform: iOS Simulator
cache-id: ios-simulator
destination: platform=iOS Simulator,name=iPhone 17 Pro destination: platform=iOS Simulator,name=iPhone 17 Pro
rust-targets: aarch64-apple-ios-sim,x86_64-apple-ios rust-targets: aarch64-apple-ios-sim,x86_64-apple-ios
env: env:
CARGO_INCREMENTAL: 0 CARGO_INCREMENTAL: 0
RUST_BACKTRACE: short RUST_BACKTRACE: short
RUSTC_WRAPPER: sccache
steps: steps:
- name: Checkout - name: Checkout
uses: https://code.forgejo.org/actions/checkout@v4 uses: https://code.forgejo.org/actions/checkout@v4
@ -65,12 +68,29 @@ jobs:
echo "DEVELOPER_DIR=$selected" >> "$GITHUB_ENV" echo "DEVELOPER_DIR=$selected" >> "$GITHUB_ENV"
DEVELOPER_DIR="$selected" /usr/bin/xcodebuild -version || true DEVELOPER_DIR="$selected" /usr/bin/xcodebuild -version || true
- name: Prepare Cache Dirs
shell: bash
run: |
set -euo pipefail
cache_root="${NSC_CACHE_PATH:-${HOME}/.cache/burrow}"
mkdir -p \
"${cache_root}/cargo" \
"${cache_root}/rustup" \
"${cache_root}/sccache" \
"${cache_root}/apple/PackageCache" \
"${cache_root}/apple/SourcePackages" \
"${cache_root}/apple/DerivedData/${{ matrix.cache-id }}"
echo "CARGO_HOME=${cache_root}/cargo" >> "${GITHUB_ENV}"
echo "RUSTUP_HOME=${cache_root}/rustup" >> "${GITHUB_ENV}"
echo "SCCACHE_DIR=${cache_root}/sccache" >> "${GITHUB_ENV}"
echo "APPLE_PACKAGE_CACHE=${cache_root}/apple/PackageCache" >> "${GITHUB_ENV}"
echo "APPLE_SOURCE_PACKAGES=${cache_root}/apple/SourcePackages" >> "${GITHUB_ENV}"
echo "APPLE_DERIVED_DATA=${cache_root}/apple/DerivedData/${{ matrix.cache-id }}" >> "${GITHUB_ENV}"
- name: Install Rust - name: Install Rust
shell: bash shell: bash
run: | run: |
set -euo pipefail set -euo pipefail
export RUSTUP_HOME="${HOME}/.rustup"
export CARGO_HOME="${HOME}/.cargo"
if ! command -v rustup >/dev/null 2>&1; then if ! command -v rustup >/dev/null 2>&1; then
curl --proto '=https' --tlsv1.2 -fsSL https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain 1.85.0 curl --proto '=https' --tlsv1.2 -fsSL https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain 1.85.0
@ -98,6 +118,9 @@ jobs:
if ! command -v protoc >/dev/null 2>&1; then if ! command -v protoc >/dev/null 2>&1; then
brew install protobuf brew install protobuf
fi fi
if ! command -v sccache >/dev/null 2>&1; then
brew install sccache
fi
- name: Build - name: Build
shell: bash shell: bash
@ -111,9 +134,9 @@ jobs:
-skipPackagePluginValidation \ -skipPackagePluginValidation \
-skipMacroValidation \ -skipMacroValidation \
-onlyUsePackageVersionsFromResolvedFile \ -onlyUsePackageVersionsFromResolvedFile \
-clonedSourcePackagesDirPath SourcePackages \ -clonedSourcePackagesDirPath "$APPLE_SOURCE_PACKAGES" \
-packageCachePath "$PWD/PackageCache" \ -packageCachePath "$APPLE_PACKAGE_CACHE" \
-derivedDataPath "$PWD/DerivedData" \ -derivedDataPath "$APPLE_DERIVED_DATA" \
CODE_SIGNING_ALLOWED=NO \ CODE_SIGNING_ALLOWED=NO \
CODE_SIGNING_REQUIRED=NO \ CODE_SIGNING_REQUIRED=NO \
CODE_SIGN_IDENTITY="" \ CODE_SIGN_IDENTITY="" \

View file

@ -17,6 +17,10 @@ jobs:
rust: rust:
name: Cargo Test name: Cargo Test
runs-on: [self-hosted, linux, x86_64, burrow-forge] runs-on: [self-hosted, linux, x86_64, burrow-forge]
env:
CARGO_INCREMENTAL: 0
RUSTC_WRAPPER: sccache
SCCACHE_CACHE_SIZE: 20G
steps: steps:
- name: Checkout - name: Checkout
uses: https://code.forgejo.org/actions/checkout@v4 uses: https://code.forgejo.org/actions/checkout@v4
@ -24,8 +28,21 @@ jobs:
token: ${{ github.token }} token: ${{ github.token }}
fetch-depth: 0 fetch-depth: 0
- name: Prepare Cache Dirs
shell: bash
run: |
set -euo pipefail
cache_root="${HOME}/.cache/burrow"
mkdir -p "${cache_root}/cargo" "${cache_root}/sccache"
echo "CARGO_HOME=${cache_root}/cargo" >> "${GITHUB_ENV}"
echo "SCCACHE_DIR=${cache_root}/sccache" >> "${GITHUB_ENV}"
- name: Test - name: Test
shell: bash shell: bash
run: | run: |
set -euo pipefail set -euo pipefail
nix develop .#ci -c cargo test --workspace --all-features nix develop .#ci -c bash -lc '
sccache --zero-stats >/dev/null 2>&1 || true
cargo test --workspace --all-features
sccache --show-stats || true
'

View file

@ -24,6 +24,14 @@ jobs:
token: ${{ github.token }} token: ${{ github.token }}
fetch-depth: 0 fetch-depth: 0
- name: Prepare Cache Dirs
shell: bash
run: |
set -euo pipefail
cache_root="${HOME}/.cache/burrow"
mkdir -p "${cache_root}/npm"
echo "NPM_CONFIG_CACHE=${cache_root}/npm" >> "${GITHUB_ENV}"
- name: Build - name: Build
shell: bash shell: bash
run: | run: |

View file

@ -73,7 +73,13 @@ CARGO_PATH="$(dirname $PROTOC):$CARGO_PATH"
# Run cargo without the various environment variables set by Xcode. # Run cargo without the various environment variables set by Xcode.
# Those variables can confuse cargo and the build scripts it runs. # Those variables can confuse cargo and the build scripts it runs.
env -i PATH="$CARGO_PATH" PROTOC="$PROTOC" CARGO_TARGET_DIR="${CONFIGURATION_TEMP_DIR}/target" IPHONEOS_DEPLOYMENT_TARGET="$IPHONEOS_DEPLOYMENT_TARGET" MACOSX_DEPLOYMENT_TARGET="$MACOSX_DEPLOYMENT_TARGET" cargo build "${CARGO_ARGS[@]}" EXTRA_ENV=()
for VAR_NAME in HOME CARGO_HOME RUSTUP_HOME RUSTC_WRAPPER SCCACHE_DIR CARGO_INCREMENTAL; do
if [[ -n "${!VAR_NAME:-}" ]]; then
EXTRA_ENV+=("${VAR_NAME}=${!VAR_NAME}")
fi
done
env -i PATH="$CARGO_PATH" PROTOC="$PROTOC" CARGO_TARGET_DIR="${CONFIGURATION_TEMP_DIR}/target" IPHONEOS_DEPLOYMENT_TARGET="$IPHONEOS_DEPLOYMENT_TARGET" MACOSX_DEPLOYMENT_TARGET="$MACOSX_DEPLOYMENT_TARGET" "${EXTRA_ENV[@]}" cargo build "${CARGO_ARGS[@]}"
mkdir -p "${BUILT_PRODUCTS_DIR}" mkdir -p "${BUILT_PRODUCTS_DIR}"

View file

@ -1,7 +1,9 @@
FLAKE ?= . FLAKE ?= .
AGENIX ?= nix run ${FLAKE}\#agenix -- AGENIX ?= nix run ${FLAKE}\#agenix --
SECRETS := forgejo/nsc-token \ SECRETS := forgejo/admin-password \
forgejo/agent-ssh-key \
forgejo/nsc-token \
forgejo/nsc-dispatcher-config \ forgejo/nsc-dispatcher-config \
forgejo/nsc-autoscaler-config forgejo/nsc-autoscaler-config

View file

@ -36,6 +36,7 @@
agenixPkg = agenix.packages.${system}.agenix; agenixPkg = agenix.packages.${system}.agenix;
commonPackages = with pkgs; [ commonPackages = with pkgs; [
cargo cargo
sccache
rustc rustc
rustfmt rustfmt
clippy clippy

View file

@ -15,19 +15,19 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B
- `keys/agent_at_burrow_net.pub`: automation SSH public key - `keys/agent_at_burrow_net.pub`: automation SSH public key
- `../Scripts/hetzner-forge.sh`: Hetzner inventory and replace workflow - `../Scripts/hetzner-forge.sh`: Hetzner inventory and replace workflow
- `../Scripts/nsc-build-and-upload-image.sh`: temporary Namespace builder -> raw image -> Hetzner snapshot - `../Scripts/nsc-build-and-upload-image.sh`: temporary Namespace builder -> raw image -> Hetzner snapshot
- `../Scripts/bootstrap-forge-intake.sh`: copy the Forgejo bootstrap password and agent SSH key into `/var/lib/burrow/intake/` - `../Scripts/bootstrap-forge-intake.sh`: legacy intake bootstrap helper; current forge runtime secrets should live in `../secrets/forgejo/*.age`
- `../Scripts/check-forge-host.sh`: verify Forgejo, Caddy, the local runner, and optional NSC services after boot - `../Scripts/check-forge-host.sh`: verify Forgejo, Caddy, the local runner, and optional NSC services after boot
- `../Scripts/cloudflare-upsert-a-record.sh`: upsert DNS-only Cloudflare `A` records for Burrow host cutovers - `../Scripts/cloudflare-upsert-a-record.sh`: upsert DNS-only Cloudflare `A` records for Burrow host cutovers
- `../Scripts/forge-deploy.sh`: remote `nixos-rebuild` entrypoint for the forge host - `../Scripts/forge-deploy.sh`: remote `nixos-rebuild` entrypoint for the forge host
- `../Scripts/provision-forgejo-nsc.sh`: render Burrow Namespace dispatcher/autoscaler bootstrap inputs and ensure the default Forgejo scope exists - `../Scripts/provision-forgejo-nsc.sh`: render Burrow Namespace dispatcher/autoscaler bootstrap inputs and ensure the default Forgejo scope exists
- `../secrets/forgejo/*.age`: authoritative encrypted Namespace token + dispatcher/autoscaler configs for the forge host - `../secrets/forgejo/*.age`: authoritative encrypted forge admin password, agent SSH key, and Namespace runtime configs for the forge host
## Intended Flow ## Intended Flow
1. Build and upload the raw NixOS image with `Scripts/hetzner-forge.sh build-image` or `Scripts/nsc-build-and-upload-image.sh`. 1. Build and upload the raw NixOS image with `Scripts/hetzner-forge.sh build-image` or `Scripts/nsc-build-and-upload-image.sh`.
2. Recreate `burrow-forge` from the latest labeled snapshot with `Scripts/hetzner-forge.sh recreate-from-image --yes`. 2. Recreate `burrow-forge` from the latest labeled snapshot with `Scripts/hetzner-forge.sh recreate-from-image --yes`.
3. Run `Scripts/bootstrap-forge-intake.sh` to place the Forgejo bootstrap password file and automation SSH key under `/var/lib/burrow/intake/`. 3. Encrypt the Forgejo admin password and agent SSH key into `secrets/forgejo/{admin-password,agent-ssh-key}.age`.
4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account. 4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account from the agenix secret path.
5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent <agent@burrow.net>`. 5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent <agent@burrow.net>`.
6. Run `Scripts/provision-forgejo-nsc.sh` locally, re-encrypt the resulting NSC token + configs into `secrets/forgejo/*.age`, then deploy with `Scripts/forge-deploy.sh` so agenix updates the live forgejo-nsc runtime paths. 6. Run `Scripts/provision-forgejo-nsc.sh` locally, re-encrypt the resulting NSC token + configs into `secrets/forgejo/*.age`, then deploy with `Scripts/forge-deploy.sh` so agenix updates the live forgejo-nsc runtime paths.
7. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME. 7. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME.

View file

@ -20,7 +20,7 @@
services.burrow.forge = { services.burrow.forge = {
enable = true; enable = true;
adminPasswordFile = "/var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt"; adminPasswordFile = config.age.secrets.forgejoAdminPassword.path;
authorizedKeys = [ authorizedKeys = [
(builtins.readFile ../../keys/contact_at_burrow_net.pub) (builtins.readFile ../../keys/contact_at_burrow_net.pub)
(builtins.readFile ../../keys/agent_at_burrow_net.pub) (builtins.readFile ../../keys/agent_at_burrow_net.pub)
@ -29,7 +29,21 @@
services.burrow.forgeRunner = { services.burrow.forgeRunner = {
enable = true; enable = true;
sshPrivateKeyFile = "/var/lib/burrow/intake/agent_at_burrow_net_ed25519"; sshPrivateKeyFile = config.age.secrets.forgejoAgentSshKey.path;
};
age.secrets.forgejoAdminPassword = {
file = ../../../secrets/forgejo/admin-password.age;
mode = "0400";
owner = "forgejo";
group = "forgejo";
};
age.secrets.forgejoAgentSshKey = {
file = ../../../secrets/forgejo/agent-ssh-key.age;
mode = "0400";
owner = "root";
group = "root";
}; };
age.secrets.forgejoNscToken = { age.secrets.forgejoNscToken = {

View file

@ -4,6 +4,8 @@ Burrow secrets live in `secrets/<name>.age` and are managed with `agenix`.
For the Forgejo Namespace Cloud runtime: For the Forgejo Namespace Cloud runtime:
- `secrets/forgejo/admin-password.age`
- `secrets/forgejo/agent-ssh-key.age`
- `secrets/forgejo/nsc-token.age` - `secrets/forgejo/nsc-token.age`
- `secrets/forgejo/nsc-dispatcher-config.age` - `secrets/forgejo/nsc-dispatcher-config.age`
- `secrets/forgejo/nsc-autoscaler-config.age` - `secrets/forgejo/nsc-autoscaler-config.age`
@ -11,7 +13,8 @@ For the Forgejo Namespace Cloud runtime:
Use: Use:
- `make secret name=forgejo/nsc-token` - `make secret name=forgejo/nsc-token`
- `make secret-file name=forgejo/nsc-token file=/path/to/source` - `make secret-file name=forgejo/agent-ssh-key file=/path/to/source`
The forge host decrypts these files at activation time and feeds the resulting The forge host decrypts these files at activation time and feeds the resulting
paths into `services.burrow.forgejoNsc`. paths into `services.burrow.forge`, `services.burrow.forgeRunner`, and
`services.burrow.forgejoNsc`.

View file

@ -0,0 +1,11 @@
age-encryption.org/v1
-> ssh-ed25519 ux4N8Q nmGFzw38TKiVVuA9CM8wHQDVib0RddB+M/UjQnD45jk
iZNLNBlS32zR+TNfcK27T1V3w27sFKJkWfuOzHwcOL0
-> ssh-ed25519 IrZmAg Y53DC0wGX8mjaXkD3+jZn2DviO5iSXsnZDBNCBTmLgA
XLz+YXzT4fYb7q0xuZMKgv88lAd0gGKaquSMcA6Yu3c
-> ssh-ed25519 JzXUWA EDAXBKEvHccJ4KKtHjUTA+KA+wN9bBu9v+kzRTFt9AI
JNADezBCxx26+QPD2tIpz5O8cncrJwnqaYQEWY56VGY
--- RpjdftRPUGT80IMYKFDFuHkKEr1heJOvqrqYLufhc10
ûÈÂ_
F(
((0ˆ‡Õɉ·',¿€8d]d%T[MÁ¼¬KRQÿxiIf<49>Òæ

Binary file not shown.

View file

@ -1,4 +1,3 @@
{ }:
let let
contact = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa"; contact = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa";
agent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net"; agent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net";
@ -6,6 +5,8 @@ let
forgeAutomation = [ contact agent forge ]; forgeAutomation = [ contact agent forge ];
in { in {
"secrets/forgejo/admin-password.age".publicKeys = forgeAutomation;
"secrets/forgejo/agent-ssh-key.age".publicKeys = forgeAutomation;
"secrets/forgejo/nsc-token.age".publicKeys = forgeAutomation; "secrets/forgejo/nsc-token.age".publicKeys = forgeAutomation;
"secrets/forgejo/nsc-dispatcher-config.age".publicKeys = forgeAutomation; "secrets/forgejo/nsc-dispatcher-config.age".publicKeys = forgeAutomation;
"secrets/forgejo/nsc-autoscaler-config.age".publicKeys = forgeAutomation; "secrets/forgejo/nsc-autoscaler-config.age".publicKeys = forgeAutomation;

View file

@ -45,6 +45,9 @@ profile. The important knobs are:
- `namespace.machine_type` / `namespace.duration` shape + TTL for the ephemeral - `namespace.machine_type` / `namespace.duration` shape + TTL for the ephemeral
Namespace environment. The dispatcher destroys the instance after a job so the Namespace environment. The dispatcher destroys the instance after a job so the
TTL acts as a hard cap, not an idle timeout. TTL acts as a hard cap, not an idle timeout.
- `namespace.linux_cache_*` / `namespace.macos_cache_*` persistent cache
volumes mounted into runners so Linux can keep `/nix` plus build caches warm
and macOS can reuse Rust toolchains, Xcode package caches, and derived data.
### Running locally ### Running locally
@ -160,12 +163,15 @@ consume the same secret material.
Long-lived runtime state is now sourced from age-encrypted files: Long-lived runtime state is now sourced from age-encrypted files:
- `secrets/forgejo/admin-password.age`
- `secrets/forgejo/agent-ssh-key.age`
- `secrets/forgejo/nsc-token.age` - `secrets/forgejo/nsc-token.age`
- `secrets/forgejo/nsc-dispatcher-config.age` - `secrets/forgejo/nsc-dispatcher-config.age`
- `secrets/forgejo/nsc-autoscaler-config.age` - `secrets/forgejo/nsc-autoscaler-config.age`
After refreshing the intake files, re-encrypt them into `secrets/forgejo/*.age` After refreshing the intake files, re-encrypt them into `secrets/forgejo/*.age`
and deploy the forge host so `config.age.secrets.*` updates the live paths for and deploy the forge host so `config.age.secrets.*` updates the live paths for
`services.burrow.forge`, `services.burrow.forgeRunner`, and
`services.burrow.forgejoNsc`. `services.burrow.forgejoNsc`.
Run it next to the dispatcher: Run it next to the dispatcher:

View file

@ -52,6 +52,10 @@ func main() {
DefaultDuration: cfg.Namespace.Duration.Duration, DefaultDuration: cfg.Namespace.Duration.Duration,
WorkDir: cfg.Namespace.WorkDir, WorkDir: cfg.Namespace.WorkDir,
MaxParallel: cfg.Namespace.MaxParallel, MaxParallel: cfg.Namespace.MaxParallel,
LinuxCachePath: cfg.Namespace.LinuxCachePath,
LinuxCacheVolumes: toNSCCacheVolumes(cfg.Namespace.LinuxCacheVolumes),
MacosCachePath: cfg.Namespace.MacosCachePath,
MacosCacheVolumes: toNSCCacheVolumes(cfg.Namespace.MacosCacheVolumes),
RunnerNamePrefix: cfg.Runner.NamePrefix, RunnerNamePrefix: cfg.Runner.NamePrefix,
Executor: cfg.Runner.Executor, Executor: cfg.Runner.Executor,
Network: cfg.Namespace.Network, Network: cfg.Namespace.Network,
@ -88,3 +92,15 @@ func main() {
defer cancel() defer cancel()
_ = srv.Shutdown(ctx) _ = srv.Shutdown(ctx)
} }
// toNSCCacheVolumes converts the YAML-level cache volume configuration into
// the dispatcher's nsc.CacheVolume representation, preserving order and
// copying fields one-to-one. No validation happens here; malformed entries
// are filtered later by the dispatcher (see appendVolumeArgs /
// computeCacheVolumeRequests).
func toNSCCacheVolumes(volumes []config.CacheVolumeConfig) []nsc.CacheVolume {
	converted := make([]nsc.CacheVolume, len(volumes))
	for i := range volumes {
		converted[i] = nsc.CacheVolume{
			Tag:        volumes[i].Tag,
			MountPoint: volumes[i].MountPoint,
			SizeGb:     volumes[i].SizeGb,
		}
	}
	return converted
}

View file

@ -21,6 +21,19 @@ namespace:
workdir: "/var/lib/forgejo-runner" workdir: "/var/lib/forgejo-runner"
max_parallel: 4 max_parallel: 4
network: "" network: ""
linux_cache_path: "/var/cache/burrow"
linux_cache_volumes:
- tag: "burrow-forgejo-linux-nix"
mount_point: "/nix"
size_gb: 60
- tag: "burrow-forgejo-linux-cache"
mount_point: "/var/cache/burrow"
size_gb: 40
macos_cache_path: "/Users/runner/.cache/burrow"
macos_cache_volumes:
- tag: "burrow-forgejo-macos-cache"
mount_point: "/Users/runner/.cache/burrow"
size_gb: 60
runner: runner:
name_prefix: "nscloud-" name_prefix: "nscloud-"

View file

@ -31,6 +31,19 @@ namespace:
instance_tags: instance_tags:
- "burrow" - "burrow"
network: "" network: ""
linux_cache_path: "/var/cache/burrow"
linux_cache_volumes:
- tag: "burrow-forgejo-linux-nix"
mount_point: "/nix"
size_gb: 60
- tag: "burrow-forgejo-linux-cache"
mount_point: "/var/cache/burrow"
size_gb: 40
macos_cache_path: "/Users/runner/.cache/burrow"
macos_cache_volumes:
- tag: "burrow-forgejo-macos-cache"
mount_point: "/Users/runner/.cache/burrow"
size_gb: 60
runner: runner:
name_prefix: "nscloud-" name_prefix: "nscloud-"

View file

@ -49,6 +49,12 @@ type Config struct {
Runner RunnerConfig `yaml:"runner"` Runner RunnerConfig `yaml:"runner"`
} }
// CacheVolumeConfig describes one persistent Namespace cache volume to attach
// to a runner instance. It is decoded from YAML under
// namespace.linux_cache_volumes / namespace.macos_cache_volumes.
type CacheVolumeConfig struct {
	Tag        string `yaml:"tag"`         // volume identifier; required (Validate rejects blank tags)
	MountPoint string `yaml:"mount_point"` // mount path inside the runner; required per Validate
	SizeGb     int64  `yaml:"size_gb"`     // volume size in gigabytes; Validate requires > 0
}
type ForgejoConfig struct { type ForgejoConfig struct {
BaseURL string `yaml:"base_url"` BaseURL string `yaml:"base_url"`
// InstanceURL is the URL runners should use when registering with Forgejo. // InstanceURL is the URL runners should use when registering with Forgejo.
@ -89,6 +95,10 @@ type NamespaceConfig struct {
AllowScopes []string `yaml:"allow_scopes"` AllowScopes []string `yaml:"allow_scopes"`
Network string `yaml:"network"` Network string `yaml:"network"`
InstanceTags []string `yaml:"instance_tags"` InstanceTags []string `yaml:"instance_tags"`
LinuxCachePath string `yaml:"linux_cache_path"`
LinuxCacheVolumes []CacheVolumeConfig `yaml:"linux_cache_volumes"`
MacosCachePath string `yaml:"macos_cache_path"`
MacosCacheVolumes []CacheVolumeConfig `yaml:"macos_cache_volumes"`
} }
type RunnerConfig struct { type RunnerConfig struct {
@ -160,6 +170,46 @@ func (c *Config) Validate() error {
if c.Namespace.MaxParallel <= 0 { if c.Namespace.MaxParallel <= 0 {
c.Namespace.MaxParallel = 4 c.Namespace.MaxParallel = 4
} }
if c.Namespace.LinuxCachePath == "" {
c.Namespace.LinuxCachePath = "/var/cache/burrow"
}
if len(c.Namespace.LinuxCacheVolumes) == 0 {
c.Namespace.LinuxCacheVolumes = []CacheVolumeConfig{
{
Tag: "burrow-forgejo-linux-nix",
MountPoint: "/nix",
SizeGb: 60,
},
{
Tag: "burrow-forgejo-linux-cache",
MountPoint: c.Namespace.LinuxCachePath,
SizeGb: 40,
},
}
}
if c.Namespace.MacosCachePath == "" {
c.Namespace.MacosCachePath = "/Users/runner/.cache/burrow"
}
if len(c.Namespace.MacosCacheVolumes) == 0 {
c.Namespace.MacosCacheVolumes = []CacheVolumeConfig{
{
Tag: "burrow-forgejo-macos-cache",
MountPoint: c.Namespace.MacosCachePath,
SizeGb: 60,
},
}
}
for _, volume := range append(append([]CacheVolumeConfig{}, c.Namespace.LinuxCacheVolumes...), c.Namespace.MacosCacheVolumes...) {
if strings.TrimSpace(volume.Tag) == "" {
return errors.New("namespace cache volume tag is required")
}
if strings.TrimSpace(volume.MountPoint) == "" {
return fmt.Errorf("namespace cache volume %q mount_point is required", volume.Tag)
}
if volume.SizeGb <= 0 {
return fmt.Errorf("namespace cache volume %q size_gb must be positive", volume.Tag)
}
}
return nil return nil
} }

View file

@ -29,9 +29,19 @@ type Options struct {
ComputeBaseURL string ComputeBaseURL string
MacosBaseImageID string MacosBaseImageID string
MacosMachineArch string MacosMachineArch string
LinuxCachePath string
LinuxCacheVolumes []CacheVolume
MacosCachePath string
MacosCacheVolumes []CacheVolume
Logger *slog.Logger Logger *slog.Logger
} }
// CacheVolume identifies a Namespace cache volume to mount into a launched
// runner instance. Entries with a blank Tag/MountPoint or non-positive SizeGb
// are skipped when the launch arguments are built.
type CacheVolume struct {
	Tag        string // volume tag passed to the Namespace API / nsc CLI
	MountPoint string // mount path inside the runner instance
	SizeGb     int64  // requested size in gigabytes
}
type LaunchRequest struct { type LaunchRequest struct {
Token string Token string
InstanceURL string InstanceURL string
@ -73,6 +83,12 @@ func NewDispatcher(opts Options) (*Dispatcher, error) {
if opts.DefaultDuration == 0 { if opts.DefaultDuration == 0 {
opts.DefaultDuration = 30 * time.Minute opts.DefaultDuration = 30 * time.Minute
} }
if opts.LinuxCachePath == "" {
opts.LinuxCachePath = "/var/cache/burrow"
}
if opts.MacosCachePath == "" {
opts.MacosCachePath = "/Users/runner/.cache/burrow"
}
logger := opts.Logger logger := opts.Logger
if logger == nil { if logger == nil {
logger = slog.New(slog.NewTextHandler(io.Discard, nil)) logger = slog.New(slog.NewTextHandler(io.Discard, nil))
@ -104,6 +120,9 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin
} }
machineType := choose(req.MachineType, d.opts.DefaultMachine) machineType := choose(req.MachineType, d.opts.DefaultMachine)
image := choose(req.Image, d.opts.DefaultImage) image := choose(req.Image, d.opts.DefaultImage)
if req.ExtraEnv == nil {
req.ExtraEnv = make(map[string]string)
}
if hasWindowsLabel(req.Labels) { if hasWindowsLabel(req.Labels) {
if err := d.launchWindowsRunnerViaWinRM(ctx, runnerName, req, duration, machineType); err != nil { if err := d.launchWindowsRunnerViaWinRM(ctx, runnerName, req, duration, machineType); err != nil {
@ -113,6 +132,9 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin
} }
if hasMacOSLabel(req.Labels) { if hasMacOSLabel(req.Labels) {
if _, ok := req.ExtraEnv["NSC_CACHE_PATH"]; !ok {
req.ExtraEnv["NSC_CACHE_PATH"] = d.opts.MacosCachePath
}
// Compute macOS shapes differ from the Linux "run" defaults. If the request // Compute macOS shapes differ from the Linux "run" defaults. If the request
// didn't specify a machine type, ensure we pick a macOS-valid default. // didn't specify a machine type, ensure we pick a macOS-valid default.
if machineType == "" || machineType == d.opts.DefaultMachine { if machineType == "" || machineType == d.opts.DefaultMachine {
@ -129,6 +151,9 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin
} }
return runnerName, nil return runnerName, nil
} }
if _, ok := req.ExtraEnv["NSC_CACHE_PATH"]; !ok {
req.ExtraEnv["NSC_CACHE_PATH"] = d.opts.LinuxCachePath
}
env := map[string]string{ env := map[string]string{
"FORGEJO_INSTANCE_URL": req.InstanceURL, "FORGEJO_INSTANCE_URL": req.InstanceURL,
@ -140,9 +165,6 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin
for k, v := range req.ExtraEnv { for k, v := range req.ExtraEnv {
env[k] = v env[k] = v
} }
if _, ok := env["NSC_CACHE_PATH"]; !ok {
env["NSC_CACHE_PATH"] = "/nix/store"
}
script := d.bootstrapScript() script := d.bootstrapScript()
args := []string{ args := []string{
@ -161,6 +183,7 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin
if d.opts.Network != "" { if d.opts.Network != "" {
args = append(args, "--network", d.opts.Network) args = append(args, "--network", d.opts.Network)
} }
args = appendVolumeArgs(args, d.opts.LinuxCacheVolumes)
for key, value := range env { for key, value := range env {
if value == "" { if value == "" {
continue continue
@ -370,6 +393,16 @@ func choose(values ...string) string {
return "" return ""
} }
// appendVolumeArgs appends one "--volume cache:<tag>:<mount>:<sizeGB>" flag
// pair per well-formed volume to the nsc CLI argument list and returns the
// extended slice. Entries with a blank (after trimming) tag or mount point,
// or a non-positive size, are skipped silently. Note the spec string uses the
// original, untrimmed values.
func appendVolumeArgs(args []string, volumes []CacheVolume) []string {
	for _, v := range volumes {
		tag := strings.TrimSpace(v.Tag)
		mount := strings.TrimSpace(v.MountPoint)
		if tag == "" || mount == "" || v.SizeGb <= 0 {
			continue
		}
		spec := fmt.Sprintf("cache:%s:%s:%d", v.Tag, v.MountPoint, v.SizeGb)
		args = append(args, "--volume", spec)
	}
	return args
}
func (d *Dispatcher) bootstrapScript() string { func (d *Dispatcher) bootstrapScript() string {
var builder strings.Builder var builder strings.Builder
builder.WriteString(`set -euo pipefail builder.WriteString(`set -euo pipefail

View file

@ -206,12 +206,8 @@ func (d *Dispatcher) launchMacOSRunner(ctx context.Context, runnerName string, r
for k, v := range req.ExtraEnv { for k, v := range req.ExtraEnv {
env[k] = v env[k] = v
} }
// Best-effort caching: workflows call Scripts/nscloud-cache.sh, which is a
// no-op unless NSC_CACHE_PATH is set. This may still be skipped if spacectl
// lacks credentials, but setting the path is harmless and keeps behavior
// consistent across macOS / Linux runners.
if _, ok := env["NSC_CACHE_PATH"]; !ok { if _, ok := env["NSC_CACHE_PATH"]; !ok {
env["NSC_CACHE_PATH"] = "/Users/runner/.cache/nscloud" env["NSC_CACHE_PATH"] = d.opts.MacosCachePath
} }
deadline := timestamppb.New(time.Now().Add(ttl)) deadline := timestamppb.New(time.Now().Add(ttl))
@ -243,10 +239,15 @@ func (d *Dispatcher) launchMacOSRunner(ctx context.Context, runnerName string, r
}, },
}, },
} }
experimental := &computev1beta.CreateInstanceRequest_ExperimentalFeatures{}
if imageID := macosComputeBaseImageID(d.opts.MacosBaseImageID); imageID != "" { if imageID := macosComputeBaseImageID(d.opts.MacosBaseImageID); imageID != "" {
createReq.Experimental = &computev1beta.CreateInstanceRequest_ExperimentalFeatures{ experimental.MacosBaseImageId = imageID
MacosBaseImageId: imageID,
} }
if volumes := computeCacheVolumeRequests(d.opts.MacosCacheVolumes); len(volumes) > 0 {
experimental.Volumes = volumes
}
if experimental.MacosBaseImageId != "" || len(experimental.Volumes) > 0 {
createReq.Experimental = experimental
} }
d.log.Info("launching Namespace macos runner", d.log.Info("launching Namespace macos runner",
@ -572,6 +573,22 @@ func (d *Dispatcher) destroyComputeInstance(ctx context.Context, client computev
d.log.Info("macos runner destroyed", "runner", runnerName, "instance", instanceID) d.log.Info("macos runner destroyed", "runner", runnerName, "instance", instanceID)
} }
// computeCacheVolumeRequests translates dispatcher cache volume specs into
// Namespace compute API volume requests with CACHE persistency. Malformed
// entries (blank tag or mount point after trimming, non-positive size) are
// dropped, and sizes are converted from gigabytes to megabytes. Returns nil
// when no valid volume remains, so callers can gate the experimental-features
// block on len(...) > 0.
func computeCacheVolumeRequests(volumes []CacheVolume) []*computev1beta.VolumeRequest {
	var requests []*computev1beta.VolumeRequest
	for i := range volumes {
		v := volumes[i]
		if strings.TrimSpace(v.Tag) == "" || strings.TrimSpace(v.MountPoint) == "" || v.SizeGb <= 0 {
			continue
		}
		req := &computev1beta.VolumeRequest{
			MountPoint:      v.MountPoint,
			Tag:             v.Tag,
			SizeMb:          v.SizeGb * 1024,
			PersistencyKind: computev1beta.VolumeRequest_CACHE,
		}
		requests = append(requests, req)
	}
	return requests
}
func macosBootstrapScript() string { func macosBootstrapScript() string {
// Keep this script self-contained: it runs on a fresh macOS VM base image. // Keep this script self-contained: it runs on a fresh macOS VM base image.
var b strings.Builder var b strings.Builder

View file

@ -144,6 +144,7 @@ func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName str
"--wait_timeout", a.waitTimeout.String(), "--wait_timeout", a.waitTimeout.String(),
} }
args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL) args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL)
args = appendVolumeArgs(args, d.opts.MacosCacheVolumes)
createCtx, cancel := context.WithTimeout(ctx, a.createTimeout) createCtx, cancel := context.WithTimeout(ctx, a.createTimeout)
defer cancel() defer cancel()