From 0e68c25a994a1b0d046912021d2da9d025e4b3fc Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 1 Apr 2026 01:12:15 -0700 Subject: [PATCH 01/59] Wire Forgejo sign-in through Authentik --- Scripts/authentik-sync-forgejo-oidc.sh | 203 +++++++++++++++++++ nixos/README.md | 2 +- nixos/hosts/burrow-forge/default.nix | 8 + nixos/modules/burrow-authentik.nix | 81 +++++++- nixos/modules/burrow-forge.nix | 132 ++++++++++++ secrets.nix | 1 + secrets/infra/forgejo-oidc-client-secret.age | 10 + 7 files changed, 434 insertions(+), 3 deletions(-) create mode 100644 Scripts/authentik-sync-forgejo-oidc.sh create mode 100644 secrets/infra/forgejo-oidc-client-secret.age diff --git a/Scripts/authentik-sync-forgejo-oidc.sh b/Scripts/authentik-sync-forgejo-oidc.sh new file mode 100644 index 0000000..f354633 --- /dev/null +++ b/Scripts/authentik-sync-forgejo-oidc.sh @@ -0,0 +1,203 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +application_slug="${AUTHENTIK_FORGEJO_APPLICATION_SLUG:-git}" +application_name="${AUTHENTIK_FORGEJO_APPLICATION_NAME:-burrow.net}" +provider_name="${AUTHENTIK_FORGEJO_PROVIDER_NAME:-burrow.net}" +client_id="${AUTHENTIK_FORGEJO_CLIENT_ID:-git.burrow.net}" +client_secret="${AUTHENTIK_FORGEJO_CLIENT_SECRET:-}" +launch_url="${AUTHENTIK_FORGEJO_LAUNCH_URL:-https://git.burrow.net/}" +redirect_uris_json="${AUTHENTIK_FORGEJO_REDIRECT_URIS_JSON:-[ + \"https://git.burrow.net/user/oauth2/burrow.net/callback\", + \"https://git.burrow.net/user/oauth2/authentik/callback\", + \"https://git.burrow.net/user/oauth2/GitHub/callback\" +]}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-forgejo-oidc.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + AUTHENTIK_FORGEJO_CLIENT_SECRET + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_FORGEJO_APPLICATION_SLUG + AUTHENTIK_FORGEJO_APPLICATION_NAME + AUTHENTIK_FORGEJO_PROVIDER_NAME + 
AUTHENTIK_FORGEJO_CLIENT_ID + AUTHENTIK_FORGEJO_LAUNCH_URL + AUTHENTIK_FORGEJO_REDIRECT_URIS_JSON +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +if [[ -z "$client_secret" || "$client_secret" == PENDING* ]]; then + echo "Forgejo OIDC client secret is not configured; skipping Authentik Forgejo sync." >&2 + exit 0 +fi + +if ! printf '%s' "$redirect_uris_json" | jq -e 'type == "array" and length > 0' >/dev/null; then + echo "error: AUTHENTIK_FORGEJO_REDIRECT_URIS_JSON must be a non-empty JSON array" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +wait_for_authentik + +template_provider="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -c '.results[]? 
| select(.assigned_application_slug == "ts")' \ + | head -n1 +)" + +if [[ -z "$template_provider" ]]; then + echo "error: could not resolve the Burrow Tailnet OAuth provider template" >&2 + exit 1 +fi + +authorization_flow="$(printf '%s\n' "$template_provider" | jq -r '.authorization_flow')" +invalidation_flow="$(printf '%s\n' "$template_provider" | jq -r '.invalidation_flow')" +property_mappings="$(printf '%s\n' "$template_provider" | jq -c '.property_mappings')" +signing_key="$(printf '%s\n' "$template_provider" | jq -r '.signing_key')" + +provider_payload="$( + jq -n \ + --arg name "$provider_name" \ + --arg slug "$application_slug" \ + --arg authorization_flow "$authorization_flow" \ + --arg invalidation_flow "$invalidation_flow" \ + --arg client_id "$client_id" \ + --arg client_secret "$client_secret" \ + --arg signing_key "$signing_key" \ + --argjson property_mappings "$property_mappings" \ + --argjson redirect_uris "$redirect_uris_json" \ + '{ + name: $name, + slug: $slug, + authorization_flow: $authorization_flow, + invalidation_flow: $invalidation_flow, + client_type: "confidential", + client_id: $client_id, + client_secret: $client_secret, + include_claims_in_id_token: true, + redirect_uris: ($redirect_uris | map({matching_mode: "strict", url: .})), + property_mappings: $property_mappings, + signing_key: $signing_key, + issuer_mode: "per_provider", + sub_mode: "hashed_user_id" + }' +)" + +existing_provider="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -c \ + --arg application_slug "$application_slug" \ + --arg provider_name "$provider_name" \ + '.results[]? 
| select(.assigned_application_slug == $application_slug or .name == $provider_name)' \ + | head -n1 +)" + +if [[ -n "$existing_provider" ]]; then + provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')" + api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$provider_payload" >/dev/null +else + provider_pk="$( + api POST "/api/v3/providers/oauth2/" "$provider_payload" \ + | jq -r '.pk // empty' + )" +fi + +if [[ -z "${provider_pk:-}" ]]; then + echo "error: Forgejo OIDC provider did not return a primary key" >&2 + exit 1 +fi + +application_payload="$( + jq -n \ + --arg name "$application_name" \ + --arg slug "$application_slug" \ + --arg provider "$provider_pk" \ + --arg launch_url "$launch_url" \ + '{ + name: $name, + slug: $slug, + provider: ($provider | tonumber), + meta_launch_url: $launch_url, + open_in_new_tab: false, + policy_engine_mode: "any" + }' +)" + +existing_application="$( + api GET "/api/v3/core/applications/?slug=${application_slug}" \ + | jq -c '.results[]? | select(.slug != null)' \ + | head -n1 +)" + +if [[ -n "$existing_application" ]]; then + application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" +else + application_pk="$( + api POST "/api/v3/core/applications/" "$application_payload" \ + | jq -r '.pk // empty' + )" +fi + +if [[ -z "${application_pk:-}" ]]; then + echo "error: Forgejo OIDC application did not return a primary key" >&2 + exit 1 +fi + +for _ in $(seq 1 30); do + if curl -fsS "${authentik_url}/application/o/${application_slug}/.well-known/openid-configuration" >/dev/null 2>&1; then + echo "Synced Authentik Forgejo OIDC application ${application_slug} (${application_name})." + exit 0 + fi + sleep 2 +done + +echo "warning: Forgejo OIDC issuer document for ${application_slug} was not immediately readable; keeping reconciled config." >&2 +echo "Synced Authentik Forgejo OIDC application ${application_slug} (${application_name})." 
diff --git a/nixos/README.md b/nixos/README.md index acae40f..07b421d 100644 --- a/nixos/README.md +++ b/nixos/README.md @@ -33,7 +33,7 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B 4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account. 5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent `. 6. Run `Scripts/provision-forgejo-nsc.sh` locally, then `Scripts/sync-forgejo-nsc-config.sh` to place the Namespace dispatcher/autoscaler runtime inputs under `/var/lib/burrow/intake/`. -7. Ensure `/var/lib/agenix/agenix.key` exists on the host, encrypt `secrets/infra/authentik.env.age`, `secrets/infra/authentik-google-client-id.age`, `secrets/infra/authentik-google-client-secret.age`, and `secrets/infra/headscale-oidc-client-secret.age`, and let agenix materialize them under `/run/agenix/`. +7. Ensure `/var/lib/agenix/agenix.key` exists on the host, encrypt `secrets/infra/authentik.env.age`, `secrets/infra/authentik-google-client-id.age`, `secrets/infra/authentik-google-client-secret.age`, `secrets/infra/forgejo-oidc-client-secret.age`, and `secrets/infra/headscale-oidc-client-secret.age`, and let agenix materialize them under `/run/agenix/`. 8. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, `auth.burrow.net`, `ts.burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME. 9. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace. 10. Configure Forward Email custom S3 backups for `burrow.net` and `burrow.rs` out-of-band with `Tools/forwardemail-custom-s3.sh`. 
diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 6d4134c..314d6f1 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -33,6 +33,12 @@ group = "root"; mode = "0400"; }; + age.secrets.burrowForgejoOidcClientSecret = { + file = ../../../secrets/infra/forgejo-oidc-client-secret.age; + owner = "forgejo"; + group = "forgejo"; + mode = "0440"; + }; age.secrets.burrowAuthentikGoogleClientId = { file = ../../../secrets/infra/authentik-google-client-id.age; owner = "root"; @@ -54,6 +60,7 @@ services.burrow.forge = { enable = true; adminPasswordFile = "/var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt"; + oidcClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path; authorizedKeys = [ (builtins.readFile ../../keys/contact_at_burrow_net.pub) (builtins.readFile ../../keys/agent_at_burrow_net.pub) @@ -80,6 +87,7 @@ services.burrow.authentik = { enable = true; envFile = config.age.secrets.burrowAuthentikEnv.path; + forgejoClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path; headscaleClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path; googleClientIDFile = config.age.secrets.burrowAuthentikGoogleClientId.path; googleClientSecretFile = config.age.secrets.burrowAuthentikGoogleClientSecret.path; diff --git a/nixos/modules/burrow-authentik.nix b/nixos/modules/burrow-authentik.nix index 9e6bf1f..78a305a 100644 --- a/nixos/modules/burrow-authentik.nix +++ b/nixos/modules/burrow-authentik.nix @@ -8,6 +8,7 @@ let blueprintFile = "${blueprintDir}/burrow-authentik.yaml"; postgresVolume = "burrow-authentik-postgresql:/var/lib/postgresql/data"; dataVolume = "burrow-authentik-data:/data"; + forgejoOidcSyncScript = ../../Scripts/authentik-sync-forgejo-oidc.sh; googleSourceSyncScript = ../../Scripts/authentik-sync-google-source.sh; authentikBlueprint = pkgs.writeText "burrow-authentik-blueprint.yaml" '' version: 1 @@ -102,6 +103,30 @@ 
in description = "Authentik provider slug for Headscale."; }; + forgejoDomain = lib.mkOption { + type = lib.types.str; + default = "git.burrow.net"; + description = "Forgejo public domain used for the bundled OIDC client."; + }; + + forgejoProviderSlug = lib.mkOption { + type = lib.types.str; + default = "git"; + description = "Authentik application slug for Forgejo."; + }; + + forgejoClientId = lib.mkOption { + type = lib.types.str; + default = "git.burrow.net"; + description = "Client ID Authentik should present to Forgejo."; + }; + + forgejoClientSecretFile = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Host-local file containing the Authentik Forgejo OIDC client secret."; + }; + headscaleClientSecretFile = lib.mkOption { type = lib.types.str; default = "/var/lib/burrow/intake/authentik_headscale_client_secret.txt"; @@ -182,6 +207,13 @@ in exit 1 fi + ${lib.optionalString (cfg.forgejoClientSecretFile != null) '' + if [ ! -s ${lib.escapeShellArg cfg.forgejoClientSecretFile} ]; then + echo "Forgejo client secret missing: ${cfg.forgejoClientSecretFile}" >&2 + exit 1 + fi + ''} + install -d -m 0750 -o root -g root ${runtimeDir} ${blueprintDir} install -m 0644 -o root -g root ${authentikBlueprint} ${blueprintFile} @@ -208,6 +240,7 @@ AUTHENTIK_SECRET_KEY=$AUTHENTIK_SECRET_KEY AUTHENTIK_BOOTSTRAP_PASSWORD=$AUTHENTIK_BOOTSTRAP_PASSWORD AUTHENTIK_BOOTSTRAP_TOKEN=$AUTHENTIK_BOOTSTRAP_TOKEN AUTHENTIK_BURROW_TS_CLIENT_SECRET=$(read_secret ${lib.escapeShellArg cfg.headscaleClientSecretFile}) +${lib.optionalString (cfg.forgejoClientSecretFile != null) "AUTHENTIK_BURROW_FORGEJO_CLIENT_SECRET=$(read_secret ${lib.escapeShellArg cfg.forgejoClientSecretFile})"} EOF chown root:root ${envFile} chmod 0600 ${envFile} @@ -320,8 +353,6 @@ EOF Type = "oneshot"; User = "root"; Group = "root"; - Restart = "on-failure"; - RestartSec = 5; }; script = '' set -euo pipefail @@ -340,6 +371,52 @@ EOF ''; }; + 
systemd.services.burrow-authentik-forgejo-oidc = lib.mkIf (cfg.forgejoClientSecretFile != null) { + description = "Reconcile the Burrow Authentik Forgejo OIDC application"; + after = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + forgejoOidcSyncScript + cfg.envFile + cfg.forgejoClientSecretFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_FORGEJO_APPLICATION_SLUG=${lib.escapeShellArg cfg.forgejoProviderSlug} + export AUTHENTIK_FORGEJO_APPLICATION_NAME=burrow.net + export AUTHENTIK_FORGEJO_PROVIDER_NAME=burrow.net + export AUTHENTIK_FORGEJO_CLIENT_ID=${lib.escapeShellArg cfg.forgejoClientId} + export AUTHENTIK_FORGEJO_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.forgejoClientSecretFile})" + export AUTHENTIK_FORGEJO_LAUNCH_URL=https://${cfg.forgejoDomain}/ + export AUTHENTIK_FORGEJO_REDIRECT_URIS_JSON='["https://${cfg.forgejoDomain}/user/oauth2/burrow.net/callback","https://${cfg.forgejoDomain}/user/oauth2/authentik/callback","https://${cfg.forgejoDomain}/user/oauth2/GitHub/callback"]' + + ${pkgs.bash}/bin/bash ${forgejoOidcSyncScript} + ''; + }; + services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' encode gzip zstd reverse_proxy 127.0.0.1:${toString cfg.port} diff --git a/nixos/modules/burrow-forge.nix b/nixos/modules/burrow-forge.nix index e02475f..edf5538 100644 --- a/nixos/modules/burrow-forge.nix +++ b/nixos/modules/burrow-forge.nix @@ -68,6 +68,30 @@ in description = "Host-local path to the plaintext bootstrap password file for the initial Forgejo admin."; }; + oidcDisplayName = lib.mkOption { + type = lib.types.str; + 
default = "burrow.net"; + description = "Login button label for the Forgejo OIDC provider."; + }; + + oidcClientId = lib.mkOption { + type = lib.types.str; + default = "git.burrow.net"; + description = "OIDC client ID that Forgejo should use against Authentik."; + }; + + oidcClientSecretFile = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Host-local path to the Forgejo OIDC client secret."; + }; + + oidcDiscoveryUrl = lib.mkOption { + type = lib.types.str; + default = "https://auth.burrow.net/application/o/git/.well-known/openid-configuration"; + description = "OpenID Connect discovery URL for the Forgejo login source."; + }; + authorizedKeys = lib.mkOption { type = with lib.types; listOf str; default = [ ]; @@ -243,5 +267,113 @@ in fi ''; }; + + systemd.services.burrow-forgejo-oidc-bootstrap = lib.mkIf (cfg.oidcClientSecretFile != null) { + description = "Seed the Burrow Forgejo OIDC login source"; + after = [ + "forgejo.service" + "postgresql.service" + ] ++ lib.optionals config.services.burrow.authentik.enable [ + "burrow-authentik-ready.service" + ]; + wants = lib.optionals config.services.burrow.authentik.enable [ + "burrow-authentik-ready.service" + ]; + requires = [ + "forgejo.service" + "postgresql.service" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + cfg.oidcClientSecretFile + ]; + path = [ + pkgs.coreutils + pkgs.gnugrep + pkgs.jq + pkgs.postgresql + ]; + serviceConfig = { + Type = "oneshot"; + User = forgejoCfg.user; + Group = forgejoCfg.group; + WorkingDirectory = forgejoCfg.stateDir; + }; + script = '' + set -euo pipefail + + if [ ! 
-s ${lib.escapeShellArg cfg.oidcClientSecretFile} ]; then + echo "Forgejo OIDC client secret missing: ${cfg.oidcClientSecretFile}" >&2 + exit 1 + fi + + ready=0 + for attempt in $(seq 1 60); do + if ${pkgs.postgresql}/bin/psql -h /run/postgresql -U forgejo forgejo -tAc \ + "SELECT 1 FROM pg_tables WHERE schemaname='public' AND tablename='login_source';" \ + | grep -q 1; then + ready=1 + break + fi + sleep 1 + done + + if [ "$ready" -ne 1 ]; then + echo "Forgejo login_source table did not become ready" >&2 + exit 1 + fi + + oidc_secret="$(${pkgs.coreutils}/bin/tr -d '\r\n' < ${lib.escapeShellArg cfg.oidcClientSecretFile})" + if [ -z "$oidc_secret" ]; then + echo "Forgejo OIDC client secret is empty" >&2 + exit 1 + fi + + cfg_json="$(${pkgs.jq}/bin/jq -nc \ + --arg client_id ${lib.escapeShellArg cfg.oidcClientId} \ + --arg client_secret "$oidc_secret" \ + --arg discovery_url ${lib.escapeShellArg cfg.oidcDiscoveryUrl} \ + '{ + Provider: "openidConnect", + ClientID: $client_id, + ClientSecret: $client_secret, + OpenIDConnectAutoDiscoveryURL: $discovery_url, + CustomURLMapping: null, + IconURL: "", + Scopes: ["openid", "profile", "email"], + AttributeSSHPublicKey: "", + RequiredClaimName: "", + RequiredClaimValue: "", + GroupClaimName: "", + AdminGroup: "", + GroupTeamMap: "", + GroupTeamMapRemoval: false, + RestrictedGroup: "" + }')" + + ${pkgs.postgresql}/bin/psql -v ON_ERROR_STOP=1 \ + -h /run/postgresql -U forgejo forgejo \ + -v oidc_name=${lib.escapeShellArg cfg.oidcDisplayName} \ + -v cfg_json="$cfg_json" <<'SQL' + INSERT INTO login_source ( + type, name, is_active, is_sync_enabled, cfg, created_unix, updated_unix + ) VALUES ( + 6, + :'oidc_name', + TRUE, + FALSE, + :'cfg_json', + EXTRACT(EPOCH FROM NOW())::BIGINT, + EXTRACT(EPOCH FROM NOW())::BIGINT + ) + ON CONFLICT (name) DO UPDATE SET + type = EXCLUDED.type, + is_active = TRUE, + is_sync_enabled = FALSE, + cfg = EXCLUDED.cfg, + updated_unix = EXCLUDED.updated_unix; + SQL + ''; + }; }; } diff --git 
a/secrets.nix b/secrets.nix index c63d898..909b929 100644 --- a/secrets.nix +++ b/secrets.nix @@ -12,5 +12,6 @@ in "secrets/infra/authentik.env.age".publicKeys = burrowForgeRecipients; "secrets/infra/authentik-google-client-id.age".publicKeys = burrowForgeRecipients; "secrets/infra/authentik-google-client-secret.age".publicKeys = burrowForgeRecipients; + "secrets/infra/forgejo-oidc-client-secret.age".publicKeys = burrowForgeRecipients; "secrets/infra/headscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients; } diff --git a/secrets/infra/forgejo-oidc-client-secret.age b/secrets/infra/forgejo-oidc-client-secret.age new file mode 100644 index 0000000..ce6c440 --- /dev/null +++ b/secrets/infra/forgejo-oidc-client-secret.age @@ -0,0 +1,10 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q eaJ7I0AyitRWPLXnTbaazTiQ0qv2DRKOBNwx++QVrGk +1ScGy1EN80pr6QjJCToe/YRb0yHuFDR9pjoaWI/GlW8 +-> ssh-ed25519 IrZmAg AQIz2iWOSu+ewmasAa0nRFV17grA5/IRi4NEBinKaQ8 +8QIufDokWybbiRWV/OJle7kOdomyOnXSnxJeKF+5YI8 +-> X25519 9pO0rjF27QSQ6ZOgLiWAzbCBIP3MVZSapB+udiuz400 +74Ws3sCw4O3HvoCX96UhZd6b1SMptE82z9OIuEisOu8 +--- 8UR5iYLjAo6k1A3hpwiG+/mi2ZweMDvTbvi+XMWiimA +*Z(єQ ^ܯu+.nhs=0VRF +=Ge;zm_VMark4hݑ~Y<#:> \ No newline at end of file From 1ff8270a0128d1210f559b13b97e927b14150379 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 1 Apr 2026 01:26:08 -0700 Subject: [PATCH 02/59] Advertise OIDC discovery on burrow.net --- nixos/modules/burrow-forge.nix | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nixos/modules/burrow-forge.nix b/nixos/modules/burrow-forge.nix index edf5538..890e1d3 100644 --- a/nixos/modules/burrow-forge.nix +++ b/nixos/modules/burrow-forge.nix @@ -199,6 +199,12 @@ in reverse_proxy 127.0.0.1:${toString config.services.forgejo.settings.server.HTTP_PORT} ''; "${cfg.siteDomain}".extraConfig = '' + encode gzip zstd + @oidcConfig path /.well-known/openid-configuration + redir @oidcConfig 
https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.forgejoProviderSlug}/.well-known/openid-configuration 308 + @webfinger path /.well-known/webfinger + header @webfinger Content-Type application/jrd+json + respond @webfinger "{\"subject\":\"{query.resource}\",\"links\":[{\"rel\":\"http://openid.net/specs/connect/1.0/issuer\",\"href\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.forgejoProviderSlug}/\"}]}" 200 @root path / redir @root ${homeRepoUrl} 308 respond 404 From bb05bd9014aa7244ccf825f22ed17ffbb9fff8fe Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 1 Apr 2026 11:39:29 -0700 Subject: [PATCH 03/59] Add Burrow Authentik admin directory sync --- Scripts/authentik-sync-burrow-directory.sh | 249 +++++++++++++++++++++ Scripts/authentik-sync-forgejo-oidc.sh | 61 ++++- nixos/hosts/burrow-forge/default.nix | 16 ++ nixos/modules/burrow-authentik.nix | 128 +++++++++++ nixos/modules/burrow-forge.nix | 41 +++- 5 files changed, 484 insertions(+), 11 deletions(-) create mode 100644 Scripts/authentik-sync-burrow-directory.sh diff --git a/Scripts/authentik-sync-burrow-directory.sh b/Scripts/authentik-sync-burrow-directory.sh new file mode 100644 index 0000000..656b738 --- /dev/null +++ b/Scripts/authentik-sync-burrow-directory.sh @@ -0,0 +1,249 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +directory_json="${AUTHENTIK_BURROW_DIRECTORY_JSON:-[]}" +users_group="${AUTHENTIK_BURROW_USERS_GROUP:-burrow-users}" +admins_group="${AUTHENTIK_BURROW_ADMINS_GROUP:-burrow-admins}" +forgejo_application_slug="${AUTHENTIK_FORGEJO_APPLICATION_SLUG:-}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-burrow-directory.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + AUTHENTIK_BURROW_DIRECTORY_JSON + +Optional environment: + AUTHENTIK_URL + 
AUTHENTIK_BURROW_USERS_GROUP + AUTHENTIK_BURROW_ADMINS_GROUP + AUTHENTIK_FORGEJO_APPLICATION_SLUG +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +if ! printf '%s' "$directory_json" | jq -e 'type == "array"' >/dev/null; then + echo "error: AUTHENTIK_BURROW_DIRECTORY_JSON must be a JSON array" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +lookup_group_pk() { + local group_name="$1" + + api GET "/api/v3/core/groups/?page_size=200&search=${group_name}" \ + | jq -r --arg name "$group_name" '.results[]? 
| select(.name == $name) | .pk // empty' \ + | head -n1 +} + +ensure_group() { + local group_name="$1" + local payload group_pk + + payload="$( + jq -cn \ + --arg name "$group_name" \ + '{name: $name}' + )" + + group_pk="$(lookup_group_pk "$group_name")" + if [[ -n "$group_pk" ]]; then + api PATCH "/api/v3/core/groups/${group_pk}/" "$payload" >/dev/null + else + group_pk="$( + api POST "/api/v3/core/groups/" "$payload" \ + | jq -r '.pk // empty' + )" + fi + + if [[ -z "$group_pk" ]]; then + echo "error: could not create Authentik group ${group_name}" >&2 + exit 1 + fi + + printf '%s\n' "$group_pk" +} + +lookup_user_pk() { + local username="$1" + + api GET "/api/v3/core/users/?page_size=200&search=${username}" \ + | jq -r --arg username "$username" '.results[]? | select(.username == $username) | .pk // empty' \ + | head -n1 +} + +ensure_user() { + local user_spec="$1" + local username name email is_admin groups_json effective_groups_json group_name + local group_pks_json payload user_pk + + username="$(printf '%s\n' "$user_spec" | jq -r '.username')" + name="$(printf '%s\n' "$user_spec" | jq -r '.name')" + email="$(printf '%s\n' "$user_spec" | jq -r '.email')" + is_admin="$(printf '%s\n' "$user_spec" | jq -r '.isAdmin // false')" + groups_json="$(printf '%s\n' "$user_spec" | jq -c '.groups // []')" + + if [[ -z "$username" || "$username" == "null" || -z "$email" || "$email" == "null" ]]; then + echo "error: each Burrow Authentik user requires username and email" >&2 + exit 1 + fi + + effective_groups_json="$( + printf '%s\n' "$groups_json" \ + | jq -c --arg users_group "$users_group" --arg admins_group "$admins_group" --argjson is_admin "$is_admin" ' + . 
+ [$users_group] + (if $is_admin then [$admins_group] else [] end) | unique + ' + )" + + group_pks_json='[]' + while IFS= read -r group_name; do + group_pk="$(ensure_group "$group_name")" + group_pks_json="$( + jq -cn \ + --argjson current "$group_pks_json" \ + --arg next "$group_pk" \ + '$current + [$next]' + )" + done < <(printf '%s\n' "$effective_groups_json" | jq -r '.[]') + + payload="$( + jq -cn \ + --arg username "$username" \ + --arg name "$name" \ + --arg email "$email" \ + --argjson groups "$group_pks_json" \ + '{ + username: $username, + name: $name, + email: $email, + is_active: true, + path: "users", + groups: $groups + }' + )" + + user_pk="$(lookup_user_pk "$username")" + if [[ -n "$user_pk" ]]; then + api PATCH "/api/v3/core/users/${user_pk}/" "$payload" >/dev/null + else + user_pk="$( + api POST "/api/v3/core/users/" "$payload" \ + | jq -r '.pk // empty' + )" + fi + + if [[ -z "$user_pk" ]]; then + echo "error: could not create Authentik user ${username}" >&2 + exit 1 + fi +} + +lookup_application_pk() { + local slug="$1" + + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -r --arg slug "$slug" '.results[]? | select(.slug == $slug) | .pk // empty' \ + | head -n1 +} + +ensure_application_group_binding() { + local application_slug="$1" + local group_name="$2" + local application_pk group_pk existing payload binding_pk + + application_pk="$(lookup_application_pk "$application_slug")" + if [[ -z "$application_pk" ]]; then + echo "warning: could not resolve Authentik application ${application_slug}; skipping application group binding" >&2 + return 0 + fi + + group_pk="$(lookup_group_pk "$group_name")" + if [[ -z "$group_pk" ]]; then + echo "error: could not resolve Authentik group ${group_name}" >&2 + exit 1 + fi + + existing="$( + api GET "/api/v3/policies/bindings/?page_size=200&target=${application_pk}" \ + | jq -c --arg group_pk "$group_pk" '.results[]? 
| select(.group == $group_pk)' \ + | head -n1 + )" + + payload="$( + jq -cn \ + --arg target "$application_pk" \ + --arg group "$group_pk" \ + '{ + group: $group, + target: $target, + negate: false, + enabled: true, + order: 100, + timeout: 30, + failure_result: false + }' + )" + + if [[ -n "$existing" ]]; then + binding_pk="$(printf '%s\n' "$existing" | jq -r '.pk')" + api PATCH "/api/v3/policies/bindings/${binding_pk}/" "$payload" >/dev/null + else + api POST "/api/v3/policies/bindings/" "$payload" >/dev/null + fi +} + +wait_for_authentik +ensure_group "$users_group" >/dev/null +ensure_group "$admins_group" >/dev/null + +while IFS= read -r user_spec; do + ensure_user "$user_spec" +done < <(printf '%s\n' "$directory_json" | jq -c '.[]') + +if [[ -n "$forgejo_application_slug" ]]; then + ensure_application_group_binding "$forgejo_application_slug" "$users_group" +fi + +echo "Synced Burrow Authentik directory." diff --git a/Scripts/authentik-sync-forgejo-oidc.sh b/Scripts/authentik-sync-forgejo-oidc.sh index f354633..7b292dc 100644 --- a/Scripts/authentik-sync-forgejo-oidc.sh +++ b/Scripts/authentik-sync-forgejo-oidc.sh @@ -74,6 +74,41 @@ api() { fi } +api_with_status() { + local method="$1" + local path="$2" + local data="${3:-}" + local response_file status + + response_file="$(mktemp)" + trap 'rm -f "$response_file"' RETURN + + if [[ -n "$data" ]]; then + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + )" + else + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + )" + fi + + printf '%s\n' "$status" + cat "$response_file" +} + wait_for_authentik() { for _ in $(seq 1 90); do if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then @@ -106,7 +141,6 @@ 
signing_key="$(printf '%s\n' "$template_provider" | jq -r '.signing_key')" provider_payload="$( jq -n \ --arg name "$provider_name" \ - --arg slug "$application_slug" \ --arg authorization_flow "$authorization_flow" \ --arg invalidation_flow "$invalidation_flow" \ --arg client_id "$client_id" \ @@ -116,7 +150,6 @@ provider_payload="$( --argjson redirect_uris "$redirect_uris_json" \ '{ name: $name, - slug: $slug, authorization_flow: $authorization_flow, invalidation_flow: $invalidation_flow, client_type: "confidential", @@ -172,18 +205,32 @@ application_payload="$( )" existing_application="$( - api GET "/api/v3/core/applications/?slug=${application_slug}" \ - | jq -c '.results[]? | select(.slug != null)' \ + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -c --arg slug "$application_slug" '.results[]? | select(.slug == $slug)' \ | head -n1 )" if [[ -n "$existing_application" ]]; then application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" else - application_pk="$( - api POST "/api/v3/core/applications/" "$application_payload" \ - | jq -r '.pk // empty' + create_application_result="$( + api_with_status POST "/api/v3/core/applications/" "$application_payload" )" + create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')" + create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')" + + if [[ "$create_application_status" =~ ^20[01]$ ]]; then + application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')" + elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e ' + (.slug // [] | index("Application with this slug already exists.")) != null + or (.provider // [] | index("Application with this provider already exists.")) != null + ' >/dev/null; then + application_pk="existing-duplicate" + else + printf '%s\n' "$create_application_body" >&2 + echo "error: could not reconcile Authentik application ${application_slug}" >&2 + 
exit 1 + fi fi if [[ -z "${application_pk:-}" ]]; then diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 314d6f1..76b0ef5 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -91,6 +91,22 @@ headscaleClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path; googleClientIDFile = config.age.secrets.burrowAuthentikGoogleClientId.path; googleClientSecretFile = config.age.secrets.burrowAuthentikGoogleClientSecret.path; + bootstrapUsers = [ + { + username = "contact"; + name = "Burrow"; + email = "contact@burrow.net"; + sourceEmail = "net.burrow@gmail.com"; + isAdmin = true; + } + { + username = "conrad"; + name = "Conrad Kramer"; + email = "conrad@burrow.net"; + sourceEmail = "ckrames1234@gmail.com"; + isAdmin = true; + } + ]; }; services.burrow.headscale = { diff --git a/nixos/modules/burrow-authentik.nix b/nixos/modules/burrow-authentik.nix index 78a305a..4e31d43 100644 --- a/nixos/modules/burrow-authentik.nix +++ b/nixos/modules/burrow-authentik.nix @@ -8,6 +8,7 @@ let blueprintFile = "${blueprintDir}/burrow-authentik.yaml"; postgresVolume = "burrow-authentik-postgresql:/var/lib/postgresql/data"; dataVolume = "burrow-authentik-data:/data"; + directorySyncScript = ../../Scripts/authentik-sync-burrow-directory.sh; forgejoOidcSyncScript = ../../Scripts/authentik-sync-forgejo-oidc.sh; googleSourceSyncScript = ../../Scripts/authentik-sync-google-source.sh; authentikBlueprint = pkgs.writeText "burrow-authentik-blueprint.yaml" '' @@ -31,6 +32,19 @@ let "email_verified": True, } + - model: authentik_providers_oauth2.scopemapping + id: burrow-oidc-groups + identifiers: + name: Burrow OIDC Groups + attrs: + name: Burrow OIDC Groups + scope_name: groups + description: Group membership mapping for Burrow + expression: | + return { + "groups": [group.name for group in request.user.ak_groups.all()], + } + - model: authentik_providers_oauth2.oauth2provider id: 
burrow-oidc-provider-ts identifiers: @@ -50,6 +64,7 @@ let property_mappings: - !Find [authentik_providers_oauth2.scopemapping, [managed, goauthentik.io/providers/oauth2/scope-openid]] - !KeyOf burrow-oidc-email + - !KeyOf burrow-oidc-groups - !Find [authentik_providers_oauth2.scopemapping, [managed, goauthentik.io/providers/oauth2/scope-profile]] signing_key: !Find [authentik_crypto.certificatekeypair, [name, authentik Self-signed Certificate]] @@ -159,6 +174,54 @@ in default = "redirect"; description = "Identification-stage behavior for the Google Authentik source."; }; + + userGroupName = lib.mkOption { + type = lib.types.str; + default = "burrow-users"; + description = "Authentik group granted baseline Burrow access."; + }; + + adminGroupName = lib.mkOption { + type = lib.types.str; + default = "burrow-admins"; + description = "Authentik group granted Burrow administrator access."; + }; + + bootstrapUsers = lib.mkOption { + type = with lib.types; listOf (submodule { + options = { + username = lib.mkOption { + type = str; + description = "Authentik username."; + }; + name = lib.mkOption { + type = str; + description = "Display name for the user."; + }; + email = lib.mkOption { + type = str; + description = "Canonical email stored in Authentik."; + }; + sourceEmail = lib.mkOption { + type = nullOr str; + default = null; + description = "External Google account email that should map onto this Authentik user."; + }; + groups = lib.mkOption { + type = listOf str; + default = [ ]; + description = "Additional Authentik groups for this user."; + }; + isAdmin = lib.mkOption { + type = bool; + default = false; + description = "Whether this user should be in the Burrow admin group."; + }; + }; + }); + default = [ ]; + description = "Declarative Burrow users to create in Authentik."; + }; }; config = lib.mkIf cfg.enable { @@ -295,6 +358,16 @@ EOF ]; }; + systemd.services.podman-burrow-authentik-server.restartTriggers = [ + blueprintFile + envFile + ]; + + 
systemd.services.podman-burrow-authentik-worker.restartTriggers = [ + blueprintFile + envFile + ]; + systemd.services.burrow-authentik-ready = { description = "Wait for Burrow Authentik to become ready"; after = [ "podman-burrow-authentik-server.service" ]; @@ -366,11 +439,66 @@ EOF export AUTHENTIK_GOOGLE_USER_MATCHING_MODE=email_link export AUTHENTIK_GOOGLE_CLIENT_ID="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.googleClientIDFile})" export AUTHENTIK_GOOGLE_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.googleClientSecretFile})" + export AUTHENTIK_GOOGLE_ACCOUNT_MAP_JSON='${builtins.toJSON (map (user: { + source_email = user.sourceEmail; + username = user.username; + email = user.email; + name = user.name; + }) (lib.filter (user: user.sourceEmail != null) cfg.bootstrapUsers))}' ${pkgs.bash}/bin/bash ${googleSourceSyncScript} ''; }; + systemd.services.burrow-authentik-directory = lib.mkIf (cfg.bootstrapUsers != [ ]) { + description = "Reconcile Burrow Authentik users and groups"; + after = + [ + "burrow-authentik-ready.service" + "network-online.target" + ] + ++ lib.optionals (cfg.forgejoClientSecretFile != null) [ "burrow-authentik-forgejo-oidc.service" ]; + wants = + [ + "burrow-authentik-ready.service" + "network-online.target" + ] + ++ lib.optionals (cfg.forgejoClientSecretFile != null) [ "burrow-authentik-forgejo-oidc.service" ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + directorySyncScript + cfg.envFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_BURROW_USERS_GROUP=${lib.escapeShellArg cfg.userGroupName} + export AUTHENTIK_BURROW_ADMINS_GROUP=${lib.escapeShellArg cfg.adminGroupName} + export AUTHENTIK_FORGEJO_APPLICATION_SLUG=${lib.escapeShellArg 
cfg.forgejoProviderSlug} + export AUTHENTIK_BURROW_DIRECTORY_JSON='${builtins.toJSON (map (user: { + inherit (user) username name email isAdmin; + groups = user.groups; + }) cfg.bootstrapUsers)}' + + ${pkgs.bash}/bin/bash ${directorySyncScript} + ''; + }; + systemd.services.burrow-authentik-forgejo-oidc = lib.mkIf (cfg.forgejoClientSecretFile != null) { description = "Reconcile the Burrow Authentik Forgejo OIDC application"; after = [ diff --git a/nixos/modules/burrow-forge.nix b/nixos/modules/burrow-forge.nix index 890e1d3..e2a57e0 100644 --- a/nixos/modules/burrow-forge.nix +++ b/nixos/modules/burrow-forge.nix @@ -92,6 +92,35 @@ in description = "OpenID Connect discovery URL for the Forgejo login source."; }; + oidcScopes = lib.mkOption { + type = with lib.types; listOf str; + default = [ + "openid" + "profile" + "email" + "groups" + ]; + description = "OIDC scopes requested from Authentik."; + }; + + oidcGroupClaimName = lib.mkOption { + type = lib.types.str; + default = "groups"; + description = "OIDC claim name that carries group membership."; + }; + + oidcAdminGroup = lib.mkOption { + type = lib.types.str; + default = "burrow-admins"; + description = "OIDC group that should grant Forgejo admin access."; + }; + + oidcRestrictedGroup = lib.mkOption { + type = lib.types.str; + default = "burrow-users"; + description = "OIDC group that is required to log into Forgejo."; + }; + authorizedKeys = lib.mkOption { type = with lib.types; listOf str; default = [ ]; @@ -339,6 +368,10 @@ in --arg client_id ${lib.escapeShellArg cfg.oidcClientId} \ --arg client_secret "$oidc_secret" \ --arg discovery_url ${lib.escapeShellArg cfg.oidcDiscoveryUrl} \ + --argjson scopes '${builtins.toJSON cfg.oidcScopes}' \ + --arg group_claim_name ${lib.escapeShellArg cfg.oidcGroupClaimName} \ + --arg admin_group ${lib.escapeShellArg cfg.oidcAdminGroup} \ + --arg restricted_group ${lib.escapeShellArg cfg.oidcRestrictedGroup} \ '{ Provider: "openidConnect", ClientID: $client_id, @@ -346,15 
+379,15 @@ in OpenIDConnectAutoDiscoveryURL: $discovery_url, CustomURLMapping: null, IconURL: "", - Scopes: ["openid", "profile", "email"], + Scopes: $scopes, AttributeSSHPublicKey: "", RequiredClaimName: "", RequiredClaimValue: "", - GroupClaimName: "", - AdminGroup: "", + GroupClaimName: $group_claim_name, + AdminGroup: $admin_group, GroupTeamMap: "", GroupTeamMapRemoval: false, - RestrictedGroup: "" + RestrictedGroup: $restricted_group }')" ${pkgs.postgresql}/bin/psql -v ON_ERROR_STOP=1 \ From 3332bf5c53c244eae3867449c7b2ec8908798231 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Wed, 1 Apr 2026 13:43:47 -0700 Subject: [PATCH 04/59] Fix Forgejo OIDC account linking --- nixos/modules/burrow-forge.nix | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/nixos/modules/burrow-forge.nix b/nixos/modules/burrow-forge.nix index e2a57e0..d238f2e 100644 --- a/nixos/modules/burrow-forge.nix +++ b/nixos/modules/burrow-forge.nix @@ -121,6 +121,24 @@ in description = "OIDC group that is required to log into Forgejo."; }; + oidcAutoRegistration = lib.mkOption { + type = lib.types.bool; + default = true; + description = "Whether Forgejo should automatically create users for new OIDC sign-ins."; + }; + + oidcAccountLinking = lib.mkOption { + type = lib.types.enum [ "disabled" "login" "auto" ]; + default = "auto"; + description = "How Forgejo should link existing local accounts for OIDC sign-ins."; + }; + + oidcUsernameSource = lib.mkOption { + type = lib.types.enum [ "userid" "nickname" "email" ]; + default = "email"; + description = "Which OIDC claim Forgejo should use to derive usernames for auto-registration."; + }; + authorizedKeys = lib.mkOption { type = with lib.types; listOf str; default = [ ]; @@ -201,6 +219,13 @@ in ENABLE_OPENID_SIGNUP = false; }; + oauth2_client = { + OPENID_CONNECT_SCOPES = lib.concatStringsSep " " (lib.subtractLists [ "openid" ] cfg.oidcScopes); + ENABLE_AUTO_REGISTRATION = cfg.oidcAutoRegistration; + ACCOUNT_LINKING = 
cfg.oidcAccountLinking; + USERNAME = cfg.oidcUsernameSource; + }; + actions = { ENABLED = true; }; From 72b7f1467b18bd1bb376134ad13fa54e2f041c7b Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 2 Apr 2026 21:44:10 -0700 Subject: [PATCH 05/59] Disable Forgejo local password sign-in --- nixos/hosts/burrow-forge/default.nix | 1 + nixos/modules/burrow-forge.nix | 3 +++ 2 files changed, 4 insertions(+) diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 76b0ef5..d612ea8 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -91,6 +91,7 @@ headscaleClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path; googleClientIDFile = config.age.secrets.burrowAuthentikGoogleClientId.path; googleClientSecretFile = config.age.secrets.burrowAuthentikGoogleClientSecret.path; + googleLoginMode = "redirect"; bootstrapUsers = [ { username = "contact"; diff --git a/nixos/modules/burrow-forge.nix b/nixos/modules/burrow-forge.nix index d238f2e..51af7eb 100644 --- a/nixos/modules/burrow-forge.nix +++ b/nixos/modules/burrow-forge.nix @@ -203,6 +203,9 @@ in service = { DISABLE_REGISTRATION = true; + ENABLE_INTERNAL_SIGNIN = false; + ENABLE_BASIC_AUTHENTICATION = false; + SHOW_REGISTRATION_BUTTON = false; REQUIRE_SIGNIN_VIEW = false; DEFAULT_ALLOW_CREATE_ORGANIZATION = false; ENABLE_NOTIFY_MAIL = false; From baf1408060597229ff3cf0082cf783cbe3ff064f Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Fri, 3 Apr 2026 00:17:12 -0700 Subject: [PATCH 06/59] Add Tailnet landing page --- nixos/modules/burrow-headscale.nix | 134 ++++++++++++++++++++++++++++- 1 file changed, 133 insertions(+), 1 deletion(-) diff --git a/nixos/modules/burrow-headscale.nix b/nixos/modules/burrow-headscale.nix index ad5ec68..98cf5ba 100644 --- a/nixos/modules/burrow-headscale.nix +++ b/nixos/modules/burrow-headscale.nix @@ -3,6 +3,131 @@ let cfg = config.services.burrow.headscale; policyFile = 
./burrow-headscale-policy.hujson; + landingPage = pkgs.writeTextDir "index.html" '' + + + + + + Burrow Tailnet + + + +
+
Burrow Tailnet
+
+

Sign-in starts from your client, not this page.

+

+ ts.burrow.net is the Burrow Headscale control plane. Headscale does not provide a built-in web UI, + so browser authentication starts only after a Tailscale-compatible client initiates login. +

+
+
tailscale up --login-server https://ts.burrow.net
+ +
+ + + ''; in { options.services.burrow.headscale = { @@ -221,7 +346,14 @@ in services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' encode gzip zstd - reverse_proxy 127.0.0.1:${toString cfg.port} + @root path / + handle @root { + root * ${landingPage} + file_server + } + handle { + reverse_proxy 127.0.0.1:${toString cfg.port} + } ''; }; } From 1da00ecdf3126cc33bd718efd00a72deb39d610f Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Fri, 3 Apr 2026 00:42:39 -0700 Subject: [PATCH 07/59] Add email-based tailnet discovery to Apple app --- Apple/Burrow.xcodeproj/project.pbxproj | 36 +- Apple/Core/Client/Generated/burrow.grpc.swift | 761 ++++++++++++++++++ Apple/Core/Client/Generated/burrow.pb.swift | 566 +++++++++++++ Apple/Core/Client/grpc-swift-config.json | 11 - Apple/Core/Client/swift-protobuf-config.json | 10 - Apple/UI/BurrowView.swift | 175 +++- Apple/UI/Networks/Network.swift | 46 +- burrow/src/auth/server/mod.rs | 39 +- burrow/src/control/discovery.rs | 212 +++++ burrow/src/control/mod.rs | 2 + nixos/modules/burrow-forge.nix | 5 +- nixos/modules/burrow-headscale.nix | 134 +-- 12 files changed, 1784 insertions(+), 213 deletions(-) create mode 100644 Apple/Core/Client/Generated/burrow.grpc.swift create mode 100644 Apple/Core/Client/Generated/burrow.pb.swift delete mode 100644 Apple/Core/Client/grpc-swift-config.json delete mode 100644 Apple/Core/Client/swift-protobuf-config.json create mode 100644 burrow/src/control/discovery.rs diff --git a/Apple/Burrow.xcodeproj/project.pbxproj b/Apple/Burrow.xcodeproj/project.pbxproj index 995af28..9897f79 100644 --- a/Apple/Burrow.xcodeproj/project.pbxproj +++ b/Apple/Burrow.xcodeproj/project.pbxproj @@ -42,8 +42,8 @@ D0D4E5A62C8D9E65007F820A /* BurrowCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D0D4E5312C8D996F007F820A /* BurrowCore.framework */; }; D0F4FAD32C8DC79C0068730A /* BurrowCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D0D4E5312C8D996F007F820A /* BurrowCore.framework 
*/; }; D0F7594E2C8DAB6B00126CF3 /* GRPC in Frameworks */ = {isa = PBXBuildFile; productRef = D078F7E02C8DA375008A8CEC /* GRPC */; }; - D0F759612C8DB24B00126CF3 /* grpc-swift-config.json in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4962C8D921A007F820A /* grpc-swift-config.json */; }; - D0F759622C8DB24B00126CF3 /* swift-protobuf-config.json in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4972C8D921A007F820A /* swift-protobuf-config.json */; }; + D0FA10012D10200100112233 /* burrow.pb.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0FA10032D10200100112233 /* burrow.pb.swift */; }; + D0FA10022D10200100112233 /* burrow.grpc.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0FA10042D10200100112233 /* burrow.grpc.swift */; }; D0F7597E2C8DB30500126CF3 /* CGRPCZlib in Frameworks */ = {isa = PBXBuildFile; productRef = D0F7597D2C8DB30500126CF3 /* CGRPCZlib */; }; D0F7598D2C8DB3DA00126CF3 /* Client.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4992C8D921A007F820A /* Client.swift */; }; /* End PBXBuildFile section */ @@ -154,8 +154,6 @@ D0BCC6032A09535900AD070D /* libburrow.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libburrow.a; sourceTree = BUILT_PRODUCTS_DIR; }; D0BF09582C8E6789000D8DEC /* UI.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = UI.xcconfig; sourceTree = ""; }; D0D4E4952C8D921A007F820A /* burrow.proto */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.protobuf; path = burrow.proto; sourceTree = ""; }; - D0D4E4962C8D921A007F820A /* grpc-swift-config.json */ = {isa = PBXFileReference; lastKnownFileType = text.json; path = "grpc-swift-config.json"; sourceTree = ""; }; - D0D4E4972C8D921A007F820A /* swift-protobuf-config.json */ = {isa = PBXFileReference; lastKnownFileType = text.json; path = "swift-protobuf-config.json"; sourceTree = ""; }; D0D4E4992C8D921A007F820A /* Client.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Client.swift; 
sourceTree = ""; }; D0D4E49A2C8D921A007F820A /* Logging.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Logging.swift; sourceTree = ""; }; D0D4E49E2C8D921A007F820A /* Network.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Network.swift; sourceTree = ""; }; @@ -179,6 +177,8 @@ D0D4E58E2C8D9D0A007F820A /* Constants.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = Constants.h; sourceTree = ""; }; D0D4E58F2C8D9D0A007F820A /* Constants.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Constants.swift; sourceTree = ""; }; D0D4E5902C8D9D0A007F820A /* module.modulemap */ = {isa = PBXFileReference; lastKnownFileType = "sourcecode.module-map"; path = module.modulemap; sourceTree = ""; }; + D0FA10032D10200100112233 /* burrow.pb.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Generated/burrow.pb.swift; sourceTree = ""; }; + D0FA10042D10200100112233 /* burrow.grpc.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Generated/burrow.grpc.swift; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ @@ -317,8 +317,8 @@ isa = PBXGroup; children = ( D0D4E4952C8D921A007F820A /* burrow.proto */, - D0D4E4962C8D921A007F820A /* grpc-swift-config.json */, - D0D4E4972C8D921A007F820A /* swift-protobuf-config.json */, + D0FA10032D10200100112233 /* burrow.pb.swift */, + D0FA10042D10200100112233 /* burrow.grpc.swift */, ); path = Client; sourceTree = ""; @@ -428,8 +428,6 @@ ); dependencies = ( D0F7598A2C8DB34200126CF3 /* PBXTargetDependency */, - D0F7595E2C8DB24400126CF3 /* PBXTargetDependency */, - D0F759602C8DB24400126CF3 /* PBXTargetDependency */, ); name = Core; packageProductDependencies = ( @@ -617,8 +615,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( - D0F759612C8DB24B00126CF3 /* grpc-swift-config.json in Sources */, - 
D0F759622C8DB24B00126CF3 /* swift-protobuf-config.json in Sources */, + D0FA10012D10200100112233 /* burrow.pb.swift in Sources */, + D0FA10022D10200100112233 /* burrow.grpc.swift in Sources */, D0F7598D2C8DB3DA00126CF3 /* Client.swift in Sources */, D0D4E56B2C8D9C2F007F820A /* Logging.swift in Sources */, ); @@ -689,14 +687,6 @@ target = D0D4E5302C8D996F007F820A /* Core */; targetProxy = D0F4FAD12C8DC7960068730A /* PBXContainerItemProxy */; }; - D0F7595E2C8DB24400126CF3 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - productRef = D0F7595D2C8DB24400126CF3 /* GRPCSwiftPlugin */; - }; - D0F759602C8DB24400126CF3 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - productRef = D0F7595F2C8DB24400126CF3 /* SwiftProtobufPlugin */; - }; D0F7598A2C8DB34200126CF3 /* PBXTargetDependency */ = { isa = PBXTargetDependency; productRef = D0F759892C8DB34200126CF3 /* GRPC */; @@ -921,16 +911,6 @@ package = D0B1D10E2C436152004B7823 /* XCRemoteSwiftPackageReference "swift-async-algorithms" */; productName = AsyncAlgorithms; }; - D0F7595D2C8DB24400126CF3 /* GRPCSwiftPlugin */ = { - isa = XCSwiftPackageProductDependency; - package = D0D4E4822C8D8EF6007F820A /* XCRemoteSwiftPackageReference "grpc-swift" */; - productName = "plugin:GRPCSwiftPlugin"; - }; - D0F7595F2C8DB24400126CF3 /* SwiftProtobufPlugin */ = { - isa = XCSwiftPackageProductDependency; - package = D0D4E4852C8D8F29007F820A /* XCRemoteSwiftPackageReference "swift-protobuf" */; - productName = "plugin:SwiftProtobufPlugin"; - }; D0F7597D2C8DB30500126CF3 /* CGRPCZlib */ = { isa = XCSwiftPackageProductDependency; package = D0D4E4822C8D8EF6007F820A /* XCRemoteSwiftPackageReference "grpc-swift" */; diff --git a/Apple/Core/Client/Generated/burrow.grpc.swift b/Apple/Core/Client/Generated/burrow.grpc.swift new file mode 100644 index 0000000..d1f848c --- /dev/null +++ b/Apple/Core/Client/Generated/burrow.grpc.swift @@ -0,0 +1,761 @@ +// +// DO NOT EDIT. 
+// swift-format-ignore-file +// +// Generated by the protocol buffer compiler. +// Source: burrow.proto +// +import GRPC +import NIO +import NIOConcurrencyHelpers +import SwiftProtobuf + + +/// Usage: instantiate `Burrow_TunnelClient`, then call methods of this protocol to make API calls. +public protocol Burrow_TunnelClientProtocol: GRPCClient { + var serviceName: String { get } + var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? { get } + + func tunnelConfiguration( + _ request: Burrow_Empty, + callOptions: CallOptions?, + handler: @escaping (Burrow_TunnelConfigurationResponse) -> Void + ) -> ServerStreamingCall + + func tunnelStart( + _ request: Burrow_Empty, + callOptions: CallOptions? + ) -> UnaryCall + + func tunnelStop( + _ request: Burrow_Empty, + callOptions: CallOptions? + ) -> UnaryCall + + func tunnelStatus( + _ request: Burrow_Empty, + callOptions: CallOptions?, + handler: @escaping (Burrow_TunnelStatusResponse) -> Void + ) -> ServerStreamingCall +} + +extension Burrow_TunnelClientProtocol { + public var serviceName: String { + return "burrow.Tunnel" + } + + /// Server streaming call to TunnelConfiguration + /// + /// - Parameters: + /// - request: Request to send to TunnelConfiguration. + /// - callOptions: Call options. + /// - handler: A closure called when each response is received from the server. + /// - Returns: A `ServerStreamingCall` with futures for the metadata and status. + public func tunnelConfiguration( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil, + handler: @escaping (Burrow_TunnelConfigurationResponse) -> Void + ) -> ServerStreamingCall { + return self.makeServerStreamingCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelConfiguration.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelConfigurationInterceptors() ?? 
[], + handler: handler + ) + } + + /// Unary call to TunnelStart + /// + /// - Parameters: + /// - request: Request to send to TunnelStart. + /// - callOptions: Call options. + /// - Returns: A `UnaryCall` with futures for the metadata, status and response. + public func tunnelStart( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> UnaryCall { + return self.makeUnaryCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStart.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStartInterceptors() ?? [] + ) + } + + /// Unary call to TunnelStop + /// + /// - Parameters: + /// - request: Request to send to TunnelStop. + /// - callOptions: Call options. + /// - Returns: A `UnaryCall` with futures for the metadata, status and response. + public func tunnelStop( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> UnaryCall { + return self.makeUnaryCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStop.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStopInterceptors() ?? [] + ) + } + + /// Server streaming call to TunnelStatus + /// + /// - Parameters: + /// - request: Request to send to TunnelStatus. + /// - callOptions: Call options. + /// - handler: A closure called when each response is received from the server. + /// - Returns: A `ServerStreamingCall` with futures for the metadata and status. + public func tunnelStatus( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil, + handler: @escaping (Burrow_TunnelStatusResponse) -> Void + ) -> ServerStreamingCall { + return self.makeServerStreamingCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStatus.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStatusInterceptors() ?? 
[], + handler: handler + ) + } +} + +@available(*, deprecated) +extension Burrow_TunnelClient: @unchecked Sendable {} + +@available(*, deprecated, renamed: "Burrow_TunnelNIOClient") +public final class Burrow_TunnelClient: Burrow_TunnelClientProtocol { + private let lock = Lock() + private var _defaultCallOptions: CallOptions + private var _interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? + public let channel: GRPCChannel + public var defaultCallOptions: CallOptions { + get { self.lock.withLock { return self._defaultCallOptions } } + set { self.lock.withLockVoid { self._defaultCallOptions = newValue } } + } + public var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? { + get { self.lock.withLock { return self._interceptors } } + set { self.lock.withLockVoid { self._interceptors = newValue } } + } + + /// Creates a client for the burrow.Tunnel service. + /// + /// - Parameters: + /// - channel: `GRPCChannel` to the service host. + /// - defaultCallOptions: Options to use for each service call if the user doesn't provide them. + /// - interceptors: A factory providing interceptors for each RPC. + public init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? = nil + ) { + self.channel = channel + self._defaultCallOptions = defaultCallOptions + self._interceptors = interceptors + } +} + +public struct Burrow_TunnelNIOClient: Burrow_TunnelClientProtocol { + public var channel: GRPCChannel + public var defaultCallOptions: CallOptions + public var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? + + /// Creates a client for the burrow.Tunnel service. + /// + /// - Parameters: + /// - channel: `GRPCChannel` to the service host. + /// - defaultCallOptions: Options to use for each service call if the user doesn't provide them. + /// - interceptors: A factory providing interceptors for each RPC. 
+ public init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? = nil + ) { + self.channel = channel + self.defaultCallOptions = defaultCallOptions + self.interceptors = interceptors + } +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +public protocol Burrow_TunnelAsyncClientProtocol: GRPCClient { + static var serviceDescriptor: GRPCServiceDescriptor { get } + var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? { get } + + func makeTunnelConfigurationCall( + _ request: Burrow_Empty, + callOptions: CallOptions? + ) -> GRPCAsyncServerStreamingCall + + func makeTunnelStartCall( + _ request: Burrow_Empty, + callOptions: CallOptions? + ) -> GRPCAsyncUnaryCall + + func makeTunnelStopCall( + _ request: Burrow_Empty, + callOptions: CallOptions? + ) -> GRPCAsyncUnaryCall + + func makeTunnelStatusCall( + _ request: Burrow_Empty, + callOptions: CallOptions? + ) -> GRPCAsyncServerStreamingCall +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +extension Burrow_TunnelAsyncClientProtocol { + public static var serviceDescriptor: GRPCServiceDescriptor { + return Burrow_TunnelClientMetadata.serviceDescriptor + } + + public var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? { + return nil + } + + public func makeTunnelConfigurationCall( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncServerStreamingCall { + return self.makeAsyncServerStreamingCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelConfiguration.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelConfigurationInterceptors() ?? [] + ) + } + + public func makeTunnelStartCall( + _ request: Burrow_Empty, + callOptions: CallOptions? 
= nil + ) -> GRPCAsyncUnaryCall { + return self.makeAsyncUnaryCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStart.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStartInterceptors() ?? [] + ) + } + + public func makeTunnelStopCall( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncUnaryCall { + return self.makeAsyncUnaryCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStop.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStopInterceptors() ?? [] + ) + } + + public func makeTunnelStatusCall( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncServerStreamingCall { + return self.makeAsyncServerStreamingCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStatus.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStatusInterceptors() ?? [] + ) + } +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +extension Burrow_TunnelAsyncClientProtocol { + public func tunnelConfiguration( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream { + return self.performAsyncServerStreamingCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelConfiguration.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelConfigurationInterceptors() ?? [] + ) + } + + public func tunnelStart( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) async throws -> Burrow_Empty { + return try await self.performAsyncUnaryCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStart.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStartInterceptors() ?? 
[] + ) + } + + public func tunnelStop( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) async throws -> Burrow_Empty { + return try await self.performAsyncUnaryCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStop.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStopInterceptors() ?? [] + ) + } + + public func tunnelStatus( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream { + return self.performAsyncServerStreamingCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStatus.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStatusInterceptors() ?? [] + ) + } +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +public struct Burrow_TunnelAsyncClient: Burrow_TunnelAsyncClientProtocol { + public var channel: GRPCChannel + public var defaultCallOptions: CallOptions + public var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? + + public init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? = nil + ) { + self.channel = channel + self.defaultCallOptions = defaultCallOptions + self.interceptors = interceptors + } +} + +public protocol Burrow_TunnelClientInterceptorFactoryProtocol: Sendable { + + /// - Returns: Interceptors to use when invoking 'tunnelConfiguration'. + func makeTunnelConfigurationInterceptors() -> [ClientInterceptor] + + /// - Returns: Interceptors to use when invoking 'tunnelStart'. + func makeTunnelStartInterceptors() -> [ClientInterceptor] + + /// - Returns: Interceptors to use when invoking 'tunnelStop'. + func makeTunnelStopInterceptors() -> [ClientInterceptor] + + /// - Returns: Interceptors to use when invoking 'tunnelStatus'. 
+ func makeTunnelStatusInterceptors() -> [ClientInterceptor] +} + +public enum Burrow_TunnelClientMetadata { + public static let serviceDescriptor = GRPCServiceDescriptor( + name: "Tunnel", + fullName: "burrow.Tunnel", + methods: [ + Burrow_TunnelClientMetadata.Methods.tunnelConfiguration, + Burrow_TunnelClientMetadata.Methods.tunnelStart, + Burrow_TunnelClientMetadata.Methods.tunnelStop, + Burrow_TunnelClientMetadata.Methods.tunnelStatus, + ] + ) + + public enum Methods { + public static let tunnelConfiguration = GRPCMethodDescriptor( + name: "TunnelConfiguration", + path: "/burrow.Tunnel/TunnelConfiguration", + type: GRPCCallType.serverStreaming + ) + + public static let tunnelStart = GRPCMethodDescriptor( + name: "TunnelStart", + path: "/burrow.Tunnel/TunnelStart", + type: GRPCCallType.unary + ) + + public static let tunnelStop = GRPCMethodDescriptor( + name: "TunnelStop", + path: "/burrow.Tunnel/TunnelStop", + type: GRPCCallType.unary + ) + + public static let tunnelStatus = GRPCMethodDescriptor( + name: "TunnelStatus", + path: "/burrow.Tunnel/TunnelStatus", + type: GRPCCallType.serverStreaming + ) + } +} + +/// Usage: instantiate `Burrow_NetworksClient`, then call methods of this protocol to make API calls. +public protocol Burrow_NetworksClientProtocol: GRPCClient { + var serviceName: String { get } + var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? { get } + + func networkAdd( + _ request: Burrow_Network, + callOptions: CallOptions? + ) -> UnaryCall + + func networkList( + _ request: Burrow_Empty, + callOptions: CallOptions?, + handler: @escaping (Burrow_NetworkListResponse) -> Void + ) -> ServerStreamingCall + + func networkReorder( + _ request: Burrow_NetworkReorderRequest, + callOptions: CallOptions? + ) -> UnaryCall + + func networkDelete( + _ request: Burrow_NetworkDeleteRequest, + callOptions: CallOptions? 
+ ) -> UnaryCall +} + +extension Burrow_NetworksClientProtocol { + public var serviceName: String { + return "burrow.Networks" + } + + /// Unary call to NetworkAdd + /// + /// - Parameters: + /// - request: Request to send to NetworkAdd. + /// - callOptions: Call options. + /// - Returns: A `UnaryCall` with futures for the metadata, status and response. + public func networkAdd( + _ request: Burrow_Network, + callOptions: CallOptions? = nil + ) -> UnaryCall { + return self.makeUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkAdd.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkAddInterceptors() ?? [] + ) + } + + /// Server streaming call to NetworkList + /// + /// - Parameters: + /// - request: Request to send to NetworkList. + /// - callOptions: Call options. + /// - handler: A closure called when each response is received from the server. + /// - Returns: A `ServerStreamingCall` with futures for the metadata and status. + public func networkList( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil, + handler: @escaping (Burrow_NetworkListResponse) -> Void + ) -> ServerStreamingCall { + return self.makeServerStreamingCall( + path: Burrow_NetworksClientMetadata.Methods.networkList.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkListInterceptors() ?? [], + handler: handler + ) + } + + /// Unary call to NetworkReorder + /// + /// - Parameters: + /// - request: Request to send to NetworkReorder. + /// - callOptions: Call options. + /// - Returns: A `UnaryCall` with futures for the metadata, status and response. + public func networkReorder( + _ request: Burrow_NetworkReorderRequest, + callOptions: CallOptions? = nil + ) -> UnaryCall { + return self.makeUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkReorder.path, + request: request, + callOptions: callOptions ?? 
self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkReorderInterceptors() ?? [] + ) + } + + /// Unary call to NetworkDelete + /// + /// - Parameters: + /// - request: Request to send to NetworkDelete. + /// - callOptions: Call options. + /// - Returns: A `UnaryCall` with futures for the metadata, status and response. + public func networkDelete( + _ request: Burrow_NetworkDeleteRequest, + callOptions: CallOptions? = nil + ) -> UnaryCall { + return self.makeUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkDelete.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkDeleteInterceptors() ?? [] + ) + } +} + +@available(*, deprecated) +extension Burrow_NetworksClient: @unchecked Sendable {} + +@available(*, deprecated, renamed: "Burrow_NetworksNIOClient") +public final class Burrow_NetworksClient: Burrow_NetworksClientProtocol { + private let lock = Lock() + private var _defaultCallOptions: CallOptions + private var _interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? + public let channel: GRPCChannel + public var defaultCallOptions: CallOptions { + get { self.lock.withLock { return self._defaultCallOptions } } + set { self.lock.withLockVoid { self._defaultCallOptions = newValue } } + } + public var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? { + get { self.lock.withLock { return self._interceptors } } + set { self.lock.withLockVoid { self._interceptors = newValue } } + } + + /// Creates a client for the burrow.Networks service. + /// + /// - Parameters: + /// - channel: `GRPCChannel` to the service host. + /// - defaultCallOptions: Options to use for each service call if the user doesn't provide them. + /// - interceptors: A factory providing interceptors for each RPC. + public init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? 
= nil + ) { + self.channel = channel + self._defaultCallOptions = defaultCallOptions + self._interceptors = interceptors + } +} + +public struct Burrow_NetworksNIOClient: Burrow_NetworksClientProtocol { + public var channel: GRPCChannel + public var defaultCallOptions: CallOptions + public var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? + + /// Creates a client for the burrow.Networks service. + /// + /// - Parameters: + /// - channel: `GRPCChannel` to the service host. + /// - defaultCallOptions: Options to use for each service call if the user doesn't provide them. + /// - interceptors: A factory providing interceptors for each RPC. + public init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? = nil + ) { + self.channel = channel + self.defaultCallOptions = defaultCallOptions + self.interceptors = interceptors + } +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +public protocol Burrow_NetworksAsyncClientProtocol: GRPCClient { + static var serviceDescriptor: GRPCServiceDescriptor { get } + var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? { get } + + func makeNetworkAddCall( + _ request: Burrow_Network, + callOptions: CallOptions? + ) -> GRPCAsyncUnaryCall + + func makeNetworkListCall( + _ request: Burrow_Empty, + callOptions: CallOptions? + ) -> GRPCAsyncServerStreamingCall + + func makeNetworkReorderCall( + _ request: Burrow_NetworkReorderRequest, + callOptions: CallOptions? + ) -> GRPCAsyncUnaryCall + + func makeNetworkDeleteCall( + _ request: Burrow_NetworkDeleteRequest, + callOptions: CallOptions? 
+ ) -> GRPCAsyncUnaryCall +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +extension Burrow_NetworksAsyncClientProtocol { + public static var serviceDescriptor: GRPCServiceDescriptor { + return Burrow_NetworksClientMetadata.serviceDescriptor + } + + public var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? { + return nil + } + + public func makeNetworkAddCall( + _ request: Burrow_Network, + callOptions: CallOptions? = nil + ) -> GRPCAsyncUnaryCall { + return self.makeAsyncUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkAdd.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkAddInterceptors() ?? [] + ) + } + + public func makeNetworkListCall( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncServerStreamingCall { + return self.makeAsyncServerStreamingCall( + path: Burrow_NetworksClientMetadata.Methods.networkList.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkListInterceptors() ?? [] + ) + } + + public func makeNetworkReorderCall( + _ request: Burrow_NetworkReorderRequest, + callOptions: CallOptions? = nil + ) -> GRPCAsyncUnaryCall { + return self.makeAsyncUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkReorder.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkReorderInterceptors() ?? [] + ) + } + + public func makeNetworkDeleteCall( + _ request: Burrow_NetworkDeleteRequest, + callOptions: CallOptions? = nil + ) -> GRPCAsyncUnaryCall { + return self.makeAsyncUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkDelete.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkDeleteInterceptors() ?? 
[] + ) + } +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +extension Burrow_NetworksAsyncClientProtocol { + public func networkAdd( + _ request: Burrow_Network, + callOptions: CallOptions? = nil + ) async throws -> Burrow_Empty { + return try await self.performAsyncUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkAdd.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkAddInterceptors() ?? [] + ) + } + + public func networkList( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream { + return self.performAsyncServerStreamingCall( + path: Burrow_NetworksClientMetadata.Methods.networkList.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkListInterceptors() ?? [] + ) + } + + public func networkReorder( + _ request: Burrow_NetworkReorderRequest, + callOptions: CallOptions? = nil + ) async throws -> Burrow_Empty { + return try await self.performAsyncUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkReorder.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkReorderInterceptors() ?? [] + ) + } + + public func networkDelete( + _ request: Burrow_NetworkDeleteRequest, + callOptions: CallOptions? = nil + ) async throws -> Burrow_Empty { + return try await self.performAsyncUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkDelete.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkDeleteInterceptors() ?? 
[] + ) + } +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +public struct Burrow_NetworksAsyncClient: Burrow_NetworksAsyncClientProtocol { + public var channel: GRPCChannel + public var defaultCallOptions: CallOptions + public var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? + + public init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? = nil + ) { + self.channel = channel + self.defaultCallOptions = defaultCallOptions + self.interceptors = interceptors + } +} + +public protocol Burrow_NetworksClientInterceptorFactoryProtocol: Sendable { + + /// - Returns: Interceptors to use when invoking 'networkAdd'. + func makeNetworkAddInterceptors() -> [ClientInterceptor] + + /// - Returns: Interceptors to use when invoking 'networkList'. + func makeNetworkListInterceptors() -> [ClientInterceptor] + + /// - Returns: Interceptors to use when invoking 'networkReorder'. + func makeNetworkReorderInterceptors() -> [ClientInterceptor] + + /// - Returns: Interceptors to use when invoking 'networkDelete'. 
+ func makeNetworkDeleteInterceptors() -> [ClientInterceptor] +} + +public enum Burrow_NetworksClientMetadata { + public static let serviceDescriptor = GRPCServiceDescriptor( + name: "Networks", + fullName: "burrow.Networks", + methods: [ + Burrow_NetworksClientMetadata.Methods.networkAdd, + Burrow_NetworksClientMetadata.Methods.networkList, + Burrow_NetworksClientMetadata.Methods.networkReorder, + Burrow_NetworksClientMetadata.Methods.networkDelete, + ] + ) + + public enum Methods { + public static let networkAdd = GRPCMethodDescriptor( + name: "NetworkAdd", + path: "/burrow.Networks/NetworkAdd", + type: GRPCCallType.unary + ) + + public static let networkList = GRPCMethodDescriptor( + name: "NetworkList", + path: "/burrow.Networks/NetworkList", + type: GRPCCallType.serverStreaming + ) + + public static let networkReorder = GRPCMethodDescriptor( + name: "NetworkReorder", + path: "/burrow.Networks/NetworkReorder", + type: GRPCCallType.unary + ) + + public static let networkDelete = GRPCMethodDescriptor( + name: "NetworkDelete", + path: "/burrow.Networks/NetworkDelete", + type: GRPCCallType.unary + ) + } +} + diff --git a/Apple/Core/Client/Generated/burrow.pb.swift b/Apple/Core/Client/Generated/burrow.pb.swift new file mode 100644 index 0000000..bba0f16 --- /dev/null +++ b/Apple/Core/Client/Generated/burrow.pb.swift @@ -0,0 +1,566 @@ +// DO NOT EDIT. +// swift-format-ignore-file +// swiftlint:disable all +// +// Generated by the Swift generator plugin for the protocol buffer compiler. +// Source: burrow.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/apple/swift-protobuf/ + +import Foundation +import SwiftProtobuf + +// If the compiler emits an error on this type, it is because this file +// was generated by a version of the `protoc` Swift plug-in that is +// incompatible with the version of SwiftProtobuf to which you are linking. 
+// Please ensure that you are building against the same version of the API +// that was used to generate this file. +fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { + struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} + typealias Version = _2 +} + +public enum Burrow_NetworkType: SwiftProtobuf.Enum, Swift.CaseIterable { + public typealias RawValue = Int + case wireGuard // = 0 + case tailnet // = 1 + case UNRECOGNIZED(Int) + + public init() { + self = .wireGuard + } + + public init?(rawValue: Int) { + switch rawValue { + case 0: self = .wireGuard + case 1: self = .tailnet + default: self = .UNRECOGNIZED(rawValue) + } + } + + public var rawValue: Int { + switch self { + case .wireGuard: return 0 + case .tailnet: return 1 + case .UNRECOGNIZED(let i): return i + } + } + + // The compiler won't synthesize support with the UNRECOGNIZED case. + public static let allCases: [Burrow_NetworkType] = [ + .wireGuard, + .tailnet, + ] + +} + +public enum Burrow_State: SwiftProtobuf.Enum, Swift.CaseIterable { + public typealias RawValue = Int + case stopped // = 0 + case running // = 1 + case UNRECOGNIZED(Int) + + public init() { + self = .stopped + } + + public init?(rawValue: Int) { + switch rawValue { + case 0: self = .stopped + case 1: self = .running + default: self = .UNRECOGNIZED(rawValue) + } + } + + public var rawValue: Int { + switch self { + case .stopped: return 0 + case .running: return 1 + case .UNRECOGNIZED(let i): return i + } + } + + // The compiler won't synthesize support with the UNRECOGNIZED case. + public static let allCases: [Burrow_State] = [ + .stopped, + .running, + ] + +} + +public struct Burrow_NetworkReorderRequest: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
+ + public var id: Int32 = 0 + + public var index: Int32 = 0 + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_WireGuardPeer: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var endpoint: String = String() + + public var subnet: [String] = [] + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_WireGuardNetwork: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var address: String = String() + + public var dns: String = String() + + public var peer: [Burrow_WireGuardPeer] = [] + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_NetworkDeleteRequest: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var id: Int32 = 0 + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_Network: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var id: Int32 = 0 + + public var type: Burrow_NetworkType = .wireGuard + + public var payload: Data = Data() + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_NetworkListResponse: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var network: [Burrow_Network] = [] + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_Empty: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TunnelStatusResponse: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var state: Burrow_State = .stopped + + public var start: SwiftProtobuf.Google_Protobuf_Timestamp { + get {return _start ?? SwiftProtobuf.Google_Protobuf_Timestamp()} + set {_start = newValue} + } + /// Returns true if `start` has been explicitly set. + public var hasStart: Bool {return self._start != nil} + /// Clears the value of `start`. Subsequent reads from it will return its default value. + public mutating func clearStart() {self._start = nil} + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} + + fileprivate var _start: SwiftProtobuf.Google_Protobuf_Timestamp? = nil +} + +public struct Burrow_TunnelConfigurationResponse: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var addresses: [String] = [] + + public var mtu: Int32 = 0 + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +// MARK: - Code below here is support for the SwiftProtobuf runtime. 
+ +fileprivate let _protobuf_package = "burrow" + +extension Burrow_NetworkType: SwiftProtobuf._ProtoNameProviding { + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "WireGuard"), + 1: .same(proto: "Tailnet"), + ] +} + +extension Burrow_State: SwiftProtobuf._ProtoNameProviding { + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "Stopped"), + 1: .same(proto: "Running"), + ] +} + +extension Burrow_NetworkReorderRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".NetworkReorderRequest" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "id"), + 2: .same(proto: "index"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self.id) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &self.index) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if self.id != 0 { + try visitor.visitSingularInt32Field(value: self.id, fieldNumber: 1) + } + if self.index != 0 { + try visitor.visitSingularInt32Field(value: self.index, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_NetworkReorderRequest, rhs: Burrow_NetworkReorderRequest) -> Bool { + if lhs.id != rhs.id {return false} + if lhs.index != rhs.index {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_WireGuardPeer: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".WireGuardPeer" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "endpoint"), + 2: .same(proto: "subnet"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.endpoint) }() + case 2: try { try decoder.decodeRepeatedStringField(value: &self.subnet) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.endpoint.isEmpty { + try visitor.visitSingularStringField(value: self.endpoint, fieldNumber: 1) + } + if !self.subnet.isEmpty { + try visitor.visitRepeatedStringField(value: self.subnet, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_WireGuardPeer, rhs: Burrow_WireGuardPeer) -> Bool { + if lhs.endpoint != rhs.endpoint {return false} + if lhs.subnet != rhs.subnet {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_WireGuardNetwork: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".WireGuardNetwork" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "address"), + 2: .same(proto: "dns"), + 3: .same(proto: "peer"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.address) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.dns) }() + case 3: try { try decoder.decodeRepeatedMessageField(value: &self.peer) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.address.isEmpty { + try visitor.visitSingularStringField(value: self.address, fieldNumber: 1) + } + if !self.dns.isEmpty { + try visitor.visitSingularStringField(value: self.dns, fieldNumber: 2) + } + if !self.peer.isEmpty { + try visitor.visitRepeatedMessageField(value: self.peer, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_WireGuardNetwork, rhs: Burrow_WireGuardNetwork) -> Bool { + if lhs.address != rhs.address {return false} + if lhs.dns != rhs.dns {return false} + if lhs.peer != rhs.peer {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_NetworkDeleteRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".NetworkDeleteRequest" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "id"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self.id) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if self.id != 0 { + try visitor.visitSingularInt32Field(value: self.id, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_NetworkDeleteRequest, rhs: Burrow_NetworkDeleteRequest) -> Bool { + if lhs.id != rhs.id {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_Network: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".Network" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "id"), + 2: .same(proto: "type"), + 3: .same(proto: "payload"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self.id) }() + case 2: try { try decoder.decodeSingularEnumField(value: &self.type) }() + case 3: try { try decoder.decodeSingularBytesField(value: &self.payload) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if self.id != 0 { + try visitor.visitSingularInt32Field(value: self.id, fieldNumber: 1) + } + if self.type != .wireGuard { + try visitor.visitSingularEnumField(value: self.type, fieldNumber: 2) + } + if !self.payload.isEmpty { + try visitor.visitSingularBytesField(value: self.payload, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_Network, rhs: Burrow_Network) -> Bool { + if lhs.id != rhs.id {return false} + if lhs.type != rhs.type {return false} + if lhs.payload != rhs.payload {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_NetworkListResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".NetworkListResponse" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "network"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedMessageField(value: &self.network) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.network.isEmpty { + try visitor.visitRepeatedMessageField(value: self.network, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_NetworkListResponse, rhs: Burrow_NetworkListResponse) -> Bool { + if lhs.network != rhs.network {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_Empty: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".Empty" + public static let _protobuf_nameMap = SwiftProtobuf._NameMap() + + public mutating func decodeMessage(decoder: inout D) throws { + // Load everything into unknown fields + while try decoder.nextFieldNumber() != nil {} + } + + public func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_Empty, rhs: Burrow_Empty) -> Bool { + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_TunnelStatusResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".TunnelStatusResponse" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "state"), + 2: .same(proto: "start"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularEnumField(value: &self.state) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._start) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if self.state != .stopped { + try visitor.visitSingularEnumField(value: self.state, fieldNumber: 1) + } + try { if let v = self._start { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_TunnelStatusResponse, rhs: Burrow_TunnelStatusResponse) -> Bool { + if lhs.state != rhs.state {return false} + if lhs._start != rhs._start {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_TunnelConfigurationResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".TunnelConfigurationResponse" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "addresses"), + 2: .same(proto: "mtu"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedStringField(value: &self.addresses) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &self.mtu) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.addresses.isEmpty { + try visitor.visitRepeatedStringField(value: self.addresses, fieldNumber: 1) + } + if self.mtu != 0 { + try visitor.visitSingularInt32Field(value: self.mtu, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_TunnelConfigurationResponse, rhs: Burrow_TunnelConfigurationResponse) -> Bool { + if lhs.addresses != rhs.addresses {return false} + if lhs.mtu != rhs.mtu {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} diff --git a/Apple/Core/Client/grpc-swift-config.json b/Apple/Core/Client/grpc-swift-config.json deleted file mode 100644 index 2d89698..0000000 --- a/Apple/Core/Client/grpc-swift-config.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "invocations": [ - { - "protoFiles": [ - "burrow.proto", - ], - "server": false, - "visibility": "public" - } - ] -} diff --git a/Apple/Core/Client/swift-protobuf-config.json b/Apple/Core/Client/swift-protobuf-config.json deleted file mode 100644 index 87aaec3..0000000 --- a/Apple/Core/Client/swift-protobuf-config.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "invocations": [ - { - "protoFiles": [ - "burrow.proto", - ], - "visibility": "public" - } - ] -} diff --git a/Apple/UI/BurrowView.swift b/Apple/UI/BurrowView.swift index 835510d..b4fa7d8 100644 --- a/Apple/UI/BurrowView.swift +++ b/Apple/UI/BurrowView.swift @@ -284,6 +284,7 @@ private struct AccountDraft { var identityName = "" var wireGuardConfig = "" + var discoveryEmail = "" var tailnetProvider: TailnetProvider = .tailscale var authority = "" var tailnet = "" @@ -327,6 +328,9 @@ private struct ConfigurationSheetView: View { @State private var 
errorMessage: String? @State private var loginSessionID: String? @State private var loginStatus: TailnetLoginStatus? + @State private var discoveryStatus: TailnetDiscoveryResponse? + @State private var discoveryError: String? + @State private var isDiscoveringTailnet = false @State private var authorityProbeStatus: TailnetAuthorityProbeStatus? @State private var authorityProbeError: String? @State private var isProbingAuthority = false @@ -449,6 +453,9 @@ private struct ConfigurationSheetView: View { .onChange(of: draft.authority) { _, _ in resetAuthorityProbe() } + .onChange(of: draft.discoveryEmail) { _, _ in + resetTailnetDiscoveryFeedback() + } .onDisappear { pollingTask?.cancel() webAuthenticationTask?.cancel() @@ -459,7 +466,37 @@ private struct ConfigurationSheetView: View { @ViewBuilder private var tailnetSections: some View { Section("Connection") { - Picker("Provider", selection: $draft.tailnetProvider) { + TextField("Email address", text: $draft.discoveryEmail) + .textInputAutocapitalization(.never) + .keyboardType(.emailAddress) + .burrowLoginField() + .autocorrectionDisabled() + + Button { + discoverTailnetAuthority() + } label: { + Label { + Text(isDiscoveringTailnet ? "Finding Server" : "Find Server") + } icon: { + Image(systemName: isDiscoveringTailnet ? 
"hourglass" : "at.circle") + } + } + .buttonStyle(.borderless) + .disabled(isDiscoveringTailnet || normalizedOptional(draft.discoveryEmail) == nil) + + if let discoveryStatus { + tailnetDiscoveryCard(status: discoveryStatus, failure: nil) + } else if let discoveryError { + tailnetDiscoveryCard(status: nil, failure: discoveryError) + } + + Picker( + "Provider", + selection: Binding( + get: { draft.tailnetProvider }, + set: { applyTailnetProvider($0) } + ) + ) { ForEach(TailnetProvider.allCases) { provider in Text(provider.title).tag(provider) } @@ -503,14 +540,14 @@ private struct ConfigurationSheetView: View { } Section("Authentication") { - if draft.tailnetProvider.usesWebLogin { + if tailnetUsesWebLogin { tailnetWebLoginCard } else { TextField("Username", text: $draft.username) .burrowLoginField() .autocorrectionDisabled() Picker("Authentication", selection: $draft.authMode) { - ForEach([AccountAuthMode.none, .password, .preauthKey]) { mode in + ForEach(availableTailnetAuthModes) { mode in Text(mode.title).tag(mode) } } @@ -583,7 +620,7 @@ private struct ConfigurationSheetView: View { HStack(spacing: 8) { summaryBadge(draft.tailnetProvider.title) summaryBadge( - draft.tailnetProvider.usesWebLogin ? "Web Sign-In" : draft.authMode.title + tailnetUsesWebLogin ? "Web Sign-In" : draft.authMode.title ) } } @@ -656,7 +693,7 @@ private struct ConfigurationSheetView: View { .foregroundStyle(.secondary) } } else { - Text("Burrow launches the local bridge, then opens the real Tailscale sign-in page in-app.") + Text("Burrow launches the local bridge, then opens the real provider sign-in page in-app.") .font(.footnote) .foregroundStyle(.secondary) } @@ -696,6 +733,41 @@ private struct ConfigurationSheetView: View { ) } + private func tailnetDiscoveryCard( + status: TailnetDiscoveryResponse?, + failure: String? 
+ ) -> some View { + VStack(alignment: .leading, spacing: 6) { + if let status { + Text("Discovered \(status.provider.title)") + .font(.subheadline.weight(.medium)) + Text(status.authority) + .font(.footnote.monospaced()) + .foregroundStyle(.secondary) + .textSelection(.enabled) + if let oidcIssuer = status.oidcIssuer { + Text("OIDC: \(oidcIssuer)") + .font(.footnote) + .foregroundStyle(.secondary) + .lineLimit(3) + .textSelection(.enabled) + } + } else if let failure { + Text("Discovery failed") + .font(.subheadline.weight(.medium)) + .foregroundStyle(.red) + Text(failure) + .font(.footnote) + .foregroundStyle(.secondary) + } + } + .padding(12) + .background( + RoundedRectangle(cornerRadius: 16) + .fill(.thinMaterial) + ) + } + private func summaryBadge(_ label: String) -> some View { Text(label) .font(.caption.weight(.medium)) @@ -762,12 +834,12 @@ private struct ConfigurationSheetView: View { } } - if !draft.tailnetProvider.usesWebLogin { + if availableTailnetAuthModes.count > 1 { Menu("Authentication") { - ForEach([AccountAuthMode.none, .password, .preauthKey]) { mode in + ForEach(availableTailnetAuthModes) { mode in Button(mode.title) { draft.authMode = mode - if mode == .none { + if mode == .none || mode == .web { draft.secret = "" } } @@ -848,7 +920,7 @@ private struct ConfigurationSheetView: View { case .tor: return "Save Account" case .tailnet: - if draft.tailnetProvider.usesWebLogin { + if tailnetUsesWebLogin { return loginStatus?.running == true ? 
"Save Account" : "Start Sign-In" } return "Save Account" @@ -865,12 +937,12 @@ private struct ConfigurationSheetView: View { if normalizedOptional(draft.accountName) == nil || normalizedOptional(draft.identityName) == nil { return true } - if draft.tailnetProvider.usesWebLogin { - return false - } if draft.tailnetProvider.requiresControlURL && normalizedOptional(draft.authority) == nil { return true } + if tailnetUsesWebLogin { + return false + } if draft.authMode != .none && normalizedOptional(draft.secret) == nil { return true } @@ -955,14 +1027,14 @@ private struct ConfigurationSheetView: View { } private func submitTailnet() async throws { - if draft.tailnetProvider.usesWebLogin { + if tailnetUsesWebLogin { if loginStatus?.running == true { webAuthenticationTask?.cancel() webAuthenticationTask = nil try await saveTailnetAccount(secret: nil, username: nil) dismiss() } else { - try await startTailscaleLogin() + try await startTailnetLogin() } return } @@ -973,13 +1045,13 @@ private struct ConfigurationSheetView: View { dismiss() } - private func startTailscaleLogin() async throws { + private func startTailnetLogin() async throws { let response = try await TailnetBridgeClient.startLogin( TailnetLoginStartRequest( accountName: normalized(draft.accountName, fallback: "default"), identityName: normalized(draft.identityName, fallback: "apple"), hostname: normalizedOptional(draft.hostname), - controlURL: draft.tailnetProvider.defaultAuthority + controlURL: normalizedOptional(draft.authority) ?? 
draft.tailnetProvider.defaultAuthority ) ) loginSessionID = response.sessionID @@ -1010,7 +1082,7 @@ private struct ConfigurationSheetView: View { case .tailnetLogin: draft.tailnetProvider = .tailscale do { - try await startTailscaleLogin() + try await startTailnetLogin() } catch { errorMessage = error.localizedDescription } @@ -1078,14 +1150,14 @@ private struct ConfigurationSheetView: View { let provider = draft.tailnetProvider let title = titleOrFallback( hostnameFallback( - from: provider.usesWebLogin ? (loginStatus?.tailnetName ?? "") : draft.authority, + from: tailnetUsesWebLogin ? (loginStatus?.tailnetName ?? "") : draft.authority, fallback: provider.title ) ) let payload = TailnetNetworkPayload( provider: provider, - authority: normalizedOptional(provider.defaultAuthority ?? draft.authority), + authority: normalizedOptional(draft.authority) ?? normalizedOptional(provider.defaultAuthority ?? ""), account: normalized(draft.accountName, fallback: "default"), identity: normalized(draft.identityName, fallback: "apple"), tailnet: normalizedOptional(loginStatus?.tailnetName ?? draft.tailnet), @@ -1094,7 +1166,7 @@ private struct ConfigurationSheetView: View { var noteParts: [String] = [ provider.title, - provider.usesWebLogin + tailnetUsesWebLogin ? "State: \(loginStatus?.backendState ?? "NeedsLogin")" : "Auth: \(draft.authMode.title)", ] @@ -1123,7 +1195,7 @@ private struct ConfigurationSheetView: View { hostname: payload.hostname, username: username, tailnet: payload.tailnet, - authMode: provider.usesWebLogin ? .web : draft.authMode, + authMode: tailnetUsesWebLogin ? 
.web : draft.authMode, note: noteParts.joined(separator: " • "), createdAt: .now, updatedAt: .now @@ -1155,18 +1227,25 @@ private struct ConfigurationSheetView: View { } private func applyTailnetProvider(_ provider: TailnetProvider) { + resetTailnetDiscoveryFeedback() draft.tailnetProvider = provider applyTailnetDefaults(for: provider) } private func applyTailnetDefaults(for provider: TailnetProvider) { draft.authority = provider.defaultAuthority ?? "" - if provider.usesWebLogin { + loginStatus = nil + loginSessionID = nil + pollingTask?.cancel() + if provider == .tailscale { draft.authMode = .web draft.username = "" draft.secret = "" } else { - if draft.authMode == .web { + if !availableTailnetAuthModes.contains(draft.authMode) { + draft.authMode = provider.supportsWebLogin ? .web : .none + } + if draft.authMode == .web && !provider.supportsWebLogin { draft.authMode = .none } } @@ -1202,6 +1281,41 @@ private struct ConfigurationSheetView: View { authorityProbeError = nil } + private func resetTailnetDiscoveryFeedback() { + discoveryStatus = nil + discoveryError = nil + } + + private func discoverTailnetAuthority() { + guard let email = normalizedOptional(draft.discoveryEmail) else { + discoveryStatus = nil + discoveryError = "Enter an email address first." 
+ return + } + + isDiscoveringTailnet = true + discoveryStatus = nil + discoveryError = nil + + Task { @MainActor in + defer { isDiscoveringTailnet = false } + do { + let discovery = try await TailnetDiscoveryClient.discover(email: email) + discoveryStatus = discovery + draft.tailnetProvider = discovery.provider + draft.authority = discovery.authority + if discovery.provider.supportsWebLogin, discovery.oidcIssuer != nil { + draft.authMode = .web + draft.username = "" + draft.secret = "" + } + probeTailnetAuthority() + } catch { + discoveryError = error.localizedDescription + } + } + } + private func pasteWireGuardConfiguration() { guard let clipboardString else { return } draft.wireGuardConfig = clipboardString @@ -1247,6 +1361,21 @@ private struct ConfigurationSheetView: View { return host } + private var tailnetUsesWebLogin: Bool { + draft.authMode == .web && draft.tailnetProvider.supportsWebLogin + } + + private var availableTailnetAuthModes: [AccountAuthMode] { + switch draft.tailnetProvider { + case .tailscale: + [.web] + case .headscale: + [.web, .none, .password, .preauthKey] + case .burrow: + [.none, .password, .preauthKey] + } + } + @ViewBuilder private func labeledValue(_ label: String, _ value: String) -> some View { VStack(alignment: .leading, spacing: 2) { diff --git a/Apple/UI/Networks/Network.swift b/Apple/UI/Networks/Network.swift index 71e5bca..9a534ce 100644 --- a/Apple/UI/Networks/Network.swift +++ b/Apple/UI/Networks/Network.swift @@ -33,6 +33,13 @@ struct TailnetLoginStartRequest: Codable, Sendable { var controlURL: String? } +struct TailnetDiscoveryResponse: Codable, Sendable { + var domain: String + var provider: TailnetProvider + var authority: String + var oidcIssuer: String? +} + struct TailnetLoginStatus: Codable, Sendable { var backendState: String var authURL: String? 
@@ -91,7 +98,7 @@ enum TailnetBridgeClient { return try decoder.decode(TailnetLoginStatus.self, from: data) } - private static func validate(response: URLResponse, data: Data) throws { + fileprivate static func validate(response: URLResponse, data: Data) throws { guard let http = response as? HTTPURLResponse else { throw URLError(.badServerResponse) } @@ -104,6 +111,32 @@ enum TailnetBridgeClient { } } +enum TailnetDiscoveryClient { + private static let baseURL = URL(string: "http://127.0.0.1:8080")! + + static func discover(email: String) async throws -> TailnetDiscoveryResponse { + guard var components = URLComponents( + url: baseURL.appendingPathComponent("v1/tailnet/discover"), + resolvingAgainstBaseURL: false + ) else { + throw URLError(.badURL) + } + components.queryItems = [ + URLQueryItem(name: "email", value: email) + ] + guard let url = components.url else { + throw URLError(.badURL) + } + + let (data, response) = try await URLSession.shared.data(from: url) + try TailnetBridgeClient.validate(response: response, data: data) + + let decoder = JSONDecoder() + decoder.keyDecodingStrategy = .convertFromSnakeCase + return try decoder.decode(TailnetDiscoveryResponse.self, from: data) + } +} + enum TailnetAuthorityProbeClient { static func probe(provider: TailnetProvider, authority: String) async throws -> TailnetAuthorityProbeStatus { let normalizedAuthority = normalizeAuthority(authority) @@ -308,8 +341,13 @@ enum TailnetProvider: String, CaseIterable, Codable, Identifiable, Sendable { } } - var usesWebLogin: Bool { - self == .tailscale + var supportsWebLogin: Bool { + switch self { + case .tailscale, .headscale: + true + case .burrow: + false + } } var requiresControlURL: Bool { @@ -332,7 +370,7 @@ enum TailnetProvider: String, CaseIterable, Codable, Identifiable, Sendable { case .tailscale: "Use Tailscale's real browser login flow." case .headscale: - "Store a Headscale control-plane endpoint and credentials." 
+ "Use your Headscale control plane with browser or key-based sign-in." case .burrow: "Store Burrow control-plane credentials." } diff --git a/burrow/src/auth/server/mod.rs b/burrow/src/auth/server/mod.rs index b0c0522..fdffce3 100644 --- a/burrow/src/auth/server/mod.rs +++ b/burrow/src/auth/server/mod.rs @@ -5,17 +5,18 @@ use std::{env, path::Path}; use anyhow::{Context, Result}; use axum::{ - extract::{Json, Path as AxumPath, State}, + extract::{Json, Path as AxumPath, Query, State}, http::{header::AUTHORIZATION, HeaderMap, StatusCode}, response::IntoResponse, routing::{get, post}, Router, }; +use serde::Deserialize; use tokio::signal; use crate::control::{ - LocalAuthRequest, LocalAuthResponse, MapRequest, MapResponse, RegisterRequest, - RegisterResponse, BURROW_TAILNET_DOMAIN, + discovery, LocalAuthRequest, LocalAuthResponse, MapRequest, MapResponse, RegisterRequest, + RegisterResponse, TailnetDiscovery, BURROW_TAILNET_DOMAIN, }; #[derive(Clone, Debug)] @@ -105,6 +106,11 @@ struct AppState { tailscale: tailscale::TailscaleBridgeManager, } +#[derive(Debug, Deserialize)] +struct TailnetDiscoveryQuery { + email: String, +} + type AppResult = Result; pub async fn serve() -> Result<()> { @@ -139,6 +145,7 @@ pub fn build_router(config: AuthServerConfig) -> Router { .route("/v1/auth/login", post(login_local)) .route("/v1/control/register", post(control_register)) .route("/v1/control/map", post(control_map)) + .route("/v1/tailnet/discover", get(tailnet_discover)) .route("/v1/tailscale/login/start", post(tailscale_login_start)) .route("/v1/tailscale/login/:session_id", get(tailscale_login_status)) .with_state(AppState { @@ -205,6 +212,19 @@ async fn control_map( Ok(Json(response)) } +async fn tailnet_discover( + Query(query): Query, +) -> AppResult> { + if query.email.trim().is_empty() { + return Err((StatusCode::BAD_REQUEST, "email is required".to_owned())); + } + + let discovery = discovery::discover_tailnet(&query.email) + .await + .map_err(|err| 
(StatusCode::BAD_GATEWAY, err.to_string()))?; + Ok(Json(discovery)) +} + async fn tailscale_login_start( State(state): State, Json(request): Json, @@ -394,4 +414,17 @@ mod tests { assert!(map.dns.expect("dns").magic_dns); Ok(()) } + + #[tokio::test] + async fn tailnet_discover_requires_email() -> Result<()> { + let app = build_router(AuthServerConfig::default()); + let response = app + .oneshot( + Request::get("/v1/tailnet/discover?email=") + .body(Body::empty())?, + ) + .await?; + assert_eq!(response.status(), StatusCode::BAD_REQUEST); + Ok(()) + } } diff --git a/burrow/src/control/discovery.rs b/burrow/src/control/discovery.rs new file mode 100644 index 0000000..28b48bb --- /dev/null +++ b/burrow/src/control/discovery.rs @@ -0,0 +1,212 @@ +use anyhow::{anyhow, Context, Result}; +use reqwest::{Client, StatusCode, Url}; +use serde::{Deserialize, Serialize}; + +use super::TailnetProvider; + +pub const TAILNET_DISCOVERY_REL: &str = "https://burrow.net/rel/tailnet-control-server"; +const TAILNET_DISCOVERY_PATH: &str = "/.well-known/burrow-tailnet"; +const WEBFINGER_PATH: &str = "/.well-known/webfinger"; + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct TailnetDiscovery { + pub domain: String, + pub provider: TailnetProvider, + pub authority: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub oidc_issuer: Option, +} + +#[derive(Clone, Debug, Default, Deserialize)] +struct WebFingerDocument { + #[serde(default)] + links: Vec, +} + +#[derive(Clone, Debug, Default, Deserialize)] +struct WebFingerLink { + #[serde(default)] + rel: String, + #[serde(default)] + href: Option, +} + +pub async fn discover_tailnet(email: &str) -> Result { + let domain = email_domain(email)?; + let base_url = Url::parse(&format!("https://{domain}")) + .with_context(|| format!("invalid discovery domain {domain}"))?; + let client = Client::builder() + .user_agent("burrow-tailnet-discovery") + .timeout(std::time::Duration::from_secs(10)) + .build() 
+ .context("failed to build tailnet discovery client")?; + discover_tailnet_at(&client, email, &base_url).await +} + +pub async fn discover_tailnet_at( + client: &Client, + email: &str, + base_url: &Url, +) -> Result { + let domain = email_domain(email)?; + + if let Some(discovery) = discover_well_known(client, base_url).await? { + return Ok(TailnetDiscovery { domain, ..discovery }); + } + + if let Some(authority) = discover_webfinger(client, email, base_url).await? { + return Ok(TailnetDiscovery { + domain, + provider: TailnetProvider::Headscale, + authority, + oidc_issuer: None, + }); + } + + Err(anyhow!("no tailnet discovery metadata found for {domain}")) +} + +pub fn email_domain(email: &str) -> Result { + let trimmed = email.trim(); + let (_, domain) = trimmed + .rsplit_once('@') + .ok_or_else(|| anyhow!("email address must include a domain"))?; + let domain = domain.trim().trim_matches('.').to_ascii_lowercase(); + if domain.is_empty() { + return Err(anyhow!("email address must include a domain")); + } + Ok(domain) +} + +async fn discover_well_known(client: &Client, base_url: &Url) -> Result> { + let url = base_url + .join(TAILNET_DISCOVERY_PATH) + .context("failed to build tailnet discovery URL")?; + let response = client + .get(url) + .header("accept", "application/json") + .send() + .await + .context("tailnet well-known request failed")?; + + match response.status() { + StatusCode::OK => response + .json::() + .await + .context("invalid tailnet discovery document") + .map(Some), + StatusCode::NOT_FOUND => Ok(None), + status => Err(anyhow!("tailnet well-known lookup failed with HTTP {status}")), + } +} + +async fn discover_webfinger(client: &Client, email: &str, base_url: &Url) -> Result> { + let mut url = base_url + .join(WEBFINGER_PATH) + .context("failed to build webfinger URL")?; + url.query_pairs_mut() + .append_pair("resource", &format!("acct:{email}")) + .append_pair("rel", TAILNET_DISCOVERY_REL); + + let response = client + .get(url) + 
.header("accept", "application/jrd+json, application/json") + .send() + .await + .context("tailnet webfinger request failed")?; + + match response.status() { + StatusCode::OK => { + let document = response + .json::() + .await + .context("invalid webfinger document")?; + Ok(document + .links + .into_iter() + .find(|link| link.rel == TAILNET_DISCOVERY_REL) + .and_then(|link| link.href) + .filter(|href| !href.trim().is_empty())) + } + StatusCode::NOT_FOUND => Ok(None), + status => Err(anyhow!("tailnet webfinger lookup failed with HTTP {status}")), + } +} + +#[cfg(test)] +mod tests { + use axum::{routing::get, Router}; + use serde_json::json; + use tokio::net::TcpListener; + + use super::*; + + #[test] + fn extracts_domain_from_email() { + assert_eq!(email_domain("Contact@Burrow.net").unwrap(), "burrow.net"); + assert!(email_domain("contact").is_err()); + } + + #[tokio::test] + async fn discovers_from_well_known_document() -> Result<()> { + let router = Router::new().route( + TAILNET_DISCOVERY_PATH, + get(|| async { + axum::Json(json!({ + "domain": "burrow.net", + "provider": "headscale", + "authority": "https://ts.burrow.net", + "oidc_issuer": "https://auth.burrow.net/application/o/ts/" + })) + }), + ); + + let listener = TcpListener::bind("127.0.0.1:0").await?; + let base_url = Url::parse(&format!("http://{}", listener.local_addr()?))?; + let server = tokio::spawn(async move { axum::serve(listener, router).await }); + + let client = Client::builder().build()?; + let discovery = discover_tailnet_at(&client, "contact@burrow.net", &base_url).await?; + assert_eq!(discovery.provider, TailnetProvider::Headscale); + assert_eq!(discovery.authority, "https://ts.burrow.net"); + assert_eq!(discovery.domain, "burrow.net"); + + server.abort(); + Ok(()) + } + + #[tokio::test] + async fn falls_back_to_webfinger_authority() -> Result<()> { + let router = Router::new() + .route( + TAILNET_DISCOVERY_PATH, + get(|| async { (StatusCode::NOT_FOUND, "") }), + ) + .route( + 
WEBFINGER_PATH, + get(|| async { + axum::Json(json!({ + "subject": "acct:contact@burrow.net", + "links": [ + { + "rel": TAILNET_DISCOVERY_REL, + "href": "https://ts.burrow.net" + } + ] + })) + }), + ); + + let listener = TcpListener::bind("127.0.0.1:0").await?; + let base_url = Url::parse(&format!("http://{}", listener.local_addr()?))?; + let server = tokio::spawn(async move { axum::serve(listener, router).await }); + + let client = Client::builder().build()?; + let discovery = discover_tailnet_at(&client, "contact@burrow.net", &base_url).await?; + assert_eq!(discovery.provider, TailnetProvider::Headscale); + assert_eq!(discovery.authority, "https://ts.burrow.net"); + + server.abort(); + Ok(()) + } +} diff --git a/burrow/src/control/mod.rs b/burrow/src/control/mod.rs index 331a7d2..472f673 100644 --- a/burrow/src/control/mod.rs +++ b/burrow/src/control/mod.rs @@ -1,4 +1,5 @@ pub mod config; +pub mod discovery; use std::collections::BTreeMap; @@ -6,6 +7,7 @@ use serde::{Deserialize, Serialize}; use serde_json::Value; pub use config::{TailnetConfig, TailnetProvider}; +pub use discovery::{TailnetDiscovery, TAILNET_DISCOVERY_REL}; pub const BURROW_CAPABILITY_VERSION: i32 = 1; pub const BURROW_TAILNET_DOMAIN: &str = "burrow.net"; diff --git a/nixos/modules/burrow-forge.nix b/nixos/modules/burrow-forge.nix index 51af7eb..0d0f5c8 100644 --- a/nixos/modules/burrow-forge.nix +++ b/nixos/modules/burrow-forge.nix @@ -259,9 +259,12 @@ in encode gzip zstd @oidcConfig path /.well-known/openid-configuration redir @oidcConfig https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.forgejoProviderSlug}/.well-known/openid-configuration 308 + @tailnetConfig path /.well-known/burrow-tailnet + header @tailnetConfig Content-Type application/json + respond @tailnetConfig 
"{\"domain\":\"${cfg.siteDomain}\",\"provider\":\"headscale\",\"authority\":\"https://${config.services.burrow.headscale.domain}\",\"oidc_issuer\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.headscaleProviderSlug}/\"}" 200 @webfinger path /.well-known/webfinger header @webfinger Content-Type application/jrd+json - respond @webfinger "{\"subject\":\"{query.resource}\",\"links\":[{\"rel\":\"http://openid.net/specs/connect/1.0/issuer\",\"href\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.forgejoProviderSlug}/\"}]}" 200 + respond @webfinger "{\"subject\":\"{query.resource}\",\"links\":[{\"rel\":\"http://openid.net/specs/connect/1.0/issuer\",\"href\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.forgejoProviderSlug}/\"},{\"rel\":\"https://burrow.net/rel/tailnet-control-server\",\"href\":\"https://${config.services.burrow.headscale.domain}\"}]}" 200 @root path / redir @root ${homeRepoUrl} 308 respond 404 diff --git a/nixos/modules/burrow-headscale.nix b/nixos/modules/burrow-headscale.nix index 98cf5ba..ad5ec68 100644 --- a/nixos/modules/burrow-headscale.nix +++ b/nixos/modules/burrow-headscale.nix @@ -3,131 +3,6 @@ let cfg = config.services.burrow.headscale; policyFile = ./burrow-headscale-policy.hujson; - landingPage = pkgs.writeTextDir "index.html" '' - - - - - - Burrow Tailnet - - - -
-
Burrow Tailnet
-
-

Sign-in starts from your client, not this page.

-

- ts.burrow.net is the Burrow Headscale control plane. Headscale does not provide a built-in web UI, - so browser authentication starts only after a Tailscale-compatible client initiates login. -

-
-
tailscale up --login-server https://ts.burrow.net
- -
- - - ''; in { options.services.burrow.headscale = { @@ -346,14 +221,7 @@ in services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' encode gzip zstd - @root path / - handle @root { - root * ${landingPage} - file_server - } - handle { - reverse_proxy 127.0.0.1:${toString cfg.port} - } + reverse_proxy 127.0.0.1:${toString cfg.port} ''; }; } From f6a7f0922d14107a5dbeec9f3eaf605dde041155 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Fri, 3 Apr 2026 01:36:10 -0700 Subject: [PATCH 08/59] Add governance and identity registry scaffolding --- .forgejo/workflows/lint-governance.yml | 27 ++++ .github/workflows/lint-governance.yml | 23 +++ AGENTS.md | 14 ++ Makefile | 6 + README.md | 3 + Scripts/bep | 133 ++++++++++++++++++ Scripts/check-bep-metadata.py | 94 +++++++++++++ contributors.nix | 47 +++++++ evolution/README.md | 14 ++ .../BEP-0005-daemon-ipc-and-apple-boundary.md | 78 ++++++++++ ...6-tailnet-authority-first-control-plane.md | 71 ++++++++++ ...dentity-registry-and-operator-bootstrap.md | 73 ++++++++++ nixos/hosts/burrow-forge/default.nix | 50 ++++--- 13 files changed, 612 insertions(+), 21 deletions(-) create mode 100644 .forgejo/workflows/lint-governance.yml create mode 100644 .github/workflows/lint-governance.yml create mode 100644 AGENTS.md create mode 100755 Scripts/bep create mode 100755 Scripts/check-bep-metadata.py create mode 100644 contributors.nix create mode 100644 evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md create mode 100644 evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md create mode 100644 evolution/proposals/BEP-0007-identity-registry-and-operator-bootstrap.md diff --git a/.forgejo/workflows/lint-governance.yml b/.forgejo/workflows/lint-governance.yml new file mode 100644 index 0000000..490702e --- /dev/null +++ b/.forgejo/workflows/lint-governance.yml @@ -0,0 +1,27 @@ +name: Lint Governance + +on: + push: + branches: + - main + pull_request: + branches: + - "**" + workflow_dispatch: + +jobs: + 
governance: + name: BEP Metadata + runs-on: [self-hosted, linux, x86_64, burrow-forge] + steps: + - name: Checkout + uses: https://code.forgejo.org/actions/checkout@v4 + with: + token: ${{ github.token }} + fetch-depth: 0 + + - name: Validate BEP metadata + shell: bash + run: | + set -euo pipefail + python3 Scripts/check-bep-metadata.py diff --git a/.github/workflows/lint-governance.yml b/.github/workflows/lint-governance.yml new file mode 100644 index 0000000..08b665c --- /dev/null +++ b/.github/workflows/lint-governance.yml @@ -0,0 +1,23 @@ +name: Governance Lint + +on: + pull_request: + branches: + - "*" + +jobs: + governance: + name: BEP Metadata + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 0 + + - name: Validate BEP metadata + shell: bash + run: | + set -euo pipefail + python3 Scripts/check-bep-metadata.py diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..0ca7ced --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,14 @@ +# instructions for agents + +1. Spell the project name as `Burrow` in user-facing copy and `burrow` in code, package, and protocol identifiers unless an existing integration requires a different literal. +2. Read [CONSTITUTION.md](CONSTITUTION.md) before changing Apple clients, the daemon, the control plane, forge infrastructure, identity, or security-sensitive code. +3. Anchor non-trivial changes in a Burrow Evolution Proposal (BEP) under [evolution/](evolution/README.md) so future contributors can inherit the rationale, safeguards, and rollout shape. +4. 
Before touching the Apple app, daemon IPC, or Tailnet flows, review: + - [evolution/proposals/BEP-0002-control-plane-bootstrap-and-local-auth.md](evolution/proposals/BEP-0002-control-plane-bootstrap-and-local-auth.md) + - [evolution/proposals/BEP-0003-connect-ip-and-negotiation-roadmap.md](evolution/proposals/BEP-0003-connect-ip-and-negotiation-roadmap.md) + - [evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md](evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md) + - [evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md](evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md) +5. Apple clients must talk only to the daemon over gRPC. Do not add direct HTTP, control-plane, or helper-process calls from Swift UI code. +6. Treat Tailnet as one protocol family. Tailscale-managed and self-hosted Headscale-style deployments differ by authority, policy, and auth details, not by a separate user-facing protocol surface. +7. Maintain canonical identity and operator metadata in [contributors.nix](contributors.nix). If Burrow forge, Authentik, Headscale, or admin/group mappings need to change, edit that registry first and derive runtime configuration from it. +8. When process or architecture is unclear, stop and draft or update a BEP instead of improvising durable behavior in code. 
diff --git a/Makefile b/Makefile index f927f5f..1a0488c 100644 --- a/Makefile +++ b/Makefile @@ -10,6 +10,12 @@ check: build: @cargo build +bep-check: + @python3 Scripts/check-bep-metadata.py + +bep-list: + @Scripts/bep list + daemon-console: @$(sudo_cargo_console) daemon diff --git a/README.md b/README.md index b8684c3..ba4f50c 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,7 @@ Routine verification now runs unprivileged with `cargo test --workspace --all-fe The repository now carries its own design and deployment record: - [Constitution](./CONSTITUTION.md) +- [Agent Instructions](./AGENTS.md) - [Burrow Evolution](./evolution/README.md) - [WireGuard Rust Lineage](./docs/WIREGUARD_LINEAGE.md) - [Protocol Roadmap](./docs/PROTOCOL_ROADMAP.md) @@ -19,6 +20,8 @@ The repository now carries its own design and deployment record: Burrow is fully open source, you can fork the repo and start contributing easily. For more information and in-depth discussions, visit the `#burrow` channel on the [Hack Club Slack](https://hackclub.com/slack/), here you can ask for help and talk with other people interested in burrow. Checkout [GETTING_STARTED.md](./docs/GETTING_STARTED.md) for build instructions and [GTK_APP.md](./docs/GTK_APP.md) for the Linux app. Forge and deployment scaffolding live in [`flake.nix`](./flake.nix), [`nixos/`](./nixos), and [`.forgejo/workflows/`](./.forgejo/workflows/). Hosted mail backup operations live in [`docs/FORWARDEMAIL.md`](./docs/FORWARDEMAIL.md) and [`Tools/forwardemail-custom-s3.sh`](./Tools/forwardemail-custom-s3.sh). +Agent and governance-sensitive work should start with [AGENTS.md](./AGENTS.md), [CONSTITUTION.md](./CONSTITUTION.md), and the relevant BEPs under [`evolution/proposals/`](./evolution/proposals/). Identity and bootstrap metadata now live in [`contributors.nix`](./contributors.nix). 
+ The project structure is divided in the following folders: ``` diff --git a/Scripts/bep b/Scripts/bep new file mode 100755 index 0000000..1c6bd64 --- /dev/null +++ b/Scripts/bep @@ -0,0 +1,133 @@ +#!/usr/bin/env bash +set -euo pipefail + +repo_root=$(git rev-parse --show-toplevel) +proposals_dir="$repo_root/evolution/proposals" + +auto_browse() { + if command -v wisu >/dev/null 2>&1; then + exec wisu -i -g --icons "$repo_root/evolution" + fi + exec ls -la "$repo_root/evolution" +} + +usage() { + cat <<'USAGE' +Usage: bep [command] + +Commands: + list [--status ] List BEPs, optionally filtered by status. + open Open a BEP in $EDITOR. + help Show this help. + +If no command is provided, bep launches a simple browser for evolution/. +USAGE +} + +normalize_id() { + local raw="$1" + if [[ "$raw" =~ ^BEP-[0-9]+$ ]]; then + printf '%s' "$raw" + return 0 + fi + if [[ "$raw" =~ ^[0-9]+$ ]]; then + printf 'BEP-%04d' "$raw" + return 0 + fi + return 1 +} + +read_status() { + local file="$1" + awk -F ': ' '/^Status:/ {print $2; exit}' "$file" +} + +read_title() { + local file="$1" + local line + line=$(head -n 1 "$file" || true) + printf '%s' "$line" | sed -E 's/^# `[^`]+`[[:space:]]+//; s/^[^A-Za-z0-9]+//' +} + +list_bep() { + local filter="${1:-}" + local filter_lower="" + if [[ -n "$filter" ]]; then + filter_lower=$(printf '%s' "$filter" | tr '[:upper:]' '[:lower:]') + fi + + printf '%-10s %-18s %s\n' "BEP" "Status" "Title" + local file + local entries=() + for file in "$proposals_dir"/BEP-*.md; do + [[ -e "$file" ]] || continue + local base + base=$(basename "$file") + local id + id=$(printf '%s' "$base" | cut -d- -f1-2) + local status + status=$(read_status "$file") + local status_lower + status_lower=$(printf '%s' "$status" | tr '[:upper:]' '[:lower:]') + if [[ -n "$filter_lower" && "$status_lower" != "$filter_lower" ]]; then + continue + fi + local title + title=$(read_title "$file") + entries+=("$(printf '%-10s %-18s %s' "$id" "$status" "$title")") + done + if [[ 
${#entries[@]} -gt 0 ]]; then + printf '%s\n' "${entries[@]}" | sort + fi +} + +open_bep() { + local raw="$1" + local id + if ! id=$(normalize_id "$raw"); then + echo "Unknown BEP id: $raw" >&2 + exit 1 + fi + local matches + matches=("$proposals_dir"/"$id"-*.md) + if [[ ${#matches[@]} -eq 0 || ! -e "${matches[0]}" ]]; then + echo "No proposal found for $id" >&2 + exit 1 + fi + if [[ ${#matches[@]} -gt 1 ]]; then + echo "Multiple proposals match $id:" >&2 + printf ' %s\n' "${matches[@]}" >&2 + exit 1 + fi + local editor="${EDITOR:-vi}" + exec "$editor" "${matches[0]}" +} + +command=${1:-} +case "$command" in + "") + auto_browse + ;; + list) + if [[ ${2:-} == "--status" && -n ${3:-} ]]; then + list_bep "$3" + else + list_bep + fi + ;; + open) + if [[ -z ${2:-} ]]; then + echo "bep open requires an id" >&2 + exit 1 + fi + open_bep "$2" + ;; + help|-h|--help) + usage + ;; + *) + echo "Unknown command: $command" >&2 + usage + exit 1 + ;; +esac diff --git a/Scripts/check-bep-metadata.py b/Scripts/check-bep-metadata.py new file mode 100755 index 0000000..d054934 --- /dev/null +++ b/Scripts/check-bep-metadata.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 + +from __future__ import annotations + +import pathlib +import re +import sys + + +REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent +PROPOSALS_DIR = REPO_ROOT / "evolution" / "proposals" +ALLOWED_STATUSES = { + "Pitch", + "Draft", + "In Review", + "Accepted", + "Implemented", + "Rejected", + "Returned for Revision", + "Superseded", + "Archived", +} +REQUIRED_FIELDS = [ + "Status", + "Proposal", + "Authors", + "Coordinator", + "Reviewers", + "Constitution Sections", + "Implementation PRs", + "Decision Date", +] + + +def text_block_lines(path: pathlib.Path) -> list[str]: + content = path.read_text(encoding="utf-8") + match = re.search(r"```text\n(.*?)\n```", content, re.DOTALL) + if not match: + raise ValueError("missing leading ```text metadata block") + return [line.rstrip() for line in 
match.group(1).splitlines() if line.strip()] + + +def validate(path: pathlib.Path) -> list[str]: + errors: list[str] = [] + proposal_id = path.name.split("-", 2)[:2] + expected_id = "-".join(proposal_id).removesuffix(".md") + + try: + lines = text_block_lines(path) + except ValueError as exc: + return [f"{path}: {exc}"] + + field_names = [line.split(":", 1)[0] for line in lines] + if field_names != REQUIRED_FIELDS: + errors.append( + f"{path}: metadata fields must appear in order {', '.join(REQUIRED_FIELDS)}" + ) + return errors + + fields = dict(line.split(":", 1) for line in lines) + fields = {key.strip(): value.strip() for key, value in fields.items()} + + if fields["Status"] not in ALLOWED_STATUSES: + errors.append(f"{path}: invalid Status {fields['Status']!r}") + + if fields["Proposal"] != expected_id: + errors.append( + f"{path}: Proposal field {fields['Proposal']!r} does not match filename id {expected_id!r}" + ) + + if fields["Status"] in {"Accepted", "Implemented", "Superseded", "Rejected", "Archived"} and fields["Decision Date"] == "Pending": + errors.append( + f"{path}: Decision Date must not be Pending once status is {fields['Status']}" + ) + + return errors + + +def main() -> int: + errors: list[str] = [] + for path in sorted(PROPOSALS_DIR.glob("BEP-*.md")): + errors.extend(validate(path)) + + if errors: + for error in errors: + print(error, file=sys.stderr) + return 1 + + print(f"checked {len(list(PROPOSALS_DIR.glob('BEP-*.md')))} BEPs") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/contributors.nix b/contributors.nix new file mode 100644 index 0000000..f6cc014 --- /dev/null +++ b/contributors.nix @@ -0,0 +1,47 @@ +{ + groups = { + users = "burrow-users"; + admins = "burrow-admins"; + }; + + identities = { + contact = { + displayName = "Burrow"; + canonicalEmail = "contact@burrow.net"; + sourceEmail = "net.burrow@gmail.com"; + isAdmin = true; + forgeAuthorized = true; + bootstrapAuthentik = true; + sshPublicKeyPath 
= ./nixos/keys/contact_at_burrow_net.pub; + roles = [ + "operator" + "forge-admin" + ]; + }; + + conrad = { + displayName = "Conrad Kramer"; + canonicalEmail = "conrad@burrow.net"; + sourceEmail = "ckrames1234@gmail.com"; + isAdmin = true; + forgeAuthorized = false; + bootstrapAuthentik = true; + roles = [ + "operator" + "founder" + ]; + }; + + agent = { + displayName = "Burrow Agent"; + canonicalEmail = "agent@burrow.net"; + isAdmin = false; + forgeAuthorized = true; + bootstrapAuthentik = false; + sshPublicKeyPath = ./nixos/keys/agent_at_burrow_net.pub; + roles = [ + "automation" + ]; + }; + }; +} diff --git a/evolution/README.md b/evolution/README.md index e55a347..794b1fe 100644 --- a/evolution/README.md +++ b/evolution/README.md @@ -58,3 +58,17 @@ evolution/ ``` Use ASCII Markdown. Keep metadata at the top of each proposal so tooling and future agents can parse it quickly. + +## BEP Helper + +Use the `bep` helper under `Scripts/` to browse or list proposals: + +- `Scripts/bep` opens a quick browser for `evolution/`. +- `Scripts/bep list --status Draft` lists proposals by status. +- `Scripts/bep open BEP-0005` opens a proposal in `$EDITOR`. 
+ +Validate proposal metadata with: + +```bash +python3 Scripts/check-bep-metadata.py +``` diff --git a/evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md b/evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md new file mode 100644 index 0000000..1227444 --- /dev/null +++ b/evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md @@ -0,0 +1,78 @@ +# `BEP-0005` - Daemon IPC and Apple Boundary + +```text +Status: Draft +Proposal: BEP-0005 +Authors: gpt-5.4 +Coordinator: gpt-5.4 +Reviewers: Pending +Constitution Sections: II, III, IV, V +Implementation PRs: Pending +Decision Date: Pending +``` + +## Summary + +Burrow should formalize one Apple/runtime boundary: Apple clients speak only to the daemon over gRPC on the app-group Unix socket, and the daemon owns all external control-plane, helper-process, and runtime coordination work. This prevents UI code from accreting side HTTP paths or ad hoc control-plane integrations that bypass the system Burrow is supposed to own. + +## Motivation + +- The current Tailnet work already showed the failure mode: Swift UI code started reaching around the daemon boundary to talk to helper HTTP endpoints directly. +- Apple-specific process ownership is easy to blur between the app, the network extension, and helper daemons unless the contract is explicit. +- If Burrow wants a durable multi-runtime architecture, the daemon must remain the only orchestration boundary between clients and control/data-plane behavior. + +## Detailed Design + +- Apple UI and Apple support libraries may call only daemon gRPC methods over the declared Burrow Unix socket. +- Direct Swift calls to external control-plane HTTP APIs, localhost helper HTTP servers, or runtime-specific subprocesses are forbidden. 
+- The daemon is responsible for: + - discovery of Tailnet authorities and related metadata + - control-plane session setup and tracking + - login/session lifecycle brokering + - runtime start/stop/reconcile + - translating helper or bridge processes into stable daemon RPCs +- `burrow/src/control/` owns transport-neutral control-plane semantics such as discovery, authority normalization, and request/response shaping. +- Apple UI owns presentation only: + - forms + - local state + - presenting returned auth URLs or statuses + - surfacing daemon availability and errors +- Any new Apple-facing runtime capability requires a daemon RPC first. + +## Security and Operational Considerations + +- Keeping control-plane I/O out of Swift UI reduces accidental secret, token, and callback sprawl across app code. +- The daemon boundary makes testing and kill-switch behavior tractable because runtime integration is localized. +- Apple daemon lifecycle ownership must be explicit: either the app ensures the daemon is running before RPC or the extension owns it and the UI surfaces daemon-unavailable state clearly. + +## Contributor Playbook + +- Before adding a new Apple-side workflow, identify the daemon RPC that should own it. +- If the RPC does not exist, add the protocol shape in `proto/burrow.proto`, implement it in the daemon, and only then wire Swift UI. +- Verify that no Swift UI or support code calls external control-plane HTTP endpoints directly. +- For Tailnet and similar flows, test: + - daemon unavailable behavior + - successful RPC path + - error propagation through the UI + +## Alternatives Considered + +- Let Apple UI call control-plane endpoints directly for convenience. Rejected because it creates parallel orchestration paths and breaks the daemon contract. +- Allow one-off exceptions for login helpers. Rejected because those exceptions become the architecture. + +## Impact on Other Work + +- Governs the Tailnet refactor and future Apple runtime work. 
+- Interacts with BEP-0002 control-plane bootstrap and BEP-0003 transport refactoring. + +## Decision + +Pending. + +## References + +- `Apple/UI/` +- `Apple/Core/` +- `Apple/NetworkExtension/` +- `burrow/src/daemon/` +- `burrow/src/control/` diff --git a/evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md b/evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md new file mode 100644 index 0000000..fea4aba --- /dev/null +++ b/evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md @@ -0,0 +1,71 @@ +# `BEP-0006` - Tailnet Authority-First Control Plane + +```text +Status: Draft +Proposal: BEP-0006 +Authors: gpt-5.4 +Coordinator: gpt-5.4 +Reviewers: Pending +Constitution Sections: I, II, IV, V +Implementation PRs: Pending +Decision Date: Pending +``` + +## Summary + +Burrow should treat Tailnet as one protocol family. Tailscale-managed and self-hosted Headscale-style deployments differ by authority, policy, and auth details, not by a distinct user-facing protocol. Burrow’s config and UI should therefore be authority-first rather than provider-first. + +## Motivation + +- Splitting Tailscale and Headscale into separate user-facing providers causes fake architectural divergence. +- Discovery already naturally returns an authority and optional issuer; that is the stable contract users actually need. +- Future managed or enterprise deployments should fit the same model without requiring another protocol picker. + +## Detailed Design + +- Tailnet configuration is centered on: + - account + - identity + - authority/login server URL + - optional tailnet name + - optional hostname + - auth method/material +- User-facing surfaces should not force a protocol choice between Tailscale and Headscale. 
+- Provider inference may remain internal metadata for compatibility and diagnostics: + - default managed Tailscale authority + - custom self-hosted authority + - Burrow-owned authority when explicitly applicable +- Discovery returns authority and related metadata; editing the authority is the mechanism that moves a configuration from managed default to custom control server. +- The daemon and control layer own provider inference; the UI should primarily present “Tailnet” plus the selected authority. + +## Security and Operational Considerations + +- Authority-first config reduces UI complexity and makes misconfiguration easier to reason about. +- Provider-specific assumptions must not leak into packet or control-plane semantics unless the authority actually requires them. +- Auth material must remain authority-scoped and identity-scoped in daemon storage. + +## Contributor Playbook + +- Remove provider pickers from Tailnet UI unless a concrete protocol difference requires one. +- Store the authority explicitly in payloads and infer provider internally only when needed. +- Prefer tests that validate authority normalization and discovery behavior over UI-provider branching. + +## Alternatives Considered + +- Keep separate user-facing providers for Tailscale and Headscale. Rejected because it models deployment shape as protocol shape. +- Collapse all control planes into one opaque Burrow provider. Rejected because the authority still matters operationally and diagnostically. + +## Impact on Other Work + +- Refines BEP-0002’s Tailscale-shaped control-plane work. +- Constrains the Tailnet Apple refactor and future daemon control-plane storage. + +## Decision + +Pending. 
+ +## References + +- `burrow/src/control/` +- `Apple/UI/Networks/` +- `proto/burrow.proto` diff --git a/evolution/proposals/BEP-0007-identity-registry-and-operator-bootstrap.md b/evolution/proposals/BEP-0007-identity-registry-and-operator-bootstrap.md new file mode 100644 index 0000000..1fde0fb --- /dev/null +++ b/evolution/proposals/BEP-0007-identity-registry-and-operator-bootstrap.md @@ -0,0 +1,73 @@ +# `BEP-0007` - Identity Registry and Operator Bootstrap + +```text +Status: Draft +Proposal: BEP-0007 +Authors: gpt-5.4 +Coordinator: gpt-5.4 +Reviewers: Pending +Constitution Sections: II, III, IV, V +Implementation PRs: Pending +Decision Date: Pending +``` + +## Summary + +Burrow should maintain one canonical registry for project identities, aliases, bootstrap users, SSH keys, and admin-group mappings. Forgejo, Authentik, and related bootstrap configuration should derive from that registry instead of hardcoding overlapping identity facts in multiple modules. + +## Motivation + +- Burrow currently hardcodes operator and admin/bootstrap user facts directly in host configuration. +- Multi-account and self-hosted identity are becoming core architecture, not incidental infra details. +- A single registry reduces drift across Forgejo, Authentik, Headscale, SSH authorization, and future control-plane bootstrap. + +## Detailed Design + +- Add a root-level identity registry (`contributors.nix`) as the canonical source of truth for: + - usernames + - display names + - canonical emails + - external source emails or aliases + - admin scope + - bootstrap eligibility + - forge authorized SSH keys + - named roles +- Consume that registry from host configuration for: + - Forgejo authorized keys + - Forgejo bootstrap admin defaults + - Authentik bootstrap users + - Burrow user/admin group names +- Future work may derive contributor docs, OIDC bootstrap, and additional runtime configuration from the same registry. 
+ +## Security and Operational Considerations + +- Identity drift is a security bug when it affects admin groups, bootstrap accounts, or SSH authorization. +- The registry stores metadata only; secrets remain in agenix or other declared secret paths. +- Changes to the registry should receive explicit review because they affect access and governance. + +## Contributor Playbook + +- Edit `contributors.nix` first when changing operator, admin, alias, or bootstrap identity state. +- Derive runtime configuration from the registry instead of duplicating the same facts elsewhere. +- Keep secret references separate from identity metadata. + +## Alternatives Considered + +- Continue hardcoding users in module options. Rejected because drift is inevitable once Forgejo, Authentik, and Headscale all depend on the same identities. +- Create separate per-service user lists. Rejected because it duplicates governance facts and weakens review. + +## Impact on Other Work + +- Supports forge auth, Authentik group sync, and future multi-account Burrow control-plane work. +- Creates the basis for stronger contributor and operator provenance later. + +## Decision + +Pending. + +## References + +- `contributors.nix` +- `nixos/hosts/burrow-forge/default.nix` +- `nixos/modules/burrow-authentik.nix` +- `nixos/modules/burrow-forge.nix` diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index d612ea8..fb5b8ae 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -1,4 +1,23 @@ -{ config, self, ... }: +{ config, lib, self, ... 
}: + +let + contributors = import ../../../contributors.nix; + identities = contributors.identities; + bootstrapUsers = lib.mapAttrsToList + ( + username: identity: { + inherit username; + name = identity.displayName; + email = identity.canonicalEmail; + sourceEmail = identity.sourceEmail or null; + isAdmin = identity.isAdmin or false; + } + ) + (lib.filterAttrs (_: identity: identity.bootstrapAuthentik or false) identities); + forgeAuthorizedKeys = map + (username: builtins.readFile identities.${username}.sshPublicKeyPath) + (builtins.attrNames (lib.filterAttrs (_: identity: identity.forgeAuthorized or false) identities)); +in { imports = [ @@ -59,12 +78,14 @@ services.burrow.forge = { enable = true; + contactEmail = identities.contact.canonicalEmail; + adminUsername = "contact"; + adminEmail = identities.contact.canonicalEmail; adminPasswordFile = "/var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt"; + oidcAdminGroup = contributors.groups.admins; + oidcRestrictedGroup = contributors.groups.users; oidcClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path; - authorizedKeys = [ - (builtins.readFile ../../keys/contact_at_burrow_net.pub) - (builtins.readFile ../../keys/agent_at_burrow_net.pub) - ]; + authorizedKeys = forgeAuthorizedKeys; }; services.burrow.forgeRunner = { @@ -92,22 +113,9 @@ googleClientIDFile = config.age.secrets.burrowAuthentikGoogleClientId.path; googleClientSecretFile = config.age.secrets.burrowAuthentikGoogleClientSecret.path; googleLoginMode = "redirect"; - bootstrapUsers = [ - { - username = "contact"; - name = "Burrow"; - email = "contact@burrow.net"; - sourceEmail = "net.burrow@gmail.com"; - isAdmin = true; - } - { - username = "conrad"; - name = "Conrad Kramer"; - email = "conrad@burrow.net"; - sourceEmail = "ckrames1234@gmail.com"; - isAdmin = true; - } - ]; + userGroupName = contributors.groups.users; + adminGroupName = contributors.groups.admins; + bootstrapUsers = bootstrapUsers; }; services.burrow.headscale 
= { From d1e28b881775967fa696294bc4d3c18ebebde757 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Fri, 3 Apr 2026 01:36:55 -0700 Subject: [PATCH 09/59] Route Tailnet Apple flows through daemon gRPC --- Apple/Core/Client.swift | 198 ++++++++++++++++ Apple/UI/BurrowView.swift | 406 ++++++-------------------------- Apple/UI/Networks/Network.swift | 254 ++++++-------------- burrow/src/control/discovery.rs | 136 ++++++++++- burrow/src/daemon/instance.rs | 48 +++- burrow/src/daemon/mod.rs | 7 +- burrow/src/daemon/rpc/client.rs | 8 +- proto/burrow.proto | 28 +++ 8 files changed, 565 insertions(+), 520 deletions(-) diff --git a/Apple/Core/Client.swift b/Apple/Core/Client.swift index 8874e3b..c426fe7 100644 --- a/Apple/Core/Client.swift +++ b/Apple/Core/Client.swift @@ -1,5 +1,7 @@ +import Foundation import GRPC import NIOTransportServices +import SwiftProtobuf public typealias TunnelClient = Burrow_TunnelAsyncClient public typealias NetworksClient = Burrow_NetworksAsyncClient @@ -30,3 +32,199 @@ extension NetworksClient: Client { self.init(channel: channel, defaultCallOptions: .init(), interceptors: .none) } } + +public struct Burrow_TailnetDiscoverRequest: Sendable { + public var email: String = "" + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TailnetDiscoverResponse: Sendable { + public var domain: String = "" + public var authority: String = "" + public var oidcIssuer: String = "" + public var managed: Bool = false + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TailnetProbeRequest: Sendable { + public var authority: String = "" + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TailnetProbeResponse: Sendable { + public var authority: String = "" + public var statusCode: Int32 = 0 + public var summary: String = "" + public var detail: String = "" + public var reachable: Bool = false + public 
var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +extension Burrow_TailnetDiscoverRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetDiscoverRequest" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "email") + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.email) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.email.isEmpty { + try visitor.visitSingularStringField(value: self.email, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +extension Burrow_TailnetDiscoverResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetDiscoverResponse" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "domain"), + 2: .same(proto: "authority"), + 3: .same(proto: "oidc_issuer"), + 4: .same(proto: "managed"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.domain) + case 2: try decoder.decodeSingularStringField(value: &self.authority) + case 3: try decoder.decodeSingularStringField(value: &self.oidcIssuer) + case 4: try decoder.decodeSingularBoolField(value: &self.managed) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.domain.isEmpty { + try visitor.visitSingularStringField(value: self.domain, fieldNumber: 1) + } + if !self.authority.isEmpty { + try visitor.visitSingularStringField(value: self.authority, fieldNumber: 2) 
+ } + if !self.oidcIssuer.isEmpty { + try visitor.visitSingularStringField(value: self.oidcIssuer, fieldNumber: 3) + } + if self.managed { + try visitor.visitSingularBoolField(value: self.managed, fieldNumber: 4) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +extension Burrow_TailnetProbeRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetProbeRequest" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "authority") + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.authority) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.authority.isEmpty { + try visitor.visitSingularStringField(value: self.authority, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +extension Burrow_TailnetProbeResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetProbeResponse" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "authority"), + 2: .same(proto: "status_code"), + 3: .same(proto: "summary"), + 4: .same(proto: "detail"), + 5: .same(proto: "reachable"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.authority) + case 2: try decoder.decodeSingularInt32Field(value: &self.statusCode) + case 3: try decoder.decodeSingularStringField(value: &self.summary) + case 4: try decoder.decodeSingularStringField(value: &self.detail) + case 5: try decoder.decodeSingularBoolField(value: 
&self.reachable) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.authority.isEmpty { + try visitor.visitSingularStringField(value: self.authority, fieldNumber: 1) + } + if self.statusCode != 0 { + try visitor.visitSingularInt32Field(value: self.statusCode, fieldNumber: 2) + } + if !self.summary.isEmpty { + try visitor.visitSingularStringField(value: self.summary, fieldNumber: 3) + } + if !self.detail.isEmpty { + try visitor.visitSingularStringField(value: self.detail, fieldNumber: 4) + } + if self.reachable { + try visitor.visitSingularBoolField(value: self.reachable, fieldNumber: 5) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +public struct TailnetClient: Client, GRPCClient { + public let channel: GRPCChannel + public var defaultCallOptions: CallOptions + + public init(channel: any GRPCChannel) { + self.channel = channel + self.defaultCallOptions = .init() + } + + public func discover( + _ request: Burrow_TailnetDiscoverRequest, + callOptions: CallOptions? = nil + ) async throws -> Burrow_TailnetDiscoverResponse { + try await self.performAsyncUnaryCall( + path: "/burrow.TailnetControl/Discover", + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: [] + ) + } + + public func probe( + _ request: Burrow_TailnetProbeRequest, + callOptions: CallOptions? = nil + ) async throws -> Burrow_TailnetProbeResponse { + try await self.performAsyncUnaryCall( + path: "/burrow.TailnetControl/Probe", + request: request, + callOptions: callOptions ?? 
self.defaultCallOptions, + interceptors: [] + ) + } +} diff --git a/Apple/UI/BurrowView.swift b/Apple/UI/BurrowView.swift index b4fa7d8..9938eef 100644 --- a/Apple/UI/BurrowView.swift +++ b/Apple/UI/BurrowView.swift @@ -1,4 +1,3 @@ -import AuthenticationServices import BurrowConfiguration import Foundation import SwiftUI @@ -204,7 +203,7 @@ private enum ConfigurationSheet: String, CaseIterable, Identifiable { switch self { case .wireGuard: .wireGuard case .tor: .tor - case .tailnet: .headscale + case .tailnet: .tailnet } } @@ -285,13 +284,12 @@ private struct AccountDraft { var wireGuardConfig = "" var discoveryEmail = "" - var tailnetProvider: TailnetProvider = .tailscale var authority = "" var tailnet = "" var hostname = ProcessInfo.processInfo.hostName var username = "" var secret = "" - var authMode: AccountAuthMode = .web + var authMode: AccountAuthMode = .none var torAddresses = "100.64.0.2/32" var torDNS = "1.1.1.1, 1.0.0.1" @@ -317,7 +315,6 @@ private struct AccountDraft { private struct ConfigurationSheetView: View { @Environment(\.dismiss) private var dismiss - @Environment(\.webAuthenticationSession) private var webAuthenticationSession let sheet: ConfigurationSheet let networkViewModel: NetworkViewModel @@ -326,17 +323,13 @@ private struct ConfigurationSheetView: View { @State private var draft: AccountDraft @State private var isSubmitting = false @State private var errorMessage: String? - @State private var loginSessionID: String? - @State private var loginStatus: TailnetLoginStatus? @State private var discoveryStatus: TailnetDiscoveryResponse? @State private var discoveryError: String? @State private var isDiscoveringTailnet = false @State private var authorityProbeStatus: TailnetAuthorityProbeStatus? @State private var authorityProbeError: String? @State private var isProbingAuthority = false - @State private var pollingTask: Task? @State private var didRunAutomation = false - @State private var webAuthenticationTask: Task? 
init( sheet: ConfigurationSheet, @@ -447,20 +440,12 @@ private struct ConfigurationSheetView: View { .onAppear { runAutomationIfNeeded() } - .onChange(of: draft.tailnetProvider) { _, _ in - resetAuthorityProbe() - } .onChange(of: draft.authority) { _, _ in resetAuthorityProbe() } .onChange(of: draft.discoveryEmail) { _, _ in resetTailnetDiscoveryFeedback() } - .onDisappear { - pollingTask?.cancel() - webAuthenticationTask?.cancel() - webAuthenticationTask = nil - } } @ViewBuilder @@ -490,48 +475,30 @@ private struct ConfigurationSheetView: View { tailnetDiscoveryCard(status: nil, failure: discoveryError) } - Picker( - "Provider", - selection: Binding( - get: { draft.tailnetProvider }, - set: { applyTailnetProvider($0) } - ) - ) { - ForEach(TailnetProvider.allCases) { provider in - Text(provider.title).tag(provider) + TextField("Authority URL", text: $draft.authority) + .burrowLoginField() + .autocorrectionDisabled() + + Text("Use the managed Tailnet authority or enter a custom Tailnet control server.") + .font(.footnote) + .foregroundStyle(.secondary) + + Button { + probeTailnetAuthority() + } label: { + Label { + Text(isProbingAuthority ? "Checking Connection" : "Check Connection") + } icon: { + Image(systemName: isProbingAuthority ? "hourglass" : "bolt.horizontal.circle") } } - .pickerStyle(.menu) + .buttonStyle(.borderless) + .disabled(isProbingAuthority || normalizedOptional(draft.authority) == nil) - tailnetProviderCard - - if draft.tailnetProvider.requiresControlURL { - TextField("Server URL", text: $draft.authority) - .burrowLoginField() - .autocorrectionDisabled() - - Button { - probeTailnetAuthority() - } label: { - Label { - Text(isProbingAuthority ? "Checking Connection" : "Check Connection") - } icon: { - Image(systemName: isProbingAuthority ? 
"hourglass" : "bolt.horizontal.circle") - } - } - .buttonStyle(.borderless) - .disabled(isProbingAuthority || normalizedOptional(draft.authority) == nil) - - if let authorityProbeStatus { - tailnetAuthorityProbeCard(status: authorityProbeStatus, failure: nil) - } else if let authorityProbeError { - tailnetAuthorityProbeCard(status: nil, failure: authorityProbeError) - } - } else { - LabeledContent("Server") { - Text("Tailscale managed") - .foregroundStyle(.secondary) - } + if let authorityProbeStatus { + tailnetAuthorityProbeCard(status: authorityProbeStatus, failure: nil) + } else if let authorityProbeError { + tailnetAuthorityProbeCard(status: nil, failure: authorityProbeError) } TextField("Tailnet", text: $draft.tailnet) @@ -540,28 +507,24 @@ private struct ConfigurationSheetView: View { } Section("Authentication") { - if tailnetUsesWebLogin { - tailnetWebLoginCard - } else { - TextField("Username", text: $draft.username) - .burrowLoginField() - .autocorrectionDisabled() - Picker("Authentication", selection: $draft.authMode) { - ForEach(availableTailnetAuthModes) { mode in - Text(mode.title).tag(mode) - } + TextField("Username", text: $draft.username) + .burrowLoginField() + .autocorrectionDisabled() + Picker("Authentication", selection: $draft.authMode) { + ForEach(availableTailnetAuthModes) { mode in + Text(mode.title).tag(mode) } - .pickerStyle(.menu) - if draft.authMode != .none { - SecureField( - draft.authMode == .password ? "Password" : "Preauth Key", - text: $draft.secret - ) - } - Text("Credentials stay on-device. Burrow uses them when it needs to register or refresh this identity.") - .font(.footnote) - .foregroundStyle(.secondary) } + .pickerStyle(.menu) + if draft.authMode != .none { + SecureField( + draft.authMode == .password ? "Password" : "Preauth Key", + text: $draft.secret + ) + } + Text("Tailnet account material stays on-device. 
Burrow stores the authority and credentials for daemon-managed registration and refresh.") + .font(.footnote) + .foregroundStyle(.secondary) } } @@ -618,10 +581,8 @@ private struct ConfigurationSheetView: View { if sheet == .tailnet { HStack(spacing: 8) { - summaryBadge(draft.tailnetProvider.title) - summaryBadge( - tailnetUsesWebLogin ? "Web Sign-In" : draft.authMode.title - ) + summaryBadge(isManagedTailnetAuthority ? "Managed" : "Custom") + summaryBadge(draft.authMode.title) } } } @@ -632,79 +593,6 @@ private struct ConfigurationSheetView: View { ) } - private var tailnetProviderCard: some View { - VStack(alignment: .leading, spacing: 6) { - HStack(spacing: 10) { - Image(systemName: tailnetProviderIconName) - .font(.headline) - .foregroundStyle(sheetAccentColor) - .frame(width: 28, height: 28) - .background( - Circle() - .fill(sheetAccentColor.opacity(0.14)) - ) - - VStack(alignment: .leading, spacing: 2) { - Text(draft.tailnetProvider.title) - .font(.headline) - Text(draft.tailnetProvider.subtitle) - .font(.footnote) - .foregroundStyle(.secondary) - } - - Spacer() - } - } - .padding(12) - .background( - RoundedRectangle(cornerRadius: 16) - .fill(.thinMaterial) - ) - } - - @ViewBuilder - private var tailnetWebLoginCard: some View { - VStack(alignment: .leading, spacing: 10) { - Text("Sign in with the shared browser session.") - .font(.subheadline.weight(.medium)) - - if let loginStatus { - labeledValue("State", loginStatus.backendState) - if let tailnetName = loginStatus.tailnetName { - labeledValue("Tailnet", tailnetName) - } - if let dnsName = loginStatus.selfDNSName { - labeledValue("Device", dnsName) - } - if !loginStatus.tailscaleIPs.isEmpty { - labeledValue("Addresses", loginStatus.tailscaleIPs.joined(separator: ", ")) - } - if let authURL = loginStatus.authURL { - Button("Resume Sign-In") { - if let url = URL(string: authURL) { - openLoginURL(url) - } - } - .buttonStyle(.borderless) - } - if !loginStatus.health.isEmpty { - 
Text(loginStatus.health.joined(separator: " • ")) - .font(.footnote) - .foregroundStyle(.secondary) - } - } else { - Text("Burrow launches the local bridge, then opens the real provider sign-in page in-app.") - .font(.footnote) - .foregroundStyle(.secondary) - } - } - .padding(12) - .background( - RoundedRectangle(cornerRadius: 16) - .fill(.thinMaterial) - ) - } - private func tailnetAuthorityProbeCard( status: TailnetAuthorityProbeStatus?, failure: String? @@ -739,12 +627,15 @@ private struct ConfigurationSheetView: View { ) -> some View { VStack(alignment: .leading, spacing: 6) { if let status { - Text("Discovered \(status.provider.title)") + Text("Discovered Tailnet Server") .font(.subheadline.weight(.medium)) Text(status.authority) .font(.footnote.monospaced()) .foregroundStyle(.secondary) .textSelection(.enabled) + Text(status.provider == .tailscale ? "Managed authority" : "Custom authority") + .font(.footnote) + .foregroundStyle(.secondary) if let oidcIssuer = status.oidcIssuer { Text("OIDC: \(oidcIssuer)") .font(.footnote) @@ -826,12 +717,8 @@ private struct ConfigurationSheetView: View { } case .tailnet: - Menu("Provider") { - ForEach(TailnetProvider.allCases) { provider in - Button(provider.title) { - applyTailnetProvider(provider) - } - } + Button("Use Tailscale Managed Server") { + applyTailnetDefaults(for: .tailscale) } if availableTailnetAuthModes.count > 1 { @@ -839,7 +726,7 @@ private struct ConfigurationSheetView: View { ForEach(availableTailnetAuthModes) { mode in Button(mode.title) { draft.authMode = mode - if mode == .none || mode == .web { + if mode == .none { draft.secret = "" } } @@ -847,8 +734,8 @@ private struct ConfigurationSheetView: View { } } - Button("Restore Provider Defaults") { - applyTailnetDefaults(for: draft.tailnetProvider) + Button("Clear Discovery Result") { + resetTailnetDiscoveryFeedback() } } } @@ -886,17 +773,6 @@ private struct ConfigurationSheetView: View { } } - private var tailnetProviderIconName: String { - switch 
draft.tailnetProvider { - case .tailscale: - "globe.badge.chevron.backward" - case .headscale: - "server.rack" - case .burrow: - "shield" - } - } - private var showsBottomActionButton: Bool { #if os(iOS) true @@ -920,9 +796,6 @@ private struct ConfigurationSheetView: View { case .tor: return "Save Account" case .tailnet: - if tailnetUsesWebLogin { - return loginStatus?.running == true ? "Save Account" : "Start Sign-In" - } return "Save Account" } } @@ -937,12 +810,9 @@ private struct ConfigurationSheetView: View { if normalizedOptional(draft.accountName) == nil || normalizedOptional(draft.identityName) == nil { return true } - if draft.tailnetProvider.requiresControlURL && normalizedOptional(draft.authority) == nil { + if normalizedOptional(draft.authority) == nil { return true } - if tailnetUsesWebLogin { - return false - } if draft.authMode != .none && normalizedOptional(draft.secret) == nil { return true } @@ -1027,41 +897,12 @@ private struct ConfigurationSheetView: View { } private func submitTailnet() async throws { - if tailnetUsesWebLogin { - if loginStatus?.running == true { - webAuthenticationTask?.cancel() - webAuthenticationTask = nil - try await saveTailnetAccount(secret: nil, username: nil) - dismiss() - } else { - try await startTailnetLogin() - } - return - } - let secret = draft.authMode == .none ? nil : draft.secret let username = normalizedOptional(draft.username) try await saveTailnetAccount(secret: secret, username: username) dismiss() } - private func startTailnetLogin() async throws { - let response = try await TailnetBridgeClient.startLogin( - TailnetLoginStartRequest( - accountName: normalized(draft.accountName, fallback: "default"), - identityName: normalized(draft.identityName, fallback: "apple"), - hostname: normalizedOptional(draft.hostname), - controlURL: normalizedOptional(draft.authority) ?? 
draft.tailnetProvider.defaultAuthority - ) - ) - loginSessionID = response.sessionID - loginStatus = response.status - if let authURL = response.status.authURL, let url = URL(string: authURL) { - openLoginURL(url) - } - startPollingTailscaleLogin() - } - private func runAutomationIfNeeded() { guard !didRunAutomation, sheet == .tailnet, @@ -1080,79 +921,19 @@ private struct ConfigurationSheetView: View { Task { @MainActor in switch automation.action { case .tailnetLogin: - draft.tailnetProvider = .tailscale - do { - try await startTailnetLogin() - } catch { - errorMessage = error.localizedDescription - } + applyTailnetDefaults(for: .tailscale) + probeTailnetAuthority() case .headscaleProbe: - applyTailnetProvider(.headscale) draft.authority = automation.authority ?? TailnetProvider.headscale.defaultAuthority ?? draft.authority probeTailnetAuthority() } } } - private func startPollingTailscaleLogin() { - pollingTask?.cancel() - guard let loginSessionID else { return } - pollingTask = Task { @MainActor in - while !Task.isCancelled { - do { - let status = try await TailnetBridgeClient.status(sessionID: loginSessionID) - let previousAuthURL = loginStatus?.authURL - loginStatus = status - if previousAuthURL == nil, - let authURL = status.authURL, - let url = URL(string: authURL) - { - openLoginURL(url) - } - if status.running { - webAuthenticationTask?.cancel() - webAuthenticationTask = nil - return - } - } catch { - errorMessage = error.localizedDescription - return - } - try? await Task.sleep(for: .seconds(2)) - } - } - } - - private func openLoginURL(_ url: URL) { - webAuthenticationTask?.cancel() - webAuthenticationTask = Task { @MainActor in - try? 
await Task.sleep(for: .milliseconds(300)) - do { - _ = try await webAuthenticationSession.authenticate( - using: url, - callbackURLScheme: "burrow", - preferredBrowserSession: .shared - ) - } catch is CancellationError { - return - } catch let error as ASWebAuthenticationSessionError - where error.code == .canceledLogin - { - return - } catch { - errorMessage = error.localizedDescription - } - webAuthenticationTask = nil - } - } - private func saveTailnetAccount(secret: String?, username: String?) async throws { - let provider = draft.tailnetProvider + let provider = inferredTailnetProvider let title = titleOrFallback( - hostnameFallback( - from: tailnetUsesWebLogin ? (loginStatus?.tailnetName ?? "") : draft.authority, - fallback: provider.title - ) + hostnameFallback(from: draft.authority, fallback: "Tailnet") ) let payload = TailnetNetworkPayload( @@ -1160,22 +941,14 @@ private struct ConfigurationSheetView: View { authority: normalizedOptional(draft.authority) ?? normalizedOptional(provider.defaultAuthority ?? ""), account: normalized(draft.accountName, fallback: "default"), identity: normalized(draft.identityName, fallback: "apple"), - tailnet: normalizedOptional(loginStatus?.tailnetName ?? draft.tailnet), + tailnet: normalizedOptional(draft.tailnet), hostname: normalizedOptional(draft.hostname) ) var noteParts: [String] = [ - provider.title, - tailnetUsesWebLogin - ? "State: \(loginStatus?.backendState ?? "NeedsLogin")" - : "Auth: \(draft.authMode.title)", + isManagedTailnetAuthority ? 
"Managed Tailnet" : "Custom Tailnet", + "Auth: \(draft.authMode.title)", ] - if let dnsName = loginStatus?.selfDNSName { - noteParts.append("Device: \(dnsName)") - } - if let magicDNSSuffix = loginStatus?.magicDNSSuffix { - noteParts.append("MagicDNS: \(magicDNSSuffix)") - } do { let networkID = try await networkViewModel.addTailnetNetwork(payload: payload) @@ -1186,7 +959,7 @@ private struct ConfigurationSheetView: View { let record = NetworkAccountRecord( id: UUID(), - kind: .headscale, + kind: .tailnet, title: title, authority: payload.authority, provider: provider, @@ -1195,7 +968,7 @@ private struct ConfigurationSheetView: View { hostname: payload.hostname, username: username, tailnet: payload.tailnet, - authMode: tailnetUsesWebLogin ? .web : draft.authMode, + authMode: draft.authMode, note: noteParts.joined(separator: " • "), createdAt: .now, updatedAt: .now @@ -1226,33 +999,15 @@ private struct ConfigurationSheetView: View { draft.torListen = defaults.torListen } - private func applyTailnetProvider(_ provider: TailnetProvider) { - resetTailnetDiscoveryFeedback() - draft.tailnetProvider = provider - applyTailnetDefaults(for: provider) - } - private func applyTailnetDefaults(for provider: TailnetProvider) { + resetTailnetDiscoveryFeedback() draft.authority = provider.defaultAuthority ?? "" - loginStatus = nil - loginSessionID = nil - pollingTask?.cancel() - if provider == .tailscale { - draft.authMode = .web - draft.username = "" - draft.secret = "" - } else { - if !availableTailnetAuthModes.contains(draft.authMode) { - draft.authMode = provider.supportsWebLogin ? 
.web : .none - } - if draft.authMode == .web && !provider.supportsWebLogin { - draft.authMode = .none - } + if !availableTailnetAuthModes.contains(draft.authMode) { + draft.authMode = .none } } private func probeTailnetAuthority() { - guard draft.tailnetProvider.requiresControlURL else { return } guard let authority = normalizedOptional(draft.authority) else { authorityProbeStatus = nil authorityProbeError = "Enter a server URL first." @@ -1266,10 +1021,7 @@ private struct ConfigurationSheetView: View { Task { @MainActor in defer { isProbingAuthority = false } do { - authorityProbeStatus = try await TailnetAuthorityProbeClient.probe( - provider: draft.tailnetProvider, - authority: authority - ) + authorityProbeStatus = try await networkViewModel.probeTailnetAuthority(authority) } catch { authorityProbeError = error.localizedDescription } @@ -1300,15 +1052,9 @@ private struct ConfigurationSheetView: View { Task { @MainActor in defer { isDiscoveringTailnet = false } do { - let discovery = try await TailnetDiscoveryClient.discover(email: email) + let discovery = try await networkViewModel.discoverTailnet(email: email) discoveryStatus = discovery - draft.tailnetProvider = discovery.provider draft.authority = discovery.authority - if discovery.provider.supportsWebLogin, discovery.oidcIssuer != nil { - draft.authMode = .web - draft.username = "" - draft.secret = "" - } probeTailnetAuthority() } catch { discoveryError = error.localizedDescription @@ -1361,19 +1107,19 @@ private struct ConfigurationSheetView: View { return host } - private var tailnetUsesWebLogin: Bool { - draft.authMode == .web && draft.tailnetProvider.supportsWebLogin + private var availableTailnetAuthModes: [AccountAuthMode] { + [.none, .password, .preauthKey] } - private var availableTailnetAuthModes: [AccountAuthMode] { - switch draft.tailnetProvider { - case .tailscale: - [.web] - case .headscale: - [.web, .none, .password, .preauthKey] - case .burrow: - [.none, .password, .preauthKey] - } + private 
var inferredTailnetProvider: TailnetProvider { + TailnetProvider.inferred( + authority: normalizedOptional(draft.authority), + explicit: discoveryStatus?.provider + ) + } + + private var isManagedTailnetAuthority: Bool { + TailnetProvider.isManagedTailscaleAuthority(normalizedOptional(draft.authority)) } @ViewBuilder diff --git a/Apple/UI/Networks/Network.swift b/Apple/UI/Networks/Network.swift index 9a534ce..b048add 100644 --- a/Apple/UI/Networks/Network.swift +++ b/Apple/UI/Networks/Network.swift @@ -26,13 +26,6 @@ struct TailnetNetworkPayload: Codable, Sendable { } } -struct TailnetLoginStartRequest: Codable, Sendable { - var accountName: String - var identityName: String - var hostname: String? - var controlURL: String? -} - struct TailnetDiscoveryResponse: Codable, Sendable { var domain: String var provider: TailnetProvider @@ -40,23 +33,6 @@ struct TailnetDiscoveryResponse: Codable, Sendable { var oidcIssuer: String? } -struct TailnetLoginStatus: Codable, Sendable { - var backendState: String - var authURL: String? - var running: Bool - var needsLogin: Bool - var tailnetName: String? - var magicDNSSuffix: String? - var selfDNSName: String? - var tailscaleIPs: [String] - var health: [String] -} - -struct TailnetLoginStartResponse: Codable, Sendable { - var sessionID: String - var status: TailnetLoginStatus -} - struct TailnetAuthorityProbeStatus: Sendable { var authority: String var statusCode: Int @@ -64,148 +40,38 @@ struct TailnetAuthorityProbeStatus: Sendable { var detail: String? } -enum TailnetBridgeClient { - private static let baseURL = URL(string: "http://127.0.0.1:8080")! 
- - static func startLogin(_ request: TailnetLoginStartRequest) async throws -> TailnetLoginStartResponse { - var urlRequest = URLRequest( - url: baseURL.appendingPathComponent("v1/tailscale/login/start") - ) - urlRequest.httpMethod = "POST" - urlRequest.setValue("application/json", forHTTPHeaderField: "Content-Type") - - let encoder = JSONEncoder() - encoder.keyEncodingStrategy = .convertToSnakeCase - urlRequest.httpBody = try encoder.encode(request) - - let (data, response) = try await URLSession.shared.data(for: urlRequest) - try validate(response: response, data: data) - - let decoder = JSONDecoder() - decoder.keyDecodingStrategy = .convertFromSnakeCase - return try decoder.decode(TailnetLoginStartResponse.self, from: data) - } - - static func status(sessionID: String) async throws -> TailnetLoginStatus { - let url = baseURL - .appendingPathComponent("v1/tailscale/login") - .appendingPathComponent(sessionID) - let (data, response) = try await URLSession.shared.data(from: url) - try validate(response: response, data: data) - - let decoder = JSONDecoder() - decoder.keyDecodingStrategy = .convertFromSnakeCase - return try decoder.decode(TailnetLoginStatus.self, from: data) - } - - fileprivate static func validate(response: URLResponse, data: Data) throws { - guard let http = response as? HTTPURLResponse else { - throw URLError(.badServerResponse) - } - guard (200..<300).contains(http.statusCode) else { - let message = String(data: data, encoding: .utf8)?.trimmingCharacters( - in: .whitespacesAndNewlines - ) - throw TailnetBridgeError.server(message?.ifEmpty("HTTP \(http.statusCode)") ?? "HTTP \(http.statusCode)") - } - } -} - enum TailnetDiscoveryClient { - private static let baseURL = URL(string: "http://127.0.0.1:8080")! 
+ static func discover(email: String, socketURL: URL) async throws -> TailnetDiscoveryResponse { + var request = Burrow_TailnetDiscoverRequest() + request.email = email - static func discover(email: String) async throws -> TailnetDiscoveryResponse { - guard var components = URLComponents( - url: baseURL.appendingPathComponent("v1/tailnet/discover"), - resolvingAgainstBaseURL: false - ) else { - throw URLError(.badURL) - } - components.queryItems = [ - URLQueryItem(name: "email", value: email) - ] - guard let url = components.url else { - throw URLError(.badURL) - } - - let (data, response) = try await URLSession.shared.data(from: url) - try TailnetBridgeClient.validate(response: response, data: data) - - let decoder = JSONDecoder() - decoder.keyDecodingStrategy = .convertFromSnakeCase - return try decoder.decode(TailnetDiscoveryResponse.self, from: data) + let response = try await TailnetClient.unix(socketURL: socketURL).discover(request) + return TailnetDiscoveryResponse( + domain: response.domain, + provider: response.managed ? .tailscale : .headscale, + authority: response.authority, + oidcIssuer: response.oidcIssuer.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + ? nil + : response.oidcIssuer + ) } } enum TailnetAuthorityProbeClient { - static func probe(provider: TailnetProvider, authority: String) async throws -> TailnetAuthorityProbeStatus { - let normalizedAuthority = normalizeAuthority(authority) - let baseURL = try validatedBaseURL(normalizedAuthority) - let probeURL = probeURL(for: provider, baseURL: baseURL) - - var request = URLRequest(url: probeURL) - request.timeoutInterval = 10 - request.setValue("application/json", forHTTPHeaderField: "Accept") - - let (data, response) = try await URLSession.shared.data(for: request) - guard let http = response as? 
HTTPURLResponse else { - throw URLError(.badServerResponse) - } - guard (200..<300).contains(http.statusCode) else { - let message = String(data: data, encoding: .utf8)?.trimmingCharacters( - in: .whitespacesAndNewlines - ) - throw TailnetBridgeError.server(message?.ifEmpty("HTTP \(http.statusCode)") ?? "HTTP \(http.statusCode)") - } - - let body = String(data: data, encoding: .utf8)? - .trimmingCharacters(in: .whitespacesAndNewlines) - let detail = body.flatMap { $0.isEmpty ? nil : $0 } + static func probe(authority: String, socketURL: URL) async throws -> TailnetAuthorityProbeStatus { + var request = Burrow_TailnetProbeRequest() + request.authority = authority + let response = try await TailnetClient.unix(socketURL: socketURL).probe(request) return TailnetAuthorityProbeStatus( - authority: normalizedAuthority, - statusCode: http.statusCode, - summary: "\(provider.title) reachable", - detail: detail + authority: response.authority, + statusCode: Int(response.statusCode), + summary: response.summary, + detail: response.detail.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + ? nil + : response.detail ) } - - private static func normalizeAuthority(_ authority: String) -> String { - let trimmed = authority.trimmingCharacters(in: .whitespacesAndNewlines) - if trimmed.contains("://") { - return trimmed - } - return "https://\(trimmed)" - } - - private static func validatedBaseURL(_ authority: String) throws -> URL { - guard let url = URL(string: authority), url.host != nil else { - throw TailnetBridgeError.server("Invalid server URL") - } - return url - } - - private static func probeURL(for provider: TailnetProvider, baseURL: URL) -> URL { - switch provider { - case .headscale: - baseURL.appendingPathComponent("health") - case .burrow: - baseURL.appendingPathComponent("healthz") - case .tailscale: - baseURL - } - } -} - -enum TailnetBridgeError: LocalizedError { - case server(String) - - var errorDescription: String? 
{ - switch self { - case .server(let message): - message - } - } } @Observable @@ -215,7 +81,7 @@ final class NetworkViewModel: Sendable { private(set) var connectionError: String? private let socketURLResult: Result - nonisolated(unsafe) private var task: Task? + @ObservationIgnored private var task: Task? init(socketURLResult: Result) { self.socketURLResult = socketURLResult @@ -242,6 +108,16 @@ final class NetworkViewModel: Sendable { try await addNetwork(type: .tailnet, payload: payload.encoded()) } + func discoverTailnet(email: String) async throws -> TailnetDiscoveryResponse { + let socketURL = try socketURLResult.get() + return try await TailnetDiscoveryClient.discover(email: email, socketURL: socketURL) + } + + func probeTailnetAuthority(_ authority: String) async throws -> TailnetAuthorityProbeStatus { + let socketURL = try socketURLResult.get() + return try await TailnetAuthorityProbeClient.probe(authority: authority, socketURL: socketURL) + } + private func addNetwork(type: Burrow_NetworkType, payload: Data) async throws -> Int32 { let socketURL = try socketURLResult.get() let networkID = nextNetworkID @@ -341,19 +217,6 @@ enum TailnetProvider: String, CaseIterable, Codable, Identifiable, Sendable { } } - var supportsWebLogin: Bool { - switch self { - case .tailscale, .headscale: - true - case .burrow: - false - } - } - - var requiresControlURL: Bool { - self != .tailscale - } - var defaultAuthority: String? { switch self { case .tailscale: @@ -368,19 +231,44 @@ enum TailnetProvider: String, CaseIterable, Codable, Identifiable, Sendable { var subtitle: String { switch self { case .tailscale: - "Use Tailscale's real browser login flow." + "Managed Tailnet authority." case .headscale: - "Use your Headscale control plane with browser or key-based sign-in." + "Custom Tailnet control server." case .burrow: - "Store Burrow control-plane credentials." + "Burrow-native Tailnet authority." 
} } + + static func inferred(authority: String?, explicit: TailnetProvider?) -> TailnetProvider { + if explicit == .burrow { + return .burrow + } + if isManagedTailscaleAuthority(authority) { + return .tailscale + } + return .headscale + } + + static func isManagedTailscaleAuthority(_ authority: String?) -> Bool { + guard let normalized = authority? + .trimmingCharacters(in: .whitespacesAndNewlines) + .lowercased() + .trimmingCharacters(in: CharacterSet(charactersIn: "/")), + !normalized.isEmpty + else { + return false + } + + return normalized == "https://controlplane.tailscale.com" + || normalized == "http://controlplane.tailscale.com" + || normalized == "controlplane.tailscale.com" + } } enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable { case wireGuard case tor - case headscale + case tailnet var id: String { rawValue } @@ -388,7 +276,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable { switch self { case .wireGuard: "WireGuard" case .tor: "Tor" - case .headscale: "Tailnet" + case .tailnet: "Tailnet" } } @@ -396,7 +284,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable { switch self { case .wireGuard: "Import a tunnel and optional account metadata." case .tor: "Store Arti account and identity preferences." - case .headscale: "Save Tailscale, Headscale, or Burrow control-plane identities." + case .tailnet: "Save Tailnet authority, identity, and login material." 
} } @@ -404,7 +292,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable { switch self { case .wireGuard: .init("WireGuard") case .tor: .orange - case .headscale: .mint + case .tailnet: .mint } } @@ -412,7 +300,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable { switch self { case .wireGuard: "Add Network" case .tor: "Save Account" - case .headscale: "Save Account" + case .tailnet: "Save Account" } } @@ -422,7 +310,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable { nil case .tor: "Tor account preferences are stored on Apple now. The managed Tor runtime is not wired on Apple in this branch yet." - case .headscale: + case .tailnet: "Tailnet accounts can sign in from Apple now. The managed Apple runtime is still pending, but Tailnet networks can be stored in the daemon." } } @@ -430,7 +318,6 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable { enum AccountAuthMode: String, CaseIterable, Codable, Identifiable, Sendable { case none - case web case password case preauthKey @@ -439,7 +326,6 @@ enum AccountAuthMode: String, CaseIterable, Codable, Identifiable, Sendable { var title: String { switch self { case .none: "None" - case .web: "Web Login" case .password: "Password" case .preauthKey: "Preauth Key" } @@ -465,17 +351,15 @@ struct NetworkAccountRecord: Codable, Identifiable, Hashable, Sendable { struct TailnetCard { var id: Int32 - var provider: String var title: String var detail: String init(network: Burrow_Network) { let payload = (try? JSONDecoder().decode(TailnetNetworkPayload.self, from: network.payload)) id = network.id - provider = payload?.provider.title ?? "Tailnet" title = payload?.tailnet ?? payload?.hostname ?? "Tailnet" detail = [ - payload?.provider.title, + payload?.authority.flatMap { URL(string: $0)?.host } ?? 
payload?.authority, payload?.authority, payload.map { "Account: \($0.account)" }, ] @@ -492,7 +376,7 @@ struct TailnetCard { VStack(alignment: .leading, spacing: 12) { HStack { VStack(alignment: .leading, spacing: 4) { - Text(provider) + Text("Tailnet") .font(.headline) .foregroundStyle(.white.opacity(0.85)) Text(title) diff --git a/burrow/src/control/discovery.rs b/burrow/src/control/discovery.rs index 28b48bb..5fc7add 100644 --- a/burrow/src/control/discovery.rs +++ b/burrow/src/control/discovery.rs @@ -7,6 +7,7 @@ use super::TailnetProvider; pub const TAILNET_DISCOVERY_REL: &str = "https://burrow.net/rel/tailnet-control-server"; const TAILNET_DISCOVERY_PATH: &str = "/.well-known/burrow-tailnet"; const WEBFINGER_PATH: &str = "/.well-known/webfinger"; +const MANAGED_TAILSCALE_AUTHORITY: &str = "controlplane.tailscale.com"; #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct TailnetDiscovery { @@ -17,6 +18,15 @@ pub struct TailnetDiscovery { pub oidc_issuer: Option, } +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct TailnetAuthorityProbe { + pub authority: String, + pub status_code: i32, + pub summary: String, + pub detail: String, + pub reachable: bool, +} + #[derive(Clone, Debug, Default, Deserialize)] struct WebFingerDocument { #[serde(default)] @@ -43,6 +53,63 @@ pub async fn discover_tailnet(email: &str) -> Result { discover_tailnet_at(&client, email, &base_url).await } +pub fn normalize_authority(authority: &str) -> String { + let trimmed = authority.trim(); + if trimmed.contains("://") { + trimmed.to_owned() + } else { + format!("https://{trimmed}") + } +} + +pub fn is_managed_tailscale_authority(authority: &str) -> bool { + let normalized = normalize_authority(authority) + .trim_end_matches('/') + .to_ascii_lowercase(); + normalized == format!("https://{MANAGED_TAILSCALE_AUTHORITY}") + || normalized == format!("http://{MANAGED_TAILSCALE_AUTHORITY}") +} + +pub async fn probe_tailnet_authority(authority: &str) 
-> Result { + let authority = normalize_authority(authority); + if is_managed_tailscale_authority(&authority) { + return Ok(TailnetAuthorityProbe { + authority, + status_code: 200, + summary: "Tailscale-managed control plane".to_owned(), + detail: "Using Tailscale's default login server.".to_owned(), + reachable: true, + }); + } + + let base_url = + Url::parse(&authority).with_context(|| format!("invalid tailnet authority {authority}"))?; + let client = Client::builder() + .user_agent("burrow-tailnet-probe") + .timeout(std::time::Duration::from_secs(10)) + .build() + .context("failed to build tailnet authority probe client")?; + + if let Some(status) = + probe_url(&client, base_url.join("/health")?, &authority, "Tailnet server reachable").await? + { + return Ok(status); + } + + if let Some(status) = probe_url( + &client, + base_url.clone(), + &authority, + "Tailnet server reachable", + ) + .await? + { + return Ok(status); + } + + Err(anyhow!("could not connect to the server")) +} + pub async fn discover_tailnet_at( client: &Client, email: &str, @@ -57,7 +124,7 @@ pub async fn discover_tailnet_at( if let Some(authority) = discover_webfinger(client, email, base_url).await? 
{ return Ok(TailnetDiscovery { domain, - provider: TailnetProvider::Headscale, + provider: inferred_provider(Some(&authority), None), authority, oidc_issuer: None, }); @@ -78,6 +145,19 @@ pub fn email_domain(email: &str) -> Result { Ok(domain) } +pub fn inferred_provider( + authority: Option<&str>, + explicit: Option<&TailnetProvider>, +) -> TailnetProvider { + if matches!(explicit, Some(TailnetProvider::Burrow)) { + return TailnetProvider::Burrow; + } + if authority.is_some_and(is_managed_tailscale_authority) { + return TailnetProvider::Tailscale; + } + TailnetProvider::Headscale +} + async fn discover_well_known(client: &Client, base_url: &Url) -> Result> { let url = base_url .join(TAILNET_DISCOVERY_PATH) @@ -133,6 +213,37 @@ async fn discover_webfinger(client: &Client, email: &str, base_url: &Url) -> Res } } +async fn probe_url( + client: &Client, + url: Url, + authority: &str, + summary: &str, +) -> Result> { + let response = match client + .get(url) + .header("accept", "application/json") + .send() + .await + { + Ok(response) => response, + Err(_) => return Ok(None), + }; + + let status = response.status(); + if !status.is_success() { + return Ok(None); + } + + let detail = response.text().await.unwrap_or_default().trim().to_owned(); + Ok(Some(TailnetAuthorityProbe { + authority: authority.to_owned(), + status_code: i32::from(status.as_u16()), + summary: summary.to_owned(), + detail, + reachable: true, + })) +} + #[cfg(test)] mod tests { use axum::{routing::get, Router}; @@ -147,6 +258,13 @@ mod tests { assert!(email_domain("contact").is_err()); } + #[test] + fn detects_managed_tailscale_authority() { + assert!(is_managed_tailscale_authority("controlplane.tailscale.com")); + assert!(is_managed_tailscale_authority("https://controlplane.tailscale.com/")); + assert!(!is_managed_tailscale_authority("https://ts.burrow.net")); + } + #[tokio::test] async fn discovers_from_well_known_document() -> Result<()> { let router = Router::new().route( @@ -209,4 +327,20 @@ mod 
tests { server.abort(); Ok(()) } + + #[tokio::test] + async fn probes_custom_authority() -> Result<()> { + let router = Router::new().route("/health", get(|| async { "ok" })); + let listener = TcpListener::bind("127.0.0.1:0").await?; + let authority = format!("http://{}", listener.local_addr()?); + let server = tokio::spawn(async move { axum::serve(listener, router).await }); + + let status = probe_tailnet_authority(&authority).await?; + assert_eq!(status.authority, authority); + assert_eq!(status.status_code, 200); + assert!(status.reachable); + + server.abort(); + Ok(()) + } } diff --git a/burrow/src/daemon/instance.rs b/burrow/src/daemon/instance.rs index 1eb0629..e4e6d96 100644 --- a/burrow/src/daemon/instance.rs +++ b/burrow/src/daemon/instance.rs @@ -13,13 +13,16 @@ use tun::tokio::TunInterface; use super::{ rpc::grpc_defs::{ - networks_server::Networks, tunnel_server::Tunnel, Empty, Network, NetworkDeleteRequest, - NetworkListResponse, NetworkReorderRequest, State as RPCTunnelState, + networks_server::Networks, tailnet_control_server::TailnetControl, + tunnel_server::Tunnel, Empty, Network, NetworkDeleteRequest, NetworkListResponse, + NetworkReorderRequest, State as RPCTunnelState, TailnetDiscoverRequest, + TailnetDiscoverResponse, TailnetProbeRequest, TailnetProbeResponse, TunnelConfigurationResponse, TunnelStatusResponse, }, runtime::{ActiveTunnel, ResolvedTunnel}, }; use crate::{ + control::discovery, daemon::rpc::ServerConfig, database::{add_network, delete_network, get_connection, list_networks, reorder_network}, }; @@ -266,6 +269,47 @@ impl Networks for DaemonRPCServer { } } +#[tonic::async_trait] +impl TailnetControl for DaemonRPCServer { + async fn discover( + &self, + request: Request, + ) -> Result, RspStatus> { + let request = request.into_inner(); + let discovery = discovery::discover_tailnet(&request.email) + .await + .map_err(proc_err)?; + + Ok(Response::new(TailnetDiscoverResponse { + domain: discovery.domain, + authority: 
discovery.authority.clone(), + oidc_issuer: discovery.oidc_issuer.unwrap_or_default(), + managed: matches!( + discovery::inferred_provider(Some(&discovery.authority), Some(&discovery.provider)), + crate::control::TailnetProvider::Tailscale + ), + })) + } + + async fn probe( + &self, + request: Request, + ) -> Result, RspStatus> { + let request = request.into_inner(); + let status = discovery::probe_tailnet_authority(&request.authority) + .await + .map_err(proc_err)?; + + Ok(Response::new(TailnetProbeResponse { + authority: status.authority, + status_code: status.status_code, + summary: status.summary, + detail: status.detail, + reachable: status.reachable, + })) + } +} + fn proc_err(err: impl ToString) -> RspStatus { RspStatus::internal(err.to_string()) } diff --git a/burrow/src/daemon/mod.rs b/burrow/src/daemon/mod.rs index a016788..724e3bb 100644 --- a/burrow/src/daemon/mod.rs +++ b/burrow/src/daemon/mod.rs @@ -16,7 +16,10 @@ use tonic::transport::Server; use tracing::info; use crate::{ - daemon::rpc::grpc_defs::{networks_server::NetworksServer, tunnel_server::TunnelServer}, + daemon::rpc::grpc_defs::{ + networks_server::NetworksServer, tailnet_control_server::TailnetControlServer, + tunnel_server::TunnelServer, + }, database::get_connection, }; @@ -36,9 +39,11 @@ pub async fn daemon_main( let uds = UnixListener::bind(sock_path)?; let serve_job = tokio::spawn(async move { let uds_stream = UnixListenerStream::new(uds); + let tailnet_server = burrow_server.clone(); let _srv = Server::builder() .add_service(TunnelServer::new(burrow_server.clone())) .add_service(NetworksServer::new(burrow_server)) + .add_service(TailnetControlServer::new(tailnet_server)) .serve_with_incoming(uds_stream) .await?; Ok::<(), AhError>(()) diff --git a/burrow/src/daemon/rpc/client.rs b/burrow/src/daemon/rpc/client.rs index 06a9b45..aa84c64 100644 --- a/burrow/src/daemon/rpc/client.rs +++ b/burrow/src/daemon/rpc/client.rs @@ -5,11 +5,15 @@ use tokio::net::UnixStream; use 
tonic::transport::{Endpoint, Uri}; use tower::service_fn; -use super::grpc_defs::{networks_client::NetworksClient, tunnel_client::TunnelClient}; +use super::grpc_defs::{ + networks_client::NetworksClient, tailnet_control_client::TailnetControlClient, + tunnel_client::TunnelClient, +}; use crate::daemon::get_socket_path; pub struct BurrowClient { pub networks_client: NetworksClient, + pub tailnet_client: TailnetControlClient, pub tunnel_client: TunnelClient, } @@ -31,9 +35,11 @@ impl BurrowClient { })) .await?; let nw_client = NetworksClient::new(channel.clone()); + let tailnet_client = TailnetControlClient::new(channel.clone()); let tun_client = TunnelClient::new(channel.clone()); Ok(BurrowClient { networks_client: nw_client, + tailnet_client, tunnel_client: tun_client, }) } diff --git a/proto/burrow.proto b/proto/burrow.proto index 5b5a30b..79e8976 100644 --- a/proto/burrow.proto +++ b/proto/burrow.proto @@ -17,6 +17,11 @@ service Networks { rpc NetworkDelete (NetworkDeleteRequest) returns (Empty); } +service TailnetControl { + rpc Discover (TailnetDiscoverRequest) returns (TailnetDiscoverResponse); + rpc Probe (TailnetProbeRequest) returns (TailnetProbeResponse); +} + message NetworkReorderRequest { int32 id = 1; int32 index = 2; @@ -56,6 +61,29 @@ message Empty { } +message TailnetDiscoverRequest { + string email = 1; +} + +message TailnetDiscoverResponse { + string domain = 1; + string authority = 2; + string oidc_issuer = 3; + bool managed = 4; +} + +message TailnetProbeRequest { + string authority = 1; +} + +message TailnetProbeResponse { + string authority = 1; + int32 status_code = 2; + string summary = 3; + string detail = 4; + bool reachable = 5; +} + enum State { Stopped = 0; Running = 1; From 0c660acd1e0b61dde4a3ea80643b5df9ae381623 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Fri, 3 Apr 2026 02:09:58 -0700 Subject: [PATCH 10/59] Add daemon-owned Tailnet login flow --- Apple/Core/Client.swift | 228 ++++++++++++++++++++ Apple/UI/BurrowView.swift | 
318 ++++++++++++++++++++++++++-- Apple/UI/Networks/Network.swift | 93 ++++++++ burrow/src/auth/server/tailscale.rs | 77 ++++++- burrow/src/daemon/instance.rs | 93 +++++++- proto/burrow.proto | 31 +++ 6 files changed, 812 insertions(+), 28 deletions(-) diff --git a/Apple/Core/Client.swift b/Apple/Core/Client.swift index c426fe7..e44ebcd 100644 --- a/Apple/Core/Client.swift +++ b/Apple/Core/Client.swift @@ -68,6 +68,46 @@ public struct Burrow_TailnetProbeResponse: Sendable { public init() {} } +public struct Burrow_TailnetLoginStartRequest: Sendable { + public var accountName: String = "" + public var identityName: String = "" + public var hostname: String = "" + public var authority: String = "" + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TailnetLoginStatusRequest: Sendable { + public var sessionID: String = "" + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TailnetLoginCancelRequest: Sendable { + public var sessionID: String = "" + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TailnetLoginStatusResponse: Sendable { + public var sessionID: String = "" + public var backendState: String = "" + public var authURL: String = "" + public var running: Bool = false + public var needsLogin: Bool = false + public var tailnetName: String = "" + public var magicDNSSuffix: String = "" + public var selfDNSName: String = "" + public var tailnetIPs: [String] = [] + public var health: [String] = [] + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + extension Burrow_TailnetDiscoverRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { public static let protoMessageName: String = "burrow.TailnetDiscoverRequest" public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ @@ -195,6 +235,158 @@ extension 
Burrow_TailnetProbeResponse: SwiftProtobuf.Message, SwiftProtobuf._Mes } } +extension Burrow_TailnetLoginStartRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetLoginStartRequest" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "account_name"), + 2: .standard(proto: "identity_name"), + 3: .same(proto: "hostname"), + 4: .same(proto: "authority"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.accountName) + case 2: try decoder.decodeSingularStringField(value: &self.identityName) + case 3: try decoder.decodeSingularStringField(value: &self.hostname) + case 4: try decoder.decodeSingularStringField(value: &self.authority) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.accountName.isEmpty { + try visitor.visitSingularStringField(value: self.accountName, fieldNumber: 1) + } + if !self.identityName.isEmpty { + try visitor.visitSingularStringField(value: self.identityName, fieldNumber: 2) + } + if !self.hostname.isEmpty { + try visitor.visitSingularStringField(value: self.hostname, fieldNumber: 3) + } + if !self.authority.isEmpty { + try visitor.visitSingularStringField(value: self.authority, fieldNumber: 4) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +extension Burrow_TailnetLoginStatusRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetLoginStatusRequest" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id") + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try 
decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.sessionID) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +extension Burrow_TailnetLoginCancelRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetLoginCancelRequest" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id") + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.sessionID) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +extension Burrow_TailnetLoginStatusResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetLoginStatusResponse" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 2: .standard(proto: "backend_state"), + 3: .standard(proto: "auth_url"), + 4: .same(proto: "running"), + 5: .standard(proto: "needs_login"), + 6: .standard(proto: "tailnet_name"), + 7: .standard(proto: "magic_dns_suffix"), + 8: .standard(proto: "self_dns_name"), + 9: .standard(proto: "tailnet_ips"), + 10: .same(proto: "health"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + 
case 1: try decoder.decodeSingularStringField(value: &self.sessionID) + case 2: try decoder.decodeSingularStringField(value: &self.backendState) + case 3: try decoder.decodeSingularStringField(value: &self.authURL) + case 4: try decoder.decodeSingularBoolField(value: &self.running) + case 5: try decoder.decodeSingularBoolField(value: &self.needsLogin) + case 6: try decoder.decodeSingularStringField(value: &self.tailnetName) + case 7: try decoder.decodeSingularStringField(value: &self.magicDNSSuffix) + case 8: try decoder.decodeSingularStringField(value: &self.selfDNSName) + case 9: try decoder.decodeRepeatedStringField(value: &self.tailnetIPs) + case 10: try decoder.decodeRepeatedStringField(value: &self.health) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + if !self.backendState.isEmpty { + try visitor.visitSingularStringField(value: self.backendState, fieldNumber: 2) + } + if !self.authURL.isEmpty { + try visitor.visitSingularStringField(value: self.authURL, fieldNumber: 3) + } + if self.running { + try visitor.visitSingularBoolField(value: self.running, fieldNumber: 4) + } + if self.needsLogin { + try visitor.visitSingularBoolField(value: self.needsLogin, fieldNumber: 5) + } + if !self.tailnetName.isEmpty { + try visitor.visitSingularStringField(value: self.tailnetName, fieldNumber: 6) + } + if !self.magicDNSSuffix.isEmpty { + try visitor.visitSingularStringField(value: self.magicDNSSuffix, fieldNumber: 7) + } + if !self.selfDNSName.isEmpty { + try visitor.visitSingularStringField(value: self.selfDNSName, fieldNumber: 8) + } + if !self.tailnetIPs.isEmpty { + try visitor.visitRepeatedStringField(value: self.tailnetIPs, fieldNumber: 9) + } + if !self.health.isEmpty { + try visitor.visitRepeatedStringField(value: self.health, fieldNumber: 10) + } + try unknownFields.traverse(visitor: &visitor) + } +} + public struct 
TailnetClient: Client, GRPCClient { public let channel: GRPCChannel public var defaultCallOptions: CallOptions @@ -227,4 +419,40 @@ public struct TailnetClient: Client, GRPCClient { interceptors: [] ) } + + public func loginStart( + _ request: Burrow_TailnetLoginStartRequest, + callOptions: CallOptions? = nil + ) async throws -> Burrow_TailnetLoginStatusResponse { + try await self.performAsyncUnaryCall( + path: "/burrow.TailnetControl/LoginStart", + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: [] + ) + } + + public func loginStatus( + _ request: Burrow_TailnetLoginStatusRequest, + callOptions: CallOptions? = nil + ) async throws -> Burrow_TailnetLoginStatusResponse { + try await self.performAsyncUnaryCall( + path: "/burrow.TailnetControl/LoginStatus", + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: [] + ) + } + + public func loginCancel( + _ request: Burrow_TailnetLoginCancelRequest, + callOptions: CallOptions? = nil + ) async throws -> Burrow_Empty { + try await self.performAsyncUnaryCall( + path: "/burrow.TailnetControl/LoginCancel", + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: [] + ) + } } diff --git a/Apple/UI/BurrowView.swift b/Apple/UI/BurrowView.swift index 9938eef..b95e904 100644 --- a/Apple/UI/BurrowView.swift +++ b/Apple/UI/BurrowView.swift @@ -1,6 +1,9 @@ import BurrowConfiguration import Foundation import SwiftUI +#if canImport(AuthenticationServices) +import AuthenticationServices +#endif #if canImport(UIKit) import UIKit #elseif canImport(AppKit) @@ -309,6 +312,7 @@ private struct AccountDraft { accountName = "default" identityName = "apple" authority = TailnetProvider.tailscale.defaultAuthority ?? "" + authMode = .web } } } @@ -329,6 +333,14 @@ private struct ConfigurationSheetView: View { @State private var authorityProbeStatus: TailnetAuthorityProbeStatus? @State private var authorityProbeError: String? 
@State private var isProbingAuthority = false + @State private var tailnetLoginStatus: TailnetLoginStatus? + @State private var tailnetLoginError: String? + @State private var tailnetLoginSessionID: String? + @State private var isStartingTailnetLogin = false + @State private var tailnetPresentedAuthURL: URL? + @State private var preserveTailnetLoginSession = false + @State private var browserAuthenticator = TailnetBrowserAuthenticator() + @State private var tailnetLoginPollTask: Task? @State private var didRunAutomation = false init( @@ -397,7 +409,10 @@ private struct ConfigurationSheetView: View { .toolbar { ToolbarItem(placement: .cancellationAction) { Button("Cancel") { - dismiss() + Task { @MainActor in + await cancelTailnetLoginIfNeeded() + dismiss() + } } } #if os(iOS) @@ -446,14 +461,28 @@ private struct ConfigurationSheetView: View { .onChange(of: draft.discoveryEmail) { _, _ in resetTailnetDiscoveryFeedback() } + .onChange(of: draft.authMode) { _, newMode in + guard newMode != .web else { return } + Task { @MainActor in + await cancelTailnetLoginIfNeeded() + } + } + .onDisappear { + tailnetLoginPollTask?.cancel() + browserAuthenticator.cancel() + if !preserveTailnetLoginSession { + Task { @MainActor in + await cancelTailnetLoginIfNeeded() + } + } + } } @ViewBuilder private var tailnetSections: some View { Section("Connection") { TextField("Email address", text: $draft.discoveryEmail) - .textInputAutocapitalization(.never) - .keyboardType(.emailAddress) + .burrowEmailField() .burrowLoginField() .autocorrectionDisabled() @@ -507,22 +536,44 @@ private struct ConfigurationSheetView: View { } Section("Authentication") { - TextField("Username", text: $draft.username) - .burrowLoginField() - .autocorrectionDisabled() Picker("Authentication", selection: $draft.authMode) { ForEach(availableTailnetAuthModes) { mode in Text(mode.title).tag(mode) } } .pickerStyle(.menu) - if draft.authMode != .none { - SecureField( - draft.authMode == .password ? 
"Password" : "Preauth Key", - text: $draft.secret - ) + + if draft.authMode == .web { + Button { + startTailnetLogin() + } label: { + Label { + Text(isStartingTailnetLogin ? "Starting Sign-In" : tailnetSignInActionTitle) + } icon: { + Image(systemName: isStartingTailnetLogin ? "hourglass" : "person.badge.key") + } + } + .buttonStyle(.borderless) + .disabled(isStartingTailnetLogin || normalizedOptional(draft.authority) == nil) + + if let tailnetLoginStatus { + tailnetLoginCard(status: tailnetLoginStatus, failure: nil) + } else if let tailnetLoginError { + tailnetLoginCard(status: nil, failure: tailnetLoginError) + } + } else { + TextField("Username", text: $draft.username) + .burrowLoginField() + .autocorrectionDisabled() + if draft.authMode != .none { + SecureField( + draft.authMode == .password ? "Password" : "Preauth Key", + text: $draft.secret + ) + } } - Text("Tailnet account material stays on-device. Burrow stores the authority and credentials for daemon-managed registration and refresh.") + + Text(tailnetAuthenticationFootnote) .font(.footnote) .foregroundStyle(.secondary) } @@ -583,6 +634,9 @@ private struct ConfigurationSheetView: View { HStack(spacing: 8) { summaryBadge(isManagedTailnetAuthority ? "Managed" : "Custom") summaryBadge(draft.authMode.title) + if tailnetLoginStatus?.running == true { + summaryBadge("Signed In") + } } } } @@ -659,6 +713,52 @@ private struct ConfigurationSheetView: View { ) } + private func tailnetLoginCard( + status: TailnetLoginStatus?, + failure: String? + ) -> some View { + VStack(alignment: .leading, spacing: 6) { + if let status { + Text(status.running ? "Signed In" : status.needsLogin ? 
"Browser Sign-In Required" : "Checking Sign-In") + .font(.subheadline.weight(.medium)) + if let tailnetName = status.tailnetName, !tailnetName.isEmpty { + Text("Tailnet: \(tailnetName)") + .font(.footnote) + .foregroundStyle(.secondary) + } + if let selfDNSName = status.selfDNSName, !selfDNSName.isEmpty { + Text(selfDNSName) + .font(.footnote.monospaced()) + .foregroundStyle(.secondary) + .textSelection(.enabled) + } + if !status.tailnetIPs.isEmpty { + Text(status.tailnetIPs.joined(separator: ", ")) + .font(.footnote.monospaced()) + .foregroundStyle(.secondary) + .textSelection(.enabled) + } + if !status.health.isEmpty { + Text(status.health.joined(separator: " • ")) + .font(.footnote) + .foregroundStyle(.secondary) + } + } else if let failure { + Text("Sign-In failed") + .font(.subheadline.weight(.medium)) + .foregroundStyle(.red) + Text(failure) + .font(.footnote) + .foregroundStyle(.secondary) + } + } + .padding(12) + .background( + RoundedRectangle(cornerRadius: 16) + .fill(.thinMaterial) + ) + } + private func summaryBadge(_ label: String) -> some View { Text(label) .font(.caption.weight(.medium)) @@ -813,6 +913,9 @@ private struct ConfigurationSheetView: View { if normalizedOptional(draft.authority) == nil { return true } + if draft.authMode == .web { + return tailnetLoginStatus?.running != true + } if draft.authMode != .none && normalizedOptional(draft.secret) == nil { return true } @@ -897,8 +1000,9 @@ private struct ConfigurationSheetView: View { } private func submitTailnet() async throws { - let secret = draft.authMode == .none ? nil : draft.secret + let secret = (draft.authMode == .none || draft.authMode == .web) ? 
nil : draft.secret let username = normalizedOptional(draft.username) + preserveTailnetLoginSession = draft.authMode == .web && tailnetLoginStatus?.running == true try await saveTailnetAccount(secret: secret, username: username) dismiss() } @@ -922,7 +1026,7 @@ private struct ConfigurationSheetView: View { switch automation.action { case .tailnetLogin: applyTailnetDefaults(for: .tailscale) - probeTailnetAuthority() + startTailnetLogin() case .headscaleProbe: draft.authority = automation.authority ?? TailnetProvider.headscale.defaultAuthority ?? draft.authority probeTailnetAuthority() @@ -950,6 +1054,10 @@ private struct ConfigurationSheetView: View { "Auth: \(draft.authMode.title)", ] + if draft.authMode == .web, tailnetLoginStatus?.running == true { + noteParts.append("Browser sign-in complete") + } + do { let networkID = try await networkViewModel.addTailnetNetwork(payload: payload) noteParts.append("Linked to daemon network #\(networkID)") @@ -1003,7 +1111,36 @@ private struct ConfigurationSheetView: View { resetTailnetDiscoveryFeedback() draft.authority = provider.defaultAuthority ?? "" if !availableTailnetAuthModes.contains(draft.authMode) { - draft.authMode = .none + draft.authMode = .web + } + } + + private func startTailnetLogin() { + guard let authority = normalizedOptional(draft.authority) else { + tailnetLoginStatus = nil + tailnetLoginError = "Enter a server URL first." 
+ return + } + + isStartingTailnetLogin = true + tailnetLoginError = nil + preserveTailnetLoginSession = false + + Task { @MainActor in + defer { isStartingTailnetLogin = false } + do { + let status = try await networkViewModel.startTailnetLogin( + accountName: normalized(draft.accountName, fallback: "default"), + identityName: normalized(draft.identityName, fallback: "apple"), + hostname: normalizedOptional(draft.hostname), + authority: authority + ) + tailnetLoginSessionID = status.sessionID + updateTailnetLoginStatus(status) + beginTailnetLoginPolling(sessionID: status.sessionID) + } catch { + tailnetLoginError = error.localizedDescription + } } } @@ -1031,6 +1168,7 @@ private struct ConfigurationSheetView: View { private func resetAuthorityProbe() { authorityProbeStatus = nil authorityProbeError = nil + tailnetLoginError = nil } private func resetTailnetDiscoveryFeedback() { @@ -1062,6 +1200,76 @@ private struct ConfigurationSheetView: View { } } + private func beginTailnetLoginPolling(sessionID: String) { + tailnetLoginPollTask?.cancel() + tailnetLoginPollTask = Task { @MainActor in + while !Task.isCancelled { + do { + let status = try await networkViewModel.tailnetLoginStatus(sessionID: sessionID) + updateTailnetLoginStatus(status) + if status.running { + tailnetLoginPollTask = nil + return + } + } catch { + tailnetLoginError = error.localizedDescription + tailnetLoginPollTask = nil + return + } + try? 
await Task.sleep(for: .seconds(1)) + } + } + } + + private func updateTailnetLoginStatus(_ status: TailnetLoginStatus) { + tailnetLoginStatus = status + tailnetLoginError = nil + tailnetLoginSessionID = status.sessionID + + if status.running { + browserAuthenticator.cancel() + tailnetPresentedAuthURL = nil + return + } + + guard let authURL = status.authURL else { + return + } + + if tailnetPresentedAuthURL != authURL { + tailnetPresentedAuthURL = authURL + browserAuthenticator.start(url: authURL) { [sessionID = status.sessionID] in + Task { @MainActor in + if tailnetLoginStatus?.running != true { + tailnetLoginSessionID = sessionID + } + } + } + } + } + + private func cancelTailnetLoginIfNeeded() async { + tailnetLoginPollTask?.cancel() + tailnetLoginPollTask = nil + browserAuthenticator.cancel() + tailnetPresentedAuthURL = nil + + guard tailnetLoginStatus?.running != true, + let sessionID = tailnetLoginSessionID + else { + return + } + + do { + try await networkViewModel.cancelTailnetLogin(sessionID: sessionID) + } catch { + tailnetLoginError = error.localizedDescription + } + + tailnetLoginStatus = nil + tailnetLoginSessionID = nil + } + private func pasteWireGuardConfiguration() { guard let clipboardString else { return } draft.wireGuardConfig = clipboardString @@ -1108,7 +1316,28 @@ private struct ConfigurationSheetView: View { } private var availableTailnetAuthModes: [AccountAuthMode] { - [.none, .password, .preauthKey] + [.web, .none, .password, .preauthKey] + } + + private var tailnetSignInActionTitle: String { + if tailnetLoginStatus?.running == true { + return "Signed In" + } + if tailnetLoginSessionID != nil { + return "Resume Sign-In" + } + return "Start Sign-In" + } + + private var tailnetAuthenticationFootnote: String { + switch draft.authMode { + case .web: + return "Burrow asks the daemon to start a Tailnet browser sign-in session, then closes it locally once the daemon reports the device is running." + case .none: + return "Save the authority only. 
Useful when the control plane handles authentication elsewhere." + case .password, .preauthKey: + return "Tailnet account material stays on-device. Burrow stores the authority and credentials for daemon-managed registration and refresh." + } } private var inferredTailnetProvider: TailnetProvider { @@ -1215,8 +1444,65 @@ private extension View { self #endif } + + @ViewBuilder + func burrowEmailField() -> some View { + #if os(iOS) + textInputAutocapitalization(.never) + .keyboardType(.emailAddress) + #else + self + #endif + } } +#if canImport(AuthenticationServices) +@MainActor +private final class TailnetBrowserAuthenticator: NSObject { + private var session: ASWebAuthenticationSession? + + func start(url: URL, onDismiss: @escaping @Sendable () -> Void) { + cancel() + let session = ASWebAuthenticationSession(url: url, callbackURLScheme: nil) { _, _ in + onDismiss() + } + session.presentationContextProvider = self + session.prefersEphemeralWebBrowserSession = false + self.session = session + _ = session.start() + } + + func cancel() { + session?.cancel() + session = nil + } +} + +extension TailnetBrowserAuthenticator: ASWebAuthenticationPresentationContextProviding { + func presentationAnchor(for session: ASWebAuthenticationSession) -> ASPresentationAnchor { + #if canImport(AppKit) + return NSApplication.shared.keyWindow + ?? NSApplication.shared.windows.first + ?? 
ASPresentationAnchor() + #elseif canImport(UIKit) + return ASPresentationAnchor() + #else + return ASPresentationAnchor() + #endif + } +} +#else +@MainActor +private final class TailnetBrowserAuthenticator { + func start(url: URL, onDismiss: @escaping @Sendable () -> Void) { + _ = url + onDismiss() + } + + func cancel() {} +} +#endif + private struct BurrowAutomationConfig { enum Action: String { case tailnetLogin = "tailnet-login" diff --git a/Apple/UI/Networks/Network.swift b/Apple/UI/Networks/Network.swift index b048add..32f0b8c 100644 --- a/Apple/UI/Networks/Network.swift +++ b/Apple/UI/Networks/Network.swift @@ -40,6 +40,19 @@ struct TailnetAuthorityProbeStatus: Sendable { var detail: String? } +struct TailnetLoginStatus: Sendable { + var sessionID: String + var backendState: String + var authURL: URL? + var running: Bool + var needsLogin: Bool + var tailnetName: String? + var magicDNSSuffix: String? + var selfDNSName: String? + var tailnetIPs: [String] + var health: [String] +} + enum TailnetDiscoveryClient { static func discover(email: String, socketURL: URL) async throws -> TailnetDiscoveryResponse { var request = Burrow_TailnetDiscoverRequest() @@ -74,6 +87,58 @@ enum TailnetAuthorityProbeClient { } } +enum TailnetLoginClient { + static func start( + accountName: String, + identityName: String, + hostname: String?, + authority: String, + socketURL: URL + ) async throws -> TailnetLoginStatus { + var request = Burrow_TailnetLoginStartRequest() + request.accountName = accountName + request.identityName = identityName + request.hostname = hostname ?? 
"" + request.authority = authority + let response = try await TailnetClient.unix(socketURL: socketURL).loginStart(request) + return decode(response) + } + + static func status(sessionID: String, socketURL: URL) async throws -> TailnetLoginStatus { + var request = Burrow_TailnetLoginStatusRequest() + request.sessionID = sessionID + let response = try await TailnetClient.unix(socketURL: socketURL).loginStatus(request) + return decode(response) + } + + static func cancel(sessionID: String, socketURL: URL) async throws { + var request = Burrow_TailnetLoginCancelRequest() + request.sessionID = sessionID + _ = try await TailnetClient.unix(socketURL: socketURL).loginCancel(request) + } + + private static func decode(_ response: Burrow_TailnetLoginStatusResponse) -> TailnetLoginStatus { + TailnetLoginStatus( + sessionID: response.sessionID, + backendState: response.backendState, + authURL: URL(string: response.authURL.trimmingCharacters(in: .whitespacesAndNewlines)), + running: response.running, + needsLogin: response.needsLogin, + tailnetName: response.tailnetName.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + ? nil + : response.tailnetName, + magicDNSSuffix: response.magicDNSSuffix.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + ? nil + : response.magicDNSSuffix, + selfDNSName: response.selfDNSName.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + ? 
nil + : response.selfDNSName, + tailnetIPs: response.tailnetIPs, + health: response.health + ) + } +} + @Observable @MainActor final class NetworkViewModel: Sendable { @@ -118,6 +183,32 @@ final class NetworkViewModel: Sendable { return try await TailnetAuthorityProbeClient.probe(authority: authority, socketURL: socketURL) } + func startTailnetLogin( + accountName: String, + identityName: String, + hostname: String?, + authority: String + ) async throws -> TailnetLoginStatus { + let socketURL = try socketURLResult.get() + return try await TailnetLoginClient.start( + accountName: accountName, + identityName: identityName, + hostname: hostname, + authority: authority, + socketURL: socketURL + ) + } + + func tailnetLoginStatus(sessionID: String) async throws -> TailnetLoginStatus { + let socketURL = try socketURLResult.get() + return try await TailnetLoginClient.status(sessionID: sessionID, socketURL: socketURL) + } + + func cancelTailnetLogin(sessionID: String) async throws { + let socketURL = try socketURLResult.get() + try await TailnetLoginClient.cancel(sessionID: sessionID, socketURL: socketURL) + } + private func addNetwork(type: Burrow_NetworkType, payload: Data) async throws -> Int32 { let socketURL = try socketURLResult.get() let networkID = nextNetworkID @@ -317,6 +408,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable { } enum AccountAuthMode: String, CaseIterable, Codable, Identifiable, Sendable { + case web case none case password case preauthKey @@ -325,6 +417,7 @@ enum AccountAuthMode: String, CaseIterable, Codable, Identifiable, Sendable { var title: String { switch self { + case .web: "Browser Sign-In" case .none: "None" case .password: "Password" case .preauthKey: "Preauth Key" diff --git a/burrow/src/auth/server/tailscale.rs b/burrow/src/auth/server/tailscale.rs index fbe1980..55516e1 100644 --- a/burrow/src/auth/server/tailscale.rs +++ b/burrow/src/auth/server/tailscale.rs @@ -82,11 +82,22 @@ impl 
TailscaleBridgeManager { let key = session_key(&request.account_name, &request.identity_name); if let Some(existing) = self.sessions.lock().await.get(&key).cloned() { - let status = self.fetch_status(existing.as_ref()).await?; - return Ok(TailscaleLoginStartResponse { - session_id: existing.session_id.clone(), - status, - }); + match self.fetch_status(existing.as_ref()).await { + Ok(status) => { + return Ok(TailscaleLoginStartResponse { + session_id: existing.session_id.clone(), + status, + }); + } + Err(err) => { + log::warn!( + "tailscale login session {} is stale, restarting: {err}", + existing.session_id + ); + self.sessions.lock().await.remove(&key); + let _ = self.shutdown_session(existing.as_ref()).await; + } + } } let state_dir = state_root().join(session_dir_name(&request)); @@ -155,11 +166,28 @@ impl TailscaleBridgeManager { }; match session { - Some(session) => self.fetch_status(session.as_ref()).await.map(Some), + Some(session) => match self.fetch_status(session.as_ref()).await { + Ok(status) => Ok(Some(status)), + Err(err) => { + self.remove_session_by_id(session_id).await; + Err(err) + } + }, None => Ok(None), } } + pub async fn cancel(&self, session_id: &str) -> Result { + let session = self.remove_session_by_id(session_id).await; + match session { + Some(session) => { + self.shutdown_session(session.as_ref()).await?; + Ok(true) + } + None => Ok(false), + } + } + async fn wait_for_status(&self, session: &ManagedSession) -> Result { let mut last_error = None; let mut last_status = None; @@ -201,6 +229,38 @@ impl TailscaleBridgeManager { .await .context("invalid tailscale helper status response") } + + async fn remove_session_by_id(&self, session_id: &str) -> Option> { + let mut sessions = self.sessions.lock().await; + let key = sessions + .iter() + .find_map(|(key, session)| (session.session_id == session_id).then(|| key.clone()))?; + sessions.remove(&key) + } + + async fn shutdown_session(&self, session: &ManagedSession) -> Result<()> { + let _ = 
self + .client + .post(format!("{}/shutdown", session.listen_url)) + .send() + .await; + + for _ in 0..10 { + let mut child = session.child.lock().await; + if child.try_wait()?.is_some() { + return Ok(()); + } + drop(child); + tokio::time::sleep(Duration::from_millis(100)).await; + } + + let mut child = session.child.lock().await; + child + .start_kill() + .context("failed to kill tailscale helper")?; + let _ = child.wait().await; + Ok(()) + } } fn helper_command(request: &TailscaleLoginStartRequest, state_dir: &Path) -> Result { @@ -249,7 +309,10 @@ fn state_root() -> PathBuf { .join("Burrow") .join("tailscale"); } - home.join(".local").join("share").join("burrow").join("tailscale") + home.join(".local") + .join("share") + .join("burrow") + .join("tailscale") } fn session_dir_name(request: &TailscaleLoginStartRequest) -> String { diff --git a/burrow/src/daemon/instance.rs b/burrow/src/daemon/instance.rs index e4e6d96..0a23ddc 100644 --- a/burrow/src/daemon/instance.rs +++ b/burrow/src/daemon/instance.rs @@ -13,15 +13,19 @@ use tun::tokio::TunInterface; use super::{ rpc::grpc_defs::{ - networks_server::Networks, tailnet_control_server::TailnetControl, - tunnel_server::Tunnel, Empty, Network, NetworkDeleteRequest, NetworkListResponse, - NetworkReorderRequest, State as RPCTunnelState, TailnetDiscoverRequest, - TailnetDiscoverResponse, TailnetProbeRequest, TailnetProbeResponse, - TunnelConfigurationResponse, TunnelStatusResponse, + networks_server::Networks, tailnet_control_server::TailnetControl, tunnel_server::Tunnel, + Empty, Network, NetworkDeleteRequest, NetworkListResponse, NetworkReorderRequest, + State as RPCTunnelState, TailnetDiscoverRequest, TailnetDiscoverResponse, + TailnetProbeRequest, TailnetProbeResponse, TunnelConfigurationResponse, + TunnelStatusResponse, }, runtime::{ActiveTunnel, ResolvedTunnel}, }; use crate::{ + auth::server::tailscale::{ + TailscaleBridgeManager, TailscaleLoginStartRequest as BridgeLoginStartRequest, + TailscaleLoginStatus, + }, 
control::discovery, daemon::rpc::ServerConfig, database::{add_network, delete_network, get_connection, list_networks, reorder_network}, @@ -49,6 +53,7 @@ pub struct DaemonRPCServer { wg_state_chan: (watch::Sender, watch::Receiver), network_update_chan: (watch::Sender<()>, watch::Receiver<()>), active_tunnel: Arc>>, + tailnet_login: TailscaleBridgeManager, } impl DaemonRPCServer { @@ -59,6 +64,7 @@ impl DaemonRPCServer { wg_state_chan: watch::channel(RunState::Idle), network_update_chan: watch::channel(()), active_tunnel: Arc::new(RwLock::new(None)), + tailnet_login: TailscaleBridgeManager::default(), }) } @@ -130,6 +136,11 @@ impl DaemonRPCServer { Ok(()) } + + fn tailnet_control_url(authority: &str) -> Option { + let authority = discovery::normalize_authority(authority); + (!discovery::is_managed_tailscale_authority(&authority)).then_some(authority) + } } #[tonic::async_trait] @@ -308,6 +319,60 @@ impl TailnetControl for DaemonRPCServer { reachable: status.reachable, })) } + + async fn login_start( + &self, + request: Request, + ) -> Result, RspStatus> { + let request = request.into_inner(); + let response = self + .tailnet_login + .start_login(BridgeLoginStartRequest { + account_name: request.account_name, + identity_name: request.identity_name, + hostname: (!request.hostname.trim().is_empty()).then_some(request.hostname), + control_url: Self::tailnet_control_url(&request.authority), + }) + .await + .map_err(proc_err)?; + + Ok(Response::new(tailnet_login_rsp( + response.session_id, + response.status, + ))) + } + + async fn login_status( + &self, + request: Request, + ) -> Result, RspStatus> { + let request = request.into_inner(); + let status = self + .tailnet_login + .status(&request.session_id) + .await + .map_err(proc_err)?; + let Some(status) = status else { + return Err(RspStatus::not_found("tailnet login session not found")); + }; + Ok(Response::new(tailnet_login_rsp(request.session_id, status))) + } + + async fn login_cancel( + &self, + request: Request, + 
) -> Result, RspStatus> { + let request = request.into_inner(); + let canceled = self + .tailnet_login + .cancel(&request.session_id) + .await + .map_err(proc_err)?; + if !canceled { + return Err(RspStatus::not_found("tailnet login session not found")); + } + Ok(Response::new(Empty {})) + } } fn proc_err(err: impl ToString) -> RspStatus { @@ -327,3 +392,21 @@ fn status_rsp(state: RunState) -> TunnelStatusResponse { start: None, // TODO: Add timestamp } } + +fn tailnet_login_rsp( + session_id: String, + status: TailscaleLoginStatus, +) -> super::rpc::grpc_defs::TailnetLoginStatusResponse { + super::rpc::grpc_defs::TailnetLoginStatusResponse { + session_id, + backend_state: status.backend_state, + auth_url: status.auth_url.unwrap_or_default(), + running: status.running, + needs_login: status.needs_login, + tailnet_name: status.tailnet_name.unwrap_or_default(), + magic_dns_suffix: status.magic_dns_suffix.unwrap_or_default(), + self_dns_name: status.self_dns_name.unwrap_or_default(), + tailnet_ips: status.tailscale_ips, + health: status.health, + } +} diff --git a/proto/burrow.proto b/proto/burrow.proto index 79e8976..a590cb1 100644 --- a/proto/burrow.proto +++ b/proto/burrow.proto @@ -20,6 +20,9 @@ service Networks { service TailnetControl { rpc Discover (TailnetDiscoverRequest) returns (TailnetDiscoverResponse); rpc Probe (TailnetProbeRequest) returns (TailnetProbeResponse); + rpc LoginStart (TailnetLoginStartRequest) returns (TailnetLoginStatusResponse); + rpc LoginStatus (TailnetLoginStatusRequest) returns (TailnetLoginStatusResponse); + rpc LoginCancel (TailnetLoginCancelRequest) returns (Empty); } message NetworkReorderRequest { @@ -84,6 +87,34 @@ message TailnetProbeResponse { bool reachable = 5; } +message TailnetLoginStartRequest { + string account_name = 1; + string identity_name = 2; + string hostname = 3; + string authority = 4; +} + +message TailnetLoginStatusRequest { + string session_id = 1; +} + +message TailnetLoginCancelRequest { + string session_id = 
1; +} + +message TailnetLoginStatusResponse { + string session_id = 1; + string backend_state = 2; + string auth_url = 3; + bool running = 4; + bool needs_login = 5; + string tailnet_name = 6; + string magic_dns_suffix = 7; + string self_dns_name = 8; + repeated string tailnet_ips = 9; + repeated string health = 10; +} + enum State { Stopped = 0; Running = 1; From 75bcfaf6559bec939a07480c14dbab07438e0e14 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Fri, 3 Apr 2026 03:03:17 -0700 Subject: [PATCH 11/59] Add Tailnet UI auth test flow --- Apple/AppUITests/BurrowUITests.swift | 232 ++++++++++++++ Apple/Burrow.xcodeproj/project.pbxproj | 95 ++++++ .../xcshareddata/xcschemes/App.xcscheme | 15 +- Apple/Configuration/UITests.xcconfig | 14 + Apple/UI/BurrowView.swift | 10 + Scripts/authentik-sync-burrow-directory.sh | 16 +- Scripts/authentik-sync-tailnet-auth-flow.sh | 294 ++++++++++++++++++ Scripts/run-ios-tailnet-ui-tests.sh | 73 +++++ contributors.nix | 13 + nixos/hosts/burrow-forge/default.nix | 11 + nixos/modules/burrow-authentik.nix | 93 +++++- secrets.nix | 1 + secrets/infra/authentik-ui-test-password.age | 9 + 13 files changed, 872 insertions(+), 4 deletions(-) create mode 100644 Apple/AppUITests/BurrowUITests.swift create mode 100644 Apple/Configuration/UITests.xcconfig create mode 100755 Scripts/authentik-sync-tailnet-auth-flow.sh create mode 100755 Scripts/run-ios-tailnet-ui-tests.sh create mode 100644 secrets/infra/authentik-ui-test-password.age diff --git a/Apple/AppUITests/BurrowUITests.swift b/Apple/AppUITests/BurrowUITests.swift new file mode 100644 index 0000000..f9dbeae --- /dev/null +++ b/Apple/AppUITests/BurrowUITests.swift @@ -0,0 +1,232 @@ +import XCTest + +@MainActor +final class BurrowTailnetLoginUITests: XCTestCase { + override func setUpWithError() throws { + continueAfterFailure = false + } + + func testTailnetLoginThroughAuthentikWebSession() throws { + let email = try requiredEnvironment("BURROW_UI_TEST_EMAIL") + let username = 
ProcessInfo.processInfo.environment["BURROW_UI_TEST_USERNAME"] ?? email + let password = try requiredEnvironment("BURROW_UI_TEST_PASSWORD") + + let app = XCUIApplication() + app.launch() + + let tailnetButton = app.buttons["quick-add-tailnet"] + XCTAssertTrue(tailnetButton.waitForExistence(timeout: 15), "Tailnet add button did not appear") + tailnetButton.tap() + + let discoveryField = app.textFields["tailnet-discovery-email"] + XCTAssertTrue(discoveryField.waitForExistence(timeout: 10), "Tailnet discovery email field did not appear") + replaceText(in: discoveryField, with: email) + + let findServerButton = app.buttons["tailnet-find-server"] + XCTAssertTrue(findServerButton.waitForExistence(timeout: 5), "Find Server button did not appear") + findServerButton.tap() + + let discoveryCard = app.otherElements["tailnet-discovery-card"] + XCTAssertTrue(discoveryCard.waitForExistence(timeout: 20), "Tailnet discovery result did not appear") + + let authorityField = app.textFields["tailnet-authority"] + XCTAssertTrue(authorityField.waitForExistence(timeout: 10), "Tailnet authority field did not appear") + XCTAssertTrue( + waitForFieldValue(authorityField, containing: "ts.burrow.net", timeout: 20), + "Tailnet authority was not populated from discovery" + ) + + let probeButton = app.buttons["tailnet-check-connection"] + XCTAssertTrue(probeButton.waitForExistence(timeout: 5), "Check Connection button did not appear") + probeButton.tap() + + let probeCard = app.otherElements["tailnet-authority-probe-card"] + XCTAssertTrue(probeCard.waitForExistence(timeout: 20), "Tailnet connection probe did not complete") + + let signInButton = app.buttons["tailnet-start-sign-in"] + XCTAssertTrue(signInButton.waitForExistence(timeout: 10), "Tailnet sign-in button did not appear") + signInButton.tap() + + acceptAuthenticationPromptIfNeeded(in: app) + + let webSession = webAuthenticationSession() + XCTAssertTrue(webSession.waitForExistence(timeout: 20), "Safari authentication session did not 
appear") + + signIntoAuthentik(in: webSession, username: username, password: password) + + app.activate() + XCTAssertTrue( + waitForButtonLabel(app.buttons["tailnet-start-sign-in"], equals: "Signed In", timeout: 60), + "Tailnet sign-in never reached the running state" + ) + } + + private func acceptAuthenticationPromptIfNeeded(in app: XCUIApplication) { + let springboard = XCUIApplication(bundleIdentifier: "com.apple.springboard") + let promptCandidates = [ + springboard.buttons["Continue"], + springboard.buttons["Allow"], + app.buttons["Continue"], + app.buttons["Allow"], + ] + + for button in promptCandidates where button.waitForExistence(timeout: 3) { + button.tap() + return + } + } + + private func webAuthenticationSession() -> XCUIApplication { + let safariViewService = XCUIApplication(bundleIdentifier: "com.apple.SafariViewService") + if safariViewService.waitForExistence(timeout: 5) { + return safariViewService + } + + let safari = XCUIApplication(bundleIdentifier: "com.apple.mobilesafari") + _ = safari.waitForExistence(timeout: 5) + return safari + } + + private func signIntoAuthentik(in webSession: XCUIApplication, username: String, password: String) { + let usernameField = firstExistingElement( + in: webSession, + queries: [ + { $0.textFields["Username"] }, + { $0.textFields["Email or Username"] }, + { $0.textFields["Email address"] }, + { $0.textFields["Email"] }, + { $0.webViews.textFields["Username"] }, + { $0.webViews.textFields["Email or Username"] }, + { $0.descendants(matching: .textField).firstMatch }, + ], + timeout: 25 + ) + XCTAssertTrue(usernameField.exists, "Authentik username field did not appear") + replaceText(in: usernameField, with: username) + + let immediatePasswordField = firstExistingSecureField(in: webSession, timeout: 2) + if immediatePasswordField.exists { + replaceSecureText(in: immediatePasswordField, with: password) + tapFirstExistingButton( + in: webSession, + titles: ["Continue", "Sign In", "Log in", "Login"], + timeout: 5 + 
) + return + } + + tapFirstExistingButton( + in: webSession, + titles: ["Continue", "Next", "Sign In", "Log in", "Login"], + timeout: 5 + ) + + let passwordField = firstExistingSecureField(in: webSession, timeout: 20) + XCTAssertTrue(passwordField.exists, "Authentik password field did not appear") + replaceSecureText(in: passwordField, with: password) + tapFirstExistingButton( + in: webSession, + titles: ["Continue", "Sign In", "Log in", "Login"], + timeout: 5 + ) + } + + private func firstExistingSecureField(in app: XCUIApplication, timeout: TimeInterval) -> XCUIElement { + let candidates = [ + app.secureTextFields["Password"], + app.secureTextFields["Password or Token"], + app.webViews.secureTextFields["Password"], + app.webViews.secureTextFields["Password or Token"], + app.descendants(matching: .secureTextField).firstMatch, + ] + + return firstExistingElement(from: candidates, timeout: timeout) + } + + private func tapFirstExistingButton( + in app: XCUIApplication, + titles: [String], + timeout: TimeInterval + ) { + let candidates = titles.flatMap { title in + [ + app.buttons[title], + app.webViews.buttons[title], + ] + } + [app.descendants(matching: .button).firstMatch] + + let button = firstExistingElement(from: candidates, timeout: timeout) + XCTAssertTrue(button.exists, "Expected one of \(titles.joined(separator: ", ")) to appear") + button.tap() + } + + private func requiredEnvironment(_ key: String) throws -> String { + guard let value = ProcessInfo.processInfo.environment[key], + !value.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + else { + throw XCTSkip("Missing required UI test environment variable \(key)") + } + return value + } + + private func waitForFieldValue( + _ field: XCUIElement, + containing substring: String, + timeout: TimeInterval + ) -> Bool { + let predicate = NSPredicate(format: "value CONTAINS %@", substring) + let expectation = XCTNSPredicateExpectation(predicate: predicate, object: field) + return XCTWaiter.wait(for: 
[expectation], timeout: timeout) == .completed + } + + private func waitForButtonLabel( + _ button: XCUIElement, + equals expected: String, + timeout: TimeInterval + ) -> Bool { + let predicate = NSPredicate(format: "label == %@", expected) + let expectation = XCTNSPredicateExpectation(predicate: predicate, object: button) + return XCTWaiter.wait(for: [expectation], timeout: timeout) == .completed + } + + private func firstExistingElement( + in app: XCUIApplication, + queries: [(XCUIApplication) -> XCUIElement], + timeout: TimeInterval + ) -> XCUIElement { + firstExistingElement(from: queries.map { $0(app) }, timeout: timeout) + } + + private func firstExistingElement(from candidates: [XCUIElement], timeout: TimeInterval) -> XCUIElement { + let deadline = Date().addingTimeInterval(timeout) + repeat { + for candidate in candidates where candidate.exists { + return candidate + } + RunLoop.current.run(until: Date().addingTimeInterval(0.2)) + } while Date() < deadline + + return candidates[0] + } + + private func replaceText(in element: XCUIElement, with value: String) { + element.tap() + clearText(in: element) + element.typeText(value) + } + + private func replaceSecureText(in element: XCUIElement, with value: String) { + element.tap() + clearText(in: element) + element.typeText(value) + } + + private func clearText(in element: XCUIElement) { + guard let currentValue = element.value as? 
String, !currentValue.isEmpty else { + return + } + + let deleteSequence = String(repeating: XCUIKeyboardKey.delete.rawValue, count: currentValue.count) + element.typeText(deleteSequence) + } +} diff --git a/Apple/Burrow.xcodeproj/project.pbxproj b/Apple/Burrow.xcodeproj/project.pbxproj index 9897f79..83d32e0 100644 --- a/Apple/Burrow.xcodeproj/project.pbxproj +++ b/Apple/Burrow.xcodeproj/project.pbxproj @@ -8,6 +8,7 @@ /* Begin PBXBuildFile section */ D00AA8972A4669BC005C8102 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = D00AA8962A4669BC005C8102 /* AppDelegate.swift */; }; + D11000012F70000100112233 /* BurrowUITests.swift in Sources */ = {isa = PBXBuildFile; fileRef = D11000042F70000100112233 /* BurrowUITests.swift */; }; D020F65829E4A697002790F6 /* PacketTunnelProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = D020F65729E4A697002790F6 /* PacketTunnelProvider.swift */; }; D020F65D29E4A697002790F6 /* BurrowNetworkExtension.appex in Embed Foundation Extensions */ = {isa = PBXBuildFile; fileRef = D020F65329E4A697002790F6 /* BurrowNetworkExtension.appex */; settings = {ATTRIBUTES = (RemoveHeadersOnCopy, ); }; }; D03383AD2C8E67E300F7C44E /* SwiftProtobuf in Frameworks */ = {isa = PBXBuildFile; productRef = D078F7E22C8DA375008A8CEC /* SwiftProtobuf */; }; @@ -49,6 +50,13 @@ /* End PBXBuildFile section */ /* Begin PBXContainerItemProxy section */ + D11000022F70000100112233 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = D05B9F6A29E39EEC008CB1F9 /* Project object */; + proxyType = 1; + remoteGlobalIDString = D05B9F7129E39EEC008CB1F9; + remoteInfo = App; + }; D020F65B29E4A697002790F6 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = D05B9F6A29E39EEC008CB1F9 /* Project object */; @@ -130,6 +138,9 @@ /* Begin PBXFileReference section */ D00117422B30348D00D87C25 /* Configuration.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = 
Configuration.xcconfig; sourceTree = ""; }; D00AA8962A4669BC005C8102 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = ""; }; + D11000032F70000100112233 /* BurrowUITests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = BurrowUITests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + D11000042F70000100112233 /* BurrowUITests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = BurrowUITests.swift; sourceTree = ""; }; + D11000052F70000100112233 /* UITests.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = UITests.xcconfig; sourceTree = ""; }; D020F63D29E4A1FF002790F6 /* Identity.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Identity.xcconfig; sourceTree = ""; }; D020F64029E4A1FF002790F6 /* Compiler.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Compiler.xcconfig; sourceTree = ""; }; D020F64229E4A1FF002790F6 /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; @@ -182,6 +193,13 @@ /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ + D11000062F70000100112233 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; D020F65029E4A697002790F6 /* Frameworks */ = { isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; @@ -243,6 +261,7 @@ D0D4E4F72C8D941D007F820A /* Framework.xcconfig */, D020F64029E4A1FF002790F6 /* Compiler.xcconfig */, D0D4E4F62C8D932D007F820A /* Debug.xcconfig */, + D11000052F70000100112233 /* UITests.xcconfig */, D04A3E1D2BAF465F0043EC85 /* Version.xcconfig */, D020F64229E4A1FF002790F6 /* Info.plist */, D0D4E5912C8D9D0A007F820A /* Constants */, @@ -268,6 +287,7 @@ 
isa = PBXGroup; children = ( D05B9F7429E39EEC008CB1F9 /* App */, + D11000072F70000100112233 /* AppUITests */, D020F65629E4A697002790F6 /* NetworkExtension */, D0D4E49C2C8D921A007F820A /* Core */, D0D4E4AD2C8D921A007F820A /* UI */, @@ -281,6 +301,7 @@ isa = PBXGroup; children = ( D05B9F7229E39EEC008CB1F9 /* Burrow.app */, + D11000032F70000100112233 /* BurrowUITests.xctest */, D020F65329E4A697002790F6 /* BurrowNetworkExtension.appex */, D0BCC6032A09535900AD070D /* libburrow.a */, D0D4E5312C8D996F007F820A /* BurrowCore.framework */, @@ -303,6 +324,14 @@ path = App; sourceTree = ""; }; + D11000072F70000100112233 /* AppUITests */ = { + isa = PBXGroup; + children = ( + D11000042F70000100112233 /* BurrowUITests.swift */, + ); + path = AppUITests; + sourceTree = ""; + }; D0B98FD729FDDB57004E7149 /* libburrow */ = { isa = PBXGroup; children = ( @@ -375,6 +404,24 @@ /* End PBXGroup section */ /* Begin PBXNativeTarget section */ + D11000082F70000100112233 /* BurrowUITests */ = { + isa = PBXNativeTarget; + buildConfigurationList = D110000E2F70000100112233 /* Build configuration list for PBXNativeTarget "BurrowUITests" */; + buildPhases = ( + D110000A2F70000100112233 /* Sources */, + D11000062F70000100112233 /* Frameworks */, + D11000092F70000100112233 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + D110000B2F70000100112233 /* PBXTargetDependency */, + ); + name = BurrowUITests; + productName = BurrowUITests; + productReference = D11000032F70000100112233 /* BurrowUITests.xctest */; + productType = "com.apple.product-type.bundle.ui-testing"; + }; D020F65229E4A697002790F6 /* NetworkExtension */ = { isa = PBXNativeTarget; buildConfigurationList = D020F65E29E4A697002790F6 /* Build configuration list for PBXNativeTarget "NetworkExtension" */; @@ -490,6 +537,10 @@ LastSwiftUpdateCheck = 1600; LastUpgradeCheck = 1520; TargetAttributes = { + D11000082F70000100112233 = { + CreatedOnToolsVersion = 16.0; + TestTargetID = D05B9F7129E39EEC008CB1F9; + }; 
D020F65229E4A697002790F6 = { CreatedOnToolsVersion = 14.3; }; @@ -522,6 +573,7 @@ projectRoot = ""; targets = ( D05B9F7129E39EEC008CB1F9 /* App */, + D11000082F70000100112233 /* BurrowUITests */, D020F65229E4A697002790F6 /* NetworkExtension */, D0D4E5502C8D9BF2007F820A /* UI */, D0D4E5302C8D996F007F820A /* Core */, @@ -531,6 +583,13 @@ /* End PBXProject section */ /* Begin PBXResourcesBuildPhase section */ + D11000092F70000100112233 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; D05B9F7029E39EEC008CB1F9 /* Resources */ = { isa = PBXResourcesBuildPhase; buildActionMask = 2147483647; @@ -594,6 +653,14 @@ /* End PBXShellScriptBuildPhase section */ /* Begin PBXSourcesBuildPhase section */ + D110000A2F70000100112233 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + D11000012F70000100112233 /* BurrowUITests.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; D020F64F29E4A697002790F6 /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; @@ -652,6 +719,11 @@ /* End PBXSourcesBuildPhase section */ /* Begin PBXTargetDependency section */ + D110000B2F70000100112233 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = D05B9F7129E39EEC008CB1F9 /* App */; + targetProxy = D11000022F70000100112233 /* PBXContainerItemProxy */; + }; D020F65C29E4A697002790F6 /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = D020F65229E4A697002790F6 /* NetworkExtension */; @@ -694,6 +766,20 @@ /* End PBXTargetDependency section */ /* Begin XCBuildConfiguration section */ + D110000C2F70000100112233 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = D11000052F70000100112233 /* UITests.xcconfig */; + buildSettings = { + }; + name = Debug; + }; + D110000D2F70000100112233 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 
D11000052F70000100112233 /* UITests.xcconfig */; + buildSettings = { + }; + name = Release; + }; D020F65F29E4A697002790F6 /* Debug */ = { isa = XCBuildConfiguration; baseConfigurationReference = D020F66229E4A6E5002790F6 /* NetworkExtension.xcconfig */; @@ -781,6 +867,15 @@ /* End XCBuildConfiguration section */ /* Begin XCConfigurationList section */ + D110000E2F70000100112233 /* Build configuration list for PBXNativeTarget "BurrowUITests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + D110000C2F70000100112233 /* Debug */, + D110000D2F70000100112233 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; D020F65E29E4A697002790F6 /* Build configuration list for PBXNativeTarget "NetworkExtension" */ = { isa = XCConfigurationList; buildConfigurations = ( diff --git a/Apple/Burrow.xcodeproj/xcshareddata/xcschemes/App.xcscheme b/Apple/Burrow.xcodeproj/xcshareddata/xcschemes/App.xcscheme index a524e87..f580ea7 100644 --- a/Apple/Burrow.xcodeproj/xcshareddata/xcschemes/App.xcscheme +++ b/Apple/Burrow.xcodeproj/xcshareddata/xcschemes/App.xcscheme @@ -28,7 +28,20 @@ selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" shouldUseLaunchSchemeArgsEnv = "YES" - shouldAutocreateTestPlan = "YES"> + shouldAutocreateTestPlan = "NO"> + + + + + + some View { diff --git a/Scripts/authentik-sync-burrow-directory.sh b/Scripts/authentik-sync-burrow-directory.sh index 656b738..277c5f4 100644 --- a/Scripts/authentik-sync-burrow-directory.sh +++ b/Scripts/authentik-sync-burrow-directory.sh @@ -116,7 +116,7 @@ lookup_user_pk() { ensure_user() { local user_spec="$1" - local username name email is_admin groups_json effective_groups_json group_name + local username name email is_admin groups_json password_file effective_groups_json group_name local group_pks_json payload user_pk username="$(printf '%s\n' "$user_spec" | jq -r '.username')" @@ -124,6 
+124,7 @@ ensure_user() { email="$(printf '%s\n' "$user_spec" | jq -r '.email')" is_admin="$(printf '%s\n' "$user_spec" | jq -r '.isAdmin // false')" groups_json="$(printf '%s\n' "$user_spec" | jq -c '.groups // []')" + password_file="$(printf '%s\n' "$user_spec" | jq -r '.passwordFile // empty')" if [[ -z "$username" || "$username" == "null" || -z "$email" || "$email" == "null" ]]; then echo "error: each Burrow Authentik user requires username and email" >&2 @@ -178,6 +179,19 @@ ensure_user() { echo "error: could not create Authentik user ${username}" >&2 exit 1 fi + + if [[ -n "$password_file" ]]; then + if [[ ! -s "$password_file" ]]; then + echo "error: password file for Authentik user ${username} is missing: ${password_file}" >&2 + exit 1 + fi + + api POST "/api/v3/core/users/${user_pk}/set_password/" "$( + jq -cn \ + --arg password "$(tr -d '\r\n' < "$password_file")" \ + '{password: $password}' + )" >/dev/null + fi } lookup_application_pk() { diff --git a/Scripts/authentik-sync-tailnet-auth-flow.sh b/Scripts/authentik-sync-tailnet-auth-flow.sh new file mode 100755 index 0000000..bfb00ef --- /dev/null +++ b/Scripts/authentik-sync-tailnet-auth-flow.sh @@ -0,0 +1,294 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +provider_slug="${AUTHENTIK_TAILNET_PROVIDER_SLUG:-ts}" +authentication_flow_name="${AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME:-Burrow Tailnet Authentication}" +authentication_flow_slug="${AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG:-burrow-tailnet-authentication}" +identification_stage_name="${AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME:-burrow-tailnet-identification-stage}" +password_stage_name="${AUTHENTIK_TAILNET_PASSWORD_STAGE_NAME:-burrow-tailnet-password-stage}" +user_login_stage_name="${AUTHENTIK_TAILNET_USER_LOGIN_STAGE_NAME:-burrow-tailnet-user-login-stage}" +google_source_slug="${AUTHENTIK_TAILNET_GOOGLE_SOURCE_SLUG:-google}" + +usage() 
{ + cat <<'EOF' +Usage: Scripts/authentik-sync-tailnet-auth-flow.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_TAILNET_PROVIDER_SLUG + AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME + AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG + AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME + AUTHENTIK_TAILNET_PASSWORD_STAGE_NAME + AUTHENTIK_TAILNET_USER_LOGIN_STAGE_NAME + AUTHENTIK_TAILNET_GOOGLE_SOURCE_SLUG +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +lookup_stage_by_name() { + local path="$1" + local name="$2" + + api GET "${path}?page_size=200" \ + | jq -c --arg name "$name" '.results[]? | select(.name == $name)' \ + | head -n1 +} + +lookup_flow_pk() { + local slug="$1" + + api GET "/api/v3/flows/instances/?slug=${slug}" \ + | jq -r '.results[]? | select(.slug != null) | .pk // empty' \ + | head -n1 +} + +lookup_source_pk() { + local slug="$1" + + api GET "/api/v3/sources/oauth/?page_size=200&slug=${slug}" \ + | jq -r --arg slug "$slug" '.results[]? 
| select(.slug == $slug) | .pk // empty' \ + | head -n1 +} + +ensure_password_stage() { + local existing payload stage_pk + + existing="$(lookup_stage_by_name "/api/v3/stages/password/" "$password_stage_name")" + payload="$( + jq -cn \ + --arg name "$password_stage_name" \ + '{ + name: $name, + backends: [ + "authentik.core.auth.InbuiltBackend", + "authentik.core.auth.TokenBackend" + ], + allow_show_password: false, + failed_attempts_before_cancel: 5 + }' + )" + + if [[ -n "$existing" ]]; then + stage_pk="$(printf '%s\n' "$existing" | jq -r '.pk')" + api PATCH "/api/v3/stages/password/${stage_pk}/" "$payload" >/dev/null + else + stage_pk="$( + api POST "/api/v3/stages/password/" "$payload" \ + | jq -r '.pk // empty' + )" + fi + + printf '%s\n' "$stage_pk" +} + +ensure_identification_stage() { + local password_stage_pk="$1" + local google_source_pk="$2" + local existing payload stage_pk sources_json + + existing="$(lookup_stage_by_name "/api/v3/stages/identification/" "$identification_stage_name")" + if [[ -n "$google_source_pk" ]]; then + sources_json="$(jq -cn --arg source "$google_source_pk" '[$source]')" + else + sources_json='[]' + fi + + payload="$( + jq -cn \ + --arg name "$identification_stage_name" \ + --arg password_stage "$password_stage_pk" \ + --argjson sources "$sources_json" \ + '{ + name: $name, + user_fields: ["username", "email"], + password_stage: $password_stage, + case_insensitive_matching: true, + show_matched_user: true, + sources: $sources, + show_source_labels: true, + pretend_user_exists: false, + enable_remember_me: false + }' + )" + + if [[ -n "$existing" ]]; then + stage_pk="$(printf '%s\n' "$existing" | jq -r '.pk')" + api PATCH "/api/v3/stages/identification/${stage_pk}/" "$payload" >/dev/null + else + stage_pk="$( + api POST "/api/v3/stages/identification/" "$payload" \ + | jq -r '.pk // empty' + )" + fi + + printf '%s\n' "$stage_pk" +} + +ensure_user_login_stage() { + local existing payload stage_pk + + 
existing="$(lookup_stage_by_name "/api/v3/stages/user_login/" "$user_login_stage_name")" + payload="$( + jq -cn \ + --arg name "$user_login_stage_name" \ + '{ + name: $name, + session_duration: "hours=12", + terminate_other_sessions: false, + remember_me_offset: "seconds=0", + network_binding: "no_binding", + geoip_binding: "no_binding" + }' + )" + + if [[ -n "$existing" ]]; then + stage_pk="$(printf '%s\n' "$existing" | jq -r '.pk')" + api PATCH "/api/v3/stages/user_login/${stage_pk}/" "$payload" >/dev/null + else + stage_pk="$( + api POST "/api/v3/stages/user_login/" "$payload" \ + | jq -r '.pk // empty' + )" + fi + + printf '%s\n' "$stage_pk" +} + +ensure_authentication_flow() { + local existing_pk payload + + existing_pk="$(lookup_flow_pk "$authentication_flow_slug")" + payload="$( + jq -cn \ + --arg name "$authentication_flow_name" \ + --arg slug "$authentication_flow_slug" \ + '{ + name: $name, + title: $name, + slug: $slug, + designation: "authentication", + policy_engine_mode: "any", + layout: "stacked" + }' + )" + + if [[ -n "$existing_pk" ]]; then + api PATCH "/api/v3/flows/instances/${authentication_flow_slug}/" "$payload" >/dev/null + printf '%s\n' "$existing_pk" + else + api POST "/api/v3/flows/instances/" "$payload" \ + | jq -r '.pk // empty' + fi +} + +ensure_flow_binding() { + local flow_pk="$1" + local stage_pk="$2" + local order="$3" + local existing payload binding_pk + + existing="$( + api GET "/api/v3/flows/bindings/?target=${flow_pk}&stage=${stage_pk}&page_size=200" \ + | jq -c '.results[]?' 
\ + | head -n1 + )" + + payload="$( + jq -cn \ + --arg target "$flow_pk" \ + --arg stage "$stage_pk" \ + --argjson order "$order" \ + '{ + target: $target, + stage: $stage, + order: $order, + policy_engine_mode: "any" + }' + )" + + if [[ -n "$existing" ]]; then + binding_pk="$(printf '%s\n' "$existing" | jq -r '.pk')" + api PATCH "/api/v3/flows/bindings/${binding_pk}/" "$payload" >/dev/null + else + api POST "/api/v3/flows/bindings/" "$payload" >/dev/null + fi +} + +wait_for_authentik + +provider_pk="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -r --arg provider_slug "$provider_slug" ' + .results[]? + | select(.assigned_application_slug == $provider_slug or .slug == $provider_slug) + | .pk // empty + ' \ + | head -n1 +)" + +if [[ -z "$provider_pk" ]]; then + echo "error: could not resolve Authentik Tailnet OAuth provider ${provider_slug}" >&2 + exit 1 +fi + +google_source_pk="$(lookup_source_pk "$google_source_slug" || true)" +password_stage_pk="$(ensure_password_stage)" +identification_stage_pk="$(ensure_identification_stage "$password_stage_pk" "$google_source_pk")" +user_login_stage_pk="$(ensure_user_login_stage)" +authentication_flow_pk="$(ensure_authentication_flow)" + +ensure_flow_binding "$authentication_flow_pk" "$identification_stage_pk" 10 +ensure_flow_binding "$authentication_flow_pk" "$user_login_stage_pk" 30 + +api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$( + jq -cn --arg flow "$authentication_flow_pk" '{authentication_flow: $flow}' +)" >/dev/null + +echo "Synced Burrow Tailnet authentication flow for provider ${provider_slug}." diff --git a/Scripts/run-ios-tailnet-ui-tests.sh b/Scripts/run-ios-tailnet-ui-tests.sh new file mode 100755 index 0000000..5086bd1 --- /dev/null +++ b/Scripts/run-ios-tailnet-ui-tests.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +set -euo pipefail + +repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +bundle_id="${BURROW_UI_TEST_APP_BUNDLE_ID:-com.hackclub.burrow}" +simulator_name="${BURROW_UI_TEST_SIMULATOR_NAME:-iPhone 17 Pro}" +simulator_os="${BURROW_UI_TEST_SIMULATOR_OS:-26.4}" +derived_data_path="${BURROW_UI_TEST_DERIVED_DATA_PATH:-/tmp/burrow-ui-tests-deriveddata}" +source_packages_path="${BURROW_UI_TEST_SOURCE_PACKAGES_PATH:-/tmp/burrow-ui-tests-sourcepackages}" +fallback_dir="${HOME}/Library/Application Support/${bundle_id}/SimulatorFallback" +socket_path="${fallback_dir}/burrow.sock" +daemon_log="${BURROW_UI_TEST_DAEMON_LOG:-/tmp/burrow-ui-test-daemon.log}" +ui_test_email="${BURROW_UI_TEST_EMAIL:-ui-test@burrow.net}" +ui_test_username="${BURROW_UI_TEST_USERNAME:-ui-test}" +password_secret="${repo_root}/secrets/infra/authentik-ui-test-password.age" +age_identity="${BURROW_UI_TEST_AGE_IDENTITY:-${HOME}/.ssh/id_ed25519}" + +ui_test_password="${BURROW_UI_TEST_PASSWORD:-}" +if [[ -z "$ui_test_password" ]]; then + if [[ -f "$password_secret" && -f "$age_identity" ]]; then + ui_test_password="$(age -d -i "$age_identity" "$password_secret" | tr -d '\r\n')" + else + echo "error: BURROW_UI_TEST_PASSWORD is unset and ${password_secret} could not be decrypted" >&2 + exit 1 + fi +fi + +mkdir -p "$fallback_dir" "$derived_data_path" "$source_packages_path" +rm -f "$socket_path" + +cleanup() { + if [[ -n "${daemon_pid:-}" ]]; then + kill "$daemon_pid" >/dev/null 2>&1 || true + wait "$daemon_pid" >/dev/null 2>&1 || true + fi +} +trap cleanup EXIT + +cargo build -p burrow --bin burrow + +( + cd "$fallback_dir" + BURROW_SOCKET_PATH="burrow.sock" \ + "${repo_root}/target/debug/burrow" daemon >"$daemon_log" 2>&1 +) & +daemon_pid=$! + +for _ in $(seq 1 50); do + [[ -S "$socket_path" ]] && break + sleep 0.2 +done + +if [[ ! 
-S "$socket_path" ]]; then + echo "error: Burrow daemon did not create ${socket_path}" >&2 + [[ -f "$daemon_log" ]] && cat "$daemon_log" >&2 + exit 1 +fi + +BURROW_UI_TEST_EMAIL="$ui_test_email" \ +BURROW_UI_TEST_USERNAME="$ui_test_username" \ +BURROW_UI_TEST_PASSWORD="$ui_test_password" \ +xcodebuild \ + -quiet \ + -skipPackagePluginValidation \ + -project "${repo_root}/Apple/Burrow.xcodeproj" \ + -scheme App \ + -configuration Debug \ + -destination "platform=iOS Simulator,name=${simulator_name},OS=${simulator_os}" \ + -derivedDataPath "$derived_data_path" \ + -clonedSourcePackagesDirPath "$source_packages_path" \ + -only-testing:BurrowUITests \ + CODE_SIGNING_ALLOWED=NO \ + test diff --git a/contributors.nix b/contributors.nix index f6cc014..22c28b6 100644 --- a/contributors.nix +++ b/contributors.nix @@ -43,5 +43,18 @@ "automation" ]; }; + + ui-test = { + displayName = "Burrow UI Test"; + canonicalEmail = "ui-test@burrow.net"; + isAdmin = false; + forgeAuthorized = false; + bootstrapAuthentik = true; + authentikPasswordSecret = "burrowAuthentikUiTestPassword"; + roles = [ + "testing" + "apple-ui" + ]; + }; }; } diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index fb5b8ae..6c106f4 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -3,6 +3,10 @@ let contributors = import ../../../contributors.nix; identities = contributors.identities; + authentikPasswordSecretPath = identity: + if identity ? 
authentikPasswordSecret + then config.age.secrets.${identity.authentikPasswordSecret}.path + else null; bootstrapUsers = lib.mapAttrsToList ( username: identity: { @@ -11,6 +15,7 @@ let email = identity.canonicalEmail; sourceEmail = identity.sourceEmail or null; isAdmin = identity.isAdmin or false; + passwordFile = authentikPasswordSecretPath identity; } ) (lib.filterAttrs (_: identity: identity.bootstrapAuthentik or false) identities); @@ -70,6 +75,12 @@ in group = "root"; mode = "0400"; }; + age.secrets.burrowAuthentikUiTestPassword = { + file = ../../../secrets/infra/authentik-ui-test-password.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; networking.extraHosts = '' 127.0.0.1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net diff --git a/nixos/modules/burrow-authentik.nix b/nixos/modules/burrow-authentik.nix index 4e31d43..478d0d9 100644 --- a/nixos/modules/burrow-authentik.nix +++ b/nixos/modules/burrow-authentik.nix @@ -11,6 +11,7 @@ let directorySyncScript = ../../Scripts/authentik-sync-burrow-directory.sh; forgejoOidcSyncScript = ../../Scripts/authentik-sync-forgejo-oidc.sh; googleSourceSyncScript = ../../Scripts/authentik-sync-google-source.sh; + tailnetAuthFlowSyncScript = ../../Scripts/authentik-sync-tailnet-auth-flow.sh; authentikBlueprint = pkgs.writeText "burrow-authentik-blueprint.yaml" '' version: 1 metadata: @@ -175,6 +176,36 @@ in description = "Identification-stage behavior for the Google Authentik source."; }; + headscaleAuthenticationFlowSlug = lib.mkOption { + type = lib.types.str; + default = "burrow-tailnet-authentication"; + description = "Authentik authentication flow slug used for Burrow Tailnet sign-in."; + }; + + headscaleAuthenticationFlowName = lib.mkOption { + type = lib.types.str; + default = "Burrow Tailnet Authentication"; + description = "Authentik authentication flow name used for Burrow Tailnet sign-in."; + }; + + headscaleIdentificationStageName = lib.mkOption { + type = 
lib.types.str; + default = "burrow-tailnet-identification-stage"; + description = "Authentik identification stage used for Burrow Tailnet sign-in."; + }; + + headscalePasswordStageName = lib.mkOption { + type = lib.types.str; + default = "burrow-tailnet-password-stage"; + description = "Authentik password stage used for Burrow Tailnet sign-in."; + }; + + headscaleUserLoginStageName = lib.mkOption { + type = lib.types.str; + default = "burrow-tailnet-user-login-stage"; + description = "Authentik user-login stage used for Burrow Tailnet sign-in."; + }; + userGroupName = lib.mkOption { type = lib.types.str; default = "burrow-users"; @@ -217,6 +248,11 @@ in default = false; description = "Whether this user should be in the Burrow admin group."; }; + passwordFile = lib.mkOption { + type = nullOr str; + default = null; + description = "Optional host-local file containing a bootstrap password for this user."; + }; }; }); default = [ ]; @@ -468,7 +504,7 @@ EOF restartTriggers = [ directorySyncScript cfg.envFile - ]; + ] ++ lib.concatMap (user: lib.optional (user.passwordFile != null) user.passwordFile) cfg.bootstrapUsers; path = [ pkgs.bash pkgs.coreutils @@ -491,7 +527,7 @@ EOF export AUTHENTIK_BURROW_ADMINS_GROUP=${lib.escapeShellArg cfg.adminGroupName} export AUTHENTIK_FORGEJO_APPLICATION_SLUG=${lib.escapeShellArg cfg.forgejoProviderSlug} export AUTHENTIK_BURROW_DIRECTORY_JSON='${builtins.toJSON (map (user: { - inherit (user) username name email isAdmin; + inherit (user) username name email isAdmin passwordFile; groups = user.groups; }) cfg.bootstrapUsers)}' @@ -499,6 +535,59 @@ EOF ''; }; + systemd.services.burrow-authentik-tailnet-auth-flow = { + description = "Reconcile the Burrow Tailnet authentication flow"; + after = + [ + "burrow-authentik-ready.service" + "network-online.target" + ] + ++ lib.optionals ( + cfg.googleClientIDFile != null && cfg.googleClientSecretFile != null + ) [ "burrow-authentik-google-source.service" ]; + wants = + [ + 
"burrow-authentik-ready.service" + "network-online.target" + ] + ++ lib.optionals ( + cfg.googleClientIDFile != null && cfg.googleClientSecretFile != null + ) [ "burrow-authentik-google-source.service" ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + tailnetAuthFlowSyncScript + cfg.envFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_TAILNET_PROVIDER_SLUG=${lib.escapeShellArg cfg.headscaleProviderSlug} + export AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME=${lib.escapeShellArg cfg.headscaleAuthenticationFlowName} + export AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG=${lib.escapeShellArg cfg.headscaleAuthenticationFlowSlug} + export AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME=${lib.escapeShellArg cfg.headscaleIdentificationStageName} + export AUTHENTIK_TAILNET_PASSWORD_STAGE_NAME=${lib.escapeShellArg cfg.headscalePasswordStageName} + export AUTHENTIK_TAILNET_USER_LOGIN_STAGE_NAME=${lib.escapeShellArg cfg.headscaleUserLoginStageName} + export AUTHENTIK_TAILNET_GOOGLE_SOURCE_SLUG=${lib.escapeShellArg cfg.googleSourceSlug} + + ${pkgs.bash}/bin/bash ${tailnetAuthFlowSyncScript} + ''; + }; + systemd.services.burrow-authentik-forgejo-oidc = lib.mkIf (cfg.forgejoClientSecretFile != null) { description = "Reconcile the Burrow Authentik Forgejo OIDC application"; after = [ diff --git a/secrets.nix b/secrets.nix index 909b929..cc23605 100644 --- a/secrets.nix +++ b/secrets.nix @@ -12,6 +12,7 @@ in "secrets/infra/authentik.env.age".publicKeys = burrowForgeRecipients; "secrets/infra/authentik-google-client-id.age".publicKeys = burrowForgeRecipients; "secrets/infra/authentik-google-client-secret.age".publicKeys = burrowForgeRecipients; + "secrets/infra/authentik-ui-test-password.age".publicKeys = 
burrowForgeRecipients; "secrets/infra/forgejo-oidc-client-secret.age".publicKeys = burrowForgeRecipients; "secrets/infra/headscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients; } diff --git a/secrets/infra/authentik-ui-test-password.age b/secrets/infra/authentik-ui-test-password.age new file mode 100644 index 0000000..f39c21a --- /dev/null +++ b/secrets/infra/authentik-ui-test-password.age @@ -0,0 +1,9 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q 4+zOIEyQTCHqKdZKV/H4D7e4y+UTrc9rYzvCgGUPVEg +S+tAlc4wvzVUe9r9+mBAnUj5C31bQqo4PK3muBCzs2Y +-> ssh-ed25519 IrZmAg 1KasjHiY1MQVLIzoDdGshhDhaDimOtZ5EyE4GyZngHg +ov711Sp+Q/zQw0NUpB2rnKEF8bFxoVafdVQ/8gSbSZA +-> X25519 3EWdCP5UkWd1g6bDaQm/kNCNlhSONrz8RB7OZgT9nXE +6+HoM9mg6P/CtU39P8SCyutLkmYw27MikoZZ5L9nI54 +--- Rw0o+MvtvHQrrYPNtCPxHGR67K67nyJUQRd4DN3nOCY +fn Date: Fri, 3 Apr 2026 03:08:06 -0700 Subject: [PATCH 12/59] Allow local UI test secret decryption --- secrets.nix | 4 +++- secrets/infra/authentik-ui-test-password.age | 23 ++++++++++++-------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/secrets.nix b/secrets.nix index cc23605..5a3ac8c 100644 --- a/secrets.nix +++ b/secrets.nix @@ -1,4 +1,5 @@ let + conradev = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBueQxNbP2246pxr/m7au4zNVm+ShC96xuOcfEcpIjWZ"; contact = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa"; agent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net"; burrowForgeHost = "age1quxf27gnun0xghlnxf3jrmqr3h3a3fzd8qxpallsaztd2u74pdfq9e7w9l"; @@ -7,12 +8,13 @@ let agent burrowForgeHost ]; + uiTestRecipients = burrowForgeRecipients ++ [ conradev ]; in { "secrets/infra/authentik.env.age".publicKeys = burrowForgeRecipients; "secrets/infra/authentik-google-client-id.age".publicKeys = burrowForgeRecipients; "secrets/infra/authentik-google-client-secret.age".publicKeys = burrowForgeRecipients; - "secrets/infra/authentik-ui-test-password.age".publicKeys = 
burrowForgeRecipients; + "secrets/infra/authentik-ui-test-password.age".publicKeys = uiTestRecipients; "secrets/infra/forgejo-oidc-client-secret.age".publicKeys = burrowForgeRecipients; "secrets/infra/headscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients; } diff --git a/secrets/infra/authentik-ui-test-password.age b/secrets/infra/authentik-ui-test-password.age index f39c21a..e84a7be 100644 --- a/secrets/infra/authentik-ui-test-password.age +++ b/secrets/infra/authentik-ui-test-password.age @@ -1,9 +1,14 @@ -age-encryption.org/v1 --> ssh-ed25519 ux4N8Q 4+zOIEyQTCHqKdZKV/H4D7e4y+UTrc9rYzvCgGUPVEg -S+tAlc4wvzVUe9r9+mBAnUj5C31bQqo4PK3muBCzs2Y --> ssh-ed25519 IrZmAg 1KasjHiY1MQVLIzoDdGshhDhaDimOtZ5EyE4GyZngHg -ov711Sp+Q/zQw0NUpB2rnKEF8bFxoVafdVQ/8gSbSZA --> X25519 3EWdCP5UkWd1g6bDaQm/kNCNlhSONrz8RB7OZgT9nXE -6+HoM9mg6P/CtU39P8SCyutLkmYw27MikoZZ5L9nI54 ---- Rw0o+MvtvHQrrYPNtCPxHGR67K67nyJUQRd4DN3nOCY -fn Date: Fri, 3 Apr 2026 17:49:11 -0700 Subject: [PATCH 13/59] Add tailnet connectivity smoke path --- Scripts/run-tailnet-connectivity-smoke.sh | 186 ++++++++++ Tools/tailscale-login-bridge/main.go | 410 +++++++++++++++++++- burrow/src/main.rs | 433 ++++++++++++++++++++++ 3 files changed, 1019 insertions(+), 10 deletions(-) create mode 100755 Scripts/run-tailnet-connectivity-smoke.sh diff --git a/Scripts/run-tailnet-connectivity-smoke.sh b/Scripts/run-tailnet-connectivity-smoke.sh new file mode 100755 index 0000000..f3053d3 --- /dev/null +++ b/Scripts/run-tailnet-connectivity-smoke.sh @@ -0,0 +1,186 @@ +#!/usr/bin/env bash +set -euo pipefail + +repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +bundle_id="${BURROW_UI_TEST_APP_BUNDLE_ID:-com.hackclub.burrow}" +smoke_root="${BURROW_TAILNET_SMOKE_ROOT:-/tmp/burrow-tailnet-connectivity}" +socket_path="${smoke_root}/burrow.sock" +db_path="${smoke_root}/burrow.db" +daemon_log="${BURROW_TAILNET_SMOKE_DAEMON_LOG:-${smoke_root}/daemon.log}" +payload_path="${smoke_root}/tailnet.json" +authority="${BURROW_TAILNET_SMOKE_AUTHORITY:-https://ts.burrow.net}" +account_name="${BURROW_TAILNET_SMOKE_ACCOUNT:-ui-test}" +identity_name="${BURROW_TAILNET_SMOKE_IDENTITY:-apple}" +hostname="${BURROW_TAILNET_SMOKE_HOSTNAME:-burrow-apple}" +message="${BURROW_TAILNET_SMOKE_MESSAGE:-burrow-tailnet-smoke}" +timeout_ms="${BURROW_TAILNET_SMOKE_TIMEOUT_MS:-8000}" +remote_ip="${BURROW_TAILNET_SMOKE_REMOTE_IP:-}" +remote_port="${BURROW_TAILNET_SMOKE_REMOTE_PORT:-18081}" +remote_hostname="${BURROW_TAILNET_SMOKE_REMOTE_HOSTNAME:-burrow-echo}" +remote_authkey="${BURROW_TAILNET_SMOKE_REMOTE_AUTHKEY:-}" +helper_bin="${BURROW_TAILNET_SMOKE_HELPER_BIN:-${smoke_root}/tailscale-login-bridge}" +remote_state_root="${BURROW_TAILNET_SMOKE_REMOTE_STATE_ROOT:-${smoke_root}/remote-state}" +remote_stdout="${smoke_root}/remote-helper.stdout" +remote_stderr="${BURROW_TAILNET_SMOKE_REMOTE_LOG:-${smoke_root}/remote-helper.log}" + +if [[ -n "${TS_AUTHKEY:-}" ]]; then + default_tailnet_state_root="${smoke_root}/local-state" +else + default_tailnet_state_root="/tmp/${bundle_id}/SimulatorTailnetState" +fi +tailnet_state_root="${BURROW_TAILNET_STATE_ROOT:-${default_tailnet_state_root}}" + +need_login=0 +if [[ -z "${TS_AUTHKEY:-}" ]] && { [[ ! -d "$tailnet_state_root" ]] || [[ -z "$(find "$tailnet_state_root" -mindepth 1 -maxdepth 2 -print -quit 2>/dev/null)" ]]; }; then + need_login=1 +fi + +if [[ "$need_login" -eq 1 ]]; then + echo "Tailnet state root is empty; running iOS login bootstrap first..." 
+ "${repo_root}/Scripts/run-ios-tailnet-ui-tests.sh" +fi + +rm -rf "$smoke_root" +mkdir -p "$smoke_root" + +cleanup() { + rm -f "$payload_path" + if [[ -n "${daemon_pid:-}" ]]; then + kill "$daemon_pid" >/dev/null 2>&1 || true + wait "$daemon_pid" >/dev/null 2>&1 || true + fi + if [[ -n "${remote_pid:-}" ]]; then + kill "$remote_pid" >/dev/null 2>&1 || true + wait "$remote_pid" >/dev/null 2>&1 || true + fi +} +trap cleanup EXIT + +wait_for_helper_listen() { + python3 - <<'PY' "$1" +import json +import pathlib +import sys +import time + +path = pathlib.Path(sys.argv[1]) +deadline = time.time() + 20 +while time.time() < deadline: + if path.exists(): + with path.open("r", encoding="utf-8") as handle: + line = handle.readline().strip() + if line: + hello = json.loads(line) + print(hello["listen_addr"]) + raise SystemExit(0) + time.sleep(0.1) +raise SystemExit("timed out waiting for helper startup line") +PY +} + +wait_for_helper_ip() { + python3 - <<'PY' "$1" +import json +import sys +import time +import urllib.request + +url = sys.argv[1] +deadline = time.time() + 30 +while time.time() < deadline: + with urllib.request.urlopen(url, timeout=5) as response: + status = json.load(response) + if status.get("running") and status.get("tailscale_ips"): + print(status["tailscale_ips"][0]) + raise SystemExit(0) + time.sleep(0.25) +raise SystemExit("timed out waiting for helper to become ready") +PY +} + +python3 - <<'PY' "$payload_path" "$authority" "$account_name" "$identity_name" "$hostname" +import json +import pathlib +import sys + +path = pathlib.Path(sys.argv[1]) +payload = { + "authority": sys.argv[2], + "account": sys.argv[3], + "identity": sys.argv[4], + "hostname": sys.argv[5], +} +path.write_text(json.dumps(payload, indent=2) + "\n", encoding="utf-8") +PY + +cargo build -p burrow --bin burrow +( + cd "${repo_root}/Tools/tailscale-login-bridge" + GOWORK=off go build -o "$helper_bin" . +) + +if [[ -z "$remote_ip" ]]; then + if [[ -z "$remote_authkey" ]] && { [[ ! 
-d "$remote_state_root" ]] || [[ -z "$(find "$remote_state_root" -mindepth 1 -maxdepth 1 -print -quit 2>/dev/null)" ]]; }; then + echo "error: set BURROW_TAILNET_SMOKE_REMOTE_IP, BURROW_TAILNET_SMOKE_REMOTE_AUTHKEY, or BURROW_TAILNET_SMOKE_REMOTE_STATE_ROOT to an existing logged-in helper state" >&2 + exit 1 + fi + + if [[ -n "$remote_authkey" ]]; then + rm -rf "$remote_state_root" + mkdir -p "$remote_state_root" + fi + + ( + cd "$repo_root" + if [[ -n "$remote_authkey" ]]; then + export TS_AUTHKEY="$remote_authkey" + fi + "$helper_bin" \ + --listen 127.0.0.1:0 \ + --state-dir "$remote_state_root" \ + --hostname "$remote_hostname" \ + --control-url "$authority" \ + --udp-echo-port "$remote_port" \ + >"$remote_stdout" 2>"$remote_stderr" + ) & + remote_pid=$! + + remote_listen_addr="$(wait_for_helper_listen "$remote_stdout")" + remote_ip="$(wait_for_helper_ip "http://${remote_listen_addr}/status")" +fi + +( + cd "$smoke_root" + RUST_LOG="${BURROW_TAILNET_SMOKE_RUST_LOG:-info,burrow=debug}" \ + BURROW_SOCKET_PATH="$socket_path" \ + BURROW_TAILSCALE_STATE_ROOT="$tailnet_state_root" \ + "${repo_root}/target/debug/burrow" daemon >"$daemon_log" 2>&1 +) & +daemon_pid=$! + +for _ in $(seq 1 50); do + [[ -S "$socket_path" ]] && break + sleep 0.2 +done + +if [[ ! -S "$socket_path" ]]; then + echo "error: Burrow daemon did not create ${socket_path}" >&2 + [[ -f "$daemon_log" ]] && cat "$daemon_log" >&2 + exit 1 +fi + +run_burrow() { + BURROW_SOCKET_PATH="$socket_path" \ + BURROW_TAILSCALE_STATE_ROOT="$tailnet_state_root" \ + "${repo_root}/target/debug/burrow" "$@" +} + +run_burrow network-add 1 1 "$payload_path" +run_burrow start +run_burrow tunnel-config +run_burrow tailnet-udp-echo "${remote_ip}:${remote_port}" --message "$message" --timeout-ms "$timeout_ms" + +echo +echo "Tailnet connectivity smoke passed." 
+echo "State root: $tailnet_state_root" +echo "Remote: ${remote_ip}:${remote_port}" diff --git a/Tools/tailscale-login-bridge/main.go b/Tools/tailscale-login-bridge/main.go index 82ca9b0..877d0e4 100644 --- a/Tools/tailscale-login-bridge/main.go +++ b/Tools/tailscale-login-bridge/main.go @@ -2,17 +2,26 @@ package main import ( "context" + "encoding/binary" "encoding/json" + "errors" "flag" "fmt" + "io" "log" "net" + "net/netip" "net/http" "os" + "strconv" + "sync" "time" + "github.com/tailscale/wireguard-go/tun" "tailscale.com/client/local" "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tailcfg" "tailscale.com/tsnet" ) @@ -26,13 +35,123 @@ type statusResponse struct { SelfDNSName string `json:"self_dns_name,omitempty"` TailscaleIPs []string `json:"tailscale_ips,omitempty"` Health []string `json:"health,omitempty"` + Peers []peerSummary `json:"peers,omitempty"` } +type peerSummary struct { + Name string `json:"name,omitempty"` + DNSName string `json:"dns_name,omitempty"` + TailscaleIPs []string `json:"tailscale_ips,omitempty"` + Online bool `json:"online"` + Active bool `json:"active"` + Relay string `json:"relay,omitempty"` + CurAddr string `json:"cur_addr,omitempty"` + LastSeenUnix int64 `json:"last_seen_unix,omitempty"` +} + +type pingResponse struct { + Result *ipnstate.PingResult `json:"result,omitempty"` +} + +type helperHello struct { + ListenAddr string `json:"listen_addr"` + PacketSocket string `json:"packet_socket,omitempty"` +} + +type helperState struct { + mu sync.RWMutex + authURL string +} + +func (s *helperState) authURLSnapshot() string { + s.mu.RLock() + defer s.mu.RUnlock() + return s.authURL +} + +func (s *helperState) setAuthURL(url string) { + s.mu.Lock() + defer s.mu.Unlock() + s.authURL = url +} + +func (s *helperState) clearAuthURL() { + s.setAuthURL("") +} + +// chanTUN is a tun.Device backed by channels so another process can feed and +// consume raw IP packets while tsnet handles the Tailnet control/data plane. 
+type chanTUN struct { + Inbound chan []byte + Outbound chan []byte + closed chan struct{} + events chan tun.Event +} + +func newChanTUN() *chanTUN { + t := &chanTUN{ + Inbound: make(chan []byte, 1024), + Outbound: make(chan []byte, 1024), + closed: make(chan struct{}), + events: make(chan tun.Event, 1), + } + t.events <- tun.EventUp + return t +} + +func (t *chanTUN) File() *os.File { return nil } + +func (t *chanTUN) Close() error { + select { + case <-t.closed: + default: + close(t.closed) + close(t.Inbound) + } + return nil +} + +func (t *chanTUN) Read(bufs [][]byte, sizes []int, offset int) (int, error) { + select { + case <-t.closed: + return 0, io.EOF + case pkt, ok := <-t.Outbound: + if !ok { + return 0, io.EOF + } + sizes[0] = copy(bufs[0][offset:], pkt) + return 1, nil + } +} + +func (t *chanTUN) Write(bufs [][]byte, offset int) (int, error) { + for _, buf := range bufs { + pkt := buf[offset:] + if len(pkt) == 0 { + continue + } + select { + case <-t.closed: + return 0, errors.New("closed") + case t.Inbound <- append([]byte(nil), pkt...): + default: + } + } + return len(bufs), nil +} + +func (t *chanTUN) MTU() (int, error) { return 1280, nil } +func (t *chanTUN) Name() (string, error) { return "burrow-tailnet", nil } +func (t *chanTUN) Events() <-chan tun.Event { return t.events } +func (t *chanTUN) BatchSize() int { return 1 } + func main() { listen := flag.String("listen", "127.0.0.1:0", "local listen address") stateDir := flag.String("state-dir", "", "persistent state directory") hostname := flag.String("hostname", "burrow-apple", "tailnet hostname") controlURL := flag.String("control-url", "", "optional control URL") + packetSocket := flag.String("packet-socket", "", "optional unix socket path for raw packet bridging") + udpEchoPort := flag.Int("udp-echo-port", 0, "optional tailnet UDP echo port") flag.Parse() if *stateDir == "" { @@ -48,6 +167,24 @@ func main() { Hostname: *hostname, UserLogf: log.Printf, } + + var tunDevice *chanTUN + var 
packetListener net.Listener + if *packetSocket != "" { + _ = os.Remove(*packetSocket) + ln, err := net.Listen("unix", *packetSocket) + if err != nil { + log.Fatalf("packet listen: %v", err) + } + packetListener = ln + defer func() { + packetListener.Close() + _ = os.Remove(*packetSocket) + }() + + tunDevice = newChanTUN() + server.Tun = tunDevice + } if *controlURL != "" { server.ControlURL = *controlURL } @@ -61,6 +198,7 @@ func main() { if err != nil { log.Fatalf("local client: %v", err) } + state := &helperState{} ln, err := net.Listen("tcp", *listen) if err != nil { @@ -68,12 +206,27 @@ func main() { } defer ln.Close() - fmt.Printf("{\"listen_addr\":%q}\n", ln.Addr().String()) + if packetListener != nil { + go servePacketBridge(packetListener, tunDevice) + } + if *udpEchoPort > 0 { + go serveUDPEcho(context.Background(), server, localClient, *udpEchoPort) + } + + hello := helperHello{ + ListenAddr: ln.Addr().String(), + } + if *packetSocket != "" { + hello.PacketSocket = *packetSocket + } + if err := json.NewEncoder(os.Stdout).Encode(hello); err != nil { + log.Fatalf("write hello: %v", err) + } _ = os.Stdout.Sync() mux := http.NewServeMux() mux.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) { - status, err := snapshot(r.Context(), localClient) + status, err := snapshot(r.Context(), localClient, state) if err != nil { http.Error(w, err.Error(), http.StatusBadGateway) return @@ -81,6 +234,40 @@ func main() { w.Header().Set("content-type", "application/json") _ = json.NewEncoder(w).Encode(status) }) + mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) { + ip := r.URL.Query().Get("ip") + if ip == "" { + http.Error(w, "missing ip", http.StatusBadRequest) + return + } + target, err := netip.ParseAddr(ip) + if err != nil { + http.Error(w, fmt.Sprintf("invalid ip: %v", err), http.StatusBadRequest) + return + } + + pingType := tailcfg.PingTSMP + switch r.URL.Query().Get("type") { + case "", "tsmp", "TSMP": + pingType = 
tailcfg.PingTSMP + case "icmp", "ICMP": + pingType = tailcfg.PingICMP + case "peerapi": + pingType = tailcfg.PingPeerAPI + default: + http.Error(w, "unsupported ping type", http.StatusBadRequest) + return + } + + result, err := localClient.Ping(r.Context(), target, pingType) + if err != nil { + http.Error(w, err.Error(), http.StatusBadGateway) + return + } + + w.Header().Set("content-type", "application/json") + _ = json.NewEncoder(w).Encode(&pingResponse{Result: result}) + }) mux.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) go func() { @@ -96,16 +283,110 @@ func main() { log.Fatal(httpServer.Serve(ln)) } -func snapshot(ctx context.Context, localClient *local.Client) (*statusResponse, error) { - status, err := localClient.StatusWithoutPeers(ctx) +func servePacketBridge(listener net.Listener, device *chanTUN) { + for { + conn, err := listener.Accept() + if err != nil { + if errors.Is(err, net.ErrClosed) { + return + } + log.Printf("packet accept: %v", err) + continue + } + log.Printf("packet bridge connected") + if err := bridgePacketConn(conn, device); err != nil && !errors.Is(err, io.EOF) { + log.Printf("packet bridge error: %v", err) + } + _ = conn.Close() + log.Printf("packet bridge disconnected") + } +} + +func bridgePacketConn(conn net.Conn, device *chanTUN) error { + errCh := make(chan error, 2) + + go func() { + for { + pkt, err := readFrame(conn) + if err != nil { + errCh <- err + return + } + select { + case <-device.closed: + errCh <- io.EOF + return + case device.Outbound <- pkt: + } + } + }() + + go func() { + for { + select { + case <-device.closed: + errCh <- io.EOF + return + case pkt, ok := <-device.Inbound: + if !ok { + errCh <- io.EOF + return + } + if err := writeFrame(conn, pkt); err != nil { + errCh <- err + return + } + } + } + }() + + return <-errCh +} + +func readFrame(r io.Reader) ([]byte, error) { + var size [4]byte + if _, err := io.ReadFull(r, size[:]); err != nil { + return 
nil, err + } + length := binary.BigEndian.Uint32(size[:]) + if length == 0 { + return []byte{}, nil + } + packet := make([]byte, length) + if _, err := io.ReadFull(r, packet); err != nil { + return nil, err + } + return packet, nil +} + +func writeFrame(w io.Writer, packet []byte) error { + var size [4]byte + binary.BigEndian.PutUint32(size[:], uint32(len(packet))) + if _, err := w.Write(size[:]); err != nil { + return err + } + if len(packet) == 0 { + return nil + } + _, err := w.Write(packet) + return err +} + +func snapshot(ctx context.Context, localClient *local.Client, state *helperState) (*statusResponse, error) { + status, err := localClient.Status(ctx) if err != nil { return nil, err } - if (status.BackendState == ipn.NeedsLogin.String() || status.BackendState == ipn.NoState.String()) && status.AuthURL == "" { - if err := localClient.StartLoginInteractive(ctx); err != nil { - return nil, err - } - status, err = localClient.StatusWithoutPeers(ctx) + + authURL := status.AuthURL + if authURL == "" { + authURL = state.authURLSnapshot() + } + if status.BackendState == ipn.Running.String() { + state.clearAuthURL() + authURL = "" + } else if (status.BackendState == ipn.NeedsLogin.String() || status.BackendState == ipn.NoState.String()) && authURL == "" { + authURL, err = awaitAuthURL(ctx, localClient, state) if err != nil { return nil, err } @@ -113,7 +394,7 @@ func snapshot(ctx context.Context, localClient *local.Client) (*statusResponse, response := &statusResponse{ BackendState: status.BackendState, - AuthURL: status.AuthURL, + AuthURL: authURL, Running: status.BackendState == ipn.Running.String(), NeedsLogin: status.BackendState == ipn.NeedsLogin.String(), Health: append([]string(nil), status.Health...), @@ -129,5 +410,114 @@ func snapshot(ctx context.Context, localClient *local.Client) (*statusResponse, for _, ip := range status.TailscaleIPs { response.TailscaleIPs = append(response.TailscaleIPs, ip.String()) } + for _, key := range status.Peers() { + peer := 
status.Peer[key] + if peer == nil { + continue + } + summary := peerSummary{ + Name: peer.HostName, + DNSName: peer.DNSName, + Online: peer.Online, + Active: peer.Active, + Relay: peer.Relay, + CurAddr: peer.CurAddr, + LastSeenUnix: peer.LastSeen.Unix(), + } + for _, ip := range peer.TailscaleIPs { + summary.TailscaleIPs = append(summary.TailscaleIPs, ip.String()) + } + response.Peers = append(response.Peers, summary) + } return response, nil } + +func serveUDPEcho(ctx context.Context, server *tsnet.Server, localClient *local.Client, port int) { + ip, err := awaitTailscaleIP(ctx, localClient) + if err != nil { + log.Printf("udp echo setup failed: %v", err) + return + } + + listenAddr := net.JoinHostPort(ip.String(), strconv.Itoa(port)) + pc, err := server.ListenPacket("udp", listenAddr) + if err != nil { + log.Printf("udp echo listen failed on %s: %v", listenAddr, err) + return + } + defer pc.Close() + + log.Printf("udp echo listening on %s", pc.LocalAddr()) + buf := make([]byte, 64<<10) + for { + n, addr, err := pc.ReadFrom(buf) + if err != nil { + if errors.Is(err, net.ErrClosed) || errors.Is(err, io.EOF) { + return + } + log.Printf("udp echo read failed: %v", err) + return + } + if _, err := pc.WriteTo(buf[:n], addr); err != nil { + log.Printf("udp echo write failed: %v", err) + return + } + } +} + +func awaitTailscaleIP(ctx context.Context, localClient *local.Client) (netip.Addr, error) { + for range 60 { + status, err := localClient.StatusWithoutPeers(ctx) + if err == nil { + for _, ip := range status.TailscaleIPs { + if ip.Is4() { + return ip, nil + } + } + for _, ip := range status.TailscaleIPs { + if ip.Is6() { + return ip, nil + } + } + } + select { + case <-ctx.Done(): + return netip.Addr{}, ctx.Err() + case <-time.After(250 * time.Millisecond): + } + } + return netip.Addr{}, errors.New("timed out waiting for tailscale IP") +} + +func awaitAuthURL(ctx context.Context, localClient *local.Client, state *helperState) (string, error) { + watchCtx, cancel := 
context.WithTimeout(ctx, 8*time.Second) + defer cancel() + + watcher, err := localClient.WatchIPNBus(watchCtx, ipn.NotifyInitialState) + if err != nil { + return "", err + } + defer watcher.Close() + + if err := localClient.StartLoginInteractive(ctx); err != nil { + return "", err + } + + for { + notify, err := watcher.Next() + if err != nil { + if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { + return state.authURLSnapshot(), nil + } + return "", err + } + if notify.BrowseToURL != nil && *notify.BrowseToURL != "" { + state.setAuthURL(*notify.BrowseToURL) + return *notify.BrowseToURL, nil + } + if notify.State != nil && *notify.State == ipn.Running { + state.clearAuthURL() + return "", nil + } + } +} diff --git a/burrow/src/main.rs b/burrow/src/main.rs index c91f36f..4ab7700 100644 --- a/burrow/src/main.rs +++ b/burrow/src/main.rs @@ -72,6 +72,14 @@ enum Commands { NetworkReorder(NetworkReorderArgs), /// Delete Network NetworkDelete(NetworkDeleteArgs), + /// Discover a Tailnet authority through the daemon + TailnetDiscover(TailnetDiscoverArgs), + /// Probe a Tailnet authority through the daemon + TailnetProbe(TailnetProbeArgs), + /// Send an ICMP echo probe through the active Tailnet tunnel over daemon packet streaming + TailnetPing(TailnetPingArgs), + /// Send a UDP echo probe through the active Tailnet tunnel over daemon packet streaming + TailnetUdpEcho(TailnetUdpEchoArgs), #[cfg(target_os = "linux")] /// Run a command in an unshared Linux namespace using a Burrow backend Exec(ExecArgs), @@ -110,6 +118,36 @@ struct NetworkDeleteArgs { id: i32, } +#[derive(Args)] +struct TailnetDiscoverArgs { + email: String, +} + +#[derive(Args)] +struct TailnetProbeArgs { + authority: String, +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +#[derive(Args)] +struct TailnetPingArgs { + remote: String, + #[arg(long, default_value = "burrow-tailnet-smoke")] + payload: String, + #[arg(long, default_value_t = 5000)] + timeout_ms: u64, 
+} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +#[derive(Args)] +struct TailnetUdpEchoArgs { + remote: String, + #[arg(long, default_value = "burrow-tailnet-smoke")] + message: String, + #[arg(long, default_value_t = 5000)] + timeout_ms: u64, +} + #[cfg(target_os = "linux")] #[derive(Args)] struct TorExecArgs { @@ -240,6 +278,393 @@ async fn try_network_delete(id: i32) -> Result<()> { Ok(()) } +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +async fn try_tailnet_discover(email: &str) -> Result<()> { + let mut client = BurrowClient::from_uds().await?; + let response = client + .tailnet_client + .discover(crate::daemon::rpc::grpc_defs::TailnetDiscoverRequest { + email: email.to_owned(), + }) + .await? + .into_inner(); + println!("Tailnet Discover Response: {:?}", response); + Ok(()) +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +async fn try_tailnet_probe(authority: &str) -> Result<()> { + let mut client = BurrowClient::from_uds().await?; + let response = client + .tailnet_client + .probe(crate::daemon::rpc::grpc_defs::TailnetProbeRequest { + authority: authority.to_owned(), + }) + .await? + .into_inner(); + println!("Tailnet Probe Response: {:?}", response); + Ok(()) +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +async fn try_tailnet_ping(remote: &str, payload: &str, timeout_ms: u64) -> Result<()> { + use std::net::IpAddr; + + use anyhow::Context; + use rand::Rng; + use tokio::{ + sync::mpsc, + time::{timeout, Duration}, + }; + use tokio_stream::wrappers::ReceiverStream; + + use crate::daemon::rpc::grpc_defs::{Empty, TunnelPacket}; + + let remote_ip: IpAddr = remote + .parse() + .with_context(|| format!("invalid remote IP address {remote}"))?; + let message = payload.as_bytes().to_vec(); + + let mut client = BurrowClient::from_uds().await?; + client.tunnel_client.tunnel_start(Empty {}).await?; + + let mut config_stream = client + .tunnel_client + .tunnel_configuration(Empty {}) + .await? 
+ .into_inner();
+ let config = config_stream
+ .message()
+ .await?
+ .context("tunnel configuration stream ended before yielding a config")?;
+ let local_ip = select_tailnet_local_ip(&config.addresses, remote_ip)?;
+
+ let identifier = rand::thread_rng().gen::<u16>();
+ let sequence = 1_u16;
+ let packet = build_icmp_echo_request(local_ip, remote_ip, identifier, sequence, &message)?;
+
+ let (outbound_tx, outbound_rx) = mpsc::channel::<TunnelPacket>(128);
+ let mut tunnel_packets = client
+ .tunnel_client
+ .tunnel_packets(ReceiverStream::new(outbound_rx))
+ .await?
+ .into_inner();
+
+ outbound_tx
+ .send(TunnelPacket { payload: packet })
+ .await
+ .context("failed to send ICMP echo probe into daemon packet stream")?;
+ log::debug!(
+ "tailnet ping probe queued from {local_ip} to {remote_ip} identifier={identifier} sequence={sequence}"
+ );
+ drop(outbound_tx);
+
+ let reply = timeout(Duration::from_millis(timeout_ms), async {
+ loop {
+ let packet = tunnel_packets
+ .message()
+ .await
+ .context("failed to read packet from daemon packet stream")?
+ .context("daemon packet stream ended before returning a reply")?;
+ log::debug!(
+ "tailnet ping received {} bytes from daemon packet stream",
+ packet.payload.len()
+ );
+ if let Some(reply) = parse_icmp_echo_reply(
+ &packet.payload,
+ local_ip,
+ remote_ip,
+ identifier,
+ sequence,
+ )? 
{ + break Ok::<_, anyhow::Error>(reply); + } + } + }) + .await + .with_context(|| format!("timed out waiting for ICMP echo reply from {remote_ip}"))??; + + println!("Tailnet Ping Source: {}", reply.source); + println!("Tailnet Ping Destination: {}", reply.destination); + println!( + "Tailnet Ping Payload: {}", + String::from_utf8_lossy(&reply.payload) + ); + Ok(()) +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +async fn try_tailnet_udp_echo(remote: &str, message: &str, timeout_ms: u64) -> Result<()> { + use std::net::SocketAddr; + + use anyhow::{bail, Context}; + use futures::{SinkExt, StreamExt}; + use netstack_smoltcp::StackBuilder; + use tokio::{ + sync::mpsc, + time::{timeout, Duration}, + }; + use tokio_stream::wrappers::ReceiverStream; + + use crate::daemon::rpc::grpc_defs::{Empty, TunnelPacket}; + + let remote_addr: SocketAddr = remote + .parse() + .with_context(|| format!("invalid remote socket address {remote}"))?; + + let mut client = BurrowClient::from_uds().await?; + client.tunnel_client.tunnel_start(Empty {}).await?; + + let mut config_stream = client + .tunnel_client + .tunnel_configuration(Empty {}) + .await? + .into_inner(); + let config = config_stream + .message() + .await? 
+ .context("tunnel configuration stream ended before yielding a config")?;
+ let local_addr = select_tailnet_local_socket(&config.addresses, remote_addr.ip())?;
+
+ let (stack, runner, udp_socket, _) = StackBuilder::default()
+ .enable_udp(true)
+ .enable_tcp(true)
+ .build()
+ .context("failed to build userspace UDP stack")?;
+ let runner = runner.context("userspace UDP stack runner unavailable")?;
+ let udp_socket = udp_socket.context("userspace UDP stack socket unavailable")?;
+ let (mut stack_sink, mut stack_stream) = stack.split();
+ let (mut udp_reader, mut udp_writer) = udp_socket.split();
+
+ let (outbound_tx, outbound_rx) = mpsc::channel::<TunnelPacket>(128);
+ let mut tunnel_packets = client
+ .tunnel_client
+ .tunnel_packets(ReceiverStream::new(outbound_rx))
+ .await?
+ .into_inner();
+
+ let ingress_task = tokio::spawn(async move {
+ loop {
+ match tunnel_packets.message().await? {
+ Some(packet) => {
+ log::debug!(
+ "tailnet udp echo received {} bytes from daemon packet stream",
+ packet.payload.len()
+ );
+ stack_sink
+ .send(packet.payload)
+ .await
+ .context("failed to feed inbound tailnet packet into userspace stack")?;
+ }
+ None => break,
+ }
+ }
+ Result::<()>::Ok(())
+ });
+
+ let egress_task = tokio::spawn(async move {
+ while let Some(packet) = stack_stream.next().await {
+ let payload =
+ packet.context("failed to read outbound packet from userspace stack")?;
+ log::debug!(
+ "tailnet udp echo sending {} bytes into daemon packet stream",
+ payload.len()
+ );
+ outbound_tx
+ .send(TunnelPacket { payload })
+ .await
+ .context("failed to forward outbound tailnet packet to daemon")?;
+ }
+ Result::<()>::Ok(())
+ });
+
+ let runner_task = tokio::spawn(async move { runner.await.map_err(anyhow::Error::from) });
+
+ udp_writer
+ .send((message.as_bytes().to_vec(), local_addr, remote_addr))
+ .await
+ .context("failed to send UDP echo probe into userspace stack")?;
+ log::debug!(
+ "tailnet udp echo probe queued from {local_addr} to {remote_addr}"
+ );
+
+ let 
response = timeout(Duration::from_millis(timeout_ms), udp_reader.next())
+ .await
+ .with_context(|| format!("timed out waiting for UDP echo from {remote_addr}"))?
+ .context("userspace UDP stack ended before returning a reply")?;
+ let (payload, reply_source, reply_destination) = response;
+ let response_text = String::from_utf8_lossy(&payload);
+
+ ingress_task.abort();
+ egress_task.abort();
+ runner_task.abort();
+
+ if reply_source != remote_addr {
+ bail!("received UDP reply from unexpected source {reply_source}");
+ }
+ if reply_destination != local_addr {
+ bail!("received UDP reply for unexpected local socket {reply_destination}");
+ }
+ if payload != message.as_bytes() {
+ bail!("UDP echo payload mismatch");
+ }
+
+ println!("Tailnet UDP Echo Source: {reply_source}");
+ println!("Tailnet UDP Echo Destination: {reply_destination}");
+ println!("Tailnet UDP Echo Payload: {response_text}");
+ Ok(())
+}
+
+#[cfg(any(target_os = "linux", target_vendor = "apple"))]
+fn select_tailnet_local_ip(addresses: &[String], remote_ip: std::net::IpAddr) -> Result<std::net::IpAddr> {
+ use anyhow::Context;
+
+ let family_is_v4 = remote_ip.is_ipv4();
+ addresses
+ .iter()
+ .filter_map(|cidr| cidr.split('/').next())
+ .filter_map(|ip| ip.parse::<std::net::IpAddr>().ok())
+ .find(|ip| ip.is_ipv4() == family_is_v4)
+ .with_context(|| {
+ format!(
+ "no local {} tailnet address found in daemon config {:?}",
+ if family_is_v4 { "IPv4" } else { "IPv6" },
+ addresses
+ )
+ })
+}
+
+#[cfg(any(target_os = "linux", target_vendor = "apple"))]
+fn select_tailnet_local_socket(
+ addresses: &[String],
+ remote_ip: std::net::IpAddr,
+) -> Result<std::net::SocketAddr> {
+ use rand::Rng;
+
+ let local_ip = select_tailnet_local_ip(addresses, remote_ip)?;
+ let port = rand::thread_rng().gen_range(40000..50000);
+ Ok(std::net::SocketAddr::new(local_ip, port))
+}
+
+#[cfg(any(target_os = "linux", target_vendor = "apple"))]
+struct IcmpEchoReply {
+ source: std::net::IpAddr,
+ destination: std::net::IpAddr,
+ payload: Vec<u8>,
+}
+
+#[cfg(any(target_os = 
"linux", target_vendor = "apple"))] +fn build_icmp_echo_request( + source: std::net::IpAddr, + destination: std::net::IpAddr, + identifier: u16, + sequence: u16, + payload: &[u8], +) -> Result> { + use anyhow::bail; + + let (source, destination) = match (source, destination) { + (std::net::IpAddr::V4(source), std::net::IpAddr::V4(destination)) => (source, destination), + _ => bail!("tailnet ping currently supports IPv4 only"), + }; + + let mut icmp = Vec::with_capacity(8 + payload.len()); + icmp.push(8); + icmp.push(0); + icmp.extend_from_slice(&[0, 0]); + icmp.extend_from_slice(&identifier.to_be_bytes()); + icmp.extend_from_slice(&sequence.to_be_bytes()); + icmp.extend_from_slice(payload); + let icmp_checksum = internet_checksum(&icmp); + icmp[2..4].copy_from_slice(&icmp_checksum.to_be_bytes()); + + let total_len = 20 + icmp.len(); + let mut packet = Vec::with_capacity(total_len); + packet.push(0x45); + packet.push(0); + packet.extend_from_slice(&(total_len as u16).to_be_bytes()); + packet.extend_from_slice(&0u16.to_be_bytes()); + packet.extend_from_slice(&0u16.to_be_bytes()); + packet.push(64); + packet.push(1); + packet.extend_from_slice(&[0, 0]); + packet.extend_from_slice(&source.octets()); + packet.extend_from_slice(&destination.octets()); + let header_checksum = internet_checksum(&packet); + packet[10..12].copy_from_slice(&header_checksum.to_be_bytes()); + packet.extend_from_slice(&icmp); + Ok(packet) +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +fn parse_icmp_echo_reply( + packet: &[u8], + local_ip: std::net::IpAddr, + remote_ip: std::net::IpAddr, + identifier: u16, + sequence: u16, +) -> Result> { + use anyhow::bail; + + let (local_ip, remote_ip) = match (local_ip, remote_ip) { + (std::net::IpAddr::V4(local_ip), std::net::IpAddr::V4(remote_ip)) => (local_ip, remote_ip), + _ => bail!("tailnet ping currently supports IPv4 only"), + }; + + if packet.len() < 20 { + return Ok(None); + } + let version = packet[0] >> 4; + if version != 4 { + 
return Ok(None); + } + let ihl = (packet[0] & 0x0f) as usize * 4; + if packet.len() < ihl + 8 { + return Ok(None); + } + if packet[9] != 1 { + return Ok(None); + } + + let source = std::net::Ipv4Addr::new(packet[12], packet[13], packet[14], packet[15]); + let destination = std::net::Ipv4Addr::new(packet[16], packet[17], packet[18], packet[19]); + if source != remote_ip || destination != local_ip { + return Ok(None); + } + + let icmp = &packet[ihl..]; + if icmp[0] != 0 || icmp[1] != 0 { + return Ok(None); + } + let reply_identifier = u16::from_be_bytes([icmp[4], icmp[5]]); + let reply_sequence = u16::from_be_bytes([icmp[6], icmp[7]]); + if reply_identifier != identifier || reply_sequence != sequence { + return Ok(None); + } + + Ok(Some(IcmpEchoReply { + source: std::net::IpAddr::V4(source), + destination: std::net::IpAddr::V4(destination), + payload: icmp[8..].to_vec(), + })) +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +fn internet_checksum(bytes: &[u8]) -> u16 { + let mut sum = 0u32; + let mut chunks = bytes.chunks_exact(2); + for chunk in &mut chunks { + sum += u16::from_be_bytes([chunk[0], chunk[1]]) as u32; + } + if let Some(&last) = chunks.remainder().first() { + sum += (last as u32) << 8; + } + while (sum >> 16) != 0 { + sum = (sum & 0xffff) + (sum >> 16); + } + !(sum as u16) +} + #[cfg(target_os = "linux")] async fn try_tor_exec(payload_path: &str, command: Vec) -> Result<()> { let exit_code = usernet::run_exec(usernet::ExecInvocation { @@ -348,6 +773,14 @@ async fn main() -> Result<()> { Commands::NetworkList => try_network_list().await?, Commands::NetworkReorder(args) => try_network_reorder(args.id, args.index).await?, Commands::NetworkDelete(args) => try_network_delete(args.id).await?, + Commands::TailnetDiscover(args) => try_tailnet_discover(&args.email).await?, + Commands::TailnetProbe(args) => try_tailnet_probe(&args.authority).await?, + Commands::TailnetPing(args) => { + try_tailnet_ping(&args.remote, &args.payload, 
args.timeout_ms).await? + } + Commands::TailnetUdpEcho(args) => { + try_tailnet_udp_echo(&args.remote, &args.message, args.timeout_ms).await? + } #[cfg(target_os = "linux")] Commands::Exec(args) => { try_exec( From 9e3e8fa7834bc09a2152feba7ae45f7e38784810 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sat, 4 Apr 2026 22:20:55 -0700 Subject: [PATCH 14/59] Use upstream nsc-autoscaler on burrow forge --- flake.lock | 26 ++- flake.nix | 9 +- nixos/README.md | 4 +- nixos/hosts/burrow-forge/default.nix | 2 +- nixos/modules/burrow-forge.nix | 2 +- nixos/modules/burrow-forgejo-nsc.nix | 234 --------------------------- 6 files changed, 36 insertions(+), 241 deletions(-) delete mode 100644 nixos/modules/burrow-forgejo-nsc.nix diff --git a/flake.lock b/flake.lock index 1bafc37..0067dab 100644 --- a/flake.lock +++ b/flake.lock @@ -123,13 +123,37 @@ "url": "https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable" } }, + "nsc-autoscaler": { + "inputs": { + "flake-utils": [ + "flake-utils" + ], + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1775221037, + "narHash": "sha256-tv6Y3cqn76PEyZpSMMItVW96KKIboovBWTOv5Lt7PXg=", + "ref": "refs/heads/main", + "rev": "2c485752fde28ec3be2f228b571d1906f4bcf917", + "revCount": 10, + "type": "git", + "url": "https://compatible.systems/conrad/nsc-autoscaler.git" + }, + "original": { + "type": "git", + "url": "https://compatible.systems/conrad/nsc-autoscaler.git" + } + }, "root": { "inputs": { "agenix": "agenix", "disko": "disko", "flake-utils": "flake-utils", "hcloud-upload-image-src": "hcloud-upload-image-src", - "nixpkgs": "nixpkgs" + "nixpkgs": "nixpkgs", + "nsc-autoscaler": "nsc-autoscaler" } }, "systems": { diff --git a/flake.nix b/flake.nix index 5814c19..1e91dcc 100644 --- a/flake.nix +++ b/flake.nix @@ -12,13 +12,18 @@ url = "tarball+https://codeload.github.com/nix-community/disko/tar.gz/master"; inputs.nixpkgs.follows = "nixpkgs"; }; + nsc-autoscaler = { + url = 
"git+https://compatible.systems/conrad/nsc-autoscaler.git"; + inputs.nixpkgs.follows = "nixpkgs"; + inputs.flake-utils.follows = "flake-utils"; + }; hcloud-upload-image-src = { url = "tarball+https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0"; flake = false; }; }; - outputs = { self, nixpkgs, flake-utils, agenix, disko, hcloud-upload-image-src }: + outputs = { self, nixpkgs, flake-utils, agenix, disko, nsc-autoscaler, hcloud-upload-image-src }: let supportedSystems = [ "x86_64-linux" @@ -175,7 +180,7 @@ // { nixosModules.burrow-forge = import ./nixos/modules/burrow-forge.nix; nixosModules.burrow-forge-runner = import ./nixos/modules/burrow-forge-runner.nix; - nixosModules.burrow-forgejo-nsc = import ./nixos/modules/burrow-forgejo-nsc.nix; + nixosModules.burrow-forgejo-nsc = nsc-autoscaler.nixosModules.default; nixosModules.burrow-authentik = import ./nixos/modules/burrow-authentik.nix; nixosModules.burrow-headscale = import ./nixos/modules/burrow-headscale.nix; diff --git a/nixos/README.md b/nixos/README.md index 07b421d..c79d8ce 100644 --- a/nixos/README.md +++ b/nixos/README.md @@ -9,7 +9,7 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. 
B - `hosts/burrow-forge/default.nix`: host entrypoint - `modules/burrow-forge.nix`: Forgejo, Caddy, PostgreSQL, and admin bootstrap module - `modules/burrow-forge-runner.nix`: Forgejo Actions runner and agent identity bootstrap -- `modules/burrow-forgejo-nsc.nix`: Namespace-backed ephemeral Forgejo runner services +- upstream `compatible.systems/conrad/nsc-autoscaler`: Namespace-backed ephemeral Forgejo runner module consumed via the Burrow flake input - `modules/burrow-authentik.nix`: minimal Authentik IdP for Burrow control planes - `modules/burrow-headscale.nix`: Headscale control plane rooted in Authentik OIDC - `../secrets.nix`: agenix recipient map for tracked Burrow forge secrets @@ -32,7 +32,7 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B 3. Run `Scripts/bootstrap-forge-intake.sh` to place the Forgejo bootstrap password file and automation SSH key under `/var/lib/burrow/intake/`. 4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account. 5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent `. -6. Run `Scripts/provision-forgejo-nsc.sh` locally, then `Scripts/sync-forgejo-nsc-config.sh` to place the Namespace dispatcher/autoscaler runtime inputs under `/var/lib/burrow/intake/`. +6. Run `Scripts/provision-forgejo-nsc.sh` locally, then `Scripts/sync-forgejo-nsc-config.sh` to place the raw Namespace dispatcher/autoscaler runtime inputs under `/var/lib/burrow/intake/` for the upstream `services.forgejo-nsc` module. 7. Ensure `/var/lib/agenix/agenix.key` exists on the host, encrypt `secrets/infra/authentik.env.age`, `secrets/infra/authentik-google-client-id.age`, `secrets/infra/authentik-google-client-secret.age`, `secrets/infra/forgejo-oidc-client-secret.age`, and `secrets/infra/headscale-oidc-client-secret.age`, and let agenix materialize them under `/run/agenix/`. 8. 
Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, `auth.burrow.net`, `ts.burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME. 9. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace. diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 6c106f4..67c87ec 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -104,7 +104,7 @@ in sshPrivateKeyFile = "/var/lib/burrow/intake/agent_at_burrow_net_ed25519"; }; - services.burrow.forgejoNsc = { + services.forgejo-nsc = { enable = true; nscTokenFile = "/var/lib/burrow/intake/forgejo_nsc_token.txt"; dispatcher = { diff --git a/nixos/modules/burrow-forge.nix b/nixos/modules/burrow-forge.nix index 0d0f5c8..d74fc65 100644 --- a/nixos/modules/burrow-forge.nix +++ b/nixos/modules/burrow-forge.nix @@ -271,7 +271,7 @@ in ''; } // lib.optionalAttrs ( - config.services.burrow.forgejoNsc.enable && config.services.burrow.forgejoNsc.autoscaler.enable + config.services.forgejo-nsc.enable && config.services.forgejo-nsc.autoscaler.enable ) { "${cfg.nscAutoscalerDomain}".extraConfig = '' encode gzip zstd diff --git a/nixos/modules/burrow-forgejo-nsc.nix b/nixos/modules/burrow-forgejo-nsc.nix deleted file mode 100644 index ba116f7..0000000 --- a/nixos/modules/burrow-forgejo-nsc.nix +++ /dev/null @@ -1,234 +0,0 @@ -{ config, lib, pkgs, self, ... 
}: - -let - inherit (lib) - mkEnableOption - mkIf - mkOption - types - mkAfter - mkDefault - optional - optionalAttrs - optionalString - ; - - cfg = config.services.burrow.forgejoNsc; - dispatcherRuntimeConfig = "${cfg.stateDir}/dispatcher.yaml"; - autoscalerRuntimeConfig = "${cfg.stateDir}/autoscaler.yaml"; - - pendingCheck = configPath: pkgs.writeShellScript "forgejo-nsc-check-pending" '' - set -euo pipefail - if ${pkgs.gnugrep}/bin/grep -q 'PENDING-' '${configPath}'; then - echo "forgejo-nsc config still contains placeholder values (PENDING-); update ${configPath} before starting." >&2 - exit 1 - fi - ''; - - nscTokenPath = "${cfg.stateDir}/nsc.token"; - tokenSync = optionalString (cfg.nscTokenFile != null) '' - install -m 600 ${lib.escapeShellArg cfg.nscTokenFile} ${lib.escapeShellArg nscTokenPath} - chown ${cfg.user}:${cfg.group} ${nscTokenPath} - chmod 600 ${nscTokenPath} - ''; - dispatcherConfigSync = optionalString (cfg.dispatcher.configFile != null) '' - install -m 400 ${lib.escapeShellArg cfg.dispatcher.configFile} ${lib.escapeShellArg dispatcherRuntimeConfig} - chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg dispatcherRuntimeConfig} - chmod 400 ${lib.escapeShellArg dispatcherRuntimeConfig} - ''; - autoscalerConfigSync = optionalString (cfg.autoscaler.configFile != null) '' - install -m 400 ${lib.escapeShellArg cfg.autoscaler.configFile} ${lib.escapeShellArg autoscalerRuntimeConfig} - chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg autoscalerRuntimeConfig} - chmod 400 ${lib.escapeShellArg autoscalerRuntimeConfig} - ''; - - dispatcherEnv = - cfg.extraEnv - // optionalAttrs (cfg.nscTokenFile != null) { NSC_TOKEN_FILE = nscTokenPath; } - // optionalAttrs (cfg.nscTokenSpecFile != null) { NSC_TOKEN_SPEC_FILE = cfg.nscTokenSpecFile; } - // optionalAttrs (cfg.nscEndpoint != null) { NSC_ENDPOINT = cfg.nscEndpoint; }; -in { - options.services.burrow.forgejoNsc = { - enable = mkEnableOption "Forgejo Namespace Cloud runner dispatcher"; - - user = mkOption { 
- type = types.str; - default = "forgejo-nsc"; - description = "System user that runs the forgejo-nsc services."; - }; - - group = mkOption { - type = types.str; - default = "forgejo-nsc"; - description = "System group for the forgejo-nsc services."; - }; - - stateDir = mkOption { - type = types.str; - default = "/var/lib/forgejo-nsc"; - description = "State directory for the dispatcher/autoscaler."; - }; - - nscTokenFile = mkOption { - type = types.nullOr types.str; - default = null; - description = "Optional NSC token file (exported as NSC_TOKEN_FILE)."; - }; - - nscTokenSpecFile = mkOption { - type = types.nullOr types.str; - default = null; - description = "Optional NSC token spec file (exported as NSC_TOKEN_SPEC_FILE)."; - }; - - nscEndpoint = mkOption { - type = types.nullOr types.str; - default = null; - description = "Optional NSC endpoint override (exported as NSC_ENDPOINT)."; - }; - - extraEnv = mkOption { - type = types.attrsOf types.str; - default = { }; - description = "Extra environment variables injected into the services."; - }; - - nscPackage = mkOption { - type = types.nullOr types.package; - default = self.packages.${pkgs.stdenv.hostPlatform.system}.nsc or null; - description = "Optional nsc CLI package added to the service PATH."; - }; - - dispatcher = { - enable = mkOption { - type = types.bool; - default = true; - description = "Enable the forgejo-nsc dispatcher service."; - }; - - package = mkOption { - type = types.package; - default = self.packages.${pkgs.stdenv.hostPlatform.system}.forgejo-nsc-dispatcher; - description = "Package providing the forgejo-nsc dispatcher binary."; - }; - - configFile = mkOption { - type = types.nullOr types.str; - default = null; - description = "Host-local YAML config file for the dispatcher."; - }; - - allowPending = mkOption { - type = types.bool; - default = false; - description = "Allow placeholder values (PENDING-) in the dispatcher config."; - }; - }; - - autoscaler = { - enable = mkOption { - type = 
types.bool; - default = false; - description = "Enable the forgejo-nsc autoscaler service."; - }; - - package = mkOption { - type = types.package; - default = self.packages.${pkgs.stdenv.hostPlatform.system}.forgejo-nsc-autoscaler; - description = "Package providing the forgejo-nsc autoscaler binary."; - }; - - configFile = mkOption { - type = types.nullOr types.str; - default = null; - description = "Host-local YAML config file for the autoscaler."; - }; - - allowPending = mkOption { - type = types.bool; - default = false; - description = "Allow placeholder values (PENDING-) in the autoscaler config."; - }; - }; - }; - - config = mkIf cfg.enable { - assertions = [ - { - assertion = (!cfg.dispatcher.enable) || cfg.dispatcher.configFile != null; - message = "services.burrow.forgejoNsc.dispatcher.configFile must be set when the dispatcher is enabled."; - } - { - assertion = (!cfg.autoscaler.enable) || cfg.autoscaler.configFile != null; - message = "services.burrow.forgejoNsc.autoscaler.configFile must be set when the autoscaler is enabled."; - } - ]; - - users.groups.${cfg.group} = { }; - users.users.${cfg.user} = { - uid = mkDefault 2011; - isSystemUser = true; - group = cfg.group; - description = "Forgejo Namespace Cloud runner services"; - home = cfg.stateDir; - createHome = true; - shell = pkgs.bashInteractive; - }; - - systemd.tmpfiles.rules = mkAfter [ - "d ${cfg.stateDir} 0750 ${cfg.user} ${cfg.group} - -" - ]; - - systemd.services.forgejo-nsc-dispatcher = mkIf cfg.dispatcher.enable { - description = "Forgejo Namespace Cloud dispatcher"; - wantedBy = [ "multi-user.target" ]; - after = [ "network-online.target" ]; - wants = [ "network-online.target" ]; - unitConfig.ConditionPathExists = - optional (cfg.dispatcher.configFile != null) cfg.dispatcher.configFile - ++ optional (cfg.nscTokenFile != null) cfg.nscTokenFile; - serviceConfig = { - Type = "simple"; - User = cfg.user; - Group = cfg.group; - WorkingDirectory = cfg.stateDir; - ExecStart = 
"${cfg.dispatcher.package}/bin/forgejo-nsc-dispatcher --config ${dispatcherRuntimeConfig}"; - Restart = "on-failure"; - RestartSec = 5; - }; - path = lib.optional (cfg.nscPackage != null) cfg.nscPackage; - environment = dispatcherEnv; - preStart = lib.concatStringsSep "\n" (lib.filter (s: s != "") [ - (optionalString (!cfg.dispatcher.allowPending) (pendingCheck cfg.dispatcher.configFile)) - dispatcherConfigSync - tokenSync - ]); - }; - - systemd.services.forgejo-nsc-autoscaler = mkIf cfg.autoscaler.enable { - description = "Forgejo Namespace Cloud autoscaler"; - wantedBy = [ "multi-user.target" ]; - after = [ "network-online.target" "forgejo-nsc-dispatcher.service" ]; - wants = [ "network-online.target" ]; - unitConfig.ConditionPathExists = - optional (cfg.autoscaler.configFile != null) cfg.autoscaler.configFile - ++ optional (cfg.nscTokenFile != null) cfg.nscTokenFile; - serviceConfig = { - Type = "simple"; - User = cfg.user; - Group = cfg.group; - WorkingDirectory = cfg.stateDir; - ExecStart = "${cfg.autoscaler.package}/bin/forgejo-nsc-autoscaler --config ${autoscalerRuntimeConfig}"; - Restart = "on-failure"; - RestartSec = 5; - }; - path = lib.optional (cfg.nscPackage != null) cfg.nscPackage; - environment = dispatcherEnv; - preStart = lib.concatStringsSep "\n" (lib.filter (s: s != "") [ - (optionalString (!cfg.autoscaler.allowPending) (pendingCheck cfg.autoscaler.configFile)) - autoscalerConfigSync - tokenSync - ]); - }; - }; -} From b15b6624cbeaba430a48a9e4c09ef963bbe45bd3 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sat, 4 Apr 2026 22:21:03 -0700 Subject: [PATCH 15/59] Add Forgejo namespace release workflow --- .forgejo/workflows/release.yml | 60 ++++++++++ Scripts/ci/build-release-artifacts.sh | 20 ++++ Scripts/ci/ensure-nix.sh | 157 ++++++++++++++++++++++++++ Scripts/ci/publish-forgejo-release.sh | 65 +++++++++++ 4 files changed, 302 insertions(+) create mode 100644 .forgejo/workflows/release.yml create mode 100755 
Scripts/ci/build-release-artifacts.sh create mode 100755 Scripts/ci/ensure-nix.sh create mode 100755 Scripts/ci/publish-forgejo-release.sh diff --git a/.forgejo/workflows/release.yml b/.forgejo/workflows/release.yml new file mode 100644 index 0000000..3d1e92a --- /dev/null +++ b/.forgejo/workflows/release.yml @@ -0,0 +1,60 @@ +name: Release + +on: + push: + tags: + - "v*" + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: false + +jobs: + release: + name: Release Build + runs-on: namespace-profile-linux-medium + steps: + - name: Checkout + uses: https://code.forgejo.org/actions/checkout@v4 + with: + token: ${{ github.token }} + fetch-depth: 0 + + - name: Bootstrap Nix + shell: bash + run: | + set -euo pipefail + chmod +x Scripts/ci/ensure-nix.sh + Scripts/ci/ensure-nix.sh + + - name: Build release artifacts + shell: bash + env: + RELEASE_REF: ${{ github.ref_name }} + run: | + set -euo pipefail + ref="${RELEASE_REF:-manual-${GITHUB_SHA::7}}" + export RELEASE_REF="${ref}" + chmod +x Scripts/ci/build-release-artifacts.sh + nix develop .#ci -c Scripts/ci/build-release-artifacts.sh + + - name: Upload release artifacts + uses: https://code.forgejo.org/actions/upload-artifact@v4 + with: + name: burrow-release-${{ github.ref_name }} + path: dist/* + if-no-files-found: error + + - name: Publish Forgejo release + if: startsWith(github.ref, 'refs/tags/') + shell: bash + env: + RELEASE_TAG: ${{ github.ref_name }} + API_URL: ${{ github.api_url }} + REPOSITORY: ${{ github.repository }} + TOKEN: ${{ github.token }} + run: | + set -euo pipefail + chmod +x Scripts/ci/publish-forgejo-release.sh + nix develop .#ci -c Scripts/ci/publish-forgejo-release.sh diff --git a/Scripts/ci/build-release-artifacts.sh b/Scripts/ci/build-release-artifacts.sh new file mode 100755 index 0000000..20b4c06 --- /dev/null +++ b/Scripts/ci/build-release-artifacts.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +set -euo pipefail + +repo_root="$(cd -- 
"$(dirname -- "${BASH_SOURCE[0]}")/../.." && pwd)" +cd "${repo_root}" + +release_ref="${RELEASE_REF:-manual-${GITHUB_SHA:-unknown}}" +target="x86_64-unknown-linux-gnu" +out_dir="${repo_root}/dist" +staging="${out_dir}/burrow-${release_ref}-${target}" + +mkdir -p "${staging}" + +cargo build --locked --release -p burrow --bin burrow +install -m 0755 target/release/burrow "${staging}/burrow" +cp README.md "${staging}/README.md" + +tarball="${out_dir}/burrow-${release_ref}-${target}.tar.gz" +tar -C "${out_dir}" -czf "${tarball}" "$(basename "${staging}")" +shasum -a 256 "${tarball}" > "${tarball}.sha256" diff --git a/Scripts/ci/ensure-nix.sh b/Scripts/ci/ensure-nix.sh new file mode 100755 index 0000000..14be895 --- /dev/null +++ b/Scripts/ci/ensure-nix.sh @@ -0,0 +1,157 @@ +#!/usr/bin/env bash +set -euo pipefail + +source_nix_profile() { + local candidate + for candidate in \ + "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" \ + "${HOME}/.nix-profile/etc/profile.d/nix.sh" + do + if [[ -f "${candidate}" ]]; then + # shellcheck disable=SC1090 + . "${candidate}" + return 0 + fi + done + return 1 +} + +linux_cp_supports_preserve() { + cp --help 2>&1 | grep -q -- '--preserve' +} + +ensure_root_owned_home() { + if [[ "$(id -u)" -ne 0 ]]; then + return 0 + fi + + if [[ ! -d "${HOME}" ]] || [[ ! -O "${HOME}" ]]; then + export HOME="/root" + fi + + mkdir -p "${HOME}" +} + +ensure_linux_nixbld_accounts() { + if [[ "$(id -u)" -ne 0 ]]; then + return 0 + fi + + if command -v getent >/dev/null 2>&1 && getent group nixbld >/dev/null 2>&1; then + return 0 + fi + + if command -v addgroup >/dev/null 2>&1 && ! 
command -v groupadd >/dev/null 2>&1; then + addgroup -S nixbld >/dev/null 2>&1 || true + for i in $(seq 1 10); do + adduser -S -D -H -h /var/empty -s /sbin/nologin -G nixbld "nixbld${i}" >/dev/null 2>&1 || true + done + return 0 + fi + + if command -v groupadd >/dev/null 2>&1; then + groupadd -r nixbld >/dev/null 2>&1 || true + for i in $(seq 1 10); do + useradd \ + --system \ + --no-create-home \ + --home-dir /var/empty \ + --shell /usr/sbin/nologin \ + --gid nixbld \ + "nixbld${i}" >/dev/null 2>&1 || true + done + return 0 + fi + + echo "linux nix bootstrap requires nixbld group creation support" >&2 + exit 1 +} + +ensure_linux_nix_bootstrap_prereqs() { + if linux_cp_supports_preserve; then + ensure_root_owned_home + ensure_linux_nixbld_accounts + return 0 + fi + + if command -v apk >/dev/null 2>&1; then + apk add --no-cache coreutils xz >/dev/null + elif command -v apt-get >/dev/null 2>&1; then + export DEBIAN_FRONTEND=noninteractive + apt-get update -y >/dev/null + apt-get install -y coreutils xz-utils >/dev/null + elif command -v dnf >/dev/null 2>&1; then + dnf install -y coreutils xz >/dev/null + elif command -v yum >/dev/null 2>&1; then + yum install -y coreutils xz >/dev/null + else + echo "linux nix bootstrap requires GNU cp but no supported package manager was found" >&2 + exit 1 + fi + + linux_cp_supports_preserve || { + echo "linux nix bootstrap still lacks GNU cp after installing prerequisites" >&2 + exit 1 + } + + ensure_root_owned_home + ensure_linux_nixbld_accounts +} + +if ! command -v nix >/dev/null 2>&1; then + if ! 
command -v curl >/dev/null 2>&1; then + echo "curl is required to install nix" >&2 + exit 1 + fi + + case "$(uname -s)" in + Linux) + ensure_linux_nix_bootstrap_prereqs + curl -fsSL https://nixos.org/nix/install | sh -s -- --no-daemon + ;; + Darwin) + installer="$(mktemp -t burrow-nix.XXXXXX)" + trap 'rm -f "${installer}"' EXIT + curl -fsSL -o "${installer}" https://install.determinate.systems/nix + chmod +x "${installer}" + if command -v sudo >/dev/null 2>&1; then + if sudo -n true 2>/dev/null; then + sudo -n sh "${installer}" install --no-confirm + else + sudo sh "${installer}" install --no-confirm + fi + else + sh "${installer}" install --no-confirm + fi + ;; + *) + echo "unsupported platform for nix bootstrap: $(uname -s)" >&2 + exit 1 + ;; + esac +fi + +source_nix_profile || true +export PATH="${HOME}/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}" + +config_root="${XDG_CONFIG_HOME:-$HOME/.config}" +config_file="${config_root}/nix/nix.conf" +if [[ -e "${config_file}" && ! 
-w "${config_file}" ]]; then + config_root="$(mktemp -d -t burrow-nix-config.XXXXXX)" + export XDG_CONFIG_HOME="${config_root}" + config_file="${XDG_CONFIG_HOME}/nix/nix.conf" +fi + +mkdir -p "$(dirname -- "${config_file}")" +cat > "${config_file}" <<'EOF' +experimental-features = nix-command flakes +sandbox = true +fallback = true +substituters = https://cache.nixos.org +trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= +EOF + +command -v nix >/dev/null 2>&1 || { + echo "nix is still unavailable after bootstrap" >&2 + exit 1 +} diff --git a/Scripts/ci/publish-forgejo-release.sh b/Scripts/ci/publish-forgejo-release.sh new file mode 100755 index 0000000..338f71b --- /dev/null +++ b/Scripts/ci/publish-forgejo-release.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash +set -euo pipefail + +: "${API_URL:?API_URL is required}" +: "${REPOSITORY:?REPOSITORY is required}" +: "${RELEASE_TAG:?RELEASE_TAG is required}" +: "${TOKEN:?TOKEN is required}" + +release_api="${API_URL}/repos/${REPOSITORY}/releases" +tag_api="${release_api}/tags/${RELEASE_TAG}" +release_json="$(mktemp)" +create_json="$(mktemp)" +trap 'rm -f "${release_json}" "${create_json}"' EXIT + +status="$( + curl -sS -o "${release_json}" -w '%{http_code}' \ + -H "Authorization: token ${TOKEN}" \ + "${tag_api}" +)" + +if [[ "${status}" == "404" ]]; then + jq -n \ + --arg tag "${RELEASE_TAG}" \ + --arg name "Burrow ${RELEASE_TAG}" \ + '{ + tag_name: $tag, + target_commitish: $tag, + name: $name, + body: "Automated prerelease built on Forgejo Namespace runners.", + draft: false, + prerelease: true + }' > "${create_json}" + + curl -fsS \ + -H "Authorization: token ${TOKEN}" \ + -H "Content-Type: application/json" \ + -d @"${create_json}" \ + "${release_api}" > "${release_json}" +elif [[ "${status}" != "200" ]]; then + echo "failed to query Forgejo release for ${RELEASE_TAG} (HTTP ${status})" >&2 + cat "${release_json}" >&2 + exit 1 +fi + +release_id="$(jq -r '.id' "${release_json}")" +if [[ 
-z "${release_id}" || "${release_id}" == "null" ]]; then + echo "Forgejo release payload is missing an id" >&2 + cat "${release_json}" >&2 + exit 1 +fi + +for file in dist/*; do + name="$(basename "${file}")" + asset_id="$(jq -r --arg name "${name}" '.assets[]? | select(.name == $name) | .id' "${release_json}" | head -n1)" + if [[ -n "${asset_id}" ]]; then + curl -fsS -X DELETE \ + -H "Authorization: token ${TOKEN}" \ + "${release_api}/${release_id}/assets/${asset_id}" >/dev/null + fi + + curl -fsS \ + -H "Authorization: token ${TOKEN}" \ + -F "attachment=@${file}" \ + "${release_api}/${release_id}/assets?name=${name}" >/dev/null +done From c8aa036ade560b76c128700b1e0922186a9b8626 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sat, 4 Apr 2026 23:53:33 -0700 Subject: [PATCH 16/59] Add Tailscale Authentik OIDC app --- Scripts/authentik-sync-tailscale-oidc.sh | 251 ++++++++++++++++++ nixos/hosts/burrow-forge/default.nix | 7 + nixos/modules/burrow-authentik.nix | 73 +++++ nixos/modules/burrow-forge.nix | 4 +- secrets.nix | 1 + .../infra/tailscale-oidc-client-secret.age | 10 + 6 files changed, 344 insertions(+), 2 deletions(-) create mode 100755 Scripts/authentik-sync-tailscale-oidc.sh create mode 100644 secrets/infra/tailscale-oidc-client-secret.age diff --git a/Scripts/authentik-sync-tailscale-oidc.sh b/Scripts/authentik-sync-tailscale-oidc.sh new file mode 100755 index 0000000..54564ad --- /dev/null +++ b/Scripts/authentik-sync-tailscale-oidc.sh @@ -0,0 +1,251 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +application_slug="${AUTHENTIK_TAILSCALE_APPLICATION_SLUG:-tailscale}" +application_name="${AUTHENTIK_TAILSCALE_APPLICATION_NAME:-Tailscale}" +provider_name="${AUTHENTIK_TAILSCALE_PROVIDER_NAME:-Tailscale}" +template_slug="${AUTHENTIK_TAILSCALE_TEMPLATE_SLUG:-ts}" +client_id="${AUTHENTIK_TAILSCALE_CLIENT_ID:-tailscale.burrow.net}" 
+client_secret="${AUTHENTIK_TAILSCALE_CLIENT_SECRET:-}" +launch_url="${AUTHENTIK_TAILSCALE_LAUNCH_URL:-https://login.tailscale.com/start/oidc}" +redirect_uris_json="${AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON:-[ + \"https://login.tailscale.com/a/oauth_response\" +]}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-tailscale-oidc.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + AUTHENTIK_TAILSCALE_CLIENT_SECRET + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_TAILSCALE_APPLICATION_SLUG + AUTHENTIK_TAILSCALE_APPLICATION_NAME + AUTHENTIK_TAILSCALE_PROVIDER_NAME + AUTHENTIK_TAILSCALE_TEMPLATE_SLUG + AUTHENTIK_TAILSCALE_CLIENT_ID + AUTHENTIK_TAILSCALE_LAUNCH_URL + AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +if [[ -z "$client_secret" || "$client_secret" == PENDING* ]]; then + echo "Tailscale OIDC client secret is not configured; skipping Authentik Tailscale sync." >&2 + exit 0 +fi + +if ! 
printf '%s' "$redirect_uris_json" | jq -e 'type == "array" and length > 0' >/dev/null; then + echo "error: AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON must be a non-empty JSON array" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +api_with_status() { + local method="$1" + local path="$2" + local data="${3:-}" + local response_file status + + response_file="$(mktemp)" + trap 'rm -f "$response_file"' RETURN + + if [[ -n "$data" ]]; then + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + )" + else + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + )" + fi + + printf '%s\n' "$status" + cat "$response_file" +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +wait_for_authentik + +template_provider="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -c --arg template_slug "$template_slug" '.results[]? 
| select(.assigned_application_slug == $template_slug)' \ + | head -n1 +)" + +if [[ -z "$template_provider" ]]; then + echo "error: could not resolve the Authentik OAuth provider template ${template_slug}" >&2 + exit 1 +fi + +authorization_flow="$(printf '%s\n' "$template_provider" | jq -r '.authorization_flow')" +invalidation_flow="$(printf '%s\n' "$template_provider" | jq -r '.invalidation_flow')" +property_mappings="$(printf '%s\n' "$template_provider" | jq -c '.property_mappings')" +signing_key="$(printf '%s\n' "$template_provider" | jq -r '.signing_key')" + +provider_payload="$( + jq -n \ + --arg name "$provider_name" \ + --arg authorization_flow "$authorization_flow" \ + --arg invalidation_flow "$invalidation_flow" \ + --arg client_id "$client_id" \ + --arg client_secret "$client_secret" \ + --arg signing_key "$signing_key" \ + --argjson property_mappings "$property_mappings" \ + --argjson redirect_uris "$redirect_uris_json" \ + '{ + name: $name, + authorization_flow: $authorization_flow, + invalidation_flow: $invalidation_flow, + client_type: "confidential", + client_id: $client_id, + client_secret: $client_secret, + include_claims_in_id_token: true, + redirect_uris: ($redirect_uris | map({matching_mode: "strict", url: .})), + property_mappings: $property_mappings, + signing_key: $signing_key, + issuer_mode: "per_provider", + sub_mode: "hashed_user_id" + }' +)" + +existing_provider="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -c \ + --arg application_slug "$application_slug" \ + --arg provider_name "$provider_name" \ + '.results[]? 
| select(.assigned_application_slug == $application_slug or .name == $provider_name)' \ + | head -n1 +)" + +if [[ -n "$existing_provider" ]]; then + provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')" + api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$provider_payload" >/dev/null +else + provider_pk="$( + api POST "/api/v3/providers/oauth2/" "$provider_payload" \ + | jq -r '.pk // empty' + )" +fi + +if [[ -z "${provider_pk:-}" ]]; then + echo "error: Tailscale OIDC provider did not return a primary key" >&2 + exit 1 +fi + +application_payload="$( + jq -n \ + --arg name "$application_name" \ + --arg slug "$application_slug" \ + --arg provider "$provider_pk" \ + --arg launch_url "$launch_url" \ + '{ + name: $name, + slug: $slug, + provider: ($provider | tonumber), + meta_launch_url: $launch_url, + open_in_new_tab: true, + policy_engine_mode: "any" + }' +)" + +existing_application="$( + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -c --arg slug "$application_slug" '.results[]? 
| select(.slug == $slug)' \ + | head -n1 +)" + +if [[ -n "$existing_application" ]]; then + application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" +else + create_application_result="$( + api_with_status POST "/api/v3/core/applications/" "$application_payload" + )" + create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')" + create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')" + + if [[ "$create_application_status" =~ ^20[01]$ ]]; then + application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')" + elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e ' + (.slug // [] | index("Application with this slug already exists.")) != null + or (.provider // [] | index("Application with this provider already exists.")) != null + ' >/dev/null; then + application_pk="existing-duplicate" + else + printf '%s\n' "$create_application_body" >&2 + echo "error: could not reconcile Authentik application ${application_slug}" >&2 + exit 1 + fi +fi + +if [[ -z "${application_pk:-}" ]]; then + echo "error: Tailscale OIDC application did not return a primary key" >&2 + exit 1 +fi + +for _ in $(seq 1 30); do + if curl -fsS "${authentik_url}/application/o/${application_slug}/.well-known/openid-configuration" >/dev/null 2>&1; then + echo "Synced Authentik Tailscale OIDC application ${application_slug} (${application_name})." + exit 0 + fi + sleep 2 +done + +echo "warning: Tailscale OIDC issuer document for ${application_slug} was not immediately readable; keeping reconciled config." >&2 +echo "Synced Authentik Tailscale OIDC application ${application_slug} (${application_name})." 
diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 67c87ec..75b76d4 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -63,6 +63,12 @@ in group = "forgejo"; mode = "0440"; }; + age.secrets.burrowTailscaleOidcClientSecret = { + file = ../../../secrets/infra/tailscale-oidc-client-secret.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; age.secrets.burrowAuthentikGoogleClientId = { file = ../../../secrets/infra/authentik-google-client-id.age; owner = "root"; @@ -121,6 +127,7 @@ in envFile = config.age.secrets.burrowAuthentikEnv.path; forgejoClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path; headscaleClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path; + tailscaleClientSecretFile = config.age.secrets.burrowTailscaleOidcClientSecret.path; googleClientIDFile = config.age.secrets.burrowAuthentikGoogleClientId.path; googleClientSecretFile = config.age.secrets.burrowAuthentikGoogleClientSecret.path; googleLoginMode = "redirect"; diff --git a/nixos/modules/burrow-authentik.nix b/nixos/modules/burrow-authentik.nix index 478d0d9..6861f17 100644 --- a/nixos/modules/burrow-authentik.nix +++ b/nixos/modules/burrow-authentik.nix @@ -10,6 +10,7 @@ let dataVolume = "burrow-authentik-data:/data"; directorySyncScript = ../../Scripts/authentik-sync-burrow-directory.sh; forgejoOidcSyncScript = ../../Scripts/authentik-sync-forgejo-oidc.sh; + tailscaleOidcSyncScript = ../../Scripts/authentik-sync-tailscale-oidc.sh; googleSourceSyncScript = ../../Scripts/authentik-sync-google-source.sh; tailnetAuthFlowSyncScript = ../../Scripts/authentik-sync-tailnet-auth-flow.sh; authentikBlueprint = pkgs.writeText "burrow-authentik-blueprint.yaml" '' @@ -131,6 +132,24 @@ in description = "Authentik application slug for Forgejo."; }; + tailscaleProviderSlug = lib.mkOption { + type = lib.types.str; + default = "tailscale"; + description = "Authentik application 
slug for Tailscale custom OIDC sign-in."; + }; + + tailscaleClientId = lib.mkOption { + type = lib.types.str; + default = "tailscale.burrow.net"; + description = "Client ID Authentik should present to Tailscale."; + }; + + tailscaleClientSecretFile = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Host-local file containing the Authentik Tailscale OIDC client secret."; + }; + forgejoClientId = lib.mkOption { type = lib.types.str; default = "git.burrow.net"; @@ -313,6 +332,13 @@ in fi ''} + ${lib.optionalString (cfg.tailscaleClientSecretFile != null) '' + if [ ! -s ${lib.escapeShellArg cfg.tailscaleClientSecretFile} ]; then + echo "Tailscale client secret missing: ${cfg.tailscaleClientSecretFile}" >&2 + exit 1 + fi + ''} + install -d -m 0750 -o root -g root ${runtimeDir} ${blueprintDir} install -m 0644 -o root -g root ${authentikBlueprint} ${blueprintFile} @@ -634,6 +660,53 @@ EOF ''; }; + systemd.services.burrow-authentik-tailscale-oidc = lib.mkIf (cfg.tailscaleClientSecretFile != null) { + description = "Reconcile the Burrow Authentik Tailscale OIDC application"; + after = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + tailscaleOidcSyncScript + cfg.envFile + cfg.tailscaleClientSecretFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_TAILSCALE_APPLICATION_SLUG=${lib.escapeShellArg cfg.tailscaleProviderSlug} + export AUTHENTIK_TAILSCALE_APPLICATION_NAME=Tailscale + export AUTHENTIK_TAILSCALE_PROVIDER_NAME=Tailscale + export AUTHENTIK_TAILSCALE_TEMPLATE_SLUG=${lib.escapeShellArg cfg.headscaleProviderSlug} 
+ export AUTHENTIK_TAILSCALE_CLIENT_ID=${lib.escapeShellArg cfg.tailscaleClientId} + export AUTHENTIK_TAILSCALE_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.tailscaleClientSecretFile})" + export AUTHENTIK_TAILSCALE_LAUNCH_URL=https://login.tailscale.com/start/oidc + export AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON='["https://login.tailscale.com/a/oauth_response"]' + + ${pkgs.bash}/bin/bash ${tailscaleOidcSyncScript} + ''; + }; + services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' encode gzip zstd reverse_proxy 127.0.0.1:${toString cfg.port} diff --git a/nixos/modules/burrow-forge.nix b/nixos/modules/burrow-forge.nix index d74fc65..d733135 100644 --- a/nixos/modules/burrow-forge.nix +++ b/nixos/modules/burrow-forge.nix @@ -258,13 +258,13 @@ in "${cfg.siteDomain}".extraConfig = '' encode gzip zstd @oidcConfig path /.well-known/openid-configuration - redir @oidcConfig https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.forgejoProviderSlug}/.well-known/openid-configuration 308 + redir @oidcConfig https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.tailscaleProviderSlug}/.well-known/openid-configuration 308 @tailnetConfig path /.well-known/burrow-tailnet header @tailnetConfig Content-Type application/json respond @tailnetConfig "{\"domain\":\"${cfg.siteDomain}\",\"provider\":\"headscale\",\"authority\":\"https://${config.services.burrow.headscale.domain}\",\"oidc_issuer\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.headscaleProviderSlug}/\"}" 200 @webfinger path /.well-known/webfinger header @webfinger Content-Type application/jrd+json - respond @webfinger 
"{\"subject\":\"{query.resource}\",\"links\":[{\"rel\":\"http://openid.net/specs/connect/1.0/issuer\",\"href\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.forgejoProviderSlug}/\"},{\"rel\":\"https://burrow.net/rel/tailnet-control-server\",\"href\":\"https://${config.services.burrow.headscale.domain}\"}]}" 200 + respond @webfinger "{\"subject\":\"{query.resource}\",\"links\":[{\"rel\":\"http://openid.net/specs/connect/1.0/issuer\",\"href\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.tailscaleProviderSlug}/\"},{\"rel\":\"https://burrow.net/rel/tailnet-control-server\",\"href\":\"https://${config.services.burrow.headscale.domain}\"}]}" 200 @root path / redir @root ${homeRepoUrl} 308 respond 404 diff --git a/secrets.nix b/secrets.nix index 5a3ac8c..c0b9b53 100644 --- a/secrets.nix +++ b/secrets.nix @@ -17,4 +17,5 @@ in "secrets/infra/authentik-ui-test-password.age".publicKeys = uiTestRecipients; "secrets/infra/forgejo-oidc-client-secret.age".publicKeys = burrowForgeRecipients; "secrets/infra/headscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients; + "secrets/infra/tailscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients; } diff --git a/secrets/infra/tailscale-oidc-client-secret.age b/secrets/infra/tailscale-oidc-client-secret.age new file mode 100644 index 0000000..e88c2d1 --- /dev/null +++ b/secrets/infra/tailscale-oidc-client-secret.age @@ -0,0 +1,10 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q KfvLMiH7JHE6v74Pp//SqzBP8WU1MNy1/EcqsONTTQQ +Y6SFXWe/5Pru6+3vU6e67bRZDWDkukdfgEX7uQjB4Uw +-> ssh-ed25519 IrZmAg AFn7BP4FktUYH9QvNJPVDdNcEpJjYqmOrisvX9XGV08 +Zho+KNtk1vUQZ55j1xUHdswAj0T0Soji/HC6p1tsVcA +-> X25519 sv50iZjBijWKfp6I+LfRlEJ2sqnj5/2m0hRWz5NqLTk +Hdfvo+87zemSCFWDSlzkpmvHLuvc0tjxEt0ociTPrCg +--- BkQd4O2m/i98rlBcNhczU6Wj0htoiNLQDn0W6yKn1/c + a "WL\#zDRq6.竂}#8²koyq>L\`wƔ>f/Ѵ^,# +hD<>]C \ No newline at end of file From 
8de798469bac11fec1906b54b388f9c1e836e795 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 5 Apr 2026 01:34:32 -0700 Subject: [PATCH 17/59] Bind tailnet auth flow to tailscale --- Scripts/authentik-sync-tailnet-auth-flow.sh | 39 ++++++++++++++------- nixos/modules/burrow-authentik.nix | 1 + 2 files changed, 28 insertions(+), 12 deletions(-) diff --git a/Scripts/authentik-sync-tailnet-auth-flow.sh b/Scripts/authentik-sync-tailnet-auth-flow.sh index bfb00ef..bae760b 100755 --- a/Scripts/authentik-sync-tailnet-auth-flow.sh +++ b/Scripts/authentik-sync-tailnet-auth-flow.sh @@ -4,6 +4,7 @@ set -euo pipefail authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" provider_slug="${AUTHENTIK_TAILNET_PROVIDER_SLUG:-ts}" +provider_slugs_json="${AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON:-}" authentication_flow_name="${AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME:-Burrow Tailnet Authentication}" authentication_flow_slug="${AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG:-burrow-tailnet-authentication}" identification_stage_name="${AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME:-burrow-tailnet-identification-stage}" @@ -21,6 +22,7 @@ Required environment: Optional environment: AUTHENTIK_URL AUTHENTIK_TAILNET_PROVIDER_SLUG + AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME @@ -40,6 +42,15 @@ if [[ -z "$bootstrap_token" ]]; then exit 1 fi +if [[ -n "$provider_slugs_json" ]]; then + if ! 
printf '%s' "$provider_slugs_json" | jq -e 'type == "array" and length > 0 and all(.[]; type == "string" and length > 0)' >/dev/null; then + echo "error: AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON must be a non-empty JSON array of strings" >&2 + exit 1 + fi +else + provider_slugs_json="$(jq -cn --arg slug "$provider_slug" '[$slug]')" +fi + api() { local method="$1" local path="$2" @@ -263,18 +274,20 @@ ensure_flow_binding() { wait_for_authentik -provider_pk="$( +mapfile -t provider_pks < <( api GET "/api/v3/providers/oauth2/?page_size=200" \ - | jq -r --arg provider_slug "$provider_slug" ' + | jq -r --argjson provider_slugs "$provider_slugs_json" ' .results[]? - | select(.assigned_application_slug == $provider_slug or .slug == $provider_slug) + | select( + (.assigned_application_slug != null and ($provider_slugs | index(.assigned_application_slug) != null)) + or (.slug != null and ($provider_slugs | index(.slug) != null)) + ) | .pk // empty - ' \ - | head -n1 -)" + ' +) -if [[ -z "$provider_pk" ]]; then - echo "error: could not resolve Authentik Tailnet OAuth provider ${provider_slug}" >&2 +if [[ "${#provider_pks[@]}" -eq 0 ]]; then + echo "error: could not resolve any Authentik Tailnet OAuth providers from ${provider_slugs_json}" >&2 exit 1 fi @@ -287,8 +300,10 @@ authentication_flow_pk="$(ensure_authentication_flow)" ensure_flow_binding "$authentication_flow_pk" "$identification_stage_pk" 10 ensure_flow_binding "$authentication_flow_pk" "$user_login_stage_pk" 30 -api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$( - jq -cn --arg flow "$authentication_flow_pk" '{authentication_flow: $flow}' -)" >/dev/null +for provider_pk in "${provider_pks[@]}"; do + api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$( + jq -cn --arg flow "$authentication_flow_pk" '{authentication_flow: $flow}' + )" >/dev/null +done -echo "Synced Burrow Tailnet authentication flow for provider ${provider_slug}." 
+echo "Synced Burrow Tailnet authentication flow for providers ${provider_slugs_json}." diff --git a/nixos/modules/burrow-authentik.nix b/nixos/modules/burrow-authentik.nix index 6861f17..1616b36 100644 --- a/nixos/modules/burrow-authentik.nix +++ b/nixos/modules/burrow-authentik.nix @@ -603,6 +603,7 @@ EOF export AUTHENTIK_URL=https://${cfg.domain} export AUTHENTIK_TAILNET_PROVIDER_SLUG=${lib.escapeShellArg cfg.headscaleProviderSlug} + export AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON='["${cfg.headscaleProviderSlug}","${cfg.tailscaleProviderSlug}"]' export AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME=${lib.escapeShellArg cfg.headscaleAuthenticationFlowName} export AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG=${lib.escapeShellArg cfg.headscaleAuthenticationFlowSlug} export AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME=${lib.escapeShellArg cfg.headscaleIdentificationStageName} From 3ebb0a8e61b3420097483bf5a9f033c53e1cd5cf Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 5 Apr 2026 01:36:52 -0700 Subject: [PATCH 18/59] Fix tailnet auth flow provider lookup --- Scripts/authentik-sync-tailnet-auth-flow.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Scripts/authentik-sync-tailnet-auth-flow.sh b/Scripts/authentik-sync-tailnet-auth-flow.sh index bae760b..1c715cc 100755 --- a/Scripts/authentik-sync-tailnet-auth-flow.sh +++ b/Scripts/authentik-sync-tailnet-auth-flow.sh @@ -279,8 +279,8 @@ mapfile -t provider_pks < <( | jq -r --argjson provider_slugs "$provider_slugs_json" ' .results[]? 
| select( - (.assigned_application_slug != null and ($provider_slugs | index(.assigned_application_slug) != null)) - or (.slug != null and ($provider_slugs | index(.slug) != null)) + ((.assigned_application_slug // empty) as $assigned | ($provider_slugs | index($assigned)) != null) + or ((.slug // empty) as $slug | ($provider_slugs | index($slug)) != null) ) | .pk // empty ' From 64103abbea58979a360c5be7976be313a8c0d1e4 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 5 Apr 2026 02:10:49 -0700 Subject: [PATCH 19/59] Refocus Tailnet flow on Tailscale --- Apple/App/AppDelegate.swift | 2 +- Apple/AppUITests/BurrowUITests.swift | 317 +++++++++--- Apple/Configuration/Constants/Constants.swift | 10 +- Apple/Core/Client.swift | 50 ++ Apple/Core/Client/Generated/burrow.pb.swift | 32 ++ .../PacketTunnelProvider.swift | 250 +++++++++- Apple/UI/BurrowView.swift | 393 ++++++++++----- Apple/UI/Networks/Network.swift | 6 +- Scripts/run-ios-tailnet-ui-tests.sh | 116 ++++- burrow/src/auth/server/tailscale.rs | 338 +++++++++---- burrow/src/control/discovery.rs | 13 + burrow/src/daemon/instance.rs | 164 ++++++- burrow/src/daemon/rpc/response.rs | 20 + burrow/src/daemon/runtime.rs | 464 +++++++++++++++++- burrow/src/tracing.rs | 14 +- proto/burrow.proto | 9 + 16 files changed, 1856 insertions(+), 342 deletions(-) diff --git a/Apple/App/AppDelegate.swift b/Apple/App/AppDelegate.swift index 12fe52c..c3cb4cb 100644 --- a/Apple/App/AppDelegate.swift +++ b/Apple/App/AppDelegate.swift @@ -55,7 +55,7 @@ class AppDelegate: NSObject, NSApplicationDelegate { let statusBar = NSStatusBar.system let statusItem = statusBar.statusItem(withLength: NSStatusItem.squareLength) if let button = statusItem.button { - button.image = NSImage(systemSymbolName: "network.badge.shield.half.filled", accessibilityDescription: nil) + button.image = NSImage(systemSymbolName: "pipe.and.drop.fill", accessibilityDescription: nil) } return statusItem }() diff --git a/Apple/AppUITests/BurrowUITests.swift 
b/Apple/AppUITests/BurrowUITests.swift index f9dbeae..b7d8111 100644 --- a/Apple/AppUITests/BurrowUITests.swift +++ b/Apple/AppUITests/BurrowUITests.swift @@ -1,15 +1,31 @@ import XCTest +import UIKit @MainActor final class BurrowTailnetLoginUITests: XCTestCase { + private enum TailnetLoginMode: String, Decodable { + case tailscale + case discovered + } + + private struct TestConfig: Decodable { + let email: String + let username: String + let password: String + let mode: TailnetLoginMode? + } + override func setUpWithError() throws { continueAfterFailure = false } func testTailnetLoginThroughAuthentikWebSession() throws { - let email = try requiredEnvironment("BURROW_UI_TEST_EMAIL") - let username = ProcessInfo.processInfo.environment["BURROW_UI_TEST_USERNAME"] ?? email - let password = try requiredEnvironment("BURROW_UI_TEST_PASSWORD") + let config = try loadTestConfig() + let email = config.email + let username = config.username + let password = config.password + let mode = config.mode ?? .tailscale + let browserIdentity = mode == .tailscale ? 
email : username let app = XCUIApplication() app.launch() @@ -18,51 +34,90 @@ final class BurrowTailnetLoginUITests: XCTestCase { XCTAssertTrue(tailnetButton.waitForExistence(timeout: 15), "Tailnet add button did not appear") tailnetButton.tap() + configureTailnetIfNeeded(in: app, mode: mode) + let discoveryField = app.textFields["tailnet-discovery-email"] XCTAssertTrue(discoveryField.waitForExistence(timeout: 10), "Tailnet discovery email field did not appear") replaceText(in: discoveryField, with: email) - let findServerButton = app.buttons["tailnet-find-server"] - XCTAssertTrue(findServerButton.waitForExistence(timeout: 5), "Find Server button did not appear") - findServerButton.tap() - - let discoveryCard = app.otherElements["tailnet-discovery-card"] - XCTAssertTrue(discoveryCard.waitForExistence(timeout: 20), "Tailnet discovery result did not appear") - - let authorityField = app.textFields["tailnet-authority"] - XCTAssertTrue(authorityField.waitForExistence(timeout: 10), "Tailnet authority field did not appear") - XCTAssertTrue( - waitForFieldValue(authorityField, containing: "ts.burrow.net", timeout: 20), - "Tailnet authority was not populated from discovery" - ) - - let probeButton = app.buttons["tailnet-check-connection"] - XCTAssertTrue(probeButton.waitForExistence(timeout: 5), "Check Connection button did not appear") - probeButton.tap() - - let probeCard = app.otherElements["tailnet-authority-probe-card"] - XCTAssertTrue(probeCard.waitForExistence(timeout: 20), "Tailnet connection probe did not complete") + let serverCard = app.descendants(matching: .any) + .matching(identifier: "tailnet-server-card") + .firstMatch + XCTAssertTrue(serverCard.waitForExistence(timeout: 5), "Tailnet server card did not appear") let signInButton = app.buttons["tailnet-start-sign-in"] XCTAssertTrue(signInButton.waitForExistence(timeout: 10), "Tailnet sign-in button did not appear") signInButton.tap() - acceptAuthenticationPromptIfNeeded(in: app) + 
acceptAuthenticationPromptIfNeeded(in: app, timeout: 20) let webSession = webAuthenticationSession() XCTAssertTrue(webSession.waitForExistence(timeout: 20), "Safari authentication session did not appear") - signIntoAuthentik(in: webSession, username: username, password: password) + signIntoAuthentik(in: webSession, username: browserIdentity, password: password) app.activate() XCTAssertTrue( - waitForButtonLabel(app.buttons["tailnet-start-sign-in"], equals: "Signed In", timeout: 60), + waitForTailnetSignedIn(in: app, timeout: 60), "Tailnet sign-in never reached the running state" ) } - private func acceptAuthenticationPromptIfNeeded(in app: XCUIApplication) { + private func configureTailnetIfNeeded(in app: XCUIApplication, mode: TailnetLoginMode) { + guard mode == .discovered else { return } + + openTailnetMenu(in: app) + tapMenuButton(named: "Edit Custom Server", in: app) + + openTailnetMenu(in: app) + tapMenuButton(named: "Show Advanced Settings", in: app) + + let authorityField = app.textFields["tailnet-authority"] + XCTAssertTrue(authorityField.waitForExistence(timeout: 10), "Tailnet authority field did not appear") + replaceText(in: authorityField, with: "") + } + + private func openTailnetMenu(in app: XCUIApplication) { + let moreButton = app.buttons["More"] + XCTAssertTrue(moreButton.waitForExistence(timeout: 5), "Tailnet menu button did not appear") + moreButton.tap() + } + + private func tapMenuButton(named title: String, in app: XCUIApplication) { + let menuButton = firstExistingElement( + from: [ + app.buttons[title], + app.descendants(matching: .button)[title], + ], + timeout: 5 + ) + XCTAssertTrue(menuButton.exists, "Menu action \(title) did not appear") + menuButton.tap() + } + + private func acceptAuthenticationPromptIfNeeded( + in app: XCUIApplication, + timeout: TimeInterval + ) { let springboard = XCUIApplication(bundleIdentifier: "com.apple.springboard") + let deadline = Date().addingTimeInterval(timeout) + + repeat { + let promptCandidates = [ + 
springboard.buttons["Continue"], + springboard.buttons["Allow"], + app.buttons["Continue"], + app.buttons["Allow"], + ] + + for button in promptCandidates where button.exists && button.isHittable { + button.tap() + return + } + + RunLoop.current.run(until: Date().addingTimeInterval(0.25)) + } while Date() < deadline + let promptCandidates = [ springboard.buttons["Continue"], springboard.buttons["Allow"], @@ -70,7 +125,7 @@ final class BurrowTailnetLoginUITests: XCTestCase { app.buttons["Allow"], ] - for button in promptCandidates where button.waitForExistence(timeout: 3) { + for button in promptCandidates where button.exists { button.tap() return } @@ -88,6 +143,19 @@ final class BurrowTailnetLoginUITests: XCTestCase { } private func signIntoAuthentik(in webSession: XCUIApplication, username: String, password: String) { + followTailnetRedirectIfNeeded(in: webSession) + + if !webSession.exists { + return + } + + let immediatePasswordField = firstExistingSecureField(in: webSession, timeout: 2) + if immediatePasswordField.exists { + replaceSecureText(in: immediatePasswordField, within: webSession, with: password) + submitAuthenticationForm(in: webSession, focusedField: immediatePasswordField) + return + } + let usernameField = firstExistingElement( in: webSession, queries: [ @@ -99,21 +167,12 @@ final class BurrowTailnetLoginUITests: XCTestCase { { $0.webViews.textFields["Email or Username"] }, { $0.descendants(matching: .textField).firstMatch }, ], - timeout: 25 + timeout: 12 ) - XCTAssertTrue(usernameField.exists, "Authentik username field did not appear") - replaceText(in: usernameField, with: username) - - let immediatePasswordField = firstExistingSecureField(in: webSession, timeout: 2) - if immediatePasswordField.exists { - replaceSecureText(in: immediatePasswordField, with: password) - tapFirstExistingButton( - in: webSession, - titles: ["Continue", "Sign In", "Log in", "Login"], - timeout: 5 - ) + if !usernameField.exists { return } + replaceText(in: 
usernameField, with: username) tapFirstExistingButton( in: webSession, @@ -123,21 +182,31 @@ final class BurrowTailnetLoginUITests: XCTestCase { let passwordField = firstExistingSecureField(in: webSession, timeout: 20) XCTAssertTrue(passwordField.exists, "Authentik password field did not appear") - replaceSecureText(in: passwordField, with: password) - tapFirstExistingButton( - in: webSession, - titles: ["Continue", "Sign In", "Log in", "Login"], - timeout: 5 - ) + replaceSecureText(in: passwordField, within: webSession, with: password) + submitAuthenticationForm(in: webSession, focusedField: passwordField) + } + + private func followTailnetRedirectIfNeeded(in webSession: XCUIApplication) { + let redirectCandidates = [ + webSession.links["Found"], + webSession.webViews.links["Found"], + webSession.buttons["Found"], + webSession.webViews.buttons["Found"], + ] + + let redirectLink = firstExistingElement(from: redirectCandidates, timeout: 8) + if redirectLink.exists { + redirectLink.tap() + } } private func firstExistingSecureField(in app: XCUIApplication, timeout: TimeInterval) -> XCUIElement { let candidates = [ + app.descendants(matching: .secureTextField).firstMatch, app.secureTextFields["Password"], app.secureTextFields["Password or Token"], app.webViews.secureTextFields["Password"], app.webViews.secureTextFields["Password or Token"], - app.descendants(matching: .secureTextField).firstMatch, ] return firstExistingElement(from: candidates, timeout: timeout) @@ -160,11 +229,92 @@ final class BurrowTailnetLoginUITests: XCTestCase { button.tap() } - private func requiredEnvironment(_ key: String) throws -> String { - guard let value = ProcessInfo.processInfo.environment[key], - !value.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + private func submitAuthenticationForm(in app: XCUIApplication, focusedField: XCUIElement) { + focus(focusedField) + focusedField.typeText("\n") + if waitForAny( + [ + { !focusedField.exists }, + { !app.staticTexts["Burrow Tailnet 
Authentication"].exists }, + ], + timeout: 1.5 + ) { + return + } + + let keyboard = app.keyboards.firstMatch + if keyboard.waitForExistence(timeout: 2) { + let keyboardCandidates = [ + "Return", + "return", + "Go", + "go", + "Continue", + "continue", + "Done", + "done", + "Join", + "join", + "Sign In", + "Log In", + "Login", + ] + for title in keyboardCandidates { + let key = keyboard.buttons[title] + if key.exists && key.isHittable { + key.tap() + return + } + } + + if let lastKey = keyboard.buttons.allElementsBoundByIndex.last, + lastKey.exists, + lastKey.isHittable + { + lastKey.tap() + return + } + } + + tapFirstExistingButton( + in: app, + titles: ["Continue", "Sign In", "Log in", "Login"], + timeout: 5 + ) + } + + private func loadTestConfig() throws -> TestConfig { + let environment = ProcessInfo.processInfo.environment + if let email = nonEmptyEnvironment("BURROW_UI_TEST_EMAIL"), + let password = nonEmptyEnvironment("BURROW_UI_TEST_PASSWORD") + { + return TestConfig( + email: email, + username: nonEmptyEnvironment("BURROW_UI_TEST_USERNAME") ?? email, + password: password, + mode: nonEmptyEnvironment("BURROW_UI_TEST_TAILNET_MODE") + .flatMap(TailnetLoginMode.init(rawValue:)) + ) + } + + let configPath = environment["BURROW_UI_TEST_CONFIG_PATH"] ?? "/tmp/burrow-ui-test-config.json" + let configURL = URL(fileURLWithPath: configPath) + guard FileManager.default.fileExists(atPath: configURL.path) else { + throw XCTSkip( + "Missing UI test configuration. Expected env vars or config file at \(configURL.path)" + ) + } + + let data = try Data(contentsOf: configURL) + return try JSONDecoder().decode(TestConfig.self, from: data) + } + + private func nonEmptyEnvironment(_ key: String) -> String? { + guard let value = ProcessInfo.processInfo.environment[key]? 
+ .trimmingCharacters(in: .whitespacesAndNewlines), + !value.isEmpty else { - throw XCTSkip("Missing required UI test environment variable \(key)") + return nil } return value } @@ -189,6 +339,32 @@ final class BurrowTailnetLoginUITests: XCTestCase { return XCTWaiter.wait(for: [expectation], timeout: timeout) == .completed } + private func waitForTailnetSignedIn(in app: XCUIApplication, timeout: TimeInterval) -> Bool { + let button = app.buttons["tailnet-start-sign-in"] + let deadline = Date().addingTimeInterval(timeout) + + repeat { + acceptAuthenticationPromptIfNeeded(in: app, timeout: 1) + if button.exists, button.label == "Signed In" { + return true + } + RunLoop.current.run(until: Date().addingTimeInterval(0.3)) + } while Date() < deadline + + return button.exists && button.label == "Signed In" + } + + private func waitForAny(_ conditions: [() -> Bool], timeout: TimeInterval) -> Bool { + let deadline = Date().addingTimeInterval(timeout) + repeat { + if conditions.contains(where: { $0() }) { + return true + } + RunLoop.current.run(until: Date().addingTimeInterval(0.2)) + } while Date() < deadline + return conditions.contains(where: { $0() }) + } + private func firstExistingElement( in app: XCUIApplication, queries: [(XCUIApplication) -> XCUIElement], @@ -210,14 +386,27 @@ final class BurrowTailnetLoginUITests: XCTestCase { } private func replaceText(in element: XCUIElement, with value: String) { - element.tap() + focus(element) clearText(in: element) element.typeText(value) } - private func replaceSecureText(in element: XCUIElement, with value: String) { - element.tap() - clearText(in: element) + private func replaceSecureText(in element: XCUIElement, within app: XCUIApplication, with value: String) { + UIPasteboard.general.string = value + focus(element) + for revealMenu in [ + { element.doubleTap() }, + { element.press(forDuration: 1.2) }, + ] { + revealMenu() + let pasteButton = firstExistingElement(from: pasteCandidates(in: app), timeout: 3) + if 
pasteButton.exists { + pasteButton.tap() + return + } + } + + focus(element) element.typeText(value) } @@ -229,4 +418,22 @@ final class BurrowTailnetLoginUITests: XCTestCase { let deleteSequence = String(repeating: XCUIKeyboardKey.delete.rawValue, count: currentValue.count) element.typeText(deleteSequence) } + + private func focus(_ element: XCUIElement) { + element.coordinate(withNormalizedOffset: CGVector(dx: 0.5, dy: 0.5)).tap() + RunLoop.current.run(until: Date().addingTimeInterval(0.3)) + } + + private func pasteCandidates(in app: XCUIApplication) -> [XCUIElement] { + let pasteLabels = ["Paste", "Incolla", "Paste from Clipboard"] + return pasteLabels.flatMap { label in + [ + app.menuItems[label], + app.buttons[label], + app.webViews.buttons[label], + app.descendants(matching: .button).matching(NSPredicate(format: "label == %@", label)).firstMatch, + app.descendants(matching: .menuItem).matching(NSPredicate(format: "label == %@", label)).firstMatch, + ] + } + } } diff --git a/Apple/Configuration/Constants/Constants.swift b/Apple/Configuration/Constants/Constants.swift index 8844564..95d8c78 100644 --- a/Apple/Configuration/Constants/Constants.swift +++ b/Apple/Configuration/Constants/Constants.swift @@ -36,13 +36,9 @@ public enum Constants { private static func fallbackContainerURL() -> Result { #if targetEnvironment(simulator) Result { - let baseURL = try FileManager.default.url( - for: .applicationSupportDirectory, - in: .userDomainMask, - appropriateFor: nil, - create: true - ) - let url = baseURL + // The simulator app's Application Support path lives inside its sandbox container, + // so the host daemon cannot reach it. Use a shared host temp location instead. 
+ let url = URL(filePath: "/tmp", directoryHint: .isDirectory) .appending(component: bundleIdentifier, directoryHint: .isDirectory) .appending(component: "SimulatorFallback", directoryHint: .isDirectory) try FileManager.default.createDirectory(at: url, withIntermediateDirectories: true) diff --git a/Apple/Core/Client.swift b/Apple/Core/Client.swift index e44ebcd..7d4cfc7 100644 --- a/Apple/Core/Client.swift +++ b/Apple/Core/Client.swift @@ -108,6 +108,13 @@ public struct Burrow_TailnetLoginStatusResponse: Sendable { public init() {} } +public struct Burrow_TunnelPacket: Sendable { + public var payload = Data() + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + extension Burrow_TailnetDiscoverRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { public static let protoMessageName: String = "burrow.TailnetDiscoverRequest" public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ @@ -387,6 +394,29 @@ extension Burrow_TailnetLoginStatusResponse: SwiftProtobuf.Message, SwiftProtobu } } +extension Burrow_TunnelPacket: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TunnelPacket" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "payload") + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &self.payload) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.payload.isEmpty { + try visitor.visitSingularBytesField(value: self.payload, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } +} + public struct TailnetClient: Client, GRPCClient { public let channel: GRPCChannel public var defaultCallOptions: CallOptions @@ -456,3 +486,23 
@@ public struct TailnetClient: Client, GRPCClient { ) } } + +public struct TunnelPacketClient: Client, GRPCClient { + public let channel: GRPCChannel + public var defaultCallOptions: CallOptions + + public init(channel: any GRPCChannel) { + self.channel = channel + self.defaultCallOptions = .init() + } + + public func makeTunnelPacketsCall( + callOptions: CallOptions? = nil + ) -> GRPCAsyncBidirectionalStreamingCall { + self.makeAsyncBidirectionalStreamingCall( + path: "/burrow.Tunnel/TunnelPackets", + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: [] + ) + } +} diff --git a/Apple/Core/Client/Generated/burrow.pb.swift b/Apple/Core/Client/Generated/burrow.pb.swift index bba0f16..fccd769 100644 --- a/Apple/Core/Client/Generated/burrow.pb.swift +++ b/Apple/Core/Client/Generated/burrow.pb.swift @@ -215,6 +215,14 @@ public struct Burrow_TunnelConfigurationResponse: Sendable { public var mtu: Int32 = 0 + public var routes: [String] = [] + + public var dnsServers: [String] = [] + + public var searchDomains: [String] = [] + + public var includeDefaultRoute: Bool = false + public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} @@ -532,6 +540,10 @@ extension Burrow_TunnelConfigurationResponse: SwiftProtobuf.Message, SwiftProtob public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "addresses"), 2: .same(proto: "mtu"), + 3: .same(proto: "routes"), + 4: .standard(proto: "dns_servers"), + 5: .standard(proto: "search_domains"), + 6: .standard(proto: "include_default_route"), ] public mutating func decodeMessage(decoder: inout D) throws { @@ -542,6 +554,10 @@ extension Burrow_TunnelConfigurationResponse: SwiftProtobuf.Message, SwiftProtob switch fieldNumber { case 1: try { try decoder.decodeRepeatedStringField(value: &self.addresses) }() case 2: try { try decoder.decodeSingularInt32Field(value: &self.mtu) }() + case 3: try { try decoder.decodeRepeatedStringField(value: &self.routes) }() + case 4: try { try 
decoder.decodeRepeatedStringField(value: &self.dnsServers) }() + case 5: try { try decoder.decodeRepeatedStringField(value: &self.searchDomains) }() + case 6: try { try decoder.decodeSingularBoolField(value: &self.includeDefaultRoute) }() default: break } } @@ -554,12 +570,28 @@ extension Burrow_TunnelConfigurationResponse: SwiftProtobuf.Message, SwiftProtob if self.mtu != 0 { try visitor.visitSingularInt32Field(value: self.mtu, fieldNumber: 2) } + if !self.routes.isEmpty { + try visitor.visitRepeatedStringField(value: self.routes, fieldNumber: 3) + } + if !self.dnsServers.isEmpty { + try visitor.visitRepeatedStringField(value: self.dnsServers, fieldNumber: 4) + } + if !self.searchDomains.isEmpty { + try visitor.visitRepeatedStringField(value: self.searchDomains, fieldNumber: 5) + } + if self.includeDefaultRoute { + try visitor.visitSingularBoolField(value: self.includeDefaultRoute, fieldNumber: 6) + } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: Burrow_TunnelConfigurationResponse, rhs: Burrow_TunnelConfigurationResponse) -> Bool { if lhs.addresses != rhs.addresses {return false} if lhs.mtu != rhs.mtu {return false} + if lhs.routes != rhs.routes {return false} + if lhs.dnsServers != rhs.dnsServers {return false} + if lhs.searchDomains != rhs.searchDomains {return false} + if lhs.includeDefaultRoute != rhs.includeDefaultRoute {return false} if lhs.unknownFields != rhs.unknownFields {return false} return true } diff --git a/Apple/NetworkExtension/PacketTunnelProvider.swift b/Apple/NetworkExtension/PacketTunnelProvider.swift index 4f29543..3f3d8b4 100644 --- a/Apple/NetworkExtension/PacketTunnelProvider.swift +++ b/Apple/NetworkExtension/PacketTunnelProvider.swift @@ -1,6 +1,7 @@ import AsyncAlgorithms import BurrowConfiguration import BurrowCore +import GRPC import libburrow import NetworkExtension import os @@ -19,6 +20,9 @@ final class PacketTunnelProvider: NEPacketTunnelProvider, @unchecked Sendable { } private let logger = 
Logger.logger(for: PacketTunnelProvider.self) + private var packetCall: GRPCAsyncBidirectionalStreamingCall? + private var inboundPacketTask: Task? + private var outboundPacketTask: Task? private var client: TunnelClient { get throws { try _client.get() } @@ -45,16 +49,18 @@ final class PacketTunnelProvider: NEPacketTunnelProvider, @unchecked Sendable { let completion = SendableCallbackBox(completionHandler) Task { do { + _ = try await client.tunnelStart(.init()) let configuration = try await Array(client.tunnelConfiguration(.init()).prefix(1)).first guard let settings = configuration?.settings else { throw Error.missingTunnelConfiguration } try await setTunnelNetworkSettings(settings) - _ = try await client.tunnelStart(.init()) + try startPacketBridge() logger.log("Started tunnel with network settings: \(settings)") completion.callback(nil) } catch { logger.error("Failed to start tunnel: \(error)") + stopPacketBridge() completion.callback(error) } } @@ -66,6 +72,7 @@ final class PacketTunnelProvider: NEPacketTunnelProvider, @unchecked Sendable { ) { let completion = SendableCallbackBox(completionHandler) Task { + stopPacketBridge() do { _ = try await client.tunnelStop(.init()) logger.log("Stopped client") @@ -77,20 +84,243 @@ final class PacketTunnelProvider: NEPacketTunnelProvider, @unchecked Sendable { } } +extension PacketTunnelProvider { + private func startPacketBridge() throws { + stopPacketBridge() + + let packetClient = TunnelPacketClient.unix(socketURL: try Constants.socketURL) + let call = packetClient.makeTunnelPacketsCall() + self.packetCall = call + + inboundPacketTask = Task { [weak self] in + guard let self else { return } + do { + for try await packet in call.responseStream { + let payload = packet.payload + self.packetFlow.writePackets( + [payload], + withProtocols: [Self.protocolNumber(for: payload)] + ) + } + } catch { + guard !Task.isCancelled else { return } + self.logger.error("Tunnel packet receive loop failed: \(error)") + } + } + + 
outboundPacketTask = Task { [weak self] in + guard let self else { return } + defer { call.requestStream.finish() } + do { + while !Task.isCancelled { + let packets = await self.readPacketsBatch() + for (payload, _) in packets { + var packet = Burrow_TunnelPacket() + packet.payload = payload + try await call.requestStream.send(packet) + } + } + } catch { + guard !Task.isCancelled else { return } + self.logger.error("Tunnel packet send loop failed: \(error)") + } + } + } + + private func stopPacketBridge() { + inboundPacketTask?.cancel() + inboundPacketTask = nil + outboundPacketTask?.cancel() + outboundPacketTask = nil + packetCall?.cancel() + packetCall = nil + } + + private func readPacketsBatch() async -> [(Data, NSNumber)] { + await withCheckedContinuation { continuation in + packetFlow.readPackets { packets, protocols in + continuation.resume(returning: Array(zip(packets, protocols))) + } + } + } + + private static func protocolNumber(for payload: Data) -> NSNumber { + guard let version = payload.first.map({ $0 >> 4 }) else { + return NSNumber(value: AF_INET) + } + switch version { + case 6: + return NSNumber(value: AF_INET6) + default: + return NSNumber(value: AF_INET) + } + } +} + extension Burrow_TunnelConfigurationResponse { fileprivate var settings: NEPacketTunnelNetworkSettings { - let ipv6Addresses = addresses.filter { IPv6Address($0) != nil } + let parsedAddresses = addresses.compactMap(ParsedTunnelAddress.init(rawValue:)) + let ipv4Addresses = parsedAddresses.compactMap(\.ipv4Address) + let ipv6Addresses = parsedAddresses.compactMap(\.ipv6Address) + let parsedRoutes = routes.compactMap(ParsedTunnelRoute.init(rawValue:)) + var ipv4Routes = parsedRoutes.compactMap(\.ipv4Route) + var ipv6Routes = parsedRoutes.compactMap(\.ipv6Route) + if includeDefaultRoute { + ipv4Routes.append(.default()) + ipv6Routes.append(.default()) + } let settings = NEPacketTunnelNetworkSettings(tunnelRemoteAddress: "1.1.1.1") settings.mtu = NSNumber(value: mtu) - 
settings.ipv4Settings = NEIPv4Settings( - addresses: addresses.filter { IPv4Address($0) != nil }, - subnetMasks: ["255.255.255.0"] - ) - settings.ipv6Settings = NEIPv6Settings( - addresses: ipv6Addresses, - networkPrefixLengths: ipv6Addresses.map { _ in 64 } - ) + if !ipv4Addresses.isEmpty { + let ipv4Settings = NEIPv4Settings( + addresses: ipv4Addresses.map(\.address), + subnetMasks: ipv4Addresses.map(\.subnetMask) + ) + if !ipv4Routes.isEmpty { + ipv4Settings.includedRoutes = ipv4Routes + } + settings.ipv4Settings = ipv4Settings + } + if !ipv6Addresses.isEmpty { + let ipv6Settings = NEIPv6Settings( + addresses: ipv6Addresses.map(\.address), + networkPrefixLengths: ipv6Addresses.map(\.prefixLength) + ) + if !ipv6Routes.isEmpty { + ipv6Settings.includedRoutes = ipv6Routes + } + settings.ipv6Settings = ipv6Settings + } + if !dnsServers.isEmpty { + let dnsSettings = NEDNSSettings(servers: dnsServers) + if !searchDomains.isEmpty { + dnsSettings.matchDomains = searchDomains + } + settings.dnsSettings = dnsSettings + } return settings } } + +private struct ParsedTunnelAddress { + struct IPv4AddressSetting { + let address: String + let subnetMask: String + } + + struct IPv6AddressSetting { + let address: String + let prefixLength: NSNumber + } + + let ipv4Address: IPv4AddressSetting? + let ipv6Address: IPv6AddressSetting? + + init?(rawValue: String) { + let components = rawValue.split(separator: "/", maxSplits: 1).map(String.init) + let address = components.first?.trimmingCharacters(in: .whitespacesAndNewlines) ?? "" + guard !address.isEmpty else { + return nil + } + + let prefix = components.count == 2 ? Int(components[1]) : nil + if IPv4Address(address) != nil { + let prefixLength = prefix ?? 32 + guard (0 ... 
32).contains(prefixLength) else { + return nil + } + ipv4Address = IPv4AddressSetting( + address: address, + subnetMask: Self.ipv4SubnetMask(prefixLength: prefixLength) + ) + ipv6Address = nil + return + } + + if IPv6Address(address) != nil { + let prefixLength = prefix ?? 128 + guard (0 ... 128).contains(prefixLength) else { + return nil + } + ipv4Address = nil + ipv6Address = IPv6AddressSetting( + address: address, + prefixLength: NSNumber(value: prefixLength) + ) + return + } + + return nil + } + + private static func ipv4SubnetMask(prefixLength: Int) -> String { + guard prefixLength > 0 else { + return "0.0.0.0" + } + let mask = UInt32.max << (32 - prefixLength) + let octets = [ + (mask >> 24) & 0xff, + (mask >> 16) & 0xff, + (mask >> 8) & 0xff, + mask & 0xff, + ] + return octets.map(String.init).joined(separator: ".") + } +} + +private struct ParsedTunnelRoute { + let ipv4Route: NEIPv4Route? + let ipv6Route: NEIPv6Route? + + init?(rawValue: String) { + let components = rawValue.split(separator: "/", maxSplits: 1).map(String.init) + let address = components.first?.trimmingCharacters(in: .whitespacesAndNewlines) ?? "" + guard !address.isEmpty else { + return nil + } + + let prefix = components.count == 2 ? Int(components[1]) : nil + if IPv4Address(address) != nil { + let prefixLength = prefix ?? 32 + guard (0 ... 32).contains(prefixLength) else { + return nil + } + ipv4Route = NEIPv4Route( + destinationAddress: address, + subnetMask: Self.ipv4SubnetMask(prefixLength: prefixLength) + ) + ipv6Route = nil + return + } + + if IPv6Address(address) != nil { + let prefixLength = prefix ?? 128 + guard (0 ... 
128).contains(prefixLength) else { + return nil + } + ipv4Route = nil + ipv6Route = NEIPv6Route( + destinationAddress: address, + networkPrefixLength: NSNumber(value: prefixLength) + ) + return + } + + return nil + } + + private static func ipv4SubnetMask(prefixLength: Int) -> String { + var mask = UInt32.max << (32 - prefixLength) + if prefixLength == 0 { + mask = 0 + } + let octets = [ + String((mask >> 24) & 0xff), + String((mask >> 16) & 0xff), + String((mask >> 8) & 0xff), + String(mask & 0xff), + ] + return octets.joined(separator: ".") + } +} diff --git a/Apple/UI/BurrowView.swift b/Apple/UI/BurrowView.swift index 2128ec3..e15d3f7 100644 --- a/Apple/UI/BurrowView.swift +++ b/Apple/UI/BurrowView.swift @@ -83,7 +83,7 @@ public struct BurrowView: View { ContentUnavailableView( "No Accounts Yet", systemImage: "person.crop.circle.badge.plus", - description: Text("Save a Tor account or sign in to a Tailnet provider to keep network identities ready on this device.") + description: Text("Save a Tor account or sign in to Tailnet to keep network identities ready on this device.") ) .frame(maxWidth: .infinity, minHeight: 180) } else { @@ -135,7 +135,7 @@ public struct BurrowView: View { private func runAutomationIfNeeded() { guard !didRunAutomation, let automation = BurrowAutomationConfig.current, - automation.action == .tailnetLogin || automation.action == .headscaleProbe + automation.action == .tailnetLogin || automation.action == .tailnetProbe else { return } @@ -340,8 +340,12 @@ private struct ConfigurationSheetView: View { @State private var isStartingTailnetLogin = false @State private var tailnetPresentedAuthURL: URL? @State private var preserveTailnetLoginSession = false + @State private var usesCustomTailnetAuthority = false + @State private var showsAdvancedTailnetSettings = false @State private var browserAuthenticator = TailnetBrowserAuthenticator() @State private var tailnetLoginPollTask: Task? + @State private var tailnetDiscoveryTask: Task? 
+ @State private var tailnetProbeTask: Task? @State private var didRunAutomation = false init( @@ -364,14 +368,9 @@ private struct ConfigurationSheetView: View { .listRowInsets(.init(top: 4, leading: 0, bottom: 4, trailing: 0)) .listRowBackground(Color.clear) - Section("Identity") { - TextField("Title", text: $draft.title) - TextField("Account", text: $draft.accountName) - TextField("Identity", text: $draft.identityName) - if sheet == .tailnet { - TextField("Hostname", text: $draft.hostname) - .burrowLoginField() - .autocorrectionDisabled() + if showsIdentitySection { + Section("Identity") { + identityFields } } @@ -458,9 +457,15 @@ private struct ConfigurationSheetView: View { } .onChange(of: draft.authority) { _, _ in resetAuthorityProbe() + if sheet == .tailnet, usesCustomTailnetAuthority { + scheduleTailnetAuthorityProbe() + } } .onChange(of: draft.discoveryEmail) { _, _ in resetTailnetDiscoveryFeedback() + if sheet == .tailnet, !usesCustomTailnetAuthority { + scheduleTailnetDiscovery() + } } .onChange(of: draft.authMode) { _, newMode in guard newMode != .web else { return } @@ -470,6 +475,8 @@ private struct ConfigurationSheetView: View { } .onDisappear { tailnetLoginPollTask?.cancel() + tailnetDiscoveryTask?.cancel() + tailnetProbeTask?.cancel() browserAuthenticator.cancel() if !preserveTailnetLoginSession { Task { @MainActor in @@ -479,6 +486,18 @@ private struct ConfigurationSheetView: View { } } + @ViewBuilder + private var identityFields: some View { + TextField("Title", text: $draft.title) + TextField("Account", text: $draft.accountName) + TextField("Identity", text: $draft.identityName) + if sheet == .tailnet { + TextField("Hostname", text: $draft.hostname) + .burrowLoginField() + .autocorrectionDisabled() + } + } + @ViewBuilder private var tailnetSections: some View { Section("Connection") { @@ -487,67 +506,39 @@ private struct ConfigurationSheetView: View { .burrowLoginField() .autocorrectionDisabled() 
.accessibilityIdentifier("tailnet-discovery-email") - - Button { - discoverTailnetAuthority() - } label: { - Label { - Text(isDiscoveringTailnet ? "Finding Server" : "Find Server") - } icon: { - Image(systemName: isDiscoveringTailnet ? "hourglass" : "at.circle") + .submitLabel(.continue) + .onSubmit { + if !usesCustomTailnetAuthority { + scheduleTailnetDiscovery(immediate: true) } } - .buttonStyle(.borderless) - .disabled(isDiscoveringTailnet || normalizedOptional(draft.discoveryEmail) == nil) - .accessibilityIdentifier("tailnet-find-server") - if let discoveryStatus { - tailnetDiscoveryCard(status: discoveryStatus, failure: nil) - } else if let discoveryError { - tailnetDiscoveryCard(status: nil, failure: discoveryError) - } + tailnetServerCard - TextField("Authority URL", text: $draft.authority) - .burrowLoginField() - .autocorrectionDisabled() - .accessibilityIdentifier("tailnet-authority") - - Text("Use the managed Tailnet authority or enter a custom Tailnet control server.") - .font(.footnote) - .foregroundStyle(.secondary) - - Button { - probeTailnetAuthority() - } label: { - Label { - Text(isProbingAuthority ? "Checking Connection" : "Check Connection") - } icon: { - Image(systemName: isProbingAuthority ? 
"hourglass" : "bolt.horizontal.circle") + if showsAdvancedTailnetSettings { + if usesCustomTailnetAuthority { + TextField("Server URL", text: $draft.authority) + .burrowLoginField() + .autocorrectionDisabled() + .accessibilityIdentifier("tailnet-authority") + } else { + TextField("Tailnet", text: $draft.tailnet) + .burrowLoginField() + .autocorrectionDisabled() + .accessibilityIdentifier("tailnet-name") } } - .buttonStyle(.borderless) - .disabled(isProbingAuthority || normalizedOptional(draft.authority) == nil) - .accessibilityIdentifier("tailnet-check-connection") - - if let authorityProbeStatus { - tailnetAuthorityProbeCard(status: authorityProbeStatus, failure: nil) - } else if let authorityProbeError { - tailnetAuthorityProbeCard(status: nil, failure: authorityProbeError) - } - - TextField("Tailnet", text: $draft.tailnet) - .burrowLoginField() - .autocorrectionDisabled() - .accessibilityIdentifier("tailnet-name") } Section("Authentication") { - Picker("Authentication", selection: $draft.authMode) { - ForEach(availableTailnetAuthModes) { mode in - Text(mode.title).tag(mode) + if showsAdvancedTailnetSettings { + Picker("Authentication", selection: $draft.authMode) { + ForEach(availableTailnetAuthModes) { mode in + Text(mode.title).tag(mode) + } } + .pickerStyle(.menu) } - .pickerStyle(.menu) if draft.authMode == .web { Button { @@ -560,7 +551,7 @@ private struct ConfigurationSheetView: View { } } .buttonStyle(.borderless) - .disabled(isStartingTailnetLogin || normalizedOptional(draft.authority) == nil) + .disabled(isStartingTailnetLogin || tailnetLoginActionDisabled) .accessibilityIdentifier("tailnet-start-sign-in") if let tailnetLoginStatus { @@ -616,32 +607,14 @@ private struct ConfigurationSheetView: View { } if sheet == .tailnet { - if let authorityProbeStatus { - Text(authorityProbeStatus.summary) + labeledValue("Server", tailnetServerDisplayLabel) + if let connectionSummary = tailnetConnectionSummary { + Text(connectionSummary) 
.font(.footnote.weight(.medium)) - .foregroundStyle(.primary) - if let detail = authorityProbeStatus.detail { - Text(detail) - .font(.footnote) - .foregroundStyle(.secondary) - .lineLimit(3) - } - } else if let authorityProbeError { - Text("Connection failed") - .font(.footnote.weight(.medium)) - .foregroundStyle(.red) - Text(authorityProbeError) - .font(.footnote) - .foregroundStyle(.secondary) - .lineLimit(3) + .foregroundStyle(tailnetConnectionSummaryColor) } - } - - if sheet == .tailnet { - HStack(spacing: 8) { - summaryBadge(isManagedTailnetAuthority ? "Managed" : "Custom") - summaryBadge(draft.authMode.title) - if tailnetLoginStatus?.running == true { + if tailnetLoginStatus?.running == true { + HStack(spacing: 8) { summaryBadge("Signed In") } } @@ -654,6 +627,44 @@ private struct ConfigurationSheetView: View { ) } + private var tailnetServerCard: some View { + VStack(alignment: .leading, spacing: 8) { + HStack(alignment: .top, spacing: 12) { + VStack(alignment: .leading, spacing: 4) { + Text(usesCustomTailnetAuthority ? "Custom Server" : "Server") + .font(.subheadline.weight(.medium)) + Text(tailnetServerDisplayLabel) + .font(.footnote.monospaced()) + .foregroundStyle(.secondary) + .textSelection(.enabled) + } + + Spacer() + + if isDiscoveringTailnet || isProbingAuthority { + ProgressView() + .controlSize(.small) + } else if let summary = tailnetConnectionSummary { + Text(summary) + .font(.caption.weight(.medium)) + .foregroundStyle(tailnetConnectionSummaryColor) + } + } + + if let detail = tailnetServerDetail { + Text(detail) + .font(.footnote) + .foregroundStyle(.secondary) + } + } + .padding(12) + .background( + RoundedRectangle(cornerRadius: 16) + .fill(.thinMaterial) + ) + .accessibilityIdentifier("tailnet-server-card") + } + private func tailnetAuthorityProbeCard( status: TailnetAuthorityProbeStatus?, failure: String? 
@@ -827,11 +838,15 @@ private struct ConfigurationSheetView: View { } case .tailnet: - Button("Use Tailscale Managed Server") { - applyTailnetDefaults(for: .tailscale) + Button(usesCustomTailnetAuthority ? "Use Automatic Server" : "Edit Custom Server") { + toggleTailnetAuthorityMode() } - if availableTailnetAuthModes.count > 1 { + Button(showsAdvancedTailnetSettings ? "Hide Advanced Settings" : "Show Advanced Settings") { + showsAdvancedTailnetSettings.toggle() + } + + if showsAdvancedTailnetSettings, availableTailnetAuthModes.count > 1 { Menu("Authentication") { ForEach(availableTailnetAuthModes) { mode in Button(mode.title) { @@ -844,9 +859,10 @@ private struct ConfigurationSheetView: View { } } - Button("Clear Discovery Result") { - resetTailnetDiscoveryFeedback() + Button("Refresh Server Lookup") { + scheduleTailnetDiscovery(immediate: true) } + .disabled(usesCustomTailnetAuthority || normalizedOptional(draft.discoveryEmail) == nil) } } @@ -885,12 +901,21 @@ private struct ConfigurationSheetView: View { private var showsBottomActionButton: Bool { #if os(iOS) - true + return true #else - false + return false #endif } + private var showsIdentitySection: Bool { + switch sheet { + case .wireGuard, .tor: + return true + case .tailnet: + return showsAdvancedTailnetSettings + } + } + private var wireGuardEditorHeight: CGFloat { #if os(iOS) 180 @@ -910,6 +935,18 @@ private struct ConfigurationSheetView: View { } } + private var tailnetLoginActionDisabled: Bool { + switch sheet { + case .tailnet: + if usesCustomTailnetAuthority { + return normalizedOptional(draft.authority) == nil + } + return false + case .wireGuard, .tor: + return true + } + } + private var submissionDisabled: Bool { switch sheet { case .wireGuard: @@ -933,6 +970,50 @@ private struct ConfigurationSheetView: View { } } + private var tailnetServerDisplayLabel: String { + if usesCustomTailnetAuthority { + return normalizedOptional(draft.authority) + ?? 
"Enter a custom Tailnet server" + } + return TailnetProvider.tailscale.defaultAuthority ?? "Tailscale managed" + } + + private var tailnetServerDetail: String? { + if usesCustomTailnetAuthority { + if let discovery = discoveryStatus { + return "Discovered from \(discovery.domain)." + } + if let discoveryError { + return discoveryError + } + return "Use a custom Tailnet authority when your domain does not advertise one." + } + return "Continue with Tailscale, or open advanced settings to use a custom server." + } + + private var tailnetConnectionSummary: String? { + if isDiscoveringTailnet { + return "Finding server" + } + if isProbingAuthority { + return "Checking" + } + if let authorityProbeStatus { + return authorityProbeStatus.summary + } + if authorityProbeError != nil { + return "Unavailable" + } + return nil + } + + private var tailnetConnectionSummaryColor: Color { + if authorityProbeError != nil { + return .red + } + return .secondary + } + private func submit() { isSubmitting = true errorMessage = nil @@ -1021,7 +1102,7 @@ private struct ConfigurationSheetView: View { guard !didRunAutomation, sheet == .tailnet, let automation = BurrowAutomationConfig.current, - automation.action == .tailnetLogin || automation.action == .headscaleProbe + automation.action == .tailnetLogin || automation.action == .tailnetProbe else { return } @@ -1037,7 +1118,9 @@ private struct ConfigurationSheetView: View { case .tailnetLogin: applyTailnetDefaults(for: .tailscale) startTailnetLogin() - case .headscaleProbe: + case .tailnetProbe: + usesCustomTailnetAuthority = true + showsAdvancedTailnetSettings = true draft.authority = automation.authority ?? TailnetProvider.headscale.defaultAuthority ?? draft.authority probeTailnetAuthority() } @@ -1060,10 +1143,13 @@ private struct ConfigurationSheetView: View { ) var noteParts: [String] = [ - isManagedTailnetAuthority ? 
"Managed Tailnet" : "Custom Tailnet", - "Auth: \(draft.authMode.title)", + "Server: \(hostnameFallback(from: payload.authority ?? "", fallback: "tailnet"))", ] + if showsAdvancedTailnetSettings || draft.authMode != .web { + noteParts.append("Auth: \(draft.authMode.title)") + } + if draft.authMode == .web, tailnetLoginStatus?.running == true { noteParts.append("Browser sign-in complete") } @@ -1119,6 +1205,7 @@ private struct ConfigurationSheetView: View { private func applyTailnetDefaults(for provider: TailnetProvider) { resetTailnetDiscoveryFeedback() + usesCustomTailnetAuthority = provider != .tailscale draft.authority = provider.defaultAuthority ?? "" if !availableTailnetAuthModes.contains(draft.authMode) { draft.authMode = .web @@ -1126,12 +1213,6 @@ private struct ConfigurationSheetView: View { } private func startTailnetLogin() { - guard let authority = normalizedOptional(draft.authority) else { - tailnetLoginStatus = nil - tailnetLoginError = "Enter a server URL first." - return - } - isStartingTailnetLogin = true tailnetLoginError = nil preserveTailnetLoginSession = false @@ -1139,6 +1220,7 @@ private struct ConfigurationSheetView: View { Task { @MainActor in defer { isStartingTailnetLogin = false } do { + let authority = try await resolveTailnetAuthorityForLogin() let status = try await networkViewModel.startTailnetLogin( accountName: normalized(draft.accountName, fallback: "default"), identityName: normalized(draft.identityName, fallback: "apple"), @@ -1176,12 +1258,14 @@ private struct ConfigurationSheetView: View { } private func resetAuthorityProbe() { + tailnetProbeTask?.cancel() authorityProbeStatus = nil authorityProbeError = nil tailnetLoginError = nil } private func resetTailnetDiscoveryFeedback() { + tailnetDiscoveryTask?.cancel() discoveryStatus = nil discoveryError = nil } @@ -1210,6 +1294,83 @@ private struct ConfigurationSheetView: View { } } + private func scheduleTailnetDiscovery(immediate: Bool = false) { + guard sheet == .tailnet else { 
return } + tailnetDiscoveryTask?.cancel() + + guard !usesCustomTailnetAuthority else { + discoveryStatus = nil + discoveryError = nil + return + } + + guard normalizedOptional(draft.discoveryEmail) != nil else { + discoveryStatus = nil + discoveryError = nil + draft.authority = TailnetProvider.tailscale.defaultAuthority ?? "" + return + } + + tailnetDiscoveryTask = Task { @MainActor in + if !immediate { + try? await Task.sleep(for: .milliseconds(450)) + } + guard !Task.isCancelled else { return } + discoverTailnetAuthority() + } + } + + private func scheduleTailnetAuthorityProbe() { + guard sheet == .tailnet else { return } + tailnetProbeTask?.cancel() + guard normalizedOptional(draft.authority) != nil else { return } + + tailnetProbeTask = Task { @MainActor in + try? await Task.sleep(for: .milliseconds(300)) + guard !Task.isCancelled else { return } + probeTailnetAuthority() + } + } + + private func toggleTailnetAuthorityMode() { + let discoveredAuthority = discoveryStatus?.authority + usesCustomTailnetAuthority.toggle() + resetTailnetDiscoveryFeedback() + resetAuthorityProbe() + if usesCustomTailnetAuthority { + draft.authority = discoveredAuthority ?? draft.authority + } else { + draft.authority = TailnetProvider.tailscale.defaultAuthority ?? "" + scheduleTailnetDiscovery(immediate: normalizedOptional(draft.discoveryEmail) != nil) + } + } + + private func resolveTailnetAuthorityForLogin() async throws -> String { + if !usesCustomTailnetAuthority { + let authority = TailnetProvider.tailscale.defaultAuthority ?? 
"" + draft.authority = authority + scheduleTailnetAuthorityProbe() + return authority + } + + if let authority = normalizedOptional(draft.authority) { + return authority + } + + if let email = normalizedOptional(draft.discoveryEmail) { + let discovery = try await networkViewModel.discoverTailnet(email: email) + discoveryStatus = discovery + discoveryError = nil + draft.authority = discovery.authority + scheduleTailnetAuthorityProbe() + return discovery.authority + } + + throw NSError(domain: "BurrowTailnet", code: 1, userInfo: [ + NSLocalizedDescriptionKey: "Enter an email address or a custom server URL first." + ]) + } + private func beginTailnetLoginPolling(sessionID: String) { tailnetLoginPollTask?.cancel() tailnetLoginPollTask = Task { @MainActor in @@ -1336,13 +1497,16 @@ private struct ConfigurationSheetView: View { if tailnetLoginSessionID != nil { return "Resume Sign-In" } - return "Start Sign-In" + return "Continue with Tailscale" } private var tailnetAuthenticationFootnote: String { switch draft.authMode { case .web: - return "Burrow asks the daemon to start a Tailnet browser sign-in session, then closes it locally once the daemon reports the device is running." + if usesCustomTailnetAuthority { + return "Burrow signs in through the daemon using your custom Tailnet server." + } + return "Burrow signs in through the daemon using Tailscale's managed browser flow." case .none: return "Save the authority only. Useful when the control plane handles authentication elsewhere." 
case .password, .preauthKey: @@ -1357,10 +1521,6 @@ private struct ConfigurationSheetView: View { ) } - private var isManagedTailnetAuthority: Bool { - TailnetProvider.isManagedTailscaleAuthority(normalizedOptional(draft.authority)) - } - @ViewBuilder private func labeledValue(_ label: String, _ value: String) -> some View { VStack(alignment: .leading, spacing: 2) { @@ -1383,12 +1543,7 @@ private struct AccountRowView: View { VStack(alignment: .leading, spacing: 4) { Text(account.title) .font(.headline) - HStack(spacing: 8) { - Text(account.kind.title) - if let provider = account.provider { - Text(provider.title) - } - } + Text(account.kind.title) .font(.subheadline) .foregroundStyle(account.kind.accentColor) } @@ -1470,6 +1625,12 @@ private extension View { @MainActor private final class TailnetBrowserAuthenticator: NSObject { private var session: ASWebAuthenticationSession? + private static var prefersEphemeralSessionForCurrentProcess: Bool { + let rawValue = ProcessInfo.processInfo.environment["BURROW_UI_TEST_EPHEMERAL_AUTH"]? 
+ .trimmingCharacters(in: .whitespacesAndNewlines) + .lowercased() + return rawValue == "1" || rawValue == "true" || rawValue == "yes" + } func start(url: URL, onDismiss: @escaping @Sendable () -> Void) { cancel() @@ -1477,7 +1638,7 @@ private final class TailnetBrowserAuthenticator: NSObject { onDismiss() } session.presentationContextProvider = self - session.prefersEphemeralWebBrowserSession = false + session.prefersEphemeralWebBrowserSession = Self.prefersEphemeralSessionForCurrentProcess self.session = session _ = session.start() } @@ -1516,7 +1677,7 @@ private final class TailnetBrowserAuthenticator { private struct BurrowAutomationConfig { enum Action: String { case tailnetLogin = "tailnet-login" - case headscaleProbe = "headscale-probe" + case tailnetProbe = "tailnet-probe" } let action: Action diff --git a/Apple/UI/Networks/Network.swift b/Apple/UI/Networks/Network.swift index 32f0b8c..35bd0e1 100644 --- a/Apple/UI/Networks/Network.swift +++ b/Apple/UI/Networks/Network.swift @@ -303,7 +303,7 @@ enum TailnetProvider: String, CaseIterable, Codable, Identifiable, Sendable { var title: String { switch self { case .tailscale: "Tailscale" - case .headscale: "Headscale" + case .headscale: "Custom Tailnet" case .burrow: "Burrow" } } @@ -375,7 +375,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable { switch self { case .wireGuard: "Import a tunnel and optional account metadata." case .tor: "Store Arti account and identity preferences." - case .tailnet: "Save Tailnet authority, identity, and login material." + case .tailnet: "Save Tailnet authority, identity defaults, and login material." } } @@ -402,7 +402,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable { case .tor: "Tor account preferences are stored on Apple now. The managed Tor runtime is not wired on Apple in this branch yet." case .tailnet: - "Tailnet accounts can sign in from Apple now. 
The managed Apple runtime is still pending, but Tailnet networks can be stored in the daemon." + "Tailnet accounts can sign in from Apple now. The managed Apple runtime is still pending, but Tailnet networks can already be stored in the daemon." } } } diff --git a/Scripts/run-ios-tailnet-ui-tests.sh b/Scripts/run-ios-tailnet-ui-tests.sh index 5086bd1..5170a1e 100755 --- a/Scripts/run-ios-tailnet-ui-tests.sh +++ b/Scripts/run-ios-tailnet-ui-tests.sh @@ -5,13 +5,18 @@ repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" bundle_id="${BURROW_UI_TEST_APP_BUNDLE_ID:-com.hackclub.burrow}" simulator_name="${BURROW_UI_TEST_SIMULATOR_NAME:-iPhone 17 Pro}" simulator_os="${BURROW_UI_TEST_SIMULATOR_OS:-26.4}" +simulator_id="${BURROW_UI_TEST_SIMULATOR_ID:-}" derived_data_path="${BURROW_UI_TEST_DERIVED_DATA_PATH:-/tmp/burrow-ui-tests-deriveddata}" source_packages_path="${BURROW_UI_TEST_SOURCE_PACKAGES_PATH:-/tmp/burrow-ui-tests-sourcepackages}" -fallback_dir="${HOME}/Library/Application Support/${bundle_id}/SimulatorFallback" +fallback_dir="/tmp/${bundle_id}/SimulatorFallback" socket_path="${fallback_dir}/burrow.sock" +tailnet_state_root="/tmp/${bundle_id}/SimulatorTailnetState" daemon_log="${BURROW_UI_TEST_DAEMON_LOG:-/tmp/burrow-ui-test-daemon.log}" +ui_test_config_path="${BURROW_UI_TEST_CONFIG_PATH:-/tmp/burrow-ui-test-config.json}" +ui_test_runner_bundle_id="${bundle_id}.uitests.xctrunner" ui_test_email="${BURROW_UI_TEST_EMAIL:-ui-test@burrow.net}" ui_test_username="${BURROW_UI_TEST_USERNAME:-ui-test}" +ui_test_tailnet_mode="${BURROW_UI_TEST_TAILNET_MODE:-tailscale}" password_secret="${repo_root}/secrets/infra/authentik-ui-test-password.age" age_identity="${BURROW_UI_TEST_AGE_IDENTITY:-${HOME}/.ssh/id_ed25519}" @@ -25,10 +30,60 @@ if [[ -z "$ui_test_password" ]]; then fi fi -mkdir -p "$fallback_dir" "$derived_data_path" "$source_packages_path" +rm -rf "$fallback_dir" "$tailnet_state_root" +mkdir -p "$fallback_dir" "$tailnet_state_root" "$derived_data_path" 
"$source_packages_path" rm -f "$socket_path" +resolve_simulator_id() { + xcrun simctl list devices available -j | python3 -c ' +import json +import os +import sys + +target_name = sys.argv[1] +target_os = sys.argv[2] +target_runtime = "com.apple.CoreSimulator.SimRuntime.iOS-" + target_os.replace(".", "-") +devices = json.load(sys.stdin).get("devices", {}) +healthy = [] +for runtime, entries in devices.items(): + if runtime != target_runtime: + continue + for entry in entries: + if not entry.get("isAvailable", False): + continue + if not os.path.isdir(entry.get("dataPath", "")): + continue + healthy.append(entry) +for entry in healthy: + if entry.get("name") == target_name: + print(entry["udid"]) + raise SystemExit(0) +for entry in healthy: + if target_name in entry.get("name", ""): + print(entry["udid"]) + raise SystemExit(0) +raise SystemExit(1) +' "$simulator_name" "$simulator_os" +} + +if [[ -z "$simulator_id" ]]; then + simulator_id="$(resolve_simulator_id || true)" +fi + +if [[ -n "$simulator_id" ]]; then + xcrun simctl boot "$simulator_id" >/dev/null 2>&1 || true + xcrun simctl bootstatus "$simulator_id" -b + xcrun simctl terminate "$simulator_id" "$bundle_id" >/dev/null 2>&1 || true + xcrun simctl terminate "$simulator_id" "$ui_test_runner_bundle_id" >/dev/null 2>&1 || true + xcrun simctl uninstall "$simulator_id" "$bundle_id" >/dev/null 2>&1 || true + xcrun simctl uninstall "$simulator_id" "$ui_test_runner_bundle_id" >/dev/null 2>&1 || true + destination="id=${simulator_id}" +else + destination="platform=iOS Simulator,name=${simulator_name},OS=${simulator_os}" +fi + cleanup() { + rm -f "$ui_test_config_path" if [[ -n "${daemon_pid:-}" ]]; then kill "$daemon_pid" >/dev/null 2>&1 || true wait "$daemon_pid" >/dev/null 2>&1 || true @@ -36,11 +91,33 @@ cleanup() { } trap cleanup EXIT +umask 077 +python3 - <<'PY' "$ui_test_config_path" "$ui_test_email" "$ui_test_username" "$ui_test_password" "$ui_test_tailnet_mode" +import json +import pathlib +import sys + 
+config_path = pathlib.Path(sys.argv[1]) +config_path.write_text( + json.dumps( + { + "email": sys.argv[2], + "username": sys.argv[3], + "password": sys.argv[4], + "mode": sys.argv[5], + } + ), + encoding="utf-8", +) +PY + cargo build -p burrow --bin burrow ( cd "$fallback_dir" + RUST_LOG="${BURROW_UI_TEST_RUST_LOG:-info,burrow=debug}" \ BURROW_SOCKET_PATH="burrow.sock" \ + BURROW_TAILSCALE_STATE_ROOT="$tailnet_state_root" \ "${repo_root}/target/debug/burrow" daemon >"$daemon_log" 2>&1 ) & daemon_pid=$! @@ -56,18 +133,31 @@ if [[ ! -S "$socket_path" ]]; then exit 1 fi +common_xcodebuild_args=( + -quiet + -skipPackagePluginValidation + -project "${repo_root}/Apple/Burrow.xcodeproj" + -scheme App + -configuration Debug + -destination "$destination" + -derivedDataPath "$derived_data_path" + -clonedSourcePackagesDirPath "$source_packages_path" + -only-testing:BurrowUITests + -parallel-testing-enabled NO + -maximum-concurrent-test-simulator-destinations 1 + -maximum-parallel-testing-workers 1 + CODE_SIGNING_ALLOWED=NO +) + +xcodebuild \ + "${common_xcodebuild_args[@]}" \ + build-for-testing + BURROW_UI_TEST_EMAIL="$ui_test_email" \ BURROW_UI_TEST_USERNAME="$ui_test_username" \ BURROW_UI_TEST_PASSWORD="$ui_test_password" \ +BURROW_UI_TEST_CONFIG_PATH="$ui_test_config_path" \ +BURROW_UI_TEST_EPHEMERAL_AUTH=1 \ xcodebuild \ - -quiet \ - -skipPackagePluginValidation \ - -project "${repo_root}/Apple/Burrow.xcodeproj" \ - -scheme App \ - -configuration Debug \ - -destination "platform=iOS Simulator,name=${simulator_name},OS=${simulator_os}" \ - -derivedDataPath "$derived_data_path" \ - -clonedSourcePackagesDirPath "$source_packages_path" \ - -only-testing:BurrowUITests \ - CODE_SIGNING_ALLOWED=NO \ - test + "${common_xcodebuild_args[@]}" \ + test-without-building diff --git a/burrow/src/auth/server/tailscale.rs b/burrow/src/auth/server/tailscale.rs index 55516e1..d08c807 100644 --- a/burrow/src/auth/server/tailscale.rs +++ b/burrow/src/auth/server/tailscale.rs @@ -26,6 +26,8 
@@ pub struct TailscaleLoginStartRequest { pub hostname: Option, #[serde(default)] pub control_url: Option, + #[serde(default)] + pub packet_socket: Option, } #[derive(Clone, Debug, Serialize, Deserialize, Default)] @@ -55,23 +57,35 @@ pub struct TailscaleLoginStartResponse { pub status: TailscaleLoginStatus, } +pub struct TailscaleLoginSession { + pub session_id: String, + pub helper: Arc, + pub status: TailscaleLoginStatus, +} + #[derive(Clone, Default)] pub struct TailscaleBridgeManager { client: Client, sessions: Arc>>>, } -struct ManagedSession { +pub struct TailscaleHelperProcess { session_id: String, listen_url: String, + packet_socket: Option, + control_url: Option, state_dir: PathBuf, child: Arc>, _stderr_task: JoinHandle<()>, } +type ManagedSession = TailscaleHelperProcess; + #[derive(Debug, Deserialize)] struct HelperHello { listen_addr: String, + #[serde(default)] + packet_socket: Option, } impl TailscaleBridgeManager { @@ -79,76 +93,71 @@ impl TailscaleBridgeManager { &self, request: TailscaleLoginStartRequest, ) -> Result { - let key = session_key(&request.account_name, &request.identity_name); + let session = self.ensure_session(request).await?; + Ok(TailscaleLoginStartResponse { + session_id: session.session_id, + status: session.status, + }) + } + + pub async fn ensure_session( + &self, + request: TailscaleLoginStartRequest, + ) -> Result { + let key = session_key_for_request(&request); + let requested_packet_socket = request + .packet_socket + .as_deref() + .map(str::trim) + .filter(|value| !value.is_empty()); + let requested_control_url = request + .control_url + .as_deref() + .map(str::trim) + .filter(|value| !value.is_empty()); if let Some(existing) = self.sessions.lock().await.get(&key).cloned() { - match self.fetch_status(existing.as_ref()).await { - Ok(status) => { - return Ok(TailscaleLoginStartResponse { - session_id: existing.session_id.clone(), - status, - }); - } - Err(err) => { - log::warn!( - "tailscale login session {} is stale, 
restarting: {err}", - existing.session_id - ); - self.sessions.lock().await.remove(&key); - let _ = self.shutdown_session(existing.as_ref()).await; + let needs_restart_for_socket = match (requested_packet_socket, existing.packet_socket()) + { + (Some(requested), Some(current)) => current != Path::new(requested), + (Some(_), None) => true, + _ => false, + }; + let needs_restart_for_control_url = + requested_control_url != existing.control_url().map(|value| value.trim()); + + if !needs_restart_for_socket && !needs_restart_for_control_url { + match self.fetch_status(existing.as_ref()).await { + Ok(status) => { + return Ok(TailscaleLoginSession { + session_id: existing.session_id.clone(), + helper: existing, + status, + }); + } + Err(err) => { + log::warn!( + "tailscale login session {} is stale, restarting: {err}", + existing.session_id + ); + } } + } else { + log::info!( + "tailscale login session {} no longer matches requested transport, restarting", + existing.session_id + ); } + + self.sessions.lock().await.remove(&key); + let _ = self.shutdown_session(existing.as_ref()).await; } - let state_dir = state_root().join(session_dir_name(&request)); - tokio::fs::create_dir_all(&state_dir) - .await - .with_context(|| format!("failed to create {}", state_dir.display()))?; - - let mut child = helper_command(&request, &state_dir)? - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .spawn() - .context("failed to spawn tailscale login helper")?; - - let stdout = child - .stdout - .take() - .context("tailscale helper stdout unavailable")?; - let stderr = child - .stderr - .take() - .context("tailscale helper stderr unavailable")?; - - let hello_line = tokio::time::timeout(Duration::from_secs(20), async move { - let mut lines = BufReader::new(stdout).lines(); - lines.next_line().await - }) - .await - .context("timed out waiting for tailscale helper startup")?? 
- .context("tailscale helper exited before reporting listen address")?; - - let hello: HelperHello = - serde_json::from_str(&hello_line).context("invalid tailscale helper startup line")?; - - let stderr_task = tokio::spawn(async move { - let mut lines = BufReader::new(stderr).lines(); - while let Ok(Some(line)) = lines.next_line().await { - log::info!("tailscale-login-bridge: {line}"); - } - }); - - let session = Arc::new(ManagedSession { - session_id: random_session_id(), - listen_url: format!("http://{}", hello.listen_addr), - state_dir, - child: Arc::new(Mutex::new(child)), - _stderr_task: stderr_task, - }); - + let session = Arc::new(spawn_tailscale_helper(&request).await?); let status = self.wait_for_status(session.as_ref()).await?; - let response = TailscaleLoginStartResponse { + let response = TailscaleLoginSession { session_id: session.session_id.clone(), + helper: session.clone(), status, }; @@ -192,7 +201,7 @@ impl TailscaleBridgeManager { let mut last_error = None; let mut last_status = None; for _ in 0..40 { - match self.fetch_status(session).await { + match session.status_with_client(&self.client).await { Ok(status) if status.running || status.auth_url.is_some() => return Ok(status), Ok(status) => last_status = Some(status), Err(err) => last_error = Some(err), @@ -206,28 +215,7 @@ impl TailscaleBridgeManager { } async fn fetch_status(&self, session: &ManagedSession) -> Result { - let mut child = session.child.lock().await; - if let Some(status) = child.try_wait()? { - return Err(anyhow!( - "tailscale helper exited with status {status} for {}", - session.state_dir.display() - )); - } - drop(child); - - let response = self - .client - .get(format!("{}/status", session.listen_url)) - .send() - .await - .context("failed to query tailscale helper status")? 
- .error_for_status() - .context("tailscale helper status request failed")?; - - response - .json::() - .await - .context("invalid tailscale helper status response") + session.status_with_client(&self.client).await } async fn remove_session_by_id(&self, session_id: &str) -> Option> { @@ -239,14 +227,74 @@ impl TailscaleBridgeManager { } async fn shutdown_session(&self, session: &ManagedSession) -> Result<()> { - let _ = self - .client - .post(format!("{}/shutdown", session.listen_url)) + session.shutdown_with_client(&self.client).await + } +} + +impl TailscaleHelperProcess { + pub fn session_id(&self) -> &str { + &self.session_id + } + + pub fn packet_socket(&self) -> Option<&Path> { + self.packet_socket.as_deref() + } + + pub fn control_url(&self) -> Option<&str> { + self.control_url.as_deref() + } + + pub fn state_dir(&self) -> &Path { + &self.state_dir + } + + pub async fn status(&self) -> Result { + self.status_with_client(&Client::new()).await + } + + pub async fn shutdown(&self) -> Result<()> { + self.shutdown_with_client(&Client::new()).await + } + + async fn status_with_client(&self, client: &Client) -> Result { + let mut child = self.child.lock().await; + if let Some(status) = child.try_wait()? { + return Err(anyhow!( + "tailscale helper exited with status {status} for {}", + self.state_dir.display() + )); + } + drop(child); + + let response = client + .get(format!("{}/status", self.listen_url)) .send() - .await; + .await + .context("failed to query tailscale helper status")? 
+ .error_for_status() + .context("tailscale helper status request failed")?; + + let status = response + .json::() + .await + .context("invalid tailscale helper status response")?; + + log::info!( + "tailscale helper status session={} backend_state={} running={} needs_login={} auth_url={:?}", + self.session_id, + status.backend_state, + status.running, + status.needs_login, + status.auth_url + ); + Ok(status) + } + + async fn shutdown_with_client(&self, client: &Client) -> Result<()> { + let _ = client.post(format!("{}/shutdown", self.listen_url)).send().await; for _ in 0..10 { - let mut child = session.child.lock().await; + let mut child = self.child.lock().await; if child.try_wait()?.is_some() { return Ok(()); } @@ -254,7 +302,7 @@ impl TailscaleBridgeManager { tokio::time::sleep(Duration::from_millis(100)).await; } - let mut child = session.child.lock().await; + let mut child = self.child.lock().await; child .start_kill() .context("failed to kill tailscale helper")?; @@ -263,6 +311,58 @@ impl TailscaleBridgeManager { } } +pub async fn spawn_tailscale_helper( + request: &TailscaleLoginStartRequest, +) -> Result { + let state_dir = state_root().join(session_dir_name(request)); + tokio::fs::create_dir_all(&state_dir) + .await + .with_context(|| format!("failed to create {}", state_dir.display()))?; + + let mut child = helper_command(request, &state_dir)? + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .context("failed to spawn tailscale login helper")?; + + let stdout = child + .stdout + .take() + .context("tailscale helper stdout unavailable")?; + let stderr = child + .stderr + .take() + .context("tailscale helper stderr unavailable")?; + + let hello_line = tokio::time::timeout(Duration::from_secs(20), async move { + let mut lines = BufReader::new(stdout).lines(); + lines.next_line().await + }) + .await + .context("timed out waiting for tailscale helper startup")?? 
+ .context("tailscale helper exited before reporting listen address")?; + + let hello: HelperHello = + serde_json::from_str(&hello_line).context("invalid tailscale helper startup line")?; + + let stderr_task = tokio::spawn(async move { + let mut lines = BufReader::new(stderr).lines(); + while let Ok(Some(line)) = lines.next_line().await { + log::info!("tailscale-login-bridge: {line}"); + } + }); + + Ok(TailscaleHelperProcess { + session_id: random_session_id(), + listen_url: format!("http://{}", hello.listen_addr), + packet_socket: hello.packet_socket.map(PathBuf::from), + control_url: request.control_url.clone(), + state_dir, + child: Arc::new(Mutex::new(child)), + _stderr_task: stderr_task, + }) +} + fn helper_command(request: &TailscaleLoginStartRequest, state_dir: &Path) -> Result { let mut command = if let Ok(path) = env::var("BURROW_TAILSCALE_HELPER") { Command::new(path) @@ -291,10 +391,21 @@ fn helper_command(request: &TailscaleLoginStartRequest, state_dir: &Path) -> Res } } + if let Some(packet_socket) = request.packet_socket.as_deref() { + let trimmed = packet_socket.trim(); + if !trimmed.is_empty() { + command.arg("--packet-socket").arg(trimmed); + } + } + Ok(command) } -fn state_root() -> PathBuf { +pub(crate) fn packet_socket_path(request: &TailscaleLoginStartRequest) -> PathBuf { + state_root().join(session_dir_name(request)).join("packet.sock") +} + +pub(crate) fn state_root() -> PathBuf { if let Ok(path) = env::var("BURROW_TAILSCALE_STATE_ROOT") { return PathBuf::from(path); } @@ -315,19 +426,34 @@ fn state_root() -> PathBuf { .join("tailscale") } -fn session_dir_name(request: &TailscaleLoginStartRequest) -> String { +pub(crate) fn session_dir_name(request: &TailscaleLoginStartRequest) -> String { format!( - "{}-{}", + "{}-{}-{}", slug(&request.account_name), - slug(&request.identity_name) + slug(&request.identity_name), + slug(control_scope(request)) ) } -fn session_key(account_name: &str, identity_name: &str) -> String { - 
format!("{account_name}:{identity_name}") +fn session_key_for_request(request: &TailscaleLoginStartRequest) -> String { + format!( + "{}:{}:{}", + request.account_name, + request.identity_name, + control_scope(request) + ) } -fn default_hostname(request: &TailscaleLoginStartRequest) -> String { +fn control_scope(request: &TailscaleLoginStartRequest) -> &str { + request + .control_url + .as_deref() + .map(str::trim) + .filter(|value| !value.is_empty()) + .unwrap_or("tailscale-managed") +} + +pub(crate) fn default_hostname(request: &TailscaleLoginStartRequest) -> String { request .hostname .as_deref() @@ -370,14 +496,24 @@ mod tests { } #[test] - fn state_dir_is_stable_by_account_and_identity() { + fn state_dir_is_scoped_by_account_identity_and_control_plane() { let request = TailscaleLoginStartRequest { account_name: "default".to_owned(), identity_name: "apple".to_owned(), hostname: None, control_url: None, + packet_socket: None, }; - assert_eq!(session_dir_name(&request), "default-apple"); + assert_eq!(session_dir_name(&request), "default-apple-tailscale-managed"); assert_eq!(default_hostname(&request), "burrow-apple"); + + let custom_request = TailscaleLoginStartRequest { + control_url: Some("https://ts.burrow.net".to_owned()), + ..request + }; + assert_eq!( + session_dir_name(&custom_request), + "default-apple-httpstsburrownet" + ); } } diff --git a/burrow/src/control/discovery.rs b/burrow/src/control/discovery.rs index 5fc7add..d044a62 100644 --- a/burrow/src/control/discovery.rs +++ b/burrow/src/control/discovery.rs @@ -1,6 +1,7 @@ use anyhow::{anyhow, Context, Result}; use reqwest::{Client, StatusCode, Url}; use serde::{Deserialize, Serialize}; +use tracing::{debug, info}; use super::TailnetProvider; @@ -43,6 +44,7 @@ struct WebFingerLink { pub async fn discover_tailnet(email: &str) -> Result { let domain = email_domain(email)?; + info!(%email, %domain, "tailnet discovery requested"); let base_url = Url::parse(&format!("https://{domain}")) .with_context(|| 
format!("invalid discovery domain {domain}"))?; let client = Client::builder() @@ -116,12 +118,21 @@ pub async fn discover_tailnet_at( base_url: &Url, ) -> Result { let domain = email_domain(email)?; + debug!(%email, %domain, base_url = %base_url, "starting tailnet domain discovery"); if let Some(discovery) = discover_well_known(client, base_url).await? { + info!( + %email, + %domain, + authority = %discovery.authority, + provider = ?discovery.provider, + "resolved tailnet discovery from well-known document" + ); return Ok(TailnetDiscovery { domain, ..discovery }); } if let Some(authority) = discover_webfinger(client, email, base_url).await? { + info!(%email, %domain, %authority, "resolved tailnet discovery from webfinger"); return Ok(TailnetDiscovery { domain, provider: inferred_provider(Some(&authority), None), @@ -162,6 +173,7 @@ async fn discover_well_known(client: &Client, base_url: &Url) -> Result Res url.query_pairs_mut() .append_pair("resource", &format!("acct:{email}")) .append_pair("rel", TAILNET_DISCOVERY_REL); + debug!(%email, url = %url, "requesting tailnet webfinger document"); let response = client .get(url) diff --git a/burrow/src/daemon/instance.rs b/burrow/src/daemon/instance.rs index 0a23ddc..9b2e138 100644 --- a/burrow/src/daemon/instance.rs +++ b/burrow/src/daemon/instance.rs @@ -8,7 +8,7 @@ use rusqlite::Connection; use tokio::sync::{mpsc, watch, RwLock}; use tokio_stream::wrappers::ReceiverStream; use tonic::{Request, Response, Status as RspStatus}; -use tracing::warn; +use tracing::{debug, info, warn}; use tun::tokio::TunInterface; use super::{ @@ -16,15 +16,15 @@ use super::{ networks_server::Networks, tailnet_control_server::TailnetControl, tunnel_server::Tunnel, Empty, Network, NetworkDeleteRequest, NetworkListResponse, NetworkReorderRequest, State as RPCTunnelState, TailnetDiscoverRequest, TailnetDiscoverResponse, - TailnetProbeRequest, TailnetProbeResponse, TunnelConfigurationResponse, + TailnetProbeRequest, TailnetProbeResponse, 
TunnelConfigurationResponse, TunnelPacket, TunnelStatusResponse, }, - runtime::{ActiveTunnel, ResolvedTunnel}, + runtime::{tailnet_helper_request, ActiveTunnel, ResolvedTunnel}, }; use crate::{ auth::server::tailscale::{ - TailscaleBridgeManager, TailscaleLoginStartRequest as BridgeLoginStartRequest, - TailscaleLoginStatus, + packet_socket_path, TailscaleBridgeManager, + TailscaleLoginStartRequest as BridgeLoginStartRequest, TailscaleLoginStatus, }, control::discovery, daemon::rpc::ServerConfig, @@ -87,11 +87,20 @@ impl DaemonRPCServer { } async fn current_tunnel_configuration(&self) -> Result { - let config = self - .resolve_tunnel() - .await? - .server_config() - .map_err(proc_err)?; + let config = { + let active = self.active_tunnel.read().await; + active + .as_ref() + .map(|tunnel| tunnel.server_config().clone()) + }; + let config = match config { + Some(config) => config, + None => self + .resolve_tunnel() + .await? + .server_config() + .map_err(proc_err)?, + }; Ok(configuration_rsp(config)) } @@ -111,8 +120,18 @@ impl DaemonRPCServer { async fn replace_active_tunnel(&self, desired: ResolvedTunnel) -> Result<(), RspStatus> { let _ = self.stop_active_tunnel().await?; + let tailnet_helper = match &desired { + ResolvedTunnel::Tailnet { identity, config } => Some( + self.tailnet_login + .ensure_session(tailnet_helper_request(identity, config)) + .await + .map_err(proc_err)? 
+ .helper, + ), + _ => None, + }; let active = desired - .start(self.tun_interface.clone()) + .start(self.tun_interface.clone(), tailnet_helper) .await .map_err(proc_err)?; self.active_tunnel.write().await.replace(active); @@ -137,6 +156,23 @@ impl DaemonRPCServer { Ok(()) } + fn tailnet_bridge_request( + account_name: String, + identity_name: String, + hostname: String, + authority: String, + ) -> BridgeLoginStartRequest { + let mut request = BridgeLoginStartRequest { + account_name, + identity_name, + hostname: (!hostname.trim().is_empty()).then_some(hostname), + control_url: Self::tailnet_control_url(&authority), + packet_socket: None, + }; + request.packet_socket = Some(packet_socket_path(&request).display().to_string()); + request + } + fn tailnet_control_url(authority: &str) -> Option { let authority = discovery::normalize_authority(authority); (!discovery::is_managed_tailscale_authority(&authority)).then_some(authority) @@ -146,6 +182,7 @@ impl DaemonRPCServer { #[tonic::async_trait] impl Tunnel for DaemonRPCServer { type TunnelConfigurationStream = ReceiverStream>; + type TunnelPacketsStream = ReceiverStream>; type TunnelStatusStream = ReceiverStream>; async fn tunnel_configuration( @@ -171,6 +208,62 @@ impl Tunnel for DaemonRPCServer { Ok(Response::new(ReceiverStream::new(rx))) } + async fn tunnel_packets( + &self, + request: Request>, + ) -> Result, RspStatus> { + let (packet_tx, mut packet_rx) = { + let guard = self.active_tunnel.read().await; + let Some(active) = guard.as_ref() else { + return Err(RspStatus::failed_precondition("no active tunnel")); + }; + active.packet_stream().ok_or_else(|| { + RspStatus::failed_precondition( + "active tunnel does not support packet streaming", + ) + })? 
+ }; + + let (tx, rx) = mpsc::channel(128); + tokio::spawn(async move { + loop { + match packet_rx.recv().await { + Ok(payload) => { + if tx.send(Ok(TunnelPacket { payload })).await.is_err() { + break; + } + } + Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => continue, + Err(tokio::sync::broadcast::error::RecvError::Closed) => break, + } + } + }); + + let mut inbound = request.into_inner(); + tokio::spawn(async move { + loop { + match inbound.message().await { + Ok(Some(packet)) => { + debug!( + "daemon tunnel packet stream received {} bytes from client", + packet.payload.len() + ); + if packet_tx.send(packet.payload).await.is_err() { + break; + } + } + Ok(None) => break, + Err(error) => { + warn!("tailnet packet stream receive error: {error}"); + break; + } + } + } + }); + + Ok(Response::new(ReceiverStream::new(rx))) + } + async fn tunnel_start(&self, _request: Request) -> Result, RspStatus> { let desired = self.resolve_tunnel().await?; let already_running = { @@ -287,9 +380,16 @@ impl TailnetControl for DaemonRPCServer { request: Request, ) -> Result, RspStatus> { let request = request.into_inner(); + info!(email = %request.email, "daemon tailnet discover RPC received"); let discovery = discovery::discover_tailnet(&request.email) .await .map_err(proc_err)?; + info!( + email = %request.email, + authority = %discovery.authority, + provider = ?discovery.provider, + "daemon tailnet discover RPC resolved" + ); Ok(Response::new(TailnetDiscoverResponse { domain: discovery.domain, @@ -325,17 +425,32 @@ impl TailnetControl for DaemonRPCServer { request: Request, ) -> Result, RspStatus> { let request = request.into_inner(); + info!( + account = %request.account_name, + identity = %request.identity_name, + authority = %request.authority, + "daemon tailnet login start RPC received" + ); let response = self .tailnet_login - .start_login(BridgeLoginStartRequest { - account_name: request.account_name, - identity_name: request.identity_name, - hostname: 
(!request.hostname.trim().is_empty()).then_some(request.hostname), - control_url: Self::tailnet_control_url(&request.authority), - }) + .start_login(Self::tailnet_bridge_request( + request.account_name, + request.identity_name, + request.hostname, + request.authority, + )) .await .map_err(proc_err)?; + info!( + session_id = %response.session_id, + backend_state = %response.status.backend_state, + running = response.status.running, + needs_login = response.status.needs_login, + auth_url = ?response.status.auth_url, + "daemon tailnet login start RPC resolved" + ); + Ok(Response::new(tailnet_login_rsp( response.session_id, response.status, @@ -347,6 +462,7 @@ impl TailnetControl for DaemonRPCServer { request: Request, ) -> Result, RspStatus> { let request = request.into_inner(); + info!(session_id = %request.session_id, "daemon tailnet login status RPC received"); let status = self .tailnet_login .status(&request.session_id) @@ -355,6 +471,14 @@ impl TailnetControl for DaemonRPCServer { let Some(status) = status else { return Err(RspStatus::not_found("tailnet login session not found")); }; + info!( + session_id = %request.session_id, + backend_state = %status.backend_state, + running = status.running, + needs_login = status.needs_login, + auth_url = ?status.auth_url, + "daemon tailnet login status RPC resolved" + ); Ok(Response::new(tailnet_login_rsp(request.session_id, status))) } @@ -381,8 +505,12 @@ fn proc_err(err: impl ToString) -> RspStatus { fn configuration_rsp(config: ServerConfig) -> TunnelConfigurationResponse { TunnelConfigurationResponse { - mtu: config.mtu.unwrap_or(1000), addresses: config.address, + mtu: config.mtu.unwrap_or(1000), + routes: config.routes, + dns_servers: config.dns_servers, + search_domains: config.search_domains, + include_default_route: config.include_default_route, } } diff --git a/burrow/src/daemon/rpc/response.rs b/burrow/src/daemon/rpc/response.rs index 8948ca4..6d03581 100644 --- a/burrow/src/daemon/rpc/response.rs +++ 
b/burrow/src/daemon/rpc/response.rs @@ -68,6 +68,14 @@ impl TryFrom<&TunInterface> for ServerInfo { #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] pub struct ServerConfig { pub address: Vec, + #[serde(default)] + pub routes: Vec, + #[serde(default)] + pub dns_servers: Vec, + #[serde(default)] + pub search_domains: Vec, + #[serde(default)] + pub include_default_route: bool, pub name: Option, pub mtu: Option, } @@ -78,6 +86,14 @@ impl TryFrom<&Config> for ServerConfig { fn try_from(config: &Config) -> anyhow::Result { Ok(ServerConfig { address: config.interface.address.clone(), + routes: config + .peers + .iter() + .flat_map(|peer| peer.allowed_ips.iter().cloned()) + .collect(), + dns_servers: config.interface.dns.clone(), + search_domains: Vec::new(), + include_default_route: false, name: None, mtu: config.interface.mtu.map(|mtu| mtu as i32), }) @@ -88,6 +104,10 @@ impl Default for ServerConfig { fn default() -> Self { Self { address: vec!["10.13.13.2".to_string()], // Dummy remote address + routes: Vec::new(), + dns_servers: Vec::new(), + search_domains: Vec::new(), + include_default_route: false, name: None, mtu: None, } diff --git a/burrow/src/daemon/runtime.rs b/burrow/src/daemon/runtime.rs index 84dfd2b..31821a2 100644 --- a/burrow/src/daemon/runtime.rs +++ b/burrow/src/daemon/runtime.rs @@ -1,7 +1,13 @@ -use std::sync::Arc; +use std::{path::PathBuf, sync::Arc}; -use anyhow::{Context, Result}; -use tokio::{sync::RwLock, task::JoinHandle}; +use anyhow::{bail, Context, Result}; +use tokio::{ + io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, + net::UnixStream, + sync::{broadcast, mpsc, RwLock}, + task::JoinHandle, + time::{sleep, Duration}, +}; use tun::{tokio::TunInterface, TunOptions}; use super::rpc::{ @@ -9,7 +15,11 @@ use super::rpc::{ ServerConfig, }; use crate::{ - control::TailnetConfig, + auth::server::tailscale::{ + default_hostname, packet_socket_path, spawn_tailscale_helper, TailscaleHelperProcess, + TailscaleLoginStartRequest, 
TailscaleLoginStatus, + }, + control::{discovery, TailnetConfig}, wireguard::{Config, Interface as WireGuardInterface}, }; @@ -78,11 +88,19 @@ impl ResolvedTunnel { match self { Self::Passthrough { .. } => Ok(ServerConfig { address: Vec::new(), + routes: Vec::new(), + dns_servers: Vec::new(), + search_domains: Vec::new(), + include_default_route: false, name: None, mtu: Some(1500), }), Self::Tailnet { .. } => Ok(ServerConfig { address: Vec::new(), + routes: tailnet_routes(), + dns_servers: tailnet_dns_servers(), + search_domains: Vec::new(), + include_default_route: false, name: None, mtu: Some(1280), }), @@ -93,21 +111,71 @@ impl ResolvedTunnel { pub async fn start( self, tun_interface: Arc>>, + tailnet_helper: Option>, ) -> Result { match self { - Self::Passthrough { identity } => Ok(ActiveTunnel::Passthrough { identity }), - Self::Tailnet { config, .. } => Err(anyhow::anyhow!( - "tailnet runtime is not wired in this checkout yet ({:?})", - config.provider - )), + Self::Passthrough { identity } => Ok(ActiveTunnel::Passthrough { + identity, + server_config: ServerConfig { + address: Vec::new(), + routes: Vec::new(), + dns_servers: Vec::new(), + search_domains: Vec::new(), + include_default_route: false, + name: None, + mtu: Some(1500), + }, + }), + Self::Tailnet { identity, config } => { + let (helper, shutdown_helper_on_stop) = match tailnet_helper { + Some(helper) => (helper, false), + None => { + let helper_request = tailnet_helper_request(&identity, &config); + let helper = Arc::new(spawn_tailscale_helper(&helper_request).await?); + (helper, true) + } + }; + let status = wait_for_tailnet_ready(helper.as_ref()).await?; + let server_config = tailnet_server_config(&status); + let packet_socket = helper + .packet_socket() + .map(PathBuf::from) + .ok_or_else(|| anyhow::anyhow!("tailnet helper did not report a packet socket"))?; + let packet_bridge = connect_tailnet_packet_bridge(packet_socket).await?; + #[cfg(target_vendor = "apple")] + let tun_task = None; + 
#[cfg(not(target_vendor = "apple"))] + let tun_task = { + let tun = TunOptions::new().open()?; + tun_interface.write().await.replace(tun); + Some(tokio::spawn(run_tailnet_tun_bridge( + tun_interface.clone(), + packet_bridge.outbound_sender(), + packet_bridge.subscribe(), + ))) + }; + + Ok(ActiveTunnel::Tailnet { + identity, + server_config, + helper, + shutdown_helper_on_stop, + packet_bridge, + tun_task, + }) + } Self::WireGuard { identity, config } => { + let server_config = ServerConfig::try_from(&config)?; let tun = TunOptions::new().open()?; tun_interface.write().await.replace(tun); match start_wireguard_runtime(config, tun_interface.clone()).await { - Ok((interface, task)) => { - Ok(ActiveTunnel::WireGuard { identity, interface, task }) - } + Ok((interface, task)) => Ok(ActiveTunnel::WireGuard { + identity, + server_config, + interface, + task, + }), Err(err) => { tun_interface.write().await.take(); Err(err) @@ -121,9 +189,19 @@ impl ResolvedTunnel { pub enum ActiveTunnel { Passthrough { identity: RuntimeIdentity, + server_config: ServerConfig, + }, + Tailnet { + identity: RuntimeIdentity, + server_config: ServerConfig, + helper: Arc, + shutdown_helper_on_stop: bool, + packet_bridge: TailnetPacketBridge, + tun_task: Option>>, }, WireGuard { identity: RuntimeIdentity, + server_config: ServerConfig, interface: Arc>, task: JoinHandle>, }, @@ -132,15 +210,69 @@ pub enum ActiveTunnel { impl ActiveTunnel { pub fn identity(&self) -> &RuntimeIdentity { match self { - Self::Passthrough { identity } + Self::Passthrough { identity, .. } + | Self::Tailnet { identity, .. } | Self::WireGuard { identity, .. } => identity, } } + pub fn server_config(&self) -> &ServerConfig { + match self { + Self::Passthrough { server_config, .. } + | Self::Tailnet { server_config, .. } + | Self::WireGuard { server_config, .. } => server_config, + } + } + + pub fn packet_stream( + &self, + ) -> Option<(mpsc::Sender>, broadcast::Receiver>)> { + match self { + Self::Tailnet { packet_bridge, .. 
} => Some(( + packet_bridge.outbound_sender(), + packet_bridge.subscribe(), + )), + _ => None, + } + } + pub async fn shutdown(self, tun_interface: &Arc>>) -> Result<()> { match self { Self::Passthrough { .. } => Ok(()), - Self::WireGuard { interface, task, .. } => { + Self::Tailnet { + helper, + shutdown_helper_on_stop, + packet_bridge, + tun_task, + .. + } => { + if let Some(tun_task) = tun_task { + tun_task.abort(); + match tun_task.await { + Ok(Ok(())) => {} + Ok(Err(err)) => return Err(err), + Err(err) if err.is_cancelled() => {} + Err(err) => return Err(err.into()), + } + } + packet_bridge.task.abort(); + match packet_bridge.task.await { + Ok(Ok(())) => {} + Ok(Err(err)) => return Err(err), + Err(err) if err.is_cancelled() => {} + Err(err) => return Err(err.into()), + } + tun_interface.write().await.take(); + if shutdown_helper_on_stop { + helper.shutdown().await?; + } + Ok(()) + } + Self::WireGuard { + interface, + task, + .. + } => { interface.read().await.remove_tun().await; let task_result = task.await; tun_interface.write().await.take(); @@ -151,6 +283,22 @@ impl ActiveTunnel { } } +pub struct TailnetPacketBridge { + outbound: mpsc::Sender>, + inbound: broadcast::Sender>, + task: JoinHandle>, +} + +impl TailnetPacketBridge { + fn outbound_sender(&self) -> mpsc::Sender> { + self.outbound.clone() + } + + fn subscribe(&self) -> broadcast::Receiver> { + self.inbound.subscribe() + } +} + async fn start_wireguard_runtime( config: Config, tun_interface: Arc>>, @@ -166,6 +314,279 @@ async fn start_wireguard_runtime( Ok((interface, task)) } +pub(crate) fn tailnet_helper_request( + identity: &RuntimeIdentity, + config: &TailnetConfig, +) -> TailscaleLoginStartRequest { + let account_name = config + .account + .as_deref() + .filter(|value| !value.trim().is_empty()) + .unwrap_or("default") + .to_owned(); + let identity_name = config + .identity + .as_deref() + .filter(|value| !value.trim().is_empty()) + .map(ToOwned::to_owned) + .unwrap_or_else(|| match identity { + 
RuntimeIdentity::Network { id, .. } => format!("network-{id}"), + RuntimeIdentity::Passthrough => "apple".to_owned(), + }); + let control_url = config.authority.as_deref().and_then(|authority| { + let authority = discovery::normalize_authority(authority); + (!discovery::is_managed_tailscale_authority(&authority)).then_some(authority) + }); + + let mut request = TailscaleLoginStartRequest { + account_name, + identity_name, + hostname: config.hostname.clone(), + control_url, + packet_socket: None, + }; + request.packet_socket = Some(packet_socket_path(&request).display().to_string()); + if request + .hostname + .as_deref() + .map(|value| value.trim().is_empty()) + .unwrap_or(true) + { + request.hostname = Some(default_hostname(&request)); + } + request +} + +async fn wait_for_tailnet_ready(helper: &TailscaleHelperProcess) -> Result { + let mut last_status = None; + for _ in 0..120 { + let status = helper.status().await?; + if status.running && !status.tailscale_ips.is_empty() { + return Ok(status); + } + if status.needs_login || status.auth_url.is_some() { + bail!("tailnet runtime requires a completed login before the tunnel can start"); + } + last_status = Some(status); + sleep(Duration::from_millis(250)).await; + } + + if let Some(status) = last_status { + bail!( + "tailnet helper never became ready (backend_state={})", + status.backend_state + ); + } + bail!("tailnet helper never produced a status update") +} + +fn tailnet_server_config(status: &TailscaleLoginStatus) -> ServerConfig { + let mut search_domains = Vec::new(); + if let Some(suffix) = status.magic_dns_suffix.as_deref() { + let suffix = suffix.trim().trim_end_matches('.'); + if !suffix.is_empty() { + search_domains.push(suffix.to_owned()); + } + } + + ServerConfig { + address: status + .tailscale_ips + .iter() + .map(|ip| tailnet_cidr(ip)) + .collect(), + routes: tailnet_routes(), + dns_servers: tailnet_dns_servers(), + search_domains, + include_default_route: false, + name: 
status.self_dns_name.clone(), + mtu: Some(1280), + } +} + +fn tailnet_routes() -> Vec { + vec!["100.64.0.0/10".to_owned(), "fd7a:115c:a1e0::/48".to_owned()] +} + +fn tailnet_dns_servers() -> Vec { + vec!["100.100.100.100".to_owned()] +} + +fn tailnet_cidr(ip: &str) -> String { + if ip.contains('/') { + return ip.to_owned(); + } + if ip.contains(':') { + format!("{ip}/128") + } else { + format!("{ip}/32") + } +} + +async fn connect_tailnet_packet_bridge(packet_socket: PathBuf) -> Result { + let mut last_error = None; + let mut stream = None; + for _ in 0..50 { + match UnixStream::connect(&packet_socket).await { + Ok(connected) => { + stream = Some(connected); + break; + } + Err(err) => { + last_error = Some(err); + sleep(Duration::from_millis(100)).await; + } + } + } + let stream = if let Some(stream) = stream { + stream + } else { + return Err(last_error + .context("failed to connect to tailnet helper packet socket")? + .into()); + }; + + let (outbound_tx, outbound_rx) = mpsc::channel(128); + let (inbound_tx, _) = broadcast::channel(128); + let task = tokio::spawn(run_tailnet_socket_bridge( + stream, + outbound_rx, + inbound_tx.clone(), + )); + + Ok(TailnetPacketBridge { + outbound: outbound_tx, + inbound: inbound_tx, + task, + }) +} + +async fn run_tailnet_socket_bridge( + stream: UnixStream, + mut outbound_rx: mpsc::Receiver>, + inbound_tx: broadcast::Sender>, +) -> Result<()> { + let (mut reader, mut writer) = stream.into_split(); + + let inbound = tokio::spawn(async move { + loop { + let packet = read_packet_frame(&mut reader).await?; + tracing::debug!( + "tailnet packet bridge received {} bytes from helper socket", + packet.len() + ); + let _ = inbound_tx.send(packet); + } + #[allow(unreachable_code)] + Result::<()>::Ok(()) + }); + + let outbound = tokio::spawn(async move { + while let Some(packet) = outbound_rx.recv().await { + tracing::debug!( + "tailnet packet bridge writing {} bytes to helper socket", + packet.len() + ); + write_packet_frame(&mut writer, 
&packet).await?; + } + Result::<()>::Ok(()) + }); + + let (inbound_result, outbound_result) = tokio::try_join!(inbound, outbound)?; + inbound_result?; + outbound_result?; + Ok(()) +} + +#[cfg(not(target_vendor = "apple"))] +async fn run_tailnet_tun_bridge( + tun_interface: Arc>>, + outbound_tx: mpsc::Sender>, + mut inbound_rx: broadcast::Receiver>, +) -> Result<()> { + let inbound_tun = tun_interface.clone(); + let inbound = tokio::spawn(async move { + loop { + let packet = match inbound_rx.recv().await { + Ok(packet) => packet, + Err(broadcast::error::RecvError::Lagged(_)) => continue, + Err(broadcast::error::RecvError::Closed) => break, + }; + let guard = inbound_tun.read().await; + let Some(tun) = guard.as_ref() else { + bail!("tailnet tun interface unavailable"); + }; + tun.send(&packet) + .await + .context("failed to write tailnet packet to tun")?; + } + Result::<()>::Ok(()) + }); + + let outbound_tun = tun_interface.clone(); + let outbound = tokio::spawn(async move { + let mut buf = vec![0u8; 65_535]; + loop { + let len = { + let guard = outbound_tun.read().await; + let Some(tun) = guard.as_ref() else { + bail!("tailnet tun interface unavailable"); + }; + tun.recv(&mut buf) + .await + .context("failed to read packet from tailnet tun")? 
+ }; + outbound_tx + .send(buf[..len].to_vec()) + .await + .context("failed to forward packet to tailnet helper")?; + } + #[allow(unreachable_code)] + Result::<()>::Ok(()) + }); + + let (inbound_result, outbound_result) = tokio::try_join!(inbound, outbound)?; + inbound_result?; + outbound_result?; + Ok(()) +} + +async fn read_packet_frame(reader: &mut R) -> Result> +where + R: AsyncRead + Unpin, +{ + let mut len_buf = [0u8; 4]; + reader + .read_exact(&mut len_buf) + .await + .context("failed to read tailnet packet frame length")?; + let len = u32::from_be_bytes(len_buf) as usize; + let mut packet = vec![0u8; len]; + reader + .read_exact(&mut packet) + .await + .context("failed to read tailnet packet frame payload")?; + Ok(packet) +} + +async fn write_packet_frame(writer: &mut W, packet: &[u8]) -> Result<()> +where + W: AsyncWrite + Unpin, +{ + writer + .write_all(&(packet.len() as u32).to_be_bytes()) + .await + .context("failed to write tailnet packet frame length")?; + writer + .write_all(packet) + .await + .context("failed to write tailnet packet frame payload")?; + writer + .flush() + .await + .context("failed to flush tailnet packet frame") +} + #[cfg(test)] mod tests { use super::*; @@ -179,4 +600,19 @@ mod tests { Vec::::new() ); } + + #[test] + fn tailnet_server_config_uses_host_prefixes() { + let status = TailscaleLoginStatus { + running: true, + tailscale_ips: vec!["100.101.102.103".to_owned(), "fd7a:115c:a1e0::123".to_owned()], + ..Default::default() + }; + let config = tailnet_server_config(&status); + assert_eq!( + config.address, + vec!["100.101.102.103/32", "fd7a:115c:a1e0::123/128"] + ); + assert_eq!(config.mtu, Some(1280)); + } } diff --git a/burrow/src/tracing.rs b/burrow/src/tracing.rs index 21e16ae..8a245ef 100644 --- a/burrow/src/tracing.rs +++ b/burrow/src/tracing.rs @@ -47,10 +47,16 @@ pub fn initialize() { #[cfg(target_os = "macos")] let subscriber = { - let system_log = Some(tracing_oslog::OsLogger::new( - "com.hackclub.burrow", - "tracing", 
- )); + // `tracing_oslog` is crashing under Tokio/h2 span churn in the host daemon on + // current macOS. Keep logging on stderr by default and allow opt-in OSLog + // only when explicitly requested for local debugging. + let enable_oslog = matches!( + std::env::var("BURROW_ENABLE_OSLOG").as_deref(), + Ok("1" | "true" | "TRUE" | "yes" | "YES") + ); + let system_log = enable_oslog.then(|| { + tracing_oslog::OsLogger::new("com.hackclub.burrow", "tracing") + }); let stderr = (console::user_attended_stderr() || system_log.is_none()).then(make_stderr); Registry::default().with(stderr).with(system_log) }; diff --git a/proto/burrow.proto b/proto/burrow.proto index a590cb1..ed1f89e 100644 --- a/proto/burrow.proto +++ b/proto/burrow.proto @@ -5,6 +5,7 @@ import "google/protobuf/timestamp.proto"; service Tunnel { rpc TunnelConfiguration (Empty) returns (stream TunnelConfigurationResponse); + rpc TunnelPackets (stream TunnelPacket) returns (stream TunnelPacket); rpc TunnelStart (Empty) returns (Empty); rpc TunnelStop (Empty) returns (Empty); rpc TunnelStatus (Empty) returns (stream TunnelStatusResponse); @@ -128,4 +129,12 @@ message TunnelStatusResponse { message TunnelConfigurationResponse { repeated string addresses = 1; int32 mtu = 2; + repeated string routes = 3; + repeated string dns_servers = 4; + repeated string search_domains = 5; + bool include_default_route = 6; +} + +message TunnelPacket { + bytes payload = 1; } From e40a947223e8dce37ca20665262d1d239d010301 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 5 Apr 2026 20:52:52 -0700 Subject: [PATCH 20/59] Add forge-owned Namespace auth portal --- .../authentik-sync-namespace-portal-oidc.sh | 246 +++++ Scripts/check-forge-host.sh | 4 + ...c__response__response_serialization-4.snap | 2 +- burrow/src/main.rs | 38 +- burrow/src/namespace_portal.rs | 880 ++++++++++++++++++ flake.nix | 32 + nixos/README.md | 11 +- nixos/hosts/burrow-forge/default.nix | 12 +- nixos/modules/burrow-authentik.nix | 75 ++ 
nixos/modules/burrow-namespace-portal.nix | 126 +++ 10 files changed, 1403 insertions(+), 23 deletions(-) create mode 100644 Scripts/authentik-sync-namespace-portal-oidc.sh create mode 100644 burrow/src/namespace_portal.rs create mode 100644 nixos/modules/burrow-namespace-portal.nix diff --git a/Scripts/authentik-sync-namespace-portal-oidc.sh b/Scripts/authentik-sync-namespace-portal-oidc.sh new file mode 100644 index 0000000..a62b0cf --- /dev/null +++ b/Scripts/authentik-sync-namespace-portal-oidc.sh @@ -0,0 +1,246 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +application_slug="${AUTHENTIK_NAMESPACE_PORTAL_APPLICATION_SLUG:-namespace}" +application_name="${AUTHENTIK_NAMESPACE_PORTAL_APPLICATION_NAME:-Namespace Portal}" +provider_name="${AUTHENTIK_NAMESPACE_PORTAL_PROVIDER_NAME:-Namespace Portal}" +template_slug="${AUTHENTIK_NAMESPACE_PORTAL_TEMPLATE_SLUG:-ts}" +client_id="${AUTHENTIK_NAMESPACE_PORTAL_CLIENT_ID:-nsc.burrow.net}" +client_secret="${AUTHENTIK_NAMESPACE_PORTAL_CLIENT_SECRET:-}" +launch_url="${AUTHENTIK_NAMESPACE_PORTAL_LAUNCH_URL:-https://nsc.burrow.net/}" +redirect_uris_json="${AUTHENTIK_NAMESPACE_PORTAL_REDIRECT_URIS_JSON:-[ + \"https://nsc.burrow.net/oauth/callback\" +]}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-namespace-portal-oidc.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_NAMESPACE_PORTAL_APPLICATION_SLUG + AUTHENTIK_NAMESPACE_PORTAL_APPLICATION_NAME + AUTHENTIK_NAMESPACE_PORTAL_PROVIDER_NAME + AUTHENTIK_NAMESPACE_PORTAL_TEMPLATE_SLUG + AUTHENTIK_NAMESPACE_PORTAL_CLIENT_ID + AUTHENTIK_NAMESPACE_PORTAL_CLIENT_SECRET + AUTHENTIK_NAMESPACE_PORTAL_LAUNCH_URL + AUTHENTIK_NAMESPACE_PORTAL_REDIRECT_URIS_JSON +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: 
AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +if ! printf '%s' "$redirect_uris_json" | jq -e 'type == "array" and length > 0' >/dev/null; then + echo "error: AUTHENTIK_NAMESPACE_PORTAL_REDIRECT_URIS_JSON must be a non-empty JSON array" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +api_with_status() { + local method="$1" + local path="$2" + local data="${3:-}" + local response_file status + + response_file="$(mktemp)" + trap 'rm -f "$response_file"' RETURN + + if [[ -n "$data" ]]; then + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + )" + else + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + )" + fi + + printf '%s\n' "$status" + cat "$response_file" +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +wait_for_authentik + +template_provider="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -c --arg template_slug "$template_slug" '.results[]? 
| select(.assigned_application_slug == $template_slug)' \ + | head -n1 +)" + +if [[ -z "$template_provider" ]]; then + echo "error: could not resolve the Authentik OAuth provider template ${template_slug}" >&2 + exit 1 +fi + +authorization_flow="$(printf '%s\n' "$template_provider" | jq -r '.authorization_flow')" +invalidation_flow="$(printf '%s\n' "$template_provider" | jq -r '.invalidation_flow')" +property_mappings="$(printf '%s\n' "$template_provider" | jq -c '.property_mappings')" +signing_key="$(printf '%s\n' "$template_provider" | jq -r '.signing_key')" + +provider_payload="$( + jq -n \ + --arg name "$provider_name" \ + --arg authorization_flow "$authorization_flow" \ + --arg invalidation_flow "$invalidation_flow" \ + --arg client_id "$client_id" \ + --arg client_secret "$client_secret" \ + --arg signing_key "$signing_key" \ + --argjson property_mappings "$property_mappings" \ + --argjson redirect_uris "$redirect_uris_json" \ + '{ + name: $name, + authorization_flow: $authorization_flow, + invalidation_flow: $invalidation_flow, + client_type: (if $client_secret == "" then "public" else "confidential" end), + client_id: $client_id, + include_claims_in_id_token: true, + redirect_uris: ($redirect_uris | map({matching_mode: "strict", url: .})), + property_mappings: $property_mappings, + signing_key: $signing_key, + issuer_mode: "per_provider", + sub_mode: "hashed_user_id" + } + + (if $client_secret == "" then {} else {client_secret: $client_secret} end)' +)" + +existing_provider="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -c \ + --arg application_slug "$application_slug" \ + --arg provider_name "$provider_name" \ + '.results[]? 
| select(.assigned_application_slug == $application_slug or .name == $provider_name)' \ + | head -n1 +)" + +if [[ -n "$existing_provider" ]]; then + provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')" + api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$provider_payload" >/dev/null +else + provider_pk="$( + api POST "/api/v3/providers/oauth2/" "$provider_payload" \ + | jq -r '.pk // empty' + )" +fi + +if [[ -z "${provider_pk:-}" ]]; then + echo "error: Namespace portal OIDC provider did not return a primary key" >&2 + exit 1 +fi + +application_payload="$( + jq -n \ + --arg name "$application_name" \ + --arg slug "$application_slug" \ + --arg provider "$provider_pk" \ + --arg launch_url "$launch_url" \ + '{ + name: $name, + slug: $slug, + provider: ($provider | tonumber), + meta_launch_url: $launch_url, + open_in_new_tab: false, + policy_engine_mode: "any" + }' +)" + +existing_application="$( + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -c --arg slug "$application_slug" '.results[]? 
| select(.slug == $slug)' \ + | head -n1 +)" + +if [[ -n "$existing_application" ]]; then + application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" +else + create_application_result="$( + api_with_status POST "/api/v3/core/applications/" "$application_payload" + )" + create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')" + create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')" + + if [[ "$create_application_status" =~ ^20[01]$ ]]; then + application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')" + elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e ' + (.slug // [] | index("Application with this slug already exists.")) != null + or (.provider // [] | index("Application with this provider already exists.")) != null + ' >/dev/null; then + application_pk="existing-duplicate" + else + printf '%s\n' "$create_application_body" >&2 + echo "error: could not reconcile Authentik application ${application_slug}" >&2 + exit 1 + fi +fi + +if [[ -z "${application_pk:-}" ]]; then + echo "error: Namespace portal OIDC application did not return a primary key" >&2 + exit 1 +fi + +for _ in $(seq 1 30); do + if curl -fsS "${authentik_url}/application/o/${application_slug}/.well-known/openid-configuration" >/dev/null 2>&1; then + echo "Synced Authentik Namespace portal OIDC application ${application_slug} (${application_name})." + exit 0 + fi + sleep 2 +done + +echo "warning: Namespace portal OIDC issuer document for ${application_slug} was not immediately readable; keeping reconciled config." >&2 +echo "Synced Authentik Namespace portal OIDC application ${application_slug} (${application_name})." 
diff --git a/Scripts/check-forge-host.sh b/Scripts/check-forge-host.sh index f4d646d..d824f6d 100755 --- a/Scripts/check-forge-host.sh +++ b/Scripts/check-forge-host.sh @@ -84,6 +84,7 @@ base_services=( nsc_services=( forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service + burrow-namespace-portal.service ) tailnet_services=( @@ -173,5 +174,8 @@ if command -v curl >/dev/null 2>&1; then curl -fsS -o /dev/null -H 'Host: auth.burrow.net' -w 'authentik_ready %{http_code}\n' http://127.0.0.1/-/health/ready/ curl -sS -o /dev/null -H 'Host: ts.burrow.net' -w 'headscale_root %{http_code}\n' http://127.0.0.1/ || true fi + if [[ "${EXPECT_NSC}" == "1" ]]; then + curl -fsS -o /dev/null -H 'Host: nsc.burrow.net' -w 'namespace_portal %{http_code}\n' http://127.0.0.1/ + fi fi EOF diff --git a/burrow/src/daemon/rpc/snapshots/burrow__daemon__rpc__response__response_serialization-4.snap b/burrow/src/daemon/rpc/snapshots/burrow__daemon__rpc__response__response_serialization-4.snap index c40db25..68b4195 100644 --- a/burrow/src/daemon/rpc/snapshots/burrow__daemon__rpc__response__response_serialization-4.snap +++ b/burrow/src/daemon/rpc/snapshots/burrow__daemon__rpc__response__response_serialization-4.snap @@ -2,4 +2,4 @@ source: burrow/src/daemon/rpc/response.rs expression: "serde_json::to_string(&DaemonResponse::new(Ok::(DaemonResponseData::ServerConfig(ServerConfig::default()))))?" 
--- -{"result":{"Ok":{"type":"ServerConfig","address":["10.13.13.2"],"name":null,"mtu":null}},"id":0} +{"result":{"Ok":{"type":"ServerConfig","address":["10.13.13.2"],"routes":[],"dns_servers":[],"search_domains":[],"include_default_route":false,"name":null,"mtu":null}},"id":0} diff --git a/burrow/src/main.rs b/burrow/src/main.rs index 4ab7700..01591e7 100644 --- a/burrow/src/main.rs +++ b/burrow/src/main.rs @@ -5,6 +5,8 @@ use clap::{Args, Parser, Subcommand}; mod control; #[cfg(any(target_os = "linux", target_vendor = "apple"))] mod daemon; +#[cfg(target_os = "linux")] +mod namespace_portal; pub(crate) mod tracing; #[cfg(any(target_os = "linux", target_vendor = "apple"))] mod wireguard; @@ -60,6 +62,12 @@ enum Commands { ReloadConfig(ReloadConfigArgs), /// Authentication server AuthServer, + #[cfg(target_os = "linux")] + /// Admin portal for forge-owned Namespace authentication and NSC token minting + NamespacePortal, + #[cfg(target_os = "linux")] + /// Refresh the forge-owned Namespace dev token once + NamespaceRefreshToken, /// Server Status ServerStatus, /// Tunnel Config @@ -283,9 +291,7 @@ async fn try_tailnet_discover(email: &str) -> Result<()> { let mut client = BurrowClient::from_uds().await?; let response = client .tailnet_client - .discover(crate::daemon::rpc::grpc_defs::TailnetDiscoverRequest { - email: email.to_owned(), - }) + .discover(crate::daemon::rpc::grpc_defs::TailnetDiscoverRequest { email: email.to_owned() }) .await? .into_inner(); println!("Tailnet Discover Response: {:?}", response); @@ -370,13 +376,9 @@ async fn try_tailnet_ping(remote: &str, payload: &str, timeout_ms: u64) -> Resul "tailnet ping received {} bytes from daemon packet stream", packet.payload.len() ); - if let Some(reply) = parse_icmp_echo_reply( - &packet.payload, - local_ip, - remote_ip, - identifier, - sequence, - )? { + if let Some(reply) = + parse_icmp_echo_reply(&packet.payload, local_ip, remote_ip, identifier, sequence)? 
+ { break Ok::<_, anyhow::Error>(reply); } } @@ -464,8 +466,7 @@ async fn try_tailnet_udp_echo(remote: &str, message: &str, timeout_ms: u64) -> R let egress_task = tokio::spawn(async move { while let Some(packet) = stack_stream.next().await { - let payload = - packet.context("failed to read outbound packet from userspace stack")?; + let payload = packet.context("failed to read outbound packet from userspace stack")?; log::debug!( "tailnet udp echo sending {} bytes into daemon packet stream", payload.len() @@ -484,9 +485,7 @@ async fn try_tailnet_udp_echo(remote: &str, message: &str, timeout_ms: u64) -> R .send((message.as_bytes().to_vec(), local_addr, remote_addr)) .await .context("failed to send UDP echo probe into userspace stack")?; - log::debug!( - "tailnet udp echo probe queued from {local_addr} to {remote_addr}" - ); + log::debug!("tailnet udp echo probe queued from {local_addr} to {remote_addr}"); let response = timeout(Duration::from_millis(timeout_ms), udp_reader.next()) .await @@ -516,7 +515,10 @@ async fn try_tailnet_udp_echo(remote: &str, message: &str, timeout_ms: u64) -> R } #[cfg(any(target_os = "linux", target_vendor = "apple"))] -fn select_tailnet_local_ip(addresses: &[String], remote_ip: std::net::IpAddr) -> Result { +fn select_tailnet_local_ip( + addresses: &[String], + remote_ip: std::net::IpAddr, +) -> Result { use anyhow::Context; let family_is_v4 = remote_ip.is_ipv4(); @@ -765,6 +767,10 @@ async fn main() -> Result<()> { Commands::ServerConfig => try_serverconfig().await?, Commands::ReloadConfig(args) => try_reloadconfig(args.interface_id.clone()).await?, Commands::AuthServer => crate::auth::server::serve().await?, + #[cfg(target_os = "linux")] + Commands::NamespacePortal => crate::namespace_portal::serve().await?, + #[cfg(target_os = "linux")] + Commands::NamespaceRefreshToken => crate::namespace_portal::refresh_token_once().await?, Commands::ServerStatus => try_serverstatus().await?, Commands::TunnelConfig => try_tun_config().await?, 
Commands::NetworkAdd(args) => { diff --git a/burrow/src/namespace_portal.rs b/burrow/src/namespace_portal.rs new file mode 100644 index 0000000..eb20775 --- /dev/null +++ b/burrow/src/namespace_portal.rs @@ -0,0 +1,880 @@ +#![cfg(target_os = "linux")] + +use std::{ + collections::HashMap, + env, fs, + path::{Path, PathBuf}, + process::Stdio, + sync::Arc, + time::{Duration, Instant}, +}; + +use anyhow::{anyhow, bail, Context, Result}; +use axum::{ + extract::{Query, State}, + http::{ + header::{COOKIE, LOCATION, SET_COOKIE}, + HeaderMap, HeaderValue, StatusCode, + }, + response::{Html, IntoResponse, Redirect, Response}, + routing::{get, post}, + Router, +}; +use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _}; +use rand::RngCore; +use reqwest::Url; +use ring::digest::{digest, SHA256}; +use serde::Deserialize; +use tokio::{ + io::{AsyncBufReadExt, BufReader}, + process::Command, + sync::Mutex, +}; + +const SESSION_COOKIE: &str = "burrow_namespace_portal_session"; +const OIDC_TIMEOUT: Duration = Duration::from_secs(600); +const AUTH_CHECK_DURATION: &str = "10m"; + +#[derive(Clone, Debug)] +pub struct NamespacePortalConfig { + pub listen: String, + pub public_base_url: String, + pub oidc_discovery_url: String, + pub oidc_client_id: String, + pub oidc_client_secret: Option, + pub allowed_group: String, + pub nsc_bin: String, + pub nsc_state_dir: PathBuf, + pub token_output_path: PathBuf, +} + +impl Default for NamespacePortalConfig { + fn default() -> Self { + Self { + listen: "127.0.0.1:9080".to_owned(), + public_base_url: "https://nsc.burrow.net".to_owned(), + oidc_discovery_url: + "https://auth.burrow.net/application/o/namespace/.well-known/openid-configuration" + .to_owned(), + oidc_client_id: "nsc.burrow.net".to_owned(), + oidc_client_secret: None, + allowed_group: "burrow-admins".to_owned(), + nsc_bin: "nsc".to_owned(), + nsc_state_dir: PathBuf::from("/var/lib/burrow/namespace-portal/nsc"), + token_output_path: 
PathBuf::from("/var/lib/burrow/intake/forgejo_nsc_token.txt"), + } + } +} + +impl NamespacePortalConfig { + pub fn from_env() -> Self { + let mut config = Self::default(); + if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_LISTEN") { + config.listen = value; + } + if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_BASE_URL") { + config.public_base_url = value; + } + if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_OIDC_DISCOVERY_URL") { + config.oidc_discovery_url = value; + } + if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_OIDC_CLIENT_ID") { + config.oidc_client_id = value; + } + if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_OIDC_CLIENT_SECRET") { + let value = value.trim().to_owned(); + if !value.is_empty() { + config.oidc_client_secret = Some(value); + } + } + if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_ALLOWED_GROUP") { + config.allowed_group = value; + } + if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_NSC_BIN") { + config.nsc_bin = value; + } + if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_NSC_STATE_DIR") { + config.nsc_state_dir = PathBuf::from(value); + } + if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_TOKEN_OUTPUT_PATH") { + config.token_output_path = PathBuf::from(value); + } + config + } + + fn callback_url(&self) -> Result { + let mut url = Url::parse(&self.public_base_url) + .with_context(|| format!("invalid public base url {}", self.public_base_url))?; + url.set_path("/oauth/callback"); + url.set_query(None); + Ok(url.to_string()) + } + + fn ensure_paths(&self) -> Result<()> { + fs::create_dir_all(&self.nsc_state_dir).with_context(|| { + format!( + "failed to create namespace portal state dir {}", + self.nsc_state_dir.display() + ) + })?; + if let Some(parent) = self.token_output_path.parent() { + fs::create_dir_all(parent).with_context(|| { + format!("failed to create token output dir {}", parent.display()) + })?; + } + Ok(()) + } +} + +#[derive(Clone)] +struct AppState { + config: 
NamespacePortalConfig, + client: reqwest::Client, + oidc: OidcDiscovery, + pending_logins: Arc>>, + sessions: Arc>>, + namespace: NamespaceSessionManager, +} + +#[derive(Clone, Debug, Deserialize)] +struct OidcDiscovery { + authorization_endpoint: String, + token_endpoint: String, + userinfo_endpoint: String, +} + +#[derive(Clone, Debug)] +struct PendingOidcLogin { + verifier: String, + expires_at: Instant, +} + +#[derive(Clone, Debug)] +struct PortalSession { + email: String, + display_name: String, + groups: Vec, + issued_at: Instant, +} + +#[derive(Debug, Deserialize)] +struct OidcCallbackQuery { + code: Option, + state: Option, + error: Option, + error_description: Option, +} + +#[derive(Debug, Deserialize)] +struct TokenResponse { + access_token: String, +} + +#[derive(Debug, Deserialize)] +struct UserInfo { + #[serde(default)] + email: String, + #[serde(default)] + name: String, + #[serde(default)] + preferred_username: String, + #[serde(default)] + groups: Vec, +} + +#[derive(Clone)] +struct NamespaceSessionManager { + config: NamespacePortalConfig, + state: Arc>, +} + +#[derive(Clone, Debug, Default)] +struct NamespacePortalState { + active_login: Option, + last_error: Option, +} + +#[derive(Clone, Debug)] +struct ActiveNamespaceLogin { + login_url: String, +} + +#[derive(Clone, Debug)] +struct NamespaceStatus { + linked: bool, + login_url: Option, + last_error: Option, + token_present: bool, +} + +pub async fn serve() -> Result<()> { + serve_with_config(NamespacePortalConfig::from_env()).await +} + +pub async fn refresh_token_once() -> Result<()> { + let config = NamespacePortalConfig::from_env(); + config.ensure_paths()?; + NamespaceSessionManager::new(config).refresh_token().await +} + +pub async fn serve_with_config(config: NamespacePortalConfig) -> Result<()> { + config.ensure_paths()?; + let oidc = fetch_oidc_discovery(&config.oidc_discovery_url).await?; + let listen = config.listen.clone(); + let app = Router::new() + .route("/", get(index)) + 
.route("/healthz", get(healthz)) + .route("/login", get(oidc_login)) + .route("/logout", post(logout)) + .route("/oauth/callback", get(oidc_callback)) + .route("/namespace/link/start", post(namespace_link_start)) + .route("/namespace/token/refresh", post(namespace_token_refresh)) + .with_state(AppState { + config: config.clone(), + client: reqwest::Client::builder() + .redirect(reqwest::redirect::Policy::none()) + .build()?, + oidc, + pending_logins: Arc::new(Mutex::new(HashMap::new())), + sessions: Arc::new(Mutex::new(HashMap::new())), + namespace: NamespaceSessionManager::new(config), + }); + + let listener = tokio::net::TcpListener::bind(&listen).await?; + log::info!("Starting Namespace portal on {}", listen); + axum::serve(listener, app).await?; + Ok(()) +} + +async fn fetch_oidc_discovery(discovery_url: &str) -> Result { + reqwest::Client::new() + .get(discovery_url) + .send() + .await + .with_context(|| format!("failed to fetch oidc discovery {}", discovery_url))? + .error_for_status() + .with_context(|| format!("oidc discovery returned non-success {}", discovery_url))? 
+ .json() + .await + .context("failed to decode oidc discovery document") +} + +async fn healthz() -> impl IntoResponse { + StatusCode::OK +} + +async fn index(State(state): State, headers: HeaderMap) -> Response { + match current_session(&state, &headers).await { + Ok(Some(session)) => { + let namespace_status = match state.namespace.status().await { + Ok(status) => status, + Err(err) => NamespaceStatus { + linked: false, + login_url: None, + last_error: Some(err.to_string()), + token_present: false, + }, + }; + Html(render_dashboard(&state.config, &session, &namespace_status)).into_response() + } + Ok(None) => Html(render_login_page()).into_response(), + Err(err) => ( + StatusCode::INTERNAL_SERVER_ERROR, + Html(render_error_page(&format!("session lookup failed: {err}"))), + ) + .into_response(), + } +} + +async fn oidc_login(State(state): State) -> Result { + prune_pending(&state).await; + let state_token = random_url_token(32); + let verifier = random_url_token(48); + let challenge = pkce_challenge(&verifier); + let callback_url = state.config.callback_url().map_err(internal_error)?; + + state.pending_logins.lock().await.insert( + state_token.clone(), + PendingOidcLogin { + verifier, + expires_at: Instant::now() + OIDC_TIMEOUT, + }, + ); + + let mut url = Url::parse(&state.oidc.authorization_endpoint).map_err(internal_error)?; + url.query_pairs_mut() + .append_pair("client_id", &state.config.oidc_client_id) + .append_pair("response_type", "code") + .append_pair("scope", "openid profile email groups") + .append_pair("redirect_uri", &callback_url) + .append_pair("state", &state_token) + .append_pair("code_challenge", &challenge) + .append_pair("code_challenge_method", "S256"); + Ok(Redirect::to(url.as_str())) +} + +async fn oidc_callback( + State(state): State, + Query(query): Query, +) -> Result { + if let Some(error) = query.error { + let description = query.error_description.unwrap_or_default(); + return Err(( + StatusCode::BAD_GATEWAY, + format!("oidc login 
failed: {error} {description}") + .trim() + .to_owned(), + )); + } + + let code = query + .code + .ok_or_else(|| (StatusCode::BAD_REQUEST, "missing oidc code".to_owned()))?; + let state_token = query + .state + .ok_or_else(|| (StatusCode::BAD_REQUEST, "missing oidc state".to_owned()))?; + + let verifier = { + let mut pending = state.pending_logins.lock().await; + let Some(login) = pending.remove(&state_token) else { + return Err((StatusCode::BAD_REQUEST, "unknown oidc state".to_owned())); + }; + if login.expires_at <= Instant::now() { + return Err((StatusCode::BAD_REQUEST, "expired oidc state".to_owned())); + } + login.verifier + }; + + let callback_url = state.config.callback_url().map_err(internal_error)?; + + let mut params = vec![ + ("grant_type", "authorization_code".to_owned()), + ("code", code), + ("client_id", state.config.oidc_client_id.clone()), + ("redirect_uri", callback_url), + ("code_verifier", verifier), + ]; + if let Some(secret) = &state.config.oidc_client_secret { + params.push(("client_secret", secret.clone())); + } + + let token = state + .client + .post(&state.oidc.token_endpoint) + .form(¶ms) + .send() + .await + .context("failed to exchange oidc code") + .map_err(internal_error)? + .error_for_status() + .context("oidc token endpoint returned non-success") + .map_err(internal_error)? + .json::() + .await + .context("failed to decode oidc token response") + .map_err(internal_error)?; + + let userinfo = state + .client + .get(&state.oidc.userinfo_endpoint) + .bearer_auth(&token.access_token) + .send() + .await + .context("failed to fetch oidc userinfo") + .map_err(internal_error)? + .error_for_status() + .context("oidc userinfo returned non-success") + .map_err(internal_error)? 
+ .json::() + .await + .context("failed to decode oidc userinfo") + .map_err(internal_error)?; + + if !userinfo + .groups + .iter() + .any(|group| group == &state.config.allowed_group) + { + return Err(( + StatusCode::FORBIDDEN, + format!( + "authenticated user is not in required group {}", + state.config.allowed_group + ), + )); + } + + let session_id = random_url_token(32); + state.sessions.lock().await.insert( + session_id.clone(), + PortalSession { + email: userinfo.email.clone(), + display_name: display_name(&userinfo), + groups: userinfo.groups, + issued_at: Instant::now(), + }, + ); + + let mut response = Redirect::to("/").into_response(); + response.headers_mut().insert( + SET_COOKIE, + HeaderValue::from_str(&session_cookie_value(&session_id)).map_err(internal_error)?, + ); + Ok(response) +} + +async fn logout( + State(state): State, + headers: HeaderMap, +) -> Result { + if let Some(session_id) = session_cookie(&headers) { + state.sessions.lock().await.remove(&session_id); + } + let mut response = Redirect::to("/").into_response(); + response.headers_mut().insert( + SET_COOKIE, + HeaderValue::from_static( + "burrow_namespace_portal_session=; Path=/; Max-Age=0; HttpOnly; Secure; SameSite=Lax", + ), + ); + Ok(response) +} + +async fn namespace_link_start( + State(state): State, + headers: HeaderMap, +) -> Result { + require_session(&state, &headers).await?; + state + .namespace + .start_login() + .await + .map_err(internal_error)?; + Ok(Redirect::to("/")) +} + +async fn namespace_token_refresh( + State(state): State, + headers: HeaderMap, +) -> Result { + require_session(&state, &headers).await?; + state + .namespace + .refresh_token() + .await + .map_err(internal_error)?; + Ok(Redirect::to("/")) +} + +fn render_login_page() -> String { + r#" + + + + + Burrow Namespace Portal + + + +
+

Burrow Namespace Portal

+

Authenticate with burrow.net to manage the dedicated Namespace session that backs Forgejo NSC automation.

+ Sign in with burrow.net +
+ +"# + .to_owned() +} + +fn render_dashboard( + config: &NamespacePortalConfig, + session: &PortalSession, + status: &NamespaceStatus, +) -> String { + let refresh = if status.login_url.is_some() { + r#""# + } else { + "" + }; + let login_action = if let Some(url) = &status.login_url { + format!( + "

Namespace Login In Progress

Open the live Namespace URL below with the dedicated Burrow account. This page will refresh automatically until the server-side session is ready.

Open Namespace Login

", + escape_html(url) + ) + } else if status.linked { + "

Namespace Linked

The forge-owned NSC session is authenticated and ready to mint runner tokens.

".to_owned() + } else { + "

Namespace Not Linked

Start a server-side Namespace login. The portal will produce a Namespace URL, and completing that browser flow will authenticate the forge-owned NSC state directory.

".to_owned() + }; + let error = status + .last_error + .as_ref() + .map(|error| format!("

{}

", escape_html(error))) + .unwrap_or_default(); + let token_state = if status.token_present { + "present" + } else { + "missing" + }; + format!( + r#" + + + + + Burrow Namespace Portal + {refresh} + + + +
+
+
+

Burrow Namespace Portal

+

Signed in as {email}. This page controls the forge-owned NSC session and token material for Forgejo Namespace runners.

+
+
+
+ +
+
+
burrow.net identity
{identity}
+
required group
{group}
+
NSC token file
{token_path}
+
current token
{token_state}
+
+
+ + {login_action} + {error} + +
+

Actions

+
+
+
+
+
+
+ +"#, + refresh = refresh, + email = escape_html(&session.email), + identity = escape_html(&session.display_name), + group = escape_html(&config.allowed_group), + token_path = escape_html(&config.token_output_path.display().to_string()), + token_state = token_state, + login_action = login_action, + error = error, + ) +} + +fn render_error_page(message: &str) -> String { + format!( + r#"

Namespace Portal Error

{}

"#, + escape_html(message) + ) +} + +fn display_name(userinfo: &UserInfo) -> String { + if !userinfo.name.trim().is_empty() { + return userinfo.name.trim().to_owned(); + } + if !userinfo.preferred_username.trim().is_empty() { + return userinfo.preferred_username.trim().to_owned(); + } + userinfo.email.clone() +} + +async fn current_session(state: &AppState, headers: &HeaderMap) -> Result> { + let Some(session_id) = session_cookie(headers) else { + return Ok(None); + }; + Ok(state.sessions.lock().await.get(&session_id).cloned()) +} + +async fn require_session( + state: &AppState, + headers: &HeaderMap, +) -> Result { + current_session(state, headers) + .await + .map_err(internal_error)? + .ok_or_else(|| (StatusCode::UNAUTHORIZED, "sign-in required".to_owned())) +} + +async fn prune_pending(state: &AppState) { + state + .pending_logins + .lock() + .await + .retain(|_, login| login.expires_at > Instant::now()); +} + +fn session_cookie(headers: &HeaderMap) -> Option { + let cookie_header = headers.get(COOKIE)?.to_str().ok()?; + for pair in cookie_header.split(';') { + let mut parts = pair.trim().splitn(2, '='); + let name = parts.next()?.trim(); + let value = parts.next()?.trim(); + if name == SESSION_COOKIE && !value.is_empty() { + return Some(value.to_owned()); + } + } + None +} + +fn session_cookie_value(session_id: &str) -> String { + format!("{SESSION_COOKIE}={session_id}; Path=/; HttpOnly; Secure; SameSite=Lax") +} + +fn random_url_token(bytes: usize) -> String { + let mut buf = vec![0u8; bytes]; + rand::thread_rng().fill_bytes(&mut buf); + URL_SAFE_NO_PAD.encode(buf) +} + +fn pkce_challenge(verifier: &str) -> String { + let digest = digest(&SHA256, verifier.as_bytes()); + URL_SAFE_NO_PAD.encode(digest.as_ref()) +} + +fn escape_html(input: &str) -> String { + input + .replace('&', "&") + .replace('<', "<") + .replace('>', ">") + .replace('"', """) +} + +fn internal_error(err: impl std::fmt::Display) -> (StatusCode, String) { + (StatusCode::INTERNAL_SERVER_ERROR, 
err.to_string()) +} + +impl NamespaceSessionManager { + fn new(config: NamespacePortalConfig) -> Self { + Self { + config, + state: Arc::new(Mutex::new(NamespacePortalState::default())), + } + } + + async fn status(&self) -> Result { + let linked = self.check_login().await.is_ok(); + let state = self.state.lock().await.clone(); + let token_present = tokio::fs::metadata(&self.config.token_output_path) + .await + .is_ok(); + Ok(NamespaceStatus { + linked, + login_url: state.active_login.map(|login| login.login_url), + last_error: state.last_error, + token_present, + }) + } + + async fn start_login(&self) -> Result { + if self.check_login().await.is_ok() { + self.refresh_token().await?; + return Ok("already linked".to_owned()); + } + + { + let state = self.state.lock().await; + if let Some(active) = &state.active_login { + return Ok(active.login_url.clone()); + } + } + + self.config.ensure_paths()?; + let mut command = self.base_command(); + command + .args(["auth", "login", "--browser=false"]) + .stdout(Stdio::piped()) + .stderr(Stdio::null()); + let mut child = command.spawn().context("failed to spawn nsc auth login")?; + let stdout = child + .stdout + .take() + .context("nsc auth login stdout was not piped")?; + let mut lines = BufReader::new(stdout).lines(); + let mut login_url = None; + while let Some(line) = lines.next_line().await? 
{ + if let Some(candidate) = extract_namespace_login_url(&line) { + login_url = Some(candidate); + break; + } + } + + let login_url = login_url + .ok_or_else(|| anyhow!("nsc auth login did not emit a Namespace login URL"))?; + { + let mut state = self.state.lock().await; + state.active_login = Some(ActiveNamespaceLogin { login_url: login_url.clone() }); + state.last_error = None; + } + + let manager = self.clone(); + tokio::spawn(async move { + let outcome = child.wait().await; + let mut state = manager.state.lock().await; + state.active_login = None; + match outcome { + Ok(status) if status.success() => { + drop(state); + if let Err(err) = manager.refresh_token().await { + manager.state.lock().await.last_error = Some(format!( + "Namespace login finished, but token refresh failed: {err}" + )); + } + } + Ok(status) => { + state.last_error = Some(format!( + "Namespace login command exited with status {}", + status + )); + } + Err(err) => { + state.last_error = Some(format!("Namespace login command failed: {err}")); + } + } + }); + + Ok(login_url) + } + + async fn refresh_token(&self) -> Result<()> { + self.config.ensure_paths()?; + self.check_login().await?; + let mut command = self.base_command(); + command.args([ + "auth", + "generate-dev-token", + "--output_to", + self.config + .token_output_path + .to_str() + .ok_or_else(|| anyhow!("token output path is not valid UTF-8"))?, + ]); + let output = command + .output() + .await + .context("failed to run nsc token refresh")?; + if !output.status.success() { + bail!( + "nsc auth generate-dev-token failed: {}", + String::from_utf8_lossy(&output.stderr).trim() + ); + } + #[cfg(target_family = "unix")] + { + use std::os::unix::fs::PermissionsExt; + + let perms = fs::Permissions::from_mode(0o440); + fs::set_permissions(&self.config.token_output_path, perms).with_context(|| { + format!( + "failed to set permissions on {}", + self.config.token_output_path.display() + ) + })?; + } + self.state.lock().await.last_error = None; + 
Ok(()) + } + + async fn check_login(&self) -> Result<()> { + let mut command = self.base_command(); + command.args(["auth", "check-login", "--duration", AUTH_CHECK_DURATION]); + let output = command + .output() + .await + .context("failed to run nsc auth check-login")?; + if output.status.success() { + return Ok(()); + } + bail!("{}", String::from_utf8_lossy(&output.stderr).trim()); + } + + fn base_command(&self) -> Command { + let mut command = Command::new(&self.config.nsc_bin); + let home = self.config.nsc_state_dir.join("home"); + let data = self.config.nsc_state_dir.join("data"); + let cache = self.config.nsc_state_dir.join("cache"); + let config = self.config.nsc_state_dir.join("config"); + let _ = fs::create_dir_all(&home); + let _ = fs::create_dir_all(&data); + let _ = fs::create_dir_all(&cache); + let _ = fs::create_dir_all(&config); + command + .env("HOME", &home) + .env("XDG_DATA_HOME", &data) + .env("XDG_CACHE_HOME", &cache) + .env("XDG_CONFIG_HOME", &config); + command + } +} + +fn extract_namespace_login_url(line: &str) -> Option { + line.split_whitespace() + .find(|token| token.starts_with("https://")) + .map(ToOwned::to_owned) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn extracts_namespace_login_url_from_output() { + let url = extract_namespace_login_url( + " https://cloud.namespace.so/login/workspace?id=p0cl4ik19c4c473u14tvc3vq2o", + ); + assert_eq!( + url.as_deref(), + Some("https://cloud.namespace.so/login/workspace?id=p0cl4ik19c4c473u14tvc3vq2o") + ); + } + + #[test] + fn pkce_challenge_is_stable() { + assert_eq!( + pkce_challenge("hello"), + "LPJNul-wow4m6DsqxbninhsWHlwfp0JecwQzYpOLmCQ" + ); + } + + #[test] + fn parses_session_cookie() { + let mut headers = HeaderMap::new(); + headers.insert( + COOKIE, + HeaderValue::from_static( + "something=else; burrow_namespace_portal_session=session123; another=value", + ), + ); + assert_eq!(session_cookie(&headers).as_deref(), Some("session123")); + } +} diff --git a/flake.nix 
b/flake.nix index 1e91dcc..0bba0b1 100644 --- a/flake.nix +++ b/flake.nix @@ -94,6 +94,7 @@ pkgs.stdenvNoCC.mkDerivation { pname = "nsc"; inherit version src; + meta.mainProgram = "nsc"; dontConfigure = true; dontBuild = true; unpackPhase = '' @@ -144,6 +145,35 @@ subPackages = [ "./cmd/forgejo-nsc-autoscaler" ]; vendorHash = "sha256-Kpr+5Q7Dy4JiLuJVZbFeJAzLR7PLPYxhtJqfxMEytcs="; }; + burrowSrc = lib.cleanSourceWith { + src = ./.; + filter = path: type: + let + p = toString path; + name = builtins.baseNameOf path; + hasDir = dir: lib.hasInfix "/${dir}/" p || lib.hasSuffix "/${dir}" p; + in + !(hasDir ".git" || hasDir "target" || hasDir "node_modules" || name == "result"); + }; + burrowPkg = pkgs.rustPlatform.buildRustPackage { + pname = "burrow"; + version = "0.1.0"; + src = burrowSrc; + cargoLock = { + lockFile = ./Cargo.lock; + outputHashes = { + "tracing-oslog-0.1.2" = "sha256-DjJDiPCTn43zJmmOfuRnyti8iQf9qoXICMKIx4bAG3I="; + }; + }; + cargoBuildFlags = [ + "-p" + "burrow" + "--bin" + "burrow" + ]; + nativeBuildInputs = [ pkgs.protobuf ]; + meta.mainProgram = "burrow"; + }; in { devShells.default = pkgs.mkShell { @@ -171,6 +201,7 @@ packages = { agenix = agenix.packages.${system}.agenix; + burrow = burrowPkg; hcloud-upload-image = hcloudUploadImagePkg; forgejo-nsc-dispatcher = forgejoNscDispatcher; forgejo-nsc-autoscaler = forgejoNscAutoscaler; @@ -183,6 +214,7 @@ nixosModules.burrow-forgejo-nsc = nsc-autoscaler.nixosModules.default; nixosModules.burrow-authentik = import ./nixos/modules/burrow-authentik.nix; nixosModules.burrow-headscale = import ./nixos/modules/burrow-headscale.nix; + nixosModules.burrow-namespace-portal = import ./nixos/modules/burrow-namespace-portal.nix; nixosConfigurations.burrow-forge = nixpkgs.lib.nixosSystem { system = "x86_64-linux"; diff --git a/nixos/README.md b/nixos/README.md index c79d8ce..13fe76d 100644 --- a/nixos/README.md +++ b/nixos/README.md @@ -12,6 +12,7 @@ Mail hosting is intentionally not part of this NixOS host in the 
current plan. B - upstream `compatible.systems/conrad/nsc-autoscaler`: Namespace-backed ephemeral Forgejo runner module consumed via the Burrow flake input - `modules/burrow-authentik.nix`: minimal Authentik IdP for Burrow control planes - `modules/burrow-headscale.nix`: Headscale control plane rooted in Authentik OIDC +- `modules/burrow-namespace-portal.nix`: small admin portal for forge-owned Namespace authentication and NSC token refresh - `../secrets.nix`: agenix recipient map for tracked Burrow forge secrets - `hetzner-cloud-config.yaml`: desired Hetzner host shape - `keys/contact_at_burrow_net.pub`: initial operator SSH public key @@ -24,6 +25,7 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B - `../Scripts/forge-deploy.sh`: remote `nixos-rebuild` entrypoint for the forge host - `../Scripts/provision-forgejo-nsc.sh`: render Burrow Namespace dispatcher/autoscaler runtime inputs and ensure the default Forgejo scope exists - `../Scripts/sync-forgejo-nsc-config.sh`: copy intake-backed dispatcher/autoscaler inputs to the host +- `../Scripts/authentik-sync-namespace-portal-oidc.sh`: reconcile the Authentik OIDC app used by `nsc.burrow.net` ## Intended Flow @@ -33,10 +35,11 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B 4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account. 5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent `. 6. Run `Scripts/provision-forgejo-nsc.sh` locally, then `Scripts/sync-forgejo-nsc-config.sh` to place the raw Namespace dispatcher/autoscaler runtime inputs under `/var/lib/burrow/intake/` for the upstream `services.forgejo-nsc` module. -7. 
Ensure `/var/lib/agenix/agenix.key` exists on the host, encrypt `secrets/infra/authentik.env.age`, `secrets/infra/authentik-google-client-id.age`, `secrets/infra/authentik-google-client-secret.age`, `secrets/infra/forgejo-oidc-client-secret.age`, and `secrets/infra/headscale-oidc-client-secret.age`, and let agenix materialize them under `/run/agenix/`. -8. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, `auth.burrow.net`, `ts.burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME. -9. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace. -10. Configure Forward Email custom S3 backups for `burrow.net` and `burrow.rs` out-of-band with `Tools/forwardemail-custom-s3.sh`. +7. Visit `https://nsc.burrow.net/` as a Burrow admin to link the forge-owned Namespace session and rotate `/var/lib/burrow/intake/forgejo_nsc_token.txt` without relying on a personal local `nsc` login. +8. Ensure `/var/lib/agenix/agenix.key` exists on the host, encrypt `secrets/infra/authentik.env.age`, `secrets/infra/authentik-google-client-id.age`, `secrets/infra/authentik-google-client-secret.age`, `secrets/infra/forgejo-oidc-client-secret.age`, and `secrets/infra/headscale-oidc-client-secret.age`, and let agenix materialize them under `/run/agenix/`. +9. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, `auth.burrow.net`, `ts.burrow.net`, `nsc.burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME. +10. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace. +11. Configure Forward Email custom S3 backups for `burrow.net` and `burrow.rs` out-of-band with `Tools/forwardemail-custom-s3.sh`. 
## Current Constraints diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 75b76d4..aecdbfa 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -33,6 +33,7 @@ in self.nixosModules.burrow-forgejo-nsc self.nixosModules.burrow-authentik self.nixosModules.burrow-headscale + self.nixosModules.burrow-namespace-portal ]; system.stateVersion = "24.11"; @@ -89,8 +90,8 @@ in }; networking.extraHosts = '' - 127.0.0.1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net - ::1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net + 127.0.0.1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net nsc.burrow.net + ::1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net nsc.burrow.net ''; services.burrow.forge = { @@ -140,4 +141,11 @@ in enable = true; oidcClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path; }; + + services.burrow.namespacePortal = { + enable = true; + domain = "nsc.burrow.net"; + baseUrl = "https://nsc.burrow.net"; + adminGroup = contributors.groups.admins; + }; } diff --git a/nixos/modules/burrow-authentik.nix b/nixos/modules/burrow-authentik.nix index 1616b36..e2ee18d 100644 --- a/nixos/modules/burrow-authentik.nix +++ b/nixos/modules/burrow-authentik.nix @@ -10,6 +10,7 @@ let dataVolume = "burrow-authentik-data:/data"; directorySyncScript = ../../Scripts/authentik-sync-burrow-directory.sh; forgejoOidcSyncScript = ../../Scripts/authentik-sync-forgejo-oidc.sh; + namespacePortalOidcSyncScript = ../../Scripts/authentik-sync-namespace-portal-oidc.sh; tailscaleOidcSyncScript = ../../Scripts/authentik-sync-tailscale-oidc.sh; googleSourceSyncScript = ../../Scripts/authentik-sync-google-source.sh; tailnetAuthFlowSyncScript = ../../Scripts/authentik-sync-tailnet-auth-flow.sh; @@ -138,6 +139,30 @@ in description = "Authentik application slug for 
Tailscale custom OIDC sign-in."; }; + namespacePortalDomain = lib.mkOption { + type = lib.types.str; + default = "nsc.burrow.net"; + description = "Public domain for the Burrow Namespace portal."; + }; + + namespacePortalProviderSlug = lib.mkOption { + type = lib.types.str; + default = "namespace"; + description = "Authentik application slug for the Namespace portal."; + }; + + namespacePortalClientId = lib.mkOption { + type = lib.types.str; + default = "nsc.burrow.net"; + description = "Client ID Authentik should present to the Namespace portal."; + }; + + namespacePortalClientSecretFile = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Optional host-local file containing the Authentik Namespace portal OIDC client secret."; + }; + tailscaleClientId = lib.mkOption { type = lib.types.str; default = "tailscale.burrow.net"; @@ -708,6 +733,56 @@ EOF ''; }; + systemd.services.burrow-authentik-namespace-portal-oidc = { + description = "Reconcile the Burrow Authentik Namespace portal OIDC application"; + after = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = + [ + namespacePortalOidcSyncScript + cfg.envFile + ] + ++ lib.optionals (cfg.namespacePortalClientSecretFile != null) [ cfg.namespacePortalClientSecretFile ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_NAMESPACE_PORTAL_APPLICATION_SLUG=${lib.escapeShellArg cfg.namespacePortalProviderSlug} + export AUTHENTIK_NAMESPACE_PORTAL_APPLICATION_NAME="Namespace Portal" + export AUTHENTIK_NAMESPACE_PORTAL_PROVIDER_NAME="Namespace Portal" + export 
AUTHENTIK_NAMESPACE_PORTAL_TEMPLATE_SLUG=${lib.escapeShellArg cfg.headscaleProviderSlug} + export AUTHENTIK_NAMESPACE_PORTAL_CLIENT_ID=${lib.escapeShellArg cfg.namespacePortalClientId} + ${lib.optionalString (cfg.namespacePortalClientSecretFile != null) '' + export AUTHENTIK_NAMESPACE_PORTAL_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.namespacePortalClientSecretFile})" + ''} + export AUTHENTIK_NAMESPACE_PORTAL_LAUNCH_URL=https://${cfg.namespacePortalDomain}/ + export AUTHENTIK_NAMESPACE_PORTAL_REDIRECT_URIS_JSON='["https://${cfg.namespacePortalDomain}/oauth/callback"]' + + ${pkgs.bash}/bin/bash ${namespacePortalOidcSyncScript} + ''; + }; + services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' encode gzip zstd reverse_proxy 127.0.0.1:${toString cfg.port} diff --git a/nixos/modules/burrow-namespace-portal.nix b/nixos/modules/burrow-namespace-portal.nix new file mode 100644 index 0000000..2eb7b24 --- /dev/null +++ b/nixos/modules/burrow-namespace-portal.nix @@ -0,0 +1,126 @@ +{ config, lib, pkgs, self, ... 
}: + +let + cfg = config.services.burrow.namespacePortal; + burrowExe = lib.getExe self.packages.${pkgs.system}.burrow; + nscExe = lib.getExe self.packages.${pkgs.system}.nsc; +in +{ + options.services.burrow.namespacePortal = { + enable = lib.mkEnableOption "the Burrow Namespace authentication portal"; + + domain = lib.mkOption { + type = lib.types.str; + default = "nsc.burrow.net"; + description = "Public domain for the Namespace portal."; + }; + + port = lib.mkOption { + type = lib.types.port; + default = 9080; + description = "Local listen port for the Namespace portal."; + }; + + baseUrl = lib.mkOption { + type = lib.types.str; + default = "https://nsc.burrow.net"; + description = "Public base URL for redirects."; + }; + + oidcProviderSlug = lib.mkOption { + type = lib.types.str; + default = "namespace"; + description = "Authentik provider slug used for the portal."; + }; + + oidcClientId = lib.mkOption { + type = lib.types.str; + default = "nsc.burrow.net"; + description = "OIDC client ID used by the portal."; + }; + + oidcClientSecretFile = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Optional host-local OIDC client secret for the portal."; + }; + + adminGroup = lib.mkOption { + type = lib.types.str; + default = "burrow-admins"; + description = "Authentik group required to access the portal."; + }; + + stateDir = lib.mkOption { + type = lib.types.str; + default = "/var/lib/burrow/namespace-portal"; + description = "Persistent state directory for the portal-owned NSC session."; + }; + + tokenOutputPath = lib.mkOption { + type = lib.types.str; + default = "/var/lib/burrow/intake/forgejo_nsc_token.txt"; + description = "Path where refreshed NSC tokens should be written."; + }; + }; + + config = lib.mkIf cfg.enable { + assertions = [ + { + assertion = config.services.forgejo-nsc.enable; + message = "services.burrow.namespacePortal requires services.forgejo-nsc.enable"; + } + ]; + + systemd.tmpfiles.rules = [ + "d 
${cfg.stateDir} 0750 forgejo-nsc forgejo-nsc -" + "d ${cfg.stateDir}/nsc 0750 forgejo-nsc forgejo-nsc -" + ]; + + systemd.services.burrow-namespace-portal = { + description = "Burrow Namespace authentication portal"; + after = [ + "network-online.target" + "burrow-authentik-ready.service" + ]; + wants = [ + "network-online.target" + "burrow-authentik-ready.service" + ]; + wantedBy = [ "multi-user.target" ]; + path = [ + self.packages.${pkgs.system}.burrow + self.packages.${pkgs.system}.nsc + pkgs.coreutils + ]; + serviceConfig = { + Type = "simple"; + User = "forgejo-nsc"; + Group = "forgejo-nsc"; + WorkingDirectory = cfg.stateDir; + Restart = "on-failure"; + RestartSec = "2s"; + }; + script = '' + set -euo pipefail + export BURROW_NAMESPACE_PORTAL_LISTEN=127.0.0.1:${toString cfg.port} + export BURROW_NAMESPACE_PORTAL_BASE_URL=${lib.escapeShellArg cfg.baseUrl} + export BURROW_NAMESPACE_PORTAL_OIDC_DISCOVERY_URL=${lib.escapeShellArg "https://${config.services.burrow.authentik.domain}/application/o/${cfg.oidcProviderSlug}/.well-known/openid-configuration"} + export BURROW_NAMESPACE_PORTAL_OIDC_CLIENT_ID=${lib.escapeShellArg cfg.oidcClientId} + export BURROW_NAMESPACE_PORTAL_ALLOWED_GROUP=${lib.escapeShellArg cfg.adminGroup} + export BURROW_NAMESPACE_PORTAL_NSC_BIN=${lib.escapeShellArg nscExe} + export BURROW_NAMESPACE_PORTAL_NSC_STATE_DIR=${lib.escapeShellArg "${cfg.stateDir}/nsc"} + export BURROW_NAMESPACE_PORTAL_TOKEN_OUTPUT_PATH=${lib.escapeShellArg cfg.tokenOutputPath} + ${lib.optionalString (cfg.oidcClientSecretFile != null) '' + export BURROW_NAMESPACE_PORTAL_OIDC_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.oidcClientSecretFile})" + ''} + exec ${burrowExe} namespace-portal + ''; + }; + + services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' + encode gzip zstd + reverse_proxy 127.0.0.1:${toString cfg.port} + ''; + }; +} From 70607e874ce710bb05823f9206735c6fe6ea259a Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 5 Apr 2026 23:08:23 
-0700 Subject: [PATCH 21/59] Move forgejo-nsc credentials into agenix --- .../authentik-sync-namespace-portal-oidc.sh | 246 ----- Scripts/check-forge-host.sh | 12 +- Scripts/seal-forgejo-nsc-secrets.sh | 112 +++ Scripts/sync-forgejo-nsc-config.sh | 133 +-- burrow/src/main.rs | 12 - burrow/src/namespace_portal.rs | 880 ------------------ flake.nix | 2 - nixos/README.md | 15 +- nixos/hosts/burrow-forge/default.nix | 36 +- nixos/modules/burrow-authentik.nix | 75 -- nixos/modules/burrow-namespace-portal.nix | 126 --- secrets.nix | 3 + .../infra/forgejo-nsc-autoscaler-config.age | Bin 0 -> 1264 bytes .../infra/forgejo-nsc-dispatcher-config.age | Bin 0 -> 1127 bytes secrets/infra/forgejo-nsc-token.age | 15 + 15 files changed, 172 insertions(+), 1495 deletions(-) delete mode 100644 Scripts/authentik-sync-namespace-portal-oidc.sh create mode 100755 Scripts/seal-forgejo-nsc-secrets.sh delete mode 100644 burrow/src/namespace_portal.rs delete mode 100644 nixos/modules/burrow-namespace-portal.nix create mode 100644 secrets/infra/forgejo-nsc-autoscaler-config.age create mode 100644 secrets/infra/forgejo-nsc-dispatcher-config.age create mode 100644 secrets/infra/forgejo-nsc-token.age diff --git a/Scripts/authentik-sync-namespace-portal-oidc.sh b/Scripts/authentik-sync-namespace-portal-oidc.sh deleted file mode 100644 index a62b0cf..0000000 --- a/Scripts/authentik-sync-namespace-portal-oidc.sh +++ /dev/null @@ -1,246 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" -bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" -application_slug="${AUTHENTIK_NAMESPACE_PORTAL_APPLICATION_SLUG:-namespace}" -application_name="${AUTHENTIK_NAMESPACE_PORTAL_APPLICATION_NAME:-Namespace Portal}" -provider_name="${AUTHENTIK_NAMESPACE_PORTAL_PROVIDER_NAME:-Namespace Portal}" -template_slug="${AUTHENTIK_NAMESPACE_PORTAL_TEMPLATE_SLUG:-ts}" -client_id="${AUTHENTIK_NAMESPACE_PORTAL_CLIENT_ID:-nsc.burrow.net}" 
-client_secret="${AUTHENTIK_NAMESPACE_PORTAL_CLIENT_SECRET:-}" -launch_url="${AUTHENTIK_NAMESPACE_PORTAL_LAUNCH_URL:-https://nsc.burrow.net/}" -redirect_uris_json="${AUTHENTIK_NAMESPACE_PORTAL_REDIRECT_URIS_JSON:-[ - \"https://nsc.burrow.net/oauth/callback\" -]}" - -usage() { - cat <<'EOF' -Usage: Scripts/authentik-sync-namespace-portal-oidc.sh - -Required environment: - AUTHENTIK_BOOTSTRAP_TOKEN - -Optional environment: - AUTHENTIK_URL - AUTHENTIK_NAMESPACE_PORTAL_APPLICATION_SLUG - AUTHENTIK_NAMESPACE_PORTAL_APPLICATION_NAME - AUTHENTIK_NAMESPACE_PORTAL_PROVIDER_NAME - AUTHENTIK_NAMESPACE_PORTAL_TEMPLATE_SLUG - AUTHENTIK_NAMESPACE_PORTAL_CLIENT_ID - AUTHENTIK_NAMESPACE_PORTAL_CLIENT_SECRET - AUTHENTIK_NAMESPACE_PORTAL_LAUNCH_URL - AUTHENTIK_NAMESPACE_PORTAL_REDIRECT_URIS_JSON -EOF -} - -if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then - usage - exit 0 -fi - -if [[ -z "$bootstrap_token" ]]; then - echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 - exit 1 -fi - -if ! printf '%s' "$redirect_uris_json" | jq -e 'type == "array" and length > 0' >/dev/null; then - echo "error: AUTHENTIK_NAMESPACE_PORTAL_REDIRECT_URIS_JSON must be a non-empty JSON array" >&2 - exit 1 -fi - -api() { - local method="$1" - local path="$2" - local data="${3:-}" - - if [[ -n "$data" ]]; then - curl -fsS \ - -X "$method" \ - -H "Authorization: Bearer ${bootstrap_token}" \ - -H "Content-Type: application/json" \ - -d "$data" \ - "${authentik_url}${path}" - else - curl -fsS \ - -X "$method" \ - -H "Authorization: Bearer ${bootstrap_token}" \ - "${authentik_url}${path}" - fi -} - -api_with_status() { - local method="$1" - local path="$2" - local data="${3:-}" - local response_file status - - response_file="$(mktemp)" - trap 'rm -f "$response_file"' RETURN - - if [[ -n "$data" ]]; then - status="$( - curl -sS \ - -o "$response_file" \ - -w '%{http_code}' \ - -X "$method" \ - -H "Authorization: Bearer ${bootstrap_token}" \ - -H "Content-Type: application/json" \ - -d "$data" \ - 
"${authentik_url}${path}" - )" - else - status="$( - curl -sS \ - -o "$response_file" \ - -w '%{http_code}' \ - -X "$method" \ - -H "Authorization: Bearer ${bootstrap_token}" \ - "${authentik_url}${path}" - )" - fi - - printf '%s\n' "$status" - cat "$response_file" -} - -wait_for_authentik() { - for _ in $(seq 1 90); do - if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then - return 0 - fi - sleep 2 - done - - echo "error: Authentik did not become ready at ${authentik_url}" >&2 - exit 1 -} - -wait_for_authentik - -template_provider="$( - api GET "/api/v3/providers/oauth2/?page_size=200" \ - | jq -c --arg template_slug "$template_slug" '.results[]? | select(.assigned_application_slug == $template_slug)' \ - | head -n1 -)" - -if [[ -z "$template_provider" ]]; then - echo "error: could not resolve the Authentik OAuth provider template ${template_slug}" >&2 - exit 1 -fi - -authorization_flow="$(printf '%s\n' "$template_provider" | jq -r '.authorization_flow')" -invalidation_flow="$(printf '%s\n' "$template_provider" | jq -r '.invalidation_flow')" -property_mappings="$(printf '%s\n' "$template_provider" | jq -c '.property_mappings')" -signing_key="$(printf '%s\n' "$template_provider" | jq -r '.signing_key')" - -provider_payload="$( - jq -n \ - --arg name "$provider_name" \ - --arg authorization_flow "$authorization_flow" \ - --arg invalidation_flow "$invalidation_flow" \ - --arg client_id "$client_id" \ - --arg client_secret "$client_secret" \ - --arg signing_key "$signing_key" \ - --argjson property_mappings "$property_mappings" \ - --argjson redirect_uris "$redirect_uris_json" \ - '{ - name: $name, - authorization_flow: $authorization_flow, - invalidation_flow: $invalidation_flow, - client_type: (if $client_secret == "" then "public" else "confidential" end), - client_id: $client_id, - include_claims_in_id_token: true, - redirect_uris: ($redirect_uris | map({matching_mode: "strict", url: .})), - property_mappings: $property_mappings, - signing_key: 
$signing_key, - issuer_mode: "per_provider", - sub_mode: "hashed_user_id" - } - + (if $client_secret == "" then {} else {client_secret: $client_secret} end)' -)" - -existing_provider="$( - api GET "/api/v3/providers/oauth2/?page_size=200" \ - | jq -c \ - --arg application_slug "$application_slug" \ - --arg provider_name "$provider_name" \ - '.results[]? | select(.assigned_application_slug == $application_slug or .name == $provider_name)' \ - | head -n1 -)" - -if [[ -n "$existing_provider" ]]; then - provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')" - api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$provider_payload" >/dev/null -else - provider_pk="$( - api POST "/api/v3/providers/oauth2/" "$provider_payload" \ - | jq -r '.pk // empty' - )" -fi - -if [[ -z "${provider_pk:-}" ]]; then - echo "error: Namespace portal OIDC provider did not return a primary key" >&2 - exit 1 -fi - -application_payload="$( - jq -n \ - --arg name "$application_name" \ - --arg slug "$application_slug" \ - --arg provider "$provider_pk" \ - --arg launch_url "$launch_url" \ - '{ - name: $name, - slug: $slug, - provider: ($provider | tonumber), - meta_launch_url: $launch_url, - open_in_new_tab: false, - policy_engine_mode: "any" - }' -)" - -existing_application="$( - api GET "/api/v3/core/applications/?page_size=200" \ - | jq -c --arg slug "$application_slug" '.results[]? 
| select(.slug == $slug)' \ - | head -n1 -)" - -if [[ -n "$existing_application" ]]; then - application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" -else - create_application_result="$( - api_with_status POST "/api/v3/core/applications/" "$application_payload" - )" - create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')" - create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')" - - if [[ "$create_application_status" =~ ^20[01]$ ]]; then - application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')" - elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e ' - (.slug // [] | index("Application with this slug already exists.")) != null - or (.provider // [] | index("Application with this provider already exists.")) != null - ' >/dev/null; then - application_pk="existing-duplicate" - else - printf '%s\n' "$create_application_body" >&2 - echo "error: could not reconcile Authentik application ${application_slug}" >&2 - exit 1 - fi -fi - -if [[ -z "${application_pk:-}" ]]; then - echo "error: Namespace portal OIDC application did not return a primary key" >&2 - exit 1 -fi - -for _ in $(seq 1 30); do - if curl -fsS "${authentik_url}/application/o/${application_slug}/.well-known/openid-configuration" >/dev/null 2>&1; then - echo "Synced Authentik Namespace portal OIDC application ${application_slug} (${application_name})." - exit 0 - fi - sleep 2 -done - -echo "warning: Namespace portal OIDC issuer document for ${application_slug} was not immediately readable; keeping reconciled config." >&2 -echo "Synced Authentik Namespace portal OIDC application ${application_slug} (${application_name})." 
diff --git a/Scripts/check-forge-host.sh b/Scripts/check-forge-host.sh index d824f6d..0f79bf4 100755 --- a/Scripts/check-forge-host.sh +++ b/Scripts/check-forge-host.sh @@ -84,7 +84,6 @@ base_services=( nsc_services=( forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service - burrow-namespace-portal.service ) tailnet_services=( @@ -165,6 +164,14 @@ if [[ "${EXPECT_TAILNET}" == "1" ]]; then test -s /run/agenix/burrowHeadscaleOidcClientSecret fi +if [[ "${EXPECT_NSC}" == "1" ]]; then + echo "== agenix-nsc ==" + ls -l /run/agenix || true + test -s /run/agenix/burrowForgejoNscToken + test -s /run/agenix/burrowForgejoNscDispatcherConfig + test -s /run/agenix/burrowForgejoNscAutoscalerConfig +fi + if command -v curl >/dev/null 2>&1; then echo "== http-local ==" curl -fsS -o /dev/null -w 'forgejo_login %{http_code}\n' http://127.0.0.1:3000/user/login @@ -174,8 +181,5 @@ if command -v curl >/dev/null 2>&1; then curl -fsS -o /dev/null -H 'Host: auth.burrow.net' -w 'authentik_ready %{http_code}\n' http://127.0.0.1/-/health/ready/ curl -sS -o /dev/null -H 'Host: ts.burrow.net' -w 'headscale_root %{http_code}\n' http://127.0.0.1/ || true fi - if [[ "${EXPECT_NSC}" == "1" ]]; then - curl -fsS -o /dev/null -H 'Host: nsc.burrow.net' -w 'namespace_portal %{http_code}\n' http://127.0.0.1/ - fi fi EOF diff --git a/Scripts/seal-forgejo-nsc-secrets.sh b/Scripts/seal-forgejo-nsc-secrets.sh new file mode 100755 index 0000000..a6b3918 --- /dev/null +++ b/Scripts/seal-forgejo-nsc-secrets.sh @@ -0,0 +1,112 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" + +usage() { + cat <<'EOF' +Usage: Scripts/seal-forgejo-nsc-secrets.sh [options] + +Encrypt Burrow forgejo-nsc runtime inputs from intake/ into the agenix secrets +consumed by burrow-forge. + +Options: + --provision Re-render the local intake files before sealing. + --host SSH target forwarded to provision-forgejo-nsc.sh. 
+ --ssh-key SSH private key forwarded to provision-forgejo-nsc.sh. + --nsc-bin Override the nsc binary for provisioning. + -h, --help Show this help text. +EOF +} + +PROVISION=0 +HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" +SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" +NSC_BIN="${NSC_BIN:-}" + +while [[ $# -gt 0 ]]; do + case "$1" in + --provision) + PROVISION=1 + shift + ;; + --host) + HOST="${2:?missing value for --host}" + shift 2 + ;; + --ssh-key) + SSH_KEY="${2:?missing value for --ssh-key}" + shift 2 + ;; + --nsc-bin) + NSC_BIN="${2:?missing value for --nsc-bin}" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "unknown option: $1" >&2 + usage >&2 + exit 64 + ;; + esac +done + +require_cmd() { + if ! command -v "$1" >/dev/null 2>&1; then + echo "missing required command: $1" >&2 + exit 1 + fi +} + +require_cmd age +require_cmd nix +require_cmd python3 + +if [[ "${PROVISION}" -eq 1 ]]; then + provision_args=(--host "${HOST}" --ssh-key "${SSH_KEY}") + if [[ -n "${NSC_BIN}" ]]; then + provision_args+=(--nsc-bin "${NSC_BIN}") + fi + "${SCRIPT_DIR}/provision-forgejo-nsc.sh" "${provision_args[@]}" +fi + +tmpdir="$(mktemp -d)" +cleanup() { + rm -rf "${tmpdir}" +} +trap cleanup EXIT + +seal_secret() { + local target="$1" + local source_path="$2" + recipients_file="${tmpdir}/$(basename "${target}").recipients" + if [[ ! 
-s "${source_path}" ]]; then + echo "required runtime input missing or empty: ${source_path}" >&2 + exit 1 + fi + nix eval --impure --json --expr "let s = import ${REPO_ROOT}/secrets.nix; in s.\"${target}\".publicKeys" \ + | python3 -c 'import json, sys; [print(item) for item in json.load(sys.stdin)]' \ + > "${recipients_file}" + + age -R "${recipients_file}" -o "${REPO_ROOT}/${target}" "${source_path}" +} + +seal_secret "secrets/infra/forgejo-nsc-token.age" "${REPO_ROOT}/intake/forgejo_nsc_token.txt" +seal_secret "secrets/infra/forgejo-nsc-dispatcher-config.age" "${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml" +seal_secret "secrets/infra/forgejo-nsc-autoscaler-config.age" "${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml" + +chmod 600 \ + "${REPO_ROOT}/secrets/infra/forgejo-nsc-token.age" \ + "${REPO_ROOT}/secrets/infra/forgejo-nsc-dispatcher-config.age" \ + "${REPO_ROOT}/secrets/infra/forgejo-nsc-autoscaler-config.age" + +echo "Sealed forgejo-nsc runtime inputs into:" +printf ' %s\n' \ + "${REPO_ROOT}/secrets/infra/forgejo-nsc-token.age" \ + "${REPO_ROOT}/secrets/infra/forgejo-nsc-dispatcher-config.age" \ + "${REPO_ROOT}/secrets/infra/forgejo-nsc-autoscaler-config.age" +echo "Deploy burrow-forge to apply the new CI credentials." diff --git a/Scripts/sync-forgejo-nsc-config.sh b/Scripts/sync-forgejo-nsc-config.sh index 77581f8..2ce7114 100755 --- a/Scripts/sync-forgejo-nsc-config.sh +++ b/Scripts/sync-forgejo-nsc-config.sh @@ -1,132 +1,7 @@ #!/usr/bin/env bash set -euo pipefail -usage() { - cat <<'EOF' -Usage: Scripts/sync-forgejo-nsc-config.sh [options] - -Copy Burrow forgejo-nsc runtime inputs from intake/ onto the forge host and -restart the dispatcher/autoscaler units. - -Options: - --host SSH target (default: root@git.burrow.net) - --ssh-key SSH private key (default: intake/agent_at_burrow_net_ed25519) - --rotate-pat Re-render the intake files before syncing. - --no-restart Copy files only. - -h, --help Show this help text. 
-EOF -} - -SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" -REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" - -HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" -SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" -KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" -ROTATE_PAT=0 -NO_RESTART=0 - -while [[ $# -gt 0 ]]; do - case "$1" in - --host) - HOST="${2:?missing value for --host}" - shift 2 - ;; - --ssh-key) - SSH_KEY="${2:?missing value for --ssh-key}" - shift 2 - ;; - --rotate-pat) - ROTATE_PAT=1 - shift - ;; - --no-restart) - NO_RESTART=1 - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - echo "unknown option: $1" >&2 - usage >&2 - exit 64 - ;; - esac -done - -mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")" - -burrow_require_cmd() { - if ! command -v "$1" >/dev/null 2>&1; then - echo "missing required command: $1" >&2 - exit 1 - fi -} - -burrow_require_cmd ssh -burrow_require_cmd scp - -if [[ ! -f "${SSH_KEY}" ]]; then - echo "forge SSH key not found: ${SSH_KEY}" >&2 - exit 1 -fi - -if [[ "${ROTATE_PAT}" -eq 1 ]]; then - "${SCRIPT_DIR}/provision-forgejo-nsc.sh" --host "${HOST}" --ssh-key "${SSH_KEY}" -fi - -token_file="${REPO_ROOT}/intake/forgejo_nsc_token.txt" -dispatcher_file="${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml" -autoscaler_file="${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml" - -for path in "${token_file}" "${dispatcher_file}" "${autoscaler_file}"; do - if [[ ! 
-s "${path}" ]]; then - echo "required runtime input missing or empty: ${path}" >&2 - exit 1 - fi -done - -ssh_opts=( - -i "${SSH_KEY}" - -o IdentitiesOnly=yes - -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" - -o StrictHostKeyChecking=accept-new -) - -remote_tmp="$(ssh "${ssh_opts[@]}" "${HOST}" "mktemp -d")" -cleanup() { - if [[ -n "${remote_tmp:-}" ]]; then - ssh "${ssh_opts[@]}" "${HOST}" "rm -rf '${remote_tmp}'" >/dev/null 2>&1 || true - fi -} -trap cleanup EXIT - -scp "${ssh_opts[@]}" \ - "${token_file}" \ - "${dispatcher_file}" \ - "${autoscaler_file}" \ - "${HOST}:${remote_tmp}/" - -ssh "${ssh_opts[@]}" "${HOST}" " - set -euo pipefail - install -d -m 0755 /var/lib/burrow/intake - install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${token_file}")' /var/lib/burrow/intake/forgejo_nsc_token.txt - install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${dispatcher_file}")' /var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml - install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${autoscaler_file}")' /var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml -" - -if [[ "${NO_RESTART}" -eq 0 ]]; then - ssh "${ssh_opts[@]}" "${HOST}" " - set -euo pipefail - systemctl restart forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service - systemctl is-active forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service - ls -l \ - /var/lib/burrow/intake/forgejo_nsc_token.txt \ - /var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml \ - /var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml - " -fi - -echo "forgejo-nsc runtime sync complete (host=${HOST}, restarted=$((1 - NO_RESTART)))." +echo "Scripts/sync-forgejo-nsc-config.sh is obsolete." >&2 +echo "Burrow forgejo-nsc now consumes agenix-backed secrets instead of host-local intake files." >&2 +echo "Use Scripts/seal-forgejo-nsc-secrets.sh and deploy burrow-forge." 
>&2 +exit 1 diff --git a/burrow/src/main.rs b/burrow/src/main.rs index 01591e7..cfa2085 100644 --- a/burrow/src/main.rs +++ b/burrow/src/main.rs @@ -5,8 +5,6 @@ use clap::{Args, Parser, Subcommand}; mod control; #[cfg(any(target_os = "linux", target_vendor = "apple"))] mod daemon; -#[cfg(target_os = "linux")] -mod namespace_portal; pub(crate) mod tracing; #[cfg(any(target_os = "linux", target_vendor = "apple"))] mod wireguard; @@ -62,12 +60,6 @@ enum Commands { ReloadConfig(ReloadConfigArgs), /// Authentication server AuthServer, - #[cfg(target_os = "linux")] - /// Admin portal for forge-owned Namespace authentication and NSC token minting - NamespacePortal, - #[cfg(target_os = "linux")] - /// Refresh the forge-owned Namespace dev token once - NamespaceRefreshToken, /// Server Status ServerStatus, /// Tunnel Config @@ -767,10 +759,6 @@ async fn main() -> Result<()> { Commands::ServerConfig => try_serverconfig().await?, Commands::ReloadConfig(args) => try_reloadconfig(args.interface_id.clone()).await?, Commands::AuthServer => crate::auth::server::serve().await?, - #[cfg(target_os = "linux")] - Commands::NamespacePortal => crate::namespace_portal::serve().await?, - #[cfg(target_os = "linux")] - Commands::NamespaceRefreshToken => crate::namespace_portal::refresh_token_once().await?, Commands::ServerStatus => try_serverstatus().await?, Commands::TunnelConfig => try_tun_config().await?, Commands::NetworkAdd(args) => { diff --git a/burrow/src/namespace_portal.rs b/burrow/src/namespace_portal.rs deleted file mode 100644 index eb20775..0000000 --- a/burrow/src/namespace_portal.rs +++ /dev/null @@ -1,880 +0,0 @@ -#![cfg(target_os = "linux")] - -use std::{ - collections::HashMap, - env, fs, - path::{Path, PathBuf}, - process::Stdio, - sync::Arc, - time::{Duration, Instant}, -}; - -use anyhow::{anyhow, bail, Context, Result}; -use axum::{ - extract::{Query, State}, - http::{ - header::{COOKIE, LOCATION, SET_COOKIE}, - HeaderMap, HeaderValue, StatusCode, - }, - 
response::{Html, IntoResponse, Redirect, Response}, - routing::{get, post}, - Router, -}; -use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _}; -use rand::RngCore; -use reqwest::Url; -use ring::digest::{digest, SHA256}; -use serde::Deserialize; -use tokio::{ - io::{AsyncBufReadExt, BufReader}, - process::Command, - sync::Mutex, -}; - -const SESSION_COOKIE: &str = "burrow_namespace_portal_session"; -const OIDC_TIMEOUT: Duration = Duration::from_secs(600); -const AUTH_CHECK_DURATION: &str = "10m"; - -#[derive(Clone, Debug)] -pub struct NamespacePortalConfig { - pub listen: String, - pub public_base_url: String, - pub oidc_discovery_url: String, - pub oidc_client_id: String, - pub oidc_client_secret: Option, - pub allowed_group: String, - pub nsc_bin: String, - pub nsc_state_dir: PathBuf, - pub token_output_path: PathBuf, -} - -impl Default for NamespacePortalConfig { - fn default() -> Self { - Self { - listen: "127.0.0.1:9080".to_owned(), - public_base_url: "https://nsc.burrow.net".to_owned(), - oidc_discovery_url: - "https://auth.burrow.net/application/o/namespace/.well-known/openid-configuration" - .to_owned(), - oidc_client_id: "nsc.burrow.net".to_owned(), - oidc_client_secret: None, - allowed_group: "burrow-admins".to_owned(), - nsc_bin: "nsc".to_owned(), - nsc_state_dir: PathBuf::from("/var/lib/burrow/namespace-portal/nsc"), - token_output_path: PathBuf::from("/var/lib/burrow/intake/forgejo_nsc_token.txt"), - } - } -} - -impl NamespacePortalConfig { - pub fn from_env() -> Self { - let mut config = Self::default(); - if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_LISTEN") { - config.listen = value; - } - if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_BASE_URL") { - config.public_base_url = value; - } - if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_OIDC_DISCOVERY_URL") { - config.oidc_discovery_url = value; - } - if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_OIDC_CLIENT_ID") { - config.oidc_client_id = value; - } - if let 
Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_OIDC_CLIENT_SECRET") { - let value = value.trim().to_owned(); - if !value.is_empty() { - config.oidc_client_secret = Some(value); - } - } - if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_ALLOWED_GROUP") { - config.allowed_group = value; - } - if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_NSC_BIN") { - config.nsc_bin = value; - } - if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_NSC_STATE_DIR") { - config.nsc_state_dir = PathBuf::from(value); - } - if let Ok(value) = env::var("BURROW_NAMESPACE_PORTAL_TOKEN_OUTPUT_PATH") { - config.token_output_path = PathBuf::from(value); - } - config - } - - fn callback_url(&self) -> Result { - let mut url = Url::parse(&self.public_base_url) - .with_context(|| format!("invalid public base url {}", self.public_base_url))?; - url.set_path("/oauth/callback"); - url.set_query(None); - Ok(url.to_string()) - } - - fn ensure_paths(&self) -> Result<()> { - fs::create_dir_all(&self.nsc_state_dir).with_context(|| { - format!( - "failed to create namespace portal state dir {}", - self.nsc_state_dir.display() - ) - })?; - if let Some(parent) = self.token_output_path.parent() { - fs::create_dir_all(parent).with_context(|| { - format!("failed to create token output dir {}", parent.display()) - })?; - } - Ok(()) - } -} - -#[derive(Clone)] -struct AppState { - config: NamespacePortalConfig, - client: reqwest::Client, - oidc: OidcDiscovery, - pending_logins: Arc>>, - sessions: Arc>>, - namespace: NamespaceSessionManager, -} - -#[derive(Clone, Debug, Deserialize)] -struct OidcDiscovery { - authorization_endpoint: String, - token_endpoint: String, - userinfo_endpoint: String, -} - -#[derive(Clone, Debug)] -struct PendingOidcLogin { - verifier: String, - expires_at: Instant, -} - -#[derive(Clone, Debug)] -struct PortalSession { - email: String, - display_name: String, - groups: Vec, - issued_at: Instant, -} - -#[derive(Debug, Deserialize)] -struct OidcCallbackQuery { - code: Option, - 
state: Option, - error: Option, - error_description: Option, -} - -#[derive(Debug, Deserialize)] -struct TokenResponse { - access_token: String, -} - -#[derive(Debug, Deserialize)] -struct UserInfo { - #[serde(default)] - email: String, - #[serde(default)] - name: String, - #[serde(default)] - preferred_username: String, - #[serde(default)] - groups: Vec, -} - -#[derive(Clone)] -struct NamespaceSessionManager { - config: NamespacePortalConfig, - state: Arc>, -} - -#[derive(Clone, Debug, Default)] -struct NamespacePortalState { - active_login: Option, - last_error: Option, -} - -#[derive(Clone, Debug)] -struct ActiveNamespaceLogin { - login_url: String, -} - -#[derive(Clone, Debug)] -struct NamespaceStatus { - linked: bool, - login_url: Option, - last_error: Option, - token_present: bool, -} - -pub async fn serve() -> Result<()> { - serve_with_config(NamespacePortalConfig::from_env()).await -} - -pub async fn refresh_token_once() -> Result<()> { - let config = NamespacePortalConfig::from_env(); - config.ensure_paths()?; - NamespaceSessionManager::new(config).refresh_token().await -} - -pub async fn serve_with_config(config: NamespacePortalConfig) -> Result<()> { - config.ensure_paths()?; - let oidc = fetch_oidc_discovery(&config.oidc_discovery_url).await?; - let listen = config.listen.clone(); - let app = Router::new() - .route("/", get(index)) - .route("/healthz", get(healthz)) - .route("/login", get(oidc_login)) - .route("/logout", post(logout)) - .route("/oauth/callback", get(oidc_callback)) - .route("/namespace/link/start", post(namespace_link_start)) - .route("/namespace/token/refresh", post(namespace_token_refresh)) - .with_state(AppState { - config: config.clone(), - client: reqwest::Client::builder() - .redirect(reqwest::redirect::Policy::none()) - .build()?, - oidc, - pending_logins: Arc::new(Mutex::new(HashMap::new())), - sessions: Arc::new(Mutex::new(HashMap::new())), - namespace: NamespaceSessionManager::new(config), - }); - - let listener = 
tokio::net::TcpListener::bind(&listen).await?; - log::info!("Starting Namespace portal on {}", listen); - axum::serve(listener, app).await?; - Ok(()) -} - -async fn fetch_oidc_discovery(discovery_url: &str) -> Result { - reqwest::Client::new() - .get(discovery_url) - .send() - .await - .with_context(|| format!("failed to fetch oidc discovery {}", discovery_url))? - .error_for_status() - .with_context(|| format!("oidc discovery returned non-success {}", discovery_url))? - .json() - .await - .context("failed to decode oidc discovery document") -} - -async fn healthz() -> impl IntoResponse { - StatusCode::OK -} - -async fn index(State(state): State, headers: HeaderMap) -> Response { - match current_session(&state, &headers).await { - Ok(Some(session)) => { - let namespace_status = match state.namespace.status().await { - Ok(status) => status, - Err(err) => NamespaceStatus { - linked: false, - login_url: None, - last_error: Some(err.to_string()), - token_present: false, - }, - }; - Html(render_dashboard(&state.config, &session, &namespace_status)).into_response() - } - Ok(None) => Html(render_login_page()).into_response(), - Err(err) => ( - StatusCode::INTERNAL_SERVER_ERROR, - Html(render_error_page(&format!("session lookup failed: {err}"))), - ) - .into_response(), - } -} - -async fn oidc_login(State(state): State) -> Result { - prune_pending(&state).await; - let state_token = random_url_token(32); - let verifier = random_url_token(48); - let challenge = pkce_challenge(&verifier); - let callback_url = state.config.callback_url().map_err(internal_error)?; - - state.pending_logins.lock().await.insert( - state_token.clone(), - PendingOidcLogin { - verifier, - expires_at: Instant::now() + OIDC_TIMEOUT, - }, - ); - - let mut url = Url::parse(&state.oidc.authorization_endpoint).map_err(internal_error)?; - url.query_pairs_mut() - .append_pair("client_id", &state.config.oidc_client_id) - .append_pair("response_type", "code") - .append_pair("scope", "openid profile email 
groups") - .append_pair("redirect_uri", &callback_url) - .append_pair("state", &state_token) - .append_pair("code_challenge", &challenge) - .append_pair("code_challenge_method", "S256"); - Ok(Redirect::to(url.as_str())) -} - -async fn oidc_callback( - State(state): State, - Query(query): Query, -) -> Result { - if let Some(error) = query.error { - let description = query.error_description.unwrap_or_default(); - return Err(( - StatusCode::BAD_GATEWAY, - format!("oidc login failed: {error} {description}") - .trim() - .to_owned(), - )); - } - - let code = query - .code - .ok_or_else(|| (StatusCode::BAD_REQUEST, "missing oidc code".to_owned()))?; - let state_token = query - .state - .ok_or_else(|| (StatusCode::BAD_REQUEST, "missing oidc state".to_owned()))?; - - let verifier = { - let mut pending = state.pending_logins.lock().await; - let Some(login) = pending.remove(&state_token) else { - return Err((StatusCode::BAD_REQUEST, "unknown oidc state".to_owned())); - }; - if login.expires_at <= Instant::now() { - return Err((StatusCode::BAD_REQUEST, "expired oidc state".to_owned())); - } - login.verifier - }; - - let callback_url = state.config.callback_url().map_err(internal_error)?; - - let mut params = vec![ - ("grant_type", "authorization_code".to_owned()), - ("code", code), - ("client_id", state.config.oidc_client_id.clone()), - ("redirect_uri", callback_url), - ("code_verifier", verifier), - ]; - if let Some(secret) = &state.config.oidc_client_secret { - params.push(("client_secret", secret.clone())); - } - - let token = state - .client - .post(&state.oidc.token_endpoint) - .form(¶ms) - .send() - .await - .context("failed to exchange oidc code") - .map_err(internal_error)? - .error_for_status() - .context("oidc token endpoint returned non-success") - .map_err(internal_error)? 
- .json::() - .await - .context("failed to decode oidc token response") - .map_err(internal_error)?; - - let userinfo = state - .client - .get(&state.oidc.userinfo_endpoint) - .bearer_auth(&token.access_token) - .send() - .await - .context("failed to fetch oidc userinfo") - .map_err(internal_error)? - .error_for_status() - .context("oidc userinfo returned non-success") - .map_err(internal_error)? - .json::() - .await - .context("failed to decode oidc userinfo") - .map_err(internal_error)?; - - if !userinfo - .groups - .iter() - .any(|group| group == &state.config.allowed_group) - { - return Err(( - StatusCode::FORBIDDEN, - format!( - "authenticated user is not in required group {}", - state.config.allowed_group - ), - )); - } - - let session_id = random_url_token(32); - state.sessions.lock().await.insert( - session_id.clone(), - PortalSession { - email: userinfo.email.clone(), - display_name: display_name(&userinfo), - groups: userinfo.groups, - issued_at: Instant::now(), - }, - ); - - let mut response = Redirect::to("/").into_response(); - response.headers_mut().insert( - SET_COOKIE, - HeaderValue::from_str(&session_cookie_value(&session_id)).map_err(internal_error)?, - ); - Ok(response) -} - -async fn logout( - State(state): State, - headers: HeaderMap, -) -> Result { - if let Some(session_id) = session_cookie(&headers) { - state.sessions.lock().await.remove(&session_id); - } - let mut response = Redirect::to("/").into_response(); - response.headers_mut().insert( - SET_COOKIE, - HeaderValue::from_static( - "burrow_namespace_portal_session=; Path=/; Max-Age=0; HttpOnly; Secure; SameSite=Lax", - ), - ); - Ok(response) -} - -async fn namespace_link_start( - State(state): State, - headers: HeaderMap, -) -> Result { - require_session(&state, &headers).await?; - state - .namespace - .start_login() - .await - .map_err(internal_error)?; - Ok(Redirect::to("/")) -} - -async fn namespace_token_refresh( - State(state): State, - headers: HeaderMap, -) -> Result { - 
require_session(&state, &headers).await?; - state - .namespace - .refresh_token() - .await - .map_err(internal_error)?; - Ok(Redirect::to("/")) -} - -fn render_login_page() -> String { - r#" - - - - - Burrow Namespace Portal - - - -
-

Burrow Namespace Portal

-

Authenticate with burrow.net to manage the dedicated Namespace session that backs Forgejo NSC automation.

- Sign in with burrow.net -
- -"# - .to_owned() -} - -fn render_dashboard( - config: &NamespacePortalConfig, - session: &PortalSession, - status: &NamespaceStatus, -) -> String { - let refresh = if status.login_url.is_some() { - r#""# - } else { - "" - }; - let login_action = if let Some(url) = &status.login_url { - format!( - "

Namespace Login In Progress

Open the live Namespace URL below with the dedicated Burrow account. This page will refresh automatically until the server-side session is ready.

Open Namespace Login

", - escape_html(url) - ) - } else if status.linked { - "

Namespace Linked

The forge-owned NSC session is authenticated and ready to mint runner tokens.

".to_owned() - } else { - "

Namespace Not Linked

Start a server-side Namespace login. The portal will produce a Namespace URL, and completing that browser flow will authenticate the forge-owned NSC state directory.

".to_owned() - }; - let error = status - .last_error - .as_ref() - .map(|error| format!("

{}

", escape_html(error))) - .unwrap_or_default(); - let token_state = if status.token_present { - "present" - } else { - "missing" - }; - format!( - r#" - - - - - Burrow Namespace Portal - {refresh} - - - -
-
-
-

Burrow Namespace Portal

-

Signed in as {email}. This page controls the forge-owned NSC session and token material for Forgejo Namespace runners.

-
-
-
- -
-
-
burrow.net identity
{identity}
-
required group
{group}
-
NSC token file
{token_path}
-
current token
{token_state}
-
-
- - {login_action} - {error} - -
-

Actions

-
-
-
-
-
-
- -"#, - refresh = refresh, - email = escape_html(&session.email), - identity = escape_html(&session.display_name), - group = escape_html(&config.allowed_group), - token_path = escape_html(&config.token_output_path.display().to_string()), - token_state = token_state, - login_action = login_action, - error = error, - ) -} - -fn render_error_page(message: &str) -> String { - format!( - r#"

Namespace Portal Error

{}

"#, - escape_html(message) - ) -} - -fn display_name(userinfo: &UserInfo) -> String { - if !userinfo.name.trim().is_empty() { - return userinfo.name.trim().to_owned(); - } - if !userinfo.preferred_username.trim().is_empty() { - return userinfo.preferred_username.trim().to_owned(); - } - userinfo.email.clone() -} - -async fn current_session(state: &AppState, headers: &HeaderMap) -> Result> { - let Some(session_id) = session_cookie(headers) else { - return Ok(None); - }; - Ok(state.sessions.lock().await.get(&session_id).cloned()) -} - -async fn require_session( - state: &AppState, - headers: &HeaderMap, -) -> Result { - current_session(state, headers) - .await - .map_err(internal_error)? - .ok_or_else(|| (StatusCode::UNAUTHORIZED, "sign-in required".to_owned())) -} - -async fn prune_pending(state: &AppState) { - state - .pending_logins - .lock() - .await - .retain(|_, login| login.expires_at > Instant::now()); -} - -fn session_cookie(headers: &HeaderMap) -> Option { - let cookie_header = headers.get(COOKIE)?.to_str().ok()?; - for pair in cookie_header.split(';') { - let mut parts = pair.trim().splitn(2, '='); - let name = parts.next()?.trim(); - let value = parts.next()?.trim(); - if name == SESSION_COOKIE && !value.is_empty() { - return Some(value.to_owned()); - } - } - None -} - -fn session_cookie_value(session_id: &str) -> String { - format!("{SESSION_COOKIE}={session_id}; Path=/; HttpOnly; Secure; SameSite=Lax") -} - -fn random_url_token(bytes: usize) -> String { - let mut buf = vec![0u8; bytes]; - rand::thread_rng().fill_bytes(&mut buf); - URL_SAFE_NO_PAD.encode(buf) -} - -fn pkce_challenge(verifier: &str) -> String { - let digest = digest(&SHA256, verifier.as_bytes()); - URL_SAFE_NO_PAD.encode(digest.as_ref()) -} - -fn escape_html(input: &str) -> String { - input - .replace('&', "&") - .replace('<', "<") - .replace('>', ">") - .replace('"', """) -} - -fn internal_error(err: impl std::fmt::Display) -> (StatusCode, String) { - (StatusCode::INTERNAL_SERVER_ERROR, 
err.to_string()) -} - -impl NamespaceSessionManager { - fn new(config: NamespacePortalConfig) -> Self { - Self { - config, - state: Arc::new(Mutex::new(NamespacePortalState::default())), - } - } - - async fn status(&self) -> Result { - let linked = self.check_login().await.is_ok(); - let state = self.state.lock().await.clone(); - let token_present = tokio::fs::metadata(&self.config.token_output_path) - .await - .is_ok(); - Ok(NamespaceStatus { - linked, - login_url: state.active_login.map(|login| login.login_url), - last_error: state.last_error, - token_present, - }) - } - - async fn start_login(&self) -> Result { - if self.check_login().await.is_ok() { - self.refresh_token().await?; - return Ok("already linked".to_owned()); - } - - { - let state = self.state.lock().await; - if let Some(active) = &state.active_login { - return Ok(active.login_url.clone()); - } - } - - self.config.ensure_paths()?; - let mut command = self.base_command(); - command - .args(["auth", "login", "--browser=false"]) - .stdout(Stdio::piped()) - .stderr(Stdio::null()); - let mut child = command.spawn().context("failed to spawn nsc auth login")?; - let stdout = child - .stdout - .take() - .context("nsc auth login stdout was not piped")?; - let mut lines = BufReader::new(stdout).lines(); - let mut login_url = None; - while let Some(line) = lines.next_line().await? 
{ - if let Some(candidate) = extract_namespace_login_url(&line) { - login_url = Some(candidate); - break; - } - } - - let login_url = login_url - .ok_or_else(|| anyhow!("nsc auth login did not emit a Namespace login URL"))?; - { - let mut state = self.state.lock().await; - state.active_login = Some(ActiveNamespaceLogin { login_url: login_url.clone() }); - state.last_error = None; - } - - let manager = self.clone(); - tokio::spawn(async move { - let outcome = child.wait().await; - let mut state = manager.state.lock().await; - state.active_login = None; - match outcome { - Ok(status) if status.success() => { - drop(state); - if let Err(err) = manager.refresh_token().await { - manager.state.lock().await.last_error = Some(format!( - "Namespace login finished, but token refresh failed: {err}" - )); - } - } - Ok(status) => { - state.last_error = Some(format!( - "Namespace login command exited with status {}", - status - )); - } - Err(err) => { - state.last_error = Some(format!("Namespace login command failed: {err}")); - } - } - }); - - Ok(login_url) - } - - async fn refresh_token(&self) -> Result<()> { - self.config.ensure_paths()?; - self.check_login().await?; - let mut command = self.base_command(); - command.args([ - "auth", - "generate-dev-token", - "--output_to", - self.config - .token_output_path - .to_str() - .ok_or_else(|| anyhow!("token output path is not valid UTF-8"))?, - ]); - let output = command - .output() - .await - .context("failed to run nsc token refresh")?; - if !output.status.success() { - bail!( - "nsc auth generate-dev-token failed: {}", - String::from_utf8_lossy(&output.stderr).trim() - ); - } - #[cfg(target_family = "unix")] - { - use std::os::unix::fs::PermissionsExt; - - let perms = fs::Permissions::from_mode(0o440); - fs::set_permissions(&self.config.token_output_path, perms).with_context(|| { - format!( - "failed to set permissions on {}", - self.config.token_output_path.display() - ) - })?; - } - self.state.lock().await.last_error = None; - 
Ok(()) - } - - async fn check_login(&self) -> Result<()> { - let mut command = self.base_command(); - command.args(["auth", "check-login", "--duration", AUTH_CHECK_DURATION]); - let output = command - .output() - .await - .context("failed to run nsc auth check-login")?; - if output.status.success() { - return Ok(()); - } - bail!("{}", String::from_utf8_lossy(&output.stderr).trim()); - } - - fn base_command(&self) -> Command { - let mut command = Command::new(&self.config.nsc_bin); - let home = self.config.nsc_state_dir.join("home"); - let data = self.config.nsc_state_dir.join("data"); - let cache = self.config.nsc_state_dir.join("cache"); - let config = self.config.nsc_state_dir.join("config"); - let _ = fs::create_dir_all(&home); - let _ = fs::create_dir_all(&data); - let _ = fs::create_dir_all(&cache); - let _ = fs::create_dir_all(&config); - command - .env("HOME", &home) - .env("XDG_DATA_HOME", &data) - .env("XDG_CACHE_HOME", &cache) - .env("XDG_CONFIG_HOME", &config); - command - } -} - -fn extract_namespace_login_url(line: &str) -> Option { - line.split_whitespace() - .find(|token| token.starts_with("https://")) - .map(ToOwned::to_owned) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn extracts_namespace_login_url_from_output() { - let url = extract_namespace_login_url( - " https://cloud.namespace.so/login/workspace?id=p0cl4ik19c4c473u14tvc3vq2o", - ); - assert_eq!( - url.as_deref(), - Some("https://cloud.namespace.so/login/workspace?id=p0cl4ik19c4c473u14tvc3vq2o") - ); - } - - #[test] - fn pkce_challenge_is_stable() { - assert_eq!( - pkce_challenge("hello"), - "LPJNul-wow4m6DsqxbninhsWHlwfp0JecwQzYpOLmCQ" - ); - } - - #[test] - fn parses_session_cookie() { - let mut headers = HeaderMap::new(); - headers.insert( - COOKIE, - HeaderValue::from_static( - "something=else; burrow_namespace_portal_session=session123; another=value", - ), - ); - assert_eq!(session_cookie(&headers).as_deref(), Some("session123")); - } -} diff --git a/flake.nix 
b/flake.nix index 0bba0b1..1974f17 100644 --- a/flake.nix +++ b/flake.nix @@ -214,8 +214,6 @@ nixosModules.burrow-forgejo-nsc = nsc-autoscaler.nixosModules.default; nixosModules.burrow-authentik = import ./nixos/modules/burrow-authentik.nix; nixosModules.burrow-headscale = import ./nixos/modules/burrow-headscale.nix; - nixosModules.burrow-namespace-portal = import ./nixos/modules/burrow-namespace-portal.nix; - nixosConfigurations.burrow-forge = nixpkgs.lib.nixosSystem { system = "x86_64-linux"; specialArgs = { diff --git a/nixos/README.md b/nixos/README.md index 13fe76d..23907f3 100644 --- a/nixos/README.md +++ b/nixos/README.md @@ -12,7 +12,6 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B - upstream `compatible.systems/conrad/nsc-autoscaler`: Namespace-backed ephemeral Forgejo runner module consumed via the Burrow flake input - `modules/burrow-authentik.nix`: minimal Authentik IdP for Burrow control planes - `modules/burrow-headscale.nix`: Headscale control plane rooted in Authentik OIDC -- `modules/burrow-namespace-portal.nix`: small admin portal for forge-owned Namespace authentication and NSC token refresh - `../secrets.nix`: agenix recipient map for tracked Burrow forge secrets - `hetzner-cloud-config.yaml`: desired Hetzner host shape - `keys/contact_at_burrow_net.pub`: initial operator SSH public key @@ -24,8 +23,7 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. 
B - `../Scripts/cloudflare-upsert-a-record.sh`: upsert DNS-only Cloudflare `A` records for Burrow host cutovers - `../Scripts/forge-deploy.sh`: remote `nixos-rebuild` entrypoint for the forge host - `../Scripts/provision-forgejo-nsc.sh`: render Burrow Namespace dispatcher/autoscaler runtime inputs and ensure the default Forgejo scope exists -- `../Scripts/sync-forgejo-nsc-config.sh`: copy intake-backed dispatcher/autoscaler inputs to the host -- `../Scripts/authentik-sync-namespace-portal-oidc.sh`: reconcile the Authentik OIDC app used by `nsc.burrow.net` +- `../Scripts/seal-forgejo-nsc-secrets.sh`: encrypt forgejo-nsc runtime inputs into the agenix secrets consumed by `burrow-forge` ## Intended Flow @@ -34,16 +32,17 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B 3. Run `Scripts/bootstrap-forge-intake.sh` to place the Forgejo bootstrap password file and automation SSH key under `/var/lib/burrow/intake/`. 4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account. 5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent `. -6. Run `Scripts/provision-forgejo-nsc.sh` locally, then `Scripts/sync-forgejo-nsc-config.sh` to place the raw Namespace dispatcher/autoscaler runtime inputs under `/var/lib/burrow/intake/` for the upstream `services.forgejo-nsc` module. -7. Visit `https://nsc.burrow.net/` as a Burrow admin to link the forge-owned Namespace session and rotate `/var/lib/burrow/intake/forgejo_nsc_token.txt` without relying on a personal local `nsc` login. -8. Ensure `/var/lib/agenix/agenix.key` exists on the host, encrypt `secrets/infra/authentik.env.age`, `secrets/infra/authentik-google-client-id.age`, `secrets/infra/authentik-google-client-secret.age`, `secrets/infra/forgejo-oidc-client-secret.age`, and `secrets/infra/headscale-oidc-client-secret.age`, and let agenix materialize them under `/run/agenix/`. -9. 
Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, `auth.burrow.net`, `ts.burrow.net`, `nsc.burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME. +6. Run `Scripts/provision-forgejo-nsc.sh` locally to refresh `intake/forgejo_nsc_token.txt`, `intake/forgejo_nsc_dispatcher.yaml`, and `intake/forgejo_nsc_autoscaler.yaml`. +7. Run `Scripts/seal-forgejo-nsc-secrets.sh` to encrypt those runtime inputs into the agenix secrets used by `burrow-forge`. +8. Ensure `/var/lib/agenix/agenix.key` exists on the host, encrypt `secrets/infra/authentik.env.age`, `secrets/infra/authentik-google-client-id.age`, `secrets/infra/authentik-google-client-secret.age`, `secrets/infra/forgejo-oidc-client-secret.age`, `secrets/infra/headscale-oidc-client-secret.age`, `secrets/infra/forgejo-nsc-token.age`, `secrets/infra/forgejo-nsc-dispatcher-config.age`, and `secrets/infra/forgejo-nsc-autoscaler-config.age`, and let agenix materialize them under `/run/agenix/`. +9. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, `auth.burrow.net`, `ts.burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME. 10. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace. 11. Configure Forward Email custom S3 backups for `burrow.net` and `burrow.rs` out-of-band with `Tools/forwardemail-custom-s3.sh`. ## Current Constraints -- `burrow-forge` is live on NixOS in `hel1` at `89.167.47.21`, and `Scripts/check-forge-host.sh --expect-nsc` passes locally against that host. +- `burrow-forge` is live on NixOS in `hel1` at `89.167.47.21`. +- `services.forgejo-nsc` now expects agenix-backed runtime inputs at `/run/agenix/burrowForgejoNscToken`, `/run/agenix/burrowForgejoNscDispatcherConfig`, and `/run/agenix/burrowForgejoNscAutoscalerConfig`. 
- Authentik and Headscale secrets now live in tracked agenix blobs under `secrets/infra/` and decrypt to `/run/agenix/` on the forge host. - Public Burrow forge cutover completed on March 15, 2026: - `burrow.net`, `git.burrow.net`, and `nsc-autoscaler.burrow.net` now publish public `A` records to `89.167.47.21` diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index aecdbfa..7f6af22 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -33,7 +33,6 @@ in self.nixosModules.burrow-forgejo-nsc self.nixosModules.burrow-authentik self.nixosModules.burrow-headscale - self.nixosModules.burrow-namespace-portal ]; system.stateVersion = "24.11"; @@ -88,10 +87,28 @@ in group = "root"; mode = "0400"; }; + age.secrets.burrowForgejoNscToken = { + file = ../../../secrets/infra/forgejo-nsc-token.age; + owner = "forgejo-nsc"; + group = "forgejo-nsc"; + mode = "0400"; + }; + age.secrets.burrowForgejoNscDispatcherConfig = { + file = ../../../secrets/infra/forgejo-nsc-dispatcher-config.age; + owner = "forgejo-nsc"; + group = "forgejo-nsc"; + mode = "0400"; + }; + age.secrets.burrowForgejoNscAutoscalerConfig = { + file = ../../../secrets/infra/forgejo-nsc-autoscaler-config.age; + owner = "forgejo-nsc"; + group = "forgejo-nsc"; + mode = "0400"; + }; networking.extraHosts = '' - 127.0.0.1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net nsc.burrow.net - ::1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net nsc.burrow.net + 127.0.0.1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net + ::1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net ''; services.burrow.forge = { @@ -113,13 +130,13 @@ in services.forgejo-nsc = { enable = true; - nscTokenFile = "/var/lib/burrow/intake/forgejo_nsc_token.txt"; + nscTokenFile = config.age.secrets.burrowForgejoNscToken.path; dispatcher = { - 
configFile = "/var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml"; + configFile = config.age.secrets.burrowForgejoNscDispatcherConfig.path; }; autoscaler = { enable = true; - configFile = "/var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml"; + configFile = config.age.secrets.burrowForgejoNscAutoscalerConfig.path; }; }; @@ -141,11 +158,4 @@ in enable = true; oidcClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path; }; - - services.burrow.namespacePortal = { - enable = true; - domain = "nsc.burrow.net"; - baseUrl = "https://nsc.burrow.net"; - adminGroup = contributors.groups.admins; - }; } diff --git a/nixos/modules/burrow-authentik.nix b/nixos/modules/burrow-authentik.nix index e2ee18d..1616b36 100644 --- a/nixos/modules/burrow-authentik.nix +++ b/nixos/modules/burrow-authentik.nix @@ -10,7 +10,6 @@ let dataVolume = "burrow-authentik-data:/data"; directorySyncScript = ../../Scripts/authentik-sync-burrow-directory.sh; forgejoOidcSyncScript = ../../Scripts/authentik-sync-forgejo-oidc.sh; - namespacePortalOidcSyncScript = ../../Scripts/authentik-sync-namespace-portal-oidc.sh; tailscaleOidcSyncScript = ../../Scripts/authentik-sync-tailscale-oidc.sh; googleSourceSyncScript = ../../Scripts/authentik-sync-google-source.sh; tailnetAuthFlowSyncScript = ../../Scripts/authentik-sync-tailnet-auth-flow.sh; @@ -139,30 +138,6 @@ in description = "Authentik application slug for Tailscale custom OIDC sign-in."; }; - namespacePortalDomain = lib.mkOption { - type = lib.types.str; - default = "nsc.burrow.net"; - description = "Public domain for the Burrow Namespace portal."; - }; - - namespacePortalProviderSlug = lib.mkOption { - type = lib.types.str; - default = "namespace"; - description = "Authentik application slug for the Namespace portal."; - }; - - namespacePortalClientId = lib.mkOption { - type = lib.types.str; - default = "nsc.burrow.net"; - description = "Client ID Authentik should present to the Namespace portal."; - }; - - 
namespacePortalClientSecretFile = lib.mkOption { - type = lib.types.nullOr lib.types.str; - default = null; - description = "Optional host-local file containing the Authentik Namespace portal OIDC client secret."; - }; - tailscaleClientId = lib.mkOption { type = lib.types.str; default = "tailscale.burrow.net"; @@ -733,56 +708,6 @@ EOF ''; }; - systemd.services.burrow-authentik-namespace-portal-oidc = { - description = "Reconcile the Burrow Authentik Namespace portal OIDC application"; - after = [ - "burrow-authentik-ready.service" - "network-online.target" - ]; - wants = [ - "burrow-authentik-ready.service" - "network-online.target" - ]; - wantedBy = [ "multi-user.target" ]; - restartTriggers = - [ - namespacePortalOidcSyncScript - cfg.envFile - ] - ++ lib.optionals (cfg.namespacePortalClientSecretFile != null) [ cfg.namespacePortalClientSecretFile ]; - path = [ - pkgs.bash - pkgs.coreutils - pkgs.curl - pkgs.jq - ]; - serviceConfig = { - Type = "oneshot"; - User = "root"; - Group = "root"; - }; - script = '' - set -euo pipefail - set -a - source ${lib.escapeShellArg cfg.envFile} - set +a - - export AUTHENTIK_URL=https://${cfg.domain} - export AUTHENTIK_NAMESPACE_PORTAL_APPLICATION_SLUG=${lib.escapeShellArg cfg.namespacePortalProviderSlug} - export AUTHENTIK_NAMESPACE_PORTAL_APPLICATION_NAME="Namespace Portal" - export AUTHENTIK_NAMESPACE_PORTAL_PROVIDER_NAME="Namespace Portal" - export AUTHENTIK_NAMESPACE_PORTAL_TEMPLATE_SLUG=${lib.escapeShellArg cfg.headscaleProviderSlug} - export AUTHENTIK_NAMESPACE_PORTAL_CLIENT_ID=${lib.escapeShellArg cfg.namespacePortalClientId} - ${lib.optionalString (cfg.namespacePortalClientSecretFile != null) '' - export AUTHENTIK_NAMESPACE_PORTAL_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.namespacePortalClientSecretFile})" - ''} - export AUTHENTIK_NAMESPACE_PORTAL_LAUNCH_URL=https://${cfg.namespacePortalDomain}/ - export AUTHENTIK_NAMESPACE_PORTAL_REDIRECT_URIS_JSON='["https://${cfg.namespacePortalDomain}/oauth/callback"]' 
- - ${pkgs.bash}/bin/bash ${namespacePortalOidcSyncScript} - ''; - }; - services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' encode gzip zstd reverse_proxy 127.0.0.1:${toString cfg.port} diff --git a/nixos/modules/burrow-namespace-portal.nix b/nixos/modules/burrow-namespace-portal.nix deleted file mode 100644 index 2eb7b24..0000000 --- a/nixos/modules/burrow-namespace-portal.nix +++ /dev/null @@ -1,126 +0,0 @@ -{ config, lib, pkgs, self, ... }: - -let - cfg = config.services.burrow.namespacePortal; - burrowExe = lib.getExe self.packages.${pkgs.system}.burrow; - nscExe = lib.getExe self.packages.${pkgs.system}.nsc; -in -{ - options.services.burrow.namespacePortal = { - enable = lib.mkEnableOption "the Burrow Namespace authentication portal"; - - domain = lib.mkOption { - type = lib.types.str; - default = "nsc.burrow.net"; - description = "Public domain for the Namespace portal."; - }; - - port = lib.mkOption { - type = lib.types.port; - default = 9080; - description = "Local listen port for the Namespace portal."; - }; - - baseUrl = lib.mkOption { - type = lib.types.str; - default = "https://nsc.burrow.net"; - description = "Public base URL for redirects."; - }; - - oidcProviderSlug = lib.mkOption { - type = lib.types.str; - default = "namespace"; - description = "Authentik provider slug used for the portal."; - }; - - oidcClientId = lib.mkOption { - type = lib.types.str; - default = "nsc.burrow.net"; - description = "OIDC client ID used by the portal."; - }; - - oidcClientSecretFile = lib.mkOption { - type = lib.types.nullOr lib.types.str; - default = null; - description = "Optional host-local OIDC client secret for the portal."; - }; - - adminGroup = lib.mkOption { - type = lib.types.str; - default = "burrow-admins"; - description = "Authentik group required to access the portal."; - }; - - stateDir = lib.mkOption { - type = lib.types.str; - default = "/var/lib/burrow/namespace-portal"; - description = "Persistent state directory for the portal-owned NSC 
session."; - }; - - tokenOutputPath = lib.mkOption { - type = lib.types.str; - default = "/var/lib/burrow/intake/forgejo_nsc_token.txt"; - description = "Path where refreshed NSC tokens should be written."; - }; - }; - - config = lib.mkIf cfg.enable { - assertions = [ - { - assertion = config.services.forgejo-nsc.enable; - message = "services.burrow.namespacePortal requires services.forgejo-nsc.enable"; - } - ]; - - systemd.tmpfiles.rules = [ - "d ${cfg.stateDir} 0750 forgejo-nsc forgejo-nsc -" - "d ${cfg.stateDir}/nsc 0750 forgejo-nsc forgejo-nsc -" - ]; - - systemd.services.burrow-namespace-portal = { - description = "Burrow Namespace authentication portal"; - after = [ - "network-online.target" - "burrow-authentik-ready.service" - ]; - wants = [ - "network-online.target" - "burrow-authentik-ready.service" - ]; - wantedBy = [ "multi-user.target" ]; - path = [ - self.packages.${pkgs.system}.burrow - self.packages.${pkgs.system}.nsc - pkgs.coreutils - ]; - serviceConfig = { - Type = "simple"; - User = "forgejo-nsc"; - Group = "forgejo-nsc"; - WorkingDirectory = cfg.stateDir; - Restart = "on-failure"; - RestartSec = "2s"; - }; - script = '' - set -euo pipefail - export BURROW_NAMESPACE_PORTAL_LISTEN=127.0.0.1:${toString cfg.port} - export BURROW_NAMESPACE_PORTAL_BASE_URL=${lib.escapeShellArg cfg.baseUrl} - export BURROW_NAMESPACE_PORTAL_OIDC_DISCOVERY_URL=${lib.escapeShellArg "https://${config.services.burrow.authentik.domain}/application/o/${cfg.oidcProviderSlug}/.well-known/openid-configuration"} - export BURROW_NAMESPACE_PORTAL_OIDC_CLIENT_ID=${lib.escapeShellArg cfg.oidcClientId} - export BURROW_NAMESPACE_PORTAL_ALLOWED_GROUP=${lib.escapeShellArg cfg.adminGroup} - export BURROW_NAMESPACE_PORTAL_NSC_BIN=${lib.escapeShellArg nscExe} - export BURROW_NAMESPACE_PORTAL_NSC_STATE_DIR=${lib.escapeShellArg "${cfg.stateDir}/nsc"} - export BURROW_NAMESPACE_PORTAL_TOKEN_OUTPUT_PATH=${lib.escapeShellArg cfg.tokenOutputPath} - ${lib.optionalString (cfg.oidcClientSecretFile != 
null) '' - export BURROW_NAMESPACE_PORTAL_OIDC_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.oidcClientSecretFile})" - ''} - exec ${burrowExe} namespace-portal - ''; - }; - - services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' - encode gzip zstd - reverse_proxy 127.0.0.1:${toString cfg.port} - ''; - }; -} diff --git a/secrets.nix b/secrets.nix index c0b9b53..a8fb923 100644 --- a/secrets.nix +++ b/secrets.nix @@ -16,6 +16,9 @@ in "secrets/infra/authentik-google-client-secret.age".publicKeys = burrowForgeRecipients; "secrets/infra/authentik-ui-test-password.age".publicKeys = uiTestRecipients; "secrets/infra/forgejo-oidc-client-secret.age".publicKeys = burrowForgeRecipients; + "secrets/infra/forgejo-nsc-autoscaler-config.age".publicKeys = burrowForgeRecipients; + "secrets/infra/forgejo-nsc-dispatcher-config.age".publicKeys = burrowForgeRecipients; + "secrets/infra/forgejo-nsc-token.age".publicKeys = burrowForgeRecipients; "secrets/infra/headscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients; "secrets/infra/tailscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients; } diff --git a/secrets/infra/forgejo-nsc-autoscaler-config.age b/secrets/infra/forgejo-nsc-autoscaler-config.age new file mode 100644 index 0000000000000000000000000000000000000000..28e3d4ae7ab2661a31c8175b638ac203ef7414f5 GIT binary patch literal 1264 zcmYdHPt{G$OD?J`D9Oyv)5|YP*Do{V(zR14F3!+RO))YxHMCSHtuXPk2vn%ZPch6g z^K>k8clR#$4b1T@sj4h43Qut=*Uzpjb`K0Psz{8;@+b}t&gKfL3^d5|%`fms&kQZd z_6sym&eS&b^{Xt(F9{6Hc1kWZ4>n8kC@M0`k3_f4vnVRpFFU78w>%WtJfsNd}RoVYx;= zl|jKt9vNQQ5n0}4`B5c-?!H__AtvdbK`Gwah6aX48Sa(YA*EGGfu82RWjR$Dm5$ol zX^91?S*aFD#jaesy1EKxnLg#om8G6RrvBxwQB`j4LE44BSsoQ8hUxw;!J%$Bx#f-~ ziQazUKKWexCro>@`sL+AF2P%-?VnSA_v+14+vPW86nk&p{X#nQuy^3>RiEE5`Coo} z?rF!3{j&?#^giCOc%DtTWR=@fL$7P=bvA8Z=eTNB{fexA>Y_T^d6?fYiV{doL(PWGBpH+sU}soI|UzCk`oGOo3gt0?<(1%Ho#t=sCSPxQ_6MaZ@Jb&dx{NTgw8oPPsTkz`DNwrLn`}=9mOuGWzVs56Zvh=?;X0j zljR_Paf|yZc7OZ{PowBi~aXT&kjX(VB&C 
z8f$LOdhX|Y{BXb}`Jm22{+SzR_VJgCP5%5-zHVb#+1_`WcceK~wD!a)J<7?dmuCLcQgw*qZB}LT^+8I2-b%V$upd?Otw`bdkML zXd_l~_1Nu=i+p1@Jzn}QfbV|Nxy5Virt3_5Hg|2hrTp@Ubu*o{y$;;7xxg2%x6Up` zHdgZI+Su7ef2?m9@m_qsASO({D7e2uc0;6X*uSgG#9mK0FQ6^-H)~<)5_!e2y6-D~ zu3G#;@XH$K!B_TB3$myc?H(4!)Us*A8LLvj+w%%v^Yk0xE|MH1d zH#aZW&X|2^=c%gd=;DBtZ_VnZ?^?x3x2c)En0WGE=FYyK%ySRPFs$y@`K;%g)Wn)t zrrdfhSbIS?Gjp-FOu|3r_eVX`=CfRTT(x!G0{J!F3P1ka+5R#*{cF?tPl~76rv6Q8 zJs=XK>Adik;(pF2qNU3wRo%THy}Mn*s=DxTt7Ed48`Ql_>`@MoUi!f$ z`i7<^6=^w1Zs}aQy1EKw#vYz&g}$XFUU?}_6(td=S(#qBW}acD#r~1nP6j?nxyk<8 zMxo|zMJ8N~ZF~a0p;1kLJGi)Bwru4*`t8_5*vAfi zCw!wiR$S%$wD*-znVtCD=5Gu6R=V%FeKp)#_;10!xh_ zF21PLR`jNG!R0mY)8BDRFVnmCbGGA!t6Tp+V4rvL)pZ?Xv)1cVcc?A6meHqmV2!NT zis}!J>qDpM7TlZ|BBtu*ll4zJ>f-?^-l&=TEf33XW2r2>81!yhx9*k}9Y+2?Q~V|$ zS4#YKTXdECT#ngAx1Suh5Z9lwO+xg6_r$zAy<9O&LN_J{728*yy*!_xFuifkRK;gq zrVliY`(C9zc9~nUE@xliVi9vygW~M(-}>(=tW8jy`$W0HG5dq}hLaqA{Ij2ao+>%< zw;J1Il?nZ)H|&)v{gSic)-9`d9(F=@qL&_b+3&N@di+|AeIFNph}<79qt$QjybU`3 zVWX$W{0$5Lh0QLv$yxVmZ|@;96W<*hv_*TiSBvE~9u?9TPMGxj`{(mAZC0r&0Z~8E!>s<85g!8X{YZb^eF8Fe~F3M+0wJFQQML#E{Y)iPbW+i9EuR|~5 zm*?JH@#8_HW8Bs6Ri}%4m$mu)<1ah)DF2qPvB{39@X+W$F|WCV|{ zztO(JNu}(%tnZj-=`eoD62w<>8J%jM`(4w^On%KSc6^+!(owNiW} zwY6S`yeKz2R`=$;;=~+V_q*;N#rtNwn%xoiUCH67wdFBG9k0L_-7eGq&wceig!5o( zC$Hh#T{D)}-R{~ha?R9_slugDRr77p^R?$@FxYDOX_w|dl6O*N+2CA%AvN#wozOMH zQ)OGCdomjTf9Y`3eR}oWUA@&yk3?LlQTXcLvX?o!{ciN19WQ>UF?y-JQ ssh-ed25519 ux4N8Q yCjzc3QW91l62Y+U2YZqLpTkiZyTJAxQQCiZ+DxHiWI +mG/+2fppo3RITeohTM/Dm1M6fsErtxhOgIeI2FqvoUs +-> ssh-ed25519 IrZmAg +Y59O8SVATZfe8Vu2gis1KNWcL34Ct7M3G34XNURczw +GGkVYcmoUtJRx4zftjLFID2wLtNtCgGVnYuMN8XF74s +-> X25519 xqDMDV9XRhSPlFy2IJPBfpUGuNA9gpX73kg8Pnj48VI +TPZZNrRUK+FzruetDFuJcTzed03d7gkxOv8QAZshBn8 +--- PRD84efdrqDmPeRA8zi0D2V8RmT0tFVbDIVD6U/4KVo +2Wk*cS++j9{4j;`wd3,"gligЇ e`''# "'(=LS3hFjgYIF|0$Fp^` +QknUx78b!>n?9^!=ͮ [a ` ϫ_#?T@]Eβ[,g퟇cjx}.̞f45֕DLH4_HdwXwXkRx7DM,0 7*TU{~ä8yC "/oXCe8-ulYt ;ҖDZdm wFyiIώɅ8F}l"Isu{L!+UBei_Z~D>B)L> Date: Mon, 6 Apr 2026 01:08:24 -0700 Subject: [PATCH 22/59] 
Install nsc on burrow forge host --- nixos/hosts/burrow-forge/default.nix | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 7f6af22..64f45bc 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -1,4 +1,4 @@ -{ config, lib, self, ... }: +{ config, lib, pkgs, self, ... }: let contributors = import ../../../contributors.nix; @@ -44,6 +44,10 @@ in "flakes" ]; + environment.systemPackages = lib.optionals config.services.forgejo-nsc.enable [ + self.packages.${pkgs.stdenv.hostPlatform.system}.nsc + ]; + age.identityPaths = [ "/var/lib/agenix/agenix.key" ]; age.secrets.burrowAuthentikEnv = { file = ../../../secrets/infra/authentik.env.age; From 5e58aafb07e08cac67d01b9a61d9646170a26e2b Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Mon, 6 Apr 2026 01:12:47 -0700 Subject: [PATCH 23/59] Align Forgejo runner labels with workflows --- nixos/hosts/burrow-forge/default.nix | 6 ++++++ nixos/modules/burrow-forge-runner.nix | 17 +++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 64f45bc..bf6330f 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -130,6 +130,12 @@ in services.burrow.forgeRunner = { enable = true; sshPrivateKeyFile = "/var/lib/burrow/intake/agent_at_burrow_net_ed25519"; + labels = [ + "self-hosted" + "linux" + "x86_64" + "burrow-forge" + ]; }; services.forgejo-nsc = { diff --git a/nixos/modules/burrow-forge-runner.nix b/nixos/modules/burrow-forge-runner.nix index 1e183d2..d4ade40 100644 --- a/nixos/modules/burrow-forge-runner.nix +++ b/nixos/modules/burrow-forge-runner.nix @@ -5,8 +5,10 @@ let runnerPkg = pkgs.forgejo-runner; stateDir = cfg.stateDir; runnerFile = "${stateDir}/.runner"; + registrationFingerprintFile = "${stateDir}/.runner-registration-fingerprint"; configFile = 
"${stateDir}/runner.yaml"; labelsCsv = lib.concatStringsSep "," (map (label: "${label}:host") cfg.labels); + registrationFingerprint = builtins.hashString "sha256" "${cfg.instanceUrl}\n${cfg.name}\n${labelsCsv}"; sshPrivateKeyFile = cfg.sshPrivateKeyFile or ""; in { @@ -141,6 +143,17 @@ EOF chown ${cfg.user}:${cfg.group} ${configFile} chmod 0640 ${configFile} + expected_fingerprint=${lib.escapeShellArg registrationFingerprint} + if [ -s ${runnerFile} ]; then + current_fingerprint="" + if [ -s ${registrationFingerprintFile} ]; then + current_fingerprint="$(tr -d '\r\n' < ${registrationFingerprintFile})" + fi + if [ "${"$"}current_fingerprint" != "${"$"}expected_fingerprint" ]; then + rm -f ${runnerFile} ${registrationFingerprintFile} + fi + fi + install -d -m 0700 -o ${cfg.user} -g ${cfg.group} ${stateDir}/.ssh ${pkgs.util-linux}/bin/runuser -u ${cfg.user} -- \ ${pkgs.git}/bin/git config --global user.name ${lib.escapeShellArg cfg.gitUserName} @@ -177,6 +190,10 @@ EOF --name ${lib.escapeShellArg cfg.name} \ --labels ${lib.escapeShellArg labelsCsv} \ --config ${configFile} + + printf '%s\n' "${"$"}expected_fingerprint" > ${registrationFingerprintFile} + chown ${cfg.user}:${cfg.group} ${registrationFingerprintFile} + chmod 0640 ${registrationFingerprintFile} fi ''; }; From fbe864391448fb3fafaa4ef7bc45e2ee96469307 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Mon, 6 Apr 2026 01:15:46 -0700 Subject: [PATCH 24/59] Restart Forgejo runner when registration changes --- nixos/modules/burrow-forge-runner.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/nixos/modules/burrow-forge-runner.nix b/nixos/modules/burrow-forge-runner.nix index d4ade40..034fb38 100644 --- a/nixos/modules/burrow-forge-runner.nix +++ b/nixos/modules/burrow-forge-runner.nix @@ -208,6 +208,7 @@ EOF User = cfg.user; Group = cfg.group; WorkingDirectory = stateDir; + Environment = [ "BURROW_RUNNER_REGISTRATION_FINGERPRINT=${registrationFingerprint}" ]; Restart = "on-failure"; RestartSec = 2; 
ExecStart = pkgs.writeShellScript "burrow-forgejo-runner" '' From aa577c561606e5e391624179150c9f4168030419 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Mon, 6 Apr 2026 04:22:34 -0700 Subject: [PATCH 25/59] Inline Forgejo workflow checkout --- .forgejo/workflows/build-rust.yml | 19 +++++++++++++++---- .forgejo/workflows/build-site.yml | 19 +++++++++++++++---- .forgejo/workflows/lint-governance.yml | 19 +++++++++++++++---- 3 files changed, 45 insertions(+), 12 deletions(-) diff --git a/.forgejo/workflows/build-rust.yml b/.forgejo/workflows/build-rust.yml index 2df1ad3..9ed49e1 100644 --- a/.forgejo/workflows/build-rust.yml +++ b/.forgejo/workflows/build-rust.yml @@ -19,10 +19,21 @@ jobs: runs-on: [self-hosted, linux, x86_64, burrow-forge] steps: - name: Checkout - uses: https://code.forgejo.org/actions/checkout@v4 - with: - token: ${{ github.token }} - fetch-depth: 0 + shell: bash + run: | + set -euo pipefail + repo_url="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git" + if [ ! -d .git ]; then + git init . + fi + if git remote get-url origin >/dev/null 2>&1; then + git remote set-url origin "${repo_url}" + else + git remote add origin "${repo_url}" + fi + git fetch --force --tags origin "${GITHUB_SHA}" + git checkout --force --detach FETCH_HEAD + git clean -ffdqx - name: Test shell: bash diff --git a/.forgejo/workflows/build-site.yml b/.forgejo/workflows/build-site.yml index 6f7c5e2..239b3b2 100644 --- a/.forgejo/workflows/build-site.yml +++ b/.forgejo/workflows/build-site.yml @@ -19,10 +19,21 @@ jobs: runs-on: [self-hosted, linux, x86_64, burrow-forge] steps: - name: Checkout - uses: https://code.forgejo.org/actions/checkout@v4 - with: - token: ${{ github.token }} - fetch-depth: 0 + shell: bash + run: | + set -euo pipefail + repo_url="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git" + if [ ! -d .git ]; then + git init . 
+ fi + if git remote get-url origin >/dev/null 2>&1; then + git remote set-url origin "${repo_url}" + else + git remote add origin "${repo_url}" + fi + git fetch --force --tags origin "${GITHUB_SHA}" + git checkout --force --detach FETCH_HEAD + git clean -ffdqx - name: Build shell: bash diff --git a/.forgejo/workflows/lint-governance.yml b/.forgejo/workflows/lint-governance.yml index 490702e..2db94cc 100644 --- a/.forgejo/workflows/lint-governance.yml +++ b/.forgejo/workflows/lint-governance.yml @@ -15,10 +15,21 @@ jobs: runs-on: [self-hosted, linux, x86_64, burrow-forge] steps: - name: Checkout - uses: https://code.forgejo.org/actions/checkout@v4 - with: - token: ${{ github.token }} - fetch-depth: 0 + shell: bash + run: | + set -euo pipefail + repo_url="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git" + if [ ! -d .git ]; then + git init . + fi + if git remote get-url origin >/dev/null 2>&1; then + git remote set-url origin "${repo_url}" + else + git remote add origin "${repo_url}" + fi + git fetch --force --tags origin "${GITHUB_SHA}" + git checkout --force --detach FETCH_HEAD + git clean -ffdqx - name: Validate BEP metadata shell: bash From bc85e256f2299908468d7007306fd5f62d7e1eeb Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Thu, 9 Apr 2026 20:59:31 -0700 Subject: [PATCH 26/59] Stabilize Forgejo site build --- .forgejo/workflows/build-site.yml | 2 +- site/layout/layout.tsx | 25 +- site/package-lock.json | 3907 +++++++++++++++++++++++++++++ site/pages/index.tsx | 56 +- 4 files changed, 3950 insertions(+), 40 deletions(-) create mode 100644 site/package-lock.json diff --git a/.forgejo/workflows/build-site.yml b/.forgejo/workflows/build-site.yml index 239b3b2..67be5bb 100644 --- a/.forgejo/workflows/build-site.yml +++ b/.forgejo/workflows/build-site.yml @@ -39,4 +39,4 @@ jobs: shell: bash run: | set -euo pipefail - nix develop .#ci -c bash -lc 'cd site && npm install && npm run build' + nix develop .#ci -c bash -lc 'cd site && npm ci --no-audit --no-fund && npm 
run build' diff --git a/site/layout/layout.tsx b/site/layout/layout.tsx index 28ff24d..057aa68 100644 --- a/site/layout/layout.tsx +++ b/site/layout/layout.tsx @@ -1,20 +1,5 @@ -import { Space_Mono, Poppins } from "next/font/google"; import localFont from "next/font/local"; -const space_mono = Space_Mono({ - weight: ["400", "700"], - subsets: ["latin"], - display: "swap", - variable: "--font-space-mono", -}); - -const poppins = Poppins({ - weight: ["400", "500", "600", "700", "800", "900"], - subsets: ["latin"], - display: "swap", - variable: "--font-poppins", -}); - const phantomSans = localFont({ src: [ { @@ -36,10 +21,18 @@ const phantomSans = localFont({ variable: "--font-phantom-sans", }); +const fallbackFontVariables = { + "--font-space-mono": + '"SFMono-Regular", "SF Mono", ui-monospace, Menlo, Monaco, "Cascadia Mono", "Segoe UI Mono", "Roboto Mono", monospace', + "--font-poppins": + 'var(--font-phantom-sans), -apple-system, BlinkMacSystemFont, "Segoe UI", sans-serif', +} as React.CSSProperties; + export default function Layout({ children }: { children: React.ReactNode }) { return (
{children}
diff --git a/site/package-lock.json b/site/package-lock.json new file mode 100644 index 0000000..e1357f9 --- /dev/null +++ b/site/package-lock.json @@ -0,0 +1,3907 @@ +{ + "name": "burrow", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "burrow", + "version": "0.1.0", + "dependencies": { + "@fortawesome/fontawesome-free": "^6.4.2", + "@fortawesome/fontawesome-svg-core": "^6.4.2", + "@fortawesome/free-brands-svg-icons": "^6.4.2", + "@fortawesome/free-solid-svg-icons": "^6.4.2", + "@fortawesome/react-fontawesome": "^0.2.0", + "@headlessui/react": "^1.7.17", + "@headlessui/tailwindcss": "^0.2.0", + "@types/node": "20.5.8", + "@types/react": "18.2.21", + "@types/react-dom": "18.2.7", + "autoprefixer": "10.4.15", + "eslint": "8.48.0", + "eslint-config-next": "13.4.19", + "next": "13.4.19", + "postcss": "8.4.29", + "react": "18.2.0", + "react-dom": "18.2.0", + "tailwindcss": "3.3.3", + "typescript": "5.2.2" + }, + "devDependencies": { + "prettier": "^3.0.3", + "prettier-plugin-tailwindcss": "^0.5.4" + } + }, + "node_modules/@aashutoshrathi/word-wrap": { + "version": "1.2.6", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@babel/runtime": { + "version": "7.22.11", + "license": "MIT", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.8.0", + "license": "MIT", + "engines": { + "node": "^12.0.0 || 
^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.2", + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/js": { + "version": "8.48.0", + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@fortawesome/fontawesome-common-types": { + "version": "6.5.1", + "hasInstallScript": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/@fortawesome/fontawesome-free": { + "version": "6.5.1", + "hasInstallScript": true, + "license": "(CC-BY-4.0 AND OFL-1.1 AND MIT)", + "engines": { + "node": ">=6" + } + }, + "node_modules/@fortawesome/fontawesome-svg-core": { + "version": "6.7.2", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.7.2.tgz", + "integrity": "sha512-yxtOBWDrdi5DD5o1pmVdq3WMCvnobT0LU6R8RyyVXPvFRd2o79/0NCuQoCjNTeZz9EzA9xS3JxNWfv54RIHFEA==", + "license": "MIT", + "dependencies": { + "@fortawesome/fontawesome-common-types": "6.7.2" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@fortawesome/fontawesome-svg-core/node_modules/@fortawesome/fontawesome-common-types": { + "version": "6.7.2", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.7.2.tgz", + "integrity": "sha512-Zs+YeHUC5fkt7Mg1l6XTniei3k4bwG/yo3iFUtZWd/pMx9g3fdvkSK9E0FOC+++phXOka78uJcYb8JaFkW52Xg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/@fortawesome/free-brands-svg-icons": { + "version": "6.5.1", + "hasInstallScript": true, + "license": "(CC-BY-4.0 AND MIT)", + 
"dependencies": { + "@fortawesome/fontawesome-common-types": "6.5.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@fortawesome/free-solid-svg-icons": { + "version": "6.5.1", + "hasInstallScript": true, + "license": "(CC-BY-4.0 AND MIT)", + "dependencies": { + "@fortawesome/fontawesome-common-types": "6.5.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@fortawesome/react-fontawesome": { + "version": "0.2.0", + "license": "MIT", + "dependencies": { + "prop-types": "^15.8.1" + }, + "peerDependencies": { + "@fortawesome/fontawesome-svg-core": "~1 || ~6", + "react": ">=16.3" + } + }, + "node_modules/@headlessui/react": { + "version": "1.7.18", + "license": "MIT", + "dependencies": { + "@tanstack/react-virtual": "^3.0.0-beta.60", + "client-only": "^0.0.1" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "react": "^16 || ^17 || ^18", + "react-dom": "^16 || ^17 || ^18" + } + }, + "node_modules/@headlessui/tailwindcss": { + "version": "0.2.0", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "tailwindcss": "^3.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.11.11", + "license": "Apache-2.0", + "dependencies": { + "@humanwhocodes/object-schema": "^1.2.1", + "debug": "^4.1.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "1.2.1", + "license": "BSD-3-Clause" + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.3", + "license": "MIT", + "dependencies": { + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + 
"node_modules/@jridgewell/resolve-uri": { + "version": "3.1.1", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.1.2", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.19", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@next/env": { + "version": "13.4.19", + "license": "MIT" + }, + "node_modules/@next/eslint-plugin-next": { + "version": "13.4.19", + "license": "MIT", + "dependencies": { + "glob": "7.1.7" + } + }, + "node_modules/@next/swc-darwin-arm64": { + "version": "13.4.19", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-darwin-x64": { + "version": "13.4.19", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.4.19.tgz", + "integrity": "sha512-jyzO6wwYhx6F+7gD8ddZfuqO4TtpJdw3wyOduR4fxTUCm3aLw7YmHGYNjS0xRSYGAkLpBkH1E0RcelyId6lNsw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-gnu": { + "version": "13.4.19", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.4.19.tgz", + "integrity": "sha512-vdlnIlaAEh6H+G6HrKZB9c2zJKnpPVKnA6LBwjwT2BTjxI7e0Hx30+FoWCgi50e+YO49p6oPOtesP9mXDRiiUg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-musl": { + "version": "13.4.19", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.4.19.tgz", + "integrity": 
"sha512-aU0HkH2XPgxqrbNRBFb3si9Ahu/CpaR5RPmN2s9GiM9qJCiBBlZtRTiEca+DC+xRPyCThTtWYgxjWHgU7ZkyvA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-gnu": { + "version": "13.4.19", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.4.19.tgz", + "integrity": "sha512-htwOEagMa/CXNykFFeAHHvMJeqZfNQEoQvHfsA4wgg5QqGNqD5soeCer4oGlCol6NGUxknrQO6VEustcv+Md+g==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-musl": { + "version": "13.4.19", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.4.19.tgz", + "integrity": "sha512-4Gj4vvtbK1JH8ApWTT214b3GwUh9EKKQjY41hH/t+u55Knxi/0wesMzwQRhppK6Ddalhu0TEttbiJ+wRcoEj5Q==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-arm64-msvc": { + "version": "13.4.19", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.4.19.tgz", + "integrity": "sha512-bUfDevQK4NsIAHXs3/JNgnvEY+LRyneDN788W2NYiRIIzmILjba7LaQTfihuFawZDhRtkYCv3JDC3B4TwnmRJw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-ia32-msvc": { + "version": "13.4.19", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.4.19.tgz", + "integrity": "sha512-Y5kikILFAr81LYIFaw6j/NrOtmiM4Sf3GtOc0pn50ez2GCkr+oejYuKGcwAwq3jiTKuzF6OF4iT2INPoxRycEA==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-x64-msvc": { + "version": "13.4.19", + "resolved": 
"https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.4.19.tgz", + "integrity": "sha512-YzA78jBDXMYiINdPdJJwGgPNT3YqBNNGhsthsDoWHL9p24tEJn9ViQf/ZqTbwSpX/RrkPupLfuuTH2sf73JBAw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@rushstack/eslint-patch": { + "version": "1.3.3", + "license": "MIT" + }, + "node_modules/@swc/helpers": { + "version": "0.5.1", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@tanstack/react-virtual": { + "version": "3.2.0", + "license": "MIT", + "dependencies": { + "@tanstack/virtual-core": "3.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/@tanstack/virtual-core": { + "version": "3.2.0", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@types/json5": { + "version": "0.0.29", + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "20.5.8", + "license": "MIT" + }, + "node_modules/@types/prop-types": { + "version": "15.7.5", + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "18.2.21", + "license": "MIT", + "dependencies": { + "@types/prop-types": "*", + 
"@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.2.7", + "license": "MIT", + "dependencies": { + "@types/react": "*" + } + }, + "node_modules/@types/scheduler": { + "version": "0.16.3", + "license": "MIT" + }, + "node_modules/@typescript-eslint/parser": { + "version": "6.5.0", + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/scope-manager": "6.5.0", + "@typescript-eslint/types": "6.5.0", + "@typescript-eslint/typescript-estree": "6.5.0", + "@typescript-eslint/visitor-keys": "6.5.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "6.5.0", + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "6.5.0", + "@typescript-eslint/visitor-keys": "6.5.0" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "6.5.0", + "license": "MIT", + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "6.5.0", + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/types": "6.5.0", + "@typescript-eslint/visitor-keys": "6.5.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + 
"peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { + "version": "7.5.4", + "license": "ISC", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "6.5.0", + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "6.5.0", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/acorn": { + "version": "8.10.0", + "license": "MIT", + "peer": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "license": 
"MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "license": "Python-2.0" + }, + "node_modules/aria-query": { + "version": "5.3.0", + "license": "Apache-2.0", + "dependencies": { + "dequal": "^2.0.3" + } + }, + "node_modules/array-buffer-byte-length": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "is-array-buffer": "^3.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-includes": { + "version": "3.1.6", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4", + "get-intrinsic": "^1.1.3", + "is-string": "^1.0.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-union": { + "version": "2.1.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/array.prototype.findlastindex": { + "version": "1.2.3", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0", + "get-intrinsic": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flat": { + "version": "1.3.1", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4", + "es-shim-unscopables": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flatmap": { + "version": "1.3.1", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4", + "es-shim-unscopables": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + 
"node_modules/array.prototype.tosorted": { + "version": "1.1.1", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4", + "es-shim-unscopables": "^1.0.0", + "get-intrinsic": "^1.1.3" + } + }, + "node_modules/arraybuffer.prototype.slice": { + "version": "1.0.1", + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.0", + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "get-intrinsic": "^1.2.1", + "is-array-buffer": "^3.0.2", + "is-shared-array-buffer": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ast-types-flow": { + "version": "0.0.7", + "license": "ISC" + }, + "node_modules/asynciterator.prototype": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + } + }, + "node_modules/autoprefixer": { + "version": "10.4.15", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.21.10", + "caniuse-lite": "^1.0.30001520", + "fraction.js": "^4.2.0", + "normalize-range": "^0.1.2", + "picocolors": "^1.0.0", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/available-typed-arrays": { + "version": "1.0.5", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/axe-core": { + "version": "4.7.2", + "license": "MPL-2.0", + "engines": { + "node": ">=4" + } + }, + "node_modules/axobject-query": { + "version": "3.2.1", + "license": 
"Apache-2.0", + "dependencies": { + "dequal": "^2.0.3" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "license": "MIT" + }, + "node_modules/binary-extensions": { + "version": "2.2.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "license": "MIT", + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.21.10", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "caniuse-lite": "^1.0.30001517", + "electron-to-chromium": "^1.4.477", + "node-releases": "^2.0.13", + "update-browserslist-db": "^1.0.11" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/busboy": { + "version": "1.6.0", + "dependencies": { + "streamsearch": "^1.1.0" + }, + "engines": { + "node": ">=10.16.0" + } + }, + "node_modules/call-bind": { + "version": "1.0.2", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001525", + "funding": [ + { + "type": "opencollective", + "url": 
"https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chalk": { + "version": "4.1.2", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chokidar": { + "version": "3.5.3", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/client-only": { + "version": "0.0.1", + "license": "MIT" + }, + "node_modules/color-convert": { + "version": "2.0.1", + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "license": "MIT" + }, + "node_modules/commander": { + "version": "4.1.1", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" 
+ }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.1.2", + "license": "MIT" + }, + "node_modules/damerau-levenshtein": { + "version": "1.0.8", + "license": "BSD-2-Clause" + }, + "node_modules/debug": { + "version": "4.3.4", + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "license": "MIT" + }, + "node_modules/define-properties": { + "version": "1.2.0", + "license": "MIT", + "dependencies": { + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/didyoumean": { + "version": "1.2.2", + "license": "Apache-2.0" + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "license": "MIT", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dlv": { + "version": "1.1.3", + "license": "MIT" + }, + "node_modules/doctrine": { + "version": "3.0.0", + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.4.508", + "license": "ISC" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "license": "MIT" + }, + "node_modules/enhanced-resolve": { + "version": "5.15.0", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/es-abstract": { + "version": "1.22.1", + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.0", + "arraybuffer.prototype.slice": "^1.0.1", + "available-typed-arrays": "^1.0.5", + "call-bind": "^1.0.2", + 
"es-set-tostringtag": "^2.0.1", + "es-to-primitive": "^1.2.1", + "function.prototype.name": "^1.1.5", + "get-intrinsic": "^1.2.1", + "get-symbol-description": "^1.0.0", + "globalthis": "^1.0.3", + "gopd": "^1.0.1", + "has": "^1.0.3", + "has-property-descriptors": "^1.0.0", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.5", + "is-array-buffer": "^3.0.2", + "is-callable": "^1.2.7", + "is-negative-zero": "^2.0.2", + "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.2", + "is-string": "^1.0.7", + "is-typed-array": "^1.1.10", + "is-weakref": "^1.0.2", + "object-inspect": "^1.12.3", + "object-keys": "^1.1.1", + "object.assign": "^4.1.4", + "regexp.prototype.flags": "^1.5.0", + "safe-array-concat": "^1.0.0", + "safe-regex-test": "^1.0.0", + "string.prototype.trim": "^1.2.7", + "string.prototype.trimend": "^1.0.6", + "string.prototype.trimstart": "^1.0.6", + "typed-array-buffer": "^1.0.0", + "typed-array-byte-length": "^1.0.0", + "typed-array-byte-offset": "^1.0.0", + "typed-array-length": "^1.0.4", + "unbox-primitive": "^1.0.2", + "which-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-iterator-helpers": { + "version": "1.0.14", + "license": "MIT", + "dependencies": { + "asynciterator.prototype": "^1.0.0", + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-set-tostringtag": "^2.0.1", + "function-bind": "^1.1.1", + "get-intrinsic": "^1.2.1", + "globalthis": "^1.0.3", + "has-property-descriptors": "^1.0.0", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.5", + "iterator.prototype": "^1.1.0", + "safe-array-concat": "^1.0.0" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.0.1", + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.1.3", + "has": "^1.0.3", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + 
"node_modules/es-shim-unscopables": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "has": "^1.0.3" + } + }, + "node_modules/es-to-primitive": { + "version": "1.2.1", + "license": "MIT", + "dependencies": { + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/escalade": { + "version": "3.1.1", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.48.0", + "license": "MIT", + "peer": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.2", + "@eslint/js": "8.48.0", + "@humanwhocodes/config-array": "^0.11.10", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 
|| >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-config-next": { + "version": "13.4.19", + "license": "MIT", + "dependencies": { + "@next/eslint-plugin-next": "13.4.19", + "@rushstack/eslint-patch": "^1.1.3", + "@typescript-eslint/parser": "^5.4.2 || ^6.0.0", + "eslint-import-resolver-node": "^0.3.6", + "eslint-import-resolver-typescript": "^3.5.2", + "eslint-plugin-import": "^2.26.0", + "eslint-plugin-jsx-a11y": "^6.5.1", + "eslint-plugin-react": "^7.31.7", + "eslint-plugin-react-hooks": "^4.5.0 || 5.0.0-canary-7118f5dd7-20230705" + }, + "peerDependencies": { + "eslint": "^7.23.0 || ^8.0.0", + "typescript": ">=3.3.1" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/eslint-import-resolver-node": { + "version": "0.3.9", + "license": "MIT", + "dependencies": { + "debug": "^3.2.7", + "is-core-module": "^2.13.0", + "resolve": "^1.22.4" + } + }, + "node_modules/eslint-import-resolver-node/node_modules/debug": { + "version": "3.2.7", + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-import-resolver-typescript": { + "version": "3.6.0", + "license": "ISC", + "dependencies": { + "debug": "^4.3.4", + "enhanced-resolve": "^5.12.0", + "eslint-module-utils": "^2.7.4", + "fast-glob": "^3.3.1", + "get-tsconfig": "^4.5.0", + "is-core-module": "^2.11.0", + "is-glob": "^4.0.3" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/unts/projects/eslint-import-resolver-ts" + }, + "peerDependencies": { + "eslint": "*", + "eslint-plugin-import": "*" + } + }, + "node_modules/eslint-module-utils": { + "version": "2.8.0", + "license": "MIT", + "dependencies": { + "debug": "^3.2.7" + }, + "engines": { + "node": ">=4" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + } + } + }, + "node_modules/eslint-module-utils/node_modules/debug": { + "version": "3.2.7", + "license": 
"MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-plugin-import": { + "version": "2.28.1", + "license": "MIT", + "peer": true, + "dependencies": { + "array-includes": "^3.1.6", + "array.prototype.findlastindex": "^1.2.2", + "array.prototype.flat": "^1.3.1", + "array.prototype.flatmap": "^1.3.1", + "debug": "^3.2.7", + "doctrine": "^2.1.0", + "eslint-import-resolver-node": "^0.3.7", + "eslint-module-utils": "^2.8.0", + "has": "^1.0.3", + "is-core-module": "^2.13.0", + "is-glob": "^4.0.3", + "minimatch": "^3.1.2", + "object.fromentries": "^2.0.6", + "object.groupby": "^1.0.0", + "object.values": "^1.1.6", + "semver": "^6.3.1", + "tsconfig-paths": "^3.14.2" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8" + } + }, + "node_modules/eslint-plugin-import/node_modules/debug": { + "version": "3.2.7", + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-plugin-import/node_modules/doctrine": { + "version": "2.1.0", + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eslint-plugin-jsx-a11y": { + "version": "6.7.1", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.20.7", + "aria-query": "^5.1.3", + "array-includes": "^3.1.6", + "array.prototype.flatmap": "^1.3.1", + "ast-types-flow": "^0.0.7", + "axe-core": "^4.6.2", + "axobject-query": "^3.1.1", + "damerau-levenshtein": "^1.0.8", + "emoji-regex": "^9.2.2", + "has": "^1.0.3", + "jsx-ast-utils": "^3.3.3", + "language-tags": "=1.0.5", + "minimatch": "^3.1.2", + "object.entries": "^1.1.6", + "object.fromentries": "^2.0.6", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=4.0" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8" + } + }, + "node_modules/eslint-plugin-jsx-a11y/node_modules/semver": { + "version": "6.3.1", + "license": "ISC", + "bin": { + "semver": 
"bin/semver.js" + } + }, + "node_modules/eslint-plugin-react": { + "version": "7.33.2", + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.6", + "array.prototype.flatmap": "^1.3.1", + "array.prototype.tosorted": "^1.1.1", + "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.0.12", + "estraverse": "^5.3.0", + "jsx-ast-utils": "^2.4.1 || ^3.0.0", + "minimatch": "^3.1.2", + "object.entries": "^1.1.6", + "object.fromentries": "^2.0.6", + "object.hasown": "^1.1.2", + "object.values": "^1.1.6", + "prop-types": "^15.8.1", + "resolve": "^2.0.0-next.4", + "semver": "^6.3.1", + "string.prototype.matchall": "^4.0.8" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8" + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "4.6.0", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/eslint-plugin-react/node_modules/doctrine": { + "version": "2.1.0", + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eslint-plugin-react/node_modules/resolve": { + "version": "2.0.0-next.4", + "license": "MIT", + "dependencies": { + "is-core-module": "^2.9.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/eslint-plugin-react/node_modules/semver": { + "version": "6.3.1", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + 
"node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.5.0", + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.1", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "license": "MIT" + }, + "node_modules/fastq": { + "version": "1.15.0", + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + 
"node_modules/file-entry-cache": { + "version": "6.0.1", + "license": "MIT", + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.1.0", + "license": "MIT", + "dependencies": { + "flatted": "^3.2.7", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.2.7", + "license": "ISC" + }, + "node_modules/for-each": { + "version": "0.3.3", + "license": "MIT", + "dependencies": { + "is-callable": "^1.1.3" + } + }, + "node_modules/fraction.js": { + "version": "4.3.6", + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.1", + "license": "MIT" + }, + "node_modules/function.prototype.name": { + "version": "1.1.6", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "functions-have-names": "^1.2.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/functions-have-names": { + "version": "1.2.3", + "license": "MIT", + "funding": { 
+ "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.1", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-symbol-description": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-tsconfig": { + "version": "4.7.0", + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/glob": { + "version": "7.1.7", + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "license": "BSD-2-Clause" + }, + "node_modules/globals": { + "version": "13.21.0", + "license": "MIT", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globalthis": { + "version": "1.0.3", + "license": "MIT", + "dependencies": { + "define-properties": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "license": "MIT", + "dependencies": { + "array-union": 
"^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.0.1", + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "license": "ISC" + }, + "node_modules/graphemer": { + "version": "1.4.0", + "license": "MIT" + }, + "node_modules/has": { + "version": "1.0.3", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.1" + }, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/has-bigints": { + "version": "1.0.2", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.1.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.1", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ignore": { + "version": "5.2.4", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "license": 
"MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "license": "ISC" + }, + "node_modules/internal-slot": { + "version": "1.0.5", + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.0", + "has": "^1.0.3", + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/is-array-buffer": { + "version": "3.0.2", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.2.0", + "is-typed-array": "^1.1.10" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-async-function": { + "version": "2.0.0", + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-bigint": { + "version": "1.0.4", + "license": "MIT", + "dependencies": { + "has-bigints": "^1.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-boolean-object": { + "version": "1.1.2", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-callable": { + "version": "1.2.7", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, 
+ "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-core-module": { + "version": "2.13.0", + "license": "MIT", + "dependencies": { + "has": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-date-object": { + "version": "1.0.5", + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-finalizationregistry": { + "version": "1.0.2", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-generator-function": { + "version": "1.0.10", + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-map": { + "version": "2.0.2", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-negative-zero": { + "version": "2.0.2", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-number-object": { + "version": "1.0.7", + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "license": 
"MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-regex": { + "version": "1.1.4", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-set": { + "version": "2.0.2", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-shared-array-buffer": { + "version": "1.0.2", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-string": { + "version": "1.0.7", + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-symbol": { + "version": "1.0.4", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.12", + "license": "MIT", + "dependencies": { + "which-typed-array": "^1.1.11" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakmap": { + "version": "2.0.1", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakref": { + "version": "1.0.2", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakset": { + "version": "2.0.2", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/isarray": { + "version": "2.0.5", + "license": "MIT" + }, + 
"node_modules/isexe": { + "version": "2.0.0", + "license": "ISC" + }, + "node_modules/iterator.prototype": { + "version": "1.1.1", + "license": "MIT", + "dependencies": { + "define-properties": "^1.2.0", + "get-intrinsic": "^1.2.1", + "has-symbols": "^1.0.3", + "reflect.getprototypeof": "^1.0.3" + } + }, + "node_modules/jiti": { + "version": "1.19.3", + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "license": "MIT" + }, + "node_modules/json5": { + "version": "1.0.2", + "license": "MIT", + "dependencies": { + "minimist": "^1.2.0" + }, + "bin": { + "json5": "lib/cli.js" + } + }, + "node_modules/jsx-ast-utils": { + "version": "3.3.5", + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.6", + "array.prototype.flat": "^1.3.1", + "object.assign": "^4.1.4", + "object.values": "^1.1.6" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/keyv": { + "version": "4.5.3", + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/language-subtag-registry": { + "version": "0.3.22", + "license": "CC0-1.0" + }, + "node_modules/language-tags": { + "version": "1.0.5", + "license": "MIT", + "dependencies": { + "language-subtag-registry": "~0.3.2" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lilconfig": { + "version": "2.1.0", + "license": "MIT", + "engines": { + 
"node": ">=10" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "license": "MIT" + }, + "node_modules/locate-path": { + "version": "6.0.0", + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "license": "MIT" + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lru-cache": { + "version": "6.0.0", + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.5", + "license": "MIT", + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "license": "MIT" + }, + "node_modules/mz": { + "version": "2.7.0", + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.6", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "license": "MIT" + }, + "node_modules/next": { + 
"version": "13.4.19", + "license": "MIT", + "dependencies": { + "@next/env": "13.4.19", + "@swc/helpers": "0.5.1", + "busboy": "1.6.0", + "caniuse-lite": "^1.0.30001406", + "postcss": "8.4.14", + "styled-jsx": "5.1.1", + "watchpack": "2.4.0", + "zod": "3.21.4" + }, + "bin": { + "next": "dist/bin/next" + }, + "engines": { + "node": ">=16.8.0" + }, + "optionalDependencies": { + "@next/swc-darwin-arm64": "13.4.19", + "@next/swc-darwin-x64": "13.4.19", + "@next/swc-linux-arm64-gnu": "13.4.19", + "@next/swc-linux-arm64-musl": "13.4.19", + "@next/swc-linux-x64-gnu": "13.4.19", + "@next/swc-linux-x64-musl": "13.4.19", + "@next/swc-win32-arm64-msvc": "13.4.19", + "@next/swc-win32-ia32-msvc": "13.4.19", + "@next/swc-win32-x64-msvc": "13.4.19" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.1.0", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "sass": "^1.3.0" + }, + "peerDependenciesMeta": { + "@opentelemetry/api": { + "optional": true + }, + "sass": { + "optional": true + } + } + }, + "node_modules/next/node_modules/postcss": { + "version": "8.4.14", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.4", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/node-releases": { + "version": "2.0.13", + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-range": { + "version": "0.1.2", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "license": "MIT", + "engines": { + "node": ">= 6" + } + 
}, + "node_modules/object-inspect": { + "version": "1.12.3", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.4", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "has-symbols": "^1.0.3", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.entries": { + "version": "1.1.7", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.fromentries": { + "version": "2.0.7", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.groupby": { + "version": "1.0.1", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1" + } + }, + "node_modules/object.hasown": { + "version": "1.1.3", + "license": "MIT", + "dependencies": { + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.values": { + "version": "1.1.7", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/once": { + "version": "1.4.0", + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + 
"node_modules/optionator": { + "version": "0.9.3", + "license": "MIT", + "dependencies": { + "@aashutoshrathi/word-wrap": "^1.2.3", + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "license": "MIT" + }, + "node_modules/path-type": { + "version": "4.0.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pirates": { + "version": "4.0.6", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/postcss": { + 
"version": "8.4.29", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-import": { + "version": "15.1.0", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-js": { + "version": "4.0.1", + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-load-config": { + "version": "4.0.1", + "license": "MIT", + "dependencies": { + "lilconfig": "^2.0.5", + "yaml": "^2.1.1" + }, + "engines": { + "node": ">= 14" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": ">=8.0.9", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "postcss": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/postcss-nested": { + "version": "6.0.1", + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.0.11" + }, + "engines": { + "node": ">=12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.0.13", + "license": "MIT", + "dependencies": 
{ + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "license": "MIT" + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prettier": { + "version": "3.2.5", + "dev": true, + "license": "MIT", + "peer": true, + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/prettier-plugin-tailwindcss": { + "version": "0.5.13", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.21.3" + }, + "peerDependencies": { + "@ianvs/prettier-plugin-sort-imports": "*", + "@prettier/plugin-pug": "*", + "@shopify/prettier-plugin-liquid": "*", + "@trivago/prettier-plugin-sort-imports": "*", + "@zackad/prettier-plugin-twig-melody": "*", + "prettier": "^3.0", + "prettier-plugin-astro": "*", + "prettier-plugin-css-order": "*", + "prettier-plugin-import-sort": "*", + "prettier-plugin-jsdoc": "*", + "prettier-plugin-marko": "*", + "prettier-plugin-organize-attributes": "*", + "prettier-plugin-organize-imports": "*", + "prettier-plugin-sort-imports": "*", + "prettier-plugin-style-order": "*", + "prettier-plugin-svelte": "*" + }, + "peerDependenciesMeta": { + "@ianvs/prettier-plugin-sort-imports": { + "optional": true + }, + "@prettier/plugin-pug": { + "optional": true + }, + "@shopify/prettier-plugin-liquid": { + "optional": true + }, + "@trivago/prettier-plugin-sort-imports": { + "optional": true + }, + "@zackad/prettier-plugin-twig-melody": { + "optional": true + }, + "prettier-plugin-astro": { + "optional": true + }, + "prettier-plugin-css-order": { + "optional": true + }, + "prettier-plugin-import-sort": { + "optional": true + }, + "prettier-plugin-jsdoc": { + "optional": true + }, + "prettier-plugin-marko": { + "optional": true + }, + 
"prettier-plugin-organize-attributes": { + "optional": true + }, + "prettier-plugin-organize-imports": { + "optional": true + }, + "prettier-plugin-sort-imports": { + "optional": true + }, + "prettier-plugin-style-order": { + "optional": true + }, + "prettier-plugin-svelte": { + "optional": true + } + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/punycode": { + "version": "2.3.0", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/react": { + "version": "18.2.0", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.2.0", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.0" + }, + "peerDependencies": { + "react": "^18.2.0" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "license": "MIT" + }, + "node_modules/read-cache": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/reflect.getprototypeof": { + "version": "1.0.4", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1", + "globalthis": "^1.0.3", + "which-builtin-type": "^1.1.3" + }, + "engines": { + "node": 
">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.14.0", + "license": "MIT" + }, + "node_modules/regexp.prototype.flags": { + "version": "1.5.0", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "functions-have-names": "^1.2.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve": { + "version": "1.22.4", + "license": "MIT", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-array-concat": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.2.0", + "has-symbols": "^1.0.3", + 
"isarray": "^2.0.5" + }, + "engines": { + "node": ">=0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-regex-test": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.3", + "is-regex": "^1.1.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/scheduler": { + "version": "0.23.0", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "6.3.1", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.0.4", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map-js": { + "version": "1.0.2", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/streamsearch": { + "version": "1.1.0", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/string.prototype.matchall": { + "version": "4.0.9", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.5", + "regexp.prototype.flags": "^1.5.0", + "side-channel": "^1.0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trim": { + "version": "1.2.7", + 
"license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimend": { + "version": "1.0.6", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimstart": { + "version": "1.0.6", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "3.0.0", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/styled-jsx": { + "version": "5.1.1", + "license": "MIT", + "dependencies": { + "client-only": "0.0.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "peerDependencies": { + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/sucrase": { + "version": "3.34.0", + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "glob": "7.1.6", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + 
"node": ">=8" + } + }, + "node_modules/sucrase/node_modules/glob": { + "version": "7.1.6", + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tailwindcss": { + "version": "3.3.3", + "license": "MIT", + "peer": true, + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.5.3", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.2.12", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.18.2", + "lilconfig": "^2.1.0", + "micromatch": "^4.0.5", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.0.0", + "postcss": "^8.4.23", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.1", + "postcss-nested": "^6.0.1", + "postcss-selector-parser": "^6.0.11", + "resolve": "^1.22.2", + "sucrase": "^3.32.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tapable": { + "version": "2.2.1", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "license": "MIT" + }, + "node_modules/thenify": { + "version": "3.3.1", + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "license": "MIT", + "dependencies": { + "thenify": ">= 
3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-api-utils": { + "version": "1.0.2", + "license": "MIT", + "engines": { + "node": ">=16.13.0" + }, + "peerDependencies": { + "typescript": ">=4.2.0" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "license": "Apache-2.0" + }, + "node_modules/tsconfig-paths": { + "version": "3.14.2", + "license": "MIT", + "dependencies": { + "@types/json5": "^0.0.29", + "json5": "^1.0.2", + "minimist": "^1.2.6", + "strip-bom": "^3.0.0" + } + }, + "node_modules/tslib": { + "version": "2.6.2", + "license": "0BSD" + }, + "node_modules/type-check": { + "version": "0.4.0", + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typed-array-buffer": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.2.1", + "is-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typed-array-byte-length": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "has-proto": "^1.0.1", + "is-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-byte-offset": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.5", + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "has-proto": "^1.0.1", + "is-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + }, + 
"funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-length": { + "version": "1.0.4", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "is-typed-array": "^1.1.9" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typescript": { + "version": "5.2.2", + "license": "Apache-2.0", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/unbox-primitive": { + "version": "1.0.2", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-bigints": "^1.0.2", + "has-symbols": "^1.0.3", + "which-boxed-primitive": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.0.11", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "license": "MIT" + }, + "node_modules/watchpack": { + "version": "2.4.0", + "license": "MIT", + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + 
"node_modules/which-boxed-primitive": { + "version": "1.0.2", + "license": "MIT", + "dependencies": { + "is-bigint": "^1.0.1", + "is-boolean-object": "^1.1.0", + "is-number-object": "^1.0.4", + "is-string": "^1.0.5", + "is-symbol": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-builtin-type": { + "version": "1.1.3", + "license": "MIT", + "dependencies": { + "function.prototype.name": "^1.1.5", + "has-tostringtag": "^1.0.0", + "is-async-function": "^2.0.0", + "is-date-object": "^1.0.5", + "is-finalizationregistry": "^1.0.2", + "is-generator-function": "^1.0.10", + "is-regex": "^1.1.4", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.0.2", + "which-collection": "^1.0.1", + "which-typed-array": "^1.1.9" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-collection": { + "version": "1.0.1", + "license": "MIT", + "dependencies": { + "is-map": "^2.0.1", + "is-set": "^2.0.1", + "is-weakmap": "^2.0.1", + "is-weakset": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-typed-array": { + "version": "1.1.11", + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.5", + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "license": "ISC" + }, + "node_modules/yallist": { + "version": "4.0.0", + "license": "ISC" + }, + "node_modules/yaml": { + "version": "2.3.2", + "license": "ISC", + "engines": { + "node": ">= 14" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/zod": { + "version": "3.21.4", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + } + } +} diff --git a/site/pages/index.tsx b/site/pages/index.tsx index 73fbc33..20d7f1b 100644 --- a/site/pages/index.tsx +++ b/site/pages/index.tsx @@ -1,13 +1,36 @@ -import { faGithub } from "@fortawesome/free-brands-svg-icons"; -import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import Head from "next/head"; -import { - faChevronDown, - faChevronUp, - faUpRightFromSquare, -} from "@fortawesome/free-solid-svg-icons"; import { Menu, Transition } from "@headlessui/react"; import { useState, useRef, useEffect } from "react"; + +function ChevronIcon({ open }: { open: boolean }) { + return ( + + ); +} + +function ExternalLinkIcon() { + return ( + + ); +} + +function GithubIcon() { + return ( + + ); +} + export default function Page() { const [chevron, setChevron] = useState(false); const menuButtonRef = useRef(null); @@ -71,17 +94,7 @@ export default function Page() { className="w-50 h-12 rounded-2xl bg-hackClubRed px-3 font-SpaceMono hover:scale-105 md:h-12 md:w-auto md:rounded-3xl md:text-xl 2xl:h-16 2xl:text-2xl " > Install for Linux - {chevron ? ( - - ) : ( - - )} +
From c58d06dfc1079d567d643cfca852dc451f93c936 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sat, 18 Apr 2026 02:18:22 -0700 Subject: [PATCH 27/59] Move Burrow Google account aliases into agenix --- contributors.nix | 25 ++++++++++++++++-- nixos/hosts/burrow-forge/default.nix | 8 +++++- nixos/modules/burrow-authentik.nix | 24 ++++++++++++----- secrets.nix | 1 + .../authentik-google-account-map.json.age | Bin 0 -> 968 bytes 5 files changed, 48 insertions(+), 10 deletions(-) create mode 100644 secrets/infra/authentik-google-account-map.json.age diff --git a/contributors.nix b/contributors.nix index 22c28b6..36bc1c9 100644 --- a/contributors.nix +++ b/contributors.nix @@ -8,7 +8,6 @@ contact = { displayName = "Burrow"; canonicalEmail = "contact@burrow.net"; - sourceEmail = "net.burrow@gmail.com"; isAdmin = true; forgeAuthorized = true; bootstrapAuthentik = true; @@ -22,7 +21,6 @@ conrad = { displayName = "Conrad Kramer"; canonicalEmail = "conrad@burrow.net"; - sourceEmail = "ckrames1234@gmail.com"; isAdmin = true; forgeAuthorized = false; bootstrapAuthentik = true; @@ -32,6 +30,29 @@ ]; }; + jett = { + displayName = "Jett"; + canonicalEmail = "jett@burrow.net"; + isAdmin = false; + forgeAuthorized = false; + bootstrapAuthentik = true; + roles = [ + "member" + ]; + }; + + davnotdev = { + displayName = "David"; + canonicalEmail = "davnotdev@burrow.net"; + isAdmin = true; + forgeAuthorized = false; + bootstrapAuthentik = true; + roles = [ + "member" + "operator" + ]; + }; + agent = { displayName = "Burrow Agent"; canonicalEmail = "agent@burrow.net"; diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index bf6330f..497d40e 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -13,7 +13,6 @@ let inherit username; name = identity.displayName; email = identity.canonicalEmail; - sourceEmail = identity.sourceEmail or null; isAdmin = identity.isAdmin or false; passwordFile = 
authentikPasswordSecretPath identity; } @@ -85,6 +84,12 @@ in group = "root"; mode = "0400"; }; + age.secrets.burrowAuthentikGoogleAccountMap = { + file = ../../../secrets/infra/authentik-google-account-map.json.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; age.secrets.burrowAuthentikUiTestPassword = { file = ../../../secrets/infra/authentik-ui-test-password.age; owner = "root"; @@ -158,6 +163,7 @@ in tailscaleClientSecretFile = config.age.secrets.burrowTailscaleOidcClientSecret.path; googleClientIDFile = config.age.secrets.burrowAuthentikGoogleClientId.path; googleClientSecretFile = config.age.secrets.burrowAuthentikGoogleClientSecret.path; + googleAccountMapFile = config.age.secrets.burrowAuthentikGoogleAccountMap.path; googleLoginMode = "redirect"; userGroupName = contributors.groups.users; adminGroupName = contributors.groups.admins; diff --git a/nixos/modules/burrow-authentik.nix b/nixos/modules/burrow-authentik.nix index 1616b36..2fa83da 100644 --- a/nixos/modules/burrow-authentik.nix +++ b/nixos/modules/burrow-authentik.nix @@ -180,6 +180,12 @@ in description = "Host-local file containing the Google OAuth client secret for the Authentik source."; }; + googleAccountMapFile = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Optional host-local JSON file mapping external Google accounts onto Burrow Authentik users."; + }; + googleSourceSlug = lib.mkOption { type = lib.types.str; default = "google"; @@ -477,7 +483,7 @@ EOF cfg.envFile cfg.googleClientIDFile cfg.googleClientSecretFile - ]; + ] ++ lib.optional (cfg.googleAccountMapFile != null) cfg.googleAccountMapFile; path = [ pkgs.bash pkgs.coreutils @@ -501,12 +507,16 @@ EOF export AUTHENTIK_GOOGLE_USER_MATCHING_MODE=email_link export AUTHENTIK_GOOGLE_CLIENT_ID="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.googleClientIDFile})" export AUTHENTIK_GOOGLE_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.googleClientSecretFile})" - export 
AUTHENTIK_GOOGLE_ACCOUNT_MAP_JSON='${builtins.toJSON (map (user: { - source_email = user.sourceEmail; - username = user.username; - email = user.email; - name = user.name; - }) (lib.filter (user: user.sourceEmail != null) cfg.bootstrapUsers))}' + if [ -n ${lib.escapeShellArg (cfg.googleAccountMapFile or "")} ]; then + export AUTHENTIK_GOOGLE_ACCOUNT_MAP_JSON="$(tr -d '\n' < ${lib.escapeShellArg (cfg.googleAccountMapFile or "/dev/null")})" + else + export AUTHENTIK_GOOGLE_ACCOUNT_MAP_JSON='${builtins.toJSON (map (user: { + source_email = user.sourceEmail; + username = user.username; + email = user.email; + name = user.name; + }) (lib.filter (user: user.sourceEmail != null) cfg.bootstrapUsers))}' + fi ${pkgs.bash}/bin/bash ${googleSourceSyncScript} ''; diff --git a/secrets.nix b/secrets.nix index a8fb923..e3fd9a2 100644 --- a/secrets.nix +++ b/secrets.nix @@ -14,6 +14,7 @@ in "secrets/infra/authentik.env.age".publicKeys = burrowForgeRecipients; "secrets/infra/authentik-google-client-id.age".publicKeys = burrowForgeRecipients; "secrets/infra/authentik-google-client-secret.age".publicKeys = burrowForgeRecipients; + "secrets/infra/authentik-google-account-map.json.age".publicKeys = burrowForgeRecipients; "secrets/infra/authentik-ui-test-password.age".publicKeys = uiTestRecipients; "secrets/infra/forgejo-oidc-client-secret.age".publicKeys = burrowForgeRecipients; "secrets/infra/forgejo-nsc-autoscaler-config.age".publicKeys = burrowForgeRecipients; diff --git a/secrets/infra/authentik-google-account-map.json.age b/secrets/infra/authentik-google-account-map.json.age new file mode 100644 index 0000000000000000000000000000000000000000..b3cb6f84c8d7f174f404cabbccb26a8525167538 GIT binary patch literal 968 zcmYdHPt{G$OD?J`D9Oyv)5|YP*Do{V(zR14F3!+RO))YxHMCSHtuXPk2vl%Sa`OsJ zc6Tu_%QNvaO7o5M3vzccO7`$E3@SE=a4j}ZaxXW}$PMz&N#@E)jW8>%C@C*=EeZ@u z^frrfOZ5uOtZ?_tOwrG*O!oH+cdLrbuqX{n2}HNevnVRpFSPD#l$by$SkM8$u-f$)vP$rIoT{M%a6U@Uh2>6$ 
zZWaY*e%WTm=3&L29!8di5e8fquAY{8+M!-mj+VJeKE)dxz1m8H%g}n3&iJs-83sPPtV`x-$h$#JFb~)O7l0)>{z0%d*xHu{Z1<( zNhU{!T#1+Ni7g+d<}Tc5nX_iI>{YANg?hfm^Y(vD4oeI)?cAO6_p9xJ_$n9G4_tl9 zhgn~6AK&z7iGnT5hTK&h%NPG<%g^R!Dh*%w_u7Q_i@#j>d}n3N;azzdX?#nKGg{?T zKVJ0=w%&GV`;-?AA71WWc=lzF^`>l-*izw!E5P!YY2g@46CVY_z+8DhsaZ9L0 z*yOTVMn#i$FR@?{_*>y4r(baUv{BbAnaL71kIcSroUau5=+FYQq)hkcS+iHwe+uKT z>bBWG+qK^Ed~UePi?cKPIUU=-)h&o$SZvoMbK}_br^a{Gn5K9-?YQ`y`|{1N5^Rhe1>fY{#=Ui7)vZP;JSikI4T+CyJptZABv};^v>uHdZ?csW`ee1r)O$BqR z{q($}=T|#<_B%=M5?IGC61-dX(D8rsf9oVT#LTGN)v0`U%bllx7!4Mjy{pBpX20U{ zOWm_wyRW=ot|)M9?%Zc7)y*jn|I3OSCELe&TnY{QxW`8O?3PI8yxX=LrMC11ettRg zl8S-uG93f|#6NGFlm1(>t>67hw%1wt=h1a*LKId!TD^DW{Mf4-`?#NY{(Qxn%`CF} z;qHo}OQz4ZPZK`y^{J2RmaG=V!;%dF|7FB1u0KDz_V0{jv$PHg_Ptk9UoeOR0CcFC Ar2qf` literal 0 HcmV?d00001 From abd5a3597031820ad46f11ad4457f090fe017c76 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sat, 18 Apr 2026 02:42:01 -0700 Subject: [PATCH 28/59] Make Jett a Burrow admin --- contributors.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contributors.nix b/contributors.nix index 36bc1c9..95d4e59 100644 --- a/contributors.nix +++ b/contributors.nix @@ -33,7 +33,7 @@ jett = { displayName = "Jett"; canonicalEmail = "jett@burrow.net"; - isAdmin = false; + isAdmin = true; forgeAuthorized = false; bootstrapAuthentik = true; roles = [ From 4f88f0b1e09d31a490d001b5720d5b396adc71de Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sat, 18 Apr 2026 17:09:20 -0700 Subject: [PATCH 29/59] Align Burrow operator access on forge --- contributors.nix | 3 +++ nixos/hosts/burrow-forge/default.nix | 10 ++++++++++ 2 files changed, 13 insertions(+) diff --git a/contributors.nix b/contributors.nix index 95d4e59..9475a27 100644 --- a/contributors.nix +++ b/contributors.nix @@ -38,6 +38,8 @@ bootstrapAuthentik = true; roles = [ "member" + "operator" + "forge-admin" ]; }; @@ -50,6 +52,7 @@ roles = [ "member" "operator" + "forge-admin" ]; }; diff --git a/nixos/hosts/burrow-forge/default.nix 
b/nixos/hosts/burrow-forge/default.nix index 497d40e..1b46f6c 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -18,6 +18,15 @@ let } ) (lib.filterAttrs (_: identity: identity.bootstrapAuthentik or false) identities); + headscaleBootstrapUsers = lib.mapAttrsToList + ( + username: identity: { + name = username; + displayName = identity.displayName; + email = identity.canonicalEmail; + } + ) + (lib.filterAttrs (_: identity: identity.bootstrapAuthentik or false) identities); forgeAuthorizedKeys = map (username: builtins.readFile identities.${username}.sshPublicKeyPath) (builtins.attrNames (lib.filterAttrs (_: identity: identity.forgeAuthorized or false) identities)); @@ -173,5 +182,6 @@ in services.burrow.headscale = { enable = true; oidcClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path; + bootstrapUsers = headscaleBootstrapUsers; }; } From 5a4fe58b86fbf70b1de85e8e1a61a75600bb5687 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sat, 18 Apr 2026 17:47:17 -0700 Subject: [PATCH 30/59] Add Jett forge access and rekey secrets --- contributors.nix | 2 ++ nixos/hosts/burrow-forge/default.nix | 30 ++++++++++++++++++ nixos/keys/jett_at_burrow_net.pub | 1 + secrets.nix | 2 ++ .../authentik-google-account-map.json.age | Bin 968 -> 1078 bytes secrets/infra/authentik-google-client-id.age | Bin 493 -> 603 bytes .../infra/authentik-google-client-secret.age | 18 ++++++----- secrets/infra/authentik-ui-test-password.age | Bin 832 -> 672 bytes secrets/infra/authentik.env.age | Bin 732 -> 842 bytes .../infra/forgejo-nsc-autoscaler-config.age | Bin 1264 -> 1374 bytes .../infra/forgejo-nsc-dispatcher-config.age | Bin 1127 -> 1237 bytes secrets/infra/forgejo-nsc-token.age | Bin 1199 -> 1309 bytes secrets/infra/forgejo-oidc-client-secret.age | Bin 484 -> 594 bytes .../infra/headscale-oidc-client-secret.age | Bin 485 -> 595 bytes .../infra/tailscale-oidc-client-secret.age | Bin 484 -> 594 bytes 15 files changed, 45 
insertions(+), 8 deletions(-) create mode 100644 nixos/keys/jett_at_burrow_net.pub diff --git a/contributors.nix b/contributors.nix index 9475a27..df76a01 100644 --- a/contributors.nix +++ b/contributors.nix @@ -35,7 +35,9 @@ canonicalEmail = "jett@burrow.net"; isAdmin = true; forgeAuthorized = false; + forgeUnixUser = true; bootstrapAuthentik = true; + sshPublicKeyPath = ./nixos/keys/jett_at_burrow_net.pub; roles = [ "member" "operator" diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 1b46f6c..96eca4f 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -3,6 +3,7 @@ let contributors = import ../../../contributors.nix; identities = contributors.identities; + stripNewline = value: lib.replaceStrings [ "\n" ] [ "" ] value; authentikPasswordSecretPath = identity: if identity ? authentikPasswordSecret then config.age.secrets.${identity.authentikPasswordSecret}.path @@ -27,6 +28,23 @@ let } ) (lib.filterAttrs (_: identity: identity.bootstrapAuthentik or false) identities); + forgeUnixUsernames = + builtins.attrNames (lib.filterAttrs (_: identity: identity.forgeUnixUser or false) identities); + forgeUnixUsers = lib.genAttrs forgeUnixUsernames (username: + let + identity = identities.${username}; + sshKeys = lib.optional (identity ? 
sshPublicKeyPath) (stripNewline (builtins.readFile identity.sshPublicKeyPath)); + in + { + isNormalUser = true; + createHome = true; + home = "/home/${username}"; + shell = pkgs.bashInteractive; + extraGroups = lib.optional (identity.isAdmin or false) "wheel"; + openssh.authorizedKeys.keys = sshKeys; + }); + forgeUnixAdminUsernames = + builtins.attrNames (lib.filterAttrs (_: identity: (identity.forgeUnixUser or false) && (identity.isAdmin or false)) identities); forgeAuthorizedKeys = map (username: builtins.readFile identities.${username}.sshPublicKeyPath) (builtins.attrNames (lib.filterAttrs (_: identity: identity.forgeAuthorized or false) identities)); @@ -52,6 +70,18 @@ in "flakes" ]; + users.users = forgeUnixUsers; + + security.sudo.extraRules = lib.map (username: { + users = [ username ]; + commands = [ + { + command = "ALL"; + options = [ "NOPASSWD" ]; + } + ]; + }) forgeUnixAdminUsernames; + environment.systemPackages = lib.optionals config.services.forgejo-nsc.enable [ self.packages.${pkgs.stdenv.hostPlatform.system}.nsc ]; diff --git a/nixos/keys/jett_at_burrow_net.pub b/nixos/keys/jett_at_burrow_net.pub new file mode 100644 index 0000000..36c85ee --- /dev/null +++ b/nixos/keys/jett_at_burrow_net.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMe960j6TC869F6RvElpICxlBauIT3E0uLyy0m7n70ZC diff --git a/secrets.nix b/secrets.nix index e3fd9a2..32d7882 100644 --- a/secrets.nix +++ b/secrets.nix @@ -2,10 +2,12 @@ let conradev = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBueQxNbP2246pxr/m7au4zNVm+ShC96xuOcfEcpIjWZ"; contact = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa"; agent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net"; + jett = builtins.replaceStrings [ "\n" ] [ "" ] (builtins.readFile ./nixos/keys/jett_at_burrow_net.pub); burrowForgeHost = "age1quxf27gnun0xghlnxf3jrmqr3h3a3fzd8qxpallsaztd2u74pdfq9e7w9l"; burrowForgeRecipients = [ contact agent + jett 
burrowForgeHost ]; uiTestRecipients = burrowForgeRecipients ++ [ conradev ]; diff --git a/secrets/infra/authentik-google-account-map.json.age b/secrets/infra/authentik-google-account-map.json.age index b3cb6f84c8d7f174f404cabbccb26a8525167538..158814a6c51d0945bf91c2632d6504972f7bf9e9 100644 GIT binary patch delta 1049 zcmX@XzKvsoPQ7t%mZ5%7o}q<_MUKB=Sz=X*ms63aYk6r!dUi;XpHGr&c6hOyPe@|8 zFIR}awwq73p?0E=rD>sWWI(EsQF)nZg@>V$Nm`1JX{ENOZ@Oo(WxiW>K9{bYLUD11 zZfc5=si~o*f@e`wu4B4Fcx6bHQDT~Qd4zL?nPGs1e!YI6V_`w2cdZ^WI<&* zx^)KG;Q{G^3Z{YKp{{;T5kAF{UWO%!k*SGcSs7-|C82qazJU>8srtqBrV%bK{vNKu z7F-!QiN(pLrimu`B{_wWh3+PK`bpW@p3cc7!Ol*l!Ok9rZtkx7&c0=-CLrq~ARem- zN~S643Er@@Tzn-bj$RKDEIa))eq#-)zwvqa7}GH^7iFihuKeEcGNg)P7D$@;aIwi&I|6W-q7 zF7YJ&Yj@tGzABxbOOr&6UQ0xpY}&u-giyp3_3tk)eADeosTa*@wBY3neo-!Cbx)G_ zsmJ;?qEnsJJ!+S$WLw>j^E%e6$+B*1>I<>`KgC)zIuCD8+vo7BP;==MvkwK&8+(?` z4E43)UdnR+*oHHUmM-Jg3~B&4B9Y zyNk1feskLQF)TRn?)=pHUH4r-Z&H`+RQapt{o{Z8!h3E#ceKShtu}I;t7uesK9_|- z!!&kV%JDC0A8Li#84jlz*(Kg((MpgIwR*H;dH)Mb4yUEl91GUo^!ds6kw1(hWs)zy zw&s=Bn_b)gFH2sabHl`V=^AmdopUBUJ+I!=JFUI=R&&|pOF1w8*-ux~cC42_!@oZ~ z#_0OL;Fym1-5MQxUj7lVKccbid#CRyR*wFyznVh-nM6h(F)GpdaCYW~$bWnjGJVYA zKJQwz{h>wDeY*>9-4hIk zdbIzpn6OFL6dl1IkG`j$Z>wO|Jgz!vTK#Reyl?0BKFlr z4|}H-cK*5)>Rl#R_Hpf-d8I2SwF+96$!wpKcc1mQe@KPy)jfPFpI*+a$z!uRlX2o4 z+rz-tTaT_cs@#!UQN8{}n&s|S4r}LHe%{-bwqxm$+f(?tr!D@R-``Mk5P(=U#^i^PJxqaqKT_n zah`LsSy+}YS4vP$WrmTLVX1nCKB>{zQ#l=;HhAAlpj$FFBx(Wtffu`og z`Vr{YANg?hfm^Y(vD4oeI)?cAO6_p9xJ_$n9G4_tl9hgn~6AK&z7iGnT5hTK&h%NPG< z%g^R!Dh*%w_u7Q_i@#j>d}n3N;azzdX?#nKGg{?TKVJ0=w%&GV`;-?AA71WWc=lz< zPp43}`eg|#b@irflh{-$9^NX){MLLV}_u$vsScgTxaWPkdp1;da!-#zQ#=jbE^IHyrbt=J9+jyN$(O^$1f7R zTlUcLfAfFqBsj#(sI1@BseE_Kou_{o4HlfetHrKnzvA*s-LqZ0ue@KbC~$1<+-E7( z%_$H6%ZeK%+sAoa3Jv?X$42|?mPqEj+qN5}w)6#lemV1!ih=Gj9RvTwKX03p{#&xG z-~CFq*ID`J(RFJ=6jnT1y?5pO*sB}+xSx3be8rm0EVBFI?uw#Irq8xd6IMR(^{J2R kmaG=V!;%dF|7FB1u0KDz_V0{jv$PHg_Ptk9UoeOR0Gbk!f&c&j diff --git a/secrets/infra/authentik-google-client-id.age 
b/secrets/infra/authentik-google-client-id.age index f295804f68781d005cf5d43f5fa1e4f1dd49d320..344c73bffdc9e21ccf1560cb9da098d937c37ccb 100644 GIT binary patch delta 570 zcmaFMe4Ay0PQ7z(ntoY&kdu2+fQ7evc|o#egmZ*!XK8^`etn2xzOlEHwp+e=ps}m5ONc>v zP=$q2nn`9dS4fmek-m?qYl=sJWqG-0l2^D>mZxV(R-%!QQ9z1`V}yRFxmR$SPp&?O zbq3kt0qKDXWtM*VZWYBru0g@+-loav&K|)jB~b-=Ca%c=8D&`}Ue5JFL57JH?uDVg zT>0rPnLgRUZbcTE#+FIOl}6z?c}XF@1w|2=VO|05rrv%Y?%rOWiRnK1Am2qmJZ4gn zUz!o%UT9J7oST%C5O7m{7#n^F*3R?cPAxj$q^#F5!keK$^4l3W+^x_#@b zU+gD>ZR11rj@$OW2{djw&;kZ#!}SlxAk~mHO(ryL}WKrn>hZYloh1wm>0sl}E-elF#aTs|4~ zNv@dz9{L`cMq&Qh9{x^IrmiVzk*SHk-o{mCMh1DtnI4X2Zbezz$y~a+x(Y!ax#ksN z>Hg&*E{4WtL8e)0P9}v#g~pXWj&6mOVeY2crI{X%Mip5ek_V(AM~n%W=#7CX59 z#3RR<^Xn_NROTdF2IgP5&JecRXlAG9Wfz~f%D2smgaP yg*IIYT5fqcU?aQk>Z-TR0;T@>)0RXXn}51>-pTHJSx3#pe;XZ)NG{y;ZY}_a=%t+i diff --git a/secrets/infra/authentik-google-client-secret.age b/secrets/infra/authentik-google-client-secret.age index 43ecf0b..9a841c7 100644 --- a/secrets/infra/authentik-google-client-secret.age +++ b/secrets/infra/authentik-google-client-secret.age @@ -1,9 +1,11 @@ age-encryption.org/v1 --> ssh-ed25519 ux4N8Q 4uq5z93mRUUgcMOxP4+Yfe2Jq4tGYErwtzvtMHUvgi0 -J9DkDeSPkQbOjFM3QoV+1Kz3ZVLfR4PUxCT8Zxz+Wvk --> ssh-ed25519 IrZmAg uLEVmJ+e9ZiLas5YooR4GfgyspWTsFdMB2WPvluU/VI -7vqqQ/BIDQaOp6VDVLa5ugoRxVZZsMj116cTHY6+8KM --> X25519 9spF9eLz63UOaBfuG9vTIr6bCKwzFsWMjnaIj1PIR3Y -iGFELg2RQUT9rEal7pblQhfxtwYhxsZdXYxEhvjtHpw ---- 3TDrUnIN826N/n5gc+YY8ilMMc/6K8zGTh6FxzKC/JM -XH#IJGueֹf&1a2BJԎg=̿.*7Fb \ No newline at end of file +-> ssh-ed25519 ux4N8Q Q3rYrGroJXarMLdatYCHVERefWDyGwM0Ii/kOp5m3Fs +W3tgHNXLSVfGU5p8MhBj0mX72SNgMl8nf8sQX29yvBw +-> ssh-ed25519 IrZmAg fyFQQkd51GthNZ4R+W5Al266LnlKbr4ZoMERlCM1OTQ +rNjnHTGCfF8LkqU8mzTrHlL5G4az1k62gvH4gW8zmjc +-> ssh-ed25519 0kWPgQ OWokv9XAphqbkDi1cznb9V09VcM6Li1eIh0JpcIlVTY +TnPVlqKB78y7NPYp02UJmuRXdBMKJKCngpvo8TjpFZ8 +-> X25519 HWaWhyejjo4IjDrNsBYxU1JaGU0899FqiBYgstInuiU +enbBGnhH+uJKY3NBD6mmy09Uos+in6ytRQ5BakvTUvI +--- 
gOBrh88hnvlUSmnRiowJiUIwgIz5zzVKH8YCRb8Ckdw +xokPn8v򵄙HRʏoMË9&Tb]ĉ'|<Pbe \ No newline at end of file diff --git a/secrets/infra/authentik-ui-test-password.age b/secrets/infra/authentik-ui-test-password.age index e84a7becc0b8a5f9a3acd01f4a95440bb16e9fd7..773833e64364613a7f7619be1fde4a32d423c4fa 100644 GIT binary patch literal 672 zcmYdHPt{G$OD?J`D9Oyv)5|YP*Do{V(zR14F3!+RO))YxHMCSHtuXPk2vjgFb+#x@ z4hyRCcFFS&Pp=5c$;c}z3J(jj%u0$d$c3+^je`DA+$S zA}TCT+pkQ&EHf;{!!tWK*P_VWI}qJAgY58t^gxB+RL?4ZACsU^QzHY5q)P9M$VhkN z^dt+92yOky6qCxr0q>v=@yu1*zs1Qe!(9lTVRD%e| z$_h6RL#IIfq)?}vN>`9=5s*L(jBqIp3lA@Lw#*Aj^~lSt$_on2PIJpODlsuL3QIQd z$Vl@j3y$(GHa6i(&#*M}F-Q-q3@=QLGzm5E)=$of%GA$xNewP4DEA8w4T^Ge^vDSf zPc8@9fE-sPNx7!Z!EOeb{)wLD4g>$BXwx@?*Rid+DPLzwMpQF)OG>3va9C(cAXi9szG0Ya zR8^2mR8VSoi9v`#N=0J0sad39SYo14MNVXvn^9zzpI1_tqiL{VkU>(YUzT%Lm`SjK zo4>!SK^0eMXmUl6M~1tfL4HwCM21heTa>GBUP`IAL5f$PVVJvLo@-@NM0QnJReD)e zj;ps%h)H^~v0uJVcv!Y?R;VGDZ%B4}N<_LrNLEIWr-z$UkfD=dVvd(@YPMmfe^^Fd zn46z}l##DNQg(@FNT{J(c!-m0q@h<>V5pl*P_CapSCXM$ZfdrxcW9QcVVH|+WnyTK zXJlw`S!je?URX$uOSp?)nQyqML6lLDSyE1vtFN(VRZ4D&e^O3RL{x}lIag?4en~*4 zQ&qB2dP!7xV5MJ3XoY{UlUH(1K}leEa=Nc;RDN){WmJkwR6%O6MOBzbN~O24Q?Xx2 zPJW_WXeC#oYlTagdv=Adn`@Q7o1b@NXi!*Sgp;wKV?juyVRA)CPMCX=QAtElct&M# zM3h@(o|A7%l~+-sb8&g9M_?IORA^36NOqucSWZ-?kAYvIe~4EVIQrbuVXpB=&Q32& z%JMVu%XTaDGb(Wja?QyMH42Ih$_dKjip(}m3=b{IOw1|BNe+)PN-~Oa4sl5~3=55l za?Z{$Om=fK3CWJi4bAe(4GGC93(s;YPV$J#j10@k4Rba1<_a{jbo2Mf2u(5YEA=)^ z2?~kGa19U5_Vx)jDDiYREeg+02~IQ$%Lp+}Hi$GzDGv!ZF-d02=dKx zH47?HqQ3)D9%VM_YFxj^2y68FEC7UH_9>#@$d@Ih$t{E^R7rq VH7qvFO}Dk>0_Ol%KNp;t0|0|53f}+# diff --git a/secrets/infra/authentik.env.age b/secrets/infra/authentik.env.age index f9f613687871d9959a66369ee2bb51b7aee03d40..dbada85c47dfffb1804d9c0b7c0bc4196873bce0 100644 GIT binary patch delta 811 zcmcb^dWvm=PJNKReraN1SXf4+pPye)d3b50XGw&ArbVPrl~ZUyQki3VRJdh@V{ln; zF;`VdS%q(sv9o82vA$)gUuL9PWky<6PKBp_QD%;>YgtlBrdxVpkYPZ2D3`9CLUD11 zZfc5=si~o*f@e`wu4B4FWNxHmVosJvR-l=yVT4JZdA&t(cyeN%OG=_+P>`ibSU_dE 
zX|c9bg?^qZS3#;#pi^O3kwvn$qh)@MQAko?cu9IydR}s9wz*eQwr^FcQ+{c-Nq%xV zx^)KG;Q{G^3fh@oVct%b?rHAcK3Uo6C7yxWg=s0K#UA0=MLt&ZPw|5h00riP||v7T$q@`5@~eARa3T za}P_3^bQCK3oftp^iQu!4N1~BGx0PiEq8WuPYR06D31zu^)U=}%;zeO^7C~n4RFp4 zDJ)5`EJ+N@Pp@~5j4Cn7@eC{1b_q9($jSDswS5%k$04_s!>8f4_9|h6~DvPVy^;e!X(d?}35P zrr0$KH`{zNzdZSD^2+j00RNGWWuLyMYYE+9OR)_1%*gTiR{u<|Zqe)K!PmThonDfw zoUoMDH`w0yUjL-De^c#$wtjv%g}X=N+`>b*%e&+%ZuQ?&mx|7a(>|F~y68+n?AHZH zWWUBv+V#k%HFUG5Tm%>2hi%J0^eJ!{^0OJlHk%z<)~B-4c0$K`i(b)3&Gzxy7AL(F;5)@8-?UauJyE>yqdNhKrXMWim)v0RWAPJL)faH6(*L9kJIL~(ejnY(3rNm+KJbBd9LVOW+^pnHgui*aFMdYO}} z30H+_nwM{tZ$**2M|i5cqkFhdq+^bWxre8Zzq4qdTb5s7dRd}hWW9HUx4UtGagcwhc6w+@k$-A( zaZXfWnQMp%SH7vYX+^1JuD(}EV78BWXi<`3d6;2Xv3pcOdVobllCfV%afnfbM^$h+ z$hruKO~nP4r52@WQGUrKi3S9$r`RU-W&ZpB^^!NIBSVMP&cGkffl}=!RDUY1!Yc#My0{NhT8f*u3VocC4J3un-}`=cmDV3 z|G2NqF4nwKuU_&m);V?aoMR3l-xqmoSzNDu$o60C))Lv;xfcF)5;i+d9K5T2jA_A- zKSIl28+mMV_@Zd#Tb6Y>Fj3nknfvtgnQ|u>3QL=LbQ4awdR`1%P=Dnw8+XOmQjwkd zvhgNtMY^t6&3^@-*9%bi$$Ca?S5|sy_2Zj2W*@tIQHp4{?JZ)>&M=>AC+BR$HhaazI z$lcs?eEON7kbcv*?so*M?wABV+mqy$mbL!l^*3!_{w`)Vy%!#(&B*sSVrKTf8y7yV tiL#CQ%+#?PQ72YMPOuRrBhKsa&l3ai)Ct6QAt^TWnytfzMFS)NJ&9nvR9Uqxl3AQ zK3A}7rm=gfmw|q;qqa*>kfD3FsZ*qhqe+Q(NbF?S7uOOjv(tIARcon zOE${MO-yu4EsrY5Pfv3%3Ad~Y%k!;F&X4rV_0abXNOumbFi0yacjXGMa?0~hH3-fw zaq_ARjdZdoH>=O~4fD%LcdRJQC^9SvbPNx3bxY34_sQnc)zwwd&nj`w&kXc4cMmD` z$c@U#3JFd(Gp`8M4oY__O)brc%64=q*Y^tN*d>67FoZGXY4p7Ty~J@m%RGydXu}%y-L-K^f}shFHbr0 zZ0dujqB$7{MPxoDa<9E9u5K1{GU6DsP?)*aircQj8y6K7oW1gK>ZYi)EaBwM>zdhu zlLEfZ-=f&3_pD&u2nRo(XKTGo!VZB=Zouwk!@b>=ij8dC@tsFkFrXTHnv|#ri z^E8E;-yJ?F?*7j_j_t{d%6j=MRz>Pu-Shg*zaK9tQcj2LxaUtPj}#rT+Ny#bvP?NtcgIvXHPi`dszcW}Q~c z{YBGxLPWQx^4)Ow^HOPd&Fo;t?I$+scN}s&vQhr&?G|kxtNUVKgxM>;_0;I~luw$u zjCI-8mO~EHKNNpr+Mm8sdbz-Xn><@zY1f}hC}!MnF7IFJf_q{Yo{QYs-c~b7;@r+f z<}FrB7wr>>U0fHqc;UvLsO2XX?hTQBd+7e!ya&2Tt@HA7Ts-I4DW$BPceK+#;#RSS z|ALv^OtKO@Yt#f6TyANRHnmwcDN>97VV1w#!Wk37)=pwIIV$$|(gwHK%O(#39xd`@ z{g=eURsSOH=qt{6@l7kv{B`^uX22lwh?=;@+Uc4%>FWEc~py((mC63$H}x3L$~=Tk4y2A7!< 
zlMNdhSL$d6G;8_Wn(@wBvSP}QLy7(!k3-YjWOYYkewf ztgM4HvY!6jzJRSzz0T-y&9uY-2hBTG3-7raTyjloxUA!KFEpF`6!)qLJ{5DHo;$E7 z*7E9|1TiOO$8QPJ{OR+fe(js7D|B~puwHmc0(<;>9|N;1H%tt6PSpMo=@Ixpbj1;o zf8jY3bf(E1=aYQ2TF1ul(IJ-^!5uq`>r}!8%A@|CW7Bv0Eiv)mz;8~}HiK)3(^ delta 1236 zcmcb|^?`GOPJKmwieZ+Sr(>DByLY*7V2)==Rb_Eec#2cGes*QCdti`JMPfvjM{#g) zHdjz(ph2E*et}1NW@tgSU!Zw%rnafCUu98#Nnl{MQ*xnsuvwBvQIT1GB$uw8LUD11 zZfc5=si~o*f@e`wu4B4FsDD*>iDhO|S-QDLkbY!IMtx3jdZ1faX;GGOdX+`Cr+;XH zS5aWNUs_;3S4mW1V7Pg)r(1S*vPoJ%Vpv(QSCDa-hiQ?yp=)AdQGj=-QD&j9M^a@V z$hruKP2p~N1w~m#PTCe37ExuEAsI;qk)~m}Mn086!ATw&UfB^@-evhwC4ug~Tty-E zCh49*Dc;(K28Ko%?v>dgrBz9Rp60$~IaL{zj@sI3i3O=ysTN7au3Wmhx(a5QKIO@k zrJg~i{^hPwRc`J<+J(MZ9u+2r>HaRkp>8?3<&Gwa-hSae`CR)aOnbBX<>f;z!CR*7 zpHqJK>djNzA>Y_T^d6?fYiV{doL(PWGBp zH+sU}soK_``o2LvNiwdrldCBEa|M5ofUVo=r%&|E6)j$7vEBWBF6=+s#&5aSLVJo0 zUxdy%H&4brKlx?l??Wp4iyg%-sb$Zxa})V(&+i?&x|8J~uT+oX&nr7$fB3-#BbmKl_TF%AY7`VchQ=KZyIZE*3WwG=X?Bcz$N*h&O`p0 z8)x?Mmy1pQ{8PSeV_DhWcba#kIaIXv#3?<>$*Pxaw)TDYf4j-W#P`oW#%S==^Pcr- z+w=L0&W~B!d1tN-z1Db8JN(*)ixzv=K4xRze9WNi)YhH)aW|?b&pr1%xxHES@^^>C zgojI4ylbePZj?Vs<#9w{eS%Ee`pPShCtP#N@OJ-j`b6AOOV3k@HGVC7FCCj5(Ik^4 zUaP!E)>`O7z2&Lcn(LlIZ&U&}8}g-M(h5E8UT&3ik-br9BUWr8t#cWt_*{PKu(Go7`)4&1Z3z!$H#&MrnaR`TcC*x5yYtZ&pC@m_qs zASO({D7e2uc0;6X*uSgG#9mK0FQ6^-H)~<)5_!e2y6-D~u3G#;@ zXH$K!B_TB3$myc?H(4!)Us*A8LLvj+w%%v^Yk0xE|MH1dH#aZW&X|2^=c%gd=;DBt zZ_VnZ?^?x3x2c)En0WGE=FYyK%ySRPFx0Q^*7>aGo7BXbSf<>1Em(U&H#2jwwoJl5 z=J!WE)8?~Wdt9}3-2(YF-3mYc+u8myI{j?c*{1$YYCRwlr0Klymg0WSC!(dx zCRN?NAicX?!>YRQotNdm(APT?c1*jgb|wCt{Ij6byVtK=^ygpd7rVth>IWOt);znd z*E2o-pKsB^yM5_C5}cd4LUXhmpO-BCE2UDVwoB#l>s zflFb5r*Bv`SCxf+saJqgR7GJ@ps8o6er~CAnQKLGqH}O*S*BS*aCuQklB=sxKx#la zx^)KG;Q{G^3jRU4MtP2=u2JSL0ToUm1{RTq*@cezt}apeCdtL2#i{1?Nns`)Q7NS! 
z>0Cy+KB1|JX0A?2-sTow{zV>+`rfH|AugHbg+3|zP6kzlxl!gx8RkK``5@~eARa3y zbE-7Sv&c{LFRdsqj;u2G(zZzQGz_;W3a)bZsx0>M_RR1O%F)jVHsCTfF)z(>^e>7u zsEqOobaM)DOsrTL^a|E5_csYBbPdQf$xVw&Gztjy%uP1R2r}SWaxceT!hP}*kG@}-olP%tSfm|} zznRP6^?B|ox66roF0-aTeUZ+vKL6CKe^*8K#XL|xc|x19wEnxnu9E3Gk*Ym9rQDv) zbFY2hH0#)BmR{MEnFR%t7Rs2Z_Ptrm**|BcTB+Rg`SA%776l(o7S9Xu^JtxQZjZIJ z=ttrGZc^gA>Mn6`I$U*>QkAv-lD>`2W$$mE=x_sF=Mo-+`{x4%{@6`^?sK$g#!rDA zf%S(|PKsEpVbj%Onkib(U|u3#lhk_sR7U>BZ|u79`#M!mPDwTKSuP{-z1*|T@K==G zvMFr)F4g(niD|PBR(-(GfB2{Cr!zSVR?iZPRR6Q4TDrym`1PB#_w;oP|=zW##x`zEg4 ztJ>G@+&OpaVVfxX4PrGJVggznn;U1HoWJ?tqp++kL{ zO%>A5zP)&Pmte_OjkjO_{gc!=cF%Z9ebGnT3!Y}D_@}I}nV4PTmN4P@79B++say6+ zC+v6J+dH%MUjFqfGbSJ8%iit4zb;zGr}_b!%&oGX!KRDSto>w}a0yQa>|Zx6cmN&e~8lX>?J{oNrX z`^sQb{qNa2Y+4-`1<#!{+^ygHlFhrdx9ajX9u~%LJJ$&MvM_K-omH3+A5?Jh{qD4| z#wY*8rRHC}u~nLZD=#5nm5ZN{5y!KKXMV4boW$4gP1#`eyWR)izvpb&s9e|TZg=_6 zg>L4w4Z)xO=6yUZ^?io1`h)dhGWqt#l7TAq57wImC8ij;eo@tVziE-n-}b+&o04s8 o-7S7;>?-}uE_CwtM<@M4&Y2d8aSNDyH#c#g5L~+1>orp!03~<(g#Z8m delta 1098 zcmcc0`J7{dPQ6o=m$9d}VV0YJl9z{9g_}ibVQE-)o?mKOg}%AIM`EJ3o3TZPxnrWc z1y`0wc2!wGL{3pul23(Ou8(hD@K~7jzWPOFUajJK|duD+_ZlF_{OK4`H zXJw8>Rhh9XmusP~cA}9%xW1!nMzTe&b54L`V0nONl&^DEsb65CQGv5@UPw-4m4$00 z$hruKP3~Di86}Qp1->q&>5)aoftdyQuEvQjIq8Y|`pFh~u0GkOd1e6x1{sy1T*>A2 zsfJOhhUrl`sTOI5VWwVYLE3?aCPA5=Sw5y-`oSgohNdPJX*o%5>0G+Hx(a2+9-e81 zzNIBzc_~g6B@wAvnO?bOo?)iN{*l^F20lr-$^P0#q2_KyCR~hdd;-3qQB8k4xVT=n zY~?)q?bwHulGNfI={vs8Ej)J5ewF;;oO(IU*vAfiCw!wiR$S%$wD*-znVtCD=5Gu6 zR=V%FeKp)#_;10!xh_F21PLR`jNG!R0mY)8BDRFVnmC zbGGA!t6Tp+V4rvL)pZ?Xv)1cVcc?A6meHqmV2!NTis}!J>qDpM7TlZ|BBtu*ll4zJ z>f-?^-ukGS`z;U4Zeyt|ycqOuTDR_&79B?ZKU4fBA6H8Jbz5|m`&^FMMYo?Ew-DE# zvQ0wtf%n9`JH1>nOhPv%2Nl~_p1nMup)kF1&Q!%`U8WB-jr(4uJ$9K}vMy&|;bIYU zRfFQ}@8A0GDy&UVoclz%!7=-T_lA=ke*Ckaex52h@wXaV{bZF1{iiqVl`8#`v*Fe) zt9KrDLUy8;9(URAv(I|`T8(`l7k`M{A1|ZTZ|=MeI{sm!r^x&b3;%`9F1N{9_iAtN zAu|)-9UHVod$w1L*rmnl=5({61CnM^5{-QhX$}wO)q2C^tJ+_vU@Q;=~+V z_q*;N#rtNwn%xoiUCH67wdFBG9k0L_-7eGq&wceig!5o(C$Hh#T{D)}-R{~ha?R9_ 
zslugDRr77p^R?$@FxYDOX_w|dl6O*N+2CA%AvN#wozOMHQ)OGCdomjTf9Y`3eR}oW zUA@&yk3?LlQTXcLvX?o!{ciN19WQ>UF*3ZfuB-Ri7rXJ8003ti=DPp@ diff --git a/secrets/infra/forgejo-nsc-token.age b/secrets/infra/forgejo-nsc-token.age index ff8c278c3f78517fb387ac2ff5c9640754e81ffd..68b65722f7fdefd62b86b7e0b8a0586f52507286 100644 GIT binary patch delta 1281 zcmZ3_IhSjKPJLENinF6-VX;|dfPZ;$y1B7mWPo2#VOo)ASVU50P@+$nSxA(VnP*5u zAeVQPSCm(JhJI;aVtJ-%NsgD1c|}QavSXQBRce%PnTvaJL1K=nab;qLCzr0BLUD11 zZfc5=si~o*f@e`wu4B4FP@++4s8haidWw;kSD~SwV||K6TCj^}T4lDoXS%sdl0{xr zx{p zjf|`cN=yrM53MgXiz+Y94#=oX^6)o^2q?(#CI?b*9aJMvxYN4Bm9{YFg= zTf2|y_-=5R&%ZyrNoQ6-2lMY;p}eWwDhFiKlA0%Ow^{SBKJmbtqfWgup4vH7U;3x( zuIkQde@D#DBma|1h;g~+wI^*>ThHHN(RVziu}Q8<=aT$9iIT((i5>kx6)UF*bidrX zWs|tHQEuPC3jK^SYbU&r!`uld?P`ihUuo0o|QO7;O`l5(--}*-%6Z<0cm0f=t3NYMGkIJr7~OAhkk?3C zIasRx(iRQfJ$AgDCPFtdL#F6S9(^)x!S-%P`T7l?ZZ2H#pY;q!!s#zN?Hny{)M=dA zY?^jlSL+DxvDKM;f_W=k3!1#%T~C?kGV}NXPM31Em&@7|jwa6kGhBB zj@$W6M~i|kNO%aC{Bt}qbFYK>FJ+c_?+^M_mOf(4?W!vN_TjAKr~Z-EFg4?y z+P<`}Qc7lP#w+898`V$#6#EK>F?BAT(yqDn&I*eak7kIleED~!IIQ*QwOG^1700I( MT&Uo4Ki0Vr07SqoDF6Tf delta 1171 zcmbQswVrc=PJN|wR#mcbV7R4Wj+s%UcBoNgRH08nNOopaWr&w!MPQ(FW|X!|g-2$% zCs(e!zP3?XK|#K8kY`A0enyC|zDus5uUT5LYf(u>hJU(es;7}#VOf4?F_*5LLUD11 zZfc5=si~o*f@e`wu4B4_cBHALzeRAEV@On5szq3-QGI%5v7xtLc(RYNiF1j$ud%za zNrYc$P;yl{m%Dp*SY&c;erSnTP=!fVT1l3Vo2QFWxlf5-iF3MpSYBkQub)MPo4E-wy#@Xo>0eM*_7Ga)TAp!MK zQGP{1q2AhVRYj$#B`$8IUdbU3IwHTA@gSF>r zXIVB^Z&+ZG_0hU3q5MXQ@!IXHbwoPrmENW2WTwlVWWCV-Zz*@`i$@8!d2ouUX7Vx_?fXN%735`*wNX?0;EG z$1Se^p=f({okDaX_xS{cHRo6FiC6C1Hi6APr2EMkwk7)=&ish|CgS@1+@`Is_Aq^0 zyCGWVcKX}-?TZ;tzjz>>-2Wi!+idaXirQ~GXUx<)Gw;9D?KG1EBGbCInqHgg@ce#(4ll8oR_T28H_PX33Q9Be^CNCKA5FE;-8uO|X-;IxeD0m1)=!pSns$6+O74n< zKY2fwf4}b5T6rte^I6;VZ|D1FpKP^oTd?%t`VX}^mwzZ-y2G)>^Xd;a4sr3T#ijqd zw^sWoPLh6oM*HB*P^bFTObJ$z74cDT>UtKq*gbX9+}Ks$?X%oY)Mi&h5ZC1gR}aNr z>HfOG>gU6RpRBK%R((!2`d>bS0 zglT=8Z?evDDf|Dmx<$5wHP$21ab=7Ryo%Ic6zU${_ z221Z;uaM?*q$k-X8539b_AMgI4lGC*`b?*I3Ygfo!-OA1K=)kM6pD|U& 
zPQQIOPkY9@vs-O#ktf^Xj(;`&EgA*knq8V)OW#d>+H->E?1kJo;mNh7;SZi@HEvz7 zj_v+3{}kB;9aT2WCguv8e|>+_|26u0SZct5R}y<06W5ggFL7EkXPdHp-TN;+VKVG) U4PHDu{y$z@K6SUmQ>)r10Amgg@m#&>cadC!j zYKoDmsiCEUXHitHW4c0cws)~>p>wgTr;%xXq*H~3TfM8liIa=FXO4SNx|v6AR+w*M zx^z8TJ`5rxJskzVeuAnPI^9!pCt z%Fy>nGONh9DAljX)X&k+G4~8EboR@yG!8Ph@Q?HfaVZH(HcQU)9dHp_7}H_S{-GEOxr3gyz()m89HG4`nP_bp5*3QKkN zC~jPX_agI)U;`rEAac^Iex-tyveTY5LYB%3<@hh~^m& h*{PZ*;?2Dw=zLOV>B+mcv61REC9Ip$DjZJ delta 449 zcmcb_@`QPUZhfMcxu=0+WoAiGcz{ntUPw}6VpT|Hph01okxP)bzms2ig|>EJSdn`+ zmtkcadC!j zYKoDmsiCEUXHitHW4eN4pl6j)X1ITFsdj34Zep=xV!c6LkXx9cd3uqfslI1Wriq`c zQ)ZrbVxR?=MWAPCnoE9mcx6&jW>9#TzQ0#as(H45N`7voe_ljzUWHexx0|+Uq$k+A z2#8IV1^xy_S#Cz=fx&@hQU2*ZncLL%CjQ1AW%nYoT!-C7L`+Lkdcjas2`aZ2Fs zBfN2p9e38BF4fkPzJ5F}V}7yi5reQktDeU}Zd@mA-BTHlKew)$`nWSU{%}~UZ{oWm nzV_qUCK;>$um1ew?!+CFHr7Sj>=$_U`{;RP$!%8O9@zl^AG4tK diff --git a/secrets/infra/headscale-oidc-client-secret.age b/secrets/infra/headscale-oidc-client-secret.age index 925512cb9692b0538eddfa2c4f83fa98fe8b0c66..81cff1c5b9e23216013b14e69a1819e0a6a400ab 100644 GIT binary patch delta 562 zcmaFLe3@l}PQ687iAPwFMM`Q~Mq*L0WkG>sv5$XVd4!>(afqpNp`TAiqK}KaiD99E zD_2RNQ>c$|cvXf;j%RtUr;~e@kB4b`R&kkgMtHuNyO&91MMSc8vR_I{D3`9CLUD11 zZfc5=si~o*f@e`wu4B4FNV<=4Nm06^v9?KOSw(?oL4BEik!xytR&G>ccCc%OxszF{ zQE{?KMMh9LmtVMve|nf#R7RLrXoRnmM`B`Dwt<;*g{xOqRZ(efVThlnk-k}wi?4|z zx^)KG;Q{G^3Z+i@#cmN&K`N$871jqQLd5o{vNJQPVV_` zkz76@MFojP{zayl<&Lhd0lwNrAyJhs6<%&$5&C(7#UYUeektz3>E0o!#USeqbIytMF>rHo$;k96s_^8xz_*0CpLw>ValD+>z5PY}ZAsf5 z>$bH?mgOCq>8^j}r0y%bj(*cG`?lZnj_G*a-Swmpf39<$bE8xgnXpTp_;o zrJ=^DeuO%Cr5TxFv;#yiVlX$mtS5Cm|^pDUV6-w-=DT{J=1x!L)_w=ZrjDAmOr`S rC%GO>TATKLPE48PoNtpt9ZpUDoEB1N%4hKGN7?CFe_GV{RPF=-Z}+BS diff --git a/secrets/infra/tailscale-oidc-client-secret.age b/secrets/infra/tailscale-oidc-client-secret.age index e88c2d1b5cf2b362996e3a4403d3f70773e12a72..3c3c07468aa611765480683d977310e72ae7ca83 100644 GIT binary patch delta 561 zcmaFDe2Ha(PJMW3p?*+eX_953ex^@iQDJ~{rhi_taYac)KvYIVlDV&AQdM}BX{d3P 
zCs#>kiHVU>N~l3nS($m2zN@FXdAW~vYEe){MOl)0hLd4QsY#xte??)TCzr0BLUD11 zZfc5=si~o*f@e`wu4B4FmA^?;U}~neuYq5Vt8th~M1536PF1e4Z>f`$exz}LW0FaJ zc6Nw)nU9etS7NePu2W8ARjN^eMX;k~p^sa(hlN{yMxnsS#MVMaJd@mY$Z0p+13rE+#%zL6${T{$Y;pj_yugi5?b?Am2qmJQfsI zP#jqp8f@xmQW6xB;cf0}YU!4i?Pz3_p`C10RbiBF;o(>0=TqwH$rWK)T#@P&=xLno zmFkj`*rKZS`g%J7|ErptE-S+oT{H$7-Eo|SXz`7 zl$jJ6>ggHoY~dB@?Hv^4n-h}gRNz>iUgD8v*=KVN|PC!0SXJjJOBUy delta 450 zcmcb_@`QPUPQ7aapKYs+oCGP?Sr!OLl2?N?N*Wgn4OTmXk?nIhU@TLUD11 zZfc5=si~o*f@e`wu4B4_qg$T2Q-FzEc1dWYhh<=yU%gjAm`jRZvTK1?R%BtWe^F*} zS%hVTdzgU*S5!v6wzpqNwqaRlV3et8mSIJxM@n(IW0paPL2!OnroM->S%G0maagh= z$hruKO~qxV2ANS=PMKNZ-f0D9p4vWXK{>8oM#Y7BS*H3%xds_Q;Z>%7g+3wKTplU) zX=VA^7Uor{xxvnE;V!{BRoMl(Wgb4IWyuC5Srx7&2KmXEApu3s>0G+Hx(ZI&fhi{b zM!EW#mKH@hPRV{5$yK3d;aLV5CHa|tK7lTI2H|Fv-g$=l$y}oRyotMbR=wM`U+G}@ zWuF`G8kMd>jR0JGji*aSR*x1E70{}D+pvC|I From 4d3257995b2f2f4681aa41a3ec167238345bbde1 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sat, 18 Apr 2026 19:10:18 -0700 Subject: [PATCH 31/59] Add Authentik SSO apps for Linear and 1Password --- Scripts/authentik-sync-1password-oidc.sh | 243 +++++++++++++ Scripts/authentik-sync-linear-saml.sh | 334 ++++++++++++++++++ ...ntik-backed-team-chat-and-workspace-sso.md | 152 ++++++++ nixos/hosts/burrow-forge/default.nix | 3 + nixos/modules/burrow-authentik.nix | 153 ++++++++ 5 files changed, 885 insertions(+) create mode 100755 Scripts/authentik-sync-1password-oidc.sh create mode 100755 Scripts/authentik-sync-linear-saml.sh create mode 100644 evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md diff --git a/Scripts/authentik-sync-1password-oidc.sh b/Scripts/authentik-sync-1password-oidc.sh new file mode 100755 index 0000000..f523d9a --- /dev/null +++ b/Scripts/authentik-sync-1password-oidc.sh @@ -0,0 +1,243 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" 
+application_slug="${AUTHENTIK_ONEPASSWORD_APPLICATION_SLUG:-onepassword}" +application_name="${AUTHENTIK_ONEPASSWORD_APPLICATION_NAME:-1Password}" +provider_name="${AUTHENTIK_ONEPASSWORD_PROVIDER_NAME:-1Password}" +template_slug="${AUTHENTIK_ONEPASSWORD_TEMPLATE_SLUG:-ts}" +client_id="${AUTHENTIK_ONEPASSWORD_CLIENT_ID:-1password.burrow.net}" +launch_url="${AUTHENTIK_ONEPASSWORD_LAUNCH_URL:-https://burrow-team.1password.com/}" +redirect_uris_json="${AUTHENTIK_ONEPASSWORD_REDIRECT_URIS_JSON:-[ + \"https://burrow-team.1password.com/sso/oidc/redirect/\", + \"onepassword://sso/oidc/redirect\" +]}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-1password-oidc.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_ONEPASSWORD_APPLICATION_SLUG + AUTHENTIK_ONEPASSWORD_APPLICATION_NAME + AUTHENTIK_ONEPASSWORD_PROVIDER_NAME + AUTHENTIK_ONEPASSWORD_TEMPLATE_SLUG + AUTHENTIK_ONEPASSWORD_CLIENT_ID + AUTHENTIK_ONEPASSWORD_LAUNCH_URL + AUTHENTIK_ONEPASSWORD_REDIRECT_URIS_JSON +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +if ! 
printf '%s' "$redirect_uris_json" | jq -e 'type == "array" and length > 0' >/dev/null; then + echo "error: AUTHENTIK_ONEPASSWORD_REDIRECT_URIS_JSON must be a non-empty JSON array" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +api_with_status() { + local method="$1" + local path="$2" + local data="${3:-}" + local response_file status + + response_file="$(mktemp)" + trap 'rm -f "$response_file"' RETURN + + if [[ -n "$data" ]]; then + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + )" + else + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + )" + fi + + printf '%s\n' "$status" + cat "$response_file" +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +wait_for_authentik + +template_provider="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -c --arg template_slug "$template_slug" '.results[]? 
| select(.assigned_application_slug == $template_slug)' \ + | head -n1 +)" + +if [[ -z "$template_provider" ]]; then + echo "error: could not resolve the Authentik OAuth provider template ${template_slug}" >&2 + exit 1 +fi + +authorization_flow="$(printf '%s\n' "$template_provider" | jq -r '.authorization_flow')" +invalidation_flow="$(printf '%s\n' "$template_provider" | jq -r '.invalidation_flow')" +property_mappings="$(printf '%s\n' "$template_provider" | jq -c '.property_mappings')" +signing_key="$(printf '%s\n' "$template_provider" | jq -r '.signing_key')" + +provider_payload="$( + jq -n \ + --arg name "$provider_name" \ + --arg authorization_flow "$authorization_flow" \ + --arg invalidation_flow "$invalidation_flow" \ + --arg client_id "$client_id" \ + --arg signing_key "$signing_key" \ + --argjson property_mappings "$property_mappings" \ + --argjson redirect_uris "$redirect_uris_json" \ + '{ + name: $name, + authorization_flow: $authorization_flow, + invalidation_flow: $invalidation_flow, + client_type: "public", + client_id: $client_id, + include_claims_in_id_token: true, + redirect_uris: ($redirect_uris | map({matching_mode: "strict", url: .})), + property_mappings: $property_mappings, + signing_key: $signing_key, + issuer_mode: "per_provider", + sub_mode: "hashed_user_id" + }' +)" + +existing_provider="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -c \ + --arg application_slug "$application_slug" \ + --arg provider_name "$provider_name" \ + '.results[]? 
| select(.assigned_application_slug == $application_slug or .name == $provider_name)' \ + | head -n1 +)" + +if [[ -n "$existing_provider" ]]; then + provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')" + api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$provider_payload" >/dev/null +else + provider_pk="$( + api POST "/api/v3/providers/oauth2/" "$provider_payload" \ + | jq -r '.pk // empty' + )" +fi + +if [[ -z "${provider_pk:-}" ]]; then + echo "error: 1Password OIDC provider did not return a primary key" >&2 + exit 1 +fi + +application_payload="$( + jq -n \ + --arg name "$application_name" \ + --arg slug "$application_slug" \ + --arg provider "$provider_pk" \ + --arg launch_url "$launch_url" \ + '{ + name: $name, + slug: $slug, + provider: ($provider | tonumber), + meta_launch_url: $launch_url, + open_in_new_tab: true, + policy_engine_mode: "any" + }' +)" + +existing_application="$( + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -c --arg slug "$application_slug" '.results[]? 
| select(.slug == $slug)' \ + | head -n1 +)" + +if [[ -n "$existing_application" ]]; then + application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" +else + create_application_result="$( + api_with_status POST "/api/v3/core/applications/" "$application_payload" + )" + create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')" + create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')" + + if [[ "$create_application_status" =~ ^20[01]$ ]]; then + application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')" + elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e ' + (.slug // [] | index("Application with this slug already exists.")) != null + or (.provider // [] | index("Application with this provider already exists.")) != null + ' >/dev/null; then + application_pk="existing-duplicate" + else + printf '%s\n' "$create_application_body" >&2 + echo "error: could not reconcile Authentik application ${application_slug}" >&2 + exit 1 + fi +fi + +if [[ -z "${application_pk:-}" ]]; then + echo "error: 1Password OIDC application did not return a primary key" >&2 + exit 1 +fi + +for _ in $(seq 1 30); do + if curl -fsS "${authentik_url}/application/o/${application_slug}/.well-known/openid-configuration" >/dev/null 2>&1; then + echo "Synced Authentik 1Password OIDC application ${application_slug} (${application_name})." + exit 0 + fi + sleep 2 +done + +echo "warning: 1Password OIDC issuer document for ${application_slug} was not immediately readable; keeping reconciled config." >&2 +echo "Synced Authentik 1Password OIDC application ${application_slug} (${application_name})." 
#!/usr/bin/env bash
# Reconcile the Authentik SAML application/provider pair used for Linear SSO.
#
# Idempotent: creates or updates the SAML property mappings, the SAML
# provider, and the Authentik application, then polls the published metadata
# endpoint until it is readable. Flow and signing-key defaults are copied
# from an existing OAuth2 provider (the "template" application).
set -euo pipefail

authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}"
bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}"
application_slug="${AUTHENTIK_LINEAR_APPLICATION_SLUG:-linear}"
application_name="${AUTHENTIK_LINEAR_APPLICATION_NAME:-Linear}"
provider_name="${AUTHENTIK_LINEAR_PROVIDER_NAME:-Linear}"
launch_url="${AUTHENTIK_LINEAR_LAUNCH_URL:-https://linear.app/burrownet}"
acs_url="${AUTHENTIK_LINEAR_ACS_URL:-}"
audience="${AUTHENTIK_LINEAR_AUDIENCE:-}"
issuer="${AUTHENTIK_LINEAR_ISSUER:-${authentik_url}/application/saml/${application_slug}/metadata/}"
default_relay_state="${AUTHENTIK_LINEAR_DEFAULT_RELAY_STATE:-}"
# Slug of the application whose OAuth2 provider supplies flow/signing-key
# defaults. Previously hard-coded to "ts" (the Burrow Tailnet template);
# kept as the default so existing callers are unaffected. This matches the
# sibling 1Password sync script, which takes its template slug from the
# environment as well.
template_slug="${AUTHENTIK_LINEAR_TEMPLATE_SLUG:-ts}"

usage() {
  cat <<'EOF'
Usage: Scripts/authentik-sync-linear-saml.sh

Required environment:
  AUTHENTIK_BOOTSTRAP_TOKEN
  AUTHENTIK_LINEAR_ACS_URL
  AUTHENTIK_LINEAR_AUDIENCE

Optional environment:
  AUTHENTIK_URL
  AUTHENTIK_LINEAR_APPLICATION_SLUG
  AUTHENTIK_LINEAR_APPLICATION_NAME
  AUTHENTIK_LINEAR_PROVIDER_NAME
  AUTHENTIK_LINEAR_LAUNCH_URL
  AUTHENTIK_LINEAR_ISSUER
  AUTHENTIK_LINEAR_DEFAULT_RELAY_STATE
  AUTHENTIK_LINEAR_TEMPLATE_SLUG
EOF
}

if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
  usage
  exit 0
fi

if [[ -z "$bootstrap_token" ]]; then
  echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2
  exit 1
fi

if [[ -z "$acs_url" ]]; then
  echo "error: AUTHENTIK_LINEAR_ACS_URL is required" >&2
  exit 1
fi

if [[ -z "$audience" ]]; then
  echo "error: AUTHENTIK_LINEAR_AUDIENCE is required" >&2
  exit 1
fi

# api METHOD PATH [JSON_BODY]
# Call the Authentik API; fails (curl -f) on any non-2xx status.
api() {
  local method="$1"
  local path="$2"
  local data="${3:-}"

  if [[ -n "$data" ]]; then
    curl -fsS \
      -X "$method" \
      -H "Authorization: Bearer ${bootstrap_token}" \
      -H "Content-Type: application/json" \
      -d "$data" \
      "${authentik_url}${path}"
  else
    curl -fsS \
      -X "$method" \
      -H "Authorization: Bearer ${bootstrap_token}" \
      "${authentik_url}${path}"
  fi
}

# api_with_status METHOD PATH [JSON_BODY]
# Like api(), but never fails on HTTP errors: prints the HTTP status code on
# the first output line followed by the response body, so callers can branch
# on e.g. a 400 "already exists" response.
api_with_status() {
  local method="$1"
  local path="$2"
  local data="${3:-}"
  local response_file status

  response_file="$(mktemp)"
  trap 'rm -f "$response_file"' RETURN

  if [[ -n "$data" ]]; then
    status="$(
      curl -sS \
        -o "$response_file" \
        -w '%{http_code}' \
        -X "$method" \
        -H "Authorization: Bearer ${bootstrap_token}" \
        -H "Content-Type: application/json" \
        -d "$data" \
        "${authentik_url}${path}"
    )"
  else
    status="$(
      curl -sS \
        -o "$response_file" \
        -w '%{http_code}' \
        -X "$method" \
        -H "Authorization: Bearer ${bootstrap_token}" \
        "${authentik_url}${path}"
    )"
  fi

  printf '%s\n' "$status"
  cat "$response_file"
}

# Block until the Authentik readiness endpoint answers (up to ~3 minutes).
wait_for_authentik() {
  for _ in $(seq 1 90); do
    if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then
      return 0
    fi
    sleep 2
  done

  echo "error: Authentik did not become ready at ${authentik_url}" >&2
  exit 1
}

# lookup_oauth_template_field FIELD
# Read FIELD from the OAuth2 provider assigned to the template application
# (slug $template_slug), so the SAML provider reuses the same flows and
# signing key.
lookup_oauth_template_field() {
  local field="$1"

  api GET "/api/v3/providers/oauth2/?page_size=200" \
    | jq -r --arg field "$field" --arg slug "$template_slug" \
        '.results[]? | select(.assigned_application_slug == $slug) | .[$field]' \
    | head -n1
}

# reconcile_property_mapping NAME SAML_NAME FRIENDLY_NAME EXPRESSION
# Create or update a SAML property mapping; prints its primary key.
reconcile_property_mapping() {
  local name="$1"
  local saml_name="$2"
  local friendly_name="$3"
  local expression="$4"
  local payload existing_pk

  payload="$(
    jq -n \
      --arg name "$name" \
      --arg saml_name "$saml_name" \
      --arg friendly_name "$friendly_name" \
      --arg expression "$expression" \
      '{
        name: $name,
        saml_name: $saml_name,
        friendly_name: $friendly_name,
        expression: $expression
      }'
  )"

  # "// empty" keeps a missing .pk from becoming the literal string "null",
  # matching the lookup style used by the other Authentik sync scripts.
  existing_pk="$(
    api GET "/api/v3/propertymappings/provider/saml/?page_size=200" \
      | jq -r --arg name "$name" '.results[]? | select(.name == $name) | .pk // empty' \
      | head -n1
  )"

  if [[ -n "$existing_pk" ]]; then
    api PATCH "/api/v3/propertymappings/provider/saml/${existing_pk}/" "$payload" >/dev/null
    printf '%s\n' "$existing_pk"
  else
    api POST "/api/v3/propertymappings/provider/saml/" "$payload" | jq -r '.pk // empty'
  fi
}

wait_for_authentik

authorization_flow="$(lookup_oauth_template_field authorization_flow)"
invalidation_flow="$(lookup_oauth_template_field invalidation_flow)"
signing_kp="$(lookup_oauth_template_field signing_key)"

if [[ -z "$authorization_flow" || -z "$invalidation_flow" || -z "$signing_kp" ]]; then
  echo "error: could not resolve Authentik provider defaults from template application ${template_slug}" >&2
  exit 1
fi

email_mapping_pk="$(
  reconcile_property_mapping \
    "Burrow Linear SAML Email" \
    "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress" \
    "email" \
    'return request.user.email'
)"

name_mapping_pk="$(
  reconcile_property_mapping \
    "Burrow Linear SAML Name" \
    "name" \
    "name" \
    'return request.user.name or request.user.username'
)"

first_name_mapping_pk="$(
  reconcile_property_mapping \
    "Burrow Linear SAML First Name" \
    "firstName" \
    "firstName" \
    $'parts = (request.user.name or "").split(" ", 1)\nif len(parts) > 0 and parts[0]:\n    return parts[0]\nreturn request.user.username'
)"

last_name_mapping_pk="$(
  reconcile_property_mapping \
    "Burrow Linear SAML Last Name" \
    "lastName" \
    "lastName" \
    $'parts = (request.user.name or "").rsplit(" ", 1)\nif len(parts) == 2 and parts[1]:\n    return parts[1]\nreturn request.user.username'
)"

if [[ -z "$email_mapping_pk" || -z "$name_mapping_pk" || -z "$first_name_mapping_pk" || -z "$last_name_mapping_pk" ]]; then
  echo "error: failed to reconcile Linear SAML property mappings" >&2
  exit 1
fi

# NameID is the email mapping; default_relay_state is only included when set.
provider_payload="$(
  jq -n \
    --arg name "$provider_name" \
    --arg authorization_flow "$authorization_flow" \
    --arg invalidation_flow "$invalidation_flow" \
    --arg acs_url "$acs_url" \
    --arg audience "$audience" \
    --arg issuer "$issuer" \
    --arg signing_kp "$signing_kp" \
    --arg default_relay_state "$default_relay_state" \
    --arg name_id_mapping "$email_mapping_pk" \
    --arg email_mapping "$email_mapping_pk" \
    --arg name_mapping "$name_mapping_pk" \
    --arg first_name_mapping "$first_name_mapping_pk" \
    --arg last_name_mapping "$last_name_mapping_pk" \
    '{
      name: $name,
      authorization_flow: $authorization_flow,
      invalidation_flow: $invalidation_flow,
      acs_url: $acs_url,
      audience: $audience,
      issuer: $issuer,
      signing_kp: $signing_kp,
      sign_assertion: true,
      sign_response: true,
      sp_binding: "post",
      name_id_mapping: $name_id_mapping,
      property_mappings: [
        $email_mapping,
        $name_mapping,
        $first_name_mapping,
        $last_name_mapping
      ]
    }
    + (if $default_relay_state == "" then {} else {default_relay_state: $default_relay_state} end)'
)"

# Match either by the application the provider is already assigned to or by
# provider name, so re-runs converge on the same object.
existing_provider="$(
  api GET "/api/v3/providers/saml/?page_size=200" \
    | jq -c \
        --arg application_slug "$application_slug" \
        --arg provider_name "$provider_name" \
        '.results[]? | select(.assigned_application_slug == $application_slug or .name == $provider_name)' \
    | head -n1
)"

if [[ -n "$existing_provider" ]]; then
  provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')"
  api PATCH "/api/v3/providers/saml/${provider_pk}/" "$provider_payload" >/dev/null
else
  provider_pk="$(
    api POST "/api/v3/providers/saml/" "$provider_payload" \
      | jq -r '.pk // empty'
  )"
fi

if [[ -z "${provider_pk:-}" ]]; then
  echo "error: Linear SAML provider did not return a primary key" >&2
  exit 1
fi

application_payload="$(
  jq -n \
    --arg name "$application_name" \
    --arg slug "$application_slug" \
    --arg provider "$provider_pk" \
    --arg launch_url "$launch_url" \
    '{
      name: $name,
      slug: $slug,
      provider: ($provider | tonumber),
      meta_launch_url: $launch_url,
      open_in_new_tab: true,
      policy_engine_mode: "any"
    }'
)"

existing_application="$(
  api GET "/api/v3/core/applications/?page_size=200" \
    | jq -c --arg slug "$application_slug" '.results[]? | select(.slug == $slug)' \
    | head -n1
)"

if [[ -n "$existing_application" ]]; then
  application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')"
  api PATCH "/api/v3/core/applications/${application_pk}/" "$application_payload" >/dev/null
else
  create_application_result="$(
    api_with_status POST "/api/v3/core/applications/" "$application_payload"
  )"
  create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')"
  create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')"

  if [[ "$create_application_status" =~ ^20[01]$ ]]; then
    application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')"
  elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e '
    (.slug // [] | index("Application with this slug already exists.")) != null
    or (.provider // [] | index("Application with this provider already exists.")) != null
  ' >/dev/null; then
    # A concurrent or earlier run already created it; treat as reconciled.
    application_pk="existing-duplicate"
  else
    printf '%s\n' "$create_application_body" >&2
    echo "error: could not reconcile Authentik application ${application_slug}" >&2
    exit 1
  fi
fi

if [[ -z "${application_pk:-}" ]]; then
  echo "error: Linear SAML application did not return a primary key" >&2
  exit 1
fi

# Poll the published SAML metadata (up to ~1 minute); a slow publish is only
# a warning, since the configuration itself has been reconciled.
for _ in $(seq 1 30); do
  if curl -fsS "${authentik_url}/application/saml/${application_slug}/metadata/" >/dev/null 2>&1; then
    echo "Synced Authentik Linear SAML application ${application_slug} (${application_name})."
    exit 0
  fi
  sleep 2
done

echo "warning: Linear SAML metadata for ${application_slug} was not immediately readable; keeping reconciled config." >&2
echo "Synced Authentik Linear SAML application ${application_slug} (${application_name})."
diff --git a/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md b/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md new file mode 100644 index 0000000..6c11dbc --- /dev/null +++ b/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md @@ -0,0 +1,152 @@ +# `BEP-0008` - Authentik-Backed Team Chat and Workspace Identity + +```text +Status: Draft +Proposal: BEP-0008 +Authors: gpt-5.4 +Coordinator: gpt-5.4 +Reviewers: Pending +Constitution Sections: II, III, V +Implementation PRs: Pending +Decision Date: Pending +``` + +## Summary + +Burrow should add a self-hosted team chat surface at `chat.burrow.net` and +continue the project-wide move toward Authentik as the identity authority for +external work systems. The immediate targets are a self-hosted Zulip +deployment rooted in Authentik SAML, a Linear SAML configuration when the +workspace plan supports it, and a 1Password Unlock-with-SSO deployment rooted +in the same Authentik-backed OIDC authority. + +This keeps Burrow's day-to-day coordination surfaces aligned with the same +admin groups, canonical users, and secret-handling model already used for +Forgejo, Headscale, and Tailscale. It also avoids fragmenting login state +across vendor-native Google auth flows when Burrow already operates an IdP. + +## Motivation + +- Forge, Tailnet, operator identity, and Tailscale custom OIDC are already + rooted in Authentik. Team chat, work tracking, and password-manager access + should not become separate authority islands. +- Zulip provides a self-hosted chat system under Burrow's control, which fits + the constitution better than adding another hosted chat dependency. +- Linear remains a SaaS dependency, but its workspace access should still be + derived from Burrow-managed identities and domains when the vendor plan + exposes SAML configuration. 
+- 1Password Business is another external work surface where Burrow-controlled + identities are preferable to vendor-native Google-only auth. Its current + vendor flow is OIDC-based Unlock with SSO rather than SAML, so the proposal + needs to preserve protocol accuracy instead of flattening everything into + one SAML bucket. +- Burrow already has a canonical public identity registry and a secret-backed + external-email alias map. Reusing that structure is lower-risk than + inventing per-app user bootstrap logic. + +## Detailed Design + +- Add a Burrow-managed Zulip workload on the forge host at `chat.burrow.net`. + The deployment should be repo-owned and rebuildable from Nix, even if the + runtime uses vendor-supported container images internally. +- Zulip should authenticate through Authentik SAML rather than local passwords + as the primary path. Initial bootstrap may still keep an operational escape + hatch while the deployment is being validated. +- Add Authentik-managed SAML applications for: + - Zulip at `chat.burrow.net` + - Linear using Burrow's claimed domains and Authentik metadata +- Add an Authentik-managed OIDC application for 1Password Business under the + Burrow team sign-in address. +- Treat Zulip and Linear as downstream applications of the same identity + authority, and treat 1Password as part of that same authority even though + its vendor protocol is OIDC rather than SAML. The source of truth remains: + - public identities and admin intent in `contributors.nix` + - private alias mappings and external accounts in agenix-encrypted secrets +- Keep app-specific configuration in dedicated reconciliation code or module + options instead of hand-edited UI state. +- Prefer service-specific reconciliation over ad hoc manual setup so rebuilds + and host replacement converge automatically. 
+- Model 1Password according to the vendor's actual integration contract: + - OIDC Authorization Code Flow with PKCE + - public client rather than a confidential client + - no Burrow-side dependence on a stored client secret unless the vendor flow + changes + +## Security and Operational Considerations + +- Do not store external personal email mappings in public registry files. + Public tree data may include Burrow usernames and canonical `@burrow.net` + addresses, but external aliases must stay in encrypted secrets. +- Zulip internal service credentials, Django secret material, and any mail + credentials must have explicit storage and rotation paths. +- Linear SAML must not become Burrow's only admin recovery path. At least one + owner login path outside the enforced SAML flow should remain available until + rollout is proven. +- 1Password Owners cannot be forced onto Unlock with SSO during initial setup. + Burrow should preserve the owner recovery path and treat OIDC rollout as a + scoped migration for non-owner users first. +- If Zulip is deployed without production-grade outbound email at first, that + limitation must be documented and treated as an operational constraint, not a + hidden assumption. +- Rollback should be straightforward: + - disable or stop the Zulip module + - remove the Authentik SAML apps + - remove the Authentik OIDC app used for 1Password if necessary + - leave the underlying Burrow identities unchanged + +## Contributor Playbook + +- Define the app and identity intent in the repository before modifying the + forge host. +- Add or update Nix modules so `burrow-forge` can rebuild Zulip and the + corresponding Authentik SAML configuration from the tree. 
+- Verify: + - `chat.burrow.net` serves a working Zulip login surface + - Authentik exposes working metadata for Zulip and Linear + - Authentik exposes a working OIDC issuer for 1Password + - users in Burrow admin groups receive the expected access on first login +- Record concrete evidence for: + - host deployment generation + - Authentik reconciliation success + - Zulip login success + - Linear SAML configuration state + - 1Password Unlock with SSO configuration state + +## Alternatives Considered + +- Use Zulip Cloud instead of self-hosting. Rejected because the ask is to host + chat under `chat.burrow.net`, and Burrow already operates a forge host with a + self-managed identity plane. +- Keep Linear on Google-native login. Rejected because it leaves Burrow work + access outside the project's operator and group model. +- Treat 1Password as a SAML app for consistency. Rejected because the live + vendor flow is OIDC and Burrow should not pretend otherwise in repo-owned + infrastructure. +- Add per-app manual Authentik configuration without repository automation. + Rejected because it violates Burrow's infrastructure-in-repo commitment. + +## Impact on Other Work + +- Extends Burrow's Authentik role from control-plane identity into team-work + surfaces. +- Introduces a persistent chat workload on the forge host, with resource and + monitoring implications. +- Creates a likely follow-up for SCIM or richer group synchronization if Linear + or Zulip role mapping needs to become fully declarative later. +- Adds a second OIDC relying party beyond Forgejo, Headscale, and Tailscale, + which raises the importance of keeping Burrow's Authentik scope mappings and + redirect handling consistent across applications. + +## Decision + +Pending. 
+ +## References + +- `CONSTITUTION.md` +- `contributors.nix` +- `evolution/proposals/BEP-0004-hosted-mail-and-saas-identity.md` +- Authentik docs: SAML provider and metadata endpoints +- Zulip docs: SAML authentication and docker deployment +- Linear docs: SAML and access control +- 1Password docs: Unlock with SSO using OpenID Connect diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 96eca4f..3f73346 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -207,6 +207,9 @@ in userGroupName = contributors.groups.users; adminGroupName = contributors.groups.admins; bootstrapUsers = bootstrapUsers; + linearAcsUrl = "https://api.linear.app/auth/sso/d0ca13dc-ac41-4824-8aab-e0ca352fc3de/acs"; + linearAudience = "https://auth.linear.app/sso/d0ca13dc-ac41-4824-8aab-e0ca352fc3de"; + linearDefaultRelayState = "https://linear.app/auth/sso/d0ca13dc-ac41-4824-8aab-e0ca352fc3de"; }; services.burrow.headscale = { diff --git a/nixos/modules/burrow-authentik.nix b/nixos/modules/burrow-authentik.nix index 2fa83da..5b04de2 100644 --- a/nixos/modules/burrow-authentik.nix +++ b/nixos/modules/burrow-authentik.nix @@ -11,6 +11,8 @@ let directorySyncScript = ../../Scripts/authentik-sync-burrow-directory.sh; forgejoOidcSyncScript = ../../Scripts/authentik-sync-forgejo-oidc.sh; tailscaleOidcSyncScript = ../../Scripts/authentik-sync-tailscale-oidc.sh; + onePasswordOidcSyncScript = ../../Scripts/authentik-sync-1password-oidc.sh; + linearSamlSyncScript = ../../Scripts/authentik-sync-linear-saml.sh; googleSourceSyncScript = ../../Scripts/authentik-sync-google-source.sh; tailnetAuthFlowSyncScript = ../../Scripts/authentik-sync-tailnet-auth-flow.sh; authentikBlueprint = pkgs.writeText "burrow-authentik-blueprint.yaml" '' @@ -150,6 +152,63 @@ in description = "Host-local file containing the Authentik Tailscale OIDC client secret."; }; + onePasswordDomain = lib.mkOption { + type = lib.types.str; + default = 
"burrow-team.1password.com"; + description = "1Password team sign-in domain used for Burrow Unlock with SSO."; + }; + + onePasswordProviderSlug = lib.mkOption { + type = lib.types.str; + default = "onepassword"; + description = "Authentik application slug for 1Password Unlock with SSO."; + }; + + onePasswordClientId = lib.mkOption { + type = lib.types.str; + default = "1password.burrow.net"; + description = "Public OIDC client ID Authentik should present to 1Password."; + }; + + onePasswordRedirectUris = lib.mkOption { + type = lib.types.listOf lib.types.str; + default = [ + "https://burrow-team.1password.com/sso/oidc/redirect/" + "onepassword://sso/oidc/redirect" + ]; + description = "Allowed 1Password OIDC redirect URIs."; + }; + + linearProviderSlug = lib.mkOption { + type = lib.types.str; + default = "linear"; + description = "Authentik application slug for Linear SAML."; + }; + + linearAcsUrl = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Linear SAML ACS URL."; + }; + + linearAudience = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Linear SAML audience/entity identifier."; + }; + + linearLaunchUrl = lib.mkOption { + type = lib.types.str; + default = "https://linear.app/burrownet"; + description = "Linear workspace URL exposed in Authentik."; + }; + + linearDefaultRelayState = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Optional Linear relay state or login URL for IdP-initiated launches."; + }; + forgejoClientId = lib.mkOption { type = lib.types.str; default = "git.burrow.net"; @@ -718,6 +777,100 @@ EOF ''; }; + systemd.services.burrow-authentik-1password-oidc = { + description = "Reconcile the Burrow Authentik 1Password OIDC application"; + after = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wantedBy = [ 
"multi-user.target" ]; + restartTriggers = [ + onePasswordOidcSyncScript + cfg.envFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_ONEPASSWORD_APPLICATION_SLUG=${lib.escapeShellArg cfg.onePasswordProviderSlug} + export AUTHENTIK_ONEPASSWORD_APPLICATION_NAME=1Password + export AUTHENTIK_ONEPASSWORD_PROVIDER_NAME=1Password + export AUTHENTIK_ONEPASSWORD_TEMPLATE_SLUG=${lib.escapeShellArg cfg.headscaleProviderSlug} + export AUTHENTIK_ONEPASSWORD_CLIENT_ID=${lib.escapeShellArg cfg.onePasswordClientId} + export AUTHENTIK_ONEPASSWORD_LAUNCH_URL=https://${cfg.onePasswordDomain}/ + export AUTHENTIK_ONEPASSWORD_REDIRECT_URIS_JSON='${builtins.toJSON cfg.onePasswordRedirectUris}' + + ${pkgs.bash}/bin/bash ${onePasswordOidcSyncScript} + ''; + }; + + systemd.services.burrow-authentik-linear-saml = lib.mkIf ( + cfg.linearAcsUrl != null && cfg.linearAudience != null + ) { + description = "Reconcile the Burrow Authentik Linear SAML application"; + after = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + linearSamlSyncScript + cfg.envFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_LINEAR_APPLICATION_SLUG=${lib.escapeShellArg cfg.linearProviderSlug} + export AUTHENTIK_LINEAR_APPLICATION_NAME=Linear + export AUTHENTIK_LINEAR_PROVIDER_NAME=Linear + export AUTHENTIK_LINEAR_ACS_URL=${lib.escapeShellArg 
cfg.linearAcsUrl} + export AUTHENTIK_LINEAR_AUDIENCE=${lib.escapeShellArg cfg.linearAudience} + export AUTHENTIK_LINEAR_LAUNCH_URL=${lib.escapeShellArg cfg.linearLaunchUrl} + ${lib.optionalString (cfg.linearDefaultRelayState != null) '' + export AUTHENTIK_LINEAR_DEFAULT_RELAY_STATE=${lib.escapeShellArg cfg.linearDefaultRelayState} + ''} + + ${pkgs.bash}/bin/bash ${linearSamlSyncScript} + ''; + }; + services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' encode gzip zstd reverse_proxy 127.0.0.1:${toString cfg.port} From ebcfc4bf8d157395fc23bb0f0ccabb53a3910b82 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sat, 18 Apr 2026 19:23:53 -0700 Subject: [PATCH 32/59] Add Linear SCIM role sync --- Scripts/authentik-sync-linear-scim.sh | 311 ++++++++++++++++++ contributors.nix | 5 + ...ntik-backed-team-chat-and-workspace-sso.md | 8 + nixos/hosts/burrow-forge/default.nix | 14 + nixos/modules/burrow-authentik.nix | 90 +++++ secrets.nix | 1 + secrets/infra/linear-scim-token.age | 11 + 7 files changed, 440 insertions(+) create mode 100644 Scripts/authentik-sync-linear-scim.sh create mode 100644 secrets/infra/linear-scim-token.age diff --git a/Scripts/authentik-sync-linear-scim.sh b/Scripts/authentik-sync-linear-scim.sh new file mode 100644 index 0000000..b689212 --- /dev/null +++ b/Scripts/authentik-sync-linear-scim.sh @@ -0,0 +1,311 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +application_slug="${AUTHENTIK_LINEAR_APPLICATION_SLUG:-linear}" +provider_name="${AUTHENTIK_LINEAR_SCIM_PROVIDER_NAME:-Linear SCIM}" +scim_url="${AUTHENTIK_LINEAR_SCIM_URL:-}" +scim_token_file="${AUTHENTIK_LINEAR_SCIM_TOKEN_FILE:-}" +user_identifier="${AUTHENTIK_LINEAR_SCIM_USER_IDENTIFIER:-email}" +owner_group="${AUTHENTIK_LINEAR_OWNER_GROUP:-linear-owners}" +admin_group="${AUTHENTIK_LINEAR_ADMIN_GROUP:-linear-admins}" +guest_group="${AUTHENTIK_LINEAR_GUEST_GROUP:-linear-guests}" + 
+usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-linear-scim.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + AUTHENTIK_LINEAR_SCIM_URL + AUTHENTIK_LINEAR_SCIM_TOKEN_FILE + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_LINEAR_APPLICATION_SLUG + AUTHENTIK_LINEAR_SCIM_PROVIDER_NAME + AUTHENTIK_LINEAR_SCIM_USER_IDENTIFIER + AUTHENTIK_LINEAR_OWNER_GROUP + AUTHENTIK_LINEAR_ADMIN_GROUP + AUTHENTIK_LINEAR_GUEST_GROUP +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +if [[ -z "$scim_url" ]]; then + echo "error: AUTHENTIK_LINEAR_SCIM_URL is required" >&2 + exit 1 +fi + +if [[ -z "$scim_token_file" || ! -s "$scim_token_file" ]]; then + echo "error: AUTHENTIK_LINEAR_SCIM_TOKEN_FILE is required and must be readable" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +lookup_group_pk() { + local group_name="$1" + + api GET "/api/v3/core/groups/?page_size=200&search=${group_name}" \ + | jq -r --arg name "$group_name" '.results[]? 
| select(.name == $name) | .pk // empty' \ + | head -n1 +} + +ensure_group() { + local group_name="$1" + local payload group_pk + + payload="$(jq -cn --arg name "$group_name" '{name: $name}')" + group_pk="$(lookup_group_pk "$group_name")" + + if [[ -n "$group_pk" ]]; then + api PATCH "/api/v3/core/groups/${group_pk}/" "$payload" >/dev/null + else + group_pk="$( + api POST "/api/v3/core/groups/" "$payload" \ + | jq -r '.pk // empty' + )" + fi + + if [[ -z "$group_pk" ]]; then + echo "error: could not reconcile Authentik group ${group_name}" >&2 + exit 1 + fi + + printf '%s\n' "$group_pk" +} + +lookup_application() { + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -c --arg slug "$application_slug" '.results[]? | select(.slug == $slug)' \ + | head -n1 +} + +lookup_scim_provider() { + api GET "/api/v3/providers/scim/?page_size=200" \ + | jq -c \ + --arg application_slug "$application_slug" \ + --arg provider_name "$provider_name" \ + '.results[]? | select(.assigned_backchannel_application_slug == $application_slug or .name == $provider_name)' \ + | head -n1 +} + +lookup_scim_mapping_pk() { + local managed_name="$1" + + api GET "/api/v3/propertymappings/provider/scim/?page_size=200" \ + | jq -r --arg managed "$managed_name" '.results[]? | select(.managed == $managed) | .pk // empty' \ + | head -n1 +} + +reconcile_property_mapping() { + local name="$1" + local expression="$2" + local payload existing_pk + + payload="$( + jq -n \ + --arg name "$name" \ + --arg expression "$expression" \ + '{ + name: $name, + expression: $expression + }' + )" + + existing_pk="$( + api GET "/api/v3/propertymappings/provider/scim/?page_size=200" \ + | jq -r --arg name "$name" '.results[]? 
| select(.name == $name) | .pk // empty' \ + | head -n1 + )" + + if [[ -n "$existing_pk" ]]; then + api PATCH "/api/v3/propertymappings/provider/scim/${existing_pk}/" "$payload" >/dev/null + printf '%s\n' "$existing_pk" + else + api POST "/api/v3/propertymappings/provider/scim/" "$payload" \ + | jq -r '.pk // empty' + fi +} + +sync_object() { + local provider_pk="$1" + local model="$2" + local object_id="$3" + + api POST "/api/v3/providers/scim/${provider_pk}/sync/object/" "$( + jq -cn \ + --arg model "$model" \ + --arg object_id "$object_id" \ + '{ + sync_object_model: $model, + sync_object_id: $object_id, + override_dry_run: false + }' + )" >/dev/null +} + +wait_for_authentik + +group_mapping_pk="$(lookup_scim_mapping_pk "goauthentik.io/providers/scim/group")" +case "$user_identifier" in + email) + user_mapping_expression=$'# Some implementations require givenName and familyName to be set\ngivenName, familyName = request.user.name, " "\nformatted = request.user.name + " "\nif " " in request.user.name:\n givenName, _, familyName = request.user.name.partition(" ")\n formatted = request.user.name\n\navatar = request.user.avatar\nphotos = None\nif "://" in avatar:\n photos = [{"value": avatar, "type": "photo"}]\n\nlocale = request.user.locale()\nif locale == "":\n locale = None\n\nemails = []\nif request.user.email != "":\n emails = [{\n "value": request.user.email,\n "type": "other",\n "primary": True,\n }]\n\nidentifier = request.user.email\nif identifier == "":\n identifier = request.user.username\n\nreturn {\n "userName": identifier,\n "name": {\n "formatted": formatted,\n "givenName": givenName,\n "familyName": familyName,\n },\n "displayName": request.user.name,\n "photos": photos,\n "locale": locale,\n "active": request.user.is_active,\n "emails": emails,\n}' + ;; + username) + user_mapping_expression=$'# Some implementations require givenName and familyName to be set\ngivenName, familyName = request.user.name, " "\nformatted = request.user.name + " "\nif " " 
in request.user.name:\n givenName, _, familyName = request.user.name.partition(" ")\n formatted = request.user.name\n\navatar = request.user.avatar\nphotos = None\nif "://" in avatar:\n photos = [{"value": avatar, "type": "photo"}]\n\nlocale = request.user.locale()\nif locale == "":\n locale = None\n\nemails = []\nif request.user.email != "":\n emails = [{\n "value": request.user.email,\n "type": "other",\n "primary": True,\n }]\nreturn {\n "userName": request.user.username,\n "name": {\n "formatted": formatted,\n "givenName": givenName,\n "familyName": familyName,\n },\n "displayName": request.user.name,\n "photos": photos,\n "locale": locale,\n "active": request.user.is_active,\n "emails": emails,\n}' + ;; + *) + echo "error: unsupported AUTHENTIK_LINEAR_SCIM_USER_IDENTIFIER value: ${user_identifier}" >&2 + exit 1 + ;; +esac +user_mapping_pk="$(reconcile_property_mapping "Burrow Linear SCIM User" "$user_mapping_expression")" + +if [[ -z "$user_mapping_pk" || -z "$group_mapping_pk" ]]; then + echo "error: could not resolve managed Authentik SCIM property mappings" >&2 + exit 1 +fi + +owner_group_pk="$(ensure_group "$owner_group")" +admin_group_pk="$(ensure_group "$admin_group")" +guest_group_pk="$(ensure_group "$guest_group")" + +provider_payload="$( + jq -n \ + --arg name "$provider_name" \ + --arg url "$scim_url" \ + --arg token "$(tr -d '\r\n' < "$scim_token_file")" \ + --arg user_mapping_pk "$user_mapping_pk" \ + --arg group_mapping_pk "$group_mapping_pk" \ + --arg owner_group_pk "$owner_group_pk" \ + --arg admin_group_pk "$admin_group_pk" \ + --arg guest_group_pk "$guest_group_pk" \ + '{ + name: $name, + url: $url, + token: $token, + auth_mode: "token", + verify_certificates: true, + compatibility_mode: "default", + property_mappings: [$user_mapping_pk], + property_mappings_group: [$group_mapping_pk], + group_filters: [ + $owner_group_pk, + $admin_group_pk, + $guest_group_pk + ], + dry_run: false + }' +)" + +existing_provider="$(lookup_scim_provider)" +if [[ 
-n "$existing_provider" ]]; then + provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')" + api PATCH "/api/v3/providers/scim/${provider_pk}/" "$provider_payload" >/dev/null +else + provider_pk="$( + api POST "/api/v3/providers/scim/" "$provider_payload" \ + | jq -r '.pk // empty' + )" +fi + +if [[ -z "${provider_pk:-}" ]]; then + echo "error: Linear SCIM provider did not return a primary key" >&2 + exit 1 +fi + +application="$(lookup_application)" +if [[ -z "$application" ]]; then + echo "error: could not resolve Authentik application ${application_slug}" >&2 + exit 1 +fi + +application_pk="$(printf '%s\n' "$application" | jq -r '.pk')" +application_payload="$( + printf '%s\n' "$application" \ + | jq \ + --arg provider_pk "$provider_pk" \ + '{ + name: .name, + slug: .slug, + provider: .provider, + backchannel_providers: ((.backchannel_providers // []) + [($provider_pk | tonumber)] | unique), + open_in_new_tab: .open_in_new_tab, + meta_launch_url: .meta_launch_url, + policy_engine_mode: .policy_engine_mode + }' +)" +api PATCH "/api/v3/core/applications/${application_pk}/" "$application_payload" >/dev/null + +group_pks_json="$(jq -cn --arg owner "$owner_group_pk" --arg admin "$admin_group_pk" --arg guest "$guest_group_pk" '[$owner, $admin, $guest]')" +user_pks_json="$( + api GET "/api/v3/core/users/?page_size=200" \ + | jq -c \ + --argjson group_pks "$group_pks_json" \ + '[.results[]? + | select( + ([((.groups // [])[] | tostring)] as $user_groups + | ($group_pks | map(. 
as $wanted | ($user_groups | index($wanted)) != null) | any)) + ) + | .pk]' +)" + +while IFS= read -r group_pk; do + [[ -z "$group_pk" ]] && continue + sync_object "$provider_pk" "authentik.core.models.Group" "$group_pk" +done < <(printf '%s\n' "$group_pks_json" | jq -r '.[]') + +while IFS= read -r user_pk; do + [[ -z "$user_pk" ]] && continue + sync_object "$provider_pk" "authentik.core.models.User" "$user_pk" +done < <(printf '%s\n' "$user_pks_json" | jq -r '.[]') + +status_json="$(api GET "/api/v3/providers/scim/${provider_pk}/sync/status/")" +if ! printf '%s\n' "$status_json" | jq -e '.task_count >= 0' >/dev/null 2>&1; then + echo "error: could not read Linear SCIM sync status for provider ${provider_pk}" >&2 + exit 1 +fi + +echo "Synced Authentik Linear SCIM provider ${provider_name} (${provider_pk}) with groups ${owner_group}, ${admin_group}, ${guest_group}." diff --git a/contributors.nix b/contributors.nix index df76a01..60501d1 100644 --- a/contributors.nix +++ b/contributors.nix @@ -2,6 +2,11 @@ groups = { users = "burrow-users"; admins = "burrow-admins"; + linear = { + owners = "linear-owners"; + admins = "linear-admins"; + guests = "linear-guests"; + }; }; identities = { diff --git a/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md b/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md index 6c11dbc..63e0994 100644 --- a/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md +++ b/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md @@ -55,6 +55,8 @@ across vendor-native Google auth flows when Burrow already operates an IdP. - Add Authentik-managed SAML applications for: - Zulip at `chat.burrow.net` - Linear using Burrow's claimed domains and Authentik metadata +- Add an Authentik-managed SCIM backchannel for Linear so Burrow can push + role groups declaratively instead of hand-maintaining workspace roles. 
- Add an Authentik-managed OIDC application for 1Password Business under the Burrow team sign-in address. - Treat Zulip and Linear as downstream applications of the same identity @@ -66,6 +68,10 @@ across vendor-native Google auth flows when Burrow already operates an IdP. options instead of hand-edited UI state. - Prefer service-specific reconciliation over ad hoc manual setup so rebuilds and host replacement converge automatically. +- Derive Linear SCIM role groups from Burrow's canonical identity metadata. + If Burrow-wide admin intent says a user is an operator/admin, the repo-owned + configuration should map that intent onto the Linear push group without a + second manual roster. - Model 1Password according to the vendor's actual integration contract: - OIDC Authorization Code Flow with PKCE - public client rather than a confidential client @@ -82,6 +88,8 @@ across vendor-native Google auth flows when Burrow already operates an IdP. - Linear SAML must not become Burrow's only admin recovery path. At least one owner login path outside the enforced SAML flow should remain available until rollout is proven. +- Linear SCIM group push should be role-scoped and explicit. Burrow should + avoid blanket ownership mapping unless that intent is recorded in the repo. - 1Password Owners cannot be forced onto Unlock with SSO during initial setup. Burrow should preserve the owner recovery path and treat OIDC rollout as a scoped migration for non-owner users first. diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 3f73346..0121f92 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -3,6 +3,7 @@ let contributors = import ../../../contributors.nix; identities = contributors.identities; + linearGroups = contributors.groups.linear; stripNewline = value: lib.replaceStrings [ "\n" ] [ "" ] value; authentikPasswordSecretPath = identity: if identity ? 
authentikPasswordSecret @@ -15,6 +16,7 @@ let name = identity.displayName; email = identity.canonicalEmail; isAdmin = identity.isAdmin or false; + groups = lib.optionals (identity.isAdmin or false) [ linearGroups.owners ]; passwordFile = authentikPasswordSecretPath identity; } ) @@ -111,6 +113,12 @@ in group = "root"; mode = "0400"; }; + age.secrets.burrowLinearScimToken = { + file = ../../../secrets/infra/linear-scim-token.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; age.secrets.burrowAuthentikGoogleClientId = { file = ../../../secrets/infra/authentik-google-client-id.age; owner = "root"; @@ -210,6 +218,12 @@ in linearAcsUrl = "https://api.linear.app/auth/sso/d0ca13dc-ac41-4824-8aab-e0ca352fc3de/acs"; linearAudience = "https://auth.linear.app/sso/d0ca13dc-ac41-4824-8aab-e0ca352fc3de"; linearDefaultRelayState = "https://linear.app/auth/sso/d0ca13dc-ac41-4824-8aab-e0ca352fc3de"; + linearScimUrl = "https://api.linear.app/auth/scim/d0ca13dc-ac41-4824-8aab-e0ca352fc3de"; + linearScimTokenFile = config.age.secrets.burrowLinearScimToken.path; + linearScimUserIdentifier = "email"; + linearOwnerGroupName = linearGroups.owners; + linearAdminGroupName = linearGroups.admins; + linearGuestGroupName = linearGroups.guests; }; services.burrow.headscale = { diff --git a/nixos/modules/burrow-authentik.nix b/nixos/modules/burrow-authentik.nix index 5b04de2..772adc4 100644 --- a/nixos/modules/burrow-authentik.nix +++ b/nixos/modules/burrow-authentik.nix @@ -13,6 +13,7 @@ let tailscaleOidcSyncScript = ../../Scripts/authentik-sync-tailscale-oidc.sh; onePasswordOidcSyncScript = ../../Scripts/authentik-sync-1password-oidc.sh; linearSamlSyncScript = ../../Scripts/authentik-sync-linear-saml.sh; + linearScimSyncScript = ../../Scripts/authentik-sync-linear-scim.sh; googleSourceSyncScript = ../../Scripts/authentik-sync-google-source.sh; tailnetAuthFlowSyncScript = ../../Scripts/authentik-sync-tailnet-auth-flow.sh; authentikBlueprint = pkgs.writeText 
"burrow-authentik-blueprint.yaml" '' @@ -209,6 +210,42 @@ in description = "Optional Linear relay state or login URL for IdP-initiated launches."; }; + linearScimUrl = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Linear SCIM base connector URL."; + }; + + linearScimTokenFile = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Host-local file containing the Linear SCIM bearer token."; + }; + + linearScimUserIdentifier = lib.mkOption { + type = lib.types.str; + default = "email"; + description = "Linear SCIM unique identifier field for users."; + }; + + linearOwnerGroupName = lib.mkOption { + type = lib.types.str; + default = "linear-owners"; + description = "Authentik group name that should map to Linear owners."; + }; + + linearAdminGroupName = lib.mkOption { + type = lib.types.str; + default = "linear-admins"; + description = "Authentik group name that should map to Linear admins."; + }; + + linearGuestGroupName = lib.mkOption { + type = lib.types.str; + default = "linear-guests"; + description = "Authentik group name that should map to Linear guests."; + }; + forgejoClientId = lib.mkOption { type = lib.types.str; default = "git.burrow.net"; @@ -871,6 +908,59 @@ EOF ''; }; + systemd.services.burrow-authentik-linear-scim = lib.mkIf ( + cfg.linearScimUrl != null && cfg.linearScimTokenFile != null + ) { + description = "Reconcile the Burrow Authentik Linear SCIM provider"; + after = [ + "burrow-authentik-ready.service" + "burrow-authentik-directory.service" + "burrow-authentik-linear-saml.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "burrow-authentik-directory.service" + "burrow-authentik-linear-saml.service" + "network-online.target" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + linearScimSyncScript + cfg.envFile + cfg.linearScimTokenFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + 
serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_LINEAR_APPLICATION_SLUG=${lib.escapeShellArg cfg.linearProviderSlug} + export AUTHENTIK_LINEAR_SCIM_PROVIDER_NAME="Linear SCIM" + export AUTHENTIK_LINEAR_SCIM_URL=${lib.escapeShellArg cfg.linearScimUrl} + export AUTHENTIK_LINEAR_SCIM_TOKEN_FILE=${lib.escapeShellArg cfg.linearScimTokenFile} + export AUTHENTIK_LINEAR_SCIM_USER_IDENTIFIER=${lib.escapeShellArg cfg.linearScimUserIdentifier} + export AUTHENTIK_LINEAR_OWNER_GROUP=${lib.escapeShellArg cfg.linearOwnerGroupName} + export AUTHENTIK_LINEAR_ADMIN_GROUP=${lib.escapeShellArg cfg.linearAdminGroupName} + export AUTHENTIK_LINEAR_GUEST_GROUP=${lib.escapeShellArg cfg.linearGuestGroupName} + + ${pkgs.bash}/bin/bash ${linearScimSyncScript} + ''; + }; + services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' encode gzip zstd reverse_proxy 127.0.0.1:${toString cfg.port} diff --git a/secrets.nix b/secrets.nix index 32d7882..1a6dce0 100644 --- a/secrets.nix +++ b/secrets.nix @@ -23,5 +23,6 @@ in "secrets/infra/forgejo-nsc-dispatcher-config.age".publicKeys = burrowForgeRecipients; "secrets/infra/forgejo-nsc-token.age".publicKeys = burrowForgeRecipients; "secrets/infra/headscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients; + "secrets/infra/linear-scim-token.age".publicKeys = burrowForgeRecipients; "secrets/infra/tailscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients; } diff --git a/secrets/infra/linear-scim-token.age b/secrets/infra/linear-scim-token.age new file mode 100644 index 0000000..677a475 --- /dev/null +++ b/secrets/infra/linear-scim-token.age @@ -0,0 +1,11 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q 6LanICpiWi1sozNr5HJDWCGb6QFBktRQ0dH2wfFSu2g +jc83UfFoFvxAXcu4O/b6KC+1AyZq/k9IHzx6fL8DHoQ +-> ssh-ed25519 IrZmAg 
r1ggts4fiWOGHoD7IY+cVEgECOUFaulJ1ATSX6/wB2Q +NnKRd8FNKXpCrANK2q2mFJjWYccqInzGNHjK7oJNNS0 +-> ssh-ed25519 0kWPgQ G3i+VXIhED5crwLZoF8cTcaljYENq7K0DAy5mTHsNkk ++eJThDXro6DpNghlcziQv64rg8j0mcm3UfGVHcctI6w +-> X25519 2yw5RabY1hp/of6RLpKI2ao0AwBOzNdeOR4M9YRwmhY +vCe9r9ayAsDcLkyt4/c9EBZpU/DrkGKj8KLbSF9YCHo +--- Lgi0Th/QpSFhDP7JK+jenEIvI0aQfQ3oQ6sl2homLu4 +i?-d:͂ܝYǿ* \ No newline at end of file From 4c12dafa6ddbef682221c3a6062ba51ecd6766f4 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sat, 18 Apr 2026 19:26:55 -0700 Subject: [PATCH 33/59] Fix Linear SAML verification and reseal SCIM token --- Scripts/authentik-sync-linear-saml.sh | 18 ++++++++++++++---- secrets/infra/linear-scim-token.age | 20 ++++++++++---------- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/Scripts/authentik-sync-linear-saml.sh b/Scripts/authentik-sync-linear-saml.sh index 9bead9f..2fd1a90 100755 --- a/Scripts/authentik-sync-linear-saml.sh +++ b/Scripts/authentik-sync-linear-saml.sh @@ -323,10 +323,20 @@ if [[ -z "${application_pk:-}" ]]; then fi for _ in $(seq 1 30); do - if curl -fsS "${authentik_url}/application/saml/${application_slug}/metadata/" >/dev/null 2>&1; then - echo "Synced Authentik Linear SAML application ${application_slug} (${application_name})." - exit 0 - fi + metadata_status="$( + curl -sS \ + -o /dev/null \ + -w '%{http_code}' \ + --max-redirs 0 \ + "${authentik_url}/application/saml/${application_slug}/metadata/" \ + || true + )" + case "$metadata_status" in + 200|301|302|307|308) + echo "Synced Authentik Linear SAML application ${application_slug} (${application_name})." 
+ exit 0 + ;; + esac sleep 2 done diff --git a/secrets/infra/linear-scim-token.age b/secrets/infra/linear-scim-token.age index 677a475..5bed53e 100644 --- a/secrets/infra/linear-scim-token.age +++ b/secrets/infra/linear-scim-token.age @@ -1,11 +1,11 @@ age-encryption.org/v1 --> ssh-ed25519 ux4N8Q 6LanICpiWi1sozNr5HJDWCGb6QFBktRQ0dH2wfFSu2g -jc83UfFoFvxAXcu4O/b6KC+1AyZq/k9IHzx6fL8DHoQ --> ssh-ed25519 IrZmAg r1ggts4fiWOGHoD7IY+cVEgECOUFaulJ1ATSX6/wB2Q -NnKRd8FNKXpCrANK2q2mFJjWYccqInzGNHjK7oJNNS0 --> ssh-ed25519 0kWPgQ G3i+VXIhED5crwLZoF8cTcaljYENq7K0DAy5mTHsNkk -+eJThDXro6DpNghlcziQv64rg8j0mcm3UfGVHcctI6w --> X25519 2yw5RabY1hp/of6RLpKI2ao0AwBOzNdeOR4M9YRwmhY -vCe9r9ayAsDcLkyt4/c9EBZpU/DrkGKj8KLbSF9YCHo ---- Lgi0Th/QpSFhDP7JK+jenEIvI0aQfQ3oQ6sl2homLu4 -i?-d:͂ܝYǿ* \ No newline at end of file +-> ssh-ed25519 ux4N8Q Tb3hxc6ZscCQpr7s8raup25FA8YAmq30jHZfOQp28Xs +L9YhaX9IVinud0IOs5K55ldGx82wjXHxnVBHZnRjiTA +-> ssh-ed25519 IrZmAg etIe6hWDP9YkqDFCWybnvsOh7h8YO+z3tKc95pG64lU +BT3rH5a+LJZWv2xtWPbMJGS2oM9v4mOI9WPmnHebiew +-> ssh-ed25519 0kWPgQ YpCf5m16VaKp7d+C3oF9MJQB/0xzCNtD7ODsTiV8t1o +xG8G/kSM+7VrWHm299A7fG/kBFnoiWZPiDZuldvimLw +-> X25519 ETltnMPR7lWbBWJvJKmNZhS7wqX0WCa4aNu8UKzxMVE +Ys57VNuclgvN1nJIrLjNrwekbosa7KK9lFt0PTpr/MQ +--- ZeUmSOf8+NycQAFRGCJHYcQvTJqSBIGKEOEdCnNfJbE +<q1.O_դ7A۷_@%/5l7JɵčA xb "B \ No newline at end of file From 6dea4e4557a86268e12230eaf1f119b6cd68145c Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sat, 18 Apr 2026 19:30:06 -0700 Subject: [PATCH 34/59] Fix Authentik Linear application patch paths --- Scripts/authentik-sync-linear-saml.sh | 3 +-- Scripts/authentik-sync-linear-scim.sh | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/Scripts/authentik-sync-linear-saml.sh b/Scripts/authentik-sync-linear-saml.sh index 2fd1a90..fbb46f2 100755 --- a/Scripts/authentik-sync-linear-saml.sh +++ b/Scripts/authentik-sync-linear-saml.sh @@ -294,8 +294,7 @@ existing_application="$( )" if [[ -n "$existing_application" ]]; then - 
application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" - api PATCH "/api/v3/core/applications/${application_pk}/" "$application_payload" >/dev/null + api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null else create_application_result="$( api_with_status POST "/api/v3/core/applications/" "$application_payload" diff --git a/Scripts/authentik-sync-linear-scim.sh b/Scripts/authentik-sync-linear-scim.sh index b689212..7e0c7eb 100644 --- a/Scripts/authentik-sync-linear-scim.sh +++ b/Scripts/authentik-sync-linear-scim.sh @@ -262,7 +262,6 @@ if [[ -z "$application" ]]; then exit 1 fi -application_pk="$(printf '%s\n' "$application" | jq -r '.pk')" application_payload="$( printf '%s\n' "$application" \ | jq \ @@ -277,7 +276,7 @@ application_payload="$( policy_engine_mode: .policy_engine_mode }' )" -api PATCH "/api/v3/core/applications/${application_pk}/" "$application_payload" >/dev/null +api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null group_pks_json="$(jq -cn --arg owner "$owner_group_pk" --arg admin "$admin_group_pk" --arg guest "$guest_group_pk" '[$owner, $admin, $guest]')" user_pks_json="$( From 7421834ebc5ee88e2dd328999223a2053bdd37e7 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sat, 18 Apr 2026 19:32:29 -0700 Subject: [PATCH 35/59] Relax Linear Authentik sync verification --- Scripts/authentik-sync-linear-saml.sh | 1 + Scripts/authentik-sync-linear-scim.sh | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Scripts/authentik-sync-linear-saml.sh b/Scripts/authentik-sync-linear-saml.sh index fbb46f2..5da64ad 100755 --- a/Scripts/authentik-sync-linear-saml.sh +++ b/Scripts/authentik-sync-linear-saml.sh @@ -294,6 +294,7 @@ existing_application="$( )" if [[ -n "$existing_application" ]]; then + application_pk="existing" api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null else create_application_result="$( 
diff --git a/Scripts/authentik-sync-linear-scim.sh b/Scripts/authentik-sync-linear-scim.sh index 7e0c7eb..a82cd34 100644 --- a/Scripts/authentik-sync-linear-scim.sh +++ b/Scripts/authentik-sync-linear-scim.sh @@ -301,10 +301,9 @@ while IFS= read -r user_pk; do sync_object "$provider_pk" "authentik.core.models.User" "$user_pk" done < <(printf '%s\n' "$user_pks_json" | jq -r '.[]') -status_json="$(api GET "/api/v3/providers/scim/${provider_pk}/sync/status/")" -if ! printf '%s\n' "$status_json" | jq -e '.task_count >= 0' >/dev/null 2>&1; then - echo "error: could not read Linear SCIM sync status for provider ${provider_pk}" >&2 - exit 1 +status_json="$(api GET "/api/v3/providers/scim/${provider_pk}/sync/status/" || true)" +if ! printf '%s\n' "$status_json" | jq -e 'has("last_sync_status")' >/dev/null 2>&1; then + echo "warning: could not read Linear SCIM sync status for provider ${provider_pk}; keeping reconciled configuration." >&2 fi echo "Synced Authentik Linear SCIM provider ${provider_name} (${provider_pk}) with groups ${owner_group}, ${admin_group}, ${guest_group}." From 7d3e7a6ec56e1739526235a05d2c16857fc4cdfa Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sat, 18 Apr 2026 19:34:26 -0700 Subject: [PATCH 36/59] Make Linear SCIM object sync best-effort --- Scripts/authentik-sync-linear-scim.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Scripts/authentik-sync-linear-scim.sh b/Scripts/authentik-sync-linear-scim.sh index a82cd34..4ef83e4 100644 --- a/Scripts/authentik-sync-linear-scim.sh +++ b/Scripts/authentik-sync-linear-scim.sh @@ -174,7 +174,7 @@ sync_object() { local model="$2" local object_id="$3" - api POST "/api/v3/providers/scim/${provider_pk}/sync/object/" "$( + if ! 
api POST "/api/v3/providers/scim/${provider_pk}/sync/object/" "$( jq -cn \ --arg model "$model" \ --arg object_id "$object_id" \ @@ -183,7 +183,9 @@ sync_object() { sync_object_id: $object_id, override_dry_run: false }' - )" >/dev/null + )" >/dev/null; then + echo "warning: could not trigger immediate Linear SCIM sync for ${model} ${object_id}; provider will continue with its normal sync cycle." >&2 + fi } wait_for_authentik From 44f437c33c9d2ee7f1171e070955d148824a3041 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 00:13:10 -0700 Subject: [PATCH 37/59] Expose Tailscale and add Zulip SAML deployment --- Scripts/authentik-sync-tailscale-oidc.sh | 103 +++++ Scripts/authentik-sync-zulip-saml.sh | 398 ++++++++++++++++++ ...ntik-backed-team-chat-and-workspace-sso.md | 7 +- flake.nix | 1 + nixos/hosts/burrow-forge/default.nix | 53 ++- nixos/modules/burrow-authentik.nix | 102 +++++ nixos/modules/burrow-zulip.nix | 354 ++++++++++++++++ secrets.nix | 5 + secrets/infra/zulip-memcached-password.age | 11 + secrets/infra/zulip-postgres-password.age | Bin 0 -> 578 bytes secrets/infra/zulip-rabbitmq-password.age | 11 + secrets/infra/zulip-redis-password.age | 11 + secrets/infra/zulip-secret-key.age | 11 + 13 files changed, 1064 insertions(+), 3 deletions(-) create mode 100644 Scripts/authentik-sync-zulip-saml.sh create mode 100644 nixos/modules/burrow-zulip.nix create mode 100644 secrets/infra/zulip-memcached-password.age create mode 100644 secrets/infra/zulip-postgres-password.age create mode 100644 secrets/infra/zulip-rabbitmq-password.age create mode 100644 secrets/infra/zulip-redis-password.age create mode 100644 secrets/infra/zulip-secret-key.age diff --git a/Scripts/authentik-sync-tailscale-oidc.sh b/Scripts/authentik-sync-tailscale-oidc.sh index 54564ad..9e01b97 100755 --- a/Scripts/authentik-sync-tailscale-oidc.sh +++ b/Scripts/authentik-sync-tailscale-oidc.sh @@ -10,6 +10,8 @@ template_slug="${AUTHENTIK_TAILSCALE_TEMPLATE_SLUG:-ts}" 
client_id="${AUTHENTIK_TAILSCALE_CLIENT_ID:-tailscale.burrow.net}" client_secret="${AUTHENTIK_TAILSCALE_CLIENT_SECRET:-}" launch_url="${AUTHENTIK_TAILSCALE_LAUNCH_URL:-https://login.tailscale.com/start/oidc}" +access_group="${AUTHENTIK_TAILSCALE_ACCESS_GROUP:-}" +default_external_application_slug="${AUTHENTIK_DEFAULT_EXTERNAL_APPLICATION_SLUG:-}" redirect_uris_json="${AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON:-[ \"https://login.tailscale.com/a/oauth_response\" ]}" @@ -31,6 +33,8 @@ Optional environment: AUTHENTIK_TAILSCALE_CLIENT_ID AUTHENTIK_TAILSCALE_LAUNCH_URL AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON + AUTHENTIK_TAILSCALE_ACCESS_GROUP + AUTHENTIK_DEFAULT_EXTERNAL_APPLICATION_SLUG EOF } @@ -123,6 +127,97 @@ wait_for_authentik() { wait_for_authentik +lookup_group_pk() { + local group_name="$1" + + api GET "/api/v3/core/groups/?page_size=200" \ + | jq -r --arg group_name "$group_name" '.results[]? | select(.name == $group_name) | .pk // empty' \ + | head -n1 +} + +lookup_application_pk() { + local slug="$1" + + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -r --arg slug "$slug" '.results[]? | select(.slug == $slug) | .pk // empty' \ + | head -n1 +} + +ensure_application_group_binding() { + local application_slug="$1" + local group_name="$2" + local application_pk group_pk existing payload binding_pk + + application_pk="$(lookup_application_pk "$application_slug")" + if [[ -z "$application_pk" ]]; then + echo "warning: could not resolve Authentik application ${application_slug}; skipping application group binding" >&2 + return 0 + fi + + group_pk="$(lookup_group_pk "$group_name")" + if [[ -z "$group_pk" ]]; then + echo "error: could not resolve Authentik group ${group_name}" >&2 + exit 1 + fi + + existing="$( + api GET "/api/v3/policies/bindings/?page_size=200&target=${application_pk}" \ + | jq -c --arg group_pk "$group_pk" '.results[]? 
| select(.group == $group_pk)' \ + | head -n1 + )" + + payload="$( + jq -cn \ + --arg target "$application_pk" \ + --arg group "$group_pk" \ + '{ + group: $group, + target: $target, + negate: false, + enabled: true, + order: 100, + timeout: 30, + failure_result: false + }' + )" + + if [[ -n "$existing" ]]; then + binding_pk="$(printf '%s\n' "$existing" | jq -r '.pk')" + api PATCH "/api/v3/policies/bindings/${binding_pk}/" "$payload" >/dev/null + else + api POST "/api/v3/policies/bindings/" "$payload" >/dev/null + fi +} + +ensure_default_external_application() { + local application_slug="$1" + local application_pk default_brand brand_payload + + application_pk="$(lookup_application_pk "$application_slug")" + if [[ -z "$application_pk" ]]; then + echo "error: could not resolve Authentik application ${application_slug} for brand default application" >&2 + exit 1 + fi + + default_brand="$( + api GET "/api/v3/core/brands/?page_size=200" \ + | jq -c '.results[]? | select(.default == true)' \ + | head -n1 + )" + + if [[ -z "$default_brand" ]]; then + echo "warning: could not resolve the default Authentik brand; skipping external default application" >&2 + return 0 + fi + + brand_payload="$( + printf '%s\n' "$default_brand" \ + | jq --arg application_pk "$application_pk" '.default_application = $application_pk' + )" + + api PUT "/api/v3/core/brands/$(printf '%s\n' "$default_brand" | jq -r '.brand_uuid')/" "$brand_payload" >/dev/null +} + template_provider="$( api GET "/api/v3/providers/oauth2/?page_size=200" \ | jq -c --arg template_slug "$template_slug" '.results[]? 
| select(.assigned_application_slug == $template_slug)' \ @@ -239,6 +334,14 @@ if [[ -z "${application_pk:-}" ]]; then exit 1 fi +if [[ -n "$access_group" ]]; then + ensure_application_group_binding "$application_slug" "$access_group" +fi + +if [[ -n "$default_external_application_slug" ]]; then + ensure_default_external_application "$default_external_application_slug" +fi + for _ in $(seq 1 30); do if curl -fsS "${authentik_url}/application/o/${application_slug}/.well-known/openid-configuration" >/dev/null 2>&1; then echo "Synced Authentik Tailscale OIDC application ${application_slug} (${application_name})." diff --git a/Scripts/authentik-sync-zulip-saml.sh b/Scripts/authentik-sync-zulip-saml.sh new file mode 100644 index 0000000..d503ce0 --- /dev/null +++ b/Scripts/authentik-sync-zulip-saml.sh @@ -0,0 +1,398 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +application_slug="${AUTHENTIK_ZULIP_APPLICATION_SLUG:-zulip}" +application_name="${AUTHENTIK_ZULIP_APPLICATION_NAME:-Zulip}" +provider_name="${AUTHENTIK_ZULIP_PROVIDER_NAME:-Zulip}" +acs_url="${AUTHENTIK_ZULIP_ACS_URL:-https://chat.burrow.net/complete/saml/}" +audience="${AUTHENTIK_ZULIP_AUDIENCE:-https://chat.burrow.net}" +launch_url="${AUTHENTIK_ZULIP_LAUNCH_URL:-https://chat.burrow.net/}" +access_group="${AUTHENTIK_ZULIP_ACCESS_GROUP:-}" +issuer="${AUTHENTIK_ZULIP_ISSUER:-$authentik_url}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-zulip-saml.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_ZULIP_APPLICATION_SLUG + AUTHENTIK_ZULIP_APPLICATION_NAME + AUTHENTIK_ZULIP_PROVIDER_NAME + AUTHENTIK_ZULIP_ACS_URL + AUTHENTIK_ZULIP_AUDIENCE + AUTHENTIK_ZULIP_LAUNCH_URL + AUTHENTIK_ZULIP_ACCESS_GROUP + AUTHENTIK_ZULIP_ISSUER +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" 
]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +api_with_status() { + local method="$1" + local path="$2" + local data="${3:-}" + local response_file status + + response_file="$(mktemp)" + trap 'rm -f "$response_file"' RETURN + + if [[ -n "$data" ]]; then + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + )" + else + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + )" + fi + + printf '%s\n' "$status" + cat "$response_file" +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +lookup_oauth_template_field() { + local field="$1" + + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -r --arg field "$field" '.results[]? | select(.assigned_application_slug == "ts") | .[$field]' \ + | head -n1 +} + +lookup_group_pk() { + local group_name="$1" + + api GET "/api/v3/core/groups/?page_size=200" \ + | jq -r --arg group_name "$group_name" '.results[]? | select(.name == $group_name) | .pk // empty' \ + | head -n1 +} + +lookup_application_pk() { + local slug="$1" + + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -r --arg slug "$slug" '.results[]? 
| select(.slug == $slug) | .pk // empty' \ + | head -n1 +} + +ensure_application_group_binding() { + local application_slug="$1" + local group_name="$2" + local application_pk group_pk existing payload binding_pk + + application_pk="$(lookup_application_pk "$application_slug")" + if [[ -z "$application_pk" ]]; then + echo "warning: could not resolve Authentik application ${application_slug}; skipping application group binding" >&2 + return 0 + fi + + group_pk="$(lookup_group_pk "$group_name")" + if [[ -z "$group_pk" ]]; then + echo "error: could not resolve Authentik group ${group_name}" >&2 + exit 1 + fi + + existing="$( + api GET "/api/v3/policies/bindings/?page_size=200&target=${application_pk}" \ + | jq -c --arg group_pk "$group_pk" '.results[]? | select(.group == $group_pk)' \ + | head -n1 + )" + + payload="$( + jq -cn \ + --arg target "$application_pk" \ + --arg group "$group_pk" \ + '{ + group: $group, + target: $target, + negate: false, + enabled: true, + order: 100, + timeout: 30, + failure_result: false + }' + )" + + if [[ -n "$existing" ]]; then + binding_pk="$(printf '%s\n' "$existing" | jq -r '.pk')" + api PATCH "/api/v3/policies/bindings/${binding_pk}/" "$payload" >/dev/null + else + api POST "/api/v3/policies/bindings/" "$payload" >/dev/null + fi +} + +reconcile_property_mapping() { + local name="$1" + local saml_name="$2" + local friendly_name="$3" + local expression="$4" + local payload existing_pk + + payload="$( + jq -n \ + --arg name "$name" \ + --arg saml_name "$saml_name" \ + --arg friendly_name "$friendly_name" \ + --arg expression "$expression" \ + '{ + name: $name, + saml_name: $saml_name, + friendly_name: $friendly_name, + expression: $expression + }' + )" + + existing_pk="$( + api GET "/api/v3/propertymappings/provider/saml/?page_size=200" \ + | jq -r --arg name "$name" '.results[]? 
| select(.name == $name) | .pk' \ + | head -n1 + )" + + if [[ -n "$existing_pk" ]]; then + api PATCH "/api/v3/propertymappings/provider/saml/${existing_pk}/" "$payload" >/dev/null + printf '%s\n' "$existing_pk" + else + api POST "/api/v3/propertymappings/provider/saml/" "$payload" | jq -r '.pk // empty' + fi +} + +wait_for_authentik + +authorization_flow="$(lookup_oauth_template_field authorization_flow)" +invalidation_flow="$(lookup_oauth_template_field invalidation_flow)" +signing_kp="$(lookup_oauth_template_field signing_key)" + +if [[ -z "$authorization_flow" || -z "$invalidation_flow" || -z "$signing_kp" ]]; then + echo "error: could not resolve Authentik provider defaults from Burrow Tailnet template" >&2 + exit 1 +fi + +email_mapping_pk="$( + reconcile_property_mapping \ + "Burrow Zulip SAML Email" \ + "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress" \ + "email" \ + 'return request.user.email' +)" + +name_mapping_pk="$( + reconcile_property_mapping \ + "Burrow Zulip SAML Name" \ + "name" \ + "name" \ + 'return request.user.name or request.user.username' +)" + +first_name_mapping_pk="$( + reconcile_property_mapping \ + "Burrow Zulip SAML First Name" \ + "firstName" \ + "firstName" \ + $'parts = (request.user.name or "").split(" ", 1)\nif len(parts) > 0 and parts[0]:\n return parts[0]\nreturn request.user.username' +)" + +last_name_mapping_pk="$( + reconcile_property_mapping \ + "Burrow Zulip SAML Last Name" \ + "lastName" \ + "lastName" \ + $'parts = (request.user.name or "").rsplit(" ", 1)\nif len(parts) == 2 and parts[1]:\n return parts[1]\nreturn request.user.username' +)" + +if [[ -z "$email_mapping_pk" || -z "$name_mapping_pk" || -z "$first_name_mapping_pk" || -z "$last_name_mapping_pk" ]]; then + echo "error: failed to reconcile Zulip SAML property mappings" >&2 + exit 1 +fi + +provider_payload="$( + jq -n \ + --arg name "$provider_name" \ + --arg authorization_flow "$authorization_flow" \ + --arg invalidation_flow 
"$invalidation_flow" \ + --arg acs_url "$acs_url" \ + --arg audience "$audience" \ + --arg issuer "$issuer" \ + --arg signing_kp "$signing_kp" \ + --arg name_id_mapping "$email_mapping_pk" \ + --arg email_mapping "$email_mapping_pk" \ + --arg name_mapping "$name_mapping_pk" \ + --arg first_name_mapping "$first_name_mapping_pk" \ + --arg last_name_mapping "$last_name_mapping_pk" \ + '{ + name: $name, + authorization_flow: $authorization_flow, + invalidation_flow: $invalidation_flow, + acs_url: $acs_url, + audience: $audience, + issuer: $issuer, + signing_kp: $signing_kp, + sign_assertion: true, + sign_response: true, + sp_binding: "post", + name_id_mapping: $name_id_mapping, + property_mappings: [ + $email_mapping, + $name_mapping, + $first_name_mapping, + $last_name_mapping + ] + }' +)" + +existing_provider="$( + api GET "/api/v3/providers/saml/?page_size=200" \ + | jq -c \ + --arg application_slug "$application_slug" \ + --arg provider_name "$provider_name" \ + '.results[]? | select(.assigned_application_slug == $application_slug or .name == $provider_name)' \ + | head -n1 +)" + +if [[ -n "$existing_provider" ]]; then + provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')" + api PATCH "/api/v3/providers/saml/${provider_pk}/" "$provider_payload" >/dev/null +else + provider_pk="$( + api POST "/api/v3/providers/saml/" "$provider_payload" \ + | jq -r '.pk // empty' + )" +fi + +if [[ -z "${provider_pk:-}" ]]; then + echo "error: Zulip SAML provider did not return a primary key" >&2 + exit 1 +fi + +application_payload="$( + jq -n \ + --arg name "$application_name" \ + --arg slug "$application_slug" \ + --arg provider "$provider_pk" \ + --arg launch_url "$launch_url" \ + '{ + name: $name, + slug: $slug, + provider: ($provider | tonumber), + meta_launch_url: $launch_url, + open_in_new_tab: true, + policy_engine_mode: "any" + }' +)" + +existing_application="$( + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -c --arg slug "$application_slug" 
'.results[]? | select(.slug == $slug)' \ + | head -n1 +)" + +if [[ -n "$existing_application" ]]; then + application_pk="existing" + api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null +else + create_application_result="$( + api_with_status POST "/api/v3/core/applications/" "$application_payload" + )" + create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')" + create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')" + + if [[ "$create_application_status" =~ ^20[01]$ ]]; then + application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')" + elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e ' + (.slug // [] | index("Application with this slug already exists.")) != null + or (.provider // [] | index("Application with this provider already exists.")) != null + ' >/dev/null; then + application_pk="existing-duplicate" + else + printf '%s\n' "$create_application_body" >&2 + echo "error: could not reconcile Authentik application ${application_slug}" >&2 + exit 1 + fi +fi + +if [[ -z "${application_pk:-}" ]]; then + echo "error: Zulip SAML application did not return a primary key" >&2 + exit 1 +fi + +if [[ -n "$access_group" ]]; then + ensure_application_group_binding "$application_slug" "$access_group" +fi + +for _ in $(seq 1 30); do + metadata_status="$( + curl -sS \ + -o /dev/null \ + -w '%{http_code}' \ + --max-redirs 0 \ + "${authentik_url}/application/saml/${application_slug}/metadata/" \ + || true + )" + case "$metadata_status" in + 200|301|302|307|308) + echo "Synced Authentik Zulip SAML application ${application_slug} (${application_name})." + exit 0 + ;; + esac + sleep 2 +done + +echo "warning: Zulip SAML metadata for ${application_slug} was not immediately readable; keeping reconciled config." >&2 +echo "Synced Authentik Zulip SAML application ${application_slug} (${application_name})." 
diff --git a/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md b/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md index 63e0994..ff6e63d 100644 --- a/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md +++ b/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md @@ -68,6 +68,9 @@ across vendor-native Google auth flows when Burrow already operates an IdP. options instead of hand-edited UI state. - Prefer service-specific reconciliation over ad hoc manual setup so rebuilds and host replacement converge automatically. +- When Burrow wants an external-user launcher surface in Authentik, configure + the brand's `default_application` explicitly instead of relying on + `/if/user/`, which otherwise remains internal-user-only. - Derive Linear SCIM role groups from Burrow's canonical identity metadata. If Burrow-wide admin intent says a user is an operator/admin, the repo-owned configuration should map that intent onto the Linear push group without a @@ -111,8 +114,10 @@ across vendor-native Google auth flows when Burrow already operates an IdP. 
- Verify: - `chat.burrow.net` serves a working Zulip login surface - Authentik exposes working metadata for Zulip and Linear - - Authentik exposes a working OIDC issuer for 1Password +- Authentik exposes a working OIDC issuer for 1Password - users in Burrow admin groups receive the expected access on first login + - external Burrow users landing on `auth.burrow.net` reach the intended + app launcher target instead of the internal-only Authentik user interface - Record concrete evidence for: - host deployment generation - Authentik reconciliation success diff --git a/flake.nix b/flake.nix index 1974f17..e842fba 100644 --- a/flake.nix +++ b/flake.nix @@ -214,6 +214,7 @@ nixosModules.burrow-forgejo-nsc = nsc-autoscaler.nixosModules.default; nixosModules.burrow-authentik = import ./nixos/modules/burrow-authentik.nix; nixosModules.burrow-headscale = import ./nixos/modules/burrow-headscale.nix; + nixosModules.burrow-zulip = import ./nixos/modules/burrow-zulip.nix; nixosConfigurations.burrow-forge = nixpkgs.lib.nixosSystem { system = "x86_64-linux"; specialArgs = { diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 0121f92..2d943b9 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -61,6 +61,7 @@ in self.nixosModules.burrow-forgejo-nsc self.nixosModules.burrow-authentik self.nixosModules.burrow-headscale + self.nixosModules.burrow-zulip ]; system.stateVersion = "24.11"; @@ -162,9 +163,44 @@ in mode = "0400"; }; + age.secrets.burrowZulipPostgresPassword = { + file = ../../../secrets/infra/zulip-postgres-password.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + + age.secrets.burrowZulipMemcachedPassword = { + file = ../../../secrets/infra/zulip-memcached-password.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + + age.secrets.burrowZulipRabbitmqPassword = { + file = ../../../secrets/infra/zulip-rabbitmq-password.age; + owner = "root"; + group = "root"; + 
mode = "0400"; + }; + + age.secrets.burrowZulipRedisPassword = { + file = ../../../secrets/infra/zulip-redis-password.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + + age.secrets.burrowZulipSecretKey = { + file = ../../../secrets/infra/zulip-secret-key.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + networking.extraHosts = '' - 127.0.0.1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net - ::1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net + 127.0.0.1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net chat.burrow.net nsc-autoscaler.burrow.net + ::1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net chat.burrow.net nsc-autoscaler.burrow.net ''; services.burrow.forge = { @@ -208,6 +244,8 @@ in forgejoClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path; headscaleClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path; tailscaleClientSecretFile = config.age.secrets.burrowTailscaleOidcClientSecret.path; + tailscaleAccessGroupName = contributors.groups.users; + defaultExternalApplicationSlug = "tailscale"; googleClientIDFile = config.age.secrets.burrowAuthentikGoogleClientId.path; googleClientSecretFile = config.age.secrets.burrowAuthentikGoogleClientSecret.path; googleAccountMapFile = config.age.secrets.burrowAuthentikGoogleAccountMap.path; @@ -224,6 +262,7 @@ in linearOwnerGroupName = linearGroups.owners; linearAdminGroupName = linearGroups.admins; linearGuestGroupName = linearGroups.guests; + zulipAccessGroupName = contributors.groups.users; }; services.burrow.headscale = { @@ -231,4 +270,14 @@ in oidcClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path; bootstrapUsers = headscaleBootstrapUsers; }; + + services.burrow.zulip = { + enable = true; + administratorEmail = identities.contact.canonicalEmail; + postgresPasswordFile = config.age.secrets.burrowZulipPostgresPassword.path; + 
memcachedPasswordFile = config.age.secrets.burrowZulipMemcachedPassword.path; + rabbitmqPasswordFile = config.age.secrets.burrowZulipRabbitmqPassword.path; + redisPasswordFile = config.age.secrets.burrowZulipRedisPassword.path; + secretKeyFile = config.age.secrets.burrowZulipSecretKey.path; + }; } diff --git a/nixos/modules/burrow-authentik.nix b/nixos/modules/burrow-authentik.nix index 772adc4..acf76ce 100644 --- a/nixos/modules/burrow-authentik.nix +++ b/nixos/modules/burrow-authentik.nix @@ -12,6 +12,7 @@ let forgejoOidcSyncScript = ../../Scripts/authentik-sync-forgejo-oidc.sh; tailscaleOidcSyncScript = ../../Scripts/authentik-sync-tailscale-oidc.sh; onePasswordOidcSyncScript = ../../Scripts/authentik-sync-1password-oidc.sh; + zulipSamlSyncScript = ../../Scripts/authentik-sync-zulip-saml.sh; linearSamlSyncScript = ../../Scripts/authentik-sync-linear-saml.sh; linearScimSyncScript = ../../Scripts/authentik-sync-linear-scim.sh; googleSourceSyncScript = ../../Scripts/authentik-sync-google-source.sh; @@ -153,6 +154,18 @@ in description = "Host-local file containing the Authentik Tailscale OIDC client secret."; }; + tailscaleAccessGroupName = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Authentik group that should be allowed to launch the Tailscale application."; + }; + + defaultExternalApplicationSlug = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Authentik application slug that external users should land on instead of /if/user/."; + }; + onePasswordDomain = lib.mkOption { type = lib.types.str; default = "burrow-team.1password.com"; @@ -186,6 +199,42 @@ in description = "Authentik application slug for Linear SAML."; }; + zulipDomain = lib.mkOption { + type = lib.types.str; + default = "chat.burrow.net"; + description = "Public Zulip domain exposed through Authentik SAML."; + }; + + zulipProviderSlug = lib.mkOption { + type = lib.types.str; + default = "zulip"; + description = 
"Authentik application slug for Zulip SAML."; + }; + + zulipAcsUrl = lib.mkOption { + type = lib.types.str; + default = "https://${config.services.burrow.authentik.zulipDomain}/complete/saml/"; + description = "Zulip SAML ACS URL."; + }; + + zulipAudience = lib.mkOption { + type = lib.types.str; + default = "https://${config.services.burrow.authentik.zulipDomain}"; + description = "Zulip SAML audience/entity identifier."; + }; + + zulipLaunchUrl = lib.mkOption { + type = lib.types.str; + default = "https://${config.services.burrow.authentik.zulipDomain}/"; + description = "Zulip URL exposed in Authentik."; + }; + + zulipAccessGroupName = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Authentik group allowed to launch Zulip from Burrow SSO surfaces."; + }; + linearAcsUrl = lib.mkOption { type = lib.types.nullOr lib.types.str; default = null; @@ -809,6 +858,12 @@ EOF export AUTHENTIK_TAILSCALE_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.tailscaleClientSecretFile})" export AUTHENTIK_TAILSCALE_LAUNCH_URL=https://login.tailscale.com/start/oidc export AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON='["https://login.tailscale.com/a/oauth_response"]' + ${lib.optionalString (cfg.tailscaleAccessGroupName != null) '' + export AUTHENTIK_TAILSCALE_ACCESS_GROUP=${lib.escapeShellArg cfg.tailscaleAccessGroupName} + ''} + ${lib.optionalString (cfg.defaultExternalApplicationSlug != null) '' + export AUTHENTIK_DEFAULT_EXTERNAL_APPLICATION_SLUG=${lib.escapeShellArg cfg.defaultExternalApplicationSlug} + ''} ${pkgs.bash}/bin/bash ${tailscaleOidcSyncScript} ''; @@ -859,6 +914,53 @@ EOF ''; }; + systemd.services.burrow-authentik-zulip-saml = { + description = "Reconcile the Burrow Authentik Zulip SAML application"; + after = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + 
zulipSamlSyncScript + cfg.envFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_ZULIP_APPLICATION_SLUG=${lib.escapeShellArg cfg.zulipProviderSlug} + export AUTHENTIK_ZULIP_APPLICATION_NAME=Zulip + export AUTHENTIK_ZULIP_PROVIDER_NAME=Zulip + export AUTHENTIK_ZULIP_ACS_URL=${lib.escapeShellArg cfg.zulipAcsUrl} + export AUTHENTIK_ZULIP_AUDIENCE=${lib.escapeShellArg cfg.zulipAudience} + export AUTHENTIK_ZULIP_LAUNCH_URL=${lib.escapeShellArg cfg.zulipLaunchUrl} + ${lib.optionalString (cfg.zulipAccessGroupName != null) '' + export AUTHENTIK_ZULIP_ACCESS_GROUP=${lib.escapeShellArg cfg.zulipAccessGroupName} + ''} + + ${pkgs.bash}/bin/bash ${zulipSamlSyncScript} + ''; + }; + systemd.services.burrow-authentik-linear-saml = lib.mkIf ( cfg.linearAcsUrl != null && cfg.linearAudience != null ) { diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix new file mode 100644 index 0000000..0fcad65 --- /dev/null +++ b/nixos/modules/burrow-zulip.nix @@ -0,0 +1,354 @@ +{ config, lib, pkgs, ... 
}: + +let + cfg = config.services.burrow.zulip; + yamlFormat = pkgs.formats.yaml { }; + composeFile = yamlFormat.generate "burrow-zulip-compose.yaml" { + services = { + database = { + image = "zulip/zulip-postgresql:14"; + restart = "unless-stopped"; + secrets = [ "zulip__postgres_password" ]; + environment = { + POSTGRES_DB = "zulip"; + POSTGRES_USER = "zulip"; + POSTGRES_PASSWORD_FILE = "/run/secrets/zulip__postgres_password"; + }; + volumes = [ "postgresql-14:/var/lib/postgresql/data:rw" ]; + attach = false; + }; + memcached = { + image = "memcached:alpine"; + restart = "unless-stopped"; + command = [ + "sh" + "-euc" + '' + echo 'mech_list: plain' > "$SASL_CONF_PATH" + echo "zulip@$HOSTNAME:$(cat $MEMCACHED_PASSWORD_FILE)" > "$MEMCACHED_SASL_PWDB" + echo "zulip@localhost:$(cat $MEMCACHED_PASSWORD_FILE)" >> "$MEMCACHED_SASL_PWDB" + exec memcached -S + '' + ]; + secrets = [ "zulip__memcached_password" ]; + environment = { + SASL_CONF_PATH = "/home/memcache/memcached.conf"; + MEMCACHED_SASL_PWDB = "/home/memcache/memcached-sasl-db"; + MEMCACHED_PASSWORD_FILE = "/run/secrets/zulip__memcached_password"; + }; + attach = false; + }; + rabbitmq = { + image = "rabbitmq:4.2"; + restart = "unless-stopped"; + command = [ + "sh" + "-euc" + '' + export RABBITMQ_DEFAULT_PASS="$(cat "$RABBITMQ_PASSWORD_FILE")" + echo "default_user = $RABBITMQ_DEFAULT_USER" >> /etc/rabbitmq/rabbitmq.conf + echo "default_pass = $RABBITMQ_DEFAULT_PASS" >> /etc/rabbitmq/rabbitmq.conf + exec docker-entrypoint.sh rabbitmq-server + '' + ]; + secrets = [ "zulip__rabbitmq_password" ]; + environment = { + RABBITMQ_DEFAULT_USER = "zulip"; + RABBITMQ_PASSWORD_FILE = "/run/secrets/zulip__rabbitmq_password"; + }; + volumes = [ "rabbitmq:/var/lib/rabbitmq:rw" ]; + attach = false; + }; + redis = { + image = "redis:alpine"; + restart = "unless-stopped"; + command = [ + "sh" + "-euc" + "/usr/local/bin/docker-entrypoint.sh --requirepass \"$(cat \"$REDIS_PASSWORD_FILE\")\"" + ]; + secrets = [ 
"zulip__redis_password" ]; + environment = { + REDIS_PASSWORD_FILE = "/run/secrets/zulip__redis_password"; + }; + volumes = [ "redis:/data:rw" ]; + attach = false; + }; + zulip = { + image = "ghcr.io/zulip/zulip-server:11.6-1"; + restart = "unless-stopped"; + secrets = [ + "zulip__postgres_password" + "zulip__memcached_password" + "zulip__rabbitmq_password" + "zulip__redis_password" + "zulip__secret_key" + "zulip__email_password" + ]; + environment = { + SETTING_REMOTE_POSTGRES_HOST = "database"; + SETTING_MEMCACHED_LOCATION = "memcached:11211"; + SETTING_RABBITMQ_HOST = "rabbitmq"; + SETTING_REDIS_HOST = "redis"; + }; + volumes = [ "zulip:/data:rw" ]; + ulimits.nofile = { + soft = 1000000; + hard = 1048576; + }; + depends_on = [ + "database" + "memcached" + "rabbitmq" + "redis" + ]; + }; + }; + + volumes = { + zulip = { }; + postgresql-14 = { }; + rabbitmq = { }; + redis = { }; + }; + }; +in +{ + options.services.burrow.zulip = { + enable = lib.mkEnableOption "the Burrow Zulip deployment"; + + domain = lib.mkOption { + type = lib.types.str; + default = "chat.burrow.net"; + description = "Public Zulip domain."; + }; + + port = lib.mkOption { + type = lib.types.port; + default = 18090; + description = "Local loopback port Caddy should proxy to."; + }; + + dataDir = lib.mkOption { + type = lib.types.str; + default = "/var/lib/burrow/zulip"; + description = "Host directory storing Zulip compose state and generated runtime files."; + }; + + administratorEmail = lib.mkOption { + type = lib.types.str; + default = "contact@burrow.net"; + description = "Operational Zulip administrator email."; + }; + + authentikDomain = lib.mkOption { + type = lib.types.str; + default = config.services.burrow.authentik.domain; + description = "Authentik domain Zulip should trust as its SAML IdP."; + }; + + authentikProviderSlug = lib.mkOption { + type = lib.types.str; + default = config.services.burrow.authentik.zulipProviderSlug; + description = "Authentik SAML application slug used for 
Zulip."; + }; + + postgresPasswordFile = lib.mkOption { + type = lib.types.str; + description = "File containing the Zulip PostgreSQL password."; + }; + + memcachedPasswordFile = lib.mkOption { + type = lib.types.str; + description = "File containing the Zulip memcached password."; + }; + + rabbitmqPasswordFile = lib.mkOption { + type = lib.types.str; + description = "File containing the Zulip RabbitMQ password."; + }; + + redisPasswordFile = lib.mkOption { + type = lib.types.str; + description = "File containing the Zulip Redis password."; + }; + + secretKeyFile = lib.mkOption { + type = lib.types.str; + description = "File containing the Zulip Django secret key."; + }; + }; + + config = lib.mkIf cfg.enable { + environment.systemPackages = [ + pkgs.podman + pkgs.podman-compose + ]; + + services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' + encode gzip zstd + reverse_proxy 127.0.0.1:${toString cfg.port} + ''; + + systemd.tmpfiles.rules = [ + "d ${cfg.dataDir} 0755 root root - -" + "d ${cfg.dataDir}/secrets 0700 root root - -" + "d ${cfg.dataDir}/logs 0755 root root - -" + ]; + + systemd.services.burrow-zulip-runtime = { + description = "Prepare Burrow Zulip compose and SAML runtime files"; + after = [ + "burrow-authentik-ready.service" + "burrow-authentik-zulip-saml.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "burrow-authentik-zulip-saml.service" + "network-online.target" + ]; + requiredBy = [ "burrow-zulip.service" ]; + before = [ "burrow-zulip.service" ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.python3 + ]; + restartTriggers = [ + composeFile + cfg.postgresPasswordFile + cfg.memcachedPasswordFile + cfg.rabbitmqPasswordFile + cfg.redisPasswordFile + cfg.secretKeyFile + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + + install -d -m 0755 ${lib.escapeShellArg cfg.dataDir} + install -d -m 0700 ${lib.escapeShellArg 
"${cfg.dataDir}/secrets"} + install -d -m 0755 ${lib.escapeShellArg "${cfg.dataDir}/logs"} + install -m 0644 ${composeFile} ${lib.escapeShellArg "${cfg.dataDir}/compose.yaml"} + : > ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} + chmod 0600 ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} + + metadata_xml="$(${pkgs.curl}/bin/curl -fsS https://${cfg.authentikDomain}/application/saml/${cfg.authentikProviderSlug}/metadata/)" + saml_cert="$(printf '%s' "$metadata_xml" | ${pkgs.python3}/bin/python3 -c ' +import re, sys, xml.etree.ElementTree as ET +xml = sys.stdin.read() +root = ET.fromstring(xml) +ns = {"md": "urn:oasis:names:tc:SAML:2.0:metadata", "ds": "http://www.w3.org/2000/09/xmldsig#"} +node = root.find(".//ds:X509Certificate", ns) +if node is None or not (node.text or "").strip(): + raise SystemExit("missing X509 certificate in Authentik metadata") +print((node.text or "").strip()) +')" + + cat > ${lib.escapeShellArg "${cfg.dataDir}/compose.override.yaml"} < ssh-ed25519 ux4N8Q x0r1UHgSibFIvKU34kP0+mnvQa5xXnac3P5fyqb7qFc +MfKnr5N0DV2NIoo4MFVFV0ULMayy0zzZqIq4FDzgDGc +-> ssh-ed25519 IrZmAg rzoR8knGrsTGuh9Hqg/NB0NQKI1vx1WI0ZRyrLIPwVY +7gV/d1slrIT+W0+iX5YK/uUWjHGJfee6vA+f9a35nEY +-> ssh-ed25519 0kWPgQ SyuEAfqmBAqLcuuQUHM5OzAv2hoquMMYtVdbKpBVhjI +7QqXens2363ln0euoormMh9a3Csh+nS2eBkHuQJmOWc +-> X25519 qDjNNkYBUhWTYyBhrw9tYl8a7G6TCkVZbR4aPcP+J0c +QF33V6hFUuYRj0B8Eo4jqyyvCpBbpD2ViVWoS8A8f3E +--- 1/Jb0nvWlcszMmxI0yVr6kfexDN0sSk1p+wsTUL4WvU +9a5IكV[f,Db \v&LZ7!?4=JxFeV \ No newline at end of file diff --git a/secrets/infra/zulip-postgres-password.age b/secrets/infra/zulip-postgres-password.age new file mode 100644 index 0000000000000000000000000000000000000000..b03556c4933321c6bf91d07806dbe4b15de64ad8 GIT binary patch literal 578 zcmYdHPt{G$OD?J`D9Oyv)5|YP*Do{V(zR14F3!+RO))YxHMCSHtuXPk2vqPXOG^vT z&v#A9cgZizF)T~5@bjoN&G#xS)pqkv@yn~IDh~-sb4qaxP3Ou8^3O~%2?@&faLhLk 
zi^@xNcXSL4vkdfgb}=(cP1Uwc)XwxaaCQkQC`Px*r?dm+|@HGJF`3~E0C)yxWv-QIK#lLz{RgPCC|??IlDO9 zt+c|_FgGOIxh%25EXCNdDA~*;%mCdsgY58t^gsoRjN;T>i!|c`<78jgq%sS)2-mz^ zeOGtqa>sD(+|u+UbC-$=^W;dsJYTL5SD%0|H)kjPC=a)&>;Uu1@-%Os;)2q&;^LfQ z%fRqLqw+$pDDU#5D6romAc2@tkm#D?UQiialHqIM?53}u?`0Bd7H*vD8c|ge5Ss4g z80K6YX;z#WT+U?@9`2QHkmBTB9uyei6)G0tE-TvUE$*AX&6{ulH`+|o0gwlQJj*O?`ohQR-9#Skee8pn39{B9-dJikjz!7 z+$v`jnKG|-&4fEsm&kZNUTwTeJL2$P#TUEhe6?Hk^}FQDm11@;jmvGOPj>#s=pfWm ky+teJ>QedHe>|S0GaPyDm^lCG5n0EANiN~rbwwWn07|vS`2YX_ literal 0 HcmV?d00001 diff --git a/secrets/infra/zulip-rabbitmq-password.age b/secrets/infra/zulip-rabbitmq-password.age new file mode 100644 index 0000000..9b1f6ec --- /dev/null +++ b/secrets/infra/zulip-rabbitmq-password.age @@ -0,0 +1,11 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q s1hLIWvkXmlIv/VeHXpDSCe+dh09mE+iZd7xJiQccy0 +8WosTJQLGRPhTR06SIDjgtXNebcf+H/pFzY/lBCjXcs +-> ssh-ed25519 IrZmAg zBNlK+o/RCTCyp8BRkoAYqsDn//kIKtYk3SICkMu3BA +EhBQy8QdSnCZKkdGzQho7zEMmAbJVoU5jZOMPN6tHG0 +-> ssh-ed25519 0kWPgQ hv06idPXqAATkLeUC5vILdEO2NXNWPczlWnwMFvOdkA +3EeajviunGlcfcF1QlRJrVA9bwPT+fJZFX0uneYVs0c +-> X25519 vm9rPYnQB16VSidi7+nr70lFaH0W/jIGY8zwUObZUV8 +jFgPy/w4j0/p1USKGjQY+coo1OUFXiIjJ5apIZCrZVI +--- Cf2c6WzLYOi8xE/sIn7ZtUqBy5AToASDUNpAxyjrI9M +:,+!ϨϬB4DmH|(9l9LPZ^zed=imz? 
\ No newline at end of file diff --git a/secrets/infra/zulip-redis-password.age b/secrets/infra/zulip-redis-password.age new file mode 100644 index 0000000..2aff8b6 --- /dev/null +++ b/secrets/infra/zulip-redis-password.age @@ -0,0 +1,11 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q DqDE3ZZlPUWUyyLA185xsOmfGi146SNk+hENMQXaiFY +D6FhZgynbdccPJQiFRJ18EYvCyDLz3cak0YuQa4f5p4 +-> ssh-ed25519 IrZmAg lXgVeADmgjeHeVOOIS5oHqrhkN59ZWDemMOBJo3ubH8 +AQ24P+DnxNoHEguNnLaROIW4/Sq96w/UxzzQwEOyGRc +-> ssh-ed25519 0kWPgQ 8x0pMohdACYueLY6jbNwg7MYVaZcjwBU4axthvDoFx4 +SgUVnd6MK1MccWVYOu9R3PtoMCBBNGKQ7jt5MSA+KkI +-> X25519 UaO5huJPx8d8eMUnGhbI77tZjsFlIPWEffT4fgoO22w +DVz016ibRxJoa4TDmb2m0Qu9Dn8jpjWEBVtdm2TZx0c +--- 5+MHuvC26SjEBFSmRm0kXjiI27QnJGxvPl2w13EkMrw +FoQ]ȟeU//no.XGJ Э|+ž \ No newline at end of file diff --git a/secrets/infra/zulip-secret-key.age b/secrets/infra/zulip-secret-key.age new file mode 100644 index 0000000..d903d66 --- /dev/null +++ b/secrets/infra/zulip-secret-key.age @@ -0,0 +1,11 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q ml+kmLmuRb2nMXJyhKigby2+lPddxM/U7tjhGGQ/JGk +B3UCv/3+4GHeKR964o/m0CoicHwDgWQGEarPW94tb3I +-> ssh-ed25519 IrZmAg AO0ELOuGGj+WanDZFRkHKUEJyZqJYFdhWbqmUfwbpiM +5RZMxVBvW5+TzCBFnn66ry3o5V5cJykweyoYMVBgczY +-> ssh-ed25519 0kWPgQ gqQ/S33Re2OYLz1D9LoSAoqOKxuL4aUes8r6+NyAoXw +NHo2xFsxxJO1ZjnG9r3oxMuvjOUsCyyPvcar2ejZp9w +-> X25519 vUAjBCE197YsckVNM4SYVIPBEESTWnBPCWnUlEwYs1I +L3l85DXFoAVm2ssHfjBeqRpWGlo1UGbmcNkEgoUB9fM +--- X/2O8ufjbTGrt2zCm4gSRqqoxT5v6a+13XjH4dpRsHs +Mkf"(qxF2BdMRYji ܴ<ґb_.!r+<Ussu?gD\V am(Ȉ&.& c/|w(WH4rѠ+j"B  \ No newline at end of file From 7567ab194b67d7c6fa942e6a9d6cbb90b399a184 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 00:16:51 -0700 Subject: [PATCH 38/59] Fix Tailscale default app and Zulip metadata fetch --- Scripts/authentik-sync-tailscale-oidc.sh | 1 + nixos/modules/burrow-zulip.nix | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Scripts/authentik-sync-tailscale-oidc.sh 
b/Scripts/authentik-sync-tailscale-oidc.sh index 9e01b97..45e654e 100755 --- a/Scripts/authentik-sync-tailscale-oidc.sh +++ b/Scripts/authentik-sync-tailscale-oidc.sh @@ -308,6 +308,7 @@ existing_application="$( if [[ -n "$existing_application" ]]; then application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" + api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null else create_application_result="$( api_with_status POST "/api/v3/core/applications/" "$application_payload" diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index 0fcad65..6aaae60 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -239,7 +239,7 @@ in : > ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} chmod 0600 ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} - metadata_xml="$(${pkgs.curl}/bin/curl -fsS https://${cfg.authentikDomain}/application/saml/${cfg.authentikProviderSlug}/metadata/)" + metadata_xml="$(${pkgs.curl}/bin/curl -fsSL https://${cfg.authentikDomain}/application/saml/${cfg.authentikProviderSlug}/metadata/)" saml_cert="$(printf '%s' "$metadata_xml" | ${pkgs.python3}/bin/python3 -c ' import re, sys, xml.etree.ElementTree as ET xml = sys.stdin.read() From 8ac1a5c70e0d4b83dc1d08f22e9e9cc67c71a080 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 00:22:13 -0700 Subject: [PATCH 39/59] Use unified tailnet launcher and fix Zulip RabbitMQ --- nixos/hosts/burrow-forge/default.nix | 3 +-- nixos/modules/burrow-zulip.nix | 12 +----------- 2 files changed, 2 insertions(+), 13 deletions(-) diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 2d943b9..f6d99f9 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -244,8 +244,7 @@ in forgejoClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path; headscaleClientSecretFile = 
config.age.secrets.burrowHeadscaleOidcClientSecret.path; tailscaleClientSecretFile = config.age.secrets.burrowTailscaleOidcClientSecret.path; - tailscaleAccessGroupName = contributors.groups.users; - defaultExternalApplicationSlug = "tailscale"; + defaultExternalApplicationSlug = "ts"; googleClientIDFile = config.age.secrets.burrowAuthentikGoogleClientId.path; googleClientSecretFile = config.age.secrets.burrowAuthentikGoogleClientSecret.path; googleAccountMapFile = config.age.secrets.burrowAuthentikGoogleAccountMap.path; diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index 6aaae60..e631468 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -41,20 +41,10 @@ let rabbitmq = { image = "rabbitmq:4.2"; restart = "unless-stopped"; - command = [ - "sh" - "-euc" - '' - export RABBITMQ_DEFAULT_PASS="$(cat "$RABBITMQ_PASSWORD_FILE")" - echo "default_user = $RABBITMQ_DEFAULT_USER" >> /etc/rabbitmq/rabbitmq.conf - echo "default_pass = $RABBITMQ_DEFAULT_PASS" >> /etc/rabbitmq/rabbitmq.conf - exec docker-entrypoint.sh rabbitmq-server - '' - ]; secrets = [ "zulip__rabbitmq_password" ]; environment = { RABBITMQ_DEFAULT_USER = "zulip"; - RABBITMQ_PASSWORD_FILE = "/run/secrets/zulip__rabbitmq_password"; + RABBITMQ_DEFAULT_PASS_FILE = "/run/secrets/zulip__rabbitmq_password"; }; volumes = [ "rabbitmq:/var/lib/rabbitmq:rw" ]; attach = false; From bd13ff3ee980223bf73302ae5998bc8a1a34cc01 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 00:25:16 -0700 Subject: [PATCH 40/59] Bind Zulip memcached and RabbitMQ config files --- nixos/modules/burrow-zulip.nix | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index e631468..8366ded 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -25,28 +25,25 @@ let "-euc" '' echo 'mech_list: plain' > "$SASL_CONF_PATH" - echo 
"zulip@$HOSTNAME:$(cat $MEMCACHED_PASSWORD_FILE)" > "$MEMCACHED_SASL_PWDB" - echo "zulip@localhost:$(cat $MEMCACHED_PASSWORD_FILE)" >> "$MEMCACHED_SASL_PWDB" + echo "zulip@$HOSTNAME:$(cat /run/burrow/memcached-password)" > "$MEMCACHED_SASL_PWDB" + echo "zulip@localhost:$(cat /run/burrow/memcached-password)" >> "$MEMCACHED_SASL_PWDB" exec memcached -S '' ]; - secrets = [ "zulip__memcached_password" ]; environment = { SASL_CONF_PATH = "/home/memcache/memcached.conf"; MEMCACHED_SASL_PWDB = "/home/memcache/memcached-sasl-db"; - MEMCACHED_PASSWORD_FILE = "/run/secrets/zulip__memcached_password"; }; + volumes = [ "./secrets/memcached-password:/run/burrow/memcached-password:ro" ]; attach = false; }; rabbitmq = { image = "rabbitmq:4.2"; restart = "unless-stopped"; - secrets = [ "zulip__rabbitmq_password" ]; - environment = { - RABBITMQ_DEFAULT_USER = "zulip"; - RABBITMQ_DEFAULT_PASS_FILE = "/run/secrets/zulip__rabbitmq_password"; - }; - volumes = [ "rabbitmq:/var/lib/rabbitmq:rw" ]; + volumes = [ + "rabbitmq:/var/lib/rabbitmq:rw" + "./rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf:ro" + ]; attach = false; }; redis = { @@ -228,6 +225,12 @@ in install -m 0644 ${composeFile} ${lib.escapeShellArg "${cfg.dataDir}/compose.yaml"} : > ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} chmod 0600 ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} + install -m 0444 ${lib.escapeShellArg cfg.memcachedPasswordFile} ${lib.escapeShellArg "${cfg.dataDir}/secrets/memcached-password"} + cat > ${lib.escapeShellArg "${cfg.dataDir}/rabbitmq.conf"} < Date: Sun, 19 Apr 2026 00:30:08 -0700 Subject: [PATCH 41/59] Declare Zulip compose secrets --- nixos/modules/burrow-zulip.nix | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index 8366ded..48a5cbf 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -248,6 +248,10 @@ print((node.text or "").strip()) secrets: 
zulip__postgres_password: file: ${cfg.postgresPasswordFile} + zulip__memcached_password: + file: ${cfg.memcachedPasswordFile} + zulip__rabbitmq_password: + file: ${cfg.rabbitmqPasswordFile} zulip__redis_password: file: ${cfg.redisPasswordFile} zulip__secret_key: From b8cad4c028bd1df82233b2ddc1a1cce7bef96eb8 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 00:52:16 -0700 Subject: [PATCH 42/59] Grant Tailnet access and harden Zulip bootstrap --- Scripts/authentik-sync-linear-saml.sh | 4 +- Scripts/authentik-sync-linear-scim.sh | 7 +- Scripts/authentik-sync-tailscale-oidc.sh | 2 +- Scripts/authentik-sync-zulip-saml.sh | 4 +- nixos/hosts/burrow-forge/default.nix | 1 + nixos/modules/burrow-zulip.nix | 81 +++++++++++++++++++++++- 6 files changed, 90 insertions(+), 9 deletions(-) diff --git a/Scripts/authentik-sync-linear-saml.sh b/Scripts/authentik-sync-linear-saml.sh index 5da64ad..2fd1a90 100755 --- a/Scripts/authentik-sync-linear-saml.sh +++ b/Scripts/authentik-sync-linear-saml.sh @@ -294,8 +294,8 @@ existing_application="$( )" if [[ -n "$existing_application" ]]; then - application_pk="existing" - api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null + application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" + api PATCH "/api/v3/core/applications/${application_pk}/" "$application_payload" >/dev/null else create_application_result="$( api_with_status POST "/api/v3/core/applications/" "$application_payload" diff --git a/Scripts/authentik-sync-linear-scim.sh b/Scripts/authentik-sync-linear-scim.sh index 4ef83e4..5d42cca 100644 --- a/Scripts/authentik-sync-linear-scim.sh +++ b/Scripts/authentik-sync-linear-scim.sh @@ -278,7 +278,12 @@ application_payload="$( policy_engine_mode: .policy_engine_mode }' )" -api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null +application_pk="$(printf '%s\n' "$application" | jq -r '.pk // empty')" +if [[ -z "$application_pk" ]]; 
then + echo "error: could not resolve Authentik application primary key for ${application_slug}" >&2 + exit 1 +fi +api PATCH "/api/v3/core/applications/${application_pk}/" "$application_payload" >/dev/null group_pks_json="$(jq -cn --arg owner "$owner_group_pk" --arg admin "$admin_group_pk" --arg guest "$guest_group_pk" '[$owner, $admin, $guest]')" user_pks_json="$( diff --git a/Scripts/authentik-sync-tailscale-oidc.sh b/Scripts/authentik-sync-tailscale-oidc.sh index 45e654e..fde1a01 100755 --- a/Scripts/authentik-sync-tailscale-oidc.sh +++ b/Scripts/authentik-sync-tailscale-oidc.sh @@ -308,7 +308,7 @@ existing_application="$( if [[ -n "$existing_application" ]]; then application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" - api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null + api PATCH "/api/v3/core/applications/${application_pk}/" "$application_payload" >/dev/null else create_application_result="$( api_with_status POST "/api/v3/core/applications/" "$application_payload" diff --git a/Scripts/authentik-sync-zulip-saml.sh b/Scripts/authentik-sync-zulip-saml.sh index d503ce0..6767991 100644 --- a/Scripts/authentik-sync-zulip-saml.sh +++ b/Scripts/authentik-sync-zulip-saml.sh @@ -344,8 +344,8 @@ existing_application="$( )" if [[ -n "$existing_application" ]]; then - application_pk="existing" - api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null + application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" + api PATCH "/api/v3/core/applications/${application_pk}/" "$application_payload" >/dev/null else create_application_result="$( api_with_status POST "/api/v3/core/applications/" "$application_payload" diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index f6d99f9..2464672 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -251,6 +251,7 @@ in googleLoginMode = "redirect"; 
userGroupName = contributors.groups.users; adminGroupName = contributors.groups.admins; + tailscaleAccessGroupName = contributors.groups.users; bootstrapUsers = bootstrapUsers; linearAcsUrl = "https://api.linear.app/auth/sso/d0ca13dc-ac41-4824-8aab-e0ca352fc3de/acs"; linearAudience = "https://auth.linear.app/sso/d0ca13dc-ac41-4824-8aab-e0ca352fc3de"; diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index 48a5cbf..a408c12 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -128,6 +128,18 @@ in description = "Operational Zulip administrator email."; }; + realmName = lib.mkOption { + type = lib.types.str; + default = "Burrow"; + description = "Initial Zulip organization name for single-tenant bootstrap."; + }; + + realmOwnerName = lib.mkOption { + type = lib.types.str; + default = "Burrow"; + description = "Display name used for the initial Zulip organization owner."; + }; + authentikDomain = lib.mkOption { type = lib.types.str; default = config.services.burrow.authentik.domain; @@ -227,6 +239,7 @@ in chmod 0600 ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} install -m 0444 ${lib.escapeShellArg cfg.memcachedPasswordFile} ${lib.escapeShellArg "${cfg.dataDir}/secrets/memcached-password"} cat > ${lib.escapeShellArg "${cfg.dataDir}/rabbitmq.conf"} </dev/null 2>&1; do + attempts=$((attempts + 1)) + if [ "$attempts" -ge 90 ]; then + echo "error: RabbitMQ did not become ready for Zulip bootstrap" >&2 + exit 1 + fi + sleep 2 + done + } + + ensure_zulip_volume_layout() { + local zulip_volume_mount + zulip_volume_mount="$(podman volume inspect burrow-zulip_zulip --format '{{.Mountpoint}}')" + install -d -m 0755 "$zulip_volume_mount/logs" + install -d -m 0755 "$zulip_volume_mount/logs/emails" + install -d -m 0700 "$zulip_volume_mount/secrets" + chown 1000:1000 "$zulip_volume_mount/logs" "$zulip_volume_mount/logs/emails" "$zulip_volume_mount/secrets" + + if [ ! 
-s "$zulip_volume_mount/secrets/bootstrap-owner-password" ]; then + umask 077 + openssl rand -base64 24 > "$zulip_volume_mount/secrets/bootstrap-owner-password" + fi + chown 1000:1000 "$zulip_volume_mount/secrets/bootstrap-owner-password" + chmod 0600 "$zulip_volume_mount/secrets/bootstrap-owner-password" + } + + bootstrap_realm_if_needed() { + local realm_exists + realm_exists="$( + compose run --rm --entrypoint bash zulip -lc \ + "su zulip -c '/home/zulip/deployments/current/manage.py list_realms'" \ + | awk '$NF == "https://${cfg.domain}" { print "yes" }' + )" + + if [ -n "$realm_exists" ]; then + return 0 + fi + + export ZULIP_REALM_NAME=${lib.escapeShellArg cfg.realmName} + export ZULIP_ADMIN_EMAIL=${lib.escapeShellArg cfg.administratorEmail} + export ZULIP_OWNER_NAME=${lib.escapeShellArg cfg.realmOwnerName} + + compose run --rm --entrypoint bash zulip -lc ' + su zulip -c "/home/zulip/deployments/current/manage.py create_realm --string-id= --password-file /data/secrets/bootstrap-owner-password --automated \"$ZULIP_REALM_NAME\" \"$ZULIP_ADMIN_EMAIL\" \"$ZULIP_OWNER_NAME\"" + ' + } + if [ ! 
-e .initialized ]; then - ${pkgs.podman-compose}/bin/podman-compose -p burrow-zulip pull - ${pkgs.podman-compose}/bin/podman-compose -p burrow-zulip run --rm zulip app:init + compose pull + compose up -d database memcached rabbitmq redis + wait_for_rabbitmq + compose run --rm zulip app:init touch .initialized fi - ${pkgs.podman-compose}/bin/podman-compose -p burrow-zulip up -d + compose up -d database memcached rabbitmq redis + wait_for_rabbitmq + ensure_zulip_volume_layout + bootstrap_realm_if_needed + compose up -d zulip ''; }; }; From 824bbd9d671c767acd770a01e0011a6c5f9301c5 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 00:55:07 -0700 Subject: [PATCH 43/59] Run Zulip bootstrap non-interactively --- nixos/modules/burrow-zulip.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index a408c12..238905b 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -385,7 +385,7 @@ EOF bootstrap_realm_if_needed() { local realm_exists realm_exists="$( - compose run --rm --entrypoint bash zulip -lc \ + compose run --rm -T --entrypoint bash zulip -lc \ "su zulip -c '/home/zulip/deployments/current/manage.py list_realms'" \ | awk '$NF == "https://${cfg.domain}" { print "yes" }' )" @@ -398,7 +398,7 @@ EOF export ZULIP_ADMIN_EMAIL=${lib.escapeShellArg cfg.administratorEmail} export ZULIP_OWNER_NAME=${lib.escapeShellArg cfg.realmOwnerName} - compose run --rm --entrypoint bash zulip -lc ' + compose run --rm -T --entrypoint bash zulip -lc ' su zulip -c "/home/zulip/deployments/current/manage.py create_realm --string-id= --password-file /data/secrets/bootstrap-owner-password --automated \"$ZULIP_REALM_NAME\" \"$ZULIP_ADMIN_EMAIL\" \"$ZULIP_OWNER_NAME\"" ' } @@ -407,7 +407,7 @@ EOF compose pull compose up -d database memcached rabbitmq redis wait_for_rabbitmq - compose run --rm zulip app:init + compose run --rm -T zulip app:init touch .initialized 
fi From b70b62dfef8e4907edeb6825a0b040a6967d5773 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 00:56:35 -0700 Subject: [PATCH 44/59] Fix Zulip bootstrap user handling --- Scripts/authentik-sync-linear-saml.sh | 4 ++-- Scripts/authentik-sync-linear-scim.sh | 7 +------ nixos/modules/burrow-zulip.nix | 8 ++++---- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/Scripts/authentik-sync-linear-saml.sh b/Scripts/authentik-sync-linear-saml.sh index 2fd1a90..5da64ad 100755 --- a/Scripts/authentik-sync-linear-saml.sh +++ b/Scripts/authentik-sync-linear-saml.sh @@ -294,8 +294,8 @@ existing_application="$( )" if [[ -n "$existing_application" ]]; then - application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" - api PATCH "/api/v3/core/applications/${application_pk}/" "$application_payload" >/dev/null + application_pk="existing" + api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null else create_application_result="$( api_with_status POST "/api/v3/core/applications/" "$application_payload" diff --git a/Scripts/authentik-sync-linear-scim.sh b/Scripts/authentik-sync-linear-scim.sh index 5d42cca..4ef83e4 100644 --- a/Scripts/authentik-sync-linear-scim.sh +++ b/Scripts/authentik-sync-linear-scim.sh @@ -278,12 +278,7 @@ application_payload="$( policy_engine_mode: .policy_engine_mode }' )" -application_pk="$(printf '%s\n' "$application" | jq -r '.pk // empty')" -if [[ -z "$application_pk" ]]; then - echo "error: could not resolve Authentik application primary key for ${application_slug}" >&2 - exit 1 -fi -api PATCH "/api/v3/core/applications/${application_pk}/" "$application_payload" >/dev/null +api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null group_pks_json="$(jq -cn --arg owner "$owner_group_pk" --arg admin "$admin_group_pk" --arg guest "$guest_group_pk" '[$owner, $admin, $guest]')" user_pks_json="$( diff --git a/nixos/modules/burrow-zulip.nix 
b/nixos/modules/burrow-zulip.nix index 238905b..0db3dfd 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -385,8 +385,8 @@ EOF bootstrap_realm_if_needed() { local realm_exists realm_exists="$( - compose run --rm -T --entrypoint bash zulip -lc \ - "su zulip -c '/home/zulip/deployments/current/manage.py list_realms'" \ + compose run --rm -T -u zulip --entrypoint bash zulip -lc \ + "/home/zulip/deployments/current/manage.py list_realms" \ | awk '$NF == "https://${cfg.domain}" { print "yes" }' )" @@ -398,8 +398,8 @@ EOF export ZULIP_ADMIN_EMAIL=${lib.escapeShellArg cfg.administratorEmail} export ZULIP_OWNER_NAME=${lib.escapeShellArg cfg.realmOwnerName} - compose run --rm -T --entrypoint bash zulip -lc ' - su zulip -c "/home/zulip/deployments/current/manage.py create_realm --string-id= --password-file /data/secrets/bootstrap-owner-password --automated \"$ZULIP_REALM_NAME\" \"$ZULIP_ADMIN_EMAIL\" \"$ZULIP_OWNER_NAME\"" + compose run --rm -T -u zulip --entrypoint bash zulip -lc ' + /home/zulip/deployments/current/manage.py create_realm --string-id= --password-file /data/secrets/bootstrap-owner-password --automated "$ZULIP_REALM_NAME" "$ZULIP_ADMIN_EMAIL" "$ZULIP_OWNER_NAME" ' } From fa2806e4b36e98df780a1be557bd0d864792cf5d Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 00:59:34 -0700 Subject: [PATCH 45/59] Bootstrap Zulip from the live app container --- nixos/modules/burrow-zulip.nix | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index 0db3dfd..ee6d6c7 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -384,9 +384,19 @@ EOF bootstrap_realm_if_needed() { local realm_exists + local attempts=0 + while ! 
podman exec burrow-zulip_zulip_1 test -r /etc/zulip/zulip-secrets.conf >/dev/null 2>&1; do + attempts=$((attempts + 1)) + if [ "$attempts" -ge 90 ]; then + echo "error: Zulip did not finish generating production secrets" >&2 + exit 1 + fi + sleep 2 + done + realm_exists="$( - compose run --rm -T -u zulip --entrypoint bash zulip -lc \ - "/home/zulip/deployments/current/manage.py list_realms" \ + podman exec burrow-zulip_zulip_1 bash -lc \ + "su zulip -c '/home/zulip/deployments/current/manage.py list_realms'" \ | awk '$NF == "https://${cfg.domain}" { print "yes" }' )" @@ -398,8 +408,8 @@ EOF export ZULIP_ADMIN_EMAIL=${lib.escapeShellArg cfg.administratorEmail} export ZULIP_OWNER_NAME=${lib.escapeShellArg cfg.realmOwnerName} - compose run --rm -T -u zulip --entrypoint bash zulip -lc ' - /home/zulip/deployments/current/manage.py create_realm --string-id= --password-file /data/secrets/bootstrap-owner-password --automated "$ZULIP_REALM_NAME" "$ZULIP_ADMIN_EMAIL" "$ZULIP_OWNER_NAME" + podman exec burrow-zulip_zulip_1 bash -lc ' + su zulip -c "/home/zulip/deployments/current/manage.py create_realm --string-id= --password-file /data/secrets/bootstrap-owner-password --automated \"$ZULIP_REALM_NAME\" \"$ZULIP_ADMIN_EMAIL\" \"$ZULIP_OWNER_NAME\"" ' } @@ -414,8 +424,8 @@ EOF compose up -d database memcached rabbitmq redis wait_for_rabbitmq ensure_zulip_volume_layout - bootstrap_realm_if_needed compose up -d zulip + bootstrap_realm_if_needed ''; }; }; From 42df7b5618d2d8500d814e7c1839260a38844559 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 01:11:37 -0700 Subject: [PATCH 46/59] Run Zulip on host-managed services --- ...ntik-backed-team-chat-and-workspace-sso.md | 4 + nixos/hosts/burrow-forge/default.nix | 8 - nixos/modules/burrow-zulip.nix | 290 ++++++++++-------- 3 files changed, 170 insertions(+), 132 deletions(-) diff --git a/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md 
b/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md index ff6e63d..0ce03a6 100644 --- a/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md +++ b/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md @@ -49,6 +49,10 @@ across vendor-native Google auth flows when Burrow already operates an IdP. - Add a Burrow-managed Zulip workload on the forge host at `chat.burrow.net`. The deployment should be repo-owned and rebuildable from Nix, even if the runtime uses vendor-supported container images internally. +- Prefer host-managed NixOS services for Zulip's stateful dependencies + (PostgreSQL, Redis, RabbitMQ, memcached, backups) so Burrow owns the + operational surface directly rather than composing a container-side service + mesh. - Zulip should authenticate through Authentik SAML rather than local passwords as the primary path. Initial bootstrap may still keep an operational escape hatch while the deployment is being validated. 
diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 2464672..be97661 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -170,13 +170,6 @@ in mode = "0400"; }; - age.secrets.burrowZulipMemcachedPassword = { - file = ../../../secrets/infra/zulip-memcached-password.age; - owner = "root"; - group = "root"; - mode = "0400"; - }; - age.secrets.burrowZulipRabbitmqPassword = { file = ../../../secrets/infra/zulip-rabbitmq-password.age; owner = "root"; @@ -275,7 +268,6 @@ in enable = true; administratorEmail = identities.contact.canonicalEmail; postgresPasswordFile = config.age.secrets.burrowZulipPostgresPassword.path; - memcachedPasswordFile = config.age.secrets.burrowZulipMemcachedPassword.path; rabbitmqPasswordFile = config.age.secrets.burrowZulipRabbitmqPassword.path; redisPasswordFile = config.age.secrets.burrowZulipRedisPassword.path; secretKeyFile = config.age.secrets.burrowZulipSecretKey.path; diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index ee6d6c7..b5e72b7 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -5,99 +5,30 @@ let yamlFormat = pkgs.formats.yaml { }; composeFile = yamlFormat.generate "burrow-zulip-compose.yaml" { services = { - database = { - image = "zulip/zulip-postgresql:14"; - restart = "unless-stopped"; - secrets = [ "zulip__postgres_password" ]; - environment = { - POSTGRES_DB = "zulip"; - POSTGRES_USER = "zulip"; - POSTGRES_PASSWORD_FILE = "/run/secrets/zulip__postgres_password"; - }; - volumes = [ "postgresql-14:/var/lib/postgresql/data:rw" ]; - attach = false; - }; - memcached = { - image = "memcached:alpine"; - restart = "unless-stopped"; - command = [ - "sh" - "-euc" - '' - echo 'mech_list: plain' > "$SASL_CONF_PATH" - echo "zulip@$HOSTNAME:$(cat /run/burrow/memcached-password)" > "$MEMCACHED_SASL_PWDB" - echo "zulip@localhost:$(cat /run/burrow/memcached-password)" >> "$MEMCACHED_SASL_PWDB" - 
exec memcached -S - '' - ]; - environment = { - SASL_CONF_PATH = "/home/memcache/memcached.conf"; - MEMCACHED_SASL_PWDB = "/home/memcache/memcached-sasl-db"; - }; - volumes = [ "./secrets/memcached-password:/run/burrow/memcached-password:ro" ]; - attach = false; - }; - rabbitmq = { - image = "rabbitmq:4.2"; - restart = "unless-stopped"; - volumes = [ - "rabbitmq:/var/lib/rabbitmq:rw" - "./rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf:ro" - ]; - attach = false; - }; - redis = { - image = "redis:alpine"; - restart = "unless-stopped"; - command = [ - "sh" - "-euc" - "/usr/local/bin/docker-entrypoint.sh --requirepass \"$(cat \"$REDIS_PASSWORD_FILE\")\"" - ]; - secrets = [ "zulip__redis_password" ]; - environment = { - REDIS_PASSWORD_FILE = "/run/secrets/zulip__redis_password"; - }; - volumes = [ "redis:/data:rw" ]; - attach = false; - }; zulip = { image = "ghcr.io/zulip/zulip-server:11.6-1"; restart = "unless-stopped"; + network_mode = "host"; secrets = [ "zulip__postgres_password" - "zulip__memcached_password" "zulip__rabbitmq_password" "zulip__redis_password" "zulip__secret_key" "zulip__email_password" ]; environment = { - SETTING_REMOTE_POSTGRES_HOST = "database"; - SETTING_MEMCACHED_LOCATION = "memcached:11211"; - SETTING_RABBITMQ_HOST = "rabbitmq"; - SETTING_REDIS_HOST = "redis"; + SETTING_REMOTE_POSTGRES_HOST = "127.0.0.1"; + SETTING_MEMCACHED_LOCATION = "127.0.0.1:11211"; + SETTING_RABBITMQ_HOST = "127.0.0.1"; + SETTING_REDIS_HOST = "127.0.0.1"; }; - volumes = [ "zulip:/data:rw" ]; + volumes = [ "${cfg.dataDir}/data:/data:rw" ]; ulimits.nofile = { soft = 1000000; hard = 1048576; }; - depends_on = [ - "database" - "memcached" - "rabbitmq" - "redis" - ]; }; }; - - volumes = { - zulip = { }; - postgresql-14 = { }; - rabbitmq = { }; - redis = { }; - }; }; in { @@ -157,11 +88,6 @@ in description = "File containing the Zulip PostgreSQL password."; }; - memcachedPasswordFile = lib.mkOption { - type = lib.types.str; - description = "File containing the Zulip memcached 
password."; - }; - rabbitmqPasswordFile = lib.mkOption { type = lib.types.str; description = "File containing the Zulip RabbitMQ password."; @@ -184,6 +110,49 @@ in pkgs.podman-compose ]; + services.postgresql = { + ensureDatabases = [ "zulip" ]; + ensureUsers = [ + { + name = "zulip"; + ensureDBOwnership = true; + } + ]; + settings = { + listen_addresses = lib.mkDefault "127.0.0.1"; + password_encryption = lib.mkDefault "scram-sha-256"; + }; + authentication = lib.mkAfter '' + host zulip zulip 127.0.0.1/32 scram-sha-256 + ''; + }; + + services.postgresqlBackup = { + enable = true; + backupAll = false; + databases = [ "zulip" ]; + }; + + services.memcached = { + enable = true; + listen = "127.0.0.1"; + port = 11211; + extraOptions = [ "-U 0" ]; + }; + + services.redis.servers.zulip = { + enable = true; + bind = "127.0.0.1"; + port = 6379; + requirePassFile = cfg.redisPasswordFile; + }; + + services.rabbitmq = { + enable = true; + listenAddress = "127.0.0.1"; + port = 5672; + }; + services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' encode gzip zstd reverse_proxy 127.0.0.1:${toString cfg.port} @@ -191,18 +160,114 @@ in systemd.tmpfiles.rules = [ "d ${cfg.dataDir} 0755 root root - -" + "d ${cfg.dataDir}/data 0755 root root - -" + "d ${cfg.dataDir}/data/logs 0755 root root - -" + "d ${cfg.dataDir}/data/logs/emails 0755 root root - -" + "d ${cfg.dataDir}/data/secrets 0700 root root - -" "d ${cfg.dataDir}/secrets 0700 root root - -" "d ${cfg.dataDir}/logs 0755 root root - -" ]; + systemd.services.burrow-zulip-postgres-bootstrap = { + description = "Bootstrap PostgreSQL role for Burrow Zulip"; + after = [ "postgresql.service" ]; + wants = [ "postgresql.service" ]; + requiredBy = [ "burrow-zulip.service" ]; + before = [ "burrow-zulip.service" ]; + path = [ + config.services.postgresql.package + pkgs.bash + pkgs.coreutils + pkgs.python3 + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + + 
db_password="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.postgresPasswordFile})" + db_password_sql="$(printf '%s' "$db_password" | python3 -c "import sys; print(sys.stdin.read().replace(chr(39), chr(39) * 2), end=\"\")")" + setup_sql="$(mktemp)" + trap 'rm -f "$setup_sql"' EXIT + + cat > "$setup_sql" < ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} chmod 0600 ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} - install -m 0444 ${lib.escapeShellArg cfg.memcachedPasswordFile} ${lib.escapeShellArg "${cfg.dataDir}/secrets/memcached-password"} - cat > ${lib.escapeShellArg "${cfg.dataDir}/rabbitmq.conf"} </dev/null 2>&1; do - attempts=$((attempts + 1)) - if [ "$attempts" -ge 90 ]; then - echo "error: RabbitMQ did not become ready for Zulip bootstrap" >&2 - exit 1 - fi - sleep 2 - done - } + ensure_zulip_data_layout() { + local zulip_data_dir=${lib.escapeShellArg "${cfg.dataDir}/data"} - ensure_zulip_volume_layout() { - local zulip_volume_mount - zulip_volume_mount="$(podman volume inspect burrow-zulip_zulip --format '{{.Mountpoint}}')" - install -d -m 0755 "$zulip_volume_mount/logs" - install -d -m 0755 "$zulip_volume_mount/logs/emails" - install -d -m 0700 "$zulip_volume_mount/secrets" - chown 1000:1000 "$zulip_volume_mount/logs" "$zulip_volume_mount/logs/emails" "$zulip_volume_mount/secrets" + install -d -m 0755 "$zulip_data_dir/logs" + install -d -m 0755 "$zulip_data_dir/logs/emails" + install -d -m 0700 "$zulip_data_dir/secrets" + chown 1000:1000 "$zulip_data_dir/logs" "$zulip_data_dir/logs/emails" "$zulip_data_dir/secrets" - if [ ! -s "$zulip_volume_mount/secrets/bootstrap-owner-password" ]; then + if [ ! 
-s "$zulip_data_dir/secrets/bootstrap-owner-password" ]; then umask 077 - openssl rand -base64 24 > "$zulip_volume_mount/secrets/bootstrap-owner-password" + openssl rand -base64 24 > "$zulip_data_dir/secrets/bootstrap-owner-password" fi - chown 1000:1000 "$zulip_volume_mount/secrets/bootstrap-owner-password" - chmod 0600 "$zulip_volume_mount/secrets/bootstrap-owner-password" + chown 1000:1000 "$zulip_data_dir/secrets/bootstrap-owner-password" + chmod 0600 "$zulip_data_dir/secrets/bootstrap-owner-password" } bootstrap_realm_if_needed() { @@ -415,15 +461,11 @@ EOF if [ ! -e .initialized ]; then compose pull - compose up -d database memcached rabbitmq redis - wait_for_rabbitmq compose run --rm -T zulip app:init touch .initialized fi - compose up -d database memcached rabbitmq redis - wait_for_rabbitmq - ensure_zulip_volume_layout + ensure_zulip_data_layout compose up -d zulip bootstrap_realm_if_needed ''; From 601bedcc59532f183fa5009b81aef3efa4974c0e Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 01:19:01 -0700 Subject: [PATCH 47/59] Fix Zulip Postgres bootstrap runtime --- nixos/modules/burrow-zulip.nix | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index b5e72b7..3417925 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -179,6 +179,7 @@ in pkgs.bash pkgs.coreutils pkgs.python3 + pkgs.shadow ]; serviceConfig = { Type = "oneshot"; @@ -204,7 +205,7 @@ END ALTER ROLE zulip WITH LOGIN PASSWORD '$db_password_sql'; SQL - su postgres -s ${pkgs.bash}/bin/bash -c "psql -v ON_ERROR_STOP=1 -f '$setup_sql'" + ${pkgs.shadow}/bin/su postgres -s ${pkgs.bash}/bin/bash -c "psql -v ON_ERROR_STOP=1 -f '$setup_sql'" ''; }; From 2ef804fa1051268d25f3a26cdaa39cee73b9a259 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 01:20:55 -0700 Subject: [PATCH 48/59] Use runuser for Zulip Postgres bootstrap --- nixos/modules/burrow-zulip.nix | 
4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index 3417925..23ce77b 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -179,7 +179,7 @@ in pkgs.bash pkgs.coreutils pkgs.python3 - pkgs.shadow + pkgs.util-linux ]; serviceConfig = { Type = "oneshot"; @@ -205,7 +205,7 @@ END ALTER ROLE zulip WITH LOGIN PASSWORD '$db_password_sql'; SQL - ${pkgs.shadow}/bin/su postgres -s ${pkgs.bash}/bin/bash -c "psql -v ON_ERROR_STOP=1 -f '$setup_sql'" + ${pkgs.util-linux}/bin/runuser -u postgres -- psql -v ON_ERROR_STOP=1 -f "$setup_sql" ''; }; From 142c2ef77807f9071ae2326e54fc7e7c338b1b52 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 01:22:32 -0700 Subject: [PATCH 49/59] Allow postgres bootstrap to read generated SQL --- nixos/modules/burrow-zulip.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index 23ce77b..7d93705 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -204,6 +204,7 @@ END \$\$; ALTER ROLE zulip WITH LOGIN PASSWORD '$db_password_sql'; SQL + chmod 0644 "$setup_sql" ${pkgs.util-linux}/bin/runuser -u postgres -- psql -v ON_ERROR_STOP=1 -f "$setup_sql" ''; From 2af7618f5265471f4048db49eb1353924cf322f6 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 01:31:45 -0700 Subject: [PATCH 50/59] Fix tailscale landing and zulip bootstrap --- Scripts/authentik-sync-tailscale-oidc.sh | 16 +++++++++++++++- nixos/hosts/burrow-forge/default.nix | 2 +- nixos/modules/burrow-zulip.nix | 24 +++++++++++++++++------- 3 files changed, 33 insertions(+), 9 deletions(-) diff --git a/Scripts/authentik-sync-tailscale-oidc.sh b/Scripts/authentik-sync-tailscale-oidc.sh index fde1a01..58fe7e4 100755 --- a/Scripts/authentik-sync-tailscale-oidc.sh +++ b/Scripts/authentik-sync-tailscale-oidc.sh @@ -137,10 +137,24 @@ lookup_group_pk() { 
lookup_application_pk() { local slug="$1" + local application_pk lookup_result lookup_status - api GET "/api/v3/core/applications/?page_size=200" \ + application_pk="$( + api GET "/api/v3/core/applications/?page_size=200" \ | jq -r --arg slug "$slug" '.results[]? | select(.slug == $slug) | .pk // empty' \ | head -n1 + )" + + if [[ -n "$application_pk" ]]; then + printf '%s\n' "$application_pk" + return 0 + fi + + lookup_result="$(api_with_status GET "/api/v3/core/applications/${slug}/")" + lookup_status="$(printf '%s\n' "$lookup_result" | sed -n '1p')" + if [[ "$lookup_status" =~ ^20[01]$ ]]; then + printf '%s\n' "$lookup_result" | sed '1d' | jq -r '.pk // empty' + fi } ensure_application_group_binding() { diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index be97661..c4fc92e 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -237,7 +237,7 @@ in forgejoClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path; headscaleClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path; tailscaleClientSecretFile = config.age.secrets.burrowTailscaleOidcClientSecret.path; - defaultExternalApplicationSlug = "ts"; + defaultExternalApplicationSlug = "tailscale"; googleClientIDFile = config.age.secrets.burrowAuthentikGoogleClientId.path; googleClientSecretFile = config.age.secrets.burrowAuthentikGoogleClientSecret.path; googleAccountMapFile = config.age.secrets.burrowAuthentikGoogleAccountMap.path; diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index 7d93705..0096b65 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -404,7 +404,8 @@ EOF Group = "root"; WorkingDirectory = cfg.dataDir; RemainAfterExit = true; - ExecStop = "${pkgs.bash}/bin/bash -lc 'cd ${lib.escapeShellArg cfg.dataDir} && ${pkgs.podman-compose}/bin/podman-compose -p burrow-zulip down'"; + TimeoutStopSec = "20s"; + ExecStop = 
"${pkgs.bash}/bin/bash -lc 'set -euo pipefail; if ${pkgs.podman}/bin/podman container exists burrow-zulip_zulip_1; then ${pkgs.podman}/bin/podman stop --ignore --time 10 burrow-zulip_zulip_1 >/dev/null || true; ${pkgs.podman}/bin/podman rm -f --ignore burrow-zulip_zulip_1 >/dev/null || true; fi'"; }; script = '' set -euo pipefail @@ -452,13 +453,22 @@ EOF return 0 fi - export ZULIP_REALM_NAME=${lib.escapeShellArg cfg.realmName} - export ZULIP_ADMIN_EMAIL=${lib.escapeShellArg cfg.administratorEmail} - export ZULIP_OWNER_NAME=${lib.escapeShellArg cfg.realmOwnerName} + local realm_name=${lib.escapeShellArg cfg.realmName} + local admin_email=${lib.escapeShellArg cfg.administratorEmail} + local owner_name=${lib.escapeShellArg cfg.realmOwnerName} + local create_realm_cmd - podman exec burrow-zulip_zulip_1 bash -lc ' - su zulip -c "/home/zulip/deployments/current/manage.py create_realm --string-id= --password-file /data/secrets/bootstrap-owner-password --automated \"$ZULIP_REALM_NAME\" \"$ZULIP_ADMIN_EMAIL\" \"$ZULIP_OWNER_NAME\"" - ' + printf -v create_realm_cmd '%q ' \ + /home/zulip/deployments/current/manage.py \ + create_realm \ + --string-id= \ + --password-file /data/secrets/bootstrap-owner-password \ + --automated \ + "$realm_name" \ + "$admin_email" \ + "$owner_name" + + podman exec burrow-zulip_zulip_1 su zulip -c "$create_realm_cmd" } if [ ! 
-e .initialized ]; then From 4c3dcdd17b7d1feb2487cc8119e6c19b2c4dfa4f Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 01:43:43 -0700 Subject: [PATCH 51/59] Force https-only Zulip SAML login --- nixos/modules/burrow-zulip.nix | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index 0096b65..25d553d 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -340,13 +340,18 @@ services: SETTING_ZULIP_ADMINISTRATOR: "${cfg.administratorEmail}" TRUST_GATEWAY_IP: "True" SETTING_SEND_LOGIN_EMAILS: "False" - ZULIP_AUTH_BACKENDS: "EmailAuthBackend,SAMLAuthBackend" + ZULIP_AUTH_BACKENDS: "SAMLAuthBackend" CONFIG_application_server__http_only: true CONFIG_application_server__nginx_listen_port: ${toString cfg.port} CONFIG_application_server__queue_workers_multiprocess: false ZULIP_CUSTOM_SETTINGS: | EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend" EMAIL_FILE_PATH = "/data/logs/emails" + EXTERNAL_URI_SCHEME = "https://" + SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") + USE_X_FORWARDED_HOST = True + SESSION_COOKIE_SECURE = True + CSRF_COOKIE_SECURE = True SOCIAL_AUTH_SAML_ORG_INFO = { "en-US": { "displayname": "Burrow Zulip", From 78d83c50790b5882228f2e343a7663bbf70eb51e Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 01:49:25 -0700 Subject: [PATCH 52/59] Pin Zulip SAML ACS to https --- nixos/modules/burrow-zulip.nix | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index 25d553d..e26cc3d 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -352,6 +352,15 @@ services: USE_X_FORWARDED_HOST = True SESSION_COOKIE_SECURE = True CSRF_COOKIE_SECURE = True + SOCIAL_AUTH_REDIRECT_IS_HTTPS = True + SOCIAL_AUTH_SAML_REDIRECT_IS_HTTPS = True + SOCIAL_AUTH_SAML_SP_ENTITY_ID = "https://${cfg.domain}" + 
SOCIAL_AUTH_SAML_SP_EXTRA = { + "assertionConsumerService": { + "url": "https://${cfg.domain}/complete/saml/", + "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST", + }, + } SOCIAL_AUTH_SAML_ORG_INFO = { "en-US": { "displayname": "Burrow Zulip", From 5598fc18fc6bf168a80dc123e164c7615002bfa0 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 03:37:42 -0700 Subject: [PATCH 53/59] Enable Zulip SAML auto signup --- nixos/modules/burrow-zulip.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index e26cc3d..ef1f190 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -373,6 +373,7 @@ services: "entity_id": "https://${cfg.authentikDomain}", "url": "https://${cfg.authentikDomain}/application/saml/${cfg.authentikProviderSlug}/sso/binding/redirect/", "display_name": "burrow.net", + "auto_signup": True, "x509cert": """$saml_cert""", "attr_user_permanent_id": "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress", "attr_username": "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress", From eb9327a99fcb18ecc763644a0ce2b0068a7b0dd9 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 03:43:57 -0700 Subject: [PATCH 54/59] Map Burrow admins to Zulip owners --- Scripts/authentik-sync-zulip-saml.sh | 16 +++++++++++++++- nixos/modules/burrow-authentik.nix | 1 + nixos/modules/burrow-zulip.nix | 8 ++++++++ 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/Scripts/authentik-sync-zulip-saml.sh b/Scripts/authentik-sync-zulip-saml.sh index 6767991..cd18752 100644 --- a/Scripts/authentik-sync-zulip-saml.sh +++ b/Scripts/authentik-sync-zulip-saml.sh @@ -10,6 +10,7 @@ acs_url="${AUTHENTIK_ZULIP_ACS_URL:-https://chat.burrow.net/complete/saml/}" audience="${AUTHENTIK_ZULIP_AUDIENCE:-https://chat.burrow.net}" launch_url="${AUTHENTIK_ZULIP_LAUNCH_URL:-https://chat.burrow.net/}" 
access_group="${AUTHENTIK_ZULIP_ACCESS_GROUP:-}" +admin_group="${AUTHENTIK_ZULIP_ADMIN_GROUP:-}" issuer="${AUTHENTIK_ZULIP_ISSUER:-$authentik_url}" usage() { @@ -28,6 +29,7 @@ Optional environment: AUTHENTIK_ZULIP_AUDIENCE AUTHENTIK_ZULIP_LAUNCH_URL AUTHENTIK_ZULIP_ACCESS_GROUP + AUTHENTIK_ZULIP_ADMIN_GROUP AUTHENTIK_ZULIP_ISSUER EOF } @@ -257,6 +259,17 @@ last_name_mapping_pk="$( $'parts = (request.user.name or "").rsplit(" ", 1)\nif len(parts) == 2 and parts[1]:\n return parts[1]\nreturn request.user.username' )" +role_mapping_pk="" +if [[ -n "$admin_group" ]]; then + role_mapping_pk="$( + reconcile_property_mapping \ + "Burrow Zulip SAML Role" \ + "zulip_role" \ + "zulip_role" \ + $'admin_group = "'$admin_group$'"\nif any(group.name == admin_group for group in request.user.ak_groups.all()):\n return "owner"\nreturn None' + )" +fi + if [[ -z "$email_mapping_pk" || -z "$name_mapping_pk" || -z "$first_name_mapping_pk" || -z "$last_name_mapping_pk" ]]; then echo "error: failed to reconcile Zulip SAML property mappings" >&2 exit 1 @@ -276,6 +289,7 @@ provider_payload="$( --arg name_mapping "$name_mapping_pk" \ --arg first_name_mapping "$first_name_mapping_pk" \ --arg last_name_mapping "$last_name_mapping_pk" \ + --arg role_mapping "$role_mapping_pk" \ '{ name: $name, authorization_flow: $authorization_flow, @@ -293,7 +307,7 @@ provider_payload="$( $name_mapping, $first_name_mapping, $last_name_mapping - ] + ] + (if $role_mapping != "" then [$role_mapping] else [] end) }' )" diff --git a/nixos/modules/burrow-authentik.nix b/nixos/modules/burrow-authentik.nix index acf76ce..977b641 100644 --- a/nixos/modules/burrow-authentik.nix +++ b/nixos/modules/burrow-authentik.nix @@ -956,6 +956,7 @@ EOF ${lib.optionalString (cfg.zulipAccessGroupName != null) '' export AUTHENTIK_ZULIP_ACCESS_GROUP=${lib.escapeShellArg cfg.zulipAccessGroupName} ''} + export AUTHENTIK_ZULIP_ADMIN_GROUP=${lib.escapeShellArg cfg.adminGroupName} ${pkgs.bash}/bin/bash ${zulipSamlSyncScript} ''; diff 
--git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index ef1f190..a7adb48 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -374,6 +374,7 @@ services: "url": "https://${cfg.authentikDomain}/application/saml/${cfg.authentikProviderSlug}/sso/binding/redirect/", "display_name": "burrow.net", "auto_signup": True, + "extra_attrs": ["zulip_role"], "x509cert": """$saml_cert""", "attr_user_permanent_id": "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress", "attr_username": "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress", @@ -382,6 +383,13 @@ services: "attr_last_name": "lastName", }, } + SOCIAL_AUTH_SYNC_ATTRS_DICT = { + "authentik": { + "saml": { + "role": "zulip_role", + }, + }, + } EOF ''; }; From 6cd0f3b1aeaf5a9d3dffe85719b4226bec44fa04 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 13:59:01 -0700 Subject: [PATCH 55/59] Fix Zulip SAML callback scheme handling --- nixos/modules/burrow-zulip.nix | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index a7adb48..9a805c4 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -307,6 +307,34 @@ SQL install -m 0644 ${composeFile} ${lib.escapeShellArg "${cfg.dataDir}/compose.yaml"} : > ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} chmod 0600 ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} + cat > ${lib.escapeShellArg "${cfg.dataDir}/uwsgi_params"} <<'EOF' +uwsgi_param QUERY_STRING $query_string; +uwsgi_param REQUEST_METHOD $request_method; +uwsgi_param CONTENT_TYPE $content_type; +uwsgi_param CONTENT_LENGTH $content_length; +uwsgi_param REQUEST_URI $request_uri; +uwsgi_param PATH_INFO $document_uri; +uwsgi_param DOCUMENT_ROOT $document_root; +uwsgi_param SERVER_PROTOCOL $server_protocol; +uwsgi_param REQUEST_SCHEME $trusted_x_forwarded_proto; +uwsgi_param HTTPS on; 
+uwsgi_param REMOTE_ADDR $remote_addr; +uwsgi_param REMOTE_PORT $remote_port; +uwsgi_param SERVER_ADDR $server_addr; +uwsgi_param SERVER_PORT $server_port; +uwsgi_param SERVER_NAME $server_name; +uwsgi_param HTTP_X_REAL_IP $remote_addr; +uwsgi_param HTTP_X_FORWARDED_PROTO $trusted_x_forwarded_proto; +uwsgi_param HTTP_X_FORWARDED_SSL ""; +uwsgi_param HTTP_X_PROXY_MISCONFIGURATION $x_proxy_misconfiguration; + +# This value is the default, and is provided for explicitness; it must +# be longer than the configured 55s "harakiri" timeout in uwsgi +uwsgi_read_timeout 60s; + +uwsgi_pass django; +EOF + chmod 0644 ${lib.escapeShellArg "${cfg.dataDir}/uwsgi_params"} metadata_xml="$(${pkgs.curl}/bin/curl -fsSL https://${cfg.authentikDomain}/application/saml/${cfg.authentikProviderSlug}/metadata/)" saml_cert="$(printf '%s' "$metadata_xml" | ${pkgs.python3}/bin/python3 -c ' @@ -390,6 +418,8 @@ services: }, }, } + volumes: + - ${cfg.dataDir}/uwsgi_params:/etc/nginx/uwsgi_params:ro EOF ''; }; From 836ccc93cd1d63dd844f79cbb364558e6bc9be29 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 14:04:42 -0700 Subject: [PATCH 56/59] Patch Zulip uwsgi scheme at runtime --- nixos/modules/burrow-zulip.nix | 62 ++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 30 deletions(-) diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index 9a805c4..3149a88 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -307,34 +307,6 @@ SQL install -m 0644 ${composeFile} ${lib.escapeShellArg "${cfg.dataDir}/compose.yaml"} : > ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} chmod 0600 ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} - cat > ${lib.escapeShellArg "${cfg.dataDir}/uwsgi_params"} <<'EOF' -uwsgi_param QUERY_STRING $query_string; -uwsgi_param REQUEST_METHOD $request_method; -uwsgi_param CONTENT_TYPE $content_type; -uwsgi_param CONTENT_LENGTH $content_length; -uwsgi_param REQUEST_URI 
$request_uri; -uwsgi_param PATH_INFO $document_uri; -uwsgi_param DOCUMENT_ROOT $document_root; -uwsgi_param SERVER_PROTOCOL $server_protocol; -uwsgi_param REQUEST_SCHEME $trusted_x_forwarded_proto; -uwsgi_param HTTPS on; -uwsgi_param REMOTE_ADDR $remote_addr; -uwsgi_param REMOTE_PORT $remote_port; -uwsgi_param SERVER_ADDR $server_addr; -uwsgi_param SERVER_PORT $server_port; -uwsgi_param SERVER_NAME $server_name; -uwsgi_param HTTP_X_REAL_IP $remote_addr; -uwsgi_param HTTP_X_FORWARDED_PROTO $trusted_x_forwarded_proto; -uwsgi_param HTTP_X_FORWARDED_SSL ""; -uwsgi_param HTTP_X_PROXY_MISCONFIGURATION $x_proxy_misconfiguration; - -# This value is the default, and is provided for explicitness; it must -# be longer than the configured 55s "harakiri" timeout in uwsgi -uwsgi_read_timeout 60s; - -uwsgi_pass django; -EOF - chmod 0644 ${lib.escapeShellArg "${cfg.dataDir}/uwsgi_params"} metadata_xml="$(${pkgs.curl}/bin/curl -fsSL https://${cfg.authentikDomain}/application/saml/${cfg.authentikProviderSlug}/metadata/)" saml_cert="$(printf '%s' "$metadata_xml" | ${pkgs.python3}/bin/python3 -c ' @@ -418,8 +390,6 @@ services: }, }, } - volumes: - - ${cfg.dataDir}/uwsgi_params:/etc/nginx/uwsgi_params:ro EOF ''; }; @@ -484,6 +454,37 @@ EOF chmod 0600 "$zulip_data_dir/secrets/bootstrap-owner-password" } + patch_uwsgi_scheme_handling() { + podman exec burrow-zulip_zulip_1 bash -lc "cat > /etc/nginx/uwsgi_params <<'EOF' +uwsgi_param QUERY_STRING \$query_string; +uwsgi_param REQUEST_METHOD \$request_method; +uwsgi_param CONTENT_TYPE \$content_type; +uwsgi_param CONTENT_LENGTH \$content_length; +uwsgi_param REQUEST_URI \$request_uri; +uwsgi_param PATH_INFO \$document_uri; +uwsgi_param DOCUMENT_ROOT \$document_root; +uwsgi_param SERVER_PROTOCOL \$server_protocol; +uwsgi_param REQUEST_SCHEME \$trusted_x_forwarded_proto; +uwsgi_param HTTPS on; +uwsgi_param REMOTE_ADDR \$remote_addr; +uwsgi_param REMOTE_PORT \$remote_port; +uwsgi_param SERVER_ADDR \$server_addr; +uwsgi_param SERVER_PORT 
\$server_port; +uwsgi_param SERVER_NAME \$server_name; +uwsgi_param HTTP_X_REAL_IP \$remote_addr; +uwsgi_param HTTP_X_FORWARDED_PROTO \$trusted_x_forwarded_proto; +uwsgi_param HTTP_X_FORWARDED_SSL \"\"; +uwsgi_param HTTP_X_PROXY_MISCONFIGURATION \$x_proxy_misconfiguration; + +# This value is the default, and is provided for explicitness; it must +# be longer than the configured 55s harakiri timeout in uwsgi +uwsgi_read_timeout 60s; + +uwsgi_pass django; +EOF +supervisorctl restart nginx zulip-django >/dev/null" + } + bootstrap_realm_if_needed() { local realm_exists local attempts=0 @@ -532,6 +533,7 @@ EOF ensure_zulip_data_layout compose up -d zulip + patch_uwsgi_scheme_handling bootstrap_realm_if_needed ''; }; From 75401107134cc6a34e94fdef36d162079d22fc1c Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 14:09:26 -0700 Subject: [PATCH 57/59] Wait for Zulip supervisor before nginx patching --- nixos/modules/burrow-zulip.nix | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index 3149a88..9298571 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -455,6 +455,16 @@ EOF } patch_uwsgi_scheme_handling() { + local attempts=0 + while ! 
podman exec burrow-zulip_zulip_1 supervisorctl status >/dev/null 2>&1; do + attempts=$((attempts + 1)) + if [ "$attempts" -ge 90 ]; then + echo "error: Zulip supervisor did not become ready for nginx patching" >&2 + exit 1 + fi + sleep 2 + done + podman exec burrow-zulip_zulip_1 bash -lc "cat > /etc/nginx/uwsgi_params <<'EOF' uwsgi_param QUERY_STRING \$query_string; uwsgi_param REQUEST_METHOD \$request_method; @@ -533,8 +543,8 @@ supervisorctl restart nginx zulip-django >/dev/null" ensure_zulip_data_layout compose up -d zulip - patch_uwsgi_scheme_handling bootstrap_realm_if_needed + patch_uwsgi_scheme_handling ''; }; }; From 9244a0476ab164959e2a1c5eead7317b2ede55bc Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 19 Apr 2026 14:37:18 -0700 Subject: [PATCH 58/59] Fix Zulip SAML provisioning --- nixos/modules/burrow-zulip.nix | 62 +++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 13 deletions(-) diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix index 9298571..9670694 100644 --- a/nixos/modules/burrow-zulip.nix +++ b/nixos/modules/burrow-zulip.nix @@ -2,6 +2,11 @@ let cfg = config.services.burrow.zulip; + realmSignupDomain = + let + parts = lib.splitString "@" cfg.administratorEmail; + in + if builtins.length parts == 2 then builtins.elemAt parts 1 else cfg.domain; yamlFormat = pkgs.formats.yaml { }; composeFile = yamlFormat.generate "burrow-zulip-compose.yaml" { services = { @@ -352,6 +357,7 @@ services: USE_X_FORWARDED_HOST = True SESSION_COOKIE_SECURE = True CSRF_COOKIE_SECURE = True + CSRF_TRUSTED_ORIGINS = ["https://${cfg.domain}"] SOCIAL_AUTH_REDIRECT_IS_HTTPS = True SOCIAL_AUTH_SAML_REDIRECT_IS_HTTPS = True SOCIAL_AUTH_SAML_SP_ENTITY_ID = "https://${cfg.domain}" @@ -384,7 +390,7 @@ services: }, } SOCIAL_AUTH_SYNC_ATTRS_DICT = { - "authentik": { + "": { "saml": { "role": "zulip_role", }, @@ -454,18 +460,38 @@ EOF chmod 0600 "$zulip_data_dir/secrets/bootstrap-owner-password" } - patch_uwsgi_scheme_handling() { 
+ wait_for_zulip_supervisor() { local attempts=0 while ! podman exec burrow-zulip_zulip_1 supervisorctl status >/dev/null 2>&1; do attempts=$((attempts + 1)) if [ "$attempts" -ge 90 ]; then - echo "error: Zulip supervisor did not become ready for nginx patching" >&2 + echo "error: Zulip supervisor did not become ready" >&2 exit 1 fi sleep 2 done + } - podman exec burrow-zulip_zulip_1 bash -lc "cat > /etc/nginx/uwsgi_params <<'EOF' + patch_uwsgi_scheme_handling() { + wait_for_zulip_supervisor + podman exec burrow-zulip_zulip_1 bash -lc "cat > /etc/nginx/zulip-include/trusted-proto <<'EOF' +map \$remote_addr \$trusted_x_forwarded_proto { + default \$scheme; + 127.0.0.1 \$http_x_forwarded_proto; + ::1 \$http_x_forwarded_proto; + 172.31.1.1 \$http_x_forwarded_proto; +} +map \$remote_addr \$trusted_x_forwarded_for { + default \"\"; + 127.0.0.1 \$http_x_forwarded_for; + ::1 \$http_x_forwarded_for; + 172.31.1.1 \$http_x_forwarded_for; +} +map \$remote_addr \$x_proxy_misconfiguration { + default \"\"; +} +EOF +cat > /etc/nginx/uwsgi_params <<'EOF' uwsgi_param QUERY_STRING \$query_string; uwsgi_param REQUEST_METHOD \$request_method; uwsgi_param CONTENT_TYPE \$content_type; @@ -496,16 +522,8 @@ supervisorctl restart nginx zulip-django >/dev/null" } bootstrap_realm_if_needed() { + wait_for_zulip_supervisor local realm_exists - local attempts=0 - while ! 
podman exec burrow-zulip_zulip_1 test -r /etc/zulip/zulip-secrets.conf >/dev/null 2>&1; do - attempts=$((attempts + 1)) - if [ "$attempts" -ge 90 ]; then - echo "error: Zulip did not finish generating production secrets" >&2 - exit 1 - fi - sleep 2 - done realm_exists="$( podman exec burrow-zulip_zulip_1 bash -lc \ @@ -535,6 +553,23 @@ supervisorctl restart nginx zulip-django >/dev/null" podman exec burrow-zulip_zulip_1 su zulip -c "$create_realm_cmd" } + reconcile_realm_policy() { + wait_for_zulip_supervisor + local realm_id + realm_id="$( + podman exec burrow-zulip_zulip_1 bash -lc \ + "su zulip -c '/home/zulip/deployments/current/manage.py list_realms'" \ + | awk '$NF == "https://${cfg.domain}" { print $1 }' + )" + + podman exec burrow-zulip_zulip_1 su zulip -c \ + "/home/zulip/deployments/current/manage.py realm_domain --op add -r $realm_id ${realmSignupDomain} --allow-subdomains --automated" \ + >/dev/null 2>&1 || true + + podman exec burrow-zulip_zulip_1 su zulip -c \ + "/home/zulip/deployments/current/manage.py shell -c 'from zerver.models import Realm; realm = Realm.objects.get(id=$realm_id); realm.invite_required = False; realm.save(update_fields=[\"invite_required\"])'" + } + if [ ! -e .initialized ]; then compose pull compose run --rm -T zulip app:init @@ -544,6 +579,7 @@ supervisorctl restart nginx zulip-django >/dev/null" ensure_zulip_data_layout compose up -d zulip bootstrap_realm_if_needed + reconcile_realm_policy patch_uwsgi_scheme_handling ''; }; From 97c569fb35688a5b89f0a20f938a7bb6d1afe8d7 Mon Sep 17 00:00:00 2001 From: Conrad Kramer Date: Sun, 3 May 2026 17:36:55 -0700 Subject: [PATCH 59/59] Align GTK app with Apple home surface Add the GTK home screen, local account store, daemon gRPC wrapper, and embedded Linux daemon startup path so the Linux app follows the Apple client UX and daemon boundary. Document the GTK parity expectations and update the daemon IPC and Tailnet BEPs with the cross-platform client model. 
--- burrow-gtk/Cargo.toml | 2 + burrow-gtk/src/account_store.rs | 139 ++ burrow-gtk/src/components/app.rs | 139 +- burrow-gtk/src/components/home_screen.rs | 1178 +++++++++++++++++ burrow-gtk/src/components/mod.rs | 10 +- burrow-gtk/src/daemon_api.rs | 420 ++++++ burrow-gtk/src/main.rs | 6 +- burrow/src/daemon/apple.rs | 39 +- burrow/src/lib.rs | 8 +- docs/GTK_APP.md | 22 +- .../BEP-0005-daemon-ipc-and-apple-boundary.md | 3 + ...6-tailnet-authority-first-control-plane.md | 5 +- 12 files changed, 1861 insertions(+), 110 deletions(-) create mode 100644 burrow-gtk/src/account_store.rs create mode 100644 burrow-gtk/src/components/home_screen.rs create mode 100644 burrow-gtk/src/daemon_api.rs diff --git a/burrow-gtk/Cargo.toml b/burrow-gtk/Cargo.toml index 21cb52e..b12577a 100644 --- a/burrow-gtk/Cargo.toml +++ b/burrow-gtk/Cargo.toml @@ -11,6 +11,8 @@ relm4 = { version = "0.6", features = ["libadwaita", "gnome_44"]} burrow = { version = "*", path = "../burrow/" } tokio = { version = "1.35.0", features = ["time", "sync"] } gettext-rs = { version = "0.7.0", features = ["gettext-system"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" [build-dependencies] anyhow = "1.0" diff --git a/burrow-gtk/src/account_store.rs b/burrow-gtk/src/account_store.rs new file mode 100644 index 0000000..6aee78b --- /dev/null +++ b/burrow-gtk/src/account_store.rs @@ -0,0 +1,139 @@ +use anyhow::{Context, Result}; +use serde::{Deserialize, Serialize}; +use std::{ + path::PathBuf, + time::{SystemTime, UNIX_EPOCH}, +}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AccountRecord { + pub id: String, + pub kind: AccountKind, + pub title: String, + pub authority: Option, + pub account: String, + pub identity: String, + pub hostname: Option, + pub tailnet: Option, + pub note: Option, + pub created_at: u64, + pub updated_at: u64, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum AccountKind 
{ + WireGuard, + Tor, + Tailnet, +} + +impl AccountKind { + pub fn title(self) -> &'static str { + match self { + Self::WireGuard => "WireGuard", + Self::Tor => "Tor", + Self::Tailnet => "Tailnet", + } + } + + fn sort_rank(self) -> u8 { + match self { + Self::Tailnet => 0, + Self::Tor => 1, + Self::WireGuard => 2, + } + } +} + +pub fn load() -> Result> { + let path = storage_path()?; + if !path.exists() { + return Ok(Vec::new()); + } + let data = + std::fs::read(&path).with_context(|| format!("failed to read {}", path.display()))?; + serde_json::from_slice(&data).with_context(|| format!("failed to parse {}", path.display())) +} + +pub fn upsert(mut record: AccountRecord) -> Result> { + let mut accounts = load()?; + let now = timestamp(); + record.updated_at = now; + if record.created_at == 0 { + record.created_at = now; + } + + if let Some(index) = accounts.iter().position(|account| account.id == record.id) { + accounts[index] = record; + } else { + accounts.push(record); + } + accounts.sort_by(|lhs, rhs| { + lhs.kind + .sort_rank() + .cmp(&rhs.kind.sort_rank()) + .then_with(|| lhs.title.to_lowercase().cmp(&rhs.title.to_lowercase())) + }); + persist(&accounts)?; + Ok(accounts) +} + +pub fn new_record( + kind: AccountKind, + title: String, + authority: Option, + account: String, + identity: String, + hostname: Option, + tailnet: Option, + note: Option, +) -> AccountRecord { + let now = timestamp(); + AccountRecord { + id: format!("{}-{now}", kind.title().to_ascii_lowercase()), + kind, + title, + authority, + account, + identity, + hostname, + tailnet, + note, + created_at: now, + updated_at: now, + } +} + +fn persist(accounts: &[AccountRecord]) -> Result<()> { + let path = storage_path()?; + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent) + .with_context(|| format!("failed to create {}", parent.display()))?; + } + let data = serde_json::to_vec_pretty(accounts).context("failed to encode account store")?; + std::fs::write(&path, 
data).with_context(|| format!("failed to write {}", path.display())) +} + +fn storage_path() -> Result { + if let Some(data_home) = std::env::var_os("XDG_DATA_HOME") { + return Ok(PathBuf::from(data_home) + .join("burrow") + .join("accounts.json")); + } + if let Some(home) = std::env::var_os("HOME") { + return Ok(PathBuf::from(home) + .join(".local") + .join("share") + .join("burrow") + .join("accounts.json")); + } + Ok(std::env::temp_dir().join("burrow-accounts.json")) +} + +fn timestamp() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|duration| duration.as_secs()) + .unwrap_or_default() +} diff --git a/burrow-gtk/src/components/app.rs b/burrow-gtk/src/components/app.rs index 62c98c0..7354825 100644 --- a/burrow-gtk/src/components/app.rs +++ b/burrow-gtk/src/components/app.rs @@ -1,24 +1,19 @@ use super::*; use anyhow::Context; -use std::time::Duration; - -const RECONNECT_POLL_TIME: Duration = Duration::from_secs(5); pub struct App { - daemon_client: Arc>>, - settings_screen: Controller, - switch_screen: AsyncController, + _home_screen: AsyncController, } #[derive(Debug)] pub enum AppMsg { None, - PostInit, } impl App { pub fn run() { let app = RelmApp::new(config::ID); + relm4::set_global_css(APP_CSS); Self::setup_gresources().unwrap(); Self::setup_i18n().unwrap(); @@ -49,7 +44,7 @@ impl AsyncComponent for App { view! 
{ adw::Window { set_title: Some("Burrow"), - set_default_size: (640, 480), + set_default_size: (900, 760), } } @@ -58,100 +53,84 @@ impl AsyncComponent for App { root: Self::Root, sender: AsyncComponentSender, ) -> AsyncComponentParts { - let daemon_client = Arc::new(Mutex::new(DaemonClient::new().await.ok())); - - let switch_screen = switch_screen::SwitchScreen::builder() - .launch(switch_screen::SwitchScreenInit { - daemon_client: Arc::clone(&daemon_client), - }) - .forward(sender.input_sender(), |_| AppMsg::None); - - let settings_screen = settings_screen::SettingsScreen::builder() - .launch(settings_screen::SettingsScreenInit { - daemon_client: Arc::clone(&daemon_client), - }) + let home_screen = home_screen::HomeScreen::builder() + .launch(()) .forward(sender.input_sender(), |_| AppMsg::None); let widgets = view_output!(); - let view_stack = adw::ViewStack::new(); - view_stack.add_titled(switch_screen.widget(), None, "Switch"); - view_stack.add_titled(settings_screen.widget(), None, "Settings"); - - let view_switcher_bar = adw::ViewSwitcherBar::builder().stack(&view_stack).build(); - view_switcher_bar.set_reveal(true); - - // When libadwaita 1.4 support becomes more avaliable, this approach is more appropriate - // - // let toolbar = adw::ToolbarView::new(); - // toolbar.add_top_bar( - // &adw::HeaderBar::builder() - // .title_widget(>k::Label::new(Some("Burrow"))) - // .build(), - // ); - // toolbar.add_bottom_bar(&view_switcher_bar); - // toolbar.set_content(Some(&view_stack)); - // root.set_content(Some(&toolbar)); - let content = gtk::Box::new(gtk::Orientation::Vertical, 0); content.append( &adw::HeaderBar::builder() .title_widget(>k::Label::new(Some("Burrow"))) .build(), ); - content.append(&view_stack); - content.append(&view_switcher_bar); + content.append(home_screen.widget()); root.set_content(Some(&content)); - sender.input(AppMsg::PostInit); - - let model = App { - daemon_client, - switch_screen, - settings_screen, - }; + let model = App { 
_home_screen: home_screen }; AsyncComponentParts { model, widgets } } async fn update( &mut self, - _msg: Self::Input, + msg: Self::Input, _sender: AsyncComponentSender, _root: &Self::Root, ) { - loop { - tokio::time::sleep(RECONNECT_POLL_TIME).await; - { - let mut daemon_client = self.daemon_client.lock().await; - let mut disconnected_daemon_client = false; - - if let Some(daemon_client) = daemon_client.as_mut() { - if let Err(_e) = daemon_client.send_command(DaemonCommand::ServerInfo).await { - disconnected_daemon_client = true; - self.switch_screen - .emit(switch_screen::SwitchScreenMsg::DaemonDisconnect); - self.settings_screen - .emit(settings_screen::SettingsScreenMsg::DaemonStateChange) - } - } - - if disconnected_daemon_client || daemon_client.is_none() { - match DaemonClient::new().await { - Ok(new_daemon_client) => { - *daemon_client = Some(new_daemon_client); - self.switch_screen - .emit(switch_screen::SwitchScreenMsg::DaemonReconnect); - self.settings_screen - .emit(settings_screen::SettingsScreenMsg::DaemonStateChange) - } - Err(_e) => { - // TODO: Handle Error - } - } - } - } + match msg { + AppMsg::None => {} } } } + +const APP_CSS: &str = r#" +.empty-state { + border-radius: 18px; + padding: 22px; + background: alpha(@card_bg_color, 0.72); +} + +.summary-card { + border-radius: 18px; + padding: 14px; + background: alpha(@card_bg_color, 0.72); +} + +.network-card { + border-radius: 10px; + padding: 16px; + box-shadow: 0 2px 6px alpha(black, 0.14); +} + +.wireguard-card { + background: linear-gradient(135deg, #3277d8, #174ea6); +} + +.tailnet-card { + background: linear-gradient(135deg, #31b891, #147d69); +} + +.network-card-kind, +.network-card-title, +.network-card-detail { + color: white; +} + +.network-card-kind { + opacity: 0.86; + font-weight: 700; +} + +.network-card-title { + font-size: 1.22em; + font-weight: 700; +} + +.network-card-detail { + opacity: 0.92; + font-family: monospace; +} +"#; diff --git 
a/burrow-gtk/src/components/home_screen.rs b/burrow-gtk/src/components/home_screen.rs new file mode 100644 index 0000000..0bfdda2 --- /dev/null +++ b/burrow-gtk/src/components/home_screen.rs @@ -0,0 +1,1178 @@ +use super::*; +use crate::account_store::{self, AccountKind, AccountRecord}; +use std::time::Duration; + +pub struct HomeScreen { + daemon_banner: adw::Banner, + network_status: gtk::Label, + network_cards: gtk::Box, + account_status: gtk::Label, + account_rows: gtk::Box, + tunnel_status: gtk::Label, + tunnel_button: gtk::Button, + tunnel_state: Option, + tailnet_session_id: Option, + tailnet_running: bool, +} + +#[derive(Debug)] +pub enum HomeScreenMsg { + EnsureDaemon, + Refresh, + TunnelAction, + OpenWireGuard, + OpenTor, + OpenTailnet, + AddWireGuard { + title: String, + account: String, + identity: String, + config: String, + }, + SaveTor { + title: String, + account: String, + identity: String, + note: String, + }, + DiscoverTailnet(String), + ProbeTailnet(String), + StartTailnetLogin { + authority: String, + account: String, + identity: String, + hostname: Option, + }, + PollTailnetLogin, + CancelTailnetLogin, + AddTailnet { + authority: String, + account: String, + identity: String, + hostname: Option, + tailnet: Option, + }, +} + +#[relm4::component(pub, async)] +impl AsyncComponent for HomeScreen { + type Init = (); + type Input = HomeScreenMsg; + type Output = (); + type CommandOutput = (); + + view! 
{ + gtk::ScrolledWindow { + set_vexpand: true, + + adw::Clamp { + set_maximum_size: 900, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 24, + set_margin_all: 24, + + gtk::Box { + set_orientation: gtk::Orientation::Horizontal, + set_spacing: 16, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 6, + set_hexpand: true, + + gtk::Label { + add_css_class: "title-1", + set_xalign: 0.0, + set_label: "Burrow", + }, + + gtk::Label { + add_css_class: "heading", + add_css_class: "dim-label", + set_xalign: 0.0, + set_label: "Networks and accounts", + }, + }, + + #[name(add_button)] + gtk::MenuButton { + add_css_class: "flat", + set_icon_name: "list-add-symbolic", + set_tooltip_text: Some("Add"), + set_valign: Align::Start, + }, + }, + + #[name(daemon_banner)] + adw::Banner { + set_title: "Starting Burrow daemon", + set_revealed: false, + }, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 12, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 4, + + gtk::Label { + add_css_class: "title-2", + set_xalign: 0.0, + set_label: "Networks", + }, + + #[name(network_status)] + gtk::Label { + add_css_class: "dim-label", + set_xalign: 0.0, + set_wrap: true, + set_label: "Stored daemon networks and their active account selectors", + }, + }, + + gtk::ScrolledWindow { + set_policy: (gtk::PolicyType::Automatic, gtk::PolicyType::Never), + set_min_content_height: 190, + + #[name(network_cards)] + gtk::Box { + set_orientation: gtk::Orientation::Horizontal, + set_spacing: 14, + }, + }, + }, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 12, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 4, + + gtk::Label { + add_css_class: "title-2", + set_xalign: 0.0, + set_label: "Accounts", + }, + + gtk::Label { + add_css_class: "dim-label", + set_xalign: 0.0, + set_wrap: true, + set_label: "Per-network identities and sign-in state", + }, + 
}, + + #[name(account_rows)] + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 8, + set_margin_all: 0, + set_valign: Align::Center, + }, + + #[name(account_status)] + gtk::Label { + add_css_class: "dim-label", + set_xalign: 0.0, + set_wrap: true, + set_label: "", + }, + }, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 8, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 4, + + gtk::Label { + add_css_class: "title-2", + set_xalign: 0.0, + set_label: "Tunnel", + }, + + gtk::Label { + add_css_class: "dim-label", + set_xalign: 0.0, + set_label: "Current daemon tunnel state", + }, + }, + + #[name(tunnel_status)] + gtk::Label { + set_xalign: 0.0, + set_label: "Checking daemon status", + }, + + #[name(tunnel_button)] + gtk::Button { + add_css_class: "suggested-action", + set_label: "Start", + set_halign: Align::Start, + connect_clicked => HomeScreenMsg::TunnelAction, + }, + }, + } + } + } + } + + async fn init( + _: Self::Init, + _root: Self::Root, + sender: AsyncComponentSender, + ) -> AsyncComponentParts { + let widgets = view_output!(); + configure_add_popover(&widgets.add_button, &sender); + + let refresh_sender = sender.input_sender().clone(); + relm4::spawn(async move { + loop { + tokio::time::sleep(Duration::from_secs(5)).await; + refresh_sender.emit(HomeScreenMsg::Refresh); + } + }); + + let model = HomeScreen { + daemon_banner: widgets.daemon_banner.clone(), + network_status: widgets.network_status.clone(), + network_cards: widgets.network_cards.clone(), + account_status: widgets.account_status.clone(), + account_rows: widgets.account_rows.clone(), + tunnel_status: widgets.tunnel_status.clone(), + tunnel_button: widgets.tunnel_button.clone(), + tunnel_state: None, + tailnet_session_id: None, + tailnet_running: false, + }; + + sender.input(HomeScreenMsg::EnsureDaemon); + + AsyncComponentParts { model, widgets } + } + + async fn update( + &mut self, + msg: Self::Input, + sender: 
AsyncComponentSender, + root: &Self::Root, + ) { + match msg { + HomeScreenMsg::EnsureDaemon => self.ensure_daemon().await, + HomeScreenMsg::Refresh => self.refresh().await, + HomeScreenMsg::TunnelAction => self.perform_tunnel_action().await, + HomeScreenMsg::OpenWireGuard => open_wireguard_window(root, &sender), + HomeScreenMsg::OpenTor => open_tor_window(root, &sender), + HomeScreenMsg::OpenTailnet => open_tailnet_window(root, &sender), + HomeScreenMsg::AddWireGuard { + title, + account, + identity, + config, + } => self.add_wireguard(title, account, identity, config).await, + HomeScreenMsg::SaveTor { title, account, identity, note } => { + self.save_tor(title, account, identity, note) + } + HomeScreenMsg::DiscoverTailnet(email) => self.discover_tailnet(email).await, + HomeScreenMsg::ProbeTailnet(authority) => self.probe_tailnet(authority).await, + HomeScreenMsg::StartTailnetLogin { + authority, + account, + identity, + hostname, + } => { + self.start_tailnet_login(authority, account, identity, hostname, sender) + .await; + } + HomeScreenMsg::PollTailnetLogin => self.poll_tailnet_login(sender).await, + HomeScreenMsg::CancelTailnetLogin => self.cancel_tailnet_login().await, + HomeScreenMsg::AddTailnet { + authority, + account, + identity, + hostname, + tailnet, + } => { + self.add_tailnet(authority, account, identity, hostname, tailnet) + .await; + } + } + } +} + +impl HomeScreen { + async fn ensure_daemon(&mut self) { + self.daemon_banner.set_title("Starting Burrow daemon"); + self.daemon_banner.set_revealed(true); + match daemon_api::ensure_daemon().await { + Ok(()) => { + self.daemon_banner.set_revealed(false); + self.refresh().await; + } + Err(error) => { + self.daemon_banner + .set_title(&format!("Burrow daemon is not reachable: {error}")); + self.daemon_banner.set_revealed(true); + self.tunnel_state = None; + self.tunnel_status.set_label("Daemon unavailable"); + self.tunnel_button.set_label("Enable"); + self.tunnel_button.set_sensitive(true); + 
self.network_status + .set_label("Stored daemon networks are unavailable until the daemon starts."); + self.render_networks(&[]); + } + } + } + + async fn refresh(&mut self) { + match daemon_api::tunnel_state().await { + Ok(state) => { + self.daemon_banner.set_revealed(false); + self.tunnel_state = Some(state); + match state { + daemon_api::TunnelState::Running => { + self.tunnel_status.set_label("Connected"); + self.tunnel_button.set_label("Stop"); + } + daemon_api::TunnelState::Stopped => { + self.tunnel_status.set_label("Disconnected"); + self.tunnel_button.set_label("Start"); + } + } + self.tunnel_button.set_sensitive(true); + } + Err(error) => { + self.tunnel_state = None; + self.daemon_banner + .set_title(&format!("Burrow daemon is not reachable: {error}")); + self.daemon_banner.set_revealed(true); + self.tunnel_status.set_label("Unknown"); + self.tunnel_button.set_label("Enable"); + self.tunnel_button.set_sensitive(true); + } + } + + match daemon_api::list_networks().await { + Ok(networks) => { + self.render_networks(&networks); + self.network_status.set_label(if networks.is_empty() { + "Stored daemon networks and their active account selectors" + } else { + "Stored daemon networks and their active account selectors" + }); + } + Err(error) => { + self.render_networks(&[]); + self.network_status + .set_label(&format!("Unable to read daemon networks: {error}")); + } + } + + match account_store::load() { + Ok(accounts) => { + self.account_status.set_label(""); + self.render_accounts(&accounts); + } + Err(error) => { + self.render_accounts(&[]); + self.account_status + .set_label(&format!("Unable to read account store: {error}")); + } + } + } + + async fn perform_tunnel_action(&mut self) { + match self.tunnel_state { + Some(daemon_api::TunnelState::Running) => { + self.tunnel_button.set_sensitive(false); + self.tunnel_status.set_label("Disconnecting..."); + if let Err(error) = daemon_api::stop_tunnel().await { + self.tunnel_status + .set_label(&format!("Stop 
failed: {error}")); + } + self.refresh().await; + } + Some(daemon_api::TunnelState::Stopped) => { + self.tunnel_button.set_sensitive(false); + self.tunnel_status.set_label("Connecting..."); + if let Err(error) = daemon_api::start_tunnel().await { + self.tunnel_status + .set_label(&format!("Start failed: {error}")); + } + self.refresh().await; + } + None => self.ensure_daemon().await, + } + } + + async fn add_wireguard( + &mut self, + title: String, + account: String, + identity: String, + config: String, + ) { + if config.trim().is_empty() { + self.network_status + .set_label("Paste a WireGuard configuration before adding a network."); + return; + } + match daemon_api::add_wireguard(config).await { + Ok(id) => { + let title = daemon_api::normalized(&title, &format!("WireGuard {id}")); + let record = account_store::new_record( + AccountKind::WireGuard, + title, + None, + daemon_api::normalized(&account, "default"), + daemon_api::normalized(&identity, &format!("network-{id}")), + None, + None, + Some(format!("Linked to daemon network #{id}.")), + ); + match account_store::upsert(record) { + Ok(accounts) => self.render_accounts(&accounts), + Err(error) => self + .account_status + .set_label(&format!("WireGuard account save failed: {error}")), + } + self.network_status + .set_label(&format!("Added WireGuard network #{id}.")); + self.refresh().await; + } + Err(error) => self + .network_status + .set_label(&format!("Unable to add WireGuard network: {error}")), + } + } + + fn save_tor(&mut self, title: String, account: String, identity: String, note: String) { + let record = account_store::new_record( + AccountKind::Tor, + daemon_api::normalized( + &title, + &format!("Tor {}", daemon_api::normalized(&identity, "linux")), + ), + Some("arti://local".to_owned()), + daemon_api::normalized(&account, "default"), + daemon_api::normalized(&identity, "linux"), + None, + None, + Some(note), + ); + match account_store::upsert(record) { + Ok(accounts) => { + 
self.account_status.set_label("Saved Tor account."); + self.render_accounts(&accounts); + } + Err(error) => self + .account_status + .set_label(&format!("Unable to save Tor account: {error}")), + } + } + + async fn discover_tailnet(&mut self, email: String) { + let Ok(email) = daemon_api::require_value(&email, "Email address") else { + self.account_status + .set_label("Enter an email address before Tailnet discovery."); + return; + }; + + self.account_status.set_label("Finding Tailnet server..."); + match daemon_api::discover_tailnet(email).await { + Ok(discovery) => { + let kind = if discovery.managed { + "managed authority" + } else { + "custom authority" + }; + let issuer = discovery + .oidc_issuer + .map(|issuer| format!(" OIDC: {issuer}.")) + .unwrap_or_default(); + self.account_status.set_label(&format!( + "Discovered {kind}: {}.{issuer}", + discovery.authority + )); + } + Err(error) => self + .account_status + .set_label(&format!("Tailnet discovery failed: {error}")), + } + } + + async fn probe_tailnet(&mut self, authority: String) { + let Ok(authority) = daemon_api::require_value(&authority, "Tailnet server URL") else { + self.account_status + .set_label("Enter a Tailnet server URL before checking it."); + return; + }; + + self.account_status.set_label("Checking Tailnet server..."); + match daemon_api::probe_tailnet(authority).await { + Ok(probe) => { + let detail = probe + .detail + .unwrap_or_else(|| format!("HTTP {}", probe.status_code)); + self.account_status + .set_label(&format!("{}: {detail}", probe.summary)); + } + Err(error) => self + .account_status + .set_label(&format!("Tailnet probe failed: {error}")), + } + } + + async fn start_tailnet_login( + &mut self, + authority: String, + account: String, + identity: String, + hostname: Option, + sender: AsyncComponentSender, + ) { + let Ok(authority) = daemon_api::require_value(&authority, "Tailnet server URL") else { + self.account_status + .set_label("Enter a Tailnet server URL before sign-in."); + 
return; + }; + + self.account_status.set_label("Starting Tailnet sign-in..."); + match daemon_api::start_tailnet_login(authority, account, identity, hostname).await { + Ok(status) => { + self.apply_login_status(&status); + if let Some(auth_url) = status.auth_url.as_deref() { + if let Err(error) = open_auth_url(auth_url) { + self.account_status.set_label(&format!( + "{} Open this URL manually: {auth_url}. Browser launch failed: {error}", + self.account_status.text() + )); + } + } + if !status.running { + sender.input(HomeScreenMsg::PollTailnetLogin); + } + } + Err(error) => self + .account_status + .set_label(&format!("Tailnet sign-in failed: {error}")), + } + } + + async fn poll_tailnet_login(&mut self, sender: AsyncComponentSender) { + let Some(session_id) = self.tailnet_session_id.clone() else { + return; + }; + if self.tailnet_running { + return; + } + + tokio::time::sleep(Duration::from_secs(1)).await; + match daemon_api::tailnet_login_status(session_id).await { + Ok(status) => { + self.apply_login_status(&status); + if !status.running { + sender.input(HomeScreenMsg::PollTailnetLogin); + } + } + Err(error) => { + self.account_status + .set_label(&format!("Tailnet sign-in status failed: {error}")); + self.tailnet_session_id = None; + } + } + } + + async fn cancel_tailnet_login(&mut self) { + let Some(session_id) = self.tailnet_session_id.clone() else { + self.account_status + .set_label("No Tailnet sign-in is active."); + return; + }; + match daemon_api::cancel_tailnet_login(session_id).await { + Ok(()) => { + self.tailnet_session_id = None; + self.tailnet_running = false; + self.account_status.set_label("Tailnet sign-in cancelled."); + } + Err(error) => self + .account_status + .set_label(&format!("Unable to cancel Tailnet sign-in: {error}")), + } + } + + async fn add_tailnet( + &mut self, + authority: String, + account: String, + identity: String, + hostname: Option, + tailnet: Option, + ) { + let Ok(authority) = daemon_api::require_value(&authority, "Tailnet 
server URL") else { + self.account_status + .set_label("Enter a Tailnet server URL before saving."); + return; + }; + if self.tailnet_session_id.is_some() && !self.tailnet_running { + self.account_status + .set_label("Finish browser sign-in before saving this Tailnet account."); + return; + } + + let stored_authority = daemon_api::normalized_optional(&authority) + .unwrap_or_else(|| daemon_api::default_tailnet_authority().to_owned()); + let stored_account = daemon_api::normalized(&account, "default"); + let stored_identity = daemon_api::normalized(&identity, "linux"); + let stored_hostname = hostname.clone(); + let stored_tailnet = tailnet.clone(); + + match daemon_api::add_tailnet(authority, account, identity, hostname, tailnet).await { + Ok(id) => { + let title = stored_tailnet + .clone() + .or(stored_hostname.clone()) + .unwrap_or_else(|| format!("Tailnet {id}")); + let record = account_store::new_record( + AccountKind::Tailnet, + title, + Some(stored_authority), + stored_account, + stored_identity, + stored_hostname, + stored_tailnet, + Some(format!("Linked to daemon network #{id}.")), + ); + match account_store::upsert(record) { + Ok(accounts) => self.render_accounts(&accounts), + Err(error) => self + .account_status + .set_label(&format!("Tailnet account save failed: {error}")), + } + self.account_status + .set_label(&format!("Saved Tailnet account and network #{id}.")); + self.refresh().await; + } + Err(error) => self + .account_status + .set_label(&format!("Unable to save Tailnet account: {error}")), + } + } + + fn apply_login_status(&mut self, status: &daemon_api::TailnetLoginStatus) { + self.tailnet_session_id = Some(status.session_id.clone()); + self.tailnet_running = status.running; + + let mut parts = Vec::new(); + if status.running { + parts.push("Signed In".to_owned()); + } else if status.needs_login { + parts.push("Browser Sign-In Required".to_owned()); + } else { + parts.push("Checking Sign-In".to_owned()); + } + if !status.backend_state.is_empty() 
{ + parts.push(format!("State: {}", status.backend_state)); + } + if let Some(tailnet_name) = &status.tailnet_name { + parts.push(format!("Tailnet: {tailnet_name}")); + } + if let Some(self_dns_name) = &status.self_dns_name { + parts.push(self_dns_name.clone()); + } + if !status.tailnet_ips.is_empty() { + parts.push(status.tailnet_ips.join(", ")); + } + if !status.health.is_empty() { + parts.push(status.health.join(" / ")); + } + self.account_status.set_label(&parts.join("\n")); + } + + fn render_networks(&self, networks: &[daemon_api::NetworkSummary]) { + while let Some(child) = self.network_cards.first_child() { + self.network_cards.remove(&child); + } + + if networks.is_empty() { + self.network_cards.append(&empty_networks_view()); + return; + } + + for network in networks { + self.network_cards.append(&network_card(network)); + } + } + + fn render_accounts(&self, accounts: &[AccountRecord]) { + while let Some(child) = self.account_rows.first_child() { + self.account_rows.remove(&child); + } + + if accounts.is_empty() { + self.account_rows.append(&empty_accounts_view()); + return; + } + + for account in accounts { + self.account_rows.append(&account_card(account)); + } + } +} + +fn configure_add_popover(button: >k::MenuButton, sender: &AsyncComponentSender) { + let popover = gtk::Popover::new(); + let box_ = gtk::Box::new(gtk::Orientation::Vertical, 4); + box_.set_margin_all(6); + + for (label, msg) in [ + ("Add WireGuard Network", HomeScreenMsg::OpenWireGuard), + ("Save Tor Account", HomeScreenMsg::OpenTor), + ("Add Tailnet Account", HomeScreenMsg::OpenTailnet), + ] { + let item = gtk::Button::with_label(label); + item.add_css_class("flat"); + item.set_halign(Align::Fill); + let input = sender.input_sender().clone(); + item.connect_clicked(move |_| input.emit(msg_from_template(&msg))); + box_.append(&item); + } + + popover.set_child(Some(&box_)); + button.set_popover(Some(&popover)); +} + +fn msg_from_template(msg: &HomeScreenMsg) -> HomeScreenMsg { + match msg 
{ + HomeScreenMsg::OpenWireGuard => HomeScreenMsg::OpenWireGuard, + HomeScreenMsg::OpenTor => HomeScreenMsg::OpenTor, + HomeScreenMsg::OpenTailnet => HomeScreenMsg::OpenTailnet, + _ => unreachable!(), + } +} + +fn network_card(network: &daemon_api::NetworkSummary) -> gtk::Box { + let card = gtk::Box::new(gtk::Orientation::Vertical, 10); + card.add_css_class("network-card"); + if network.title.to_ascii_lowercase().contains("wireguard") { + card.add_css_class("wireguard-card"); + } else { + card.add_css_class("tailnet-card"); + } + card.set_size_request(360, 175); + card.set_margin_bottom(8); + + let kind = if network.title.to_ascii_lowercase().contains("wireguard") { + "WireGuard" + } else { + "Tailnet" + }; + let kind_label = gtk::Label::new(Some(kind)); + kind_label.add_css_class("network-card-kind"); + kind_label.set_xalign(0.0); + + let title = gtk::Label::new(Some(&network.title)); + title.add_css_class("network-card-title"); + title.set_xalign(0.0); + title.set_wrap(true); + + let spacer = gtk::Box::new(gtk::Orientation::Vertical, 0); + spacer.set_vexpand(true); + + let detail = gtk::Label::new(Some(&network.detail)); + detail.add_css_class("network-card-detail"); + detail.set_xalign(0.0); + detail.set_wrap(true); + detail.set_lines(4); + + card.append(&kind_label); + card.append(&title); + card.append(&spacer); + card.append(&detail); + card +} + +fn empty_networks_view() -> gtk::Box { + let box_ = gtk::Box::new(gtk::Orientation::Vertical, 6); + box_.add_css_class("empty-state"); + box_.set_size_request(520, 175); + box_.set_hexpand(true); + + let title = gtk::Label::new(Some("No Networks Yet")); + title.add_css_class("title-3"); + title.set_xalign(0.0); + let detail = gtk::Label::new(Some( + "Add a WireGuard network, or save a Tailnet account so Burrow can store a managed network when the daemon is reachable.", + )); + detail.add_css_class("dim-label"); + detail.set_wrap(true); + detail.set_xalign(0.0); + + box_.append(&title); + box_.append(&detail); + box_ 
+} + +fn empty_accounts_view() -> gtk::Box { + let box_ = gtk::Box::new(gtk::Orientation::Vertical, 6); + box_.add_css_class("empty-state"); + box_.set_hexpand(true); + + let title = gtk::Label::new(Some("No Accounts Yet")); + title.add_css_class("title-3"); + title.set_justify(gtk::Justification::Center); + let detail = gtk::Label::new(Some( + "Save a Tor account or sign in to Tailnet to keep network identities ready on this device.", + )); + detail.add_css_class("dim-label"); + detail.set_wrap(true); + detail.set_justify(gtk::Justification::Center); + + box_.append(&title); + box_.append(&detail); + box_ +} + +fn account_card(account: &AccountRecord) -> gtk::Box { + let card = gtk::Box::new(gtk::Orientation::Vertical, 8); + card.add_css_class("summary-card"); + card.set_hexpand(true); + + let header = gtk::Box::new(gtk::Orientation::Horizontal, 8); + let title = gtk::Label::new(Some(&account.title)); + title.add_css_class("title-3"); + title.set_xalign(0.0); + title.set_hexpand(true); + let kind = gtk::Label::new(Some(account.kind.title())); + kind.add_css_class("dim-label"); + header.append(&title); + header.append(&kind); + card.append(&header); + + append_account_value(&card, "Account", &account.account); + append_account_value(&card, "Identity", &account.identity); + if let Some(authority) = &account.authority { + append_account_value(&card, "Authority", authority); + } + if let Some(hostname) = &account.hostname { + append_account_value(&card, "Hostname", hostname); + } + if let Some(tailnet) = &account.tailnet { + append_account_value(&card, "Tailnet", tailnet); + } + if let Some(note) = &account.note { + let note_label = gtk::Label::new(Some(note)); + note_label.add_css_class("dim-label"); + note_label.set_wrap(true); + note_label.set_xalign(0.0); + card.append(¬e_label); + } + + card +} + +fn append_account_value(card: >k::Box, label: &str, value: &str) { + let row = gtk::Box::new(gtk::Orientation::Horizontal, 8); + let key = gtk::Label::new(Some(label)); 
+ key.add_css_class("dim-label"); + key.set_xalign(0.0); + key.set_width_chars(9); + let value = gtk::Label::new(Some(value)); + value.set_xalign(0.0); + value.set_wrap(true); + value.set_hexpand(true); + row.append(&key); + row.append(&value); + card.append(&row); +} + +fn open_wireguard_window(root: >k::ScrolledWindow, sender: &AsyncComponentSender) { + let window = sheet_window(root, "WireGuard", 560, 620); + let content = sheet_content( + &window, + "Import WireGuard", + "Import a tunnel and optional account metadata.", + ); + + let title = gtk::Entry::new(); + title.set_placeholder_text(Some("Title")); + let account = gtk::Entry::new(); + account.set_placeholder_text(Some("Account")); + let identity = gtk::Entry::new(); + identity.set_placeholder_text(Some("Identity")); + let text = gtk::TextView::new(); + text.set_monospace(true); + text.set_wrap_mode(gtk::WrapMode::WordChar); + + let editor = gtk::ScrolledWindow::new(); + editor.set_min_content_height(220); + editor.set_child(Some(&text)); + + content.append(§ion_label("Identity")); + content.append(&title); + content.append(&account); + content.append(&identity); + content.append(§ion_label("WireGuard Configuration")); + content.append(&editor); + + let add = gtk::Button::with_label("Add Network"); + add.add_css_class("suggested-action"); + let input = sender.input_sender().clone(); + let window_for_click = window.clone(); + add.connect_clicked(move |_| { + input.emit(HomeScreenMsg::AddWireGuard { + title: title.text().to_string(), + account: account.text().to_string(), + identity: identity.text().to_string(), + config: text_view_text(&text), + }); + window_for_click.close(); + }); + content.append(&add); + + window.set_child(Some(&content)); + window.present(); +} + +fn open_tor_window(root: >k::ScrolledWindow, sender: &AsyncComponentSender) { + let window = sheet_window(root, "Tor", 520, 540); + let content = sheet_content( + &window, + "Configure Tor", + "Store Arti account and identity preferences.", + 
); + + let title = entry_with_text("Title", "Default Tor"); + let account = entry_with_text("Account", "default"); + let identity = entry_with_text("Identity", "linux"); + let addresses = entry_with_text("Virtual Addresses", "100.64.0.2/32"); + let dns = entry_with_text("DNS Resolvers", "1.1.1.1, 1.0.0.1"); + let mtu = entry_with_text("MTU", "1400"); + let listen = entry_with_text("Transparent Listener", "127.0.0.1:9040"); + + content.append(§ion_label("Identity")); + content.append(&title); + content.append(&account); + content.append(&identity); + content.append(§ion_label("Tor Preferences")); + content.append(&addresses); + content.append(&dns); + content.append(&mtu); + content.append(&listen); + + let save = gtk::Button::with_label("Save Account"); + save.add_css_class("suggested-action"); + let input = sender.input_sender().clone(); + let window_for_click = window.clone(); + save.connect_clicked(move |_| { + let note = [ + format!( + "Addresses: {}", + normalized_entry(&addresses, "100.64.0.2/32") + ), + format!("DNS: {}", normalized_entry(&dns, "1.1.1.1, 1.0.0.1")), + format!("MTU: {}", normalized_entry(&mtu, "1400")), + format!("Listen: {}", normalized_entry(&listen, "127.0.0.1:9040")), + ] + .join(" - "); + input.emit(HomeScreenMsg::SaveTor { + title: normalized_entry(&title, "Default Tor"), + account: normalized_entry(&account, "default"), + identity: normalized_entry(&identity, "linux"), + note, + }); + window_for_click.close(); + }); + content.append(&save); + + window.set_child(Some(&content)); + window.present(); +} + +fn open_tailnet_window(root: >k::ScrolledWindow, sender: &AsyncComponentSender) { + let window = sheet_window(root, "Tailnet", 560, 680); + let content = sheet_content( + &window, + "Connect Tailnet", + "Save Tailnet authority, identity defaults, and login material.", + ); + + let email = gtk::Entry::new(); + email.set_placeholder_text(Some("Email address")); + let authority = entry_with_text("Server URL", 
daemon_api::default_tailnet_authority()); + let tailnet = gtk::Entry::new(); + tailnet.set_placeholder_text(Some("Tailnet")); + let account = entry_with_text("Account", "default"); + let identity = entry_with_text("Identity", "linux"); + let hostname = entry_with_text("Hostname", &hostname_fallback()); + + content.append(§ion_label("Connection")); + content.append(&email); + content.append(&authority); + content.append(&tailnet); + content.append(§ion_label("Identity")); + content.append(&account); + content.append(&identity); + content.append(&hostname); + + let actions = gtk::Box::new(gtk::Orientation::Horizontal, 8); + let discover = gtk::Button::with_label("Refresh Server Lookup"); + let probe = gtk::Button::with_label("Check Server"); + let sign_in = gtk::Button::with_label("Start Sign-In"); + actions.append(&discover); + actions.append(&probe); + actions.append(&sign_in); + content.append(§ion_label("Authentication")); + content.append(&actions); + + let input = sender.input_sender().clone(); + let email_for_click = email.clone(); + discover.connect_clicked(move |_| { + input.emit(HomeScreenMsg::DiscoverTailnet( + email_for_click.text().to_string(), + )); + }); + + let input = sender.input_sender().clone(); + let authority_for_probe = authority.clone(); + probe.connect_clicked(move |_| { + input.emit(HomeScreenMsg::ProbeTailnet( + authority_for_probe.text().to_string(), + )); + }); + + let input = sender.input_sender().clone(); + let authority_for_login = authority.clone(); + let account_for_login = account.clone(); + let identity_for_login = identity.clone(); + let hostname_for_login = hostname.clone(); + sign_in.connect_clicked(move |_| { + input.emit(HomeScreenMsg::StartTailnetLogin { + authority: authority_for_login.text().to_string(), + account: normalized_entry(&account_for_login, "default"), + identity: normalized_entry(&identity_for_login, "linux"), + hostname: daemon_api::normalized_optional(&hostname_for_login.text()), + }); + }); + + let save = 
gtk::Button::with_label("Save Account"); + save.add_css_class("suggested-action"); + let input = sender.input_sender().clone(); + let window_for_click = window.clone(); + save.connect_clicked(move |_| { + input.emit(HomeScreenMsg::AddTailnet { + authority: authority.text().to_string(), + account: normalized_entry(&account, "default"), + identity: normalized_entry(&identity, "linux"), + hostname: daemon_api::normalized_optional(&hostname.text()), + tailnet: daemon_api::normalized_optional(&tailnet.text()), + }); + window_for_click.close(); + }); + + let cancel = gtk::Button::with_label("Cancel Sign-In"); + let input = sender.input_sender().clone(); + cancel.connect_clicked(move |_| { + input.emit(HomeScreenMsg::CancelTailnetLogin); + }); + + content.append(&save); + content.append(&cancel); + + window.set_child(Some(&content)); + window.present(); +} + +fn sheet_window(root: >k::ScrolledWindow, title: &str, width: i32, height: i32) -> gtk::Window { + let window = gtk::Window::builder() + .title(title) + .default_width(width) + .default_height(height) + .modal(true) + .build(); + if let Some(root) = root.root() { + if let Ok(parent) = root.downcast::() { + window.set_transient_for(Some(&parent)); + } + } + window +} + +fn sheet_content(window: >k::Window, title: &str, detail: &str) -> gtk::Box { + let content = gtk::Box::new(gtk::Orientation::Vertical, 12); + content.set_margin_all(18); + + let summary = gtk::Box::new(gtk::Orientation::Horizontal, 12); + summary.add_css_class("summary-card"); + + let copy = gtk::Box::new(gtk::Orientation::Vertical, 4); + copy.set_hexpand(true); + + let title_label = gtk::Label::new(Some(title)); + title_label.add_css_class("title-3"); + title_label.set_xalign(0.0); + + let detail_label = gtk::Label::new(Some(detail)); + detail_label.add_css_class("dim-label"); + detail_label.set_wrap(true); + detail_label.set_xalign(0.0); + + copy.append(&title_label); + copy.append(&detail_label); + summary.append(©); + + let close = 
gtk::Button::builder() + .icon_name("window-close-symbolic") + .tooltip_text("Close") + .valign(Align::Start) + .build(); + close.add_css_class("flat"); + let window_for_click = window.clone(); + close.connect_clicked(move |_| window_for_click.close()); + summary.append(&close); + + content.append(&summary); + content +} + +fn section_label(label: &str) -> gtk::Label { + let section = gtk::Label::new(Some(label)); + section.add_css_class("heading"); + section.set_xalign(0.0); + section +} + +fn entry_with_text(placeholder: &str, value: &str) -> gtk::Entry { + let entry = gtk::Entry::new(); + entry.set_placeholder_text(Some(placeholder)); + entry.set_text(value); + entry +} + +fn normalized_entry(entry: >k::Entry, fallback: &str) -> String { + daemon_api::normalized(&entry.text(), fallback) +} + +fn hostname_fallback() -> String { + std::env::var("HOSTNAME").unwrap_or_else(|_| "linux".to_owned()) +} + +fn text_view_text(text_view: >k::TextView) -> String { + let buffer = text_view.buffer(); + buffer + .text(&buffer.start_iter(), &buffer.end_iter(), true) + .to_string() +} + +fn open_auth_url(url: &str) -> anyhow::Result<()> { + gtk::gio::AppInfo::launch_default_for_uri(url, None::<>k::gio::AppLaunchContext>) + .map_err(anyhow::Error::from) +} diff --git a/burrow-gtk/src/components/mod.rs b/burrow-gtk/src/components/mod.rs index b134809..8e60fa7 100644 --- a/burrow-gtk/src/components/mod.rs +++ b/burrow-gtk/src/components/mod.rs @@ -1,6 +1,6 @@ use super::*; +use crate::daemon_api; use adw::prelude::*; -use burrow::{DaemonClient, DaemonCommand, DaemonResponseData}; use gtk::Align; use relm4::{ component::{ @@ -9,13 +9,9 @@ use relm4::{ }, prelude::*, }; -use std::sync::Arc; -use tokio::sync::Mutex; mod app; -mod settings; -mod settings_screen; -mod switch_screen; +mod home_screen; pub use app::*; -pub use settings::{DaemonGroupMsg, DiagGroupMsg}; +pub use home_screen::{HomeScreen, HomeScreenMsg}; diff --git a/burrow-gtk/src/daemon_api.rs 
b/burrow-gtk/src/daemon_api.rs new file mode 100644 index 0000000..4ff8bf5 --- /dev/null +++ b/burrow-gtk/src/daemon_api.rs @@ -0,0 +1,420 @@ +use anyhow::{anyhow, Context, Result}; +use burrow::{ + control::{TailnetConfig, TailnetProvider}, + grpc_defs::{ + Empty, Network, NetworkType, State, TailnetDiscoverRequest, TailnetLoginCancelRequest, + TailnetLoginStartRequest, TailnetLoginStatusRequest, TailnetProbeRequest, + }, + BurrowClient, +}; +use std::{path::PathBuf, sync::OnceLock}; +use tokio::time::{timeout, Duration}; + +const RPC_TIMEOUT: Duration = Duration::from_secs(3); +const MANAGED_TAILSCALE_AUTHORITY: &str = "https://controlplane.tailscale.com"; +static EMBEDDED_DAEMON_STARTED: OnceLock<()> = OnceLock::new(); + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TunnelState { + Running, + Stopped, +} + +#[derive(Debug, Clone)] +pub struct NetworkSummary { + pub id: i32, + pub title: String, + pub detail: String, +} + +#[derive(Debug, Clone)] +pub struct TailnetDiscovery { + pub authority: String, + pub managed: bool, + pub oidc_issuer: Option, +} + +#[derive(Debug, Clone)] +pub struct TailnetProbe { + pub summary: String, + pub detail: Option, + pub status_code: i32, +} + +#[derive(Debug, Clone)] +pub struct TailnetLoginStatus { + pub session_id: String, + pub backend_state: String, + pub auth_url: Option, + pub running: bool, + pub needs_login: bool, + pub tailnet_name: Option, + pub self_dns_name: Option, + pub tailnet_ips: Vec, + pub health: Vec, +} + +pub fn default_tailnet_authority() -> &'static str { + MANAGED_TAILSCALE_AUTHORITY +} + +pub fn configure_client_paths() -> Result<()> { + if std::env::var_os("BURROW_SOCKET_PATH").is_none() { + std::env::set_var("BURROW_SOCKET_PATH", default_socket_path()?); + } + Ok(()) +} + +pub async fn ensure_daemon() -> Result<()> { + configure_client_paths()?; + if daemon_available().await { + return Ok(()); + } + + let socket_path = socket_path()?; + let db_path = database_path()?; + 
ensure_parent(&socket_path)?; + ensure_parent(&db_path)?; + + if EMBEDDED_DAEMON_STARTED.get().is_none() { + tokio::task::spawn_blocking(move || { + burrow::spawn_in_process_with_paths(Some(socket_path), Some(db_path)); + }) + .await + .context("failed to join embedded daemon startup")?; + let _ = EMBEDDED_DAEMON_STARTED.set(()); + } + + tunnel_state() + .await + .map(|_| ()) + .context("Burrow daemon started but did not accept tunnel status RPCs") +} + +pub fn infer_tailnet_provider(authority: &str) -> TailnetProvider { + let normalized = authority.trim().trim_end_matches('/').to_ascii_lowercase(); + if normalized == "controlplane.tailscale.com" + || normalized == "http://controlplane.tailscale.com" + || normalized == MANAGED_TAILSCALE_AUTHORITY + { + TailnetProvider::Tailscale + } else { + TailnetProvider::Headscale + } +} + +pub async fn daemon_available() -> bool { + tunnel_state().await.is_ok() +} + +fn socket_path() -> Result { + if let Some(path) = std::env::var_os("BURROW_SOCKET_PATH") { + return Ok(PathBuf::from(path)); + } + default_socket_path() +} + +fn default_socket_path() -> Result { + if let Some(runtime_dir) = std::env::var_os("XDG_RUNTIME_DIR") { + return Ok(PathBuf::from(runtime_dir).join("burrow.sock")); + } + let uid = std::env::var("UID").unwrap_or_else(|_| "1000".to_owned()); + Ok(PathBuf::from(format!("/tmp/burrow-{uid}.sock"))) +} + +fn database_path() -> Result { + if let Some(path) = std::env::var_os("BURROW_DB_PATH") { + return Ok(PathBuf::from(path)); + } + if let Some(data_home) = std::env::var_os("XDG_DATA_HOME") { + return Ok(PathBuf::from(data_home).join("burrow").join("burrow.db")); + } + if let Some(home) = std::env::var_os("HOME") { + return Ok(PathBuf::from(home) + .join(".local") + .join("share") + .join("burrow") + .join("burrow.db")); + } + Ok(std::env::temp_dir().join("burrow.db")) +} + +fn ensure_parent(path: &PathBuf) -> Result<()> { + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent) + 
.with_context(|| format!("failed to create {}", parent.display()))?; + } + Ok(()) +} + +pub async fn tunnel_state() -> Result { + let mut client = BurrowClient::from_uds().await?; + let mut stream = timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_status(Empty {})) + .await + .context("timed out connecting to Burrow daemon")?? + .into_inner(); + let status = timeout(RPC_TIMEOUT, stream.message()) + .await + .context("timed out reading Burrow tunnel status")?? + .context("Burrow daemon ended the status stream without a state")?; + Ok(match status.state() { + State::Running => TunnelState::Running, + State::Stopped => TunnelState::Stopped, + }) +} + +pub async fn start_tunnel() -> Result<()> { + let mut client = BurrowClient::from_uds().await?; + timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_start(Empty {})) + .await + .context("timed out starting Burrow tunnel")??; + Ok(()) +} + +pub async fn stop_tunnel() -> Result<()> { + let mut client = BurrowClient::from_uds().await?; + timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_stop(Empty {})) + .await + .context("timed out stopping Burrow tunnel")??; + Ok(()) +} + +pub async fn list_networks() -> Result> { + let mut client = BurrowClient::from_uds().await?; + let mut stream = timeout(RPC_TIMEOUT, client.networks_client.network_list(Empty {})) + .await + .context("timed out connecting to Burrow network list")?? + .into_inner(); + let response = timeout(RPC_TIMEOUT, stream.message()) + .await + .context("timed out reading Burrow network list")?? 
+ .context("Burrow daemon ended the network stream without a snapshot")?; + Ok(response.network.iter().map(summarize_network).collect()) +} + +pub async fn add_wireguard(config: String) -> Result { + add_network(NetworkType::WireGuard, config.into_bytes()).await +} + +pub async fn add_tailnet( + authority: String, + account: String, + identity: String, + hostname: Option, + tailnet: Option, +) -> Result { + let provider = infer_tailnet_provider(&authority); + let config = TailnetConfig { + provider, + authority: Some(authority), + account: Some(account), + identity: Some(identity), + hostname, + tailnet, + }; + let payload = serde_json::to_vec_pretty(&config)?; + add_network(NetworkType::Tailnet, payload).await +} + +pub async fn discover_tailnet(email: String) -> Result { + let mut client = BurrowClient::from_uds().await?; + let response = timeout( + RPC_TIMEOUT, + client + .tailnet_client + .discover(TailnetDiscoverRequest { email }), + ) + .await + .context("timed out discovering Tailnet authority")?? + .into_inner(); + + Ok(TailnetDiscovery { + authority: response.authority, + managed: response.managed, + oidc_issuer: optional(response.oidc_issuer), + }) +} + +pub async fn probe_tailnet(authority: String) -> Result { + let mut client = BurrowClient::from_uds().await?; + let response = timeout( + RPC_TIMEOUT, + client + .tailnet_client + .probe(TailnetProbeRequest { authority }), + ) + .await + .context("timed out probing Tailnet authority")?? 
+ .into_inner(); + + Ok(TailnetProbe { + summary: response.summary, + detail: optional(response.detail), + status_code: response.status_code, + }) +} + +pub async fn start_tailnet_login( + authority: String, + account_name: String, + identity_name: String, + hostname: Option, +) -> Result { + let mut client = BurrowClient::from_uds().await?; + let response = timeout( + RPC_TIMEOUT, + client.tailnet_client.login_start(TailnetLoginStartRequest { + account_name, + identity_name, + hostname: hostname.unwrap_or_default(), + authority, + }), + ) + .await + .context("timed out starting Tailnet sign-in")?? + .into_inner(); + Ok(decode_tailnet_status(response)) +} + +pub async fn tailnet_login_status(session_id: String) -> Result { + let mut client = BurrowClient::from_uds().await?; + let response = timeout( + RPC_TIMEOUT, + client + .tailnet_client + .login_status(TailnetLoginStatusRequest { session_id }), + ) + .await + .context("timed out reading Tailnet sign-in status")?? + .into_inner(); + Ok(decode_tailnet_status(response)) +} + +pub async fn cancel_tailnet_login(session_id: String) -> Result<()> { + let mut client = BurrowClient::from_uds().await?; + timeout( + RPC_TIMEOUT, + client + .tailnet_client + .login_cancel(TailnetLoginCancelRequest { session_id }), + ) + .await + .context("timed out cancelling Tailnet sign-in")??; + Ok(()) +} + +async fn add_network(network_type: NetworkType, payload: Vec) -> Result { + let id = next_network_id().await?; + let mut client = BurrowClient::from_uds().await?; + timeout( + RPC_TIMEOUT, + client.networks_client.network_add(Network { + id, + r#type: network_type.into(), + payload, + }), + ) + .await + .context("timed out saving network to Burrow daemon")??; + Ok(id) +} + +async fn next_network_id() -> Result { + let networks = list_networks().await?; + Ok(networks.iter().map(|network| network.id).max().unwrap_or(0) + 1) +} + +fn summarize_network(network: &Network) -> NetworkSummary { + match network.r#type() { + 
NetworkType::WireGuard => summarize_wireguard(network), + NetworkType::Tailnet => summarize_tailnet(network), + } +} + +fn summarize_wireguard(network: &Network) -> NetworkSummary { + let payload = String::from_utf8_lossy(&network.payload); + let detail = payload + .lines() + .map(str::trim) + .find(|line| !line.is_empty() && !line.starts_with('[')) + .unwrap_or("Stored WireGuard configuration") + .to_owned(); + NetworkSummary { + id: network.id, + title: format!("WireGuard {}", network.id), + detail, + } +} + +fn summarize_tailnet(network: &Network) -> NetworkSummary { + match TailnetConfig::from_slice(&network.payload) { + Ok(config) => { + let title = config + .tailnet + .clone() + .or(config.hostname.clone()) + .unwrap_or_else(|| "Tailnet".to_owned()); + let authority = config + .authority + .unwrap_or_else(|| "default authority".to_owned()); + let account = config.account.unwrap_or_else(|| "default".to_owned()); + NetworkSummary { + id: network.id, + title, + detail: format!("{authority} - account {account}"), + } + } + Err(error) => NetworkSummary { + id: network.id, + title: "Tailnet".to_owned(), + detail: format!("Unable to read Tailnet payload: {error}"), + }, + } +} + +fn decode_tailnet_status( + response: burrow::grpc_defs::TailnetLoginStatusResponse, +) -> TailnetLoginStatus { + TailnetLoginStatus { + session_id: response.session_id, + backend_state: response.backend_state, + auth_url: optional(response.auth_url), + running: response.running, + needs_login: response.needs_login, + tailnet_name: optional(response.tailnet_name), + self_dns_name: optional(response.self_dns_name), + tailnet_ips: response.tailnet_ips, + health: response.health, + } +} + +fn optional(value: String) -> Option { + let trimmed = value.trim(); + if trimmed.is_empty() { + None + } else { + Some(trimmed.to_owned()) + } +} + +pub fn normalized(value: &str, fallback: &str) -> String { + let trimmed = value.trim(); + if trimmed.is_empty() { + fallback.to_owned() + } else { + 
trimmed.to_owned() + } +} + +pub fn normalized_optional(value: &str) -> Option { + let trimmed = value.trim(); + if trimmed.is_empty() { + None + } else { + Some(trimmed.to_owned()) + } +} + +pub fn require_value(value: &str, label: &str) -> Result { + normalized_optional(value).ok_or_else(|| anyhow!("{label} is required")) +} diff --git a/burrow-gtk/src/main.rs b/burrow-gtk/src/main.rs index 6f91e2a..b47b63e 100644 --- a/burrow-gtk/src/main.rs +++ b/burrow-gtk/src/main.rs @@ -1,11 +1,15 @@ use anyhow::Result; pub mod components; -mod diag; +mod account_store; +mod daemon_api; // Generated using meson mod config; fn main() { + if let Err(error) = daemon_api::configure_client_paths() { + eprintln!("failed to configure Burrow daemon paths: {error}"); + } components::App::run(); } diff --git a/burrow/src/daemon/apple.rs b/burrow/src/daemon/apple.rs index c60f131..f369ea9 100644 --- a/burrow/src/daemon/apple.rs +++ b/burrow/src/daemon/apple.rs @@ -1,11 +1,11 @@ use std::{ ffi::{c_char, CStr}, path::PathBuf, - sync::Arc, + sync::{Arc, Mutex}, thread, }; -use once_cell::sync::OnceCell; +use once_cell::sync::{Lazy, OnceCell}; use tokio::{ runtime::{Builder, Handle}, sync::Notify, @@ -14,25 +14,35 @@ use tracing::error; use crate::daemon::daemon_main; -static BURROW_NOTIFY: OnceCell> = OnceCell::new(); static BURROW_HANDLE: OnceCell = OnceCell::new(); +static BURROW_READY: OnceCell<()> = OnceCell::new(); +static BURROW_SPAWN_LOCK: Lazy> = Lazy::new(|| Mutex::new(())); #[no_mangle] pub unsafe extern "C" fn spawn_in_process(path: *const c_char, db_path: *const c_char) { + let path_buf = if path.is_null() { + None + } else { + Some(PathBuf::from(CStr::from_ptr(path).to_str().unwrap())) + }; + let db_path_buf = if db_path.is_null() { + None + } else { + Some(PathBuf::from(CStr::from_ptr(db_path).to_str().unwrap())) + }; + spawn_in_process_with_paths(path_buf, db_path_buf); +} + +pub fn spawn_in_process_with_paths(path_buf: Option, db_path_buf: Option) { 
crate::tracing::initialize(); - let notify = BURROW_NOTIFY.get_or_init(|| Arc::new(Notify::new())); + let _guard = BURROW_SPAWN_LOCK.lock().unwrap(); + if BURROW_READY.get().is_some() { + return; + } + + let notify = Arc::new(Notify::new()); let handle = BURROW_HANDLE.get_or_init(|| { - let path_buf = if path.is_null() { - None - } else { - Some(PathBuf::from(CStr::from_ptr(path).to_str().unwrap())) - }; - let db_path_buf = if db_path.is_null() { - None - } else { - Some(PathBuf::from(CStr::from_ptr(db_path).to_str().unwrap())) - }; let sender = notify.clone(); let (handle_tx, handle_rx) = tokio::sync::oneshot::channel(); @@ -62,4 +72,5 @@ pub unsafe extern "C" fn spawn_in_process(path: *const c_char, db_path: *const c let receiver = notify.clone(); handle.block_on(async move { receiver.notified().await }); + let _ = BURROW_READY.set(()); } diff --git a/burrow/src/lib.rs b/burrow/src/lib.rs index 15b6a19..7867d18 100644 --- a/burrow/src/lib.rs +++ b/burrow/src/lib.rs @@ -16,10 +16,10 @@ pub(crate) mod tracing; #[cfg(target_os = "linux")] pub mod usernet; -#[cfg(target_vendor = "apple")] -pub use daemon::apple::spawn_in_process; +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +pub use daemon::apple::{spawn_in_process, spawn_in_process_with_paths}; #[cfg(any(target_os = "linux", target_vendor = "apple"))] pub use daemon::{ - rpc::DaemonResponse, rpc::ServerInfo, DaemonClient, DaemonCommand, DaemonResponseData, - DaemonStartOptions, + rpc::grpc_defs, rpc::BurrowClient, rpc::DaemonResponse, rpc::ServerInfo, DaemonClient, + DaemonCommand, DaemonResponseData, DaemonStartOptions, }; diff --git a/docs/GTK_APP.md b/docs/GTK_APP.md index ef73d2b..582b0a2 100644 --- a/docs/GTK_APP.md +++ b/docs/GTK_APP.md @@ -15,7 +15,7 @@ Note that the flatpak version can compile but will not run properly! 1. 
Install build dependencies ``` - sudo apt install -y clang meson cmake pkg-config libgtk-4-dev libadwaita-1-dev gettext desktop-file-utils + sudo apt install -y clang meson cmake pkg-config libssl-dev libgtk-4-dev libadwaita-1-dev gettext desktop-file-utils ``` 2. Install flatpak builder (Optional) @@ -38,7 +38,7 @@ Note that the flatpak version can compile but will not run properly! 1. Install build dependencies ``` - sudo dnf install -y clang ninja-build cmake meson gtk4-devel glib2-devel libadwaita-devel desktop-file-utils libappstream-glib + sudo dnf install -y clang ninja-build cmake meson openssl-devel gtk4-devel glib2-devel libadwaita-devel desktop-file-utils libappstream-glib ``` 2. Install flatpak builder (Optional) @@ -61,7 +61,7 @@ Note that the flatpak version can compile but will not run properly! 1. Install build dependencies ``` - sudo xbps-install -Sy gcc clang meson cmake pkg-config gtk4-devel gettext desktop-file-utils gtk4-update-icon-cache appstream-glib + sudo xbps-install -Sy gcc clang meson cmake pkg-config openssl-devel gtk4-devel gettext desktop-file-utils gtk4-update-icon-cache appstream-glib ``` 2. Install flatpak builder (Optional) @@ -88,6 +88,12 @@ flatpak install --user \ ## Building +With Nix, enter the focused GTK shell before running the Meson build: + +```bash +nix develop .#gtk +``` +
General @@ -139,6 +145,16 @@ flatpak install --user \ ## Running +The GTK app mirrors the Apple home surface: a Burrow header, Networks carousel, +Accounts section, Tunnel action, and the same add flows for WireGuard, Tor, and +Tailnet. It talks to the daemon over the same gRPC API used by Apple clients for +network storage, tunnel state, Tailnet discovery, authority probing, browser +sign-in, and Tailnet payloads. + +On Linux the GTK app first looks for a daemon on the configured gRPC socket. If +none is reachable, it starts an embedded user-scoped daemon with a socket under +`XDG_RUNTIME_DIR` and a database under `XDG_DATA_HOME` before refreshing the UI. +
General diff --git a/evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md b/evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md index 1227444..a34a609 100644 --- a/evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md +++ b/evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md @@ -44,6 +44,7 @@ Burrow should formalize one Apple/runtime boundary: Apple clients speak only to - Keeping control-plane I/O out of Swift UI reduces accidental secret, token, and callback sprawl across app code. - The daemon boundary makes testing and kill-switch behavior tractable because runtime integration is localized. - Apple daemon lifecycle ownership must be explicit: either the app ensures the daemon is running before RPC or the extension owns it and the UI surfaces daemon-unavailable state clearly. +- Non-Apple presentation clients should follow the same daemon-first lifecycle pattern: connect to a managed daemon when present, or start a user-scoped embedded daemon before issuing RPCs, without adding platform-local control-plane paths. ## Contributor Playbook @@ -54,6 +55,7 @@ Burrow should formalize one Apple/runtime boundary: Apple clients speak only to - daemon unavailable behavior - successful RPC path - error propagation through the UI +- Keep Linux GTK and Apple clients visually and functionally aligned around the same daemon-backed home surface: Networks, Accounts, Tunnel, and add flows should remain corresponding views over the daemon API. ## Alternatives Considered @@ -63,6 +65,7 @@ Burrow should formalize one Apple/runtime boundary: Apple clients speak only to ## Impact on Other Work - Governs the Tailnet refactor and future Apple runtime work. +- Governs Linux GTK daemon startup parity where the same daemon API is reused from a user-scoped presentation process. - Interacts with BEP-0002 control-plane bootstrap and BEP-0003 transport refactoring. 
## Decision diff --git a/evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md b/evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md index fea4aba..36458ef 100644 --- a/evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md +++ b/evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md @@ -37,6 +37,7 @@ Burrow should treat Tailnet as one protocol family. Tailscale-managed and self-h - Burrow-owned authority when explicitly applicable - Discovery returns authority and related metadata; editing the authority is the mechanism that moves a configuration from managed default to custom control server. - The daemon and control layer own provider inference; the UI should primarily present “Tailnet” plus the selected authority. +- Platform clients consume the same daemon gRPC surface for Tailnet discovery, authority probing, browser sign-in, and saved network payloads. macOS/iOS SwiftUI and Linux GTK may differ in presentation and local credential stores, but neither should introduce a second control-plane path. ## Security and Operational Considerations @@ -48,6 +49,7 @@ Burrow should treat Tailnet as one protocol family. Tailscale-managed and self-h - Remove provider pickers from Tailnet UI unless a concrete protocol difference requires one. - Store the authority explicitly in payloads and infer provider internally only when needed. +- Keep Linux GTK and Apple clients at functional parity by routing Tailnet add/discover/probe/login through `TailnetControl` and `Networks` RPCs instead of platform-local HTTP or legacy JSON daemon commands. - Prefer tests that validate authority normalization and discovery behavior over UI-provider branching. ## Alternatives Considered @@ -58,7 +60,7 @@ Burrow should treat Tailnet as one protocol family. Tailscale-managed and self-h ## Impact on Other Work - Refines BEP-0002’s Tailscale-shaped control-plane work. 
-- Constrains the Tailnet Apple refactor and future daemon control-plane storage. +- Constrains the Tailnet Apple and Linux GTK refactors plus future daemon control-plane storage. ## Decision @@ -68,4 +70,5 @@ Pending. - `burrow/src/control/` - `Apple/UI/Networks/` +- `burrow-gtk/src/` - `proto/burrow.proto`