diff --git a/.forgejo/workflows/build-apple.yml b/.forgejo/workflows/build-apple.yml deleted file mode 100644 index fd69acc..0000000 --- a/.forgejo/workflows/build-apple.yml +++ /dev/null @@ -1,159 +0,0 @@ -name: Build Apple - -on: - push: - branches: - - main - pull_request: - branches: - - "**" - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} - cancel-in-progress: true - -jobs: - build: - name: Build App (${{ matrix.platform }}) - runs-on: namespace-profile-macos-large - strategy: - fail-fast: false - matrix: - include: - - platform: macOS - cache-id: macos - destination: platform=macOS - rust-targets: x86_64-apple-darwin,aarch64-apple-darwin - - platform: iOS Simulator - cache-id: ios-simulator - destination: platform=iOS Simulator,name=iPhone 17 Pro - rust-targets: aarch64-apple-ios-sim,x86_64-apple-ios - env: - CARGO_INCREMENTAL: 0 - RUST_BACKTRACE: short - RUSTC_WRAPPER: sccache - SCCACHE_CACHE_SIZE: 20G - steps: - - name: Checkout - uses: https://code.forgejo.org/actions/checkout@v4 - with: - token: ${{ github.token }} - fetch-depth: 0 - submodules: recursive - - - name: Select Xcode - shell: bash - run: | - set -euo pipefail - candidates=( - "/Applications/Xcode_26.1.app/Contents/Developer" - "/Applications/Xcode_26_1.app/Contents/Developer" - "/Applications/Xcode.app/Contents/Developer" - "/Applications/Xcode/Xcode.app/Contents/Developer" - ) - selected="" - for candidate in "${candidates[@]}"; do - if [[ -d "$candidate" ]]; then - selected="$candidate" - break - fi - done - if [[ -z "$selected" ]] && command -v xcode-select >/dev/null 2>&1; then - selected="$(xcode-select -p)" - fi - if [[ -z "$selected" ]]; then - echo "::error ::Unable to locate an Xcode toolchain" >&2 - exit 1 - fi - echo "DEVELOPER_DIR=$selected" >> "$GITHUB_ENV" - DEVELOPER_DIR="$selected" /usr/bin/xcodebuild -version || true - - - name: Prepare Cache Dirs - shell: bash - run: | - set -euo pipefail - 
cache_root="${NSC_CACHE_PATH:-${HOME}/.cache/burrow}" - shared_root="${NSC_SHARED_CACHE_PATH:-${cache_root}/shared}" - lane_root="${NSC_LANE_CACHE_PATH:-${cache_root}/lane/${{ matrix.cache-id }}}" - mkdir -p \ - "${shared_root}/cargo" \ - "${shared_root}/rustup" \ - "${shared_root}/sccache" \ - "${shared_root}/homebrew" \ - "${shared_root}/apple/PackageCache" \ - "${shared_root}/apple/SourcePackages" \ - "${lane_root}/cargo-target" \ - "${lane_root}/DerivedData" - echo "CARGO_HOME=${shared_root}/cargo" >> "${GITHUB_ENV}" - echo "CARGO_TARGET_DIR=${lane_root}/cargo-target" >> "${GITHUB_ENV}" - echo "RUSTUP_HOME=${shared_root}/rustup" >> "${GITHUB_ENV}" - echo "SCCACHE_DIR=${shared_root}/sccache" >> "${GITHUB_ENV}" - echo "HOMEBREW_CACHE=${shared_root}/homebrew" >> "${GITHUB_ENV}" - echo "APPLE_PACKAGE_CACHE=${shared_root}/apple/PackageCache" >> "${GITHUB_ENV}" - echo "APPLE_SOURCE_PACKAGES=${shared_root}/apple/SourcePackages" >> "${GITHUB_ENV}" - echo "APPLE_DERIVED_DATA=${lane_root}/DerivedData" >> "${GITHUB_ENV}" - df -h "${shared_root}" "${lane_root}" || true - - - name: Install Rust - shell: bash - run: | - set -euo pipefail - - export PATH="${CARGO_HOME}/bin:${PATH}" - - if ! 
command -v rustup >/dev/null 2>&1; then - curl --proto '=https' --tlsv1.2 -fsSL https://sh.rustup.rs | sh -s -- -y --profile minimal --default-toolchain 1.93.1 - else - rustup set profile minimal - rustup toolchain install 1.93.1 - rustup default 1.93.1 - fi - - mkdir -p "${CARGO_HOME}/bin" - echo "${CARGO_HOME}/bin" >> "${GITHUB_PATH}" - export PATH="${CARGO_HOME}/bin:${PATH}" - - rustup show active-toolchain - toolchain="$(rustup show active-toolchain | awk '{print $1}')" - cargo_bin="$(rustup which --toolchain "${toolchain}" cargo)" - rustc_bin="$(rustup which --toolchain "${toolchain}" rustc)" - - targets='${{ matrix.rust-targets }}' - for target in ${targets//,/ }; do - rustup target add --toolchain "${toolchain}" "${target}" - done - - "${rustc_bin}" --version - "${cargo_bin}" --version - - - name: Install Protobuf - shell: bash - run: | - set -euo pipefail - if ! command -v protoc >/dev/null 2>&1; then - brew install protobuf - fi - if ! command -v sccache >/dev/null 2>&1; then - brew install sccache - fi - - - name: Build - shell: bash - working-directory: Apple - run: | - set -euo pipefail - xcodebuild build \ - -project Burrow.xcodeproj \ - -scheme App \ - -destination '${{ matrix.destination }}' \ - -skipPackagePluginValidation \ - -skipMacroValidation \ - -onlyUsePackageVersionsFromResolvedFile \ - -clonedSourcePackagesDirPath "$APPLE_SOURCE_PACKAGES" \ - -packageCachePath "$APPLE_PACKAGE_CACHE" \ - -derivedDataPath "$APPLE_DERIVED_DATA" \ - CODE_SIGNING_ALLOWED=NO \ - CODE_SIGNING_REQUIRED=NO \ - CODE_SIGN_IDENTITY="" \ - DEVELOPMENT_TEAM="" diff --git a/.forgejo/workflows/build-rust.yml b/.forgejo/workflows/build-rust.yml index 53191ab..9ed49e1 100644 --- a/.forgejo/workflows/build-rust.yml +++ b/.forgejo/workflows/build-rust.yml @@ -16,50 +16,27 @@ concurrency: jobs: rust: name: Cargo Test - runs-on: namespace-profile-linux-medium - env: - CARGO_INCREMENTAL: 0 - NIX_CONFIG: | - experimental-features = nix-command flakes - accept-flake-config = true - 
RUSTC_WRAPPER: sccache - SCCACHE_CACHE_SIZE: 20G + runs-on: [self-hosted, linux, x86_64, burrow-forge] steps: - name: Checkout - uses: https://code.forgejo.org/actions/checkout@v4 - with: - token: ${{ github.token }} - fetch-depth: 0 - - - name: Prepare Cache Dirs shell: bash run: | set -euo pipefail - cache_root="${NSC_CACHE_PATH:-${HOME}/.cache/burrow}" - shared_root="${NSC_SHARED_CACHE_PATH:-${cache_root}/shared}" - lane_root="${NSC_LANE_CACHE_PATH:-${cache_root}/lane/build-rust}" - mkdir -p \ - "${shared_root}/cargo" \ - "${shared_root}/sccache" \ - "${shared_root}/xdg" \ - "${lane_root}/cargo-target" - echo "CARGO_HOME=${shared_root}/cargo" >> "${GITHUB_ENV}" - echo "SCCACHE_DIR=${shared_root}/sccache" >> "${GITHUB_ENV}" - echo "XDG_CACHE_HOME=${shared_root}/xdg" >> "${GITHUB_ENV}" - echo "CARGO_TARGET_DIR=${lane_root}/cargo-target" >> "${GITHUB_ENV}" - { - echo 'NIX_CONFIG<> "${GITHUB_ENV}" - df -h /nix "${shared_root}" "${lane_root}" || true + repo_url="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git" + if [ ! -d .git ]; then + git init . 
+ fi + if git remote get-url origin >/dev/null 2>&1; then + git remote set-url origin "${repo_url}" + else + git remote add origin "${repo_url}" + fi + git fetch --force --tags origin "${GITHUB_SHA}" + git checkout --force --detach FETCH_HEAD + git clean -ffdqx - name: Test shell: bash run: | set -euo pipefail - nix develop .#ci -c bash -euo pipefail -c ' - sccache --zero-stats >/dev/null 2>&1 || true - cargo test --workspace --all-features - sccache --show-stats || true - ' + nix develop .#ci -c cargo test --workspace --all-features diff --git a/.forgejo/workflows/build-site.yml b/.forgejo/workflows/build-site.yml index ea4d58e..67be5bb 100644 --- a/.forgejo/workflows/build-site.yml +++ b/.forgejo/workflows/build-site.yml @@ -16,48 +16,27 @@ concurrency: jobs: site: name: Next.js Build - runs-on: namespace-profile-linux-medium - env: - NIX_CONFIG: | - experimental-features = nix-command flakes - accept-flake-config = true + runs-on: [self-hosted, linux, x86_64, burrow-forge] steps: - name: Checkout - uses: https://code.forgejo.org/actions/checkout@v4 - with: - token: ${{ github.token }} - fetch-depth: 0 - - - name: Prepare Cache Dirs shell: bash run: | set -euo pipefail - cache_root="${NSC_CACHE_PATH:-${HOME}/.cache/burrow}" - shared_root="${NSC_SHARED_CACHE_PATH:-${cache_root}/shared}" - lane_root="${NSC_LANE_CACHE_PATH:-${cache_root}/lane/build-site}" - mkdir -p \ - "${shared_root}/npm" \ - "${shared_root}/xdg" \ - "${lane_root}/next-cache" - echo "NPM_CONFIG_CACHE=${shared_root}/npm" >> "${GITHUB_ENV}" - echo "XDG_CACHE_HOME=${shared_root}/xdg" >> "${GITHUB_ENV}" - echo "NEXT_CACHE_DIR=${lane_root}/next-cache" >> "${GITHUB_ENV}" - { - echo 'NIX_CONFIG<> "${GITHUB_ENV}" - df -h /nix "${shared_root}" "${lane_root}" || true + repo_url="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git" + if [ ! -d .git ]; then + git init . 
+ fi + if git remote get-url origin >/dev/null 2>&1; then + git remote set-url origin "${repo_url}" + else + git remote add origin "${repo_url}" + fi + git fetch --force --tags origin "${GITHUB_SHA}" + git checkout --force --detach FETCH_HEAD + git clean -ffdqx - name: Build shell: bash run: | set -euo pipefail - nix develop .#ci -c bash -euo pipefail -c ' - mkdir -p site/.next - rm -rf site/.next/cache - ln -sfn "${NEXT_CACHE_DIR}" site/.next/cache - cd site - npm install - npm run build - ' + nix develop .#ci -c bash -lc 'cd site && npm ci --no-audit --no-fund && npm run build' diff --git a/.forgejo/workflows/lint-governance.yml b/.forgejo/workflows/lint-governance.yml new file mode 100644 index 0000000..2db94cc --- /dev/null +++ b/.forgejo/workflows/lint-governance.yml @@ -0,0 +1,38 @@ +name: Lint Governance + +on: + push: + branches: + - main + pull_request: + branches: + - "**" + workflow_dispatch: + +jobs: + governance: + name: BEP Metadata + runs-on: [self-hosted, linux, x86_64, burrow-forge] + steps: + - name: Checkout + shell: bash + run: | + set -euo pipefail + repo_url="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git" + if [ ! -d .git ]; then + git init . 
+ fi + if git remote get-url origin >/dev/null 2>&1; then + git remote set-url origin "${repo_url}" + else + git remote add origin "${repo_url}" + fi + git fetch --force --tags origin "${GITHUB_SHA}" + git checkout --force --detach FETCH_HEAD + git clean -ffdqx + + - name: Validate BEP metadata + shell: bash + run: | + set -euo pipefail + python3 Scripts/check-bep-metadata.py diff --git a/.forgejo/workflows/release.yml b/.forgejo/workflows/release.yml new file mode 100644 index 0000000..3d1e92a --- /dev/null +++ b/.forgejo/workflows/release.yml @@ -0,0 +1,60 @@ +name: Release + +on: + push: + tags: + - "v*" + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: false + +jobs: + release: + name: Release Build + runs-on: namespace-profile-linux-medium + steps: + - name: Checkout + uses: https://code.forgejo.org/actions/checkout@v4 + with: + token: ${{ github.token }} + fetch-depth: 0 + + - name: Bootstrap Nix + shell: bash + run: | + set -euo pipefail + chmod +x Scripts/ci/ensure-nix.sh + Scripts/ci/ensure-nix.sh + + - name: Build release artifacts + shell: bash + env: + RELEASE_REF: ${{ github.ref_name }} + run: | + set -euo pipefail + ref="${RELEASE_REF:-manual-${GITHUB_SHA::7}}" + export RELEASE_REF="${ref}" + chmod +x Scripts/ci/build-release-artifacts.sh + nix develop .#ci -c Scripts/ci/build-release-artifacts.sh + + - name: Upload release artifacts + uses: https://code.forgejo.org/actions/upload-artifact@v4 + with: + name: burrow-release-${{ github.ref_name }} + path: dist/* + if-no-files-found: error + + - name: Publish Forgejo release + if: startsWith(github.ref, 'refs/tags/') + shell: bash + env: + RELEASE_TAG: ${{ github.ref_name }} + API_URL: ${{ github.api_url }} + REPOSITORY: ${{ github.repository }} + TOKEN: ${{ github.token }} + run: | + set -euo pipefail + chmod +x Scripts/ci/publish-forgejo-release.sh + nix develop .#ci -c Scripts/ci/publish-forgejo-release.sh diff --git 
a/.github/workflows/build-apple.yml b/.github/workflows/build-apple.yml index 7ae8c4c..5a135b4 100644 --- a/.github/workflows/build-apple.yml +++ b/.github/workflows/build-apple.yml @@ -54,6 +54,7 @@ jobs: - name: Install Rust uses: dtolnay/rust-toolchain@stable with: + toolchain: 1.85.0 targets: ${{ join(matrix.rust-targets, ', ') }} - name: Install Protobuf shell: bash @@ -86,4 +87,4 @@ jobs: destination: ${{ matrix.destination }} test-plan: ${{ matrix.xcode-ui-test }} artifact-prefix: ui-tests-${{ matrix.sdk-name }} - check-name: Xcode UI Tests (${{ matrix.platform }}) \ No newline at end of file + check-name: Xcode UI Tests (${{ matrix.platform }}) diff --git a/.github/workflows/build-rust.yml b/.github/workflows/build-rust.yml index 95fc628..cbbdd81 100644 --- a/.github/workflows/build-rust.yml +++ b/.github/workflows/build-rust.yml @@ -6,6 +6,9 @@ on: pull_request: branches: - "*" +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true jobs: build: name: Build Crate (${{ matrix.platform }}) @@ -72,14 +75,14 @@ jobs: - name: Install Rust uses: dtolnay/rust-toolchain@stable with: - toolchain: stable + toolchain: 1.85.0 components: rustfmt targets: ${{ join(matrix.targets, ', ') }} - name: Setup Rust Cache uses: Swatinem/rust-cache@v2 - name: Build shell: bash - run: cargo build --verbose --workspace --all-features --target ${{ join(matrix.targets, ' --target ') }} --target ${{ join(matrix.test-targets, ' --target ') }} + run: cargo build --locked --verbose --workspace --all-features --target ${{ join(matrix.targets, ' --target ') }} --target ${{ join(matrix.test-targets, ' --target ') }} - name: Test shell: bash - run: cargo test --verbose --workspace --all-features --target ${{ join(matrix.test-targets, ' --target ') }} \ No newline at end of file + run: cargo test --locked --verbose --workspace --all-features --target ${{ join(matrix.test-targets, ' --target ') }} diff --git 
a/.github/workflows/lint-governance.yml b/.github/workflows/lint-governance.yml new file mode 100644 index 0000000..08b665c --- /dev/null +++ b/.github/workflows/lint-governance.yml @@ -0,0 +1,23 @@ +name: Governance Lint + +on: + pull_request: + branches: + - "*" + +jobs: + governance: + name: BEP Metadata + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 0 + + - name: Validate BEP metadata + shell: bash + run: | + set -euo pipefail + python3 Scripts/check-bep-metadata.py diff --git a/.github/workflows/release-apple.yml b/.github/workflows/release-apple.yml index c869d6a..b36ed73 100644 --- a/.github/workflows/release-apple.yml +++ b/.github/workflows/release-apple.yml @@ -47,6 +47,7 @@ jobs: - name: Install Rust uses: dtolnay/rust-toolchain@stable with: + toolchain: 1.85.0 targets: ${{ join(matrix.rust-targets, ', ') }} - name: Install Protobuf shell: bash diff --git a/.gitignore b/.gitignore index 3ce64aa..7efe903 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ # Xcode xcuserdata +Apple/build/ # Swift Apple/Package/.swiftpm/ diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..0ca7ced --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,14 @@ +# instructions for agents + +1. Spell the project name as `Burrow` in user-facing copy and `burrow` in code, package, and protocol identifiers unless an existing integration requires a different literal. +2. Read [CONSTITUTION.md](CONSTITUTION.md) before changing Apple clients, the daemon, the control plane, forge infrastructure, identity, or security-sensitive code. +3. Anchor non-trivial changes in a Burrow Evolution Proposal (BEP) under [evolution/](evolution/README.md) so future contributors can inherit the rationale, safeguards, and rollout shape. +4. 
Before touching the Apple app, daemon IPC, or Tailnet flows, review: + - [evolution/proposals/BEP-0002-control-plane-bootstrap-and-local-auth.md](evolution/proposals/BEP-0002-control-plane-bootstrap-and-local-auth.md) + - [evolution/proposals/BEP-0003-connect-ip-and-negotiation-roadmap.md](evolution/proposals/BEP-0003-connect-ip-and-negotiation-roadmap.md) + - [evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md](evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md) + - [evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md](evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md) +5. Apple clients must talk only to the daemon over gRPC. Do not add direct HTTP, control-plane, or helper-process calls from Swift UI code. +6. Treat Tailnet as one protocol family. Tailscale-managed and self-hosted Headscale-style deployments differ by authority, policy, and auth details, not by a separate user-facing protocol surface. +7. Maintain canonical identity and operator metadata in [contributors.nix](contributors.nix). If Burrow forge, Authentik, Headscale, or admin/group mappings need to change, edit that registry first and derive runtime configuration from it. +8. When process or architecture is unclear, stop and draft or update a BEP instead of improvising durable behavior in code. diff --git a/Apple/App/AppDelegate.swift b/Apple/App/AppDelegate.swift index 0ea93f4..c3cb4cb 100644 --- a/Apple/App/AppDelegate.swift +++ b/Apple/App/AppDelegate.swift @@ -6,6 +6,8 @@ import SwiftUI @main @MainActor class AppDelegate: NSObject, NSApplicationDelegate { + private var windowController: NSWindowController? 
+ private let quitItem: NSMenuItem = { let quitItem = NSMenuItem( title: "Quit Burrow", @@ -17,6 +19,17 @@ class AppDelegate: NSObject, NSApplicationDelegate { return quitItem }() + private lazy var openItem: NSMenuItem = { + let item = NSMenuItem( + title: "Open Burrow", + action: #selector(openWindow), + keyEquivalent: "o" + ) + item.target = self + item.keyEquivalentModifierMask = .command + return item + }() + private let toggleItem: NSMenuItem = { let toggleView = NSHostingView(rootView: MenuItemToggleView()) toggleView.frame.size = CGSize(width: 300, height: 32) @@ -31,6 +44,7 @@ class AppDelegate: NSObject, NSApplicationDelegate { let menu = NSMenu() menu.items = [ toggleItem, + openItem, .separator(), quitItem ] @@ -41,7 +55,7 @@ class AppDelegate: NSObject, NSApplicationDelegate { let statusBar = NSStatusBar.system let statusItem = statusBar.statusItem(withLength: NSStatusItem.squareLength) if let button = statusItem.button { - button.image = NSImage(systemSymbolName: "network.badge.shield.half.filled", accessibilityDescription: nil) + button.image = NSImage(systemSymbolName: "pipe.and.drop.fill", accessibilityDescription: nil) } return statusItem }() @@ -49,5 +63,28 @@ class AppDelegate: NSObject, NSApplicationDelegate { func applicationDidFinishLaunching(_ notification: Notification) { statusItem.menu = menu } + + @objc + private func openWindow() { + if let window = windowController?.window { + window.makeKeyAndOrderFront(nil) + NSApplication.shared.activate(ignoringOtherApps: true) + return + } + + let contentView = BurrowView() + let hostingController = NSHostingController(rootView: contentView) + let window = NSWindow(contentViewController: hostingController) + window.title = "Burrow" + window.setContentSize(NSSize(width: 820, height: 720)) + window.styleMask.insert([.titled, .closable, .miniaturizable, .resizable]) + window.center() + + let controller = NSWindowController(window: window) + controller.shouldCascadeWindows = true + 
controller.showWindow(nil) + windowController = controller + NSApplication.shared.activate(ignoringOtherApps: true) + } } #endif diff --git a/Apple/AppUITests/BurrowUITests.swift b/Apple/AppUITests/BurrowUITests.swift new file mode 100644 index 0000000..b7d8111 --- /dev/null +++ b/Apple/AppUITests/BurrowUITests.swift @@ -0,0 +1,439 @@ +import XCTest +import UIKit + +@MainActor +final class BurrowTailnetLoginUITests: XCTestCase { + private enum TailnetLoginMode: String, Decodable { + case tailscale + case discovered + } + + private struct TestConfig: Decodable { + let email: String + let username: String + let password: String + let mode: TailnetLoginMode? + } + + override func setUpWithError() throws { + continueAfterFailure = false + } + + func testTailnetLoginThroughAuthentikWebSession() throws { + let config = try loadTestConfig() + let email = config.email + let username = config.username + let password = config.password + let mode = config.mode ?? .tailscale + let browserIdentity = mode == .tailscale ? 
email : username + + let app = XCUIApplication() + app.launch() + + let tailnetButton = app.buttons["quick-add-tailnet"] + XCTAssertTrue(tailnetButton.waitForExistence(timeout: 15), "Tailnet add button did not appear") + tailnetButton.tap() + + configureTailnetIfNeeded(in: app, mode: mode) + + let discoveryField = app.textFields["tailnet-discovery-email"] + XCTAssertTrue(discoveryField.waitForExistence(timeout: 10), "Tailnet discovery email field did not appear") + replaceText(in: discoveryField, with: email) + + let serverCard = app.descendants(matching: .any) + .matching(identifier: "tailnet-server-card") + .firstMatch + XCTAssertTrue(serverCard.waitForExistence(timeout: 5), "Tailnet server card did not appear") + + let signInButton = app.buttons["tailnet-start-sign-in"] + XCTAssertTrue(signInButton.waitForExistence(timeout: 10), "Tailnet sign-in button did not appear") + signInButton.tap() + + acceptAuthenticationPromptIfNeeded(in: app, timeout: 20) + + let webSession = webAuthenticationSession() + XCTAssertTrue(webSession.waitForExistence(timeout: 20), "Safari authentication session did not appear") + + signIntoAuthentik(in: webSession, username: browserIdentity, password: password) + + app.activate() + XCTAssertTrue( + waitForTailnetSignedIn(in: app, timeout: 60), + "Tailnet sign-in never reached the running state" + ) + } + + private func configureTailnetIfNeeded(in app: XCUIApplication, mode: TailnetLoginMode) { + guard mode == .discovered else { return } + + openTailnetMenu(in: app) + tapMenuButton(named: "Edit Custom Server", in: app) + + openTailnetMenu(in: app) + tapMenuButton(named: "Show Advanced Settings", in: app) + + let authorityField = app.textFields["tailnet-authority"] + XCTAssertTrue(authorityField.waitForExistence(timeout: 10), "Tailnet authority field did not appear") + replaceText(in: authorityField, with: "") + } + + private func openTailnetMenu(in app: XCUIApplication) { + let moreButton = app.buttons["More"] + 
XCTAssertTrue(moreButton.waitForExistence(timeout: 5), "Tailnet menu button did not appear") + moreButton.tap() + } + + private func tapMenuButton(named title: String, in app: XCUIApplication) { + let menuButton = firstExistingElement( + from: [ + app.buttons[title], + app.descendants(matching: .button)[title], + ], + timeout: 5 + ) + XCTAssertTrue(menuButton.exists, "Menu action \(title) did not appear") + menuButton.tap() + } + + private func acceptAuthenticationPromptIfNeeded( + in app: XCUIApplication, + timeout: TimeInterval + ) { + let springboard = XCUIApplication(bundleIdentifier: "com.apple.springboard") + let deadline = Date().addingTimeInterval(timeout) + + repeat { + let promptCandidates = [ + springboard.buttons["Continue"], + springboard.buttons["Allow"], + app.buttons["Continue"], + app.buttons["Allow"], + ] + + for button in promptCandidates where button.exists && button.isHittable { + button.tap() + return + } + + RunLoop.current.run(until: Date().addingTimeInterval(0.25)) + } while Date() < deadline + + let promptCandidates = [ + springboard.buttons["Continue"], + springboard.buttons["Allow"], + app.buttons["Continue"], + app.buttons["Allow"], + ] + + for button in promptCandidates where button.exists { + button.tap() + return + } + } + + private func webAuthenticationSession() -> XCUIApplication { + let safariViewService = XCUIApplication(bundleIdentifier: "com.apple.SafariViewService") + if safariViewService.waitForExistence(timeout: 5) { + return safariViewService + } + + let safari = XCUIApplication(bundleIdentifier: "com.apple.mobilesafari") + _ = safari.waitForExistence(timeout: 5) + return safari + } + + private func signIntoAuthentik(in webSession: XCUIApplication, username: String, password: String) { + followTailnetRedirectIfNeeded(in: webSession) + + if !webSession.exists { + return + } + + let immediatePasswordField = firstExistingSecureField(in: webSession, timeout: 2) + if immediatePasswordField.exists { + replaceSecureText(in: 
immediatePasswordField, within: webSession, with: password) + submitAuthenticationForm(in: webSession, focusedField: immediatePasswordField) + return + } + + let usernameField = firstExistingElement( + in: webSession, + queries: [ + { $0.textFields["Username"] }, + { $0.textFields["Email or Username"] }, + { $0.textFields["Email address"] }, + { $0.textFields["Email"] }, + { $0.webViews.textFields["Username"] }, + { $0.webViews.textFields["Email or Username"] }, + { $0.descendants(matching: .textField).firstMatch }, + ], + timeout: 12 + ) + if !usernameField.exists { + return + } + replaceText(in: usernameField, with: username) + + tapFirstExistingButton( + in: webSession, + titles: ["Continue", "Next", "Sign In", "Log in", "Login"], + timeout: 5 + ) + + let passwordField = firstExistingSecureField(in: webSession, timeout: 20) + XCTAssertTrue(passwordField.exists, "Authentik password field did not appear") + replaceSecureText(in: passwordField, within: webSession, with: password) + submitAuthenticationForm(in: webSession, focusedField: passwordField) + } + + private func followTailnetRedirectIfNeeded(in webSession: XCUIApplication) { + let redirectCandidates = [ + webSession.links["Found"], + webSession.webViews.links["Found"], + webSession.buttons["Found"], + webSession.webViews.buttons["Found"], + ] + + let redirectLink = firstExistingElement(from: redirectCandidates, timeout: 8) + if redirectLink.exists { + redirectLink.tap() + } + } + + private func firstExistingSecureField(in app: XCUIApplication, timeout: TimeInterval) -> XCUIElement { + let candidates = [ + app.descendants(matching: .secureTextField).firstMatch, + app.secureTextFields["Password"], + app.secureTextFields["Password or Token"], + app.webViews.secureTextFields["Password"], + app.webViews.secureTextFields["Password or Token"], + ] + + return firstExistingElement(from: candidates, timeout: timeout) + } + + private func tapFirstExistingButton( + in app: XCUIApplication, + titles: [String], + 
timeout: TimeInterval + ) { + let candidates = titles.flatMap { title in + [ + app.buttons[title], + app.webViews.buttons[title], + ] + } + [app.descendants(matching: .button).firstMatch] + + let button = firstExistingElement(from: candidates, timeout: timeout) + XCTAssertTrue(button.exists, "Expected one of \(titles.joined(separator: ", ")) to appear") + button.tap() + } + + private func submitAuthenticationForm(in app: XCUIApplication, focusedField: XCUIElement) { + focus(focusedField) + focusedField.typeText("\n") + if waitForAny( + [ + { !focusedField.exists }, + { !app.staticTexts["Burrow Tailnet Authentication"].exists }, + ], + timeout: 1.5 + ) { + return + } + + let keyboard = app.keyboards.firstMatch + if keyboard.waitForExistence(timeout: 2) { + let keyboardCandidates = [ + "Return", + "return", + "Go", + "go", + "Continue", + "continue", + "Done", + "done", + "Join", + "join", + "Sign In", + "Log In", + "Login", + ] + for title in keyboardCandidates { + let key = keyboard.buttons[title] + if key.exists && key.isHittable { + key.tap() + return + } + } + + if let lastKey = keyboard.buttons.allElementsBoundByIndex.last, + lastKey.exists, + lastKey.isHittable + { + lastKey.tap() + return + } + } + + tapFirstExistingButton( + in: app, + titles: ["Continue", "Sign In", "Log in", "Login"], + timeout: 5 + ) + } + + private func loadTestConfig() throws -> TestConfig { + let environment = ProcessInfo.processInfo.environment + if let email = nonEmptyEnvironment("BURROW_UI_TEST_EMAIL"), + let password = nonEmptyEnvironment("BURROW_UI_TEST_PASSWORD") + { + return TestConfig( + email: email, + username: nonEmptyEnvironment("BURROW_UI_TEST_USERNAME") ?? email, + password: password, + mode: nonEmptyEnvironment("BURROW_UI_TEST_TAILNET_MODE") + .flatMap(TailnetLoginMode.init(rawValue:)) + ) + } + + let configPath = environment["BURROW_UI_TEST_CONFIG_PATH"] ?? 
"/tmp/burrow-ui-test-config.json" + let configURL = URL(fileURLWithPath: configPath) + guard FileManager.default.fileExists(atPath: configURL.path) else { + throw XCTSkip( + "Missing UI test configuration. Expected env vars or config file at \(configURL.path)" + ) + } + + let data = try Data(contentsOf: configURL) + return try JSONDecoder().decode(TestConfig.self, from: data) + } + + private func nonEmptyEnvironment(_ key: String) -> String? { + guard let value = ProcessInfo.processInfo.environment[key]? + .trimmingCharacters(in: .whitespacesAndNewlines), + !value.isEmpty + else { + return nil + } + return value + } + + private func waitForFieldValue( + _ field: XCUIElement, + containing substring: String, + timeout: TimeInterval + ) -> Bool { + let predicate = NSPredicate(format: "value CONTAINS %@", substring) + let expectation = XCTNSPredicateExpectation(predicate: predicate, object: field) + return XCTWaiter.wait(for: [expectation], timeout: timeout) == .completed + } + + private func waitForButtonLabel( + _ button: XCUIElement, + equals expected: String, + timeout: TimeInterval + ) -> Bool { + let predicate = NSPredicate(format: "label == %@", expected) + let expectation = XCTNSPredicateExpectation(predicate: predicate, object: button) + return XCTWaiter.wait(for: [expectation], timeout: timeout) == .completed + } + + private func waitForTailnetSignedIn(in app: XCUIApplication, timeout: TimeInterval) -> Bool { + let button = app.buttons["tailnet-start-sign-in"] + let deadline = Date().addingTimeInterval(timeout) + + repeat { + acceptAuthenticationPromptIfNeeded(in: app, timeout: 1) + if button.exists, button.label == "Signed In" { + return true + } + RunLoop.current.run(until: Date().addingTimeInterval(0.3)) + } while Date() < deadline + + return button.exists && button.label == "Signed In" + } + + private func waitForAny(_ conditions: [() -> Bool], timeout: TimeInterval) -> Bool { + let deadline = Date().addingTimeInterval(timeout) + repeat { + if 
conditions.contains(where: { $0() }) { + return true + } + RunLoop.current.run(until: Date().addingTimeInterval(0.2)) + } while Date() < deadline + return conditions.contains(where: { $0() }) + } + + private func firstExistingElement( + in app: XCUIApplication, + queries: [(XCUIApplication) -> XCUIElement], + timeout: TimeInterval + ) -> XCUIElement { + firstExistingElement(from: queries.map { $0(app) }, timeout: timeout) + } + + private func firstExistingElement(from candidates: [XCUIElement], timeout: TimeInterval) -> XCUIElement { + let deadline = Date().addingTimeInterval(timeout) + repeat { + for candidate in candidates where candidate.exists { + return candidate + } + RunLoop.current.run(until: Date().addingTimeInterval(0.2)) + } while Date() < deadline + + return candidates[0] + } + + private func replaceText(in element: XCUIElement, with value: String) { + focus(element) + clearText(in: element) + element.typeText(value) + } + + private func replaceSecureText(in element: XCUIElement, within app: XCUIApplication, with value: String) { + UIPasteboard.general.string = value + focus(element) + for revealMenu in [ + { element.doubleTap() }, + { element.press(forDuration: 1.2) }, + ] { + revealMenu() + let pasteButton = firstExistingElement(from: pasteCandidates(in: app), timeout: 3) + if pasteButton.exists { + pasteButton.tap() + return + } + } + + focus(element) + element.typeText(value) + } + + private func clearText(in element: XCUIElement) { + guard let currentValue = element.value as? 
String, !currentValue.isEmpty else { + return + } + + let deleteSequence = String(repeating: XCUIKeyboardKey.delete.rawValue, count: currentValue.count) + element.typeText(deleteSequence) + } + + private func focus(_ element: XCUIElement) { + element.coordinate(withNormalizedOffset: CGVector(dx: 0.5, dy: 0.5)).tap() + RunLoop.current.run(until: Date().addingTimeInterval(0.3)) + } + + private func pasteCandidates(in app: XCUIApplication) -> [XCUIElement] { + let pasteLabels = ["Paste", "Incolla", "Paste from Clipboard"] + return pasteLabels.flatMap { label in + [ + app.menuItems[label], + app.buttons[label], + app.webViews.buttons[label], + app.descendants(matching: .button).matching(NSPredicate(format: "label == %@", label)).firstMatch, + app.descendants(matching: .menuItem).matching(NSPredicate(format: "label == %@", label)).firstMatch, + ] + } + } +} diff --git a/Apple/Burrow.xcodeproj/project.pbxproj b/Apple/Burrow.xcodeproj/project.pbxproj index 617b88f..83d32e0 100644 --- a/Apple/Burrow.xcodeproj/project.pbxproj +++ b/Apple/Burrow.xcodeproj/project.pbxproj @@ -8,6 +8,7 @@ /* Begin PBXBuildFile section */ D00AA8972A4669BC005C8102 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = D00AA8962A4669BC005C8102 /* AppDelegate.swift */; }; + D11000012F70000100112233 /* BurrowUITests.swift in Sources */ = {isa = PBXBuildFile; fileRef = D11000042F70000100112233 /* BurrowUITests.swift */; }; D020F65829E4A697002790F6 /* PacketTunnelProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = D020F65729E4A697002790F6 /* PacketTunnelProvider.swift */; }; D020F65D29E4A697002790F6 /* BurrowNetworkExtension.appex in Embed Foundation Extensions */ = {isa = PBXBuildFile; fileRef = D020F65329E4A697002790F6 /* BurrowNetworkExtension.appex */; settings = {ATTRIBUTES = (RemoveHeadersOnCopy, ); }; }; D03383AD2C8E67E300F7C44E /* SwiftProtobuf in Frameworks */ = {isa = PBXBuildFile; productRef = D078F7E22C8DA375008A8CEC /* SwiftProtobuf */; }; @@ -23,7 +24,6 @@ 
D0D4E53A2C8D996F007F820A /* BurrowCore.framework in Embed Frameworks */ = {isa = PBXBuildFile; fileRef = D0D4E5312C8D996F007F820A /* BurrowCore.framework */; settings = {ATTRIBUTES = (CodeSignOnCopy, RemoveHeadersOnCopy, ); }; }; D0D4E56B2C8D9C2F007F820A /* Logging.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E49A2C8D921A007F820A /* Logging.swift */; }; D0D4E5702C8D9C62007F820A /* BurrowCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D0D4E5312C8D996F007F820A /* BurrowCore.framework */; }; - D0D4E5712C8D9C6F007F820A /* HackClub.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E49D2C8D921A007F820A /* HackClub.swift */; }; D0D4E5722C8D9C6F007F820A /* Network.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E49E2C8D921A007F820A /* Network.swift */; }; D0D4E5732C8D9C6F007F820A /* WireGuard.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E49F2C8D921A007F820A /* WireGuard.swift */; }; D0D4E5742C8D9C6F007F820A /* BurrowView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4A22C8D921A007F820A /* BurrowView.swift */; }; @@ -33,7 +33,6 @@ D0D4E5782C8D9C6F007F820A /* NetworkExtension+Async.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4A62C8D921A007F820A /* NetworkExtension+Async.swift */; }; D0D4E5792C8D9C6F007F820A /* NetworkExtensionTunnel.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4A72C8D921A007F820A /* NetworkExtensionTunnel.swift */; }; D0D4E57A2C8D9C6F007F820A /* NetworkView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4A82C8D921A007F820A /* NetworkView.swift */; }; - D0D4E57B2C8D9C6F007F820A /* OAuth2.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4A92C8D921A007F820A /* OAuth2.swift */; }; D0D4E57C2C8D9C6F007F820A /* Tunnel.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4AA2C8D921A007F820A /* Tunnel.swift */; }; D0D4E57D2C8D9C6F007F820A /* TunnelButton.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4AB2C8D921A007F820A /* 
TunnelButton.swift */; }; D0D4E57E2C8D9C6F007F820A /* TunnelStatusView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4AC2C8D921A007F820A /* TunnelStatusView.swift */; }; @@ -44,13 +43,20 @@ D0D4E5A62C8D9E65007F820A /* BurrowCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D0D4E5312C8D996F007F820A /* BurrowCore.framework */; }; D0F4FAD32C8DC79C0068730A /* BurrowCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D0D4E5312C8D996F007F820A /* BurrowCore.framework */; }; D0F7594E2C8DAB6B00126CF3 /* GRPC in Frameworks */ = {isa = PBXBuildFile; productRef = D078F7E02C8DA375008A8CEC /* GRPC */; }; - D0F759612C8DB24B00126CF3 /* grpc-swift-config.json in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4962C8D921A007F820A /* grpc-swift-config.json */; }; - D0F759622C8DB24B00126CF3 /* swift-protobuf-config.json in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4972C8D921A007F820A /* swift-protobuf-config.json */; }; + D0FA10012D10200100112233 /* burrow.pb.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0FA10032D10200100112233 /* burrow.pb.swift */; }; + D0FA10022D10200100112233 /* burrow.grpc.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0FA10042D10200100112233 /* burrow.grpc.swift */; }; D0F7597E2C8DB30500126CF3 /* CGRPCZlib in Frameworks */ = {isa = PBXBuildFile; productRef = D0F7597D2C8DB30500126CF3 /* CGRPCZlib */; }; D0F7598D2C8DB3DA00126CF3 /* Client.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4992C8D921A007F820A /* Client.swift */; }; /* End PBXBuildFile section */ /* Begin PBXContainerItemProxy section */ + D11000022F70000100112233 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = D05B9F6A29E39EEC008CB1F9 /* Project object */; + proxyType = 1; + remoteGlobalIDString = D05B9F7129E39EEC008CB1F9; + remoteInfo = App; + }; D020F65B29E4A697002790F6 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = D05B9F6A29E39EEC008CB1F9 /* Project object */; 
@@ -132,6 +138,9 @@ /* Begin PBXFileReference section */ D00117422B30348D00D87C25 /* Configuration.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = Configuration.xcconfig; sourceTree = ""; }; D00AA8962A4669BC005C8102 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = ""; }; + D11000032F70000100112233 /* BurrowUITests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = BurrowUITests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + D11000042F70000100112233 /* BurrowUITests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = BurrowUITests.swift; sourceTree = ""; }; + D11000052F70000100112233 /* UITests.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = UITests.xcconfig; sourceTree = ""; }; D020F63D29E4A1FF002790F6 /* Identity.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Identity.xcconfig; sourceTree = ""; }; D020F64029E4A1FF002790F6 /* Compiler.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Compiler.xcconfig; sourceTree = ""; }; D020F64229E4A1FF002790F6 /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; @@ -156,11 +165,8 @@ D0BCC6032A09535900AD070D /* libburrow.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libburrow.a; sourceTree = BUILT_PRODUCTS_DIR; }; D0BF09582C8E6789000D8DEC /* UI.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = UI.xcconfig; sourceTree = ""; }; D0D4E4952C8D921A007F820A /* burrow.proto */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.protobuf; path = burrow.proto; sourceTree = ""; }; - D0D4E4962C8D921A007F820A /* grpc-swift-config.json */ = {isa = PBXFileReference; lastKnownFileType 
= text.json; path = "grpc-swift-config.json"; sourceTree = ""; }; - D0D4E4972C8D921A007F820A /* swift-protobuf-config.json */ = {isa = PBXFileReference; lastKnownFileType = text.json; path = "swift-protobuf-config.json"; sourceTree = ""; }; D0D4E4992C8D921A007F820A /* Client.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Client.swift; sourceTree = ""; }; D0D4E49A2C8D921A007F820A /* Logging.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Logging.swift; sourceTree = ""; }; - D0D4E49D2C8D921A007F820A /* HackClub.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = HackClub.swift; sourceTree = ""; }; D0D4E49E2C8D921A007F820A /* Network.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Network.swift; sourceTree = ""; }; D0D4E49F2C8D921A007F820A /* WireGuard.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WireGuard.swift; sourceTree = ""; }; D0D4E4A12C8D921A007F820A /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; @@ -171,7 +177,6 @@ D0D4E4A62C8D921A007F820A /* NetworkExtension+Async.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "NetworkExtension+Async.swift"; sourceTree = ""; }; D0D4E4A72C8D921A007F820A /* NetworkExtensionTunnel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = NetworkExtensionTunnel.swift; sourceTree = ""; }; D0D4E4A82C8D921A007F820A /* NetworkView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = NetworkView.swift; sourceTree = ""; }; - D0D4E4A92C8D921A007F820A /* OAuth2.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = OAuth2.swift; sourceTree = ""; }; D0D4E4AA2C8D921A007F820A /* Tunnel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Tunnel.swift; 
sourceTree = ""; }; D0D4E4AB2C8D921A007F820A /* TunnelButton.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TunnelButton.swift; sourceTree = ""; }; D0D4E4AC2C8D921A007F820A /* TunnelStatusView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TunnelStatusView.swift; sourceTree = ""; }; @@ -183,9 +188,18 @@ D0D4E58E2C8D9D0A007F820A /* Constants.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = Constants.h; sourceTree = ""; }; D0D4E58F2C8D9D0A007F820A /* Constants.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Constants.swift; sourceTree = ""; }; D0D4E5902C8D9D0A007F820A /* module.modulemap */ = {isa = PBXFileReference; lastKnownFileType = "sourcecode.module-map"; path = module.modulemap; sourceTree = ""; }; + D0FA10032D10200100112233 /* burrow.pb.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Generated/burrow.pb.swift; sourceTree = ""; }; + D0FA10042D10200100112233 /* burrow.grpc.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Generated/burrow.grpc.swift; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ + D11000062F70000100112233 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; D020F65029E4A697002790F6 /* Frameworks */ = { isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; @@ -247,6 +261,7 @@ D0D4E4F72C8D941D007F820A /* Framework.xcconfig */, D020F64029E4A1FF002790F6 /* Compiler.xcconfig */, D0D4E4F62C8D932D007F820A /* Debug.xcconfig */, + D11000052F70000100112233 /* UITests.xcconfig */, D04A3E1D2BAF465F0043EC85 /* Version.xcconfig */, D020F64229E4A1FF002790F6 /* Info.plist */, D0D4E5912C8D9D0A007F820A /* Constants */, @@ -272,6 +287,7 @@ isa = PBXGroup; children = ( D05B9F7429E39EEC008CB1F9 /* App */, + 
D11000072F70000100112233 /* AppUITests */, D020F65629E4A697002790F6 /* NetworkExtension */, D0D4E49C2C8D921A007F820A /* Core */, D0D4E4AD2C8D921A007F820A /* UI */, @@ -285,6 +301,7 @@ isa = PBXGroup; children = ( D05B9F7229E39EEC008CB1F9 /* Burrow.app */, + D11000032F70000100112233 /* BurrowUITests.xctest */, D020F65329E4A697002790F6 /* BurrowNetworkExtension.appex */, D0BCC6032A09535900AD070D /* libburrow.a */, D0D4E5312C8D996F007F820A /* BurrowCore.framework */, @@ -307,6 +324,14 @@ path = App; sourceTree = ""; }; + D11000072F70000100112233 /* AppUITests */ = { + isa = PBXGroup; + children = ( + D11000042F70000100112233 /* BurrowUITests.swift */, + ); + path = AppUITests; + sourceTree = ""; + }; D0B98FD729FDDB57004E7149 /* libburrow */ = { isa = PBXGroup; children = ( @@ -321,8 +346,8 @@ isa = PBXGroup; children = ( D0D4E4952C8D921A007F820A /* burrow.proto */, - D0D4E4962C8D921A007F820A /* grpc-swift-config.json */, - D0D4E4972C8D921A007F820A /* swift-protobuf-config.json */, + D0FA10032D10200100112233 /* burrow.pb.swift */, + D0FA10042D10200100112233 /* burrow.grpc.swift */, ); path = Client; sourceTree = ""; @@ -340,7 +365,6 @@ D0D4E4A02C8D921A007F820A /* Networks */ = { isa = PBXGroup; children = ( - D0D4E49D2C8D921A007F820A /* HackClub.swift */, D0D4E49E2C8D921A007F820A /* Network.swift */, D0D4E49F2C8D921A007F820A /* WireGuard.swift */, ); @@ -358,7 +382,6 @@ D0D4E4A62C8D921A007F820A /* NetworkExtension+Async.swift */, D0D4E4A72C8D921A007F820A /* NetworkExtensionTunnel.swift */, D0D4E4A82C8D921A007F820A /* NetworkView.swift */, - D0D4E4A92C8D921A007F820A /* OAuth2.swift */, D0D4E4AA2C8D921A007F820A /* Tunnel.swift */, D0D4E4AB2C8D921A007F820A /* TunnelButton.swift */, D0D4E4AC2C8D921A007F820A /* TunnelStatusView.swift */, @@ -381,6 +404,24 @@ /* End PBXGroup section */ /* Begin PBXNativeTarget section */ + D11000082F70000100112233 /* BurrowUITests */ = { + isa = PBXNativeTarget; + buildConfigurationList = D110000E2F70000100112233 /* Build configuration list 
for PBXNativeTarget "BurrowUITests" */; + buildPhases = ( + D110000A2F70000100112233 /* Sources */, + D11000062F70000100112233 /* Frameworks */, + D11000092F70000100112233 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + D110000B2F70000100112233 /* PBXTargetDependency */, + ); + name = BurrowUITests; + productName = BurrowUITests; + productReference = D11000032F70000100112233 /* BurrowUITests.xctest */; + productType = "com.apple.product-type.bundle.ui-testing"; + }; D020F65229E4A697002790F6 /* NetworkExtension */ = { isa = PBXNativeTarget; buildConfigurationList = D020F65E29E4A697002790F6 /* Build configuration list for PBXNativeTarget "NetworkExtension" */; @@ -434,8 +475,6 @@ ); dependencies = ( D0F7598A2C8DB34200126CF3 /* PBXTargetDependency */, - D0F7595E2C8DB24400126CF3 /* PBXTargetDependency */, - D0F759602C8DB24400126CF3 /* PBXTargetDependency */, ); name = Core; packageProductDependencies = ( @@ -498,6 +537,10 @@ LastSwiftUpdateCheck = 1600; LastUpgradeCheck = 1520; TargetAttributes = { + D11000082F70000100112233 = { + CreatedOnToolsVersion = 16.0; + TestTargetID = D05B9F7129E39EEC008CB1F9; + }; D020F65229E4A697002790F6 = { CreatedOnToolsVersion = 14.3; }; @@ -530,6 +573,7 @@ projectRoot = ""; targets = ( D05B9F7129E39EEC008CB1F9 /* App */, + D11000082F70000100112233 /* BurrowUITests */, D020F65229E4A697002790F6 /* NetworkExtension */, D0D4E5502C8D9BF2007F820A /* UI */, D0D4E5302C8D996F007F820A /* Core */, @@ -539,6 +583,13 @@ /* End PBXProject section */ /* Begin PBXResourcesBuildPhase section */ + D11000092F70000100112233 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; D05B9F7029E39EEC008CB1F9 /* Resources */ = { isa = PBXResourcesBuildPhase; buildActionMask = 2147483647; @@ -602,6 +653,14 @@ /* End PBXShellScriptBuildPhase section */ /* Begin PBXSourcesBuildPhase section */ + D110000A2F70000100112233 /* Sources */ = { + isa = 
PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + D11000012F70000100112233 /* BurrowUITests.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; D020F64F29E4A697002790F6 /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; @@ -623,8 +682,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( - D0F759612C8DB24B00126CF3 /* grpc-swift-config.json in Sources */, - D0F759622C8DB24B00126CF3 /* swift-protobuf-config.json in Sources */, + D0FA10012D10200100112233 /* burrow.pb.swift in Sources */, + D0FA10022D10200100112233 /* burrow.grpc.swift in Sources */, D0F7598D2C8DB3DA00126CF3 /* Client.swift in Sources */, D0D4E56B2C8D9C2F007F820A /* Logging.swift in Sources */, ); @@ -634,7 +693,6 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( - D0D4E5712C8D9C6F007F820A /* HackClub.swift in Sources */, D0D4E5722C8D9C6F007F820A /* Network.swift in Sources */, D0D4E5732C8D9C6F007F820A /* WireGuard.swift in Sources */, D0D4E5742C8D9C6F007F820A /* BurrowView.swift in Sources */, @@ -644,7 +702,6 @@ D0D4E5782C8D9C6F007F820A /* NetworkExtension+Async.swift in Sources */, D0D4E5792C8D9C6F007F820A /* NetworkExtensionTunnel.swift in Sources */, D0D4E57A2C8D9C6F007F820A /* NetworkView.swift in Sources */, - D0D4E57B2C8D9C6F007F820A /* OAuth2.swift in Sources */, D0D4E57C2C8D9C6F007F820A /* Tunnel.swift in Sources */, D0D4E57D2C8D9C6F007F820A /* TunnelButton.swift in Sources */, D0D4E57E2C8D9C6F007F820A /* TunnelStatusView.swift in Sources */, @@ -662,6 +719,11 @@ /* End PBXSourcesBuildPhase section */ /* Begin PBXTargetDependency section */ + D110000B2F70000100112233 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = D05B9F7129E39EEC008CB1F9 /* App */; + targetProxy = D11000022F70000100112233 /* PBXContainerItemProxy */; + }; D020F65C29E4A697002790F6 /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = D020F65229E4A697002790F6 /* NetworkExtension */; @@ 
-697,14 +759,6 @@ target = D0D4E5302C8D996F007F820A /* Core */; targetProxy = D0F4FAD12C8DC7960068730A /* PBXContainerItemProxy */; }; - D0F7595E2C8DB24400126CF3 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - productRef = D0F7595D2C8DB24400126CF3 /* GRPCSwiftPlugin */; - }; - D0F759602C8DB24400126CF3 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - productRef = D0F7595F2C8DB24400126CF3 /* SwiftProtobufPlugin */; - }; D0F7598A2C8DB34200126CF3 /* PBXTargetDependency */ = { isa = PBXTargetDependency; productRef = D0F759892C8DB34200126CF3 /* GRPC */; @@ -712,6 +766,20 @@ /* End PBXTargetDependency section */ /* Begin XCBuildConfiguration section */ + D110000C2F70000100112233 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = D11000052F70000100112233 /* UITests.xcconfig */; + buildSettings = { + }; + name = Debug; + }; + D110000D2F70000100112233 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = D11000052F70000100112233 /* UITests.xcconfig */; + buildSettings = { + }; + name = Release; + }; D020F65F29E4A697002790F6 /* Debug */ = { isa = XCBuildConfiguration; baseConfigurationReference = D020F66229E4A6E5002790F6 /* NetworkExtension.xcconfig */; @@ -799,6 +867,15 @@ /* End XCBuildConfiguration section */ /* Begin XCConfigurationList section */ + D110000E2F70000100112233 /* Build configuration list for PBXNativeTarget "BurrowUITests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + D110000C2F70000100112233 /* Debug */, + D110000D2F70000100112233 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; D020F65E29E4A697002790F6 /* Build configuration list for PBXNativeTarget "NetworkExtension" */ = { isa = XCConfigurationList; buildConfigurations = ( @@ -929,16 +1006,6 @@ package = D0B1D10E2C436152004B7823 /* XCRemoteSwiftPackageReference "swift-async-algorithms" */; productName = AsyncAlgorithms; }; - D0F7595D2C8DB24400126CF3 /* 
GRPCSwiftPlugin */ = { - isa = XCSwiftPackageProductDependency; - package = D0D4E4822C8D8EF6007F820A /* XCRemoteSwiftPackageReference "grpc-swift" */; - productName = "plugin:GRPCSwiftPlugin"; - }; - D0F7595F2C8DB24400126CF3 /* SwiftProtobufPlugin */ = { - isa = XCSwiftPackageProductDependency; - package = D0D4E4852C8D8F29007F820A /* XCRemoteSwiftPackageReference "swift-protobuf" */; - productName = "plugin:SwiftProtobufPlugin"; - }; D0F7597D2C8DB30500126CF3 /* CGRPCZlib */ = { isa = XCSwiftPackageProductDependency; package = D0D4E4822C8D8EF6007F820A /* XCRemoteSwiftPackageReference "grpc-swift" */; diff --git a/Apple/Burrow.xcodeproj/xcshareddata/xcschemes/App.xcscheme b/Apple/Burrow.xcodeproj/xcshareddata/xcschemes/App.xcscheme index a524e87..f580ea7 100644 --- a/Apple/Burrow.xcodeproj/xcshareddata/xcschemes/App.xcscheme +++ b/Apple/Burrow.xcodeproj/xcshareddata/xcschemes/App.xcscheme @@ -28,7 +28,20 @@ selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" shouldUseLaunchSchemeArgsEnv = "YES" - shouldAutocreateTestPlan = "YES"> + shouldAutocreateTestPlan = "NO"> + + + + + + = { switch FileManager.default.containerURL(forSecurityApplicationGroupIdentifier: appGroupIdentifier) { case .some(let url): .success(url) - case .none: .failure(.invalidAppGroupIdentifier) + case .none: + fallbackContainerURL().mapError { _ in .invalidAppGroupIdentifier } } }() + + private static func fallbackContainerURL() -> Result { +#if targetEnvironment(simulator) + Result { + // The simulator app's Application Support path lives inside its sandbox container, + // so the host daemon cannot reach it. Use a shared host temp location instead. 
+ let url = URL(filePath: "/tmp", directoryHint: .isDirectory) + .appending(component: bundleIdentifier, directoryHint: .isDirectory) + .appending(component: "SimulatorFallback", directoryHint: .isDirectory) + try FileManager.default.createDirectory(at: url, withIntermediateDirectories: true) + return url + } +#else + .failure(Error.invalidAppGroupIdentifier) +#endif + } } extension Logger { diff --git a/Apple/Configuration/UITests.xcconfig b/Apple/Configuration/UITests.xcconfig new file mode 100644 index 0000000..a97e290 --- /dev/null +++ b/Apple/Configuration/UITests.xcconfig @@ -0,0 +1,14 @@ +#include "Compiler.xcconfig" + +SUPPORTED_PLATFORMS = iphonesimulator iphoneos +TARGETED_DEVICE_FAMILY[sdk=iphone*] = 1,2 + +PRODUCT_NAME = $(TARGET_NAME) +PRODUCT_BUNDLE_IDENTIFIER = $(APP_BUNDLE_IDENTIFIER).uitests + +STRING_CATALOG_GENERATE_SYMBOLS = NO +SWIFT_EMIT_LOC_STRINGS = NO + +ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES +LD_RUNPATH_SEARCH_PATHS = $(inherited) @executable_path/Frameworks @loader_path/Frameworks +TEST_TARGET_NAME = App diff --git a/Apple/Core/Client.swift b/Apple/Core/Client.swift index 8874e3b..7d4cfc7 100644 --- a/Apple/Core/Client.swift +++ b/Apple/Core/Client.swift @@ -1,5 +1,7 @@ +import Foundation import GRPC import NIOTransportServices +import SwiftProtobuf public typealias TunnelClient = Burrow_TunnelAsyncClient public typealias NetworksClient = Burrow_NetworksAsyncClient @@ -30,3 +32,477 @@ extension NetworksClient: Client { self.init(channel: channel, defaultCallOptions: .init(), interceptors: .none) } } + +public struct Burrow_TailnetDiscoverRequest: Sendable { + public var email: String = "" + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TailnetDiscoverResponse: Sendable { + public var domain: String = "" + public var authority: String = "" + public var oidcIssuer: String = "" + public var managed: Bool = false + public var unknownFields = SwiftProtobuf.UnknownStorage() + + 
public init() {} +} + +public struct Burrow_TailnetProbeRequest: Sendable { + public var authority: String = "" + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TailnetProbeResponse: Sendable { + public var authority: String = "" + public var statusCode: Int32 = 0 + public var summary: String = "" + public var detail: String = "" + public var reachable: Bool = false + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TailnetLoginStartRequest: Sendable { + public var accountName: String = "" + public var identityName: String = "" + public var hostname: String = "" + public var authority: String = "" + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TailnetLoginStatusRequest: Sendable { + public var sessionID: String = "" + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TailnetLoginCancelRequest: Sendable { + public var sessionID: String = "" + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TailnetLoginStatusResponse: Sendable { + public var sessionID: String = "" + public var backendState: String = "" + public var authURL: String = "" + public var running: Bool = false + public var needsLogin: Bool = false + public var tailnetName: String = "" + public var magicDNSSuffix: String = "" + public var selfDNSName: String = "" + public var tailnetIPs: [String] = [] + public var health: [String] = [] + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TunnelPacket: Sendable { + public var payload = Data() + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +extension Burrow_TailnetDiscoverRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + 
public static let protoMessageName: String = "burrow.TailnetDiscoverRequest" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "email") + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.email) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.email.isEmpty { + try visitor.visitSingularStringField(value: self.email, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +extension Burrow_TailnetDiscoverResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetDiscoverResponse" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "domain"), + 2: .same(proto: "authority"), + 3: .same(proto: "oidc_issuer"), + 4: .same(proto: "managed"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.domain) + case 2: try decoder.decodeSingularStringField(value: &self.authority) + case 3: try decoder.decodeSingularStringField(value: &self.oidcIssuer) + case 4: try decoder.decodeSingularBoolField(value: &self.managed) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.domain.isEmpty { + try visitor.visitSingularStringField(value: self.domain, fieldNumber: 1) + } + if !self.authority.isEmpty { + try visitor.visitSingularStringField(value: self.authority, fieldNumber: 2) + } + if !self.oidcIssuer.isEmpty { + try visitor.visitSingularStringField(value: self.oidcIssuer, fieldNumber: 3) + } + if self.managed { + try visitor.visitSingularBoolField(value: self.managed, fieldNumber: 4) + } + try 
unknownFields.traverse(visitor: &visitor) + } +} + +extension Burrow_TailnetProbeRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetProbeRequest" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "authority") + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.authority) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.authority.isEmpty { + try visitor.visitSingularStringField(value: self.authority, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +extension Burrow_TailnetProbeResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetProbeResponse" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "authority"), + 2: .same(proto: "status_code"), + 3: .same(proto: "summary"), + 4: .same(proto: "detail"), + 5: .same(proto: "reachable"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.authority) + case 2: try decoder.decodeSingularInt32Field(value: &self.statusCode) + case 3: try decoder.decodeSingularStringField(value: &self.summary) + case 4: try decoder.decodeSingularStringField(value: &self.detail) + case 5: try decoder.decodeSingularBoolField(value: &self.reachable) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.authority.isEmpty { + try visitor.visitSingularStringField(value: self.authority, fieldNumber: 1) + } + if self.statusCode 
!= 0 { + try visitor.visitSingularInt32Field(value: self.statusCode, fieldNumber: 2) + } + if !self.summary.isEmpty { + try visitor.visitSingularStringField(value: self.summary, fieldNumber: 3) + } + if !self.detail.isEmpty { + try visitor.visitSingularStringField(value: self.detail, fieldNumber: 4) + } + if self.reachable { + try visitor.visitSingularBoolField(value: self.reachable, fieldNumber: 5) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +extension Burrow_TailnetLoginStartRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetLoginStartRequest" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "account_name"), + 2: .standard(proto: "identity_name"), + 3: .same(proto: "hostname"), + 4: .same(proto: "authority"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.accountName) + case 2: try decoder.decodeSingularStringField(value: &self.identityName) + case 3: try decoder.decodeSingularStringField(value: &self.hostname) + case 4: try decoder.decodeSingularStringField(value: &self.authority) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.accountName.isEmpty { + try visitor.visitSingularStringField(value: self.accountName, fieldNumber: 1) + } + if !self.identityName.isEmpty { + try visitor.visitSingularStringField(value: self.identityName, fieldNumber: 2) + } + if !self.hostname.isEmpty { + try visitor.visitSingularStringField(value: self.hostname, fieldNumber: 3) + } + if !self.authority.isEmpty { + try visitor.visitSingularStringField(value: self.authority, fieldNumber: 4) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +extension Burrow_TailnetLoginStatusRequest: 
SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetLoginStatusRequest" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id") + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.sessionID) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +extension Burrow_TailnetLoginCancelRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetLoginCancelRequest" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id") + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.sessionID) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +extension Burrow_TailnetLoginStatusResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TailnetLoginStatusResponse" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 2: .standard(proto: "backend_state"), + 3: .standard(proto: "auth_url"), + 4: .same(proto: 
"running"), + 5: .standard(proto: "needs_login"), + 6: .standard(proto: "tailnet_name"), + 7: .standard(proto: "magic_dns_suffix"), + 8: .standard(proto: "self_dns_name"), + 9: .standard(proto: "tailnet_ips"), + 10: .same(proto: "health"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularStringField(value: &self.sessionID) + case 2: try decoder.decodeSingularStringField(value: &self.backendState) + case 3: try decoder.decodeSingularStringField(value: &self.authURL) + case 4: try decoder.decodeSingularBoolField(value: &self.running) + case 5: try decoder.decodeSingularBoolField(value: &self.needsLogin) + case 6: try decoder.decodeSingularStringField(value: &self.tailnetName) + case 7: try decoder.decodeSingularStringField(value: &self.magicDNSSuffix) + case 8: try decoder.decodeSingularStringField(value: &self.selfDNSName) + case 9: try decoder.decodeRepeatedStringField(value: &self.tailnetIPs) + case 10: try decoder.decodeRepeatedStringField(value: &self.health) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + if !self.backendState.isEmpty { + try visitor.visitSingularStringField(value: self.backendState, fieldNumber: 2) + } + if !self.authURL.isEmpty { + try visitor.visitSingularStringField(value: self.authURL, fieldNumber: 3) + } + if self.running { + try visitor.visitSingularBoolField(value: self.running, fieldNumber: 4) + } + if self.needsLogin { + try visitor.visitSingularBoolField(value: self.needsLogin, fieldNumber: 5) + } + if !self.tailnetName.isEmpty { + try visitor.visitSingularStringField(value: self.tailnetName, fieldNumber: 6) + } + if !self.magicDNSSuffix.isEmpty { + try visitor.visitSingularStringField(value: self.magicDNSSuffix, fieldNumber: 7) + } + if 
!self.selfDNSName.isEmpty { + try visitor.visitSingularStringField(value: self.selfDNSName, fieldNumber: 8) + } + if !self.tailnetIPs.isEmpty { + try visitor.visitRepeatedStringField(value: self.tailnetIPs, fieldNumber: 9) + } + if !self.health.isEmpty { + try visitor.visitRepeatedStringField(value: self.health, fieldNumber: 10) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +extension Burrow_TunnelPacket: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "burrow.TunnelPacket" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "payload") + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularBytesField(value: &self.payload) + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.payload.isEmpty { + try visitor.visitSingularBytesField(value: self.payload, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } +} + +public struct TailnetClient: Client, GRPCClient { + public let channel: GRPCChannel + public var defaultCallOptions: CallOptions + + public init(channel: any GRPCChannel) { + self.channel = channel + self.defaultCallOptions = .init() + } + + public func discover( + _ request: Burrow_TailnetDiscoverRequest, + callOptions: CallOptions? = nil + ) async throws -> Burrow_TailnetDiscoverResponse { + try await self.performAsyncUnaryCall( + path: "/burrow.TailnetControl/Discover", + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: [] + ) + } + + public func probe( + _ request: Burrow_TailnetProbeRequest, + callOptions: CallOptions? 
= nil + ) async throws -> Burrow_TailnetProbeResponse { + try await self.performAsyncUnaryCall( + path: "/burrow.TailnetControl/Probe", + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: [] + ) + } + + public func loginStart( + _ request: Burrow_TailnetLoginStartRequest, + callOptions: CallOptions? = nil + ) async throws -> Burrow_TailnetLoginStatusResponse { + try await self.performAsyncUnaryCall( + path: "/burrow.TailnetControl/LoginStart", + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: [] + ) + } + + public func loginStatus( + _ request: Burrow_TailnetLoginStatusRequest, + callOptions: CallOptions? = nil + ) async throws -> Burrow_TailnetLoginStatusResponse { + try await self.performAsyncUnaryCall( + path: "/burrow.TailnetControl/LoginStatus", + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: [] + ) + } + + public func loginCancel( + _ request: Burrow_TailnetLoginCancelRequest, + callOptions: CallOptions? = nil + ) async throws -> Burrow_Empty { + try await self.performAsyncUnaryCall( + path: "/burrow.TailnetControl/LoginCancel", + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: [] + ) + } +} + +public struct TunnelPacketClient: Client, GRPCClient { + public let channel: GRPCChannel + public var defaultCallOptions: CallOptions + + public init(channel: any GRPCChannel) { + self.channel = channel + self.defaultCallOptions = .init() + } + + public func makeTunnelPacketsCall( + callOptions: CallOptions? = nil + ) -> GRPCAsyncBidirectionalStreamingCall { + self.makeAsyncBidirectionalStreamingCall( + path: "/burrow.Tunnel/TunnelPackets", + callOptions: callOptions ?? 
self.defaultCallOptions, + interceptors: [] + ) + } +} diff --git a/Apple/Core/Client/Generated/burrow.grpc.swift b/Apple/Core/Client/Generated/burrow.grpc.swift new file mode 100644 index 0000000..d1f848c --- /dev/null +++ b/Apple/Core/Client/Generated/burrow.grpc.swift @@ -0,0 +1,761 @@ +// +// DO NOT EDIT. +// swift-format-ignore-file +// +// Generated by the protocol buffer compiler. +// Source: burrow.proto +// +import GRPC +import NIO +import NIOConcurrencyHelpers +import SwiftProtobuf + + +/// Usage: instantiate `Burrow_TunnelClient`, then call methods of this protocol to make API calls. +public protocol Burrow_TunnelClientProtocol: GRPCClient { + var serviceName: String { get } + var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? { get } + + func tunnelConfiguration( + _ request: Burrow_Empty, + callOptions: CallOptions?, + handler: @escaping (Burrow_TunnelConfigurationResponse) -> Void + ) -> ServerStreamingCall + + func tunnelStart( + _ request: Burrow_Empty, + callOptions: CallOptions? + ) -> UnaryCall + + func tunnelStop( + _ request: Burrow_Empty, + callOptions: CallOptions? + ) -> UnaryCall + + func tunnelStatus( + _ request: Burrow_Empty, + callOptions: CallOptions?, + handler: @escaping (Burrow_TunnelStatusResponse) -> Void + ) -> ServerStreamingCall +} + +extension Burrow_TunnelClientProtocol { + public var serviceName: String { + return "burrow.Tunnel" + } + + /// Server streaming call to TunnelConfiguration + /// + /// - Parameters: + /// - request: Request to send to TunnelConfiguration. + /// - callOptions: Call options. + /// - handler: A closure called when each response is received from the server. + /// - Returns: A `ServerStreamingCall` with futures for the metadata and status. + public func tunnelConfiguration( + _ request: Burrow_Empty, + callOptions: CallOptions? 
= nil, + handler: @escaping (Burrow_TunnelConfigurationResponse) -> Void + ) -> ServerStreamingCall { + return self.makeServerStreamingCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelConfiguration.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelConfigurationInterceptors() ?? [], + handler: handler + ) + } + + /// Unary call to TunnelStart + /// + /// - Parameters: + /// - request: Request to send to TunnelStart. + /// - callOptions: Call options. + /// - Returns: A `UnaryCall` with futures for the metadata, status and response. + public func tunnelStart( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> UnaryCall { + return self.makeUnaryCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStart.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStartInterceptors() ?? [] + ) + } + + /// Unary call to TunnelStop + /// + /// - Parameters: + /// - request: Request to send to TunnelStop. + /// - callOptions: Call options. + /// - Returns: A `UnaryCall` with futures for the metadata, status and response. + public func tunnelStop( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> UnaryCall { + return self.makeUnaryCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStop.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStopInterceptors() ?? [] + ) + } + + /// Server streaming call to TunnelStatus + /// + /// - Parameters: + /// - request: Request to send to TunnelStatus. + /// - callOptions: Call options. + /// - handler: A closure called when each response is received from the server. + /// - Returns: A `ServerStreamingCall` with futures for the metadata and status. + public func tunnelStatus( + _ request: Burrow_Empty, + callOptions: CallOptions? 
= nil, + handler: @escaping (Burrow_TunnelStatusResponse) -> Void + ) -> ServerStreamingCall { + return self.makeServerStreamingCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStatus.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStatusInterceptors() ?? [], + handler: handler + ) + } +} + +@available(*, deprecated) +extension Burrow_TunnelClient: @unchecked Sendable {} + +@available(*, deprecated, renamed: "Burrow_TunnelNIOClient") +public final class Burrow_TunnelClient: Burrow_TunnelClientProtocol { + private let lock = Lock() + private var _defaultCallOptions: CallOptions + private var _interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? + public let channel: GRPCChannel + public var defaultCallOptions: CallOptions { + get { self.lock.withLock { return self._defaultCallOptions } } + set { self.lock.withLockVoid { self._defaultCallOptions = newValue } } + } + public var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? { + get { self.lock.withLock { return self._interceptors } } + set { self.lock.withLockVoid { self._interceptors = newValue } } + } + + /// Creates a client for the burrow.Tunnel service. + /// + /// - Parameters: + /// - channel: `GRPCChannel` to the service host. + /// - defaultCallOptions: Options to use for each service call if the user doesn't provide them. + /// - interceptors: A factory providing interceptors for each RPC. + public init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? = nil + ) { + self.channel = channel + self._defaultCallOptions = defaultCallOptions + self._interceptors = interceptors + } +} + +public struct Burrow_TunnelNIOClient: Burrow_TunnelClientProtocol { + public var channel: GRPCChannel + public var defaultCallOptions: CallOptions + public var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? 
+ + /// Creates a client for the burrow.Tunnel service. + /// + /// - Parameters: + /// - channel: `GRPCChannel` to the service host. + /// - defaultCallOptions: Options to use for each service call if the user doesn't provide them. + /// - interceptors: A factory providing interceptors for each RPC. + public init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? = nil + ) { + self.channel = channel + self.defaultCallOptions = defaultCallOptions + self.interceptors = interceptors + } +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +public protocol Burrow_TunnelAsyncClientProtocol: GRPCClient { + static var serviceDescriptor: GRPCServiceDescriptor { get } + var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? { get } + + func makeTunnelConfigurationCall( + _ request: Burrow_Empty, + callOptions: CallOptions? + ) -> GRPCAsyncServerStreamingCall + + func makeTunnelStartCall( + _ request: Burrow_Empty, + callOptions: CallOptions? + ) -> GRPCAsyncUnaryCall + + func makeTunnelStopCall( + _ request: Burrow_Empty, + callOptions: CallOptions? + ) -> GRPCAsyncUnaryCall + + func makeTunnelStatusCall( + _ request: Burrow_Empty, + callOptions: CallOptions? + ) -> GRPCAsyncServerStreamingCall +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +extension Burrow_TunnelAsyncClientProtocol { + public static var serviceDescriptor: GRPCServiceDescriptor { + return Burrow_TunnelClientMetadata.serviceDescriptor + } + + public var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? { + return nil + } + + public func makeTunnelConfigurationCall( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncServerStreamingCall { + return self.makeAsyncServerStreamingCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelConfiguration.path, + request: request, + callOptions: callOptions ?? 
self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelConfigurationInterceptors() ?? [] + ) + } + + public func makeTunnelStartCall( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncUnaryCall { + return self.makeAsyncUnaryCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStart.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStartInterceptors() ?? [] + ) + } + + public func makeTunnelStopCall( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncUnaryCall { + return self.makeAsyncUnaryCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStop.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStopInterceptors() ?? [] + ) + } + + public func makeTunnelStatusCall( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncServerStreamingCall { + return self.makeAsyncServerStreamingCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStatus.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStatusInterceptors() ?? [] + ) + } +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +extension Burrow_TunnelAsyncClientProtocol { + public func tunnelConfiguration( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream { + return self.performAsyncServerStreamingCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelConfiguration.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelConfigurationInterceptors() ?? [] + ) + } + + public func tunnelStart( + _ request: Burrow_Empty, + callOptions: CallOptions? 
= nil + ) async throws -> Burrow_Empty { + return try await self.performAsyncUnaryCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStart.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStartInterceptors() ?? [] + ) + } + + public func tunnelStop( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) async throws -> Burrow_Empty { + return try await self.performAsyncUnaryCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStop.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStopInterceptors() ?? [] + ) + } + + public func tunnelStatus( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream { + return self.performAsyncServerStreamingCall( + path: Burrow_TunnelClientMetadata.Methods.tunnelStatus.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeTunnelStatusInterceptors() ?? [] + ) + } +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +public struct Burrow_TunnelAsyncClient: Burrow_TunnelAsyncClientProtocol { + public var channel: GRPCChannel + public var defaultCallOptions: CallOptions + public var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? + + public init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? = nil + ) { + self.channel = channel + self.defaultCallOptions = defaultCallOptions + self.interceptors = interceptors + } +} + +public protocol Burrow_TunnelClientInterceptorFactoryProtocol: Sendable { + + /// - Returns: Interceptors to use when invoking 'tunnelConfiguration'. + func makeTunnelConfigurationInterceptors() -> [ClientInterceptor] + + /// - Returns: Interceptors to use when invoking 'tunnelStart'. 
+ func makeTunnelStartInterceptors() -> [ClientInterceptor] + + /// - Returns: Interceptors to use when invoking 'tunnelStop'. + func makeTunnelStopInterceptors() -> [ClientInterceptor] + + /// - Returns: Interceptors to use when invoking 'tunnelStatus'. + func makeTunnelStatusInterceptors() -> [ClientInterceptor] +} + +public enum Burrow_TunnelClientMetadata { + public static let serviceDescriptor = GRPCServiceDescriptor( + name: "Tunnel", + fullName: "burrow.Tunnel", + methods: [ + Burrow_TunnelClientMetadata.Methods.tunnelConfiguration, + Burrow_TunnelClientMetadata.Methods.tunnelStart, + Burrow_TunnelClientMetadata.Methods.tunnelStop, + Burrow_TunnelClientMetadata.Methods.tunnelStatus, + ] + ) + + public enum Methods { + public static let tunnelConfiguration = GRPCMethodDescriptor( + name: "TunnelConfiguration", + path: "/burrow.Tunnel/TunnelConfiguration", + type: GRPCCallType.serverStreaming + ) + + public static let tunnelStart = GRPCMethodDescriptor( + name: "TunnelStart", + path: "/burrow.Tunnel/TunnelStart", + type: GRPCCallType.unary + ) + + public static let tunnelStop = GRPCMethodDescriptor( + name: "TunnelStop", + path: "/burrow.Tunnel/TunnelStop", + type: GRPCCallType.unary + ) + + public static let tunnelStatus = GRPCMethodDescriptor( + name: "TunnelStatus", + path: "/burrow.Tunnel/TunnelStatus", + type: GRPCCallType.serverStreaming + ) + } +} + +/// Usage: instantiate `Burrow_NetworksClient`, then call methods of this protocol to make API calls. +public protocol Burrow_NetworksClientProtocol: GRPCClient { + var serviceName: String { get } + var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? { get } + + func networkAdd( + _ request: Burrow_Network, + callOptions: CallOptions? 
+ ) -> UnaryCall + + func networkList( + _ request: Burrow_Empty, + callOptions: CallOptions?, + handler: @escaping (Burrow_NetworkListResponse) -> Void + ) -> ServerStreamingCall + + func networkReorder( + _ request: Burrow_NetworkReorderRequest, + callOptions: CallOptions? + ) -> UnaryCall + + func networkDelete( + _ request: Burrow_NetworkDeleteRequest, + callOptions: CallOptions? + ) -> UnaryCall +} + +extension Burrow_NetworksClientProtocol { + public var serviceName: String { + return "burrow.Networks" + } + + /// Unary call to NetworkAdd + /// + /// - Parameters: + /// - request: Request to send to NetworkAdd. + /// - callOptions: Call options. + /// - Returns: A `UnaryCall` with futures for the metadata, status and response. + public func networkAdd( + _ request: Burrow_Network, + callOptions: CallOptions? = nil + ) -> UnaryCall { + return self.makeUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkAdd.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkAddInterceptors() ?? [] + ) + } + + /// Server streaming call to NetworkList + /// + /// - Parameters: + /// - request: Request to send to NetworkList. + /// - callOptions: Call options. + /// - handler: A closure called when each response is received from the server. + /// - Returns: A `ServerStreamingCall` with futures for the metadata and status. + public func networkList( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil, + handler: @escaping (Burrow_NetworkListResponse) -> Void + ) -> ServerStreamingCall { + return self.makeServerStreamingCall( + path: Burrow_NetworksClientMetadata.Methods.networkList.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkListInterceptors() ?? [], + handler: handler + ) + } + + /// Unary call to NetworkReorder + /// + /// - Parameters: + /// - request: Request to send to NetworkReorder. 
+ /// - callOptions: Call options. + /// - Returns: A `UnaryCall` with futures for the metadata, status and response. + public func networkReorder( + _ request: Burrow_NetworkReorderRequest, + callOptions: CallOptions? = nil + ) -> UnaryCall { + return self.makeUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkReorder.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkReorderInterceptors() ?? [] + ) + } + + /// Unary call to NetworkDelete + /// + /// - Parameters: + /// - request: Request to send to NetworkDelete. + /// - callOptions: Call options. + /// - Returns: A `UnaryCall` with futures for the metadata, status and response. + public func networkDelete( + _ request: Burrow_NetworkDeleteRequest, + callOptions: CallOptions? = nil + ) -> UnaryCall { + return self.makeUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkDelete.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkDeleteInterceptors() ?? [] + ) + } +} + +@available(*, deprecated) +extension Burrow_NetworksClient: @unchecked Sendable {} + +@available(*, deprecated, renamed: "Burrow_NetworksNIOClient") +public final class Burrow_NetworksClient: Burrow_NetworksClientProtocol { + private let lock = Lock() + private var _defaultCallOptions: CallOptions + private var _interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? + public let channel: GRPCChannel + public var defaultCallOptions: CallOptions { + get { self.lock.withLock { return self._defaultCallOptions } } + set { self.lock.withLockVoid { self._defaultCallOptions = newValue } } + } + public var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? { + get { self.lock.withLock { return self._interceptors } } + set { self.lock.withLockVoid { self._interceptors = newValue } } + } + + /// Creates a client for the burrow.Networks service. 
+ /// + /// - Parameters: + /// - channel: `GRPCChannel` to the service host. + /// - defaultCallOptions: Options to use for each service call if the user doesn't provide them. + /// - interceptors: A factory providing interceptors for each RPC. + public init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? = nil + ) { + self.channel = channel + self._defaultCallOptions = defaultCallOptions + self._interceptors = interceptors + } +} + +public struct Burrow_NetworksNIOClient: Burrow_NetworksClientProtocol { + public var channel: GRPCChannel + public var defaultCallOptions: CallOptions + public var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? + + /// Creates a client for the burrow.Networks service. + /// + /// - Parameters: + /// - channel: `GRPCChannel` to the service host. + /// - defaultCallOptions: Options to use for each service call if the user doesn't provide them. + /// - interceptors: A factory providing interceptors for each RPC. + public init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? = nil + ) { + self.channel = channel + self.defaultCallOptions = defaultCallOptions + self.interceptors = interceptors + } +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +public protocol Burrow_NetworksAsyncClientProtocol: GRPCClient { + static var serviceDescriptor: GRPCServiceDescriptor { get } + var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? { get } + + func makeNetworkAddCall( + _ request: Burrow_Network, + callOptions: CallOptions? + ) -> GRPCAsyncUnaryCall + + func makeNetworkListCall( + _ request: Burrow_Empty, + callOptions: CallOptions? + ) -> GRPCAsyncServerStreamingCall + + func makeNetworkReorderCall( + _ request: Burrow_NetworkReorderRequest, + callOptions: CallOptions? 
+ ) -> GRPCAsyncUnaryCall + + func makeNetworkDeleteCall( + _ request: Burrow_NetworkDeleteRequest, + callOptions: CallOptions? + ) -> GRPCAsyncUnaryCall +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +extension Burrow_NetworksAsyncClientProtocol { + public static var serviceDescriptor: GRPCServiceDescriptor { + return Burrow_NetworksClientMetadata.serviceDescriptor + } + + public var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? { + return nil + } + + public func makeNetworkAddCall( + _ request: Burrow_Network, + callOptions: CallOptions? = nil + ) -> GRPCAsyncUnaryCall { + return self.makeAsyncUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkAdd.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkAddInterceptors() ?? [] + ) + } + + public func makeNetworkListCall( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncServerStreamingCall { + return self.makeAsyncServerStreamingCall( + path: Burrow_NetworksClientMetadata.Methods.networkList.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkListInterceptors() ?? [] + ) + } + + public func makeNetworkReorderCall( + _ request: Burrow_NetworkReorderRequest, + callOptions: CallOptions? = nil + ) -> GRPCAsyncUnaryCall { + return self.makeAsyncUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkReorder.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkReorderInterceptors() ?? [] + ) + } + + public func makeNetworkDeleteCall( + _ request: Burrow_NetworkDeleteRequest, + callOptions: CallOptions? = nil + ) -> GRPCAsyncUnaryCall { + return self.makeAsyncUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkDelete.path, + request: request, + callOptions: callOptions ?? 
self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkDeleteInterceptors() ?? [] + ) + } +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +extension Burrow_NetworksAsyncClientProtocol { + public func networkAdd( + _ request: Burrow_Network, + callOptions: CallOptions? = nil + ) async throws -> Burrow_Empty { + return try await self.performAsyncUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkAdd.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkAddInterceptors() ?? [] + ) + } + + public func networkList( + _ request: Burrow_Empty, + callOptions: CallOptions? = nil + ) -> GRPCAsyncResponseStream { + return self.performAsyncServerStreamingCall( + path: Burrow_NetworksClientMetadata.Methods.networkList.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkListInterceptors() ?? [] + ) + } + + public func networkReorder( + _ request: Burrow_NetworkReorderRequest, + callOptions: CallOptions? = nil + ) async throws -> Burrow_Empty { + return try await self.performAsyncUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkReorder.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkReorderInterceptors() ?? [] + ) + } + + public func networkDelete( + _ request: Burrow_NetworkDeleteRequest, + callOptions: CallOptions? = nil + ) async throws -> Burrow_Empty { + return try await self.performAsyncUnaryCall( + path: Burrow_NetworksClientMetadata.Methods.networkDelete.path, + request: request, + callOptions: callOptions ?? self.defaultCallOptions, + interceptors: self.interceptors?.makeNetworkDeleteInterceptors() ?? 
[] + ) + } +} + +@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +public struct Burrow_NetworksAsyncClient: Burrow_NetworksAsyncClientProtocol { + public var channel: GRPCChannel + public var defaultCallOptions: CallOptions + public var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? + + public init( + channel: GRPCChannel, + defaultCallOptions: CallOptions = CallOptions(), + interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? = nil + ) { + self.channel = channel + self.defaultCallOptions = defaultCallOptions + self.interceptors = interceptors + } +} + +public protocol Burrow_NetworksClientInterceptorFactoryProtocol: Sendable { + + /// - Returns: Interceptors to use when invoking 'networkAdd'. + func makeNetworkAddInterceptors() -> [ClientInterceptor] + + /// - Returns: Interceptors to use when invoking 'networkList'. + func makeNetworkListInterceptors() -> [ClientInterceptor] + + /// - Returns: Interceptors to use when invoking 'networkReorder'. + func makeNetworkReorderInterceptors() -> [ClientInterceptor] + + /// - Returns: Interceptors to use when invoking 'networkDelete'. 
+ func makeNetworkDeleteInterceptors() -> [ClientInterceptor] +} + +public enum Burrow_NetworksClientMetadata { + public static let serviceDescriptor = GRPCServiceDescriptor( + name: "Networks", + fullName: "burrow.Networks", + methods: [ + Burrow_NetworksClientMetadata.Methods.networkAdd, + Burrow_NetworksClientMetadata.Methods.networkList, + Burrow_NetworksClientMetadata.Methods.networkReorder, + Burrow_NetworksClientMetadata.Methods.networkDelete, + ] + ) + + public enum Methods { + public static let networkAdd = GRPCMethodDescriptor( + name: "NetworkAdd", + path: "/burrow.Networks/NetworkAdd", + type: GRPCCallType.unary + ) + + public static let networkList = GRPCMethodDescriptor( + name: "NetworkList", + path: "/burrow.Networks/NetworkList", + type: GRPCCallType.serverStreaming + ) + + public static let networkReorder = GRPCMethodDescriptor( + name: "NetworkReorder", + path: "/burrow.Networks/NetworkReorder", + type: GRPCCallType.unary + ) + + public static let networkDelete = GRPCMethodDescriptor( + name: "NetworkDelete", + path: "/burrow.Networks/NetworkDelete", + type: GRPCCallType.unary + ) + } +} + diff --git a/Apple/Core/Client/Generated/burrow.pb.swift b/Apple/Core/Client/Generated/burrow.pb.swift new file mode 100644 index 0000000..fccd769 --- /dev/null +++ b/Apple/Core/Client/Generated/burrow.pb.swift @@ -0,0 +1,598 @@ +// DO NOT EDIT. +// swift-format-ignore-file +// swiftlint:disable all +// +// Generated by the Swift generator plugin for the protocol buffer compiler. +// Source: burrow.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/apple/swift-protobuf/ + +import Foundation +import SwiftProtobuf + +// If the compiler emits an error on this type, it is because this file +// was generated by a version of the `protoc` Swift plug-in that is +// incompatible with the version of SwiftProtobuf to which you are linking. 
+// Please ensure that you are building against the same version of the API +// that was used to generate this file. +fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { + struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} + typealias Version = _2 +} + +public enum Burrow_NetworkType: SwiftProtobuf.Enum, Swift.CaseIterable { + public typealias RawValue = Int + case wireGuard // = 0 + case tailnet // = 1 + case UNRECOGNIZED(Int) + + public init() { + self = .wireGuard + } + + public init?(rawValue: Int) { + switch rawValue { + case 0: self = .wireGuard + case 1: self = .tailnet + default: self = .UNRECOGNIZED(rawValue) + } + } + + public var rawValue: Int { + switch self { + case .wireGuard: return 0 + case .tailnet: return 1 + case .UNRECOGNIZED(let i): return i + } + } + + // The compiler won't synthesize support with the UNRECOGNIZED case. + public static let allCases: [Burrow_NetworkType] = [ + .wireGuard, + .tailnet, + ] + +} + +public enum Burrow_State: SwiftProtobuf.Enum, Swift.CaseIterable { + public typealias RawValue = Int + case stopped // = 0 + case running // = 1 + case UNRECOGNIZED(Int) + + public init() { + self = .stopped + } + + public init?(rawValue: Int) { + switch rawValue { + case 0: self = .stopped + case 1: self = .running + default: self = .UNRECOGNIZED(rawValue) + } + } + + public var rawValue: Int { + switch self { + case .stopped: return 0 + case .running: return 1 + case .UNRECOGNIZED(let i): return i + } + } + + // The compiler won't synthesize support with the UNRECOGNIZED case. + public static let allCases: [Burrow_State] = [ + .stopped, + .running, + ] + +} + +public struct Burrow_NetworkReorderRequest: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
+ + public var id: Int32 = 0 + + public var index: Int32 = 0 + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_WireGuardPeer: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var endpoint: String = String() + + public var subnet: [String] = [] + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_WireGuardNetwork: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var address: String = String() + + public var dns: String = String() + + public var peer: [Burrow_WireGuardPeer] = [] + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_NetworkDeleteRequest: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var id: Int32 = 0 + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_Network: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var id: Int32 = 0 + + public var type: Burrow_NetworkType = .wireGuard + + public var payload: Data = Data() + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_NetworkListResponse: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var network: [Burrow_Network] = [] + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_Empty: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +public struct Burrow_TunnelStatusResponse: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + public var state: Burrow_State = .stopped + + public var start: SwiftProtobuf.Google_Protobuf_Timestamp { + get {return _start ?? SwiftProtobuf.Google_Protobuf_Timestamp()} + set {_start = newValue} + } + /// Returns true if `start` has been explicitly set. + public var hasStart: Bool {return self._start != nil} + /// Clears the value of `start`. Subsequent reads from it will return its default value. + public mutating func clearStart() {self._start = nil} + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} + + fileprivate var _start: SwiftProtobuf.Google_Protobuf_Timestamp? = nil +} + +public struct Burrow_TunnelConfigurationResponse: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
+ + public var addresses: [String] = [] + + public var mtu: Int32 = 0 + + public var routes: [String] = [] + + public var dnsServers: [String] = [] + + public var searchDomains: [String] = [] + + public var includeDefaultRoute: Bool = false + + public var unknownFields = SwiftProtobuf.UnknownStorage() + + public init() {} +} + +// MARK: - Code below here is support for the SwiftProtobuf runtime. + +fileprivate let _protobuf_package = "burrow" + +extension Burrow_NetworkType: SwiftProtobuf._ProtoNameProviding { + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "WireGuard"), + 1: .same(proto: "Tailnet"), + ] +} + +extension Burrow_State: SwiftProtobuf._ProtoNameProviding { + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "Stopped"), + 1: .same(proto: "Running"), + ] +} + +extension Burrow_NetworkReorderRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".NetworkReorderRequest" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "id"), + 2: .same(proto: "index"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self.id) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &self.index) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if self.id != 0 { + try visitor.visitSingularInt32Field(value: self.id, fieldNumber: 1) + } + if self.index != 0 { + try visitor.visitSingularInt32Field(value: self.index, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_NetworkReorderRequest, rhs: Burrow_NetworkReorderRequest) -> Bool { + if lhs.id != rhs.id {return false} + if lhs.index != rhs.index {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_WireGuardPeer: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".WireGuardPeer" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "endpoint"), + 2: .same(proto: "subnet"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.endpoint) }() + case 2: try { try decoder.decodeRepeatedStringField(value: &self.subnet) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.endpoint.isEmpty { + try visitor.visitSingularStringField(value: self.endpoint, fieldNumber: 1) + } + if !self.subnet.isEmpty { + try visitor.visitRepeatedStringField(value: self.subnet, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_WireGuardPeer, rhs: Burrow_WireGuardPeer) -> Bool { + if lhs.endpoint != rhs.endpoint {return false} + if lhs.subnet != rhs.subnet {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_WireGuardNetwork: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".WireGuardNetwork" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "address"), + 2: .same(proto: "dns"), + 3: .same(proto: "peer"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.address) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.dns) }() + case 3: try { try decoder.decodeRepeatedMessageField(value: &self.peer) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.address.isEmpty { + try visitor.visitSingularStringField(value: self.address, fieldNumber: 1) + } + if !self.dns.isEmpty { + try visitor.visitSingularStringField(value: self.dns, fieldNumber: 2) + } + if !self.peer.isEmpty { + try visitor.visitRepeatedMessageField(value: self.peer, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_WireGuardNetwork, rhs: Burrow_WireGuardNetwork) -> Bool { + if lhs.address != rhs.address {return false} + if lhs.dns != rhs.dns {return false} + if lhs.peer != rhs.peer {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_NetworkDeleteRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".NetworkDeleteRequest" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "id"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self.id) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if self.id != 0 { + try visitor.visitSingularInt32Field(value: self.id, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_NetworkDeleteRequest, rhs: Burrow_NetworkDeleteRequest) -> Bool { + if lhs.id != rhs.id {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_Network: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".Network" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "id"), + 2: .same(proto: "type"), + 3: .same(proto: "payload"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self.id) }() + case 2: try { try decoder.decodeSingularEnumField(value: &self.type) }() + case 3: try { try decoder.decodeSingularBytesField(value: &self.payload) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if self.id != 0 { + try visitor.visitSingularInt32Field(value: self.id, fieldNumber: 1) + } + if self.type != .wireGuard { + try visitor.visitSingularEnumField(value: self.type, fieldNumber: 2) + } + if !self.payload.isEmpty { + try visitor.visitSingularBytesField(value: self.payload, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_Network, rhs: Burrow_Network) -> Bool { + if lhs.id != rhs.id {return false} + if lhs.type != rhs.type {return false} + if lhs.payload != rhs.payload {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_NetworkListResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".NetworkListResponse" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "network"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedMessageField(value: &self.network) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.network.isEmpty { + try visitor.visitRepeatedMessageField(value: self.network, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_NetworkListResponse, rhs: Burrow_NetworkListResponse) -> Bool { + if lhs.network != rhs.network {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_Empty: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".Empty" + public static let _protobuf_nameMap = SwiftProtobuf._NameMap() + + public mutating func decodeMessage(decoder: inout D) throws { + // Load everything into unknown fields + while try decoder.nextFieldNumber() != nil {} + } + + public func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_Empty, rhs: Burrow_Empty) -> Bool { + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_TunnelStatusResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".TunnelStatusResponse" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "state"), + 2: .same(proto: "start"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularEnumField(value: &self.state) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._start) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if self.state != .stopped { + try visitor.visitSingularEnumField(value: self.state, fieldNumber: 1) + } + try { if let v = self._start { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_TunnelStatusResponse, rhs: Burrow_TunnelStatusResponse) -> Bool { + if lhs.state != rhs.state {return false} + if lhs._start != rhs._start {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Burrow_TunnelConfigurationResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = _protobuf_package + ".TunnelConfigurationResponse" + public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "addresses"), + 2: .same(proto: "mtu"), + 3: .same(proto: "routes"), + 4: .standard(proto: "dns_servers"), + 5: .standard(proto: "search_domains"), + 6: .standard(proto: "include_default_route"), + ] + + public mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedStringField(value: &self.addresses) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &self.mtu) }() + case 3: try { try decoder.decodeRepeatedStringField(value: &self.routes) }() + case 4: try { try decoder.decodeRepeatedStringField(value: &self.dnsServers) }() + case 5: try { try decoder.decodeRepeatedStringField(value: &self.searchDomains) }() + case 6: try { try decoder.decodeSingularBoolField(value: &self.includeDefaultRoute) }() + default: break + } + } + } + + public func traverse(visitor: inout V) throws { + if !self.addresses.isEmpty { + try visitor.visitRepeatedStringField(value: self.addresses, fieldNumber: 1) + } + if self.mtu != 0 { + try visitor.visitSingularInt32Field(value: self.mtu, fieldNumber: 2) + } + if !self.routes.isEmpty { + try visitor.visitRepeatedStringField(value: self.routes, fieldNumber: 3) + } + if !self.dnsServers.isEmpty { + try visitor.visitRepeatedStringField(value: self.dnsServers, fieldNumber: 4) + } + if !self.searchDomains.isEmpty { + try visitor.visitRepeatedStringField(value: self.searchDomains, fieldNumber: 5) + } + if self.includeDefaultRoute { + try visitor.visitSingularBoolField(value: self.includeDefaultRoute, fieldNumber: 6) + } + try unknownFields.traverse(visitor: &visitor) + } + + public static func ==(lhs: Burrow_TunnelConfigurationResponse, rhs: Burrow_TunnelConfigurationResponse) -> Bool { + if lhs.addresses != rhs.addresses {return false} + if lhs.mtu != rhs.mtu {return false} + if lhs.routes != rhs.routes {return false} + if lhs.dnsServers != rhs.dnsServers {return false} + if lhs.searchDomains != rhs.searchDomains {return false} + if lhs.includeDefaultRoute != rhs.includeDefaultRoute {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} diff --git a/Apple/Core/Client/google/protobuf/timestamp.proto 
b/Apple/Core/Client/google/protobuf/timestamp.proto new file mode 100644 index 0000000..7db2f6a --- /dev/null +++ b/Apple/Core/Client/google/protobuf/timestamp.proto @@ -0,0 +1,64 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/timestamppb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a 24-hour linear smear. +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from RFC +// 3339 date strings. +message Timestamp { + // Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. + // Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 inclusive. 
+ int32 nanos = 2; +} diff --git a/Apple/Core/Client/grpc-swift-config.json b/Apple/Core/Client/grpc-swift-config.json deleted file mode 100644 index 2d89698..0000000 --- a/Apple/Core/Client/grpc-swift-config.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "invocations": [ - { - "protoFiles": [ - "burrow.proto", - ], - "server": false, - "visibility": "public" - } - ] -} diff --git a/Apple/Core/Client/swift-protobuf-config.json b/Apple/Core/Client/swift-protobuf-config.json deleted file mode 100644 index 87aaec3..0000000 --- a/Apple/Core/Client/swift-protobuf-config.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "invocations": [ - { - "protoFiles": [ - "burrow.proto", - ], - "visibility": "public" - } - ] -} diff --git a/Apple/NetworkExtension/PacketTunnelProvider.swift b/Apple/NetworkExtension/PacketTunnelProvider.swift index 54b813c..3f3d8b4 100644 --- a/Apple/NetworkExtension/PacketTunnelProvider.swift +++ b/Apple/NetworkExtension/PacketTunnelProvider.swift @@ -1,21 +1,35 @@ import AsyncAlgorithms import BurrowConfiguration import BurrowCore +import GRPC import libburrow -@preconcurrency import NetworkExtension +import NetworkExtension import os -// Xcode 26 imports `startTunnel(options:)` as `[String: NSObject]?` and treats the -// override as crossing a nonisolated boundary. The extension target does not -// mutate or forward these Cocoa objects, so treat them as an unchecked escape hatch. 
-extension NSObject: @retroactive @unchecked Sendable {} +private final class SendableCallbackBox: @unchecked Sendable { + let callback: Callback -class PacketTunnelProvider: NEPacketTunnelProvider { + init(_ callback: Callback) { + self.callback = callback + } +} + +final class PacketTunnelProvider: NEPacketTunnelProvider, @unchecked Sendable { enum Error: Swift.Error { case missingTunnelConfiguration } - private static let logger = Logger.logger(for: PacketTunnelProvider.self) + private let logger = Logger.logger(for: PacketTunnelProvider.self) + private var packetCall: GRPCAsyncBidirectionalStreamingCall? + private var inboundPacketTask: Task? + private var outboundPacketTask: Task? + + private var client: TunnelClient { + get throws { try _client.get() } + } + private let _client: Result = Result { + try TunnelClient.unix(socketURL: Constants.socketURL) + } override init() { do { @@ -24,51 +38,289 @@ class PacketTunnelProvider: NEPacketTunnelProvider { databasePath: try Constants.databaseURL.path(percentEncoded: false) ) } catch { - Self.logger.error("Failed to spawn networking thread: \(error)") + logger.error("Failed to spawn networking thread: \(error)") } } - nonisolated override func startTunnel(options: [String: NSObject]? = nil) async throws { - do { - let client = try TunnelClient.unix(socketURL: Constants.socketURL) - let configuration = try await Array(client.tunnelConfiguration(.init()).prefix(1)).first - guard let settings = configuration?.settings else { - throw Error.missingTunnelConfiguration + override func startTunnel( + options: [String: NSObject]?, + completionHandler: @escaping (Swift.Error?) 
-> Void + ) { + let completion = SendableCallbackBox(completionHandler) + Task { + do { + _ = try await client.tunnelStart(.init()) + let configuration = try await Array(client.tunnelConfiguration(.init()).prefix(1)).first + guard let settings = configuration?.settings else { + throw Error.missingTunnelConfiguration + } + try await setTunnelNetworkSettings(settings) + try startPacketBridge() + logger.log("Started tunnel with network settings: \(settings)") + completion.callback(nil) + } catch { + logger.error("Failed to start tunnel: \(error)") + stopPacketBridge() + completion.callback(error) } - try await setTunnelNetworkSettings(settings) - _ = try await client.tunnelStart(.init()) - Self.logger.log("Started tunnel with network settings: \(settings)") - } catch { - Self.logger.error("Failed to start tunnel: \(error)") - throw error } } - nonisolated override func stopTunnel(with reason: NEProviderStopReason) async { - do { - let client = try TunnelClient.unix(socketURL: Constants.socketURL) - _ = try await client.tunnelStop(.init()) - Self.logger.log("Stopped client") - } catch { - Self.logger.error("Failed to stop tunnel: \(error)") + override func stopTunnel( + with reason: NEProviderStopReason, + completionHandler: @escaping () -> Void + ) { + let completion = SendableCallbackBox(completionHandler) + Task { + stopPacketBridge() + do { + _ = try await client.tunnelStop(.init()) + logger.log("Stopped client") + } catch { + logger.error("Failed to stop tunnel: \(error)") + } + completion.callback() + } + } +} + +extension PacketTunnelProvider { + private func startPacketBridge() throws { + stopPacketBridge() + + let packetClient = TunnelPacketClient.unix(socketURL: try Constants.socketURL) + let call = packetClient.makeTunnelPacketsCall() + self.packetCall = call + + inboundPacketTask = Task { [weak self] in + guard let self else { return } + do { + for try await packet in call.responseStream { + let payload = packet.payload + self.packetFlow.writePackets( + 
[payload], + withProtocols: [Self.protocolNumber(for: payload)] + ) + } + } catch { + guard !Task.isCancelled else { return } + self.logger.error("Tunnel packet receive loop failed: \(error)") + } + } + + outboundPacketTask = Task { [weak self] in + guard let self else { return } + defer { call.requestStream.finish() } + do { + while !Task.isCancelled { + let packets = await self.readPacketsBatch() + for (payload, _) in packets { + var packet = Burrow_TunnelPacket() + packet.payload = payload + try await call.requestStream.send(packet) + } + } + } catch { + guard !Task.isCancelled else { return } + self.logger.error("Tunnel packet send loop failed: \(error)") + } + } + } + + private func stopPacketBridge() { + inboundPacketTask?.cancel() + inboundPacketTask = nil + outboundPacketTask?.cancel() + outboundPacketTask = nil + packetCall?.cancel() + packetCall = nil + } + + private func readPacketsBatch() async -> [(Data, NSNumber)] { + await withCheckedContinuation { continuation in + packetFlow.readPackets { packets, protocols in + continuation.resume(returning: Array(zip(packets, protocols))) + } + } + } + + private static func protocolNumber(for payload: Data) -> NSNumber { + guard let version = payload.first.map({ $0 >> 4 }) else { + return NSNumber(value: AF_INET) + } + switch version { + case 6: + return NSNumber(value: AF_INET6) + default: + return NSNumber(value: AF_INET) } } } extension Burrow_TunnelConfigurationResponse { fileprivate var settings: NEPacketTunnelNetworkSettings { - let ipv6Addresses = addresses.filter { IPv6Address($0) != nil } + let parsedAddresses = addresses.compactMap(ParsedTunnelAddress.init(rawValue:)) + let ipv4Addresses = parsedAddresses.compactMap(\.ipv4Address) + let ipv6Addresses = parsedAddresses.compactMap(\.ipv6Address) + let parsedRoutes = routes.compactMap(ParsedTunnelRoute.init(rawValue:)) + var ipv4Routes = parsedRoutes.compactMap(\.ipv4Route) + var ipv6Routes = parsedRoutes.compactMap(\.ipv6Route) + if includeDefaultRoute { 
+ ipv4Routes.append(.default()) + ipv6Routes.append(.default()) + } let settings = NEPacketTunnelNetworkSettings(tunnelRemoteAddress: "1.1.1.1") settings.mtu = NSNumber(value: mtu) - settings.ipv4Settings = NEIPv4Settings( - addresses: addresses.filter { IPv4Address($0) != nil }, - subnetMasks: ["255.255.255.0"] - ) - settings.ipv6Settings = NEIPv6Settings( - addresses: ipv6Addresses, - networkPrefixLengths: ipv6Addresses.map { _ in 64 } - ) + if !ipv4Addresses.isEmpty { + let ipv4Settings = NEIPv4Settings( + addresses: ipv4Addresses.map(\.address), + subnetMasks: ipv4Addresses.map(\.subnetMask) + ) + if !ipv4Routes.isEmpty { + ipv4Settings.includedRoutes = ipv4Routes + } + settings.ipv4Settings = ipv4Settings + } + if !ipv6Addresses.isEmpty { + let ipv6Settings = NEIPv6Settings( + addresses: ipv6Addresses.map(\.address), + networkPrefixLengths: ipv6Addresses.map(\.prefixLength) + ) + if !ipv6Routes.isEmpty { + ipv6Settings.includedRoutes = ipv6Routes + } + settings.ipv6Settings = ipv6Settings + } + if !dnsServers.isEmpty { + let dnsSettings = NEDNSSettings(servers: dnsServers) + if !searchDomains.isEmpty { + dnsSettings.matchDomains = searchDomains + } + settings.dnsSettings = dnsSettings + } return settings } } + +private struct ParsedTunnelAddress { + struct IPv4AddressSetting { + let address: String + let subnetMask: String + } + + struct IPv6AddressSetting { + let address: String + let prefixLength: NSNumber + } + + let ipv4Address: IPv4AddressSetting? + let ipv6Address: IPv6AddressSetting? + + init?(rawValue: String) { + let components = rawValue.split(separator: "/", maxSplits: 1).map(String.init) + let address = components.first?.trimmingCharacters(in: .whitespacesAndNewlines) ?? "" + guard !address.isEmpty else { + return nil + } + + let prefix = components.count == 2 ? Int(components[1]) : nil + if IPv4Address(address) != nil { + let prefixLength = prefix ?? 32 + guard (0 ... 
32).contains(prefixLength) else { + return nil + } + ipv4Address = IPv4AddressSetting( + address: address, + subnetMask: Self.ipv4SubnetMask(prefixLength: prefixLength) + ) + ipv6Address = nil + return + } + + if IPv6Address(address) != nil { + let prefixLength = prefix ?? 128 + guard (0 ... 128).contains(prefixLength) else { + return nil + } + ipv4Address = nil + ipv6Address = IPv6AddressSetting( + address: address, + prefixLength: NSNumber(value: prefixLength) + ) + return + } + + return nil + } + + private static func ipv4SubnetMask(prefixLength: Int) -> String { + guard prefixLength > 0 else { + return "0.0.0.0" + } + let mask = UInt32.max << (32 - prefixLength) + let octets = [ + (mask >> 24) & 0xff, + (mask >> 16) & 0xff, + (mask >> 8) & 0xff, + mask & 0xff, + ] + return octets.map(String.init).joined(separator: ".") + } +} + +private struct ParsedTunnelRoute { + let ipv4Route: NEIPv4Route? + let ipv6Route: NEIPv6Route? + + init?(rawValue: String) { + let components = rawValue.split(separator: "/", maxSplits: 1).map(String.init) + let address = components.first?.trimmingCharacters(in: .whitespacesAndNewlines) ?? "" + guard !address.isEmpty else { + return nil + } + + let prefix = components.count == 2 ? Int(components[1]) : nil + if IPv4Address(address) != nil { + let prefixLength = prefix ?? 32 + guard (0 ... 32).contains(prefixLength) else { + return nil + } + ipv4Route = NEIPv4Route( + destinationAddress: address, + subnetMask: Self.ipv4SubnetMask(prefixLength: prefixLength) + ) + ipv6Route = nil + return + } + + if IPv6Address(address) != nil { + let prefixLength = prefix ?? 128 + guard (0 ... 
128).contains(prefixLength) else { + return nil + } + ipv4Route = nil + ipv6Route = NEIPv6Route( + destinationAddress: address, + networkPrefixLength: NSNumber(value: prefixLength) + ) + return + } + + return nil + } + + private static func ipv4SubnetMask(prefixLength: Int) -> String { + var mask = UInt32.max << (32 - prefixLength) + if prefixLength == 0 { + mask = 0 + } + let octets = [ + String((mask >> 24) & 0xff), + String((mask >> 16) & 0xff), + String((mask >> 8) & 0xff), + String(mask & 0xff), + ] + return octets.joined(separator: ".") + } +} diff --git a/Apple/NetworkExtension/libburrow/build-rust.sh b/Apple/NetworkExtension/libburrow/build-rust.sh index 3da8fae..5db2a2b 100755 --- a/Apple/NetworkExtension/libburrow/build-rust.sh +++ b/Apple/NetworkExtension/libburrow/build-rust.sh @@ -62,79 +62,36 @@ else CARGO_TARGET_SUBDIR="release" fi -RUSTUP_TOOLCHAIN="" if [[ -x "$(command -v rustup)" ]]; then - RUSTUP_TOOLCHAIN="$(rustup show active-toolchain | awk '{print $1}')" - if [[ -z "${RUSTUP_TOOLCHAIN}" ]]; then - echo 'error: Unable to determine active rustup toolchain' - exit 1 - fi - CARGO_BIN="$(rustup which --toolchain "${RUSTUP_TOOLCHAIN}" cargo)" - RUSTC_BIN="$(rustup which --toolchain "${RUSTUP_TOOLCHAIN}" rustc)" - CARGO_PATH="$(dirname "${CARGO_BIN}"):$(dirname "${RUSTC_BIN}"):/usr/bin" + CARGO_PATH="$(dirname $(rustup which cargo)):/usr/bin" else - CARGO_BIN="$(command -v cargo)" - CARGO_PATH="$(dirname "${CARGO_BIN}"):/usr/bin" + CARGO_PATH="$(dirname $(readlink -f $(which cargo))):/usr/bin" fi PROTOC=$(readlink -f $(which protoc)) CARGO_PATH="$(dirname $PROTOC):$CARGO_PATH" -if [[ -n "${RUSTC_WRAPPER:-}" && "${RUSTC_WRAPPER}" != /* ]]; then - WRAPPER_PATH="$(command -v "${RUSTC_WRAPPER}" || true)" - if [[ -n "${WRAPPER_PATH}" ]]; then - RUSTC_WRAPPER="${WRAPPER_PATH}" - fi -fi - -if [[ -x "$(command -v rustup)" ]]; then - for TARGET in "${RUST_TARGETS[@]}"; do - if ! 
rustup target list --installed | grep -qx "${TARGET}"; then - rustup target add --toolchain "${RUSTUP_TOOLCHAIN}" "${TARGET}" - fi - done -fi - # Run cargo without the various environment variables set by Xcode. # Those variables can confuse cargo and the build scripts it runs. -EXTRA_ENV=() -for VAR_NAME in HOME CARGO_HOME CARGO_TARGET_DIR RUSTUP_HOME RUSTC_WRAPPER SCCACHE_DIR CARGO_INCREMENTAL; do - if [[ -n "${!VAR_NAME:-}" ]]; then - EXTRA_ENV+=("${VAR_NAME}=${!VAR_NAME}") - fi -done -EFFECTIVE_CARGO_TARGET_DIR="${CARGO_TARGET_DIR:-${CONFIGURATION_TEMP_DIR}/target}" -BUILD_ENV=( +CARGO_ENV=( "PATH=$CARGO_PATH" "PROTOC=$PROTOC" - "CARGO_TARGET_DIR=${EFFECTIVE_CARGO_TARGET_DIR}" - "${EXTRA_ENV[@]}" + "CARGO_TARGET_DIR=${CONFIGURATION_TEMP_DIR}/target" ) -if [[ -n "${RUSTUP_TOOLCHAIN}" ]]; then - BUILD_ENV+=("RUSTUP_TOOLCHAIN=${RUSTUP_TOOLCHAIN}") + +if [[ -n "$IPHONEOS_DEPLOYMENT_TARGET" ]]; then + CARGO_ENV+=("IPHONEOS_DEPLOYMENT_TARGET=$IPHONEOS_DEPLOYMENT_TARGET") fi -if [[ -n "${RUSTC_BIN:-}" ]]; then - BUILD_ENV+=("RUSTC=${RUSTC_BIN}") + +if [[ -n "$MACOSX_DEPLOYMENT_TARGET" ]]; then + CARGO_ENV+=("MACOSX_DEPLOYMENT_TARGET=$MACOSX_DEPLOYMENT_TARGET") fi -if [[ -n "${IPHONEOS_DEPLOYMENT_TARGET:-}" ]]; then - BUILD_ENV+=("IPHONEOS_DEPLOYMENT_TARGET=${IPHONEOS_DEPLOYMENT_TARGET}") -fi -if [[ -n "${MACOSX_DEPLOYMENT_TARGET:-}" ]]; then - BUILD_ENV+=("MACOSX_DEPLOYMENT_TARGET=${MACOSX_DEPLOYMENT_TARGET}") -fi -echo "Using Rust toolchain: ${RUSTUP_TOOLCHAIN:-system}" -echo "Using cargo: ${CARGO_BIN}" -if [[ -n "${RUSTC_BIN:-}" ]]; then - echo "Using rustc: ${RUSTC_BIN}" -fi -if [[ -n "${RUSTC_WRAPPER:-}" ]]; then - echo "Using rustc wrapper: ${RUSTC_WRAPPER}" -fi -env -i "${BUILD_ENV[@]}" "${CARGO_BIN}" build "${CARGO_ARGS[@]}" + +env -i "${CARGO_ENV[@]}" cargo build "${CARGO_ARGS[@]}" mkdir -p "${BUILT_PRODUCTS_DIR}" # Use `lipo` to merge the architectures together into BUILT_PRODUCTS_DIR /usr/bin/xcrun --sdk $PLATFORM_NAME lipo \ - -create $(printf 
"${EFFECTIVE_CARGO_TARGET_DIR}/%q/${CARGO_TARGET_SUBDIR}/libburrow.a " "${RUST_TARGETS[@]}") \ + -create $(printf "${CONFIGURATION_TEMP_DIR}/target/%q/${CARGO_TARGET_SUBDIR}/libburrow.a " "${RUST_TARGETS[@]}") \ -output "${BUILT_PRODUCTS_DIR}/libburrow.a" diff --git a/Apple/UI/Assets.xcassets/HackClub.colorset/Contents.json b/Apple/UI/Assets.xcassets/HackClub.colorset/Contents.json deleted file mode 100644 index 911b4b1..0000000 --- a/Apple/UI/Assets.xcassets/HackClub.colorset/Contents.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "colors" : [ - { - "color" : { - "color-space" : "srgb", - "components" : { - "alpha" : "1.000", - "blue" : "0x50", - "green" : "0x37", - "red" : "0xEC" - } - }, - "idiom" : "universal" - } - ], - "info" : { - "author" : "xcode", - "version" : 1 - } -} diff --git a/Apple/UI/Assets.xcassets/HackClub.imageset/Contents.json b/Apple/UI/Assets.xcassets/HackClub.imageset/Contents.json deleted file mode 100644 index ddd0664..0000000 --- a/Apple/UI/Assets.xcassets/HackClub.imageset/Contents.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "images" : [ - { - "filename" : "flag-standalone-wtransparent.pdf", - "idiom" : "universal" - } - ], - "info" : { - "author" : "xcode", - "version" : 1 - } -} diff --git a/Apple/UI/Assets.xcassets/HackClub.imageset/flag-standalone-wtransparent.pdf b/Apple/UI/Assets.xcassets/HackClub.imageset/flag-standalone-wtransparent.pdf deleted file mode 100644 index 1506fe9..0000000 Binary files a/Apple/UI/Assets.xcassets/HackClub.imageset/flag-standalone-wtransparent.pdf and /dev/null differ diff --git a/Apple/UI/BurrowView.swift b/Apple/UI/BurrowView.swift index 96467c7..e15d3f7 100644 --- a/Apple/UI/BurrowView.swift +++ b/Apple/UI/BurrowView.swift @@ -1,67 +1,1709 @@ -import AuthenticationServices +import BurrowConfiguration +import Foundation import SwiftUI +#if canImport(AuthenticationServices) +import AuthenticationServices +#endif +#if canImport(UIKit) +import UIKit +#elseif canImport(AppKit) +import AppKit +#endif -#if 
!os(macOS) public struct BurrowView: View { - @Environment(\.webAuthenticationSession) - private var webAuthenticationSession + @State private var networkViewModel: NetworkViewModel + @State private var accountStore = NetworkAccountStore() + @State private var activeSheet: ConfigurationSheet? + @State private var didRunAutomation = false public var body: some View { NavigationStack { - VStack { - HStack { - Text("Networks") - .font(.largeTitle) - .fontWeight(.bold) - Spacer() - Menu { - Button("Hack Club", action: addHackClubNetwork) - Button("WireGuard", action: addWireGuardNetwork) - } label: { - Image(systemName: "plus.circle.fill") - .font(.title) - .accessibilityLabel("Add") + ScrollView { + VStack(alignment: .leading, spacing: 24) { + HStack(alignment: .top) { + VStack(alignment: .leading, spacing: 6) { + Text("Burrow") + .font(.largeTitle) + .fontWeight(.bold) + if showsHeaderSubtitle { + Text("Networks and accounts") + .font(.headline) + .foregroundStyle(.secondary) + } + } + if showsToolbarAddMenu { + Spacer() + Menu { + Button("Add WireGuard Network") { + activeSheet = .wireGuard + } + Button("Save Tor Account") { + activeSheet = .tor + } + Button("Add Tailnet Account") { + activeSheet = .tailnet + } + } label: { + Image(systemName: "plus.circle.fill") + .font(.title) + .accessibilityLabel("Add") + } + } + } + .padding(.top) + + if showsInlineQuickActions { + quickAddSection + } + + VStack(alignment: .leading, spacing: 12) { + sectionHeader( + title: "Networks", + detail: showsInlineQuickActions + ? nil + : "Stored daemon networks and their active account selectors" + ) + if let connectionError = networkViewModel.connectionError { + Text(connectionError) + .font(.footnote) + .foregroundStyle(.secondary) + } + NetworkCarouselView(networks: networkViewModel.cards) + } + + if showsAccountsSection { + VStack(alignment: .leading, spacing: 12) { + sectionHeader( + title: "Accounts", + detail: showsInlineQuickActions + ? 
nil + : "Per-network identities and sign-in state" + ) + if accountStore.accounts.isEmpty { + ContentUnavailableView( + "No Accounts Yet", + systemImage: "person.crop.circle.badge.plus", + description: Text("Save a Tor account or sign in to Tailnet to keep network identities ready on this device.") + ) + .frame(maxWidth: .infinity, minHeight: 180) + } else { + LazyVStack(spacing: 12) { + ForEach(accountStore.accounts) { account in + AccountRowView( + account: account, + hasSecret: accountStore.hasStoredSecret(for: account) + ) + } + } + } + } + } + + VStack(alignment: .leading, spacing: 8) { + sectionHeader( + title: "Tunnel", + detail: showsInlineQuickActions ? nil : "Current system extension state" + ) + TunnelStatusView() + TunnelButton() + .padding(.bottom) } } - .padding(.top) - NetworkCarouselView() - Spacer() - TunnelStatusView() - TunnelButton() - .padding(.bottom) + .padding() } - .padding() - .handleOAuth2Callback() + } + .sheet(item: $activeSheet) { sheet in + ConfigurationSheetView( + sheet: sheet, + networkViewModel: networkViewModel, + accountStore: accountStore + ) + } + .onAppear { + runAutomationIfNeeded() } } public init() { + _networkViewModel = State( + initialValue: NetworkViewModel( + socketURLResult: Result { try Constants.socketURL } + ) + ) } - private func addHackClubNetwork() { - Task { - try await authenticateWithSlack() + private func runAutomationIfNeeded() { + guard !didRunAutomation, + let automation = BurrowAutomationConfig.current, + automation.action == .tailnetLogin || automation.action == .tailnetProbe + else { + return + } + didRunAutomation = true + activeSheet = .tailnet + } + + @ViewBuilder + private var quickAddSection: some View { + VStack(alignment: .leading, spacing: 12) { + sectionHeader(title: "Add", detail: nil) + VStack(spacing: 12) { + ForEach(ConfigurationSheet.allCases) { sheet in + QuickAddButton(sheet: sheet) { + activeSheet = sheet + } + } + } } } - private func addWireGuardNetwork() { + @ViewBuilder + private 
func sectionHeader(title: String, detail: String?) -> some View { + VStack(alignment: .leading, spacing: 4) { + Text(title) + .font(.title2.weight(.semibold)) + if let detail, !detail.isEmpty { + Text(detail) + .font(.subheadline) + .foregroundStyle(.secondary) + } + } } - private func authenticateWithSlack() async throws { - guard - let authorizationEndpoint = URL(string: "https://slack.com/openid/connect/authorize"), - let tokenEndpoint = URL(string: "https://slack.com/api/openid.connect.token"), - let redirectURI = URL(string: "https://burrow.rs/callback/oauth2") else { return } - let session = OAuth2.Session( - authorizationEndpoint: authorizationEndpoint, - tokenEndpoint: tokenEndpoint, - redirectURI: redirectURI, - scopes: ["openid", "profile"], - clientID: "2210535565.6884042183125", - clientSecret: "2793c8a5255cae38830934c664eeb62d" - ) - let response = try await session.authorize(webAuthenticationSession) + private var showsInlineQuickActions: Bool { + #if os(iOS) + true + #else + false + #endif } + + private var showsToolbarAddMenu: Bool { + !showsInlineQuickActions + } + + private var showsHeaderSubtitle: Bool { + !showsInlineQuickActions + } + + private var showsAccountsSection: Bool { + #if os(iOS) + !accountStore.accounts.isEmpty + #else + true + #endif + } +} + +private enum ConfigurationSheet: String, CaseIterable, Identifiable { + case wireGuard + case tor + case tailnet + + var id: String { rawValue } + + var kind: AccountNetworkKind { + switch self { + case .wireGuard: .wireGuard + case .tor: .tor + case .tailnet: .tailnet + } + } + + var iconName: String { + switch self { + case .wireGuard: + "wave.3.right" + case .tor: + "shield.lefthalf.filled.badge.checkmark" + case .tailnet: + "network.badge.shield.half.filled" + } + } + + var quickActionTitle: String { + switch self { + case .wireGuard: + "WireGuard" + case .tor: + "Tor" + case .tailnet: + "Tailnet" + } + } + + var quickActionSubtitle: String { + switch self { + case .wireGuard: + "Import a 
tunnel" + case .tor: + "Save an Arti profile" + case .tailnet: + "Sign in or save a control plane" + } + } + + var quickActionColor: Color { + switch self { + case .wireGuard: + .blue + case .tor, .tailnet: + kind.accentColor + } + } +} + +private struct QuickAddButton: View { + let sheet: ConfigurationSheet + let action: () -> Void + + var body: some View { + Button(action: action) { + HStack(spacing: 14) { + Image(systemName: sheet.iconName) + .font(.title3.weight(.semibold)) + .frame(width: 24) + + VStack(alignment: .leading, spacing: 4) { + Text(sheet.quickActionTitle) + .font(.headline) + Text(sheet.quickActionSubtitle) + .font(.caption) + .opacity(0.88) + } + + Spacer() + } + .frame(maxWidth: .infinity, minHeight: 64, alignment: .leading) + } + .accessibilityIdentifier("quick-add-\(sheet.rawValue)") + .buttonStyle(.floating(color: sheet.quickActionColor, cornerRadius: 18)) + } +} + +private struct AccountDraft { + var title = "" + var accountName = "" + var identityName = "" + var wireGuardConfig = "" + + var discoveryEmail = "" + var authority = "" + var tailnet = "" + var hostname = ProcessInfo.processInfo.hostName + var username = "" + var secret = "" + var authMode: AccountAuthMode = .none + + var torAddresses = "100.64.0.2/32" + var torDNS = "1.1.1.1, 1.0.0.1" + var torMTU = "1400" + var torListen = "127.0.0.1:9040" + + init(sheet: ConfigurationSheet) { + switch sheet { + case .wireGuard: + break + case .tor: + title = "Default Tor" + accountName = "default" + identityName = "apple" + case .tailnet: + title = "Tailnet" + accountName = "default" + identityName = "apple" + authority = TailnetProvider.tailscale.defaultAuthority ?? 
"" + authMode = .web + } + } +} + +private struct ConfigurationSheetView: View { + @Environment(\.dismiss) private var dismiss + + let sheet: ConfigurationSheet + let networkViewModel: NetworkViewModel + let accountStore: NetworkAccountStore + + @State private var draft: AccountDraft + @State private var isSubmitting = false + @State private var errorMessage: String? + @State private var discoveryStatus: TailnetDiscoveryResponse? + @State private var discoveryError: String? + @State private var isDiscoveringTailnet = false + @State private var authorityProbeStatus: TailnetAuthorityProbeStatus? + @State private var authorityProbeError: String? + @State private var isProbingAuthority = false + @State private var tailnetLoginStatus: TailnetLoginStatus? + @State private var tailnetLoginError: String? + @State private var tailnetLoginSessionID: String? + @State private var isStartingTailnetLogin = false + @State private var tailnetPresentedAuthURL: URL? + @State private var preserveTailnetLoginSession = false + @State private var usesCustomTailnetAuthority = false + @State private var showsAdvancedTailnetSettings = false + @State private var browserAuthenticator = TailnetBrowserAuthenticator() + @State private var tailnetLoginPollTask: Task? + @State private var tailnetDiscoveryTask: Task? + @State private var tailnetProbeTask: Task? 
+ @State private var didRunAutomation = false + + init( + sheet: ConfigurationSheet, + networkViewModel: NetworkViewModel, + accountStore: NetworkAccountStore + ) { + self.sheet = sheet + self.networkViewModel = networkViewModel + self.accountStore = accountStore + _draft = State(initialValue: AccountDraft(sheet: sheet)) + } + + var body: some View { + NavigationStack { + Form { + Section { + sheetSummaryCard + } + .listRowInsets(.init(top: 4, leading: 0, bottom: 4, trailing: 0)) + .listRowBackground(Color.clear) + + if showsIdentitySection { + Section("Identity") { + identityFields + } + } + + switch sheet { + case .wireGuard: + Section("WireGuard Configuration") { + TextEditor(text: $draft.wireGuardConfig) + .font(.body.monospaced()) + .frame(minHeight: wireGuardEditorHeight) + .contextMenu { + wireGuardContextActions + } + } + case .tor: + Section("Tor Preferences") { + TextField("Virtual Addresses", text: $draft.torAddresses) + TextField("DNS Resolvers", text: $draft.torDNS) + TextField("MTU", text: $draft.torMTU) + TextField("Transparent Listener", text: $draft.torListen) + } + case .tailnet: + tailnetSections + } + + if let errorMessage { + Section { + Text(errorMessage) + .foregroundStyle(.red) + } + } + } + .navigationTitle(sheet.kind.title) + #if os(iOS) + .navigationBarTitleDisplayMode(.inline) + #endif + .toolbar { + ToolbarItem(placement: .cancellationAction) { + Button("Cancel") { + Task { @MainActor in + await cancelTailnetLoginIfNeeded() + dismiss() + } + } + } + #if os(iOS) + ToolbarItem(placement: .topBarTrailing) { + Menu { + sheetMenuActions + } label: { + Image(systemName: "ellipsis.circle") + } + .accessibilityLabel("More") + } + #else + ToolbarItem(placement: .primaryAction) { + Menu { + sheetMenuActions + } label: { + Image(systemName: "ellipsis.circle") + } + .accessibilityLabel("More") + } + #endif + if !showsBottomActionButton { + ToolbarItem(placement: .confirmationAction) { + Button(confirmationTitle) { + submit() + } + 
.disabled(isSubmitting || submissionDisabled) + } + } + } + } + #if os(macOS) + .frame(minWidth: 520, minHeight: 620) + #endif + .safeAreaInset(edge: .bottom) { + if showsBottomActionButton { + bottomActionBar + } + } + .onAppear { + runAutomationIfNeeded() + } + .onChange(of: draft.authority) { _, _ in + resetAuthorityProbe() + if sheet == .tailnet, usesCustomTailnetAuthority { + scheduleTailnetAuthorityProbe() + } + } + .onChange(of: draft.discoveryEmail) { _, _ in + resetTailnetDiscoveryFeedback() + if sheet == .tailnet, !usesCustomTailnetAuthority { + scheduleTailnetDiscovery() + } + } + .onChange(of: draft.authMode) { _, newMode in + guard newMode != .web else { return } + Task { @MainActor in + await cancelTailnetLoginIfNeeded() + } + } + .onDisappear { + tailnetLoginPollTask?.cancel() + tailnetDiscoveryTask?.cancel() + tailnetProbeTask?.cancel() + browserAuthenticator.cancel() + if !preserveTailnetLoginSession { + Task { @MainActor in + await cancelTailnetLoginIfNeeded() + } + } + } + } + + @ViewBuilder + private var identityFields: some View { + TextField("Title", text: $draft.title) + TextField("Account", text: $draft.accountName) + TextField("Identity", text: $draft.identityName) + if sheet == .tailnet { + TextField("Hostname", text: $draft.hostname) + .burrowLoginField() + .autocorrectionDisabled() + } + } + + @ViewBuilder + private var tailnetSections: some View { + Section("Connection") { + TextField("Email address", text: $draft.discoveryEmail) + .burrowEmailField() + .burrowLoginField() + .autocorrectionDisabled() + .accessibilityIdentifier("tailnet-discovery-email") + .submitLabel(.continue) + .onSubmit { + if !usesCustomTailnetAuthority { + scheduleTailnetDiscovery(immediate: true) + } + } + + tailnetServerCard + + if showsAdvancedTailnetSettings { + if usesCustomTailnetAuthority { + TextField("Server URL", text: $draft.authority) + .burrowLoginField() + .autocorrectionDisabled() + .accessibilityIdentifier("tailnet-authority") + } else { + 
TextField("Tailnet", text: $draft.tailnet) + .burrowLoginField() + .autocorrectionDisabled() + .accessibilityIdentifier("tailnet-name") + } + } + } + + Section("Authentication") { + if showsAdvancedTailnetSettings { + Picker("Authentication", selection: $draft.authMode) { + ForEach(availableTailnetAuthModes) { mode in + Text(mode.title).tag(mode) + } + } + .pickerStyle(.menu) + } + + if draft.authMode == .web { + Button { + startTailnetLogin() + } label: { + Label { + Text(isStartingTailnetLogin ? "Starting Sign-In" : tailnetSignInActionTitle) + } icon: { + Image(systemName: isStartingTailnetLogin ? "hourglass" : "person.badge.key") + } + } + .buttonStyle(.borderless) + .disabled(isStartingTailnetLogin || tailnetLoginActionDisabled) + .accessibilityIdentifier("tailnet-start-sign-in") + + if let tailnetLoginStatus { + tailnetLoginCard(status: tailnetLoginStatus, failure: nil) + } else if let tailnetLoginError { + tailnetLoginCard(status: nil, failure: tailnetLoginError) + } + } else { + TextField("Username", text: $draft.username) + .burrowLoginField() + .autocorrectionDisabled() + if draft.authMode != .none { + SecureField( + draft.authMode == .password ? 
"Password" : "Preauth Key", + text: $draft.secret + ) + } + } + + Text(tailnetAuthenticationFootnote) + .font(.footnote) + .foregroundStyle(.secondary) + } + } + + private var sheetSummaryCard: some View { + VStack(alignment: .leading, spacing: 10) { + HStack(spacing: 12) { + Image(systemName: sheet.iconName) + .font(.title3.weight(.semibold)) + .foregroundStyle(sheetAccentColor) + .frame(width: 28, height: 28) + .background( + Circle() + .fill(sheetAccentColor.opacity(0.14)) + ) + + VStack(alignment: .leading, spacing: 3) { + Text(summaryTitle) + .font(.headline) + Text(sheet.kind.subtitle) + .font(.footnote) + .foregroundStyle(.secondary) + } + + Spacer() + } + + if let availabilityNote = sheet.kind.availabilityNote { + Text(availabilityNote) + .font(.footnote) + .foregroundStyle(.secondary) + } + + if sheet == .tailnet { + labeledValue("Server", tailnetServerDisplayLabel) + if let connectionSummary = tailnetConnectionSummary { + Text(connectionSummary) + .font(.footnote.weight(.medium)) + .foregroundStyle(tailnetConnectionSummaryColor) + } + if tailnetLoginStatus?.running == true { + HStack(spacing: 8) { + summaryBadge("Signed In") + } + } + } + } + .padding(14) + .background( + RoundedRectangle(cornerRadius: 18) + .fill(.thinMaterial) + ) + } + + private var tailnetServerCard: some View { + VStack(alignment: .leading, spacing: 8) { + HStack(alignment: .top, spacing: 12) { + VStack(alignment: .leading, spacing: 4) { + Text(usesCustomTailnetAuthority ? 
"Custom Server" : "Server") + .font(.subheadline.weight(.medium)) + Text(tailnetServerDisplayLabel) + .font(.footnote.monospaced()) + .foregroundStyle(.secondary) + .textSelection(.enabled) + } + + Spacer() + + if isDiscoveringTailnet || isProbingAuthority { + ProgressView() + .controlSize(.small) + } else if let summary = tailnetConnectionSummary { + Text(summary) + .font(.caption.weight(.medium)) + .foregroundStyle(tailnetConnectionSummaryColor) + } + } + + if let detail = tailnetServerDetail { + Text(detail) + .font(.footnote) + .foregroundStyle(.secondary) + } + } + .padding(12) + .background( + RoundedRectangle(cornerRadius: 16) + .fill(.thinMaterial) + ) + .accessibilityIdentifier("tailnet-server-card") + } + + private func tailnetAuthorityProbeCard( + status: TailnetAuthorityProbeStatus?, + failure: String? + ) -> some View { + VStack(alignment: .leading, spacing: 6) { + if let status { + Text(status.summary) + .font(.subheadline.weight(.medium)) + Text(status.detail ?? "HTTP \(status.statusCode) from \(status.authority)") + .font(.footnote) + .foregroundStyle(.secondary) + .textSelection(.enabled) + } else if let failure { + Text("Connection failed") + .font(.subheadline.weight(.medium)) + .foregroundStyle(.red) + Text(failure) + .font(.footnote) + .foregroundStyle(.secondary) + } + } + .padding(12) + .background( + RoundedRectangle(cornerRadius: 16) + .fill(.thinMaterial) + ) + .accessibilityIdentifier("tailnet-authority-probe-card") + } + + private func tailnetDiscoveryCard( + status: TailnetDiscoveryResponse?, + failure: String? + ) -> some View { + VStack(alignment: .leading, spacing: 6) { + if let status { + Text("Discovered Tailnet Server") + .font(.subheadline.weight(.medium)) + Text(status.authority) + .font(.footnote.monospaced()) + .foregroundStyle(.secondary) + .textSelection(.enabled) + Text(status.provider == .tailscale ? 
"Managed authority" : "Custom authority") + .font(.footnote) + .foregroundStyle(.secondary) + if let oidcIssuer = status.oidcIssuer { + Text("OIDC: \(oidcIssuer)") + .font(.footnote) + .foregroundStyle(.secondary) + .lineLimit(3) + .textSelection(.enabled) + } + } else if let failure { + Text("Discovery failed") + .font(.subheadline.weight(.medium)) + .foregroundStyle(.red) + Text(failure) + .font(.footnote) + .foregroundStyle(.secondary) + } + } + .padding(12) + .background( + RoundedRectangle(cornerRadius: 16) + .fill(.thinMaterial) + ) + .accessibilityIdentifier("tailnet-discovery-card") + } + + private func tailnetLoginCard( + status: TailnetLoginStatus?, + failure: String? + ) -> some View { + VStack(alignment: .leading, spacing: 6) { + if let status { + Text(status.running ? "Signed In" : status.needsLogin ? "Browser Sign-In Required" : "Checking Sign-In") + .font(.subheadline.weight(.medium)) + if let tailnetName = status.tailnetName, !tailnetName.isEmpty { + Text("Tailnet: \(tailnetName)") + .font(.footnote) + .foregroundStyle(.secondary) + } + if let selfDNSName = status.selfDNSName, !selfDNSName.isEmpty { + Text(selfDNSName) + .font(.footnote.monospaced()) + .foregroundStyle(.secondary) + .textSelection(.enabled) + } + if !status.tailnetIPs.isEmpty { + Text(status.tailnetIPs.joined(separator: ", ")) + .font(.footnote.monospaced()) + .foregroundStyle(.secondary) + .textSelection(.enabled) + } + if !status.health.isEmpty { + Text(status.health.joined(separator: " • ")) + .font(.footnote) + .foregroundStyle(.secondary) + } + } else if let failure { + Text("Sign-In failed") + .font(.subheadline.weight(.medium)) + .foregroundStyle(.red) + Text(failure) + .font(.footnote) + .foregroundStyle(.secondary) + } + } + .padding(12) + .background( + RoundedRectangle(cornerRadius: 16) + .fill(.thinMaterial) + ) + .accessibilityIdentifier("tailnet-login-card") + } + + private func summaryBadge(_ label: String) -> some View { + Text(label) + 
.font(.caption.weight(.medium)) + .foregroundStyle(.secondary) + .padding(.horizontal, 10) + .padding(.vertical, 5) + .background( + Capsule() + .fill(.white.opacity(0.5)) + ) + } + + @ViewBuilder + private var bottomActionBar: some View { + VStack(spacing: 0) { + Divider() + .overlay(.white.opacity(0.3)) + Button(confirmationTitle) { + submit() + } + .buttonStyle(.floating(color: sheetAccentColor, cornerRadius: 18)) + .disabled(isSubmitting || submissionDisabled) + .padding(.horizontal) + .padding(.top, 12) + .padding(.bottom, 8) + } + .background(.ultraThinMaterial) + } + + @ViewBuilder + private var sheetMenuActions: some View { + Button("Use Suggested Identity") { + applySuggestedIdentity() + } + + switch sheet { + case .wireGuard: + Button("Paste Configuration") { + pasteWireGuardConfiguration() + } + .disabled(clipboardString?.isEmpty ?? true) + + Button("Clear Configuration", role: .destructive) { + draft.wireGuardConfig = "" + } + .disabled(draft.wireGuardConfig.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty) + + case .tor: + Menu("Presets") { + Button("Recommended Tor Defaults") { + applyTorDefaults() + } + Button("Restore Suggested Identity") { + applySuggestedIdentity() + } + } + + case .tailnet: + Button(usesCustomTailnetAuthority ? "Use Automatic Server" : "Edit Custom Server") { + toggleTailnetAuthorityMode() + } + + Button(showsAdvancedTailnetSettings ? 
"Hide Advanced Settings" : "Show Advanced Settings") { + showsAdvancedTailnetSettings.toggle() + } + + if showsAdvancedTailnetSettings, availableTailnetAuthModes.count > 1 { + Menu("Authentication") { + ForEach(availableTailnetAuthModes) { mode in + Button(mode.title) { + draft.authMode = mode + if mode == .none { + draft.secret = "" + } + } + } + } + } + + Button("Refresh Server Lookup") { + scheduleTailnetDiscovery(immediate: true) + } + .disabled(usesCustomTailnetAuthority || normalizedOptional(draft.discoveryEmail) == nil) + } + } + + @ViewBuilder + private var wireGuardContextActions: some View { + Button("Paste Configuration") { + pasteWireGuardConfiguration() + } + .disabled(clipboardString?.isEmpty ?? true) + + Button("Clear", role: .destructive) { + draft.wireGuardConfig = "" + } + .disabled(draft.wireGuardConfig.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty) + } + + private var sheetAccentColor: Color { + switch sheet { + case .wireGuard: + .blue + case .tor, .tailnet: + sheet.kind.accentColor + } + } + + private var summaryTitle: String { + switch sheet { + case .wireGuard: + "Import WireGuard" + case .tor: + "Configure Tor" + case .tailnet: + "Connect Tailnet" + } + } + + private var showsBottomActionButton: Bool { + #if os(iOS) + return true + #else + return false + #endif + } + + private var showsIdentitySection: Bool { + switch sheet { + case .wireGuard, .tor: + return true + case .tailnet: + return showsAdvancedTailnetSettings + } + } + + private var wireGuardEditorHeight: CGFloat { + #if os(iOS) + 180 + #else + 220 + #endif + } + + private var confirmationTitle: String { + switch sheet { + case .wireGuard: + return "Add Network" + case .tor: + return "Save Account" + case .tailnet: + return "Save Account" + } + } + + private var tailnetLoginActionDisabled: Bool { + switch sheet { + case .tailnet: + if usesCustomTailnetAuthority { + return normalizedOptional(draft.authority) == nil + } + return false + case .wireGuard, .tor: + return true 
+ } + } + + private var submissionDisabled: Bool { + switch sheet { + case .wireGuard: + return draft.wireGuardConfig.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + case .tor: + return normalizedOptional(draft.accountName) == nil || normalizedOptional(draft.identityName) == nil + case .tailnet: + if normalizedOptional(draft.accountName) == nil || normalizedOptional(draft.identityName) == nil { + return true + } + if normalizedOptional(draft.authority) == nil { + return true + } + if draft.authMode == .web { + return tailnetLoginStatus?.running != true + } + if draft.authMode != .none && normalizedOptional(draft.secret) == nil { + return true + } + return false + } + } + + private var tailnetServerDisplayLabel: String { + if usesCustomTailnetAuthority { + return normalizedOptional(draft.authority) + ?? "Enter a custom Tailnet server" + } + return TailnetProvider.tailscale.defaultAuthority ?? "Tailscale managed" + } + + private var tailnetServerDetail: String? { + if usesCustomTailnetAuthority { + if let discovery = discoveryStatus { + return "Discovered from \(discovery.domain)." + } + if let discoveryError { + return discoveryError + } + return "Use a custom Tailnet authority when your domain does not advertise one." + } + return "Continue with Tailscale, or open advanced settings to use a custom server." + } + + private var tailnetConnectionSummary: String? 
{ + if isDiscoveringTailnet { + return "Finding server" + } + if isProbingAuthority { + return "Checking" + } + if let authorityProbeStatus { + return authorityProbeStatus.summary + } + if authorityProbeError != nil { + return "Unavailable" + } + return nil + } + + private var tailnetConnectionSummaryColor: Color { + if authorityProbeError != nil { + return .red + } + return .secondary + } + + private func submit() { + isSubmitting = true + errorMessage = nil + + Task { @MainActor in + defer { isSubmitting = false } + do { + switch sheet { + case .wireGuard: + try await submitWireGuard() + dismiss() + case .tor: + try submitTor() + dismiss() + case .tailnet: + try await submitTailnet() + } + } catch { + errorMessage = error.localizedDescription + } + } + } + + private func submitWireGuard() async throws { + let networkID = try await networkViewModel.addWireGuardNetwork( + configText: draft.wireGuardConfig + ) + + let title = titleOrFallback("WireGuard \(networkID)") + let record = NetworkAccountRecord( + id: UUID(), + kind: .wireGuard, + title: title, + authority: nil, + provider: nil, + accountName: normalized(draft.accountName, fallback: "default"), + identityName: normalized(draft.identityName, fallback: "network-\(networkID)"), + hostname: nil, + username: nil, + tailnet: nil, + authMode: .none, + note: "Linked to daemon network #\(networkID).", + createdAt: .now, + updatedAt: .now + ) + try accountStore.upsert(record, secret: nil) + } + + private func submitTor() throws { + let title = titleOrFallback("Tor \(normalized(draft.identityName, fallback: "apple"))") + let note = [ + "Addresses: \(csvSummary(draft.torAddresses))", + "DNS: \(csvSummary(draft.torDNS))", + "MTU: \(normalized(draft.torMTU, fallback: "1400"))", + "Listen: \(normalized(draft.torListen, fallback: "127.0.0.1:9040"))", + ].joined(separator: " • ") + + let record = NetworkAccountRecord( + id: UUID(), + kind: .tor, + title: title, + authority: "arti://local", + provider: nil, + accountName: 
normalized(draft.accountName, fallback: "default"), + identityName: normalized(draft.identityName, fallback: "apple"), + hostname: nil, + username: nil, + tailnet: nil, + authMode: .none, + note: note, + createdAt: .now, + updatedAt: .now + ) + try accountStore.upsert(record, secret: nil) + } + + private func submitTailnet() async throws { + let secret = (draft.authMode == .none || draft.authMode == .web) ? nil : draft.secret + let username = normalizedOptional(draft.username) + preserveTailnetLoginSession = draft.authMode == .web && tailnetLoginStatus?.running == true + try await saveTailnetAccount(secret: secret, username: username) + dismiss() + } + + private func runAutomationIfNeeded() { + guard !didRunAutomation, + sheet == .tailnet, + let automation = BurrowAutomationConfig.current, + automation.action == .tailnetLogin || automation.action == .tailnetProbe + else { + return + } + + didRunAutomation = true + draft.title = automation.title ?? draft.title + draft.accountName = automation.accountName ?? draft.accountName + draft.identityName = automation.identityName ?? draft.identityName + draft.hostname = automation.hostname ?? draft.hostname + + Task { @MainActor in + switch automation.action { + case .tailnetLogin: + applyTailnetDefaults(for: .tailscale) + startTailnetLogin() + case .tailnetProbe: + usesCustomTailnetAuthority = true + showsAdvancedTailnetSettings = true + draft.authority = automation.authority ?? TailnetProvider.headscale.defaultAuthority ?? draft.authority + probeTailnetAuthority() + } + } + } + + private func saveTailnetAccount(secret: String?, username: String?) async throws { + let provider = inferredTailnetProvider + let title = titleOrFallback( + hostnameFallback(from: draft.authority, fallback: "Tailnet") + ) + + let payload = TailnetNetworkPayload( + provider: provider, + authority: normalizedOptional(draft.authority) ?? normalizedOptional(provider.defaultAuthority ?? 
""), + account: normalized(draft.accountName, fallback: "default"), + identity: normalized(draft.identityName, fallback: "apple"), + tailnet: normalizedOptional(draft.tailnet), + hostname: normalizedOptional(draft.hostname) + ) + + var noteParts: [String] = [ + "Server: \(hostnameFallback(from: payload.authority ?? "", fallback: "tailnet"))", + ] + + if showsAdvancedTailnetSettings || draft.authMode != .web { + noteParts.append("Auth: \(draft.authMode.title)") + } + + if draft.authMode == .web, tailnetLoginStatus?.running == true { + noteParts.append("Browser sign-in complete") + } + + do { + let networkID = try await networkViewModel.addTailnetNetwork(payload: payload) + noteParts.append("Linked to daemon network #\(networkID)") + } catch { + noteParts.append("Daemon network add pending") + } + + let record = NetworkAccountRecord( + id: UUID(), + kind: .tailnet, + title: title, + authority: payload.authority, + provider: provider, + accountName: payload.account, + identityName: payload.identity, + hostname: payload.hostname, + username: username, + tailnet: payload.tailnet, + authMode: draft.authMode, + note: noteParts.joined(separator: " • "), + createdAt: .now, + updatedAt: .now + ) + try accountStore.upsert(record, secret: secret) + } + + private func applySuggestedIdentity() { + let defaults = AccountDraft(sheet: sheet) + if draft.title.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty { + draft.title = defaults.title + } + draft.accountName = defaults.accountName + draft.identityName = defaults.identityName + if sheet == .tailnet { + draft.hostname = defaults.hostname + } + } + + private func applyTorDefaults() { + let defaults = AccountDraft(sheet: .tor) + draft.title = defaults.title + draft.accountName = defaults.accountName + draft.identityName = defaults.identityName + draft.torAddresses = defaults.torAddresses + draft.torDNS = defaults.torDNS + draft.torMTU = defaults.torMTU + draft.torListen = defaults.torListen + } + + private func 
applyTailnetDefaults(for provider: TailnetProvider) { + resetTailnetDiscoveryFeedback() + usesCustomTailnetAuthority = provider != .tailscale + draft.authority = provider.defaultAuthority ?? "" + if !availableTailnetAuthModes.contains(draft.authMode) { + draft.authMode = .web + } + } + + private func startTailnetLogin() { + isStartingTailnetLogin = true + tailnetLoginError = nil + preserveTailnetLoginSession = false + + Task { @MainActor in + defer { isStartingTailnetLogin = false } + do { + let authority = try await resolveTailnetAuthorityForLogin() + let status = try await networkViewModel.startTailnetLogin( + accountName: normalized(draft.accountName, fallback: "default"), + identityName: normalized(draft.identityName, fallback: "apple"), + hostname: normalizedOptional(draft.hostname), + authority: authority + ) + tailnetLoginSessionID = status.sessionID + updateTailnetLoginStatus(status) + beginTailnetLoginPolling(sessionID: status.sessionID) + } catch { + tailnetLoginError = error.localizedDescription + } + } + } + + private func probeTailnetAuthority() { + guard let authority = normalizedOptional(draft.authority) else { + authorityProbeStatus = nil + authorityProbeError = "Enter a server URL first." 
+ return + } + + isProbingAuthority = true + authorityProbeStatus = nil + authorityProbeError = nil + + Task { @MainActor in + defer { isProbingAuthority = false } + do { + authorityProbeStatus = try await networkViewModel.probeTailnetAuthority(authority) + } catch { + authorityProbeError = error.localizedDescription + } + } + } + + private func resetAuthorityProbe() { + tailnetProbeTask?.cancel() + authorityProbeStatus = nil + authorityProbeError = nil + tailnetLoginError = nil + } + + private func resetTailnetDiscoveryFeedback() { + tailnetDiscoveryTask?.cancel() + discoveryStatus = nil + discoveryError = nil + } + + private func discoverTailnetAuthority() { + guard let email = normalizedOptional(draft.discoveryEmail) else { + discoveryStatus = nil + discoveryError = "Enter an email address first." + return + } + + isDiscoveringTailnet = true + discoveryStatus = nil + discoveryError = nil + + Task { @MainActor in + defer { isDiscoveringTailnet = false } + do { + let discovery = try await networkViewModel.discoverTailnet(email: email) + discoveryStatus = discovery + draft.authority = discovery.authority + probeTailnetAuthority() + } catch { + discoveryError = error.localizedDescription + } + } + } + + private func scheduleTailnetDiscovery(immediate: Bool = false) { + guard sheet == .tailnet else { return } + tailnetDiscoveryTask?.cancel() + + guard !usesCustomTailnetAuthority else { + discoveryStatus = nil + discoveryError = nil + return + } + + guard normalizedOptional(draft.discoveryEmail) != nil else { + discoveryStatus = nil + discoveryError = nil + draft.authority = TailnetProvider.tailscale.defaultAuthority ?? "" + return + } + + tailnetDiscoveryTask = Task { @MainActor in + if !immediate { + try? 
await Task.sleep(for: .milliseconds(450)) + } + guard !Task.isCancelled else { return } + discoverTailnetAuthority() + } + } + + private func scheduleTailnetAuthorityProbe() { + guard sheet == .tailnet else { return } + tailnetProbeTask?.cancel() + guard normalizedOptional(draft.authority) != nil else { return } + + tailnetProbeTask = Task { @MainActor in + try? await Task.sleep(for: .milliseconds(300)) + guard !Task.isCancelled else { return } + probeTailnetAuthority() + } + } + + private func toggleTailnetAuthorityMode() { + let discoveredAuthority = discoveryStatus?.authority + usesCustomTailnetAuthority.toggle() + resetTailnetDiscoveryFeedback() + resetAuthorityProbe() + if usesCustomTailnetAuthority { + draft.authority = discoveredAuthority ?? draft.authority + } else { + draft.authority = TailnetProvider.tailscale.defaultAuthority ?? "" + scheduleTailnetDiscovery(immediate: normalizedOptional(draft.discoveryEmail) != nil) + } + } + + private func resolveTailnetAuthorityForLogin() async throws -> String { + if !usesCustomTailnetAuthority { + let authority = TailnetProvider.tailscale.defaultAuthority ?? "" + draft.authority = authority + scheduleTailnetAuthorityProbe() + return authority + } + + if let authority = normalizedOptional(draft.authority) { + return authority + } + + if let email = normalizedOptional(draft.discoveryEmail) { + let discovery = try await networkViewModel.discoverTailnet(email: email) + discoveryStatus = discovery + discoveryError = nil + draft.authority = discovery.authority + scheduleTailnetAuthorityProbe() + return discovery.authority + } + + throw NSError(domain: "BurrowTailnet", code: 1, userInfo: [ + NSLocalizedDescriptionKey: "Enter an email address or a custom server URL first." 
+ ]) + } + + private func beginTailnetLoginPolling(sessionID: String) { + tailnetLoginPollTask?.cancel() + tailnetLoginPollTask = Task { @MainActor in + while !Task.isCancelled { + do { + let status = try await networkViewModel.tailnetLoginStatus(sessionID: sessionID) + updateTailnetLoginStatus(status) + if status.running { + tailnetLoginPollTask = nil + return + } + } catch { + tailnetLoginError = error.localizedDescription + tailnetLoginPollTask = nil + return + } + try? await Task.sleep(for: .seconds(1)) + } + } + } + + private func updateTailnetLoginStatus(_ status: TailnetLoginStatus) { + tailnetLoginStatus = status + tailnetLoginError = nil + tailnetLoginSessionID = status.sessionID + + if status.running { + browserAuthenticator.cancel() + tailnetPresentedAuthURL = nil + return + } + + guard let authURL = status.authURL else { + return + } + + if tailnetPresentedAuthURL != authURL { + tailnetPresentedAuthURL = authURL + browserAuthenticator.start(url: authURL) { [sessionID = status.sessionID] in + Task { @MainActor in + if tailnetLoginStatus?.running != true { + tailnetLoginSessionID = sessionID + } + } + } + } + } + + private func cancelTailnetLoginIfNeeded() async { + tailnetLoginPollTask?.cancel() + tailnetLoginPollTask = nil + browserAuthenticator.cancel() + tailnetPresentedAuthURL = nil + + guard tailnetLoginStatus?.running != true, + let sessionID = tailnetLoginSessionID + else { + return + } + + do { + try await networkViewModel.cancelTailnetLogin(sessionID: sessionID) + } catch { + tailnetLoginError = error.localizedDescription + } + + tailnetLoginStatus = nil + tailnetLoginSessionID = nil + } + + private func pasteWireGuardConfiguration() { + guard let clipboardString else { return } + draft.wireGuardConfig = clipboardString + } + + private var clipboardString: String? 
{ + #if canImport(UIKit) + UIPasteboard.general.string + #elseif canImport(AppKit) + NSPasteboard.general.string(forType: .string) + #else + nil + #endif + } + + private func normalized(_ value: String, fallback: String) -> String { + let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines) + return trimmed.isEmpty ? fallback : trimmed + } + + private func normalizedOptional(_ value: String) -> String? { + let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines) + return trimmed.isEmpty ? nil : trimmed + } + + private func titleOrFallback(_ fallback: String) -> String { + normalized(draft.title, fallback: fallback) + } + + private func csvSummary(_ value: String) -> String { + value + .split(separator: ",") + .map { $0.trimmingCharacters(in: .whitespacesAndNewlines) } + .filter { !$0.isEmpty } + .joined(separator: ", ") + } + + private func hostnameFallback(from value: String, fallback: String) -> String { + guard let url = URL(string: value), let host = url.host, !host.isEmpty else { + let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines) + return trimmed.isEmpty ? fallback : trimmed + } + return host + } + + private var availableTailnetAuthModes: [AccountAuthMode] { + [.web, .none, .password, .preauthKey] + } + + private var tailnetSignInActionTitle: String { + if tailnetLoginStatus?.running == true { + return "Signed In" + } + if tailnetLoginSessionID != nil { + return "Resume Sign-In" + } + return "Continue with Tailscale" + } + + private var tailnetAuthenticationFootnote: String { + switch draft.authMode { + case .web: + if usesCustomTailnetAuthority { + return "Burrow signs in through the daemon using your custom Tailnet server." + } + return "Burrow signs in through the daemon using Tailscale's managed browser flow." + case .none: + return "Save the authority only. Useful when the control plane handles authentication elsewhere." + case .password, .preauthKey: + return "Tailnet account material stays on-device. 
Burrow stores the authority and credentials for daemon-managed registration and refresh." + } + } + + private var inferredTailnetProvider: TailnetProvider { + TailnetProvider.inferred( + authority: normalizedOptional(draft.authority), + explicit: discoveryStatus?.provider + ) + } + + @ViewBuilder + private func labeledValue(_ label: String, _ value: String) -> some View { + VStack(alignment: .leading, spacing: 2) { + Text(label) + .font(.caption) + .foregroundStyle(.secondary) + Text(value) + .font(.body.monospaced()) + } + } +} + +private struct AccountRowView: View { + let account: NetworkAccountRecord + let hasSecret: Bool + + var body: some View { + VStack(alignment: .leading, spacing: 10) { + HStack(alignment: .top) { + VStack(alignment: .leading, spacing: 4) { + Text(account.title) + .font(.headline) + Text(account.kind.title) + .font(.subheadline) + .foregroundStyle(account.kind.accentColor) + } + Spacer() + if hasSecret { + Label("Credential stored", systemImage: "key.fill") + .font(.caption) + .foregroundStyle(.secondary) + } + } + + if let authority = account.authority { + labeledValue("Authority", authority) + } + + labeledValue("Account", account.accountName) + labeledValue("Identity", account.identityName) + + if let hostname = account.hostname { + labeledValue("Hostname", hostname) + } + + if let username = account.username { + labeledValue("Username", username) + } + + if let tailnet = account.tailnet { + labeledValue("Tailnet", tailnet) + } + + if let note = account.note { + Text(note) + .font(.footnote) + .foregroundStyle(.secondary) + } + } + .padding() + .frame(maxWidth: .infinity, alignment: .leading) + .background( + RoundedRectangle(cornerRadius: 16) + .fill(.thinMaterial) + ) + } + + @ViewBuilder + private func labeledValue(_ label: String, _ value: String) -> some View { + VStack(alignment: .leading, spacing: 2) { + Text(label) + .font(.caption) + .foregroundStyle(.secondary) + Text(value) + .font(.body.monospaced()) + } + } +} + +private 
extension View { + @ViewBuilder + func burrowLoginField() -> some View { + #if os(iOS) + textInputAutocapitalization(.never) + #else + self + #endif + } + + @ViewBuilder + func burrowEmailField() -> some View { + #if os(iOS) + textInputAutocapitalization(.never) + .keyboardType(.emailAddress) + #else + self + #endif + } +} + +#if canImport(AuthenticationServices) +@MainActor +private final class TailnetBrowserAuthenticator: NSObject { + private var session: ASWebAuthenticationSession? + private static var prefersEphemeralSessionForCurrentProcess: Bool { + let rawValue = ProcessInfo.processInfo.environment["BURROW_UI_TEST_EPHEMERAL_AUTH"]? + .trimmingCharacters(in: .whitespacesAndNewlines) + .lowercased() + return rawValue == "1" || rawValue == "true" || rawValue == "yes" + } + + func start(url: URL, onDismiss: @escaping @Sendable () -> Void) { + cancel() + let session = ASWebAuthenticationSession(url: url, callbackURLScheme: nil) { _, _ in + onDismiss() + } + session.presentationContextProvider = self + session.prefersEphemeralWebBrowserSession = Self.prefersEphemeralSessionForCurrentProcess + self.session = session + _ = session.start() + } + + func cancel() { + session?.cancel() + session = nil + } +} + +extension TailnetBrowserAuthenticator: ASWebAuthenticationPresentationContextProviding { + func presentationAnchor(for session: ASWebAuthenticationSession) -> ASPresentationAnchor { + #if canImport(AppKit) + return NSApplication.shared.keyWindow + ?? NSApplication.shared.windows.first + ?? 
ASPresentationAnchor() + #elseif canImport(UIKit) + return ASPresentationAnchor() + #else + return ASPresentationAnchor() + #endif + } +} +#else +@MainActor +private final class TailnetBrowserAuthenticator { + func start(url: URL, onDismiss: @escaping @Sendable () -> Void) { + _ = url + onDismiss() + } + + func cancel() {} +} +#endif + +private struct BurrowAutomationConfig { + enum Action: String { + case tailnetLogin = "tailnet-login" + case tailnetProbe = "tailnet-probe" + } + + let action: Action + let title: String? + let accountName: String? + let identityName: String? + let hostname: String? + let authority: String? + + static let current: BurrowAutomationConfig? = { + let environment = ProcessInfo.processInfo.environment + guard let rawAction = environment["BURROW_UI_AUTOMATION"], + let action = Action(rawValue: rawAction) + else { + return nil + } + + return BurrowAutomationConfig( + action: action, + title: environment["BURROW_UI_AUTOMATION_TITLE"], + accountName: environment["BURROW_UI_AUTOMATION_ACCOUNT"], + identityName: environment["BURROW_UI_AUTOMATION_IDENTITY"], + hostname: environment["BURROW_UI_AUTOMATION_HOSTNAME"], + authority: environment["BURROW_UI_AUTOMATION_AUTHORITY"] + ) + }() } #if DEBUG @@ -72,4 +1714,3 @@ struct NetworkView_Previews: PreviewProvider { } } #endif -#endif diff --git a/Apple/UI/NetworkCarouselView.swift b/Apple/UI/NetworkCarouselView.swift index f969356..e7368db 100644 --- a/Apple/UI/NetworkCarouselView.swift +++ b/Apple/UI/NetworkCarouselView.swift @@ -1,39 +1,61 @@ import SwiftUI struct NetworkCarouselView: View { - var networks: [any Network] = [ - HackClub(id: 1), - HackClub(id: 2), - WireGuard(id: 4), - HackClub(id: 5) - ] + var networks: [NetworkCardModel] var body: some View { - ScrollView(.horizontal) { - LazyHStack { - ForEach(networks, id: \.id) { network in - NetworkView(network: network) - .containerRelativeFrame(.horizontal, count: 10, span: 7, spacing: 0, alignment: .center) - .scrollTransition(.interactive, 
axis: .horizontal) { content, phase in - content - .scaleEffect(1.0 - abs(phase.value) * 0.1) - } + Group { + if networks.isEmpty { + #if os(iOS) + VStack(alignment: .leading, spacing: 6) { + Text("No stored networks yet") + .font(.headline) + Text("WireGuard and Tailnet networks show up here as soon as you add one.") + .font(.footnote) + .foregroundStyle(.secondary) } + .frame(maxWidth: .infinity, alignment: .leading) + .padding() + .background( + RoundedRectangle(cornerRadius: 18) + .fill(.thinMaterial) + ) + #else + ContentUnavailableView( + "No Networks Yet", + systemImage: "network.slash", + description: Text("Add a WireGuard network, or save a Tailnet account so Burrow can store a managed network when the daemon is reachable.") + ) + .frame(maxWidth: .infinity, minHeight: 175) + #endif + } else { + ScrollView(.horizontal) { + LazyHStack { + ForEach(networks) { network in + NetworkView(network: network) + .containerRelativeFrame(.horizontal, count: 10, span: 7, spacing: 0, alignment: .center) + .scrollTransition(.interactive, axis: .horizontal) { content, phase in + content + .scaleEffect(1.0 - abs(phase.value) * 0.1) + } + } + } + } + .scrollTargetLayout() + .scrollClipDisabled() + .scrollIndicators(.hidden) + .defaultScrollAnchor(.center) + .scrollTargetBehavior(.viewAligned) + .containerRelativeFrame(.horizontal) } } - .scrollTargetLayout() - .scrollClipDisabled() - .scrollIndicators(.hidden) - .defaultScrollAnchor(.center) - .scrollTargetBehavior(.viewAligned) - .containerRelativeFrame(.horizontal) } } #if DEBUG struct NetworkCarouselView_Previews: PreviewProvider { static var previews: some View { - NetworkCarouselView() + NetworkCarouselView(networks: [WireGuardCard(id: 1, detail: "10.13.13.2/24 · wg.burrow.rs:51820").card]) } } #endif diff --git a/Apple/UI/NetworkExtensionTunnel.swift b/Apple/UI/NetworkExtensionTunnel.swift index 7aaa3b1..23559f3 100644 --- a/Apple/UI/NetworkExtensionTunnel.swift +++ b/Apple/UI/NetworkExtensionTunnel.swift @@ -105,7 
+105,7 @@ public final class NetworkExtensionTunnel: Tunnel { let proto = NETunnelProviderProtocol() proto.providerBundleIdentifier = bundleIdentifier - proto.serverAddress = "hackclub.com" + proto.serverAddress = "burrow.rs" manager.protocolConfiguration = proto try await manager.save() diff --git a/Apple/UI/NetworkView.swift b/Apple/UI/NetworkView.swift index b839d65..437adce 100644 --- a/Apple/UI/NetworkView.swift +++ b/Apple/UI/NetworkView.swift @@ -31,8 +31,8 @@ struct NetworkView: View { } extension NetworkView where Content == AnyView { - init(network: any Network) { + init(network: NetworkCardModel) { color = network.backgroundColor - content = { AnyView(network.label) } + content = { network.label } } } diff --git a/Apple/UI/Networks/HackClub.swift b/Apple/UI/Networks/HackClub.swift deleted file mode 100644 index b1c2023..0000000 --- a/Apple/UI/Networks/HackClub.swift +++ /dev/null @@ -1,27 +0,0 @@ -import BurrowCore -import SwiftUI - -struct HackClub: Network { - typealias NetworkType = Burrow_WireGuardNetwork - static let type: Burrow_NetworkType = .hackClub - - var id: Int32 - var backgroundColor: Color { .init("HackClub") } - - @MainActor var label: some View { - GeometryReader { reader in - VStack(alignment: .leading) { - Image("HackClub") - .resizable() - .aspectRatio(contentMode: .fit) - .frame(height: reader.size.height / 4) - Spacer() - Text("@conradev") - .foregroundStyle(.white) - .font(.body.monospaced()) - } - .padding() - .frame(maxWidth: .infinity) - } - } -} diff --git a/Apple/UI/Networks/Network.swift b/Apple/UI/Networks/Network.swift index c6d5fba..35bd0e1 100644 --- a/Apple/UI/Networks/Network.swift +++ b/Apple/UI/Networks/Network.swift @@ -1,36 +1,623 @@ -import Atomics +import BurrowConfiguration import BurrowCore +import Foundation +import Security import SwiftProtobuf import SwiftUI -protocol Network { - associatedtype NetworkType: Message - associatedtype Label: View +struct NetworkCardModel: Identifiable { + let id: Int32 + let 
backgroundColor: Color + let label: AnyView +} - static var type: Burrow_NetworkType { get } +struct TailnetNetworkPayload: Codable, Sendable { + var provider: TailnetProvider + var authority: String? + var account: String + var identity: String + var tailnet: String? + var hostname: String? - var id: Int32 { get } - var backgroundColor: Color { get } + func encoded() throws -> Data { + let encoder = JSONEncoder() + encoder.outputFormatting = [.prettyPrinted, .sortedKeys] + return try encoder.encode(self) + } +} - @MainActor var label: Label { get } +struct TailnetDiscoveryResponse: Codable, Sendable { + var domain: String + var provider: TailnetProvider + var authority: String + var oidcIssuer: String? +} + +struct TailnetAuthorityProbeStatus: Sendable { + var authority: String + var statusCode: Int + var summary: String + var detail: String? +} + +struct TailnetLoginStatus: Sendable { + var sessionID: String + var backendState: String + var authURL: URL? + var running: Bool + var needsLogin: Bool + var tailnetName: String? + var magicDNSSuffix: String? + var selfDNSName: String? + var tailnetIPs: [String] + var health: [String] +} + +enum TailnetDiscoveryClient { + static func discover(email: String, socketURL: URL) async throws -> TailnetDiscoveryResponse { + var request = Burrow_TailnetDiscoverRequest() + request.email = email + + let response = try await TailnetClient.unix(socketURL: socketURL).discover(request) + return TailnetDiscoveryResponse( + domain: response.domain, + provider: response.managed ? .tailscale : .headscale, + authority: response.authority, + oidcIssuer: response.oidcIssuer.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + ? 
nil + : response.oidcIssuer + ) + } +} + +enum TailnetAuthorityProbeClient { + static func probe(authority: String, socketURL: URL) async throws -> TailnetAuthorityProbeStatus { + var request = Burrow_TailnetProbeRequest() + request.authority = authority + + let response = try await TailnetClient.unix(socketURL: socketURL).probe(request) + return TailnetAuthorityProbeStatus( + authority: response.authority, + statusCode: Int(response.statusCode), + summary: response.summary, + detail: response.detail.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + ? nil + : response.detail + ) + } +} + +enum TailnetLoginClient { + static func start( + accountName: String, + identityName: String, + hostname: String?, + authority: String, + socketURL: URL + ) async throws -> TailnetLoginStatus { + var request = Burrow_TailnetLoginStartRequest() + request.accountName = accountName + request.identityName = identityName + request.hostname = hostname ?? "" + request.authority = authority + let response = try await TailnetClient.unix(socketURL: socketURL).loginStart(request) + return decode(response) + } + + static func status(sessionID: String, socketURL: URL) async throws -> TailnetLoginStatus { + var request = Burrow_TailnetLoginStatusRequest() + request.sessionID = sessionID + let response = try await TailnetClient.unix(socketURL: socketURL).loginStatus(request) + return decode(response) + } + + static func cancel(sessionID: String, socketURL: URL) async throws { + var request = Burrow_TailnetLoginCancelRequest() + request.sessionID = sessionID + _ = try await TailnetClient.unix(socketURL: socketURL).loginCancel(request) + } + + private static func decode(_ response: Burrow_TailnetLoginStatusResponse) -> TailnetLoginStatus { + TailnetLoginStatus( + sessionID: response.sessionID, + backendState: response.backendState, + authURL: URL(string: response.authURL.trimmingCharacters(in: .whitespacesAndNewlines)), + running: response.running, + needsLogin: response.needsLogin, + 
tailnetName: response.tailnetName.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + ? nil + : response.tailnetName, + magicDNSSuffix: response.magicDNSSuffix.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + ? nil + : response.magicDNSSuffix, + selfDNSName: response.selfDNSName.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty + ? nil + : response.selfDNSName, + tailnetIPs: response.tailnetIPs, + health: response.health + ) + } } @Observable @MainActor final class NetworkViewModel: Sendable { private(set) var networks: [Burrow_Network] = [] + private(set) var connectionError: String? + private let socketURLResult: Result - private var task: Task! + @ObservationIgnored private var task: Task? - init(socketURL: URL) { + init(socketURLResult: Result) { + self.socketURLResult = socketURLResult + startStreaming() + } + + deinit { + task?.cancel() + } + + var cards: [NetworkCardModel] { + networks.map(Self.makeCard(for:)) + } + + var nextNetworkID: Int32 { + (networks.map(\.id).max() ?? 
0) + 1 + } + + func addWireGuardNetwork(configText: String) async throws -> Int32 { + try await addNetwork(type: .wireGuard, payload: Data(configText.utf8)) + } + + func addTailnetNetwork(payload: TailnetNetworkPayload) async throws -> Int32 { + try await addNetwork(type: .tailnet, payload: payload.encoded()) + } + + func discoverTailnet(email: String) async throws -> TailnetDiscoveryResponse { + let socketURL = try socketURLResult.get() + return try await TailnetDiscoveryClient.discover(email: email, socketURL: socketURL) + } + + func probeTailnetAuthority(_ authority: String) async throws -> TailnetAuthorityProbeStatus { + let socketURL = try socketURLResult.get() + return try await TailnetAuthorityProbeClient.probe(authority: authority, socketURL: socketURL) + } + + func startTailnetLogin( + accountName: String, + identityName: String, + hostname: String?, + authority: String + ) async throws -> TailnetLoginStatus { + let socketURL = try socketURLResult.get() + return try await TailnetLoginClient.start( + accountName: accountName, + identityName: identityName, + hostname: hostname, + authority: authority, + socketURL: socketURL + ) + } + + func tailnetLoginStatus(sessionID: String) async throws -> TailnetLoginStatus { + let socketURL = try socketURLResult.get() + return try await TailnetLoginClient.status(sessionID: sessionID, socketURL: socketURL) + } + + func cancelTailnetLogin(sessionID: String) async throws { + let socketURL = try socketURLResult.get() + try await TailnetLoginClient.cancel(sessionID: sessionID, socketURL: socketURL) + } + + private func addNetwork(type: Burrow_NetworkType, payload: Data) async throws -> Int32 { + let socketURL = try socketURLResult.get() + let networkID = nextNetworkID + let request = Burrow_Network.with { + $0.id = networkID + $0.type = type + $0.payload = payload + } + + let client = NetworksClient.unix(socketURL: socketURL) + _ = try await client.networkAdd(request) + return networkID + } + + private func startStreaming() 
{ + task?.cancel() + let socketURLResult = self.socketURLResult task = Task { [weak self] in - let client = NetworksClient.unix(socketURL: socketURL) - for try await networks in client.networkList(.init()) { - guard let viewModel = self else { continue } - Task { @MainActor in - viewModel.networks = networks.network + do { + let socketURL = try socketURLResult.get() + let client = NetworksClient.unix(socketURL: socketURL) + for try await response in client.networkList(.init()) { + guard !Task.isCancelled else { return } + await MainActor.run { + guard let self else { return } + self.networks = response.network + self.connectionError = nil + } + } + } catch { + guard !Task.isCancelled else { return } + await MainActor.run { + guard let self else { return } + self.connectionError = error.localizedDescription } } } } + + private static func makeCard(for network: Burrow_Network) -> NetworkCardModel { + switch network.type { + case .wireGuard: + WireGuardCard(network: network).card + case .tailnet: + TailnetCard(network: network).card + case .UNRECOGNIZED(let rawValue): + unsupportedCard( + id: network.id, + title: "Unknown Network", + detail: "Type \(rawValue) is not recognized by this build." + ) + @unknown default: + unsupportedCard( + id: network.id, + title: "Unsupported Network", + detail: "Update Burrow to view this network." 
+ ) + } + } + + private static func unsupportedCard(id: Int32, title: String, detail: String) -> NetworkCardModel { + NetworkCardModel( + id: id, + backgroundColor: .gray.opacity(0.85), + label: AnyView( + VStack(alignment: .leading, spacing: 12) { + Text(title) + .font(.title3.weight(.semibold)) + .foregroundStyle(.white) + Text(detail) + .font(.body) + .foregroundStyle(.white.opacity(0.9)) + Spacer() + Text("Network #\(id)") + .font(.footnote.monospaced()) + .foregroundStyle(.white.opacity(0.8)) + } + .padding() + .frame(maxWidth: .infinity, alignment: .leading) + ) + ) + } +} + +enum TailnetProvider: String, CaseIterable, Codable, Identifiable, Sendable { + case tailscale + case headscale + case burrow + + var id: String { rawValue } + + var title: String { + switch self { + case .tailscale: "Tailscale" + case .headscale: "Custom Tailnet" + case .burrow: "Burrow" + } + } + + var defaultAuthority: String? { + switch self { + case .tailscale: + "https://controlplane.tailscale.com" + case .headscale: + "https://ts.burrow.net" + case .burrow: + nil + } + } + + var subtitle: String { + switch self { + case .tailscale: + "Managed Tailnet authority." + case .headscale: + "Custom Tailnet control server." + case .burrow: + "Burrow-native Tailnet authority." + } + } + + static func inferred(authority: String?, explicit: TailnetProvider?) -> TailnetProvider { + if explicit == .burrow { + return .burrow + } + if isManagedTailscaleAuthority(authority) { + return .tailscale + } + return .headscale + } + + static func isManagedTailscaleAuthority(_ authority: String?) -> Bool { + guard let normalized = authority? 
+ .trimmingCharacters(in: .whitespacesAndNewlines) + .lowercased() + .trimmingCharacters(in: CharacterSet(charactersIn: "/")), + !normalized.isEmpty + else { + return false + } + + return normalized == "https://controlplane.tailscale.com" + || normalized == "http://controlplane.tailscale.com" + || normalized == "controlplane.tailscale.com" + } +} + +enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable { + case wireGuard + case tor + case tailnet + + var id: String { rawValue } + + var title: String { + switch self { + case .wireGuard: "WireGuard" + case .tor: "Tor" + case .tailnet: "Tailnet" + } + } + + var subtitle: String { + switch self { + case .wireGuard: "Import a tunnel and optional account metadata." + case .tor: "Store Arti account and identity preferences." + case .tailnet: "Save Tailnet authority, identity defaults, and login material." + } + } + + var accentColor: Color { + switch self { + case .wireGuard: .init("WireGuard") + case .tor: .orange + case .tailnet: .mint + } + } + + var actionTitle: String { + switch self { + case .wireGuard: "Add Network" + case .tor: "Save Account" + case .tailnet: "Save Account" + } + } + + var availabilityNote: String? { + switch self { + case .wireGuard: + nil + case .tor: + "Tor account preferences are stored on Apple now. The managed Tor runtime is not wired on Apple in this branch yet." + case .tailnet: + "Tailnet accounts can sign in from Apple now. The managed Apple runtime is still pending, but Tailnet networks can already be stored in the daemon." 
+ } + } +} + +enum AccountAuthMode: String, CaseIterable, Codable, Identifiable, Sendable { + case web + case none + case password + case preauthKey + + var id: String { rawValue } + + var title: String { + switch self { + case .web: "Browser Sign-In" + case .none: "None" + case .password: "Password" + case .preauthKey: "Preauth Key" + } + } +} + +struct NetworkAccountRecord: Codable, Identifiable, Hashable, Sendable { + let id: UUID + var kind: AccountNetworkKind + var title: String + var authority: String? + var provider: TailnetProvider? + var accountName: String + var identityName: String + var hostname: String? + var username: String? + var tailnet: String? + var authMode: AccountAuthMode + var note: String? + var createdAt: Date + var updatedAt: Date +} + +struct TailnetCard { + var id: Int32 + var title: String + var detail: String + + init(network: Burrow_Network) { + let payload = (try? JSONDecoder().decode(TailnetNetworkPayload.self, from: network.payload)) + id = network.id + title = payload?.tailnet ?? payload?.hostname ?? "Tailnet" + detail = [ + payload?.authority.flatMap { URL(string: $0)?.host } ?? 
payload?.authority, + payload?.authority, + payload.map { "Account: \($0.account)" }, + ] + .compactMap { $0 } + .joined(separator: " · ") + .ifEmpty("Stored Tailnet configuration") + } + + var card: NetworkCardModel { + NetworkCardModel( + id: id, + backgroundColor: .mint, + label: AnyView( + VStack(alignment: .leading, spacing: 12) { + HStack { + VStack(alignment: .leading, spacing: 4) { + Text("Tailnet") + .font(.headline) + .foregroundStyle(.white.opacity(0.85)) + Text(title) + .font(.title3.weight(.semibold)) + .foregroundStyle(.white) + } + Spacer() + } + Spacer() + Text(detail) + .font(.body.monospaced()) + .foregroundStyle(.white.opacity(0.92)) + .lineLimit(4) + } + .padding() + .frame(maxWidth: .infinity, alignment: .leading) + ) + ) + } +} + +@Observable +@MainActor +final class NetworkAccountStore { + private static let storageKey = "burrow.network-accounts" + + private let defaults: UserDefaults + private(set) var accounts: [NetworkAccountRecord] = [] + + init(defaults: UserDefaults = UserDefaults(suiteName: Constants.appGroupIdentifier) ?? .standard) { + self.defaults = defaults + load() + } + + func upsert(_ record: NetworkAccountRecord, secret: String?) 
throws { + if let index = accounts.firstIndex(where: { $0.id == record.id }) { + accounts[index] = record + } else { + accounts.append(record) + } + accounts.sort { + if $0.kind == $1.kind { + return $0.title.localizedCaseInsensitiveCompare($1.title) == .orderedAscending + } + return $0.kind.rawValue < $1.kind.rawValue + } + try persist() + if let secret, !secret.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty { + try AccountSecretStore.store(secret, for: record.id) + } else { + try AccountSecretStore.removeSecret(for: record.id) + } + } + + func delete(_ record: NetworkAccountRecord) throws { + accounts.removeAll { $0.id == record.id } + try persist() + try AccountSecretStore.removeSecret(for: record.id) + } + + func hasStoredSecret(for record: NetworkAccountRecord) -> Bool { + AccountSecretStore.hasSecret(for: record.id) + } + + private func load() { + guard let data = defaults.data(forKey: Self.storageKey) else { + accounts = [] + return + } + + do { + accounts = try JSONDecoder().decode([NetworkAccountRecord].self, from: data) + } catch { + accounts = [] + } + } + + private func persist() throws { + let data = try JSONEncoder().encode(accounts) + defaults.set(data, forKey: Self.storageKey) + } +} + +private enum AccountSecretStore { + private static let service = "\(Constants.bundleIdentifier).accounts" + + static func hasSecret(for accountID: UUID) -> Bool { + let query = baseQuery(for: accountID) + return SecItemCopyMatching(query as CFDictionary, nil) == errSecSuccess + } + + static func store(_ secret: String, for accountID: UUID) throws { + let data = Data(secret.utf8) + let query = baseQuery(for: accountID) + let status = SecItemCopyMatching(query as CFDictionary, nil) + + if status == errSecSuccess { + let updateStatus = SecItemUpdate( + query as CFDictionary, + [kSecValueData as String: data] as CFDictionary + ) + guard updateStatus == errSecSuccess else { + throw AccountSecretStoreError.osStatus(updateStatus) + } + return + } + + var item = 
query + item[kSecValueData as String] = data + item[kSecAttrAccessible as String] = kSecAttrAccessibleAfterFirstUnlock + let addStatus = SecItemAdd(item as CFDictionary, nil) + guard addStatus == errSecSuccess else { + throw AccountSecretStoreError.osStatus(addStatus) + } + } + + static func removeSecret(for accountID: UUID) throws { + let status = SecItemDelete(baseQuery(for: accountID) as CFDictionary) + guard status == errSecSuccess || status == errSecItemNotFound else { + throw AccountSecretStoreError.osStatus(status) + } + } + + private static func baseQuery(for accountID: UUID) -> [String: Any] { + [ + kSecClass as String: kSecClassGenericPassword, + kSecAttrService as String: service, + kSecAttrAccount as String: accountID.uuidString, + ] + } +} + +private enum AccountSecretStoreError: LocalizedError { + case osStatus(OSStatus) + + var errorDescription: String? { + switch self { + case .osStatus(let status): + if let message = SecCopyErrorMessageString(status, nil) as String? { + return message + } + return "Keychain error \(status)" + } + } +} + +private extension String { + func ifEmpty(_ fallback: @autoclosure () -> String) -> String { + isEmpty ? 
fallback() : self + } } diff --git a/Apple/UI/Networks/WireGuard.swift b/Apple/UI/Networks/WireGuard.swift index cba67ef..c0426cd 100644 --- a/Apple/UI/Networks/WireGuard.swift +++ b/Apple/UI/Networks/WireGuard.swift @@ -1,14 +1,40 @@ import BurrowCore +import Foundation import SwiftUI -struct WireGuard: Network { - typealias NetworkType = Burrow_WireGuardNetwork - static let type: BurrowCore.Burrow_NetworkType = .wireGuard - +struct WireGuardCard { var id: Int32 - var backgroundColor: Color { .init("WireGuard") } + var title: String + var detail: String - @MainActor var label: some View { + init(id: Int32, title: String = "WireGuard", detail: String = "Stored configuration") { + self.id = id + self.title = title + self.detail = detail + } + + init(network: Burrow_Network) { + let payload = String(data: network.payload, encoding: .utf8) ?? "" + let address = Self.firstValue(for: "Address", in: payload) + let endpoint = Self.firstValue(for: "Endpoint", in: payload) + self.id = network.id + self.title = "WireGuard" + self.detail = [address, endpoint] + .compactMap { $0 } + .filter { !$0.isEmpty } + .joined(separator: " · ") + .ifEmpty("Stored configuration") + } + + var card: NetworkCardModel { + NetworkCardModel( + id: id, + backgroundColor: .init("WireGuard"), + label: AnyView(label) + ) + } + + private var label: some View { GeometryReader { reader in VStack(alignment: .leading) { HStack { @@ -23,12 +49,29 @@ struct WireGuard: Network { } .frame(maxWidth: .infinity, maxHeight: reader.size.height / 4) Spacer() - Text("@conradev") + Text(detail) .foregroundStyle(.white) .font(.body.monospaced()) + .lineLimit(3) } .padding() .frame(maxWidth: .infinity) } } + + private static func firstValue(for key: String, in config: String) -> String? { + config + .split(whereSeparator: \.isNewline) + .map(String.init) + .first(where: { $0.hasPrefix("\(key) = ") })? 
+ .split(separator: "=", maxSplits: 1) + .last + .map { $0.trimmingCharacters(in: .whitespaces) } + } +} + +private extension String { + func ifEmpty(_ fallback: @autoclosure () -> String) -> String { + isEmpty ? fallback() : self + } } diff --git a/Apple/UI/OAuth2.swift b/Apple/UI/OAuth2.swift deleted file mode 100644 index 0fafc8d..0000000 --- a/Apple/UI/OAuth2.swift +++ /dev/null @@ -1,293 +0,0 @@ -import AuthenticationServices -import Foundation -import os -import SwiftUI - -enum OAuth2 { - enum Error: Swift.Error { - case unknown - case invalidAuthorizationURL - case invalidCallbackURL - case invalidRedirectURI - } - - struct Credential { - var accessToken: String - var refreshToken: String? - var expirationDate: Date? - } - - struct Session { - var authorizationEndpoint: URL - var tokenEndpoint: URL - var redirectURI: URL - var responseType = OAuth2.ResponseType.code - var scopes: Set - var clientID: String - var clientSecret: String - - fileprivate static let queue: OSAllocatedUnfairLock<[Int: CheckedContinuation]> = { - .init(initialState: [:]) - }() - - fileprivate static func handle(url: URL) { - let continuations = queue.withLock { continuations in - let copy = continuations - continuations.removeAll() - return copy - } - for (_, continuation) in continuations { - continuation.resume(returning: url) - } - } - - init( - authorizationEndpoint: URL, - tokenEndpoint: URL, - redirectURI: URL, - scopes: Set, - clientID: String, - clientSecret: String - ) { - self.authorizationEndpoint = authorizationEndpoint - self.tokenEndpoint = tokenEndpoint - self.redirectURI = redirectURI - self.scopes = scopes - self.clientID = clientID - self.clientSecret = clientSecret - } - - private var authorizationURL: URL { - get throws { - var queryItems: [URLQueryItem] = [ - .init(name: "client_id", value: clientID), - .init(name: "response_type", value: responseType.rawValue), - .init(name: "redirect_uri", value: redirectURI.absoluteString) - ] - if !scopes.isEmpty { - 
queryItems.append(.init(name: "scope", value: scopes.joined(separator: ","))) - } - guard var components = URLComponents(url: authorizationEndpoint, resolvingAgainstBaseURL: false) else { - throw OAuth2.Error.invalidAuthorizationURL - } - components.queryItems = queryItems - guard let authorizationURL = components.url else { throw OAuth2.Error.invalidAuthorizationURL } - return authorizationURL - } - } - - private func handle(callbackURL: URL) async throws -> OAuth2.AccessTokenResponse { - switch responseType { - case .code: - guard let components = URLComponents(url: callbackURL, resolvingAgainstBaseURL: false) else { - throw OAuth2.Error.invalidCallbackURL - } - return try await handle(response: try components.decode(OAuth2.CodeResponse.self)) - default: - throw OAuth2.Error.invalidCallbackURL - } - } - - private func handle(response: OAuth2.CodeResponse) async throws -> OAuth2.AccessTokenResponse { - var components = URLComponents() - components.queryItems = [ - .init(name: "client_id", value: clientID), - .init(name: "client_secret", value: clientSecret), - .init(name: "grant_type", value: GrantType.authorizationCode.rawValue), - .init(name: "code", value: response.code), - .init(name: "redirect_uri", value: redirectURI.absoluteString) - ] - let httpBody = Data(components.percentEncodedQuery!.utf8) - - var request = URLRequest(url: tokenEndpoint) - request.setValue("application/x-www-form-urlencoded", forHTTPHeaderField: "Content-Type") - request.httpMethod = "POST" - request.httpBody = httpBody - - let session = URLSession(configuration: .ephemeral) - let (data, _) = try await session.data(for: request) - return try OAuth2.decoder.decode(OAuth2.AccessTokenResponse.self, from: data) - } - - func authorize(_ session: WebAuthenticationSession) async throws -> Credential { - let authorizationURL = try authorizationURL - let callbackURL = try await session.start( - url: authorizationURL, - redirectURI: redirectURI - ) - return try await handle(callbackURL: 
callbackURL).credential - } - } - - private struct CodeResponse: Codable { - var code: String - var state: String? - } - - private struct AccessTokenResponse: Codable { - var accessToken: String - var tokenType: TokenType - var expiresIn: Double? - var refreshToken: String? - - var credential: Credential { - .init( - accessToken: accessToken, - refreshToken: refreshToken, - expirationDate: expiresIn.map { Date(timeIntervalSinceNow: $0) } - ) - } - } - - enum TokenType: Codable, RawRepresentable { - case bearer - case unknown(String) - - init(rawValue: String) { - self = switch rawValue.lowercased() { - case "bearer": .bearer - default: .unknown(rawValue) - } - } - - var rawValue: String { - switch self { - case .bearer: "bearer" - case .unknown(let type): type - } - } - } - - enum GrantType: Codable, RawRepresentable { - case authorizationCode - case unknown(String) - - init(rawValue: String) { - self = switch rawValue.lowercased() { - case "authorization_code": .authorizationCode - default: .unknown(rawValue) - } - } - - var rawValue: String { - switch self { - case .authorizationCode: "authorization_code" - case .unknown(let type): type - } - } - } - - enum ResponseType: Codable, RawRepresentable { - case code - case idToken - case unknown(String) - - init(rawValue: String) { - self = switch rawValue.lowercased() { - case "code": .code - case "id_token": .idToken - default: .unknown(rawValue) - } - } - - var rawValue: String { - switch self { - case .code: "code" - case .idToken: "id_token" - case .unknown(let type): type - } - } - } - - fileprivate static var decoder: JSONDecoder { - let decoder = JSONDecoder() - decoder.keyDecodingStrategy = .convertFromSnakeCase - return decoder - } - - fileprivate static var encoder: JSONEncoder { - let encoder = JSONEncoder() - encoder.keyEncodingStrategy = .convertToSnakeCase - return encoder - } -} - -extension WebAuthenticationSession: @unchecked @retroactive Sendable { -} - -extension WebAuthenticationSession { -#if 
canImport(BrowserEngineKit) - @available(iOS 17.4, macOS 14.4, tvOS 17.4, watchOS 10.4, *) - fileprivate static func callback(for redirectURI: URL) throws -> ASWebAuthenticationSession.Callback { - switch redirectURI.scheme { - case "https": - guard let host = redirectURI.host else { throw OAuth2.Error.invalidRedirectURI } - return .https(host: host, path: redirectURI.path) - case "http": - throw OAuth2.Error.invalidRedirectURI - case .some(let scheme): - return .customScheme(scheme) - case .none: - throw OAuth2.Error.invalidRedirectURI - } - } -#endif - - fileprivate func start(url: URL, redirectURI: URL) async throws -> URL { - #if canImport(BrowserEngineKit) - if #available(iOS 17.4, macOS 14.4, tvOS 17.4, watchOS 10.4, *) { - return try await authenticate( - using: url, - callback: try Self.callback(for: redirectURI), - additionalHeaderFields: [:] - ) - } - #endif - - return try await withThrowingTaskGroup(of: URL.self) { group in - group.addTask { - return try await authenticate(using: url, callbackURLScheme: redirectURI.scheme ?? "") - } - - let id = Int.random(in: 0.. some View { - onOpenURL { url in OAuth2.Session.handle(url: url) } - } -} - -extension URLComponents { - fileprivate func decode(_ type: T.Type) throws -> T { - guard let queryItems else { - throw DecodingError.valueNotFound( - T.self, - .init(codingPath: [], debugDescription: "Missing query items") - ) - } - let data = try OAuth2.encoder.encode(try queryItems.values) - return try OAuth2.decoder.decode(T.self, from: data) - } -} - -extension Sequence where Element == URLQueryItem { - fileprivate var values: [String: String?] { - get throws { - try Dictionary(map { ($0.name, $0.value) }) { _, _ in - throw DecodingError.dataCorrupted(.init(codingPath: [], debugDescription: "Duplicate query items")) - } - } - } -} diff --git a/Cargo.lock b/Cargo.lock index a7833c9..2950701 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,15 @@ # It is not intended for manual editing. 
version = 4 +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + [[package]] name = "adler2" version = "2.0.1" @@ -165,6 +174,18 @@ version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" +[[package]] +name = "argon2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c3610892ee6e0cbce8ae2700349fcf8f98adb0dbfbee85aec3c9179d29cc072" +dependencies = [ + "base64ct", + "blake2", + "cpufeatures", + "password-hash 0.5.0", +] + [[package]] name = "arrayvec" version = "0.7.6" @@ -416,28 +437,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" -[[package]] -name = "aws-lc-rs" -version = "1.16.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94bffc006df10ac2a68c83692d734a465f8ee6c5b384d8545a636f81d858f4bf" -dependencies = [ - "aws-lc-sys", - "zeroize", -] - -[[package]] -name = "aws-lc-sys" -version = "0.38.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4321e568ed89bb5a7d291a7f37997c2c0df89809d7b6d12062c81ddb54aa782e" -dependencies = [ - "cc", - "cmake", - "dunce", - "fs_extra", -] - [[package]] name = "axum" version = "0.6.20" @@ -453,7 +452,7 @@ dependencies = [ "http-body 0.4.6", "hyper 0.14.32", "itoa", - "matchit 0.7.3", + "matchit", "memchr", "mime", "percent-encoding", @@ -468,32 +467,33 @@ dependencies = [ [[package]] name = "axum" -version = "0.8.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8" +checksum = 
"edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" dependencies = [ - "axum-core 0.5.6", + "async-trait", + "axum-core 0.4.5", "bytes", - "form_urlencoded", "futures-util", "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.8.1", + "hyper 1.7.0", "hyper-util", "itoa", - "matchit 0.8.4", + "matchit", "memchr", "mime", "percent-encoding", "pin-project-lite", - "serde_core", + "rustversion", + "serde", "serde_json", "serde_path_to_error", "serde_urlencoded", "sync_wrapper 1.0.2", "tokio", - "tower 0.5.3", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", @@ -518,23 +518,40 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.5.6" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ + "async-trait", "bytes", - "futures-core", + "futures-util", "http 1.3.1", "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", + "rustversion", "sync_wrapper 1.0.2", "tower-layer", "tower-service", "tracing", ] +[[package]] +name = "backtrace" +version = "0.3.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets 0.52.6", +] + [[package]] name = "base16ct" version = "0.2.0" @@ -690,12 +707,14 @@ version = "0.1.0" dependencies = [ "aead", "anyhow", + "argon2", "arti-client", "async-channel", "async-stream 0.2.1", - "axum 0.8.8", + "axum 0.7.9", "base64 0.21.7", "blake2", + "bytes", "caps", "chacha20poly1305", "clap", @@ -704,36 +723,41 @@ dependencies = [ "dotenv", "fehler", "futures", + "hickory-proto", "hmac", "hyper-util", "insta", "ip_network", "ip_network_table", + "ipnetwork", "libc", "libsystemd", "log", + 
"netstack-smoltcp", "nix 0.27.1", "once_cell", "parking_lot", - "prost 0.14.3", - "prost-types 0.14.3", + "prost 0.13.5", + "prost-types 0.13.5", "rand 0.8.5", "rand_core 0.6.4", - "reqwest", + "reqwest 0.12.23", "ring", "rusqlite", "rust-ini", "schemars 0.8.22", "serde", "serde_json", + "subtle", + "tempfile", "tokio", "tokio-stream", "tokio-util", "toml 0.8.23", - "tonic 0.14.5", - "tonic-prost", - "tonic-prost-build", - "tower 0.5.3", + "tonic 0.12.3", + "tonic-build", + "tor-rtcompat", + "tower 0.4.13", "tracing", "tracing-journald", "tracing-log 0.1.4", @@ -811,9 +835,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.57" +version = "1.2.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423" +checksum = "80f41ae168f955c12fb8960b057d70d0ca153fb83182b57d86380443527be7e9" dependencies = [ "find-msvc-tools", "jobserver", @@ -821,12 +845,6 @@ dependencies = [ "shlex", ] -[[package]] -name = "cesu8" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" - [[package]] name = "cexpr" version = "0.6.0" @@ -874,14 +892,14 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.44" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ "iana-time-zone", "num-traits", "serde", - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] @@ -973,15 +991,6 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" -[[package]] -name = "cmake" -version = "0.1.57" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" -dependencies = [ - "cc", -] - [[package]] name = "coarsetime" version = "0.1.37" @@ -999,16 +1008,6 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" -[[package]] -name = "combine" -version = "4.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" -dependencies = [ - "bytes", - "memchr", -] - [[package]] name = "compression-codecs" version = "0.4.32" @@ -1121,9 +1120,9 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "convert_case" -version = "0.10.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" +checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7" dependencies = [ "unicode-segmentation", ] @@ -1147,16 +1146,6 @@ dependencies = [ "libc", ] -[[package]] -name = "core-foundation" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -1226,6 +1215,12 @@ dependencies = [ "itertools 0.13.0", ] +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + [[package]] name = "crossbeam-channel" version = "0.5.15" @@ -1439,9 +1434,50 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.10.0" +version = "2.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" + +[[package]] +name = "defmt" +version = "0.3.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0963443817029b2024136fc4dd07a5107eb8f977eaf18fcd1fdeb11306b64ad" +dependencies = [ + "defmt 1.0.1", +] + +[[package]] +name = "defmt" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "548d977b6da32fa1d1fda2876453da1e7df63ad0304c8b3dae4dbe7b96f39b78" +dependencies = [ + "bitflags 1.3.2", + "defmt-macros", +] + +[[package]] +name = "defmt-macros" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d4fc12a85bcf441cfe44344c4b72d58493178ce635338a3f3b78943aceb258e" +dependencies = [ + "defmt-parser", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "defmt-parser" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10d60334b3b2e7c9d91ef8150abfb6fa4c1c39ebbcf4a81c2e346aad939fee3e" +dependencies = [ + "thiserror 2.0.16", +] [[package]] name = "der" @@ -1539,23 +1575,22 @@ dependencies = [ [[package]] name = "derive_more" -version = "2.1.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "2.1.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "convert_case", 
"proc-macro2", "quote", - "rustc_version", "syn 2.0.106", "unicode-xid", ] @@ -1634,12 +1669,6 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "117240f60069e65410b3ae1bb213295bd828f707b5bec6596a1afc8793ce0cbc" -[[package]] -name = "dunce" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" - [[package]] name = "dyn-clone" version = "1.0.20" @@ -1738,6 +1767,18 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "enum-as-inner" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "enum-ordinalize" version = "3.1.15" @@ -1800,6 +1841,15 @@ dependencies = [ "windows-sys 0.61.0", ] +[[package]] +name = "etherparse" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8d8a704b617484e9d867a0423cd45f7577f008c4068e2e33378f8d3860a6d73" +dependencies = [ + "arrayvec", +] + [[package]] name = "event-listener" version = "5.4.1" @@ -1901,9 +1951,9 @@ dependencies = [ [[package]] name = "find-msvc-tools" -version = "0.1.9" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" +checksum = "1ced73b1dacfc750a6db6c0a0c3a3853c8b41997e2e2c563dc90804ae6867959" [[package]] name = "fixedbitset" @@ -1971,9 +2021,9 @@ dependencies = [ [[package]] name = "fs-mistrust" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "189ebb6d350de8d03181999fa9ebe8a021c5ab041004388f29e4dd2c52dc88a2" +checksum = "9f5ac9f88fd18733e0f9ce1f4a95c40eb1d4f83131bf1472e81d1f128fefb7c2" dependencies = [ "derive_builder_fork_arti", 
"dirs", @@ -1984,12 +2034,6 @@ dependencies = [ "walkdir", ] -[[package]] -name = "fs_extra" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" - [[package]] name = "fslock" version = "0.2.1" @@ -2158,6 +2202,12 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + [[package]] name = "glob" version = "0.3.3" @@ -2230,6 +2280,15 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "hash32" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606" +dependencies = [ + "byteorder", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -2282,6 +2341,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "heapless" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad" +dependencies = [ + "hash32", + "stable_deref_trait", +] + [[package]] name = "heck" version = "0.5.0" @@ -2294,6 +2363,31 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hickory-proto" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8a6fe56c0038198998a6f217ca4e7ef3a5e51f46163bd6dd60b5c71ca6c6502" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.9.2", + "ring", + "thiserror 2.0.16", + "tinyvec", + "tokio", + "tracing", + "url", +] + [[package]] name = "hkdf" version = "0.12.4" 
@@ -2437,9 +2531,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.8.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" dependencies = [ "atomic-waker", "bytes", @@ -2465,13 +2559,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ "http 1.3.1", - "hyper 1.8.1", + "hyper 1.7.0", "hyper-util", "rustls", "rustls-pki-types", "tokio", "tokio-rustls", "tower-service", + "webpki-roots", ] [[package]] @@ -2492,7 +2587,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper 1.8.1", + "hyper 1.7.0", "hyper-util", "pin-project-lite", "tokio", @@ -2500,35 +2595,47 @@ dependencies = [ ] [[package]] -name = "hyper-util" -version = "0.1.20" +name = "hyper-tls" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper 0.14.32", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "hyper-util" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" dependencies = [ "base64 0.22.1", "bytes", "futures-channel", + "futures-core", "futures-util", "http 1.3.1", "http-body 1.0.1", - "hyper 1.8.1", + "hyper 1.7.0", "ipnet", "libc", "percent-encoding", "pin-project-lite", "socket2 0.6.3", - "system-configuration", "tokio", "tower-service", "tracing", - "windows-registry", ] [[package]] name = 
"iana-time-zone" -version = "0.1.65" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2536,7 +2643,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.62.1", + "windows-core 0.62.2", ] [[package]] @@ -2733,13 +2840,24 @@ dependencies = [ [[package]] name = "inventory" -version = "0.3.22" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "009ae045c87e7082cb72dab0ccd01ae075dd00141ddc108f43a0ea150a9e7227" +checksum = "a4f0c30c76f2f4ccee3fe55a2435f691ca00c0e4bd87abe4f4a851b1d4dac39b" dependencies = [ "rustversion", ] +[[package]] +name = "io-uring" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b" +dependencies = [ + "bitflags 2.9.4", + "cfg-if", + "libc", +] + [[package]] name = "ip_network" version = "0.4.1" @@ -2768,6 +2886,15 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +[[package]] +name = "ipnetwork" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf370abdafd54d13e54a620e8c3e1145f28e46cc9d704bc6d94414559df41763" +dependencies = [ + "serde", +] + [[package]] name = "iri-string" version = "0.7.8" @@ -2817,28 +2944,6 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" -[[package]] -name = "jni" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" -dependencies = [ - "cesu8", - "cfg-if", - "combine", - "jni-sys", - "log", - "thiserror 1.0.69", - "walkdir", - "windows-sys 0.45.0", -] - -[[package]] -name = "jni-sys" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" - [[package]] name = "jobserver" version = "0.1.34" @@ -2851,10 +2956,12 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.91" +version = "0.3.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c" +checksum = "797146bb2677299a1eb6b7b50a890f4c361b29ef967addf5b2fa45dae1bb6d7d" dependencies = [ + "cfg-if", + "futures-util", "once_cell", "wasm-bindgen", ] @@ -2932,7 +3039,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] @@ -2963,9 +3070,9 @@ checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "libredox" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a" +checksum = "7ddbf48fd451246b1f8c2610bd3b4ac0cc6e149d89832867093ab69a17194f08" dependencies = [ "bitflags 2.9.4", "libc", @@ -3042,6 +3149,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" +[[package]] +name = "managed" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca88d725a0a943b096803bd34e73a4437208b6077654cc4ecb2947a5f91618d" + [[package]] name = "matchers" version = "0.2.0" @@ -3057,12 
+3170,6 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" -[[package]] -name = "matchit" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" - [[package]] name = "memchr" version = "2.7.5" @@ -3179,14 +3286,30 @@ dependencies = [ "libc", "log", "openssl", - "openssl-probe 0.1.6", + "openssl-probe", "openssl-sys", "schannel", - "security-framework 2.11.1", + "security-framework", "security-framework-sys", "tempfile", ] +[[package]] +name = "netstack-smoltcp" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab8eb143b5f4a5907f5ac72a929edf6c9d9454485cf5a3a35ce8fd3c62165adf" +dependencies = [ + "etherparse", + "futures", + "rand 0.8.5", + "smoltcp", + "spin", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "nix" version = "0.26.4" @@ -3209,6 +3332,7 @@ dependencies = [ "bitflags 2.9.4", "cfg-if", "libc", + "memoffset 0.9.1", ] [[package]] @@ -3321,9 +3445,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" +checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967" [[package]] name = "num-integer" @@ -3357,9 +3481,9 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.6" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0bca838442ec211fa11de3a8b0e0e8f3a4522575b5c4c06ed722e005036f26" +checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" dependencies = [ "num_enum_derive", "rustversion", @@ -3367,9 +3491,9 @@ dependencies = [ [[package]] name = "num_enum_derive" -version = "0.7.6" +version = "0.7.5" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "680998035259dcfcafe653688bf2aa6d3e2dc05e98be6ab46afb089dc84f1df8" +checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -3396,11 +3520,24 @@ dependencies = [ "objc2-core-foundation", ] +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] + [[package]] name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +dependencies = [ + "critical-section", + "portable-atomic", +] [[package]] name = "once_cell_polyfill" @@ -3461,12 +3598,6 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" -[[package]] -name = "openssl-probe" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" - [[package]] name = "openssl-sys" version = "0.9.109" @@ -3601,6 +3732,17 @@ dependencies = [ "subtle", ] +[[package]] +name = "password-hash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "paste" version = "1.0.15" @@ -3615,7 +3757,7 @@ checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ "digest", "hmac", - "password-hash", + "password-hash 0.4.2", "sha2", ] @@ -3642,12 +3784,11 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "petgraph" 
-version = "0.8.3" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455" +checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "hashbrown 0.15.5", "indexmap 2.11.4", ] @@ -3798,6 +3939,12 @@ dependencies = [ "universal-hash", ] +[[package]] +name = "portable-atomic" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + [[package]] name = "postage" version = "0.5.0" @@ -3869,11 +4016,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.5.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67ba7e9b2b56446f1d419b1d807906278ffa1a658a8a5d8a39dcb1f5a78614f" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.25.4+spec-1.1.0", + "toml_edit 0.23.7", ] [[package]] @@ -3919,30 +4066,29 @@ dependencies = [ [[package]] name = "prost" -version = "0.14.3" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" dependencies = [ "bytes", - "prost-derive 0.14.3", + "prost-derive 0.13.5", ] [[package]] name = "prost-build" -version = "0.14.3" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343d3bd7056eda839b03204e68deff7d1b13aba7af2b2fd16890697274262ee7" +checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" dependencies = [ "heck", "itertools 0.14.0", "log", "multimap", + "once_cell", "petgraph", "prettyplease", - "prost 0.14.3", - "prost-types 0.14.3", - "pulldown-cmark", - "pulldown-cmark-to-cmark", + "prost 0.13.5", + 
"prost-types 0.13.5", "regex", "syn 2.0.106", "tempfile", @@ -3963,9 +4109,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.14.3" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3985,31 +4131,11 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.14.3" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8991c4cbdb8bc5b11f0b074ffe286c30e523de90fee5ba8132f1399f23cb3dd7" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" dependencies = [ - "prost 0.14.3", -] - -[[package]] -name = "pulldown-cmark" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c41efbf8f90ac44de7f3a868f0867851d261b56291732d0cbf7cceaaeb55a6" -dependencies = [ - "bitflags 2.9.4", - "memchr", - "unicase", -] - -[[package]] -name = "pulldown-cmark-to-cmark" -version = "22.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50793def1b900256624a709439404384204a5dc3a6ec580281bfaac35e882e90" -dependencies = [ - "pulldown-cmark", + "prost 0.13.5", ] [[package]] @@ -4050,7 +4176,6 @@ version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" dependencies = [ - "aws-lc-rs", "bytes", "getrandom 0.3.3", "lru-slab", @@ -4286,42 +4411,80 @@ checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" [[package]] name = "reqwest" -version = "0.13.2" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801" +checksum = 
"dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ - "base64 0.22.1", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", - "h2 0.4.12", + "futures-util", + "h2 0.3.27", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.32", + "hyper-tls", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 0.1.2", + "system-configuration", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg", +] + +[[package]] +name = "reqwest" +version = "0.12.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-core", "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.8.1", + "hyper 1.7.0", "hyper-rustls", "hyper-util", "js-sys", "log", - "mime", "percent-encoding", "pin-project-lite", "quinn", "rustls", "rustls-pki-types", - "rustls-platform-verifier", "serde", "serde_json", + "serde_urlencoded", "sync_wrapper 1.0.2", "tokio", "tokio-rustls", - "tower 0.5.3", + "tower 0.5.2", "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots", ] [[package]] @@ -4414,6 +4577,12 @@ dependencies = [ "ordered-multimap", ] +[[package]] +name = "rustc-demangle" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" + [[package]] name = "rustc-hash" version = "1.1.0" @@ -4472,12 +4641,12 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.32" +version = "0.23.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" +checksum = 
"6a9586e9ee2b4f8fab52a0048ca7334d7024eef48e2cb9407e3497bb7cab7fa7" dependencies = [ - "aws-lc-rs", "once_cell", + "ring", "rustls-pki-types", "rustls-webpki", "subtle", @@ -4485,15 +4654,12 @@ dependencies = [ ] [[package]] -name = "rustls-native-certs" -version = "0.8.3" +name = "rustls-pemfile" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "openssl-probe 0.2.1", - "rustls-pki-types", - "schannel", - "security-framework 3.5.1", + "base64 0.21.7", ] [[package]] @@ -4506,40 +4672,12 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rustls-platform-verifier" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" -dependencies = [ - "core-foundation 0.10.1", - "core-foundation-sys", - "jni", - "log", - "once_cell", - "rustls", - "rustls-native-certs", - "rustls-platform-verifier-android", - "rustls-webpki", - "security-framework 3.5.1", - "security-framework-sys", - "webpki-root-certs", - "windows-sys 0.61.0", -] - -[[package]] -name = "rustls-platform-verifier-android" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" - [[package]] name = "rustls-webpki" -version = "0.103.6" +version = "0.103.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8572f3c2cb9934231157b45499fc41e1f58c589fdfb81a844ba873265e80f8eb" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" dependencies = [ - "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -4559,9 +4697,9 @@ checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "safelog" -version = "0.8.0" 
+version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8949ab2810bf603caef654634e5b4cedcbc05c120342a177cf8aaa122ef4bb76" +checksum = "ee9f10dd250956c65d58a19507dd06ff976f898560fe843580d05134541f0898" dependencies = [ "derive_more", "educe", @@ -4678,20 +4816,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.9.4", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework" -version = "3.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" -dependencies = [ - "bitflags 2.9.4", - "core-foundation 0.10.1", + "core-foundation", "core-foundation-sys", "libc", "security-framework-sys", @@ -4809,9 +4934,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "1.0.4" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" +checksum = "876ac351060d4f882bb1032b6369eb0aef79ad9df1ea8bc404874d8cc3d0cd98" dependencies = [ "serde_core", ] @@ -4994,6 +5119,21 @@ version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" +[[package]] +name = "smoltcp" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dad095989c1533c1c266d9b1e8d70a1329dd3723c3edac6d03bbd67e7bf6f4bb" +dependencies = [ + "bitflags 1.3.2", + "byteorder", + "cfg-if", + "defmt 0.3.100", + "heapless", + "log", + "managed", +] + [[package]] name = "socket2" version = "0.5.10" @@ -5019,6 +5159,9 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] [[package]] name = "spki" @@ -5215,20 +5358,20 @@ dependencies = [ [[package]] name = "system-configuration" -version = "0.7.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ - "bitflags 2.9.4", - "core-foundation 0.9.4", + "bitflags 1.3.2", + "core-foundation", "system-configuration-sys", ] [[package]] name = "system-configuration-sys" -version = "0.6.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" dependencies = [ "core-foundation-sys", "libc", @@ -5380,19 +5523,22 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.50.0" +version = "1.47.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d" +checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" dependencies = [ + "backtrace", "bytes", + "io-uring", "libc", "mio", "pin-project-lite", "signal-hook-registry", + "slab", "socket2 0.6.3", "tokio-macros", "tracing", - "windows-sys 0.61.0", + "windows-sys 0.59.0", ] [[package]] @@ -5407,15 +5553,25 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.6.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", "syn 2.0.106", ] +[[package]] +name = 
"tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.26.3" @@ -5428,9 +5584,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.18" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -5471,11 +5627,11 @@ checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" dependencies = [ "indexmap 2.11.4", "serde_core", - "serde_spanned 1.0.4", + "serde_spanned 1.1.0", "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "toml_writer", - "winnow", + "winnow 0.7.13", ] [[package]] @@ -5496,15 +5652,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "toml_datetime" -version = "1.0.0+spec-1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32c2555c699578a4f59f0cc68e5116c8d7cabbd45e1409b989d4be085b53f13e" -dependencies = [ - "serde_core", -] - [[package]] name = "toml_edit" version = "0.22.27" @@ -5516,28 +5663,28 @@ dependencies = [ "serde_spanned 0.6.9", "toml_datetime 0.6.11", "toml_write", - "winnow", + "winnow 0.7.13", ] [[package]] name = "toml_edit" -version = "0.25.4+spec-1.1.0" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7193cbd0ce53dc966037f54351dbbcf0d5a642c7f0038c382ef9e677ce8c13f2" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" dependencies = [ "indexmap 2.11.4", - "toml_datetime 1.0.0+spec-1.1.0", + "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", - "winnow", + "winnow 0.7.13", ] [[package]] name = "toml_parser" -version = 
"1.0.9+spec-1.1.0" +version = "1.1.0+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" +checksum = "2334f11ee363607eb04df9b8fc8a13ca1715a72ba8662a26ac285c98aabb4011" dependencies = [ - "winnow", + "winnow 1.0.1", ] [[package]] @@ -5548,9 +5695,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "toml_writer" -version = "1.0.6+spec-1.1.0" +version = "1.1.0+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" +checksum = "d282ade6016312faf3e41e57ebbba0c073e4056dab1232ab1cb624199648f8ed" [[package]] name = "tonic" @@ -5581,28 +5728,29 @@ dependencies = [ [[package]] name = "tonic" -version = "0.14.5" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fec7c61a0695dc1887c1b53952990f3ad2e3a31453e1f49f10e75424943a93ec" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ + "async-stream 0.3.6", "async-trait", - "axum 0.8.8", + "axum 0.7.9", "base64 0.22.1", "bytes", "h2 0.4.12", "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.8.1", + "hyper 1.7.0", "hyper-timeout 0.5.2", "hyper-util", "percent-encoding", "pin-project", - "socket2 0.6.3", - "sync_wrapper 1.0.2", + "prost 0.13.5", + "socket2 0.5.10", "tokio", "tokio-stream", - "tower 0.5.3", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -5610,41 +5758,16 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.14.5" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1882ac3bf5ef12877d7ed57aad87e75154c11931c2ba7e6cde5e22d63522c734" -dependencies = [ - "prettyplease", - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "tonic-prost" -version = "0.14.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55376a0bbaa4975a3f10d009ad763d8f4108f067c7c2e74f3001fb49778d309" -dependencies = [ - "bytes", - "prost 0.14.3", - "tonic 0.14.5", -] - -[[package]] -name = "tonic-prost-build" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3144df636917574672e93d0f56d7edec49f90305749c668df5101751bb8f95a" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" dependencies = [ "prettyplease", "proc-macro2", "prost-build", - "prost-types 0.14.3", + "prost-types 0.13.5", "quote", "syn 2.0.106", - "tempfile", - "tonic-build", ] [[package]] @@ -6622,18 +6745,15 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.11.4", "pin-project-lite", - "slab", "sync_wrapper 1.0.2", "tokio", - "tokio-util", "tower-layer", "tower-service", "tracing", @@ -6641,9 +6761,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.8" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ "bitflags 2.9.4", "bytes", @@ -6652,7 +6772,7 @@ dependencies = [ "http-body 1.0.1", "iri-string", "pin-project-lite", - "tower 0.5.3", + "tower 0.5.2", "tower-layer", "tower-service", ] @@ -6809,7 +6929,7 @@ dependencies = [ "libloading 0.7.4", "log", "nix 0.26.4", - "reqwest", + "reqwest 0.11.27", "schemars 0.8.22", "serde", "socket2 0.5.10", @@ -6847,12 +6967,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "unicase" -version = "2.9.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" - [[package]] name = "unicode-ident" version = "1.0.19" @@ -6861,9 +6975,9 @@ checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" [[package]] name = "unicode-segmentation" -version = "1.12.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" +checksum = "9629274872b2bfaf8d66f5f15725007f635594914870f65218920345aa11aa8c" [[package]] name = "unicode-width" @@ -7038,9 +7152,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.114" +version = "0.2.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e" +checksum = "7dc0882f7b5bb01ae8c5215a1230832694481c1a4be062fd410e12ea3da5b631" dependencies = [ "cfg-if", "once_cell", @@ -7051,23 +7165,19 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.64" +version = "0.4.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8" +checksum = "19280959e2844181895ef62f065c63e0ca07ece4771b53d89bfdb967d97cbf05" dependencies = [ - "cfg-if", - "futures-util", "js-sys", - "once_cell", "wasm-bindgen", - "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.114" +version = "0.2.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6" +checksum = "75973d3066e01d035dbedaad2864c398df42f8dd7b1ea057c35b8407c015b537" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7075,9 +7185,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.114" +version = "0.2.116" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3" +checksum = "91af5e4be765819e0bcfee7322c14374dc821e35e72fa663a830bbc7dc199eac" dependencies = [ "bumpalo", "proc-macro2", @@ -7088,9 +7198,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.114" +version = "0.2.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16" +checksum = "c9bf0406a78f02f336bf1e451799cca198e8acde4ffa278f0fb20487b150a633" dependencies = [ "unicode-ident", ] @@ -7137,9 +7247,9 @@ checksum = "323f4da9523e9a669e1eaf9c6e763892769b1d38c623913647bfdc1532fe4549" [[package]] name = "web-sys" -version = "0.3.91" +version = "0.3.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9" +checksum = "749466a37ee189057f54748b200186b59a03417a117267baf3fd89cecc9fb837" dependencies = [ "js-sys", "wasm-bindgen", @@ -7156,10 +7266,10 @@ dependencies = [ ] [[package]] -name = "webpki-root-certs" -version = "1.0.6" +name = "webpki-roots" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca" +checksum = "32b130c0d2d49f8b6889abc456e795e82525204f27c42cf767cf0d7734e089b8" dependencies = [ "rustls-pki-types", ] @@ -7259,15 +7369,15 @@ dependencies = [ [[package]] name = "windows-core" -version = "0.62.1" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6844ee5416b285084d3d3fffd743b925a6c9385455f64f6d4fa3031c4c2749a9" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", - "windows-link 0.2.0", - "windows-result 0.4.0", - "windows-strings 0.5.0", + "windows-link 0.2.1", + "windows-result 
0.4.1", + "windows-strings 0.5.1", ] [[package]] @@ -7311,9 +7421,9 @@ checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-link" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-numerics" @@ -7325,17 +7435,6 @@ dependencies = [ "windows-link 0.1.3", ] -[[package]] -name = "windows-registry" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f91f87ce112ffb7275000ea98eb1940912c21c1567c9312fde20261f3eadd29" -dependencies = [ - "windows-link 0.2.0", - "windows-result 0.4.0", - "windows-strings 0.5.0", -] - [[package]] name = "windows-result" version = "0.3.4" @@ -7347,11 +7446,11 @@ dependencies = [ [[package]] name = "windows-result" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7084dcc306f89883455a206237404d3eaf961e5bd7e0f312f7c91f57eb44167f" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] @@ -7365,20 +7464,20 @@ dependencies = [ [[package]] name = "windows-strings" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7218c655a553b0bed4426cf54b20d7ba363ef543b52d515b3e48d7fd55318dda" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] name = "windows-sys" -version = "0.45.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +checksum = 
"677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.42.2", + "windows-targets 0.48.5", ] [[package]] @@ -7414,22 +7513,7 @@ version = "0.61.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e201184e40b2ede64bc2ea34968b28e33622acdbbf37104f0e4a33f7abe657aa" dependencies = [ - "windows-link 0.2.0", -] - -[[package]] -name = "windows-targets" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-link 0.2.1", ] [[package]] @@ -7489,12 +7573,6 @@ dependencies = [ "windows-link 0.1.3", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -7513,12 +7591,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -7537,12 +7609,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -7573,12 +7639,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -7597,12 +7657,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -7621,12 +7675,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -7645,12 +7693,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" - [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -7678,6 +7720,22 @@ dependencies = [ "memchr", ] 
+[[package]] +name = "winnow" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09dac053f1cd375980747450bfc7250c264eaae0583872e845c0c7cd578872b5" + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + [[package]] name = "wit-bindgen" version = "0.46.0" @@ -7872,9 +7930,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" dependencies = [ "zeroize_derive", ] diff --git a/Dockerfile b/Dockerfile index 404179b..3497e22 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM docker.io/library/rust:1.79-slim-bookworm AS builder +FROM docker.io/library/rust:1.85-slim-bookworm AS builder ARG TARGETPLATFORM ARG LLVM_VERSION=16 diff --git a/Makefile b/Makefile index 6738052..1a0488c 100644 --- a/Makefile +++ b/Makefile @@ -1,56 +1,21 @@ -FLAKE ?= . 
-AGENIX ?= nix run ${FLAKE}\#agenix -- - -SECRETS := forgejo/admin-password \ - forgejo/agent-ssh-key \ - forgejo/nsc-token \ - forgejo/nsc-dispatcher-config \ - forgejo/nsc-autoscaler-config \ - cloudflare/api-token \ - hetzner/api-token \ - forwardemail/api-token \ - forwardemail/hetzner-s3-user \ - forwardemail/hetzner-s3-secret - tun := $(shell ifconfig -l | sed 's/ /\n/g' | grep utun | tail -n 1) cargo_console := env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features -- cargo_norm := env RUST_BACKTRACE=1 RUST_LOG=debug cargo run -- sudo_cargo_console := sudo -E env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features -- sudo_cargo_norm := sudo -E env RUST_BACKTRACE=1 RUST_LOG=debug cargo run -- -.PHONY: secret secret-file secrets-list - -secret: - @if [ -z "${name}" ]; then \ - printf 'Usage: make secret name=\nAvailable secrets:\n %s\n' "${SECRETS}"; \ - exit 1; \ - fi - ${AGENIX} -e secrets/${name}.age - -secret-file: - @if [ -z "${name}" ]; then \ - printf 'Usage: make secret-file name= file=\nAvailable secrets:\n %s\n' "${SECRETS}"; \ - exit 1; \ - fi - @if [ -z "${file}" ]; then \ - printf 'Usage: make secret-file name= file=\n'; \ - exit 1; \ - fi - @if [ ! -f "${file}" ]; then \ - printf 'Source file "%s" not found.\n' "${file}"; \ - exit 1; \ - fi - SECRET_SOURCE_FILE="${file}" EDITOR="${PWD}/Scripts/agenix-load-file.sh" ${AGENIX} -e secrets/${name}.age /dev/null 2>&1 || true - done - BURROW_SECRET_TMPFILES=() -} - -burrow_decrypt_age_secret_to_temp() { - local repo_root="$1" - local secret_path="$2" - local agenix_path - local identity_path - local tmp_file - - if [[ ! 
-f "${secret_path}" ]]; then - echo "age secret not found: ${secret_path}" >&2 - return 1 - fi - agenix_path="$(burrow_secret_repo_path "${repo_root}" "${secret_path}")" - identity_path="$(burrow_agenix_identity_path "${repo_root}")" - - tmp_file="$(mktemp "${TMPDIR:-/tmp}/burrow-secret.XXXXXX")" - if [[ -n "${identity_path}" ]]; then - nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -d "${agenix_path}" -i "${identity_path}" > "${tmp_file}" - else - nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -d "${agenix_path}" > "${tmp_file}" - fi - chmod 600 "${tmp_file}" - BURROW_SECRET_TMPFILES+=("${tmp_file}") - printf '%s\n' "${tmp_file}" -} - -burrow_resolve_secret_file() { - local repo_root="$1" - local explicit_path="$2" - local intake_path="$3" - local age_path="$4" - local fallback_path="${5:-}" - - if [[ -n "${explicit_path}" ]]; then - if [[ ! -s "${explicit_path}" ]]; then - echo "required file missing or empty: ${explicit_path}" >&2 - return 1 - fi - printf '%s\n' "${explicit_path}" - return 0 - fi - - if [[ -n "${age_path}" && -f "${age_path}" ]]; then - burrow_decrypt_age_secret_to_temp "${repo_root}" "${age_path}" - return 0 - fi - - if [[ -n "${intake_path}" && -s "${intake_path}" ]]; then - printf '%s\n' "${intake_path}" - return 0 - fi - - if [[ -n "${fallback_path}" && -s "${fallback_path}" ]]; then - printf '%s\n' "${fallback_path}" - return 0 - fi - - return 1 -} - -burrow_encrypt_secret_from_file() { - local repo_root="$1" - local secret_path="$2" - local source_path="$3" - local agenix_path - local backup_file="" - - if [[ ! 
-s "${source_path}" ]]; then - echo "secret source missing or empty: ${source_path}" >&2 - return 1 - fi - agenix_path="$(burrow_secret_repo_path "${repo_root}" "${secret_path}")" - if [[ -f "${secret_path}" ]]; then - backup_file="$(mktemp "${TMPDIR:-/tmp}/burrow-secret-backup.XXXXXX")" - cp "${secret_path}" "${backup_file}" - fi - rm -f "${secret_path}" - - if ! nix --extra-experimental-features "nix-command flakes" run "${repo_root}#agenix" -- -e "${agenix_path}" < "${source_path}"; then - if [[ -n "${backup_file}" && -f "${backup_file}" ]]; then - mv "${backup_file}" "${secret_path}" - fi - return 1 - fi - - [[ -n "${backup_file}" ]] && rm -f "${backup_file}" -} diff --git a/Scripts/agenix-load-file.sh b/Scripts/agenix-load-file.sh deleted file mode 100755 index b91108b..0000000 --- a/Scripts/agenix-load-file.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -if [[ $# -lt 1 ]]; then - echo "Usage: agenix-load-file.sh " >&2 - exit 1 -fi - -dest="${!#}" -source_path="${SECRET_SOURCE_FILE:-}" - -if [[ -z "$source_path" ]]; then - echo "SECRET_SOURCE_FILE is not set; point it at the source file to encrypt." >&2 - exit 1 -fi - -if [[ ! -f "$source_path" ]]; then - echo "Source file '$source_path' does not exist." 
>&2 - exit 1 -fi - -cp "$source_path" "$dest" diff --git a/Scripts/authentik-sync-1password-oidc.sh b/Scripts/authentik-sync-1password-oidc.sh new file mode 100755 index 0000000..f523d9a --- /dev/null +++ b/Scripts/authentik-sync-1password-oidc.sh @@ -0,0 +1,243 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +application_slug="${AUTHENTIK_ONEPASSWORD_APPLICATION_SLUG:-onepassword}" +application_name="${AUTHENTIK_ONEPASSWORD_APPLICATION_NAME:-1Password}" +provider_name="${AUTHENTIK_ONEPASSWORD_PROVIDER_NAME:-1Password}" +template_slug="${AUTHENTIK_ONEPASSWORD_TEMPLATE_SLUG:-ts}" +client_id="${AUTHENTIK_ONEPASSWORD_CLIENT_ID:-1password.burrow.net}" +launch_url="${AUTHENTIK_ONEPASSWORD_LAUNCH_URL:-https://burrow-team.1password.com/}" +redirect_uris_json="${AUTHENTIK_ONEPASSWORD_REDIRECT_URIS_JSON:-[ + \"https://burrow-team.1password.com/sso/oidc/redirect/\", + \"onepassword://sso/oidc/redirect\" +]}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-1password-oidc.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_ONEPASSWORD_APPLICATION_SLUG + AUTHENTIK_ONEPASSWORD_APPLICATION_NAME + AUTHENTIK_ONEPASSWORD_PROVIDER_NAME + AUTHENTIK_ONEPASSWORD_TEMPLATE_SLUG + AUTHENTIK_ONEPASSWORD_CLIENT_ID + AUTHENTIK_ONEPASSWORD_LAUNCH_URL + AUTHENTIK_ONEPASSWORD_REDIRECT_URIS_JSON +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +if ! 
printf '%s' "$redirect_uris_json" | jq -e 'type == "array" and length > 0' >/dev/null; then + echo "error: AUTHENTIK_ONEPASSWORD_REDIRECT_URIS_JSON must be a non-empty JSON array" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +api_with_status() { + local method="$1" + local path="$2" + local data="${3:-}" + local response_file status + + response_file="$(mktemp)" + trap 'rm -f "$response_file"' RETURN + + if [[ -n "$data" ]]; then + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + )" + else + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + )" + fi + + printf '%s\n' "$status" + cat "$response_file" +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +wait_for_authentik + +template_provider="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -c --arg template_slug "$template_slug" '.results[]? 
| select(.assigned_application_slug == $template_slug)' \ + | head -n1 +)" + +if [[ -z "$template_provider" ]]; then + echo "error: could not resolve the Authentik OAuth provider template ${template_slug}" >&2 + exit 1 +fi + +authorization_flow="$(printf '%s\n' "$template_provider" | jq -r '.authorization_flow')" +invalidation_flow="$(printf '%s\n' "$template_provider" | jq -r '.invalidation_flow')" +property_mappings="$(printf '%s\n' "$template_provider" | jq -c '.property_mappings')" +signing_key="$(printf '%s\n' "$template_provider" | jq -r '.signing_key')" + +provider_payload="$( + jq -n \ + --arg name "$provider_name" \ + --arg authorization_flow "$authorization_flow" \ + --arg invalidation_flow "$invalidation_flow" \ + --arg client_id "$client_id" \ + --arg signing_key "$signing_key" \ + --argjson property_mappings "$property_mappings" \ + --argjson redirect_uris "$redirect_uris_json" \ + '{ + name: $name, + authorization_flow: $authorization_flow, + invalidation_flow: $invalidation_flow, + client_type: "public", + client_id: $client_id, + include_claims_in_id_token: true, + redirect_uris: ($redirect_uris | map({matching_mode: "strict", url: .})), + property_mappings: $property_mappings, + signing_key: $signing_key, + issuer_mode: "per_provider", + sub_mode: "hashed_user_id" + }' +)" + +existing_provider="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -c \ + --arg application_slug "$application_slug" \ + --arg provider_name "$provider_name" \ + '.results[]? 
| select(.assigned_application_slug == $application_slug or .name == $provider_name)' \ + | head -n1 +)" + +if [[ -n "$existing_provider" ]]; then + provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')" + api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$provider_payload" >/dev/null +else + provider_pk="$( + api POST "/api/v3/providers/oauth2/" "$provider_payload" \ + | jq -r '.pk // empty' + )" +fi + +if [[ -z "${provider_pk:-}" ]]; then + echo "error: 1Password OIDC provider did not return a primary key" >&2 + exit 1 +fi + +application_payload="$( + jq -n \ + --arg name "$application_name" \ + --arg slug "$application_slug" \ + --arg provider "$provider_pk" \ + --arg launch_url "$launch_url" \ + '{ + name: $name, + slug: $slug, + provider: ($provider | tonumber), + meta_launch_url: $launch_url, + open_in_new_tab: true, + policy_engine_mode: "any" + }' +)" + +existing_application="$( + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -c --arg slug "$application_slug" '.results[]? 
| select(.slug == $slug)' \ + | head -n1 +)" + +if [[ -n "$existing_application" ]]; then + application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" +else + create_application_result="$( + api_with_status POST "/api/v3/core/applications/" "$application_payload" + )" + create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')" + create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')" + + if [[ "$create_application_status" =~ ^20[01]$ ]]; then + application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')" + elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e ' + (.slug // [] | index("Application with this slug already exists.")) != null + or (.provider // [] | index("Application with this provider already exists.")) != null + ' >/dev/null; then + application_pk="existing-duplicate" + else + printf '%s\n' "$create_application_body" >&2 + echo "error: could not reconcile Authentik application ${application_slug}" >&2 + exit 1 + fi +fi + +if [[ -z "${application_pk:-}" ]]; then + echo "error: 1Password OIDC application did not return a primary key" >&2 + exit 1 +fi + +for _ in $(seq 1 30); do + if curl -fsS "${authentik_url}/application/o/${application_slug}/.well-known/openid-configuration" >/dev/null 2>&1; then + echo "Synced Authentik 1Password OIDC application ${application_slug} (${application_name})." + exit 0 + fi + sleep 2 +done + +echo "warning: 1Password OIDC issuer document for ${application_slug} was not immediately readable; keeping reconciled config." >&2 +echo "Synced Authentik 1Password OIDC application ${application_slug} (${application_name})." 
diff --git a/Scripts/authentik-sync-burrow-directory.sh b/Scripts/authentik-sync-burrow-directory.sh new file mode 100644 index 0000000..277c5f4 --- /dev/null +++ b/Scripts/authentik-sync-burrow-directory.sh @@ -0,0 +1,263 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +directory_json="${AUTHENTIK_BURROW_DIRECTORY_JSON:-[]}" +users_group="${AUTHENTIK_BURROW_USERS_GROUP:-burrow-users}" +admins_group="${AUTHENTIK_BURROW_ADMINS_GROUP:-burrow-admins}" +forgejo_application_slug="${AUTHENTIK_FORGEJO_APPLICATION_SLUG:-}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-burrow-directory.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + AUTHENTIK_BURROW_DIRECTORY_JSON + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_BURROW_USERS_GROUP + AUTHENTIK_BURROW_ADMINS_GROUP + AUTHENTIK_FORGEJO_APPLICATION_SLUG +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +if ! 
printf '%s' "$directory_json" | jq -e 'type == "array"' >/dev/null; then + echo "error: AUTHENTIK_BURROW_DIRECTORY_JSON must be a JSON array" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +lookup_group_pk() { + local group_name="$1" + + api GET "/api/v3/core/groups/?page_size=200&search=${group_name}" \ + | jq -r --arg name "$group_name" '.results[]? | select(.name == $name) | .pk // empty' \ + | head -n1 +} + +ensure_group() { + local group_name="$1" + local payload group_pk + + payload="$( + jq -cn \ + --arg name "$group_name" \ + '{name: $name}' + )" + + group_pk="$(lookup_group_pk "$group_name")" + if [[ -n "$group_pk" ]]; then + api PATCH "/api/v3/core/groups/${group_pk}/" "$payload" >/dev/null + else + group_pk="$( + api POST "/api/v3/core/groups/" "$payload" \ + | jq -r '.pk // empty' + )" + fi + + if [[ -z "$group_pk" ]]; then + echo "error: could not create Authentik group ${group_name}" >&2 + exit 1 + fi + + printf '%s\n' "$group_pk" +} + +lookup_user_pk() { + local username="$1" + + api GET "/api/v3/core/users/?page_size=200&search=${username}" \ + | jq -r --arg username "$username" '.results[]? 
| select(.username == $username) | .pk // empty' \ + | head -n1 +} + +ensure_user() { + local user_spec="$1" + local username name email is_admin groups_json password_file effective_groups_json group_name + local group_pks_json payload user_pk + + username="$(printf '%s\n' "$user_spec" | jq -r '.username')" + name="$(printf '%s\n' "$user_spec" | jq -r '.name')" + email="$(printf '%s\n' "$user_spec" | jq -r '.email')" + is_admin="$(printf '%s\n' "$user_spec" | jq -r '.isAdmin // false')" + groups_json="$(printf '%s\n' "$user_spec" | jq -c '.groups // []')" + password_file="$(printf '%s\n' "$user_spec" | jq -r '.passwordFile // empty')" + + if [[ -z "$username" || "$username" == "null" || -z "$email" || "$email" == "null" ]]; then + echo "error: each Burrow Authentik user requires username and email" >&2 + exit 1 + fi + + effective_groups_json="$( + printf '%s\n' "$groups_json" \ + | jq -c --arg users_group "$users_group" --arg admins_group "$admins_group" --argjson is_admin "$is_admin" ' + . 
+ [$users_group] + (if $is_admin then [$admins_group] else [] end) | unique + ' + )" + + group_pks_json='[]' + while IFS= read -r group_name; do + group_pk="$(ensure_group "$group_name")" + group_pks_json="$( + jq -cn \ + --argjson current "$group_pks_json" \ + --arg next "$group_pk" \ + '$current + [$next]' + )" + done < <(printf '%s\n' "$effective_groups_json" | jq -r '.[]') + + payload="$( + jq -cn \ + --arg username "$username" \ + --arg name "$name" \ + --arg email "$email" \ + --argjson groups "$group_pks_json" \ + '{ + username: $username, + name: $name, + email: $email, + is_active: true, + path: "users", + groups: $groups + }' + )" + + user_pk="$(lookup_user_pk "$username")" + if [[ -n "$user_pk" ]]; then + api PATCH "/api/v3/core/users/${user_pk}/" "$payload" >/dev/null + else + user_pk="$( + api POST "/api/v3/core/users/" "$payload" \ + | jq -r '.pk // empty' + )" + fi + + if [[ -z "$user_pk" ]]; then + echo "error: could not create Authentik user ${username}" >&2 + exit 1 + fi + + if [[ -n "$password_file" ]]; then + if [[ ! -s "$password_file" ]]; then + echo "error: password file for Authentik user ${username} is missing: ${password_file}" >&2 + exit 1 + fi + + api POST "/api/v3/core/users/${user_pk}/set_password/" "$( + jq -cn \ + --arg password "$(tr -d '\r\n' < "$password_file")" \ + '{password: $password}' + )" >/dev/null + fi +} + +lookup_application_pk() { + local slug="$1" + + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -r --arg slug "$slug" '.results[]? 
| select(.slug == $slug) | .pk // empty' \ + | head -n1 +} + +ensure_application_group_binding() { + local application_slug="$1" + local group_name="$2" + local application_pk group_pk existing payload binding_pk + + application_pk="$(lookup_application_pk "$application_slug")" + if [[ -z "$application_pk" ]]; then + echo "warning: could not resolve Authentik application ${application_slug}; skipping application group binding" >&2 + return 0 + fi + + group_pk="$(lookup_group_pk "$group_name")" + if [[ -z "$group_pk" ]]; then + echo "error: could not resolve Authentik group ${group_name}" >&2 + exit 1 + fi + + existing="$( + api GET "/api/v3/policies/bindings/?page_size=200&target=${application_pk}" \ + | jq -c --arg group_pk "$group_pk" '.results[]? | select(.group == $group_pk)' \ + | head -n1 + )" + + payload="$( + jq -cn \ + --arg target "$application_pk" \ + --arg group "$group_pk" \ + '{ + group: $group, + target: $target, + negate: false, + enabled: true, + order: 100, + timeout: 30, + failure_result: false + }' + )" + + if [[ -n "$existing" ]]; then + binding_pk="$(printf '%s\n' "$existing" | jq -r '.pk')" + api PATCH "/api/v3/policies/bindings/${binding_pk}/" "$payload" >/dev/null + else + api POST "/api/v3/policies/bindings/" "$payload" >/dev/null + fi +} + +wait_for_authentik +ensure_group "$users_group" >/dev/null +ensure_group "$admins_group" >/dev/null + +while IFS= read -r user_spec; do + ensure_user "$user_spec" +done < <(printf '%s\n' "$directory_json" | jq -c '.[]') + +if [[ -n "$forgejo_application_slug" ]]; then + ensure_application_group_binding "$forgejo_application_slug" "$users_group" +fi + +echo "Synced Burrow Authentik directory." 
diff --git a/Scripts/authentik-sync-forgejo-oidc.sh b/Scripts/authentik-sync-forgejo-oidc.sh new file mode 100644 index 0000000..7b292dc --- /dev/null +++ b/Scripts/authentik-sync-forgejo-oidc.sh @@ -0,0 +1,250 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +application_slug="${AUTHENTIK_FORGEJO_APPLICATION_SLUG:-git}" +application_name="${AUTHENTIK_FORGEJO_APPLICATION_NAME:-burrow.net}" +provider_name="${AUTHENTIK_FORGEJO_PROVIDER_NAME:-burrow.net}" +client_id="${AUTHENTIK_FORGEJO_CLIENT_ID:-git.burrow.net}" +client_secret="${AUTHENTIK_FORGEJO_CLIENT_SECRET:-}" +launch_url="${AUTHENTIK_FORGEJO_LAUNCH_URL:-https://git.burrow.net/}" +redirect_uris_json="${AUTHENTIK_FORGEJO_REDIRECT_URIS_JSON:-[ + \"https://git.burrow.net/user/oauth2/burrow.net/callback\", + \"https://git.burrow.net/user/oauth2/authentik/callback\", + \"https://git.burrow.net/user/oauth2/GitHub/callback\" +]}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-forgejo-oidc.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + AUTHENTIK_FORGEJO_CLIENT_SECRET + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_FORGEJO_APPLICATION_SLUG + AUTHENTIK_FORGEJO_APPLICATION_NAME + AUTHENTIK_FORGEJO_PROVIDER_NAME + AUTHENTIK_FORGEJO_CLIENT_ID + AUTHENTIK_FORGEJO_LAUNCH_URL + AUTHENTIK_FORGEJO_REDIRECT_URIS_JSON +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +if [[ -z "$client_secret" || "$client_secret" == PENDING* ]]; then + echo "Forgejo OIDC client secret is not configured; skipping Authentik Forgejo sync." >&2 + exit 0 +fi + +if ! 
printf '%s' "$redirect_uris_json" | jq -e 'type == "array" and length > 0' >/dev/null; then + echo "error: AUTHENTIK_FORGEJO_REDIRECT_URIS_JSON must be a non-empty JSON array" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +api_with_status() { + local method="$1" + local path="$2" + local data="${3:-}" + local response_file status + + response_file="$(mktemp)" + trap 'rm -f "$response_file"' RETURN + + if [[ -n "$data" ]]; then + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + )" + else + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + )" + fi + + printf '%s\n' "$status" + cat "$response_file" +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +wait_for_authentik + +template_provider="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -c '.results[]? 
| select(.assigned_application_slug == "ts")' \ + | head -n1 +)" + +if [[ -z "$template_provider" ]]; then + echo "error: could not resolve the Burrow Tailnet OAuth provider template" >&2 + exit 1 +fi + +authorization_flow="$(printf '%s\n' "$template_provider" | jq -r '.authorization_flow')" +invalidation_flow="$(printf '%s\n' "$template_provider" | jq -r '.invalidation_flow')" +property_mappings="$(printf '%s\n' "$template_provider" | jq -c '.property_mappings')" +signing_key="$(printf '%s\n' "$template_provider" | jq -r '.signing_key')" + +provider_payload="$( + jq -n \ + --arg name "$provider_name" \ + --arg authorization_flow "$authorization_flow" \ + --arg invalidation_flow "$invalidation_flow" \ + --arg client_id "$client_id" \ + --arg client_secret "$client_secret" \ + --arg signing_key "$signing_key" \ + --argjson property_mappings "$property_mappings" \ + --argjson redirect_uris "$redirect_uris_json" \ + '{ + name: $name, + authorization_flow: $authorization_flow, + invalidation_flow: $invalidation_flow, + client_type: "confidential", + client_id: $client_id, + client_secret: $client_secret, + include_claims_in_id_token: true, + redirect_uris: ($redirect_uris | map({matching_mode: "strict", url: .})), + property_mappings: $property_mappings, + signing_key: $signing_key, + issuer_mode: "per_provider", + sub_mode: "hashed_user_id" + }' +)" + +existing_provider="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -c \ + --arg application_slug "$application_slug" \ + --arg provider_name "$provider_name" \ + '.results[]? 
| select(.assigned_application_slug == $application_slug or .name == $provider_name)' \ + | head -n1 +)" + +if [[ -n "$existing_provider" ]]; then + provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')" + api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$provider_payload" >/dev/null +else + provider_pk="$( + api POST "/api/v3/providers/oauth2/" "$provider_payload" \ + | jq -r '.pk // empty' + )" +fi + +if [[ -z "${provider_pk:-}" ]]; then + echo "error: Forgejo OIDC provider did not return a primary key" >&2 + exit 1 +fi + +application_payload="$( + jq -n \ + --arg name "$application_name" \ + --arg slug "$application_slug" \ + --arg provider "$provider_pk" \ + --arg launch_url "$launch_url" \ + '{ + name: $name, + slug: $slug, + provider: ($provider | tonumber), + meta_launch_url: $launch_url, + open_in_new_tab: false, + policy_engine_mode: "any" + }' +)" + +existing_application="$( + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -c --arg slug "$application_slug" '.results[]? 
| select(.slug == $slug)' \ + | head -n1 +)" + +if [[ -n "$existing_application" ]]; then + application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" +else + create_application_result="$( + api_with_status POST "/api/v3/core/applications/" "$application_payload" + )" + create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')" + create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')" + + if [[ "$create_application_status" =~ ^20[01]$ ]]; then + application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')" + elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e ' + (.slug // [] | index("Application with this slug already exists.")) != null + or (.provider // [] | index("Application with this provider already exists.")) != null + ' >/dev/null; then + application_pk="existing-duplicate" + else + printf '%s\n' "$create_application_body" >&2 + echo "error: could not reconcile Authentik application ${application_slug}" >&2 + exit 1 + fi +fi + +if [[ -z "${application_pk:-}" ]]; then + echo "error: Forgejo OIDC application did not return a primary key" >&2 + exit 1 +fi + +for _ in $(seq 1 30); do + if curl -fsS "${authentik_url}/application/o/${application_slug}/.well-known/openid-configuration" >/dev/null 2>&1; then + echo "Synced Authentik Forgejo OIDC application ${application_slug} (${application_name})." + exit 0 + fi + sleep 2 +done + +echo "warning: Forgejo OIDC issuer document for ${application_slug} was not immediately readable; keeping reconciled config." >&2 +echo "Synced Authentik Forgejo OIDC application ${application_slug} (${application_name})." 
diff --git a/Scripts/authentik-sync-google-source.sh b/Scripts/authentik-sync-google-source.sh new file mode 100755 index 0000000..a4c9edb --- /dev/null +++ b/Scripts/authentik-sync-google-source.sh @@ -0,0 +1,284 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +google_client_id="${AUTHENTIK_GOOGLE_CLIENT_ID:-}" +google_client_secret="${AUTHENTIK_GOOGLE_CLIENT_SECRET:-}" +source_slug="${AUTHENTIK_GOOGLE_SOURCE_SLUG:-google}" +source_name="${AUTHENTIK_GOOGLE_SOURCE_NAME:-Google}" +identification_stage_name="${AUTHENTIK_GOOGLE_IDENTIFICATION_STAGE_NAME:-default-authentication-identification}" +authentication_flow_slug="${AUTHENTIK_GOOGLE_AUTHENTICATION_FLOW_SLUG:-default-source-authentication}" +enrollment_flow_slug="${AUTHENTIK_GOOGLE_ENROLLMENT_FLOW_SLUG:-default-source-enrollment}" +login_mode="${AUTHENTIK_GOOGLE_LOGIN_MODE:-redirect}" +user_matching_mode="${AUTHENTIK_GOOGLE_USER_MATCHING_MODE:-email_link}" +policy_engine_mode="${AUTHENTIK_GOOGLE_POLICY_ENGINE_MODE:-any}" +google_account_map_json="${AUTHENTIK_GOOGLE_ACCOUNT_MAP_JSON:-[]}" +property_mapping_name="${AUTHENTIK_GOOGLE_PROPERTY_MAPPING_NAME:-Burrow Google Account Map}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-google-source.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + AUTHENTIK_GOOGLE_CLIENT_ID + AUTHENTIK_GOOGLE_CLIENT_SECRET + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_GOOGLE_SOURCE_SLUG + AUTHENTIK_GOOGLE_SOURCE_NAME + AUTHENTIK_GOOGLE_IDENTIFICATION_STAGE_NAME + AUTHENTIK_GOOGLE_AUTHENTICATION_FLOW_SLUG + AUTHENTIK_GOOGLE_ENROLLMENT_FLOW_SLUG + AUTHENTIK_GOOGLE_LOGIN_MODE promoted|redirect + AUTHENTIK_GOOGLE_USER_MATCHING_MODE identifier|email_link|email_deny|username_link|username_deny + AUTHENTIK_GOOGLE_POLICY_ENGINE_MODE all|any + AUTHENTIK_GOOGLE_ACCOUNT_MAP_JSON JSON array of alias mappings + AUTHENTIK_GOOGLE_PROPERTY_MAPPING_NAME +EOF +} + +if [[ 
"${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +if [[ -z "$google_client_id" || -z "$google_client_secret" || "$google_client_id" == PENDING* || "$google_client_secret" == PENDING* ]]; then + echo "Google OAuth credentials are not configured; skipping Authentik Google source sync." >&2 + echo "Set Authorized redirect URI in Google to ${authentik_url}/source/oauth/callback/${source_slug}/" >&2 + exit 0 +fi + +if ! printf '%s' "$google_account_map_json" | jq -e 'type == "array"' >/dev/null; then + echo "error: AUTHENTIK_GOOGLE_ACCOUNT_MAP_JSON must be a JSON array" >&2 + exit 1 +fi + +case "$login_mode" in + promoted|redirect) ;; + *) + echo "warning: unsupported AUTHENTIK_GOOGLE_LOGIN_MODE=$login_mode; falling back to redirect" >&2 + login_mode="redirect" + ;; +esac + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +lookup_single_result() { + local path="$1" + local jq_filter="$2" + + api GET "$path" | jq -r "$jq_filter" | head -n1 +} + +wait_for_authentik + +flow_pk="$( + lookup_single_result \ + "/api/v3/flows/instances/?slug=${authentication_flow_slug}" \ + '.results[] | select(.slug != null) | .pk // empty' +)" +if [[ -z "$flow_pk" ]]; then + echo "error: could not resolve Authentik authentication flow slug ${authentication_flow_slug}" >&2 + exit 1 +fi + 
+enrollment_flow_pk="$( + lookup_single_result \ + "/api/v3/flows/instances/?slug=${enrollment_flow_slug}" \ + '.results[] | select(.slug != null) | .pk // empty' +)" +if [[ -z "$enrollment_flow_pk" ]]; then + echo "error: could not resolve Authentik enrollment flow slug ${enrollment_flow_slug}" >&2 + exit 1 +fi + +identification_stage="$( + api GET "/api/v3/stages/identification/" \ + | jq -c --arg name "$identification_stage_name" '.results[] | select(.name == $name)' +)" +if [[ -z "$identification_stage" ]]; then + echo "error: could not resolve Authentik identification stage ${identification_stage_name}" >&2 + exit 1 +fi + +stage_pk="$(printf '%s\n' "$identification_stage" | jq -r '.pk')" + +property_mapping_payload='[]' +if [[ "$(printf '%s' "$google_account_map_json" | jq 'length')" -gt 0 ]]; then + alias_map_python="$( + printf '%s' "$google_account_map_json" \ + | jq -c ' + map({ + key: (.source_email | ascii_downcase), + value: { + username: .username, + email: .email, + name: .name + } + }) + | from_entries + ' + )" + + oauth_property_mapping_expression="$( + cat </dev/null + else + property_mapping_pk="$( + api POST "/api/v3/propertymappings/source/oauth/" "$oauth_property_mapping_payload" \ + | jq -r '.pk // empty' + )" + fi + + if [[ -z "${property_mapping_pk:-}" ]]; then + echo "error: Google OAuth property mapping did not return a primary key" >&2 + exit 1 + fi + + property_mapping_payload="$(jq -cn --arg property_mapping_pk "$property_mapping_pk" '[$property_mapping_pk]')" +fi + +oauth_source_payload="$( + jq -n \ + --arg name "$source_name" \ + --arg slug "$source_slug" \ + --arg authentication_flow "$flow_pk" \ + --arg enrollment_flow "$enrollment_flow_pk" \ + --arg user_matching_mode "$user_matching_mode" \ + --arg policy_engine_mode "$policy_engine_mode" \ + --argjson user_property_mappings "$property_mapping_payload" \ + --arg consumer_key "$google_client_id" \ + --arg consumer_secret "$google_client_secret" \ + '{ + name: $name, + slug: $slug, 
+ enabled: true, + promoted: true, + authentication_flow: $authentication_flow, + enrollment_flow: $enrollment_flow, + user_property_mappings: $user_property_mappings, + group_property_mappings: [], + policy_engine_mode: $policy_engine_mode, + user_matching_mode: $user_matching_mode, + provider_type: "google", + consumer_key: $consumer_key, + consumer_secret: $consumer_secret + }' +)" + +existing_source="$( + api GET "/api/v3/sources/oauth/?slug=${source_slug}" \ + | jq -c '.results[]?' +)" + +if [[ -n "$existing_source" ]]; then + source_pk="$(printf '%s\n' "$existing_source" | jq -r '.pk')" + api PATCH "/api/v3/sources/oauth/${source_slug}/" "$oauth_source_payload" >/dev/null +else + source_pk="$( + api POST "/api/v3/sources/oauth/" "$oauth_source_payload" \ + | jq -r '.pk // empty' + )" +fi + +if [[ -z "$source_pk" ]]; then + echo "error: Google OAuth source did not return a primary key" >&2 + exit 1 +fi + +stage_patch="$( + printf '%s\n' "$identification_stage" \ + | jq -c \ + --arg source_pk "$source_pk" \ + --arg login_mode "$login_mode" ' + .sources = ( + if $login_mode == "redirect" then + [$source_pk] + else + ([ $source_pk ] + ((.sources // []) | map(select(. != $source_pk)))) + end + ) + | .show_source_labels = true + | if $login_mode == "redirect" then + .user_fields = [] + else + . + end + | { + sources, + show_source_labels, + user_fields + }' +)" + +api PATCH "/api/v3/stages/identification/${stage_pk}/" "$stage_patch" >/dev/null + +echo "Synced Authentik Google source ${source_slug} (${source_pk}) in ${login_mode} mode." 
diff --git a/Scripts/authentik-sync-linear-saml.sh b/Scripts/authentik-sync-linear-saml.sh new file mode 100755 index 0000000..5da64ad --- /dev/null +++ b/Scripts/authentik-sync-linear-saml.sh @@ -0,0 +1,344 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +application_slug="${AUTHENTIK_LINEAR_APPLICATION_SLUG:-linear}" +application_name="${AUTHENTIK_LINEAR_APPLICATION_NAME:-Linear}" +provider_name="${AUTHENTIK_LINEAR_PROVIDER_NAME:-Linear}" +launch_url="${AUTHENTIK_LINEAR_LAUNCH_URL:-https://linear.app/burrownet}" +acs_url="${AUTHENTIK_LINEAR_ACS_URL:-}" +audience="${AUTHENTIK_LINEAR_AUDIENCE:-}" +issuer="${AUTHENTIK_LINEAR_ISSUER:-${authentik_url}/application/saml/${application_slug}/metadata/}" +default_relay_state="${AUTHENTIK_LINEAR_DEFAULT_RELAY_STATE:-}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-linear-saml.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + AUTHENTIK_LINEAR_ACS_URL + AUTHENTIK_LINEAR_AUDIENCE + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_LINEAR_APPLICATION_SLUG + AUTHENTIK_LINEAR_APPLICATION_NAME + AUTHENTIK_LINEAR_PROVIDER_NAME + AUTHENTIK_LINEAR_LAUNCH_URL + AUTHENTIK_LINEAR_ISSUER + AUTHENTIK_LINEAR_DEFAULT_RELAY_STATE +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +if [[ -z "$acs_url" ]]; then + echo "error: AUTHENTIK_LINEAR_ACS_URL is required" >&2 + exit 1 +fi + +if [[ -z "$audience" ]]; then + echo "error: AUTHENTIK_LINEAR_AUDIENCE is required" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X 
"$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +api_with_status() { + local method="$1" + local path="$2" + local data="${3:-}" + local response_file status + + response_file="$(mktemp)" + trap 'rm -f "$response_file"' RETURN + + if [[ -n "$data" ]]; then + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + )" + else + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + )" + fi + + printf '%s\n' "$status" + cat "$response_file" +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +lookup_oauth_template_field() { + local field="$1" + + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -r --arg field "$field" '.results[]? | select(.assigned_application_slug == "ts") | .[$field]' \ + | head -n1 +} + +reconcile_property_mapping() { + local name="$1" + local saml_name="$2" + local friendly_name="$3" + local expression="$4" + local payload existing_pk + + payload="$( + jq -n \ + --arg name "$name" \ + --arg saml_name "$saml_name" \ + --arg friendly_name "$friendly_name" \ + --arg expression "$expression" \ + '{ + name: $name, + saml_name: $saml_name, + friendly_name: $friendly_name, + expression: $expression + }' + )" + + existing_pk="$( + api GET "/api/v3/propertymappings/provider/saml/?page_size=200" \ + | jq -r --arg name "$name" '.results[]? 
| select(.name == $name) | .pk' \ + | head -n1 + )" + + if [[ -n "$existing_pk" ]]; then + api PATCH "/api/v3/propertymappings/provider/saml/${existing_pk}/" "$payload" >/dev/null + printf '%s\n' "$existing_pk" + else + api POST "/api/v3/propertymappings/provider/saml/" "$payload" | jq -r '.pk // empty' + fi +} + +wait_for_authentik + +authorization_flow="$(lookup_oauth_template_field authorization_flow)" +invalidation_flow="$(lookup_oauth_template_field invalidation_flow)" +signing_kp="$(lookup_oauth_template_field signing_key)" + +if [[ -z "$authorization_flow" || -z "$invalidation_flow" || -z "$signing_kp" ]]; then + echo "error: could not resolve Authentik provider defaults from Burrow Tailnet template" >&2 + exit 1 +fi + +email_mapping_pk="$( + reconcile_property_mapping \ + "Burrow Linear SAML Email" \ + "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress" \ + "email" \ + 'return request.user.email' +)" + +name_mapping_pk="$( + reconcile_property_mapping \ + "Burrow Linear SAML Name" \ + "name" \ + "name" \ + 'return request.user.name or request.user.username' +)" + +first_name_mapping_pk="$( + reconcile_property_mapping \ + "Burrow Linear SAML First Name" \ + "firstName" \ + "firstName" \ + $'parts = (request.user.name or "").split(" ", 1)\nif len(parts) > 0 and parts[0]:\n return parts[0]\nreturn request.user.username' +)" + +last_name_mapping_pk="$( + reconcile_property_mapping \ + "Burrow Linear SAML Last Name" \ + "lastName" \ + "lastName" \ + $'parts = (request.user.name or "").rsplit(" ", 1)\nif len(parts) == 2 and parts[1]:\n return parts[1]\nreturn request.user.username' +)" + +if [[ -z "$email_mapping_pk" || -z "$name_mapping_pk" || -z "$first_name_mapping_pk" || -z "$last_name_mapping_pk" ]]; then + echo "error: failed to reconcile Linear SAML property mappings" >&2 + exit 1 +fi + +provider_payload="$( + jq -n \ + --arg name "$provider_name" \ + --arg authorization_flow "$authorization_flow" \ + --arg invalidation_flow 
"$invalidation_flow" \ + --arg acs_url "$acs_url" \ + --arg audience "$audience" \ + --arg issuer "$issuer" \ + --arg signing_kp "$signing_kp" \ + --arg default_relay_state "$default_relay_state" \ + --arg name_id_mapping "$email_mapping_pk" \ + --arg email_mapping "$email_mapping_pk" \ + --arg name_mapping "$name_mapping_pk" \ + --arg first_name_mapping "$first_name_mapping_pk" \ + --arg last_name_mapping "$last_name_mapping_pk" \ + '{ + name: $name, + authorization_flow: $authorization_flow, + invalidation_flow: $invalidation_flow, + acs_url: $acs_url, + audience: $audience, + issuer: $issuer, + signing_kp: $signing_kp, + sign_assertion: true, + sign_response: true, + sp_binding: "post", + name_id_mapping: $name_id_mapping, + property_mappings: [ + $email_mapping, + $name_mapping, + $first_name_mapping, + $last_name_mapping + ] + } + + (if $default_relay_state == "" then {} else {default_relay_state: $default_relay_state} end)' +)" + +existing_provider="$( + api GET "/api/v3/providers/saml/?page_size=200" \ + | jq -c \ + --arg application_slug "$application_slug" \ + --arg provider_name "$provider_name" \ + '.results[]? 
| select(.assigned_application_slug == $application_slug or .name == $provider_name)' \ + | head -n1 +)" + +if [[ -n "$existing_provider" ]]; then + provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')" + api PATCH "/api/v3/providers/saml/${provider_pk}/" "$provider_payload" >/dev/null +else + provider_pk="$( + api POST "/api/v3/providers/saml/" "$provider_payload" \ + | jq -r '.pk // empty' + )" +fi + +if [[ -z "${provider_pk:-}" ]]; then + echo "error: Linear SAML provider did not return a primary key" >&2 + exit 1 +fi + +application_payload="$( + jq -n \ + --arg name "$application_name" \ + --arg slug "$application_slug" \ + --arg provider "$provider_pk" \ + --arg launch_url "$launch_url" \ + '{ + name: $name, + slug: $slug, + provider: ($provider | tonumber), + meta_launch_url: $launch_url, + open_in_new_tab: true, + policy_engine_mode: "any" + }' +)" + +existing_application="$( + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -c --arg slug "$application_slug" '.results[]? 
| select(.slug == $slug)' \ + | head -n1 +)" + +if [[ -n "$existing_application" ]]; then + application_pk="existing" + api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null +else + create_application_result="$( + api_with_status POST "/api/v3/core/applications/" "$application_payload" + )" + create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')" + create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')" + + if [[ "$create_application_status" =~ ^20[01]$ ]]; then + application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')" + elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e ' + (.slug // [] | index("Application with this slug already exists.")) != null + or (.provider // [] | index("Application with this provider already exists.")) != null + ' >/dev/null; then + application_pk="existing-duplicate" + else + printf '%s\n' "$create_application_body" >&2 + echo "error: could not reconcile Authentik application ${application_slug}" >&2 + exit 1 + fi +fi + +if [[ -z "${application_pk:-}" ]]; then + echo "error: Linear SAML application did not return a primary key" >&2 + exit 1 +fi + +for _ in $(seq 1 30); do + metadata_status="$( + curl -sS \ + -o /dev/null \ + -w '%{http_code}' \ + --max-redirs 0 \ + "${authentik_url}/application/saml/${application_slug}/metadata/" \ + || true + )" + case "$metadata_status" in + 200|301|302|307|308) + echo "Synced Authentik Linear SAML application ${application_slug} (${application_name})." + exit 0 + ;; + esac + sleep 2 +done + +echo "warning: Linear SAML metadata for ${application_slug} was not immediately readable; keeping reconciled config." >&2 +echo "Synced Authentik Linear SAML application ${application_slug} (${application_name})." 
diff --git a/Scripts/authentik-sync-linear-scim.sh b/Scripts/authentik-sync-linear-scim.sh new file mode 100755 index 0000000..4ef83e4 --- /dev/null +++ b/Scripts/authentik-sync-linear-scim.sh @@ -0,0 +1,311 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +application_slug="${AUTHENTIK_LINEAR_APPLICATION_SLUG:-linear}" +provider_name="${AUTHENTIK_LINEAR_SCIM_PROVIDER_NAME:-Linear SCIM}" +scim_url="${AUTHENTIK_LINEAR_SCIM_URL:-}" +scim_token_file="${AUTHENTIK_LINEAR_SCIM_TOKEN_FILE:-}" +user_identifier="${AUTHENTIK_LINEAR_SCIM_USER_IDENTIFIER:-email}" +owner_group="${AUTHENTIK_LINEAR_OWNER_GROUP:-linear-owners}" +admin_group="${AUTHENTIK_LINEAR_ADMIN_GROUP:-linear-admins}" +guest_group="${AUTHENTIK_LINEAR_GUEST_GROUP:-linear-guests}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-linear-scim.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + AUTHENTIK_LINEAR_SCIM_URL + AUTHENTIK_LINEAR_SCIM_TOKEN_FILE + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_LINEAR_APPLICATION_SLUG + AUTHENTIK_LINEAR_SCIM_PROVIDER_NAME + AUTHENTIK_LINEAR_SCIM_USER_IDENTIFIER + AUTHENTIK_LINEAR_OWNER_GROUP + AUTHENTIK_LINEAR_ADMIN_GROUP + AUTHENTIK_LINEAR_GUEST_GROUP +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +if [[ -z "$scim_url" ]]; then + echo "error: AUTHENTIK_LINEAR_SCIM_URL is required" >&2 + exit 1 +fi + +if [[ -z "$scim_token_file" || !
-s "$scim_token_file" ]]; then + echo "error: AUTHENTIK_LINEAR_SCIM_TOKEN_FILE is required and must be readable" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +lookup_group_pk() { + local group_name="$1" + + api GET "/api/v3/core/groups/?page_size=200&search=${group_name}" \ + | jq -r --arg name "$group_name" '.results[]? | select(.name == $name) | .pk // empty' \ + | head -n1 +} + +ensure_group() { + local group_name="$1" + local payload group_pk + + payload="$(jq -cn --arg name "$group_name" '{name: $name}')" + group_pk="$(lookup_group_pk "$group_name")" + + if [[ -n "$group_pk" ]]; then + api PATCH "/api/v3/core/groups/${group_pk}/" "$payload" >/dev/null + else + group_pk="$( + api POST "/api/v3/core/groups/" "$payload" \ + | jq -r '.pk // empty' + )" + fi + + if [[ -z "$group_pk" ]]; then + echo "error: could not reconcile Authentik group ${group_name}" >&2 + exit 1 + fi + + printf '%s\n' "$group_pk" +} + +lookup_application() { + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -c --arg slug "$application_slug" '.results[]? | select(.slug == $slug)' \ + | head -n1 +} + +lookup_scim_provider() { + api GET "/api/v3/providers/scim/?page_size=200" \ + | jq -c \ + --arg application_slug "$application_slug" \ + --arg provider_name "$provider_name" \ + '.results[]? 
| select(.assigned_backchannel_application_slug == $application_slug or .name == $provider_name)' \ + | head -n1 +} + +lookup_scim_mapping_pk() { + local managed_name="$1" + + api GET "/api/v3/propertymappings/provider/scim/?page_size=200" \ + | jq -r --arg managed "$managed_name" '.results[]? | select(.managed == $managed) | .pk // empty' \ + | head -n1 +} + +reconcile_property_mapping() { + local name="$1" + local expression="$2" + local payload existing_pk + + payload="$( + jq -n \ + --arg name "$name" \ + --arg expression "$expression" \ + '{ + name: $name, + expression: $expression + }' + )" + + existing_pk="$( + api GET "/api/v3/propertymappings/provider/scim/?page_size=200" \ + | jq -r --arg name "$name" '.results[]? | select(.name == $name) | .pk // empty' \ + | head -n1 + )" + + if [[ -n "$existing_pk" ]]; then + api PATCH "/api/v3/propertymappings/provider/scim/${existing_pk}/" "$payload" >/dev/null + printf '%s\n' "$existing_pk" + else + api POST "/api/v3/propertymappings/provider/scim/" "$payload" \ + | jq -r '.pk // empty' + fi +} + +sync_object() { + local provider_pk="$1" + local model="$2" + local object_id="$3" + + if ! api POST "/api/v3/providers/scim/${provider_pk}/sync/object/" "$( + jq -cn \ + --arg model "$model" \ + --arg object_id "$object_id" \ + '{ + sync_object_model: $model, + sync_object_id: $object_id, + override_dry_run: false + }' + )" >/dev/null; then + echo "warning: could not trigger immediate Linear SCIM sync for ${model} ${object_id}; provider will continue with its normal sync cycle." 
>&2 + fi +} + +wait_for_authentik + +group_mapping_pk="$(lookup_scim_mapping_pk "goauthentik.io/providers/scim/group")" +case "$user_identifier" in + email) + user_mapping_expression=$'# Some implementations require givenName and familyName to be set\ngivenName, familyName = request.user.name, " "\nformatted = request.user.name + " "\nif " " in request.user.name:\n givenName, _, familyName = request.user.name.partition(" ")\n formatted = request.user.name\n\navatar = request.user.avatar\nphotos = None\nif "://" in avatar:\n photos = [{"value": avatar, "type": "photo"}]\n\nlocale = request.user.locale()\nif locale == "":\n locale = None\n\nemails = []\nif request.user.email != "":\n emails = [{\n "value": request.user.email,\n "type": "other",\n "primary": True,\n }]\n\nidentifier = request.user.email\nif identifier == "":\n identifier = request.user.username\n\nreturn {\n "userName": identifier,\n "name": {\n "formatted": formatted,\n "givenName": givenName,\n "familyName": familyName,\n },\n "displayName": request.user.name,\n "photos": photos,\n "locale": locale,\n "active": request.user.is_active,\n "emails": emails,\n}' + ;; + username) + user_mapping_expression=$'# Some implementations require givenName and familyName to be set\ngivenName, familyName = request.user.name, " "\nformatted = request.user.name + " "\nif " " in request.user.name:\n givenName, _, familyName = request.user.name.partition(" ")\n formatted = request.user.name\n\navatar = request.user.avatar\nphotos = None\nif "://" in avatar:\n photos = [{"value": avatar, "type": "photo"}]\n\nlocale = request.user.locale()\nif locale == "":\n locale = None\n\nemails = []\nif request.user.email != "":\n emails = [{\n "value": request.user.email,\n "type": "other",\n "primary": True,\n }]\nreturn {\n "userName": request.user.username,\n "name": {\n "formatted": formatted,\n "givenName": givenName,\n "familyName": familyName,\n },\n "displayName": request.user.name,\n "photos": photos,\n "locale": 
locale,\n "active": request.user.is_active,\n "emails": emails,\n}' + ;; + *) + echo "error: unsupported AUTHENTIK_LINEAR_SCIM_USER_IDENTIFIER value: ${user_identifier}" >&2 + exit 1 + ;; +esac +user_mapping_pk="$(reconcile_property_mapping "Burrow Linear SCIM User" "$user_mapping_expression")" + +if [[ -z "$user_mapping_pk" || -z "$group_mapping_pk" ]]; then + echo "error: could not resolve managed Authentik SCIM property mappings" >&2 + exit 1 +fi + +owner_group_pk="$(ensure_group "$owner_group")" +admin_group_pk="$(ensure_group "$admin_group")" +guest_group_pk="$(ensure_group "$guest_group")" + +provider_payload="$( + jq -n \ + --arg name "$provider_name" \ + --arg url "$scim_url" \ + --arg token "$(tr -d '\r\n' < "$scim_token_file")" \ + --arg user_mapping_pk "$user_mapping_pk" \ + --arg group_mapping_pk "$group_mapping_pk" \ + --arg owner_group_pk "$owner_group_pk" \ + --arg admin_group_pk "$admin_group_pk" \ + --arg guest_group_pk "$guest_group_pk" \ + '{ + name: $name, + url: $url, + token: $token, + auth_mode: "token", + verify_certificates: true, + compatibility_mode: "default", + property_mappings: [$user_mapping_pk], + property_mappings_group: [$group_mapping_pk], + group_filters: [ + $owner_group_pk, + $admin_group_pk, + $guest_group_pk + ], + dry_run: false + }' +)" + +existing_provider="$(lookup_scim_provider)" +if [[ -n "$existing_provider" ]]; then + provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')" + api PATCH "/api/v3/providers/scim/${provider_pk}/" "$provider_payload" >/dev/null +else + provider_pk="$( + api POST "/api/v3/providers/scim/" "$provider_payload" \ + | jq -r '.pk // empty' + )" +fi + +if [[ -z "${provider_pk:-}" ]]; then + echo "error: Linear SCIM provider did not return a primary key" >&2 + exit 1 +fi + +application="$(lookup_application)" +if [[ -z "$application" ]]; then + echo "error: could not resolve Authentik application ${application_slug}" >&2 + exit 1 +fi + +application_payload="$( + printf '%s\n' 
"$application" \ + | jq \ + --arg provider_pk "$provider_pk" \ + '{ + name: .name, + slug: .slug, + provider: .provider, + backchannel_providers: ((.backchannel_providers // []) + [($provider_pk | tonumber)] | unique), + open_in_new_tab: .open_in_new_tab, + meta_launch_url: .meta_launch_url, + policy_engine_mode: .policy_engine_mode + }' +)" +api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null + +group_pks_json="$(jq -cn --arg owner "$owner_group_pk" --arg admin "$admin_group_pk" --arg guest "$guest_group_pk" '[$owner, $admin, $guest]')" +user_pks_json="$( + api GET "/api/v3/core/users/?page_size=200" \ + | jq -c \ + --argjson group_pks "$group_pks_json" \ + '[.results[]? + | select( + ([((.groups // [])[] | tostring)] as $user_groups + | ($group_pks | map(. as $wanted | ($user_groups | index($wanted)) != null) | any)) + ) + | .pk]' +)" + +while IFS= read -r group_pk; do + [[ -z "$group_pk" ]] && continue + sync_object "$provider_pk" "authentik.core.models.Group" "$group_pk" +done < <(printf '%s\n' "$group_pks_json" | jq -r '.[]') + +while IFS= read -r user_pk; do + [[ -z "$user_pk" ]] && continue + sync_object "$provider_pk" "authentik.core.models.User" "$user_pk" +done < <(printf '%s\n' "$user_pks_json" | jq -r '.[]') + +status_json="$(api GET "/api/v3/providers/scim/${provider_pk}/sync/status/" || true)" +if ! printf '%s\n' "$status_json" | jq -e 'has("last_sync_status")' >/dev/null 2>&1; then + echo "warning: could not read Linear SCIM sync status for provider ${provider_pk}; keeping reconciled configuration." >&2 +fi + +echo "Synced Authentik Linear SCIM provider ${provider_name} (${provider_pk}) with groups ${owner_group}, ${admin_group}, ${guest_group}." 
diff --git a/Scripts/authentik-sync-tailnet-auth-flow.sh b/Scripts/authentik-sync-tailnet-auth-flow.sh new file mode 100755 index 0000000..1c715cc --- /dev/null +++ b/Scripts/authentik-sync-tailnet-auth-flow.sh @@ -0,0 +1,309 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +provider_slug="${AUTHENTIK_TAILNET_PROVIDER_SLUG:-ts}" +provider_slugs_json="${AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON:-}" +authentication_flow_name="${AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME:-Burrow Tailnet Authentication}" +authentication_flow_slug="${AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG:-burrow-tailnet-authentication}" +identification_stage_name="${AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME:-burrow-tailnet-identification-stage}" +password_stage_name="${AUTHENTIK_TAILNET_PASSWORD_STAGE_NAME:-burrow-tailnet-password-stage}" +user_login_stage_name="${AUTHENTIK_TAILNET_USER_LOGIN_STAGE_NAME:-burrow-tailnet-user-login-stage}" +google_source_slug="${AUTHENTIK_TAILNET_GOOGLE_SOURCE_SLUG:-google}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-tailnet-auth-flow.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_TAILNET_PROVIDER_SLUG + AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON + AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME + AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG + AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME + AUTHENTIK_TAILNET_PASSWORD_STAGE_NAME + AUTHENTIK_TAILNET_USER_LOGIN_STAGE_NAME + AUTHENTIK_TAILNET_GOOGLE_SOURCE_SLUG +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +if [[ -n "$provider_slugs_json" ]]; then + if ! 
printf '%s' "$provider_slugs_json" | jq -e 'type == "array" and length > 0 and all(.[]; type == "string" and length > 0)' >/dev/null; then + echo "error: AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON must be a non-empty JSON array of strings" >&2 + exit 1 + fi +else + provider_slugs_json="$(jq -cn --arg slug "$provider_slug" '[$slug]')" +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +lookup_stage_by_name() { + local path="$1" + local name="$2" + + api GET "${path}?page_size=200" \ + | jq -c --arg name "$name" '.results[]? | select(.name == $name)' \ + | head -n1 +} + +lookup_flow_pk() { + local slug="$1" + + api GET "/api/v3/flows/instances/?slug=${slug}" \ + | jq -r '.results[]? | select(.slug != null) | .pk // empty' \ + | head -n1 +} + +lookup_source_pk() { + local slug="$1" + + api GET "/api/v3/sources/oauth/?page_size=200&slug=${slug}" \ + | jq -r --arg slug "$slug" '.results[]? 
| select(.slug == $slug) | .pk // empty' \ + | head -n1 +} + +ensure_password_stage() { + local existing payload stage_pk + + existing="$(lookup_stage_by_name "/api/v3/stages/password/" "$password_stage_name")" + payload="$( + jq -cn \ + --arg name "$password_stage_name" \ + '{ + name: $name, + backends: [ + "authentik.core.auth.InbuiltBackend", + "authentik.core.auth.TokenBackend" + ], + allow_show_password: false, + failed_attempts_before_cancel: 5 + }' + )" + + if [[ -n "$existing" ]]; then + stage_pk="$(printf '%s\n' "$existing" | jq -r '.pk')" + api PATCH "/api/v3/stages/password/${stage_pk}/" "$payload" >/dev/null + else + stage_pk="$( + api POST "/api/v3/stages/password/" "$payload" \ + | jq -r '.pk // empty' + )" + fi + + printf '%s\n' "$stage_pk" +} + +ensure_identification_stage() { + local password_stage_pk="$1" + local google_source_pk="$2" + local existing payload stage_pk sources_json + + existing="$(lookup_stage_by_name "/api/v3/stages/identification/" "$identification_stage_name")" + if [[ -n "$google_source_pk" ]]; then + sources_json="$(jq -cn --arg source "$google_source_pk" '[$source]')" + else + sources_json='[]' + fi + + payload="$( + jq -cn \ + --arg name "$identification_stage_name" \ + --arg password_stage "$password_stage_pk" \ + --argjson sources "$sources_json" \ + '{ + name: $name, + user_fields: ["username", "email"], + password_stage: $password_stage, + case_insensitive_matching: true, + show_matched_user: true, + sources: $sources, + show_source_labels: true, + pretend_user_exists: false, + enable_remember_me: false + }' + )" + + if [[ -n "$existing" ]]; then + stage_pk="$(printf '%s\n' "$existing" | jq -r '.pk')" + api PATCH "/api/v3/stages/identification/${stage_pk}/" "$payload" >/dev/null + else + stage_pk="$( + api POST "/api/v3/stages/identification/" "$payload" \ + | jq -r '.pk // empty' + )" + fi + + printf '%s\n' "$stage_pk" +} + +ensure_user_login_stage() { + local existing payload stage_pk + + 
existing="$(lookup_stage_by_name "/api/v3/stages/user_login/" "$user_login_stage_name")" + payload="$( + jq -cn \ + --arg name "$user_login_stage_name" \ + '{ + name: $name, + session_duration: "hours=12", + terminate_other_sessions: false, + remember_me_offset: "seconds=0", + network_binding: "no_binding", + geoip_binding: "no_binding" + }' + )" + + if [[ -n "$existing" ]]; then + stage_pk="$(printf '%s\n' "$existing" | jq -r '.pk')" + api PATCH "/api/v3/stages/user_login/${stage_pk}/" "$payload" >/dev/null + else + stage_pk="$( + api POST "/api/v3/stages/user_login/" "$payload" \ + | jq -r '.pk // empty' + )" + fi + + printf '%s\n' "$stage_pk" +} + +ensure_authentication_flow() { + local existing_pk payload + + existing_pk="$(lookup_flow_pk "$authentication_flow_slug")" + payload="$( + jq -cn \ + --arg name "$authentication_flow_name" \ + --arg slug "$authentication_flow_slug" \ + '{ + name: $name, + title: $name, + slug: $slug, + designation: "authentication", + policy_engine_mode: "any", + layout: "stacked" + }' + )" + + if [[ -n "$existing_pk" ]]; then + api PATCH "/api/v3/flows/instances/${authentication_flow_slug}/" "$payload" >/dev/null + printf '%s\n' "$existing_pk" + else + api POST "/api/v3/flows/instances/" "$payload" \ + | jq -r '.pk // empty' + fi +} + +ensure_flow_binding() { + local flow_pk="$1" + local stage_pk="$2" + local order="$3" + local existing payload binding_pk + + existing="$( + api GET "/api/v3/flows/bindings/?target=${flow_pk}&stage=${stage_pk}&page_size=200" \ + | jq -c '.results[]?' 
\ + | head -n1 + )" + + payload="$( + jq -cn \ + --arg target "$flow_pk" \ + --arg stage "$stage_pk" \ + --argjson order "$order" \ + '{ + target: $target, + stage: $stage, + order: $order, + policy_engine_mode: "any" + }' + )" + + if [[ -n "$existing" ]]; then + binding_pk="$(printf '%s\n' "$existing" | jq -r '.pk')" + api PATCH "/api/v3/flows/bindings/${binding_pk}/" "$payload" >/dev/null + else + api POST "/api/v3/flows/bindings/" "$payload" >/dev/null + fi +} + +wait_for_authentik + +mapfile -t provider_pks < <( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -r --argjson provider_slugs "$provider_slugs_json" ' + .results[]? + | select( + ((.assigned_application_slug // empty) as $assigned | ($provider_slugs | index($assigned)) != null) + or ((.slug // empty) as $slug | ($provider_slugs | index($slug)) != null) + ) + | .pk // empty + ' +) + +if [[ "${#provider_pks[@]}" -eq 0 ]]; then + echo "error: could not resolve any Authentik Tailnet OAuth providers from ${provider_slugs_json}" >&2 + exit 1 +fi + +google_source_pk="$(lookup_source_pk "$google_source_slug" || true)" +password_stage_pk="$(ensure_password_stage)" +identification_stage_pk="$(ensure_identification_stage "$password_stage_pk" "$google_source_pk")" +user_login_stage_pk="$(ensure_user_login_stage)" +authentication_flow_pk="$(ensure_authentication_flow)" + +ensure_flow_binding "$authentication_flow_pk" "$identification_stage_pk" 10 +ensure_flow_binding "$authentication_flow_pk" "$user_login_stage_pk" 30 + +for provider_pk in "${provider_pks[@]}"; do + api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$( + jq -cn --arg flow "$authentication_flow_pk" '{authentication_flow: $flow}' + )" >/dev/null +done + +echo "Synced Burrow Tailnet authentication flow for providers ${provider_slugs_json}." 
diff --git a/Scripts/authentik-sync-tailscale-oidc.sh b/Scripts/authentik-sync-tailscale-oidc.sh new file mode 100755 index 0000000..58fe7e4 --- /dev/null +++ b/Scripts/authentik-sync-tailscale-oidc.sh @@ -0,0 +1,369 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +application_slug="${AUTHENTIK_TAILSCALE_APPLICATION_SLUG:-tailscale}" +application_name="${AUTHENTIK_TAILSCALE_APPLICATION_NAME:-Tailscale}" +provider_name="${AUTHENTIK_TAILSCALE_PROVIDER_NAME:-Tailscale}" +template_slug="${AUTHENTIK_TAILSCALE_TEMPLATE_SLUG:-ts}" +client_id="${AUTHENTIK_TAILSCALE_CLIENT_ID:-tailscale.burrow.net}" +client_secret="${AUTHENTIK_TAILSCALE_CLIENT_SECRET:-}" +launch_url="${AUTHENTIK_TAILSCALE_LAUNCH_URL:-https://login.tailscale.com/start/oidc}" +access_group="${AUTHENTIK_TAILSCALE_ACCESS_GROUP:-}" +default_external_application_slug="${AUTHENTIK_DEFAULT_EXTERNAL_APPLICATION_SLUG:-}" +redirect_uris_json="${AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON:-[ + \"https://login.tailscale.com/a/oauth_response\" +]}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-tailscale-oidc.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + AUTHENTIK_TAILSCALE_CLIENT_SECRET + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_TAILSCALE_APPLICATION_SLUG + AUTHENTIK_TAILSCALE_APPLICATION_NAME + AUTHENTIK_TAILSCALE_PROVIDER_NAME + AUTHENTIK_TAILSCALE_TEMPLATE_SLUG + AUTHENTIK_TAILSCALE_CLIENT_ID + AUTHENTIK_TAILSCALE_LAUNCH_URL + AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON + AUTHENTIK_TAILSCALE_ACCESS_GROUP + AUTHENTIK_DEFAULT_EXTERNAL_APPLICATION_SLUG +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +if [[ -z "$client_secret" || "$client_secret" == PENDING* ]]; then + echo "Tailscale OIDC client secret is not configured; skipping Authentik 
Tailscale sync." >&2 + exit 0 +fi + +if ! printf '%s' "$redirect_uris_json" | jq -e 'type == "array" and length > 0' >/dev/null; then + echo "error: AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON must be a non-empty JSON array" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + fi +} + +api_with_status() { + local method="$1" + local path="$2" + local data="${3:-}" + local response_file status + + response_file="$(mktemp)" + trap 'rm -f "$response_file"' RETURN + + if [[ -n "$data" ]]; then + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + )" + else + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + )" + fi + + printf '%s\n' "$status" + cat "$response_file" +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +wait_for_authentik + +lookup_group_pk() { + local group_name="$1" + + api GET "/api/v3/core/groups/?page_size=200" \ + | jq -r --arg group_name "$group_name" '.results[]? 
| select(.name == $group_name) | .pk // empty' \ + | head -n1 +} + +lookup_application_pk() { + local slug="$1" + local application_pk lookup_result lookup_status + + application_pk="$( + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -r --arg slug "$slug" '.results[]? | select(.slug == $slug) | .pk // empty' \ + | head -n1 + )" + + if [[ -n "$application_pk" ]]; then + printf '%s\n' "$application_pk" + return 0 + fi + + lookup_result="$(api_with_status GET "/api/v3/core/applications/${slug}/")" + lookup_status="$(printf '%s\n' "$lookup_result" | sed -n '1p')" + if [[ "$lookup_status" =~ ^20[01]$ ]]; then + printf '%s\n' "$lookup_result" | sed '1d' | jq -r '.pk // empty' + fi +} + +ensure_application_group_binding() { + local application_slug="$1" + local group_name="$2" + local application_pk group_pk existing payload binding_pk + + application_pk="$(lookup_application_pk "$application_slug")" + if [[ -z "$application_pk" ]]; then + echo "warning: could not resolve Authentik application ${application_slug}; skipping application group binding" >&2 + return 0 + fi + + group_pk="$(lookup_group_pk "$group_name")" + if [[ -z "$group_pk" ]]; then + echo "error: could not resolve Authentik group ${group_name}" >&2 + exit 1 + fi + + existing="$( + api GET "/api/v3/policies/bindings/?page_size=200&target=${application_pk}" \ + | jq -c --arg group_pk "$group_pk" '.results[]? 
| select(.group == $group_pk)' \ + | head -n1 + )" + + payload="$( + jq -cn \ + --arg target "$application_pk" \ + --arg group "$group_pk" \ + '{ + group: $group, + target: $target, + negate: false, + enabled: true, + order: 100, + timeout: 30, + failure_result: false + }' + )" + + if [[ -n "$existing" ]]; then + binding_pk="$(printf '%s\n' "$existing" | jq -r '.pk')" + api PATCH "/api/v3/policies/bindings/${binding_pk}/" "$payload" >/dev/null + else + api POST "/api/v3/policies/bindings/" "$payload" >/dev/null + fi +} + +ensure_default_external_application() { + local application_slug="$1" + local application_pk default_brand brand_payload + + application_pk="$(lookup_application_pk "$application_slug")" + if [[ -z "$application_pk" ]]; then + echo "error: could not resolve Authentik application ${application_slug} for brand default application" >&2 + exit 1 + fi + + default_brand="$( + api GET "/api/v3/core/brands/?page_size=200" \ + | jq -c '.results[]? | select(.default == true)' \ + | head -n1 + )" + + if [[ -z "$default_brand" ]]; then + echo "warning: could not resolve the default Authentik brand; skipping external default application" >&2 + return 0 + fi + + brand_payload="$( + printf '%s\n' "$default_brand" \ + | jq --arg application_pk "$application_pk" '.default_application = $application_pk' + )" + + api PUT "/api/v3/core/brands/$(printf '%s\n' "$default_brand" | jq -r '.brand_uuid')/" "$brand_payload" >/dev/null +} + +template_provider="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -c --arg template_slug "$template_slug" '.results[]? 
| select(.assigned_application_slug == $template_slug)' \ + | head -n1 +)" + +if [[ -z "$template_provider" ]]; then + echo "error: could not resolve the Authentik OAuth provider template ${template_slug}" >&2 + exit 1 +fi + +authorization_flow="$(printf '%s\n' "$template_provider" | jq -r '.authorization_flow')" +invalidation_flow="$(printf '%s\n' "$template_provider" | jq -r '.invalidation_flow')" +property_mappings="$(printf '%s\n' "$template_provider" | jq -c '.property_mappings')" +signing_key="$(printf '%s\n' "$template_provider" | jq -r '.signing_key')" + +provider_payload="$( + jq -n \ + --arg name "$provider_name" \ + --arg authorization_flow "$authorization_flow" \ + --arg invalidation_flow "$invalidation_flow" \ + --arg client_id "$client_id" \ + --arg client_secret "$client_secret" \ + --arg signing_key "$signing_key" \ + --argjson property_mappings "$property_mappings" \ + --argjson redirect_uris "$redirect_uris_json" \ + '{ + name: $name, + authorization_flow: $authorization_flow, + invalidation_flow: $invalidation_flow, + client_type: "confidential", + client_id: $client_id, + client_secret: $client_secret, + include_claims_in_id_token: true, + redirect_uris: ($redirect_uris | map({matching_mode: "strict", url: .})), + property_mappings: $property_mappings, + signing_key: $signing_key, + issuer_mode: "per_provider", + sub_mode: "hashed_user_id" + }' +)" + +existing_provider="$( + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -c \ + --arg application_slug "$application_slug" \ + --arg provider_name "$provider_name" \ + '.results[]? 
| select(.assigned_application_slug == $application_slug or .name == $provider_name)' \ + | head -n1 +)" + +if [[ -n "$existing_provider" ]]; then + provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')" + api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$provider_payload" >/dev/null +else + provider_pk="$( + api POST "/api/v3/providers/oauth2/" "$provider_payload" \ + | jq -r '.pk // empty' + )" +fi + +if [[ -z "${provider_pk:-}" ]]; then + echo "error: Tailscale OIDC provider did not return a primary key" >&2 + exit 1 +fi + +application_payload="$( + jq -n \ + --arg name "$application_name" \ + --arg slug "$application_slug" \ + --arg provider "$provider_pk" \ + --arg launch_url "$launch_url" \ + '{ + name: $name, + slug: $slug, + provider: ($provider | tonumber), + meta_launch_url: $launch_url, + open_in_new_tab: true, + policy_engine_mode: "any" + }' +)" + +existing_application="$( + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -c --arg slug "$application_slug" '.results[]? 
| select(.slug == $slug)' \ + | head -n1 +)" + +if [[ -n "$existing_application" ]]; then + application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" + api PATCH "/api/v3/core/applications/${application_pk}/" "$application_payload" >/dev/null +else + create_application_result="$( + api_with_status POST "/api/v3/core/applications/" "$application_payload" + )" + create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')" + create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')" + + if [[ "$create_application_status" =~ ^20[01]$ ]]; then + application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')" + elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e ' + (.slug // [] | index("Application with this slug already exists.")) != null + or (.provider // [] | index("Application with this provider already exists.")) != null + ' >/dev/null; then + application_pk="existing-duplicate" + else + printf '%s\n' "$create_application_body" >&2 + echo "error: could not reconcile Authentik application ${application_slug}" >&2 + exit 1 + fi +fi + +if [[ -z "${application_pk:-}" ]]; then + echo "error: Tailscale OIDC application did not return a primary key" >&2 + exit 1 +fi + +if [[ -n "$access_group" ]]; then + ensure_application_group_binding "$application_slug" "$access_group" +fi + +if [[ -n "$default_external_application_slug" ]]; then + ensure_default_external_application "$default_external_application_slug" +fi + +for _ in $(seq 1 30); do + if curl -fsS "${authentik_url}/application/o/${application_slug}/.well-known/openid-configuration" >/dev/null 2>&1; then + echo "Synced Authentik Tailscale OIDC application ${application_slug} (${application_name})." + exit 0 + fi + sleep 2 +done + +echo "warning: Tailscale OIDC issuer document for ${application_slug} was not immediately readable; keeping reconciled config." 
>&2 +echo "Synced Authentik Tailscale OIDC application ${application_slug} (${application_name})." diff --git a/Scripts/authentik-sync-zulip-saml.sh b/Scripts/authentik-sync-zulip-saml.sh new file mode 100644 index 0000000..cd18752 --- /dev/null +++ b/Scripts/authentik-sync-zulip-saml.sh @@ -0,0 +1,412 @@ +#!/usr/bin/env bash +set -euo pipefail + +authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}" +bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}" +application_slug="${AUTHENTIK_ZULIP_APPLICATION_SLUG:-zulip}" +application_name="${AUTHENTIK_ZULIP_APPLICATION_NAME:-Zulip}" +provider_name="${AUTHENTIK_ZULIP_PROVIDER_NAME:-Zulip}" +acs_url="${AUTHENTIK_ZULIP_ACS_URL:-https://chat.burrow.net/complete/saml/}" +audience="${AUTHENTIK_ZULIP_AUDIENCE:-https://chat.burrow.net}" +launch_url="${AUTHENTIK_ZULIP_LAUNCH_URL:-https://chat.burrow.net/}" +access_group="${AUTHENTIK_ZULIP_ACCESS_GROUP:-}" +admin_group="${AUTHENTIK_ZULIP_ADMIN_GROUP:-}" +issuer="${AUTHENTIK_ZULIP_ISSUER:-$authentik_url}" + +usage() { + cat <<'EOF' +Usage: Scripts/authentik-sync-zulip-saml.sh + +Required environment: + AUTHENTIK_BOOTSTRAP_TOKEN + +Optional environment: + AUTHENTIK_URL + AUTHENTIK_ZULIP_APPLICATION_SLUG + AUTHENTIK_ZULIP_APPLICATION_NAME + AUTHENTIK_ZULIP_PROVIDER_NAME + AUTHENTIK_ZULIP_ACS_URL + AUTHENTIK_ZULIP_AUDIENCE + AUTHENTIK_ZULIP_LAUNCH_URL + AUTHENTIK_ZULIP_ACCESS_GROUP + AUTHENTIK_ZULIP_ADMIN_GROUP + AUTHENTIK_ZULIP_ISSUER +EOF +} + +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + usage + exit 0 +fi + +if [[ -z "$bootstrap_token" ]]; then + echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2 + exit 1 +fi + +api() { + local method="$1" + local path="$2" + local data="${3:-}" + + if [[ -n "$data" ]]; then + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + else + curl -fsS \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + 
"${authentik_url}${path}" + fi +} + +api_with_status() { + local method="$1" + local path="$2" + local data="${3:-}" + local response_file status + + response_file="$(mktemp)" + trap 'rm -f "$response_file"' RETURN + + if [[ -n "$data" ]]; then + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + -H "Content-Type: application/json" \ + -d "$data" \ + "${authentik_url}${path}" + )" + else + status="$( + curl -sS \ + -o "$response_file" \ + -w '%{http_code}' \ + -X "$method" \ + -H "Authorization: Bearer ${bootstrap_token}" \ + "${authentik_url}${path}" + )" + fi + + printf '%s\n' "$status" + cat "$response_file" +} + +wait_for_authentik() { + for _ in $(seq 1 90); do + if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + + echo "error: Authentik did not become ready at ${authentik_url}" >&2 + exit 1 +} + +lookup_oauth_template_field() { + local field="$1" + + api GET "/api/v3/providers/oauth2/?page_size=200" \ + | jq -r --arg field "$field" '.results[]? | select(.assigned_application_slug == "ts") | .[$field]' \ + | head -n1 +} + +lookup_group_pk() { + local group_name="$1" + + api GET "/api/v3/core/groups/?page_size=200" \ + | jq -r --arg group_name "$group_name" '.results[]? | select(.name == $group_name) | .pk // empty' \ + | head -n1 +} + +lookup_application_pk() { + local slug="$1" + + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -r --arg slug "$slug" '.results[]? 
| select(.slug == $slug) | .pk // empty' \ + | head -n1 +} + +ensure_application_group_binding() { + local application_slug="$1" + local group_name="$2" + local application_pk group_pk existing payload binding_pk + + application_pk="$(lookup_application_pk "$application_slug")" + if [[ -z "$application_pk" ]]; then + echo "warning: could not resolve Authentik application ${application_slug}; skipping application group binding" >&2 + return 0 + fi + + group_pk="$(lookup_group_pk "$group_name")" + if [[ -z "$group_pk" ]]; then + echo "error: could not resolve Authentik group ${group_name}" >&2 + exit 1 + fi + + existing="$( + api GET "/api/v3/policies/bindings/?page_size=200&target=${application_pk}" \ + | jq -c --arg group_pk "$group_pk" '.results[]? | select(.group == $group_pk)' \ + | head -n1 + )" + + payload="$( + jq -cn \ + --arg target "$application_pk" \ + --arg group "$group_pk" \ + '{ + group: $group, + target: $target, + negate: false, + enabled: true, + order: 100, + timeout: 30, + failure_result: false + }' + )" + + if [[ -n "$existing" ]]; then + binding_pk="$(printf '%s\n' "$existing" | jq -r '.pk')" + api PATCH "/api/v3/policies/bindings/${binding_pk}/" "$payload" >/dev/null + else + api POST "/api/v3/policies/bindings/" "$payload" >/dev/null + fi +} + +reconcile_property_mapping() { + local name="$1" + local saml_name="$2" + local friendly_name="$3" + local expression="$4" + local payload existing_pk + + payload="$( + jq -n \ + --arg name "$name" \ + --arg saml_name "$saml_name" \ + --arg friendly_name "$friendly_name" \ + --arg expression "$expression" \ + '{ + name: $name, + saml_name: $saml_name, + friendly_name: $friendly_name, + expression: $expression + }' + )" + + existing_pk="$( + api GET "/api/v3/propertymappings/provider/saml/?page_size=200" \ + | jq -r --arg name "$name" '.results[]? 
| select(.name == $name) | .pk' \ + | head -n1 + )" + + if [[ -n "$existing_pk" ]]; then + api PATCH "/api/v3/propertymappings/provider/saml/${existing_pk}/" "$payload" >/dev/null + printf '%s\n' "$existing_pk" + else + api POST "/api/v3/propertymappings/provider/saml/" "$payload" | jq -r '.pk // empty' + fi +} + +wait_for_authentik + +authorization_flow="$(lookup_oauth_template_field authorization_flow)" +invalidation_flow="$(lookup_oauth_template_field invalidation_flow)" +signing_kp="$(lookup_oauth_template_field signing_key)" + +if [[ -z "$authorization_flow" || -z "$invalidation_flow" || -z "$signing_kp" ]]; then + echo "error: could not resolve Authentik provider defaults from Burrow Tailnet template" >&2 + exit 1 +fi + +email_mapping_pk="$( + reconcile_property_mapping \ + "Burrow Zulip SAML Email" \ + "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress" \ + "email" \ + 'return request.user.email' +)" + +name_mapping_pk="$( + reconcile_property_mapping \ + "Burrow Zulip SAML Name" \ + "name" \ + "name" \ + 'return request.user.name or request.user.username' +)" + +first_name_mapping_pk="$( + reconcile_property_mapping \ + "Burrow Zulip SAML First Name" \ + "firstName" \ + "firstName" \ + $'parts = (request.user.name or "").split(" ", 1)\nif len(parts) > 0 and parts[0]:\n return parts[0]\nreturn request.user.username' +)" + +last_name_mapping_pk="$( + reconcile_property_mapping \ + "Burrow Zulip SAML Last Name" \ + "lastName" \ + "lastName" \ + $'parts = (request.user.name or "").rsplit(" ", 1)\nif len(parts) == 2 and parts[1]:\n return parts[1]\nreturn request.user.username' +)" + +role_mapping_pk="" +if [[ -n "$admin_group" ]]; then + role_mapping_pk="$( + reconcile_property_mapping \ + "Burrow Zulip SAML Role" \ + "zulip_role" \ + "zulip_role" \ + $'admin_group = "'$admin_group$'"\nif any(group.name == admin_group for group in request.user.ak_groups.all()):\n return "owner"\nreturn None' + )" +fi + +if [[ -z "$email_mapping_pk" || -z 
"$name_mapping_pk" || -z "$first_name_mapping_pk" || -z "$last_name_mapping_pk" ]]; then + echo "error: failed to reconcile Zulip SAML property mappings" >&2 + exit 1 +fi + +provider_payload="$( + jq -n \ + --arg name "$provider_name" \ + --arg authorization_flow "$authorization_flow" \ + --arg invalidation_flow "$invalidation_flow" \ + --arg acs_url "$acs_url" \ + --arg audience "$audience" \ + --arg issuer "$issuer" \ + --arg signing_kp "$signing_kp" \ + --arg name_id_mapping "$email_mapping_pk" \ + --arg email_mapping "$email_mapping_pk" \ + --arg name_mapping "$name_mapping_pk" \ + --arg first_name_mapping "$first_name_mapping_pk" \ + --arg last_name_mapping "$last_name_mapping_pk" \ + --arg role_mapping "$role_mapping_pk" \ + '{ + name: $name, + authorization_flow: $authorization_flow, + invalidation_flow: $invalidation_flow, + acs_url: $acs_url, + audience: $audience, + issuer: $issuer, + signing_kp: $signing_kp, + sign_assertion: true, + sign_response: true, + sp_binding: "post", + name_id_mapping: $name_id_mapping, + property_mappings: [ + $email_mapping, + $name_mapping, + $first_name_mapping, + $last_name_mapping + ] + (if $role_mapping != "" then [$role_mapping] else [] end) + }' +)" + +existing_provider="$( + api GET "/api/v3/providers/saml/?page_size=200" \ + | jq -c \ + --arg application_slug "$application_slug" \ + --arg provider_name "$provider_name" \ + '.results[]? 
| select(.assigned_application_slug == $application_slug or .name == $provider_name)' \ + | head -n1 +)" + +if [[ -n "$existing_provider" ]]; then + provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')" + api PATCH "/api/v3/providers/saml/${provider_pk}/" "$provider_payload" >/dev/null +else + provider_pk="$( + api POST "/api/v3/providers/saml/" "$provider_payload" \ + | jq -r '.pk // empty' + )" +fi + +if [[ -z "${provider_pk:-}" ]]; then + echo "error: Zulip SAML provider did not return a primary key" >&2 + exit 1 +fi + +application_payload="$( + jq -n \ + --arg name "$application_name" \ + --arg slug "$application_slug" \ + --arg provider "$provider_pk" \ + --arg launch_url "$launch_url" \ + '{ + name: $name, + slug: $slug, + provider: ($provider | tonumber), + meta_launch_url: $launch_url, + open_in_new_tab: true, + policy_engine_mode: "any" + }' +)" + +existing_application="$( + api GET "/api/v3/core/applications/?page_size=200" \ + | jq -c --arg slug "$application_slug" '.results[]? 
| select(.slug == $slug)' \ + | head -n1 +)" + +if [[ -n "$existing_application" ]]; then + application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')" + api PATCH "/api/v3/core/applications/${application_pk}/" "$application_payload" >/dev/null +else + create_application_result="$( + api_with_status POST "/api/v3/core/applications/" "$application_payload" + )" + create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')" + create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')" + + if [[ "$create_application_status" =~ ^20[01]$ ]]; then + application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')" + elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e ' + (.slug // [] | index("Application with this slug already exists.")) != null + or (.provider // [] | index("Application with this provider already exists.")) != null + ' >/dev/null; then + application_pk="existing-duplicate" + else + printf '%s\n' "$create_application_body" >&2 + echo "error: could not reconcile Authentik application ${application_slug}" >&2 + exit 1 + fi +fi + +if [[ -z "${application_pk:-}" ]]; then + echo "error: Zulip SAML application did not return a primary key" >&2 + exit 1 +fi + +if [[ -n "$access_group" ]]; then + ensure_application_group_binding "$application_slug" "$access_group" +fi + +for _ in $(seq 1 30); do + metadata_status="$( + curl -sS \ + -o /dev/null \ + -w '%{http_code}' \ + --max-redirs 0 \ + "${authentik_url}/application/saml/${application_slug}/metadata/" \ + || true + )" + case "$metadata_status" in + 200|301|302|307|308) + echo "Synced Authentik Zulip SAML application ${application_slug} (${application_name})." + exit 0 + ;; + esac + sleep 2 +done + +echo "warning: Zulip SAML metadata for ${application_slug} was not immediately readable; keeping reconciled config." 
>&2 +echo "Synced Authentik Zulip SAML application ${application_slug} (${application_name})." diff --git a/Scripts/bep b/Scripts/bep new file mode 100755 index 0000000..1c6bd64 --- /dev/null +++ b/Scripts/bep @@ -0,0 +1,133 @@ +#!/usr/bin/env bash +set -euo pipefail + +repo_root=$(git rev-parse --show-toplevel) +proposals_dir="$repo_root/evolution/proposals" + +auto_browse() { + if command -v wisu >/dev/null 2>&1; then + exec wisu -i -g --icons "$repo_root/evolution" + fi + exec ls -la "$repo_root/evolution" +} + +usage() { + cat <<'USAGE' +Usage: bep [command] + +Commands: + list [--status ] List BEPs, optionally filtered by status. + open Open a BEP in $EDITOR. + help Show this help. + +If no command is provided, bep launches a simple browser for evolution/. +USAGE +} + +normalize_id() { + local raw="$1" + if [[ "$raw" =~ ^BEP-[0-9]+$ ]]; then + printf '%s' "$raw" + return 0 + fi + if [[ "$raw" =~ ^[0-9]+$ ]]; then + printf 'BEP-%04d' "$raw" + return 0 + fi + return 1 +} + +read_status() { + local file="$1" + awk -F ': ' '/^Status:/ {print $2; exit}' "$file" +} + +read_title() { + local file="$1" + local line + line=$(head -n 1 "$file" || true) + printf '%s' "$line" | sed -E 's/^# `[^`]+`[[:space:]]+//; s/^[^A-Za-z0-9]+//' +} + +list_bep() { + local filter="${1:-}" + local filter_lower="" + if [[ -n "$filter" ]]; then + filter_lower=$(printf '%s' "$filter" | tr '[:upper:]' '[:lower:]') + fi + + printf '%-10s %-18s %s\n' "BEP" "Status" "Title" + local file + local entries=() + for file in "$proposals_dir"/BEP-*.md; do + [[ -e "$file" ]] || continue + local base + base=$(basename "$file") + local id + id=$(printf '%s' "$base" | cut -d- -f1-2) + local status + status=$(read_status "$file") + local status_lower + status_lower=$(printf '%s' "$status" | tr '[:upper:]' '[:lower:]') + if [[ -n "$filter_lower" && "$status_lower" != "$filter_lower" ]]; then + continue + fi + local title + title=$(read_title "$file") + entries+=("$(printf '%-10s %-18s %s' "$id" "$status" 
"$title")") + done + if [[ ${#entries[@]} -gt 0 ]]; then + printf '%s\n' "${entries[@]}" | sort + fi +} + +open_bep() { + local raw="$1" + local id + if ! id=$(normalize_id "$raw"); then + echo "Unknown BEP id: $raw" >&2 + exit 1 + fi + local matches + matches=("$proposals_dir"/"$id"-*.md) + if [[ ${#matches[@]} -eq 0 || ! -e "${matches[0]}" ]]; then + echo "No proposal found for $id" >&2 + exit 1 + fi + if [[ ${#matches[@]} -gt 1 ]]; then + echo "Multiple proposals match $id:" >&2 + printf ' %s\n' "${matches[@]}" >&2 + exit 1 + fi + local editor="${EDITOR:-vi}" + exec "$editor" "${matches[0]}" +} + +command=${1:-} +case "$command" in + "") + auto_browse + ;; + list) + if [[ ${2:-} == "--status" && -n ${3:-} ]]; then + list_bep "$3" + else + list_bep + fi + ;; + open) + if [[ -z ${2:-} ]]; then + echo "bep open requires an id" >&2 + exit 1 + fi + open_bep "$2" + ;; + help|-h|--help) + usage + ;; + *) + echo "Unknown command: $command" >&2 + usage + exit 1 + ;; +esac diff --git a/Scripts/bootstrap-forge-intake.sh b/Scripts/bootstrap-forge-intake.sh index b927083..0cc1d91 100644 --- a/Scripts/bootstrap-forge-intake.sh +++ b/Scripts/bootstrap-forge-intake.sh @@ -3,8 +3,6 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" -# shellcheck source=Scripts/_burrow-secrets.sh -source "${SCRIPT_DIR}/_burrow-secrets.sh" usage() { cat <<'EOF' @@ -12,33 +10,27 @@ Usage: Scripts/bootstrap-forge-intake.sh [options] Copy the minimum Burrow forge bootstrap secrets onto the target host under /var/lib/burrow/intake with the ownership expected by the NixOS services. -Legacy path only: the current forge runtime consumes agenix secrets directly. 
Options: --host SSH target (default: root@git.burrow.net) --ssh-key SSH private key used to reach the host - (default: secrets/forgejo/agent-ssh-key.age, then intake/) + (default: intake/agent_at_burrow_net_ed25519) --password-file Forgejo admin bootstrap password file - (default: secrets/forgejo/admin-password.age, then intake/) + (default: intake/forgejo_pass_contact_at_burrow_net.txt) --agent-key-file Agent SSH private key copied for runner bootstrap - (default: secrets/forgejo/agent-ssh-key.age, then intake/) + (default: intake/agent_at_burrow_net_ed25519) --no-verify Skip remote ls/stat verification after install -h, --help Show this help text EOF } HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" -SSH_KEY="${BURROW_FORGE_SSH_KEY:-}" -PASSWORD_FILE="${BURROW_FORGE_PASSWORD_FILE:-}" -AGENT_KEY_FILE="${BURROW_FORGE_AGENT_KEY_FILE:-}" +SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" +PASSWORD_FILE="${BURROW_FORGE_PASSWORD_FILE:-${REPO_ROOT}/intake/forgejo_pass_contact_at_burrow_net.txt}" +AGENT_KEY_FILE="${BURROW_FORGE_AGENT_KEY_FILE:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" VERIFY=1 -cleanup() { - burrow_cleanup_secret_tmpfiles -} -trap cleanup EXIT - while [[ $# -gt 0 ]]; do case "$1" in --host) @@ -75,29 +67,12 @@ done mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")" -SSH_KEY="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "${SSH_KEY}" \ - "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \ - "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \ - "${HOME}/.ssh/agent_at_burrow_net_ed25519" -)" -PASSWORD_FILE="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "${PASSWORD_FILE}" \ - "${REPO_ROOT}/intake/forgejo_pass_contact_at_burrow_net.txt" \ - "${REPO_ROOT}/secrets/forgejo/admin-password.age" -)" -AGENT_KEY_FILE="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "${AGENT_KEY_FILE}" \ - 
"${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \ - "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \ - "${HOME}/.ssh/agent_at_burrow_net_ed25519" -)" +for path in "${SSH_KEY}" "${PASSWORD_FILE}" "${AGENT_KEY_FILE}"; do + if [[ ! -s "${path}" ]]; then + echo "required file missing or empty: ${path}" >&2 + exit 1 + fi +done ssh_opts=( -i "${SSH_KEY}" diff --git a/Scripts/check-bep-metadata.py b/Scripts/check-bep-metadata.py new file mode 100755 index 0000000..d054934 --- /dev/null +++ b/Scripts/check-bep-metadata.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 + +from __future__ import annotations + +import pathlib +import re +import sys + + +REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent +PROPOSALS_DIR = REPO_ROOT / "evolution" / "proposals" +ALLOWED_STATUSES = { + "Pitch", + "Draft", + "In Review", + "Accepted", + "Implemented", + "Rejected", + "Returned for Revision", + "Superseded", + "Archived", +} +REQUIRED_FIELDS = [ + "Status", + "Proposal", + "Authors", + "Coordinator", + "Reviewers", + "Constitution Sections", + "Implementation PRs", + "Decision Date", +] + + +def text_block_lines(path: pathlib.Path) -> list[str]: + content = path.read_text(encoding="utf-8") + match = re.search(r"```text\n(.*?)\n```", content, re.DOTALL) + if not match: + raise ValueError("missing leading ```text metadata block") + return [line.rstrip() for line in match.group(1).splitlines() if line.strip()] + + +def validate(path: pathlib.Path) -> list[str]: + errors: list[str] = [] + proposal_id = path.name.split("-", 2)[:2] + expected_id = "-".join(proposal_id).removesuffix(".md") + + try: + lines = text_block_lines(path) + except ValueError as exc: + return [f"{path}: {exc}"] + + field_names = [line.split(":", 1)[0] for line in lines] + if field_names != REQUIRED_FIELDS: + errors.append( + f"{path}: metadata fields must appear in order {', '.join(REQUIRED_FIELDS)}" + ) + return errors + + fields = dict(line.split(":", 1) for line in lines) + fields = {key.strip(): 
value.strip() for key, value in fields.items()} + + if fields["Status"] not in ALLOWED_STATUSES: + errors.append(f"{path}: invalid Status {fields['Status']!r}") + + if fields["Proposal"] != expected_id: + errors.append( + f"{path}: Proposal field {fields['Proposal']!r} does not match filename id {expected_id!r}" + ) + + if fields["Status"] in {"Accepted", "Implemented", "Superseded", "Rejected", "Archived"} and fields["Decision Date"] == "Pending": + errors.append( + f"{path}: Decision Date must not be Pending once status is {fields['Status']}" + ) + + return errors + + +def main() -> int: + errors: list[str] = [] + for path in sorted(PROPOSALS_DIR.glob("BEP-*.md")): + errors.extend(validate(path)) + + if errors: + for error in errors: + print(error, file=sys.stderr) + return 1 + + print(f"checked {len(list(PROPOSALS_DIR.glob('BEP-*.md')))} BEPs") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/Scripts/check-forge-host.sh b/Scripts/check-forge-host.sh index 05ddeca..0f79bf4 100755 --- a/Scripts/check-forge-host.sh +++ b/Scripts/check-forge-host.sh @@ -3,8 +3,6 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" -# shellcheck source=Scripts/_burrow-secrets.sh -source "${SCRIPT_DIR}/_burrow-secrets.sh" usage() { cat <<'EOF' @@ -14,21 +12,18 @@ Run a post-boot verification pass against the Burrow forge host. 
Options: --host SSH target (default: root@git.burrow.net) - --ssh-key SSH private key (default: secrets/forgejo/agent-ssh-key.age, then intake/) + --ssh-key SSH private key (default: intake/agent_at_burrow_net_ed25519) --expect-nsc Fail if forgejo-nsc services are not active + --expect-tailnet Fail if Authentik and Headscale services are not active -h, --help Show this help text EOF } HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" -SSH_KEY="${BURROW_FORGE_SSH_KEY:-}" +SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" EXPECT_NSC=0 - -cleanup() { - burrow_cleanup_secret_tmpfiles -} -trap cleanup EXIT +EXPECT_TAILNET=0 while [[ $# -gt 0 ]]; do case "$1" in @@ -44,6 +39,10 @@ while [[ $# -gt 0 ]]; do EXPECT_NSC=1 shift ;; + --expect-tailnet) + EXPECT_TAILNET=1 + shift + ;; -h|--help) usage exit 0 @@ -58,17 +57,10 @@ done mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")" -SSH_KEY="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "${SSH_KEY}" \ - "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \ - "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \ - "${HOME}/.ssh/agent_at_burrow_net_ed25519" -)" || { - echo "forge SSH key could not be resolved" >&2 +if [[ ! 
-f "${SSH_KEY}" ]]; then + echo "forge SSH key not found: ${SSH_KEY}" >&2 exit 1 -} +fi ssh \ -i "${SSH_KEY}" \ @@ -77,6 +69,7 @@ ssh \ -o StrictHostKeyChecking=accept-new \ "${HOST}" \ EXPECT_NSC="${EXPECT_NSC}" \ + EXPECT_TAILNET="${EXPECT_TAILNET}" \ 'bash -s' <<'EOF' set -euo pipefail @@ -93,6 +86,13 @@ nsc_services=( forgejo-nsc-autoscaler.service ) +tailnet_services=( + burrow-authentik-runtime.service + burrow-authentik-ready.service + headscale.service + headscale-bootstrap.service +) + show_service() { local service="$1" systemctl show \ @@ -145,13 +145,41 @@ for service in "${nsc_services[@]}"; do fi done +for service in "${tailnet_services[@]}"; do + echo "== ${service} ==" + show_service "${service}" || true + if [[ "${EXPECT_TAILNET}" == "1" ]] && ! service_is_healthy "${service}"; then + echo "required tailnet service is not active: ${service}" >&2 + exit 1 + fi +done + echo "== intake ==" ls -l /var/lib/burrow/intake || true +if [[ "${EXPECT_TAILNET}" == "1" ]]; then + echo "== agenix ==" + ls -l /run/agenix || true + test -s /run/agenix/burrowAuthentikEnv + test -s /run/agenix/burrowHeadscaleOidcClientSecret +fi + +if [[ "${EXPECT_NSC}" == "1" ]]; then + echo "== agenix-nsc ==" + ls -l /run/agenix || true + test -s /run/agenix/burrowForgejoNscToken + test -s /run/agenix/burrowForgejoNscDispatcherConfig + test -s /run/agenix/burrowForgejoNscAutoscalerConfig +fi + if command -v curl >/dev/null 2>&1; then echo "== http-local ==" curl -fsS -o /dev/null -w 'forgejo_login %{http_code}\n' http://127.0.0.1:3000/user/login curl -fsS -o /dev/null -H 'Host: burrow.net' -w 'burrow_root %{http_code}\n' http://127.0.0.1/ curl -fsS -o /dev/null -H 'Host: git.burrow.net' -w 'git_login %{http_code}\n' http://127.0.0.1/user/login + if [[ "${EXPECT_TAILNET}" == "1" ]]; then + curl -fsS -o /dev/null -H 'Host: auth.burrow.net' -w 'authentik_ready %{http_code}\n' http://127.0.0.1/-/health/ready/ + curl -sS -o /dev/null -H 'Host: ts.burrow.net' -w 'headscale_root 
%{http_code}\n' http://127.0.0.1/ || true + fi fi EOF diff --git a/Scripts/ci/build-release-artifacts.sh b/Scripts/ci/build-release-artifacts.sh new file mode 100755 index 0000000..20b4c06 --- /dev/null +++ b/Scripts/ci/build-release-artifacts.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +set -euo pipefail + +repo_root="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/../.." && pwd)" +cd "${repo_root}" + +release_ref="${RELEASE_REF:-manual-${GITHUB_SHA:-unknown}}" +target="x86_64-unknown-linux-gnu" +out_dir="${repo_root}/dist" +staging="${out_dir}/burrow-${release_ref}-${target}" + +mkdir -p "${staging}" + +cargo build --locked --release -p burrow --bin burrow +install -m 0755 target/release/burrow "${staging}/burrow" +cp README.md "${staging}/README.md" + +tarball="${out_dir}/burrow-${release_ref}-${target}.tar.gz" +tar -C "${out_dir}" -czf "${tarball}" "$(basename "${staging}")" +shasum -a 256 "${tarball}" > "${tarball}.sha256" diff --git a/Scripts/ci/ensure-nix.sh b/Scripts/ci/ensure-nix.sh new file mode 100755 index 0000000..14be895 --- /dev/null +++ b/Scripts/ci/ensure-nix.sh @@ -0,0 +1,157 @@ +#!/usr/bin/env bash +set -euo pipefail + +source_nix_profile() { + local candidate + for candidate in \ + "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" \ + "${HOME}/.nix-profile/etc/profile.d/nix.sh" + do + if [[ -f "${candidate}" ]]; then + # shellcheck disable=SC1090 + . "${candidate}" + return 0 + fi + done + return 1 +} + +linux_cp_supports_preserve() { + cp --help 2>&1 | grep -q -- '--preserve' +} + +ensure_root_owned_home() { + if [[ "$(id -u)" -ne 0 ]]; then + return 0 + fi + + if [[ ! -d "${HOME}" ]] || [[ ! -O "${HOME}" ]]; then + export HOME="/root" + fi + + mkdir -p "${HOME}" +} + +ensure_linux_nixbld_accounts() { + if [[ "$(id -u)" -ne 0 ]]; then + return 0 + fi + + if command -v getent >/dev/null 2>&1 && getent group nixbld >/dev/null 2>&1; then + return 0 + fi + + if command -v addgroup >/dev/null 2>&1 && ! 
command -v groupadd >/dev/null 2>&1; then + addgroup -S nixbld >/dev/null 2>&1 || true + for i in $(seq 1 10); do + adduser -S -D -H -h /var/empty -s /sbin/nologin -G nixbld "nixbld${i}" >/dev/null 2>&1 || true + done + return 0 + fi + + if command -v groupadd >/dev/null 2>&1; then + groupadd -r nixbld >/dev/null 2>&1 || true + for i in $(seq 1 10); do + useradd \ + --system \ + --no-create-home \ + --home-dir /var/empty \ + --shell /usr/sbin/nologin \ + --gid nixbld \ + "nixbld${i}" >/dev/null 2>&1 || true + done + return 0 + fi + + echo "linux nix bootstrap requires nixbld group creation support" >&2 + exit 1 +} + +ensure_linux_nix_bootstrap_prereqs() { + if linux_cp_supports_preserve; then + ensure_root_owned_home + ensure_linux_nixbld_accounts + return 0 + fi + + if command -v apk >/dev/null 2>&1; then + apk add --no-cache coreutils xz >/dev/null + elif command -v apt-get >/dev/null 2>&1; then + export DEBIAN_FRONTEND=noninteractive + apt-get update -y >/dev/null + apt-get install -y coreutils xz-utils >/dev/null + elif command -v dnf >/dev/null 2>&1; then + dnf install -y coreutils xz >/dev/null + elif command -v yum >/dev/null 2>&1; then + yum install -y coreutils xz >/dev/null + else + echo "linux nix bootstrap requires GNU cp but no supported package manager was found" >&2 + exit 1 + fi + + linux_cp_supports_preserve || { + echo "linux nix bootstrap still lacks GNU cp after installing prerequisites" >&2 + exit 1 + } + + ensure_root_owned_home + ensure_linux_nixbld_accounts +} + +if ! command -v nix >/dev/null 2>&1; then + if ! 
command -v curl >/dev/null 2>&1; then + echo "curl is required to install nix" >&2 + exit 1 + fi + + case "$(uname -s)" in + Linux) + ensure_linux_nix_bootstrap_prereqs + curl -fsSL https://nixos.org/nix/install | sh -s -- --no-daemon + ;; + Darwin) + installer="$(mktemp -t burrow-nix.XXXXXX)" + trap 'rm -f "${installer}"' EXIT + curl -fsSL -o "${installer}" https://install.determinate.systems/nix + chmod +x "${installer}" + if command -v sudo >/dev/null 2>&1; then + if sudo -n true 2>/dev/null; then + sudo -n sh "${installer}" install --no-confirm + else + sudo sh "${installer}" install --no-confirm + fi + else + sh "${installer}" install --no-confirm + fi + ;; + *) + echo "unsupported platform for nix bootstrap: $(uname -s)" >&2 + exit 1 + ;; + esac +fi + +source_nix_profile || true +export PATH="${HOME}/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}" + +config_root="${XDG_CONFIG_HOME:-$HOME/.config}" +config_file="${config_root}/nix/nix.conf" +if [[ -e "${config_file}" && ! 
-w "${config_file}" ]]; then + config_root="$(mktemp -d -t burrow-nix-config.XXXXXX)" + export XDG_CONFIG_HOME="${config_root}" + config_file="${XDG_CONFIG_HOME}/nix/nix.conf" +fi + +mkdir -p "$(dirname -- "${config_file}")" +cat > "${config_file}" <<'EOF' +experimental-features = nix-command flakes +sandbox = true +fallback = true +substituters = https://cache.nixos.org +trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= +EOF + +command -v nix >/dev/null 2>&1 || { + echo "nix is still unavailable after bootstrap" >&2 + exit 1 +} diff --git a/Scripts/ci/publish-forgejo-release.sh b/Scripts/ci/publish-forgejo-release.sh new file mode 100755 index 0000000..338f71b --- /dev/null +++ b/Scripts/ci/publish-forgejo-release.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash +set -euo pipefail + +: "${API_URL:?API_URL is required}" +: "${REPOSITORY:?REPOSITORY is required}" +: "${RELEASE_TAG:?RELEASE_TAG is required}" +: "${TOKEN:?TOKEN is required}" + +release_api="${API_URL}/repos/${REPOSITORY}/releases" +tag_api="${release_api}/tags/${RELEASE_TAG}" +release_json="$(mktemp)" +create_json="$(mktemp)" +trap 'rm -f "${release_json}" "${create_json}"' EXIT + +status="$( + curl -sS -o "${release_json}" -w '%{http_code}' \ + -H "Authorization: token ${TOKEN}" \ + "${tag_api}" +)" + +if [[ "${status}" == "404" ]]; then + jq -n \ + --arg tag "${RELEASE_TAG}" \ + --arg name "Burrow ${RELEASE_TAG}" \ + '{ + tag_name: $tag, + target_commitish: $tag, + name: $name, + body: "Automated prerelease built on Forgejo Namespace runners.", + draft: false, + prerelease: true + }' > "${create_json}" + + curl -fsS \ + -H "Authorization: token ${TOKEN}" \ + -H "Content-Type: application/json" \ + -d @"${create_json}" \ + "${release_api}" > "${release_json}" +elif [[ "${status}" != "200" ]]; then + echo "failed to query Forgejo release for ${RELEASE_TAG} (HTTP ${status})" >&2 + cat "${release_json}" >&2 + exit 1 +fi + +release_id="$(jq -r '.id' "${release_json}")" +if [[ 
-z "${release_id}" || "${release_id}" == "null" ]]; then + echo "Forgejo release payload is missing an id" >&2 + cat "${release_json}" >&2 + exit 1 +fi + +for file in dist/*; do + name="$(basename "${file}")" + asset_id="$(jq -r --arg name "${name}" '.assets[]? | select(.name == $name) | .id' "${release_json}" | head -n1)" + if [[ -n "${asset_id}" ]]; then + curl -fsS -X DELETE \ + -H "Authorization: token ${TOKEN}" \ + "${release_api}/${release_id}/assets/${asset_id}" >/dev/null + fi + + curl -fsS \ + -H "Authorization: token ${TOKEN}" \ + -F "attachment=@${file}" \ + "${release_api}/${release_id}/assets?name=${name}" >/dev/null +done diff --git a/Scripts/cloudflare-upsert-a-record.sh b/Scripts/cloudflare-upsert-a-record.sh index af4cef4..88745af 100755 --- a/Scripts/cloudflare-upsert-a-record.sh +++ b/Scripts/cloudflare-upsert-a-record.sh @@ -1,11 +1,6 @@ #!/usr/bin/env bash set -euo pipefail -SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" -REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" -# shellcheck source=Scripts/_burrow-secrets.sh -source "${SCRIPT_DIR}/_burrow-secrets.sh" - usage() { cat <<'EOF' Usage: Scripts/cloudflare-upsert-a-record.sh --zone --name --ipv4
[options] @@ -18,7 +13,7 @@ Options: --name Fully-qualified DNS record name --ipv4
IPv4 address for the A record --token-file Cloudflare API token file - default: secrets/cloudflare/api-token.age, then intake/cloudflare-token.txt + default: intake/cloudflare-token.txt --ttl Record TTL, or auto default: auto --proxied Whether to proxy through Cloudflare @@ -30,15 +25,10 @@ EOF ZONE_NAME="" RECORD_NAME="" IPV4="" -TOKEN_FILE="${CLOUDFLARE_TOKEN_FILE:-}" +TOKEN_FILE="intake/cloudflare-token.txt" TTL_VALUE="auto" PROXIED="false" -cleanup() { - burrow_cleanup_secret_tmpfiles -} -trap cleanup EXIT - while [[ $# -gt 0 ]]; do case "$1" in --zone) @@ -81,16 +71,11 @@ if [[ -z "${ZONE_NAME}" || -z "${RECORD_NAME}" || -z "${IPV4}" ]]; then usage >&2 exit 2 fi -TOKEN_FILE="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "${TOKEN_FILE}" \ - "${REPO_ROOT}/intake/cloudflare-token.txt" \ - "${REPO_ROOT}/secrets/cloudflare/api-token.age" -)" || { - echo "Cloudflare token file could not be resolved" >&2 + +if [[ ! -f "${TOKEN_FILE}" ]]; then + echo "Cloudflare token file not found: ${TOKEN_FILE}" >&2 exit 1 -} +fi if [[ ! 
"${IPV4}" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then echo "Invalid IPv4 address: ${IPV4}" >&2 diff --git a/Scripts/forge-deploy.sh b/Scripts/forge-deploy.sh index 1a7eec7..5c4b959 100755 --- a/Scripts/forge-deploy.sh +++ b/Scripts/forge-deploy.sh @@ -5,8 +5,6 @@ SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" # shellcheck source=Scripts/_burrow-flake.sh source "${SCRIPT_DIR}/_burrow-flake.sh" -# shellcheck source=Scripts/_burrow-secrets.sh -source "${SCRIPT_DIR}/_burrow-secrets.sh" usage() { cat <<'EOF' @@ -20,7 +18,7 @@ Defaults: Environment: BURROW_FORGE_HOST root@git.burrow.net - BURROW_FORGE_SSH_KEY explicit path, otherwise secrets/forgejo/agent-ssh-key.age + BURROW_FORGE_SSH_KEY intake/agent_at_burrow_net_ed25519 EOF } @@ -30,7 +28,6 @@ ALLOW_DIRTY=0 BURROW_FLAKE_TMPDIRS=() cleanup() { - burrow_cleanup_secret_tmpfiles burrow_cleanup_flake_tmpdirs } trap cleanup EXIT @@ -74,17 +71,21 @@ if [[ ${ALLOW_DIRTY} -ne 1 ]] && [[ -n "$(git status --short)" ]]; then fi FORGE_HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" -FORGE_SSH_KEY="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "${BURROW_FORGE_SSH_KEY:-}" \ - "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \ - "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \ - "${HOME}/.ssh/agent_at_burrow_net_ed25519" -)" || { - echo "Unable to resolve the forge SSH key." >&2 +FORGE_SSH_KEY="${BURROW_FORGE_SSH_KEY:-}" + +if [[ -z "${FORGE_SSH_KEY}" ]]; then + if [[ -f "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" ]]; then + FORGE_SSH_KEY="${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" + else + FORGE_SSH_KEY="${HOME}/.ssh/agent_at_burrow_net_ed25519" + fi +fi + +if [[ ! -f "${FORGE_SSH_KEY}" ]]; then + echo "Forge SSH key not found at ${FORGE_SSH_KEY}." >&2 + echo "Set BURROW_FORGE_SSH_KEY or place the agent key in intake/." 
>&2 exit 1 -} +fi FORGE_KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" mkdir -p "$(dirname "${FORGE_KNOWN_HOSTS_FILE}")" diff --git a/Scripts/forgejo-prune-runners.py b/Scripts/forgejo-prune-runners.py deleted file mode 100755 index 65c9ae9..0000000 --- a/Scripts/forgejo-prune-runners.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env python3 -from __future__ import annotations - -import json -import os -import pathlib -import subprocess -import time -import urllib.error -import urllib.request - - -def _read_token() -> str: - token = os.environ.get("FORGEJO_API_TOKEN", "").strip() - token_file = os.environ.get("FORGEJO_API_TOKEN_FILE", "").strip() - if not token and token_file: - token = pathlib.Path(token_file).read_text().strip() - if not token: - raise SystemExit("Forgejo API token is missing") - if token.startswith("PENDING-"): - raise SystemExit("Forgejo API token is pending") - return token - - -def _request(method: str, url: str, token: str) -> tuple[int, str]: - headers = {"Authorization": f"token {token}", "Accept": "application/json"} - req = urllib.request.Request(url, headers=headers, method=method) - try: - with urllib.request.urlopen(req, timeout=20) as resp: - body = resp.read().decode("utf-8") - return resp.getcode(), body - except urllib.error.HTTPError as exc: - body = exc.read().decode("utf-8") - return exc.code, body - - -def _list_runners(api_url: str, token: str, org: str | None) -> tuple[str, list[dict]]: - if org: - list_url = f"{api_url}/orgs/{org}/actions/runners" - else: - list_url = f"{api_url}/actions/runners" - status, body = _request("GET", list_url, token) - if status == 404: - return list_url, [] - if status >= 400: - raise RuntimeError(f"list runners failed ({status}) {body}") - try: - runners = json.loads(body) - except json.JSONDecodeError as exc: - raise RuntimeError(f"invalid runner list response: {exc}") from exc - if not isinstance(runners, list): - raise RuntimeError("runner list 
response is not a list") - return list_url, runners - - -def _delete_runner(api_url: str, token: str, org: str | None, runner_id: int) -> bool: - if org: - delete_url = f"{api_url}/orgs/{org}/actions/runners/{runner_id}" - else: - delete_url = f"{api_url}/actions/runners/{runner_id}" - status, body = _request("DELETE", delete_url, token) - if status in (200, 204): - return True - print(f"[forgejo-prune-runners] delete {runner_id} failed: {status} {body}") - return False - - -def _prune_db(ttl_seconds: int) -> int: - cutoff = int(time.time()) - ttl_seconds - now = int(time.time()) - sql = ( - "WITH updated AS (" - "UPDATE action_runner " - f"SET deleted = {now} " - "WHERE (deleted IS NULL OR deleted = 0) " - f"AND ((last_online IS NOT NULL AND last_online > 0 AND last_online < {cutoff}) " - f"OR (COALESCE(last_online, 0) = 0 AND created < {cutoff})) " - "RETURNING 1" - ") SELECT count(*) FROM updated;" - ) - result = subprocess.run( - ["psql", "-h", "/run/postgresql", "-U", "forgejo", "forgejo", "-tAc", sql], - check=True, - capture_output=True, - text=True, - ) - output = (result.stdout or "").strip() - try: - return int(output) - except ValueError: - return 0 - - -def main() -> None: - api_url = os.environ.get("FORGEJO_API_URL", "https://git.burrow.net/api/v1").rstrip("/") - org = os.environ.get("FORGEJO_ORG", "hackclub").strip() or None - dry_run = os.environ.get("FORGEJO_DRY_RUN", "0") == "1" - db_only = os.environ.get("FORGEJO_PRUNE_DB", "0") == "1" - ttl_seconds = int(os.environ.get("FORGEJO_RUNNER_TTL_SEC", "3600")) - - if db_only: - removed = _prune_db(ttl_seconds) - print(f"[forgejo-prune-runners] pruned {removed} runners via DB") - return - - token = _read_token() - - try: - _, runners = _list_runners(api_url, token, org) - except RuntimeError as exc: - if org is not None: - print(f"[forgejo-prune-runners] org runner list failed ({exc}); retrying instance scope") - _, runners = _list_runners(api_url, token, None) - org = None - else: - raise 
SystemExit(str(exc)) - - if not runners: - removed = _prune_db(ttl_seconds) - print(f"[forgejo-prune-runners] pruned {removed} runners via DB fallback") - return - - removed = 0 - for runner in runners: - runner_id = runner.get("id") - name = runner.get("name", "unknown") - status = (runner.get("status") or "").lower() - busy = bool(runner.get("busy")) - if status == "online" or busy: - continue - if runner_id is None: - continue - if dry_run: - print(f"[forgejo-prune-runners] would delete runner {runner_id} ({name}) status={status}") - continue - if _delete_runner(api_url, token, org, int(runner_id)): - removed += 1 - print(f"[forgejo-prune-runners] deleted runner {runner_id} ({name})") - - print(f"[forgejo-prune-runners] done; removed {removed} runners") - - -if __name__ == "__main__": - main() diff --git a/Scripts/hcloud-upload-nixos-image.sh b/Scripts/hcloud-upload-nixos-image.sh index 36f1e3b..2590519 100755 --- a/Scripts/hcloud-upload-nixos-image.sh +++ b/Scripts/hcloud-upload-nixos-image.sh @@ -6,14 +6,12 @@ REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" # shellcheck source=Scripts/_burrow-flake.sh source "${SCRIPT_DIR}/_burrow-flake.sh" -# shellcheck source=Scripts/_burrow-secrets.sh -source "${SCRIPT_DIR}/_burrow-secrets.sh" DEFAULT_CONFIG="burrow-forge" DEFAULT_FLAKE="." 
DEFAULT_LOCATION="hel1" DEFAULT_ARCHITECTURE="x86" -DEFAULT_TOKEN_FILE="" +DEFAULT_TOKEN_FILE="${REPO_ROOT}/intake/hetzner-api-token.txt" CONFIG="${HCLOUD_IMAGE_CONFIG:-${DEFAULT_CONFIG}}" FLAKE="${HCLOUD_IMAGE_FLAKE:-${DEFAULT_FLAKE}}" @@ -32,13 +30,6 @@ NIX_BUILD_FLAGS=() BURROW_FLAKE_TMPDIRS=() LOCAL_STORE_DIR="" -cleanup() { - burrow_cleanup_secret_tmpfiles - burrow_cleanup_flake_tmpdirs -} - -trap cleanup EXIT - usage() { cat <<'EOF' Usage: Scripts/hcloud-upload-nixos-image.sh [options] @@ -51,7 +42,7 @@ Options: --location Hetzner location for the temporary upload server (default: hel1) --architecture CPU architecture of the image (default: x86) --server-type Hetzner server type for the temporary upload server - --token-file Hetzner API token file (default: secrets/hetzner/api-token.age, then intake/hetzner-api-token.txt) + --token-file Hetzner API token file (default: intake/hetzner-api-token.txt) --artifact-path Prebuilt raw image artifact to upload directly --output-hash Stable hash label for --artifact-path uploads --builder-spec Complete builders string passed to nix build @@ -134,17 +125,6 @@ while [[ $# -gt 0 ]]; do esac done -TOKEN_FILE="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "${TOKEN_FILE}" \ - "${REPO_ROOT}/intake/hetzner-api-token.txt" \ - "${REPO_ROOT}/secrets/hetzner/api-token.age" -)" || { - echo "Hetzner API token file could not be resolved" >&2 - exit 1 -} - cleanup() { burrow_cleanup_flake_tmpdirs if [[ -n "${LOCAL_STORE_DIR}" && -d "${LOCAL_STORE_DIR}" ]]; then diff --git a/Scripts/hetzner-forge.sh b/Scripts/hetzner-forge.sh index 73e1953..cfce7eb 100755 --- a/Scripts/hetzner-forge.sh +++ b/Scripts/hetzner-forge.sh @@ -2,9 +2,6 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" -REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" -# shellcheck source=Scripts/_burrow-secrets.sh -source "${SCRIPT_DIR}/_burrow-secrets.sh" usage() { cat <<'EOF' @@ -34,7 +31,7 @@ Options: -h, --help Show this help text. 
Environment: - HCLOUD_TOKEN_FILE Defaults to secrets/hetzner/api-token.age, then intake/hetzner-api-token.txt + HCLOUD_TOKEN_FILE Defaults to intake/hetzner-api-token.txt EOF } @@ -46,15 +43,10 @@ IMAGE="ubuntu-24.04" CONFIG="burrow-forge" FLAKE="." UPLOAD_LOCATION="" -TOKEN_FILE="${HCLOUD_TOKEN_FILE:-}" +TOKEN_FILE="${HCLOUD_TOKEN_FILE:-intake/hetzner-api-token.txt}" YES=0 SSH_KEYS=("contact@burrow.net" "agent@burrow.net") -cleanup() { - burrow_cleanup_secret_tmpfiles -} -trap cleanup EXIT - if [[ $# -gt 0 ]]; then case "$1" in show|create|delete|recreate|build-image|create-from-image|recreate-from-image) @@ -118,16 +110,10 @@ while [[ $# -gt 0 ]]; do esac done -TOKEN_FILE="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "${TOKEN_FILE}" \ - "${REPO_ROOT}/intake/hetzner-api-token.txt" \ - "${REPO_ROOT}/secrets/hetzner/api-token.age" -)" || { - echo "Hetzner API token file could not be resolved" >&2 +if [[ ! -f "${TOKEN_FILE}" ]]; then + echo "Hetzner API token file not found: ${TOKEN_FILE}" >&2 exit 1 -} +fi if [[ -z "${UPLOAD_LOCATION}" ]]; then UPLOAD_LOCATION="${LOCATION}" diff --git a/Scripts/nsc-build-and-upload-image.sh b/Scripts/nsc-build-and-upload-image.sh index 27badb6..6fb99a9 100755 --- a/Scripts/nsc-build-and-upload-image.sh +++ b/Scripts/nsc-build-and-upload-image.sh @@ -6,13 +6,11 @@ REPO_ROOT="$(cd "${SCRIPT_DIR}/.." 
&& pwd)" # shellcheck source=Scripts/_burrow-flake.sh source "${SCRIPT_DIR}/_burrow-flake.sh" -# shellcheck source=Scripts/_burrow-secrets.sh -source "${SCRIPT_DIR}/_burrow-secrets.sh" CONFIG="${HCLOUD_IMAGE_CONFIG:-burrow-forge}" FLAKE="${HCLOUD_IMAGE_FLAKE:-.}" LOCATION="${HCLOUD_IMAGE_LOCATION:-hel1}" -TOKEN_FILE="${HCLOUD_TOKEN_FILE:-}" +TOKEN_FILE="${HCLOUD_TOKEN_FILE:-${REPO_ROOT}/intake/hetzner-api-token.txt}" NSC_SSH_HOST="${NSC_SSH_HOST:-ssh.ord2.namespace.so}" NSC_MACHINE_TYPE="${NSC_MACHINE_TYPE:-linux/amd64:32x64}" NSC_BUILDER_DURATION="${NSC_BUILDER_DURATION:-4h}" @@ -28,13 +26,6 @@ EXTRA_LABELS=() BURROW_FLAKE_TMPDIRS=() BUILDER_ID="" -cleanup() { - burrow_cleanup_secret_tmpfiles - burrow_cleanup_flake_tmpdirs -} - -trap cleanup EXIT - usage() { cat <<'EOF' Usage: Scripts/nsc-build-and-upload-image.sh [options] @@ -46,7 +37,7 @@ Options: --config images.-raw output to build (default: burrow-forge) --flake Flake path to build from (default: .) --location Hetzner upload location (default: hel1) - --token-file Hetzner API token file (default: secrets/hetzner/api-token.age, then intake/hetzner-api-token.txt) + --token-file Hetzner API token file (default: intake/hetzner-api-token.txt) --machine-type Namespace machine type (default: linux/amd64:32x64) --ssh-host Namespace SSH endpoint (default: ssh.ord2.namespace.so) --duration Namespace builder lifetime (default: 4h) @@ -135,17 +126,6 @@ while [[ $# -gt 0 ]]; do esac done -TOKEN_FILE="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "${TOKEN_FILE}" \ - "${REPO_ROOT}/intake/hetzner-api-token.txt" \ - "${REPO_ROOT}/secrets/hetzner/api-token.age" -)" || { - echo "Hetzner API token file could not be resolved" >&2 - exit 1 -} - cleanup() { if [[ -n "${BUILDER_ID}" && -n "${NSC_BIN}" ]]; then "${NSC_BIN}" destroy "${BUILDER_ID}" --force >/dev/null 2>&1 || true diff --git a/Scripts/provision-forgejo-nsc.sh b/Scripts/provision-forgejo-nsc.sh index 537107e..b31de21 100755 --- 
a/Scripts/provision-forgejo-nsc.sh +++ b/Scripts/provision-forgejo-nsc.sh @@ -6,47 +6,41 @@ REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" # shellcheck source=Scripts/_burrow-flake.sh source "${SCRIPT_DIR}/_burrow-flake.sh" -# shellcheck source=Scripts/_burrow-secrets.sh -source "${SCRIPT_DIR}/_burrow-secrets.sh" usage() { cat <<'EOF' Usage: Scripts/provision-forgejo-nsc.sh [options] -Generate Burrow forgejo-nsc runtime inputs and refresh the authoritative -`secrets/forgejo/*.age` files, optionally refreshing the Namespace token from -the currently logged-in namespace account. +Generate Burrow forgejo-nsc runtime inputs in intake/ and optionally refresh the +Namespace token from the currently logged-in namespace account. Options: --host SSH target used to mint the Forgejo PAT. Default: root@git.burrow.net --ssh-key SSH private key for the forge host. - Default: secrets/forgejo/agent-ssh-key.age, then intake/ + Default: intake/agent_at_burrow_net_ed25519 --nsc-bin Override the nsc binary. - --no-refresh-token Reuse the existing encrypted Namespace token if it already exists. + --no-refresh-token Reuse intake/forgejo_nsc_token.txt if it already exists. --token-name Forgejo PAT name prefix (default: forgejo-nsc) --contact-user Forgejo username used for PAT creation (default: contact) - --scope-owner Forgejo org/user owner for the default NSC scope (default: hackclub) + --scope-owner Forgejo org/user owner for the default NSC scope (default: burrow) --scope-name Forgejo repository name for the default NSC scope (default: burrow) -h, --help Show this help text. 
EOF } HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" -SSH_KEY="${BURROW_FORGE_SSH_KEY:-}" +SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" NSC_BIN="${NSC_BIN:-}" KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" REFRESH_TOKEN=1 TOKEN_NAME_PREFIX="${FORGEJO_PAT_NAME:-forgejo-nsc}" CONTACT_USER="${FORGEJO_CONTACT_USER:-contact}" -SCOPE_OWNER="${FORGEJO_SCOPE_OWNER:-hackclub}" +SCOPE_OWNER="${FORGEJO_SCOPE_OWNER:-burrow}" SCOPE_NAME="${FORGEJO_SCOPE_NAME:-burrow}" BURROW_FLAKE_TMPDIRS=() -TMP_DIR="" cleanup() { - [[ -n "${TMP_DIR}" ]] && rm -rf "${TMP_DIR}" >/dev/null 2>&1 || true - burrow_cleanup_secret_tmpfiles burrow_cleanup_flake_tmpdirs } trap cleanup EXIT @@ -103,15 +97,13 @@ burrow_require_cmd nix burrow_require_cmd ssh burrow_require_cmd python3 -SSH_KEY="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "${SSH_KEY}" \ - "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \ - "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \ - "${HOME}/.ssh/agent_at_burrow_net_ed25519" -)" -TMP_DIR="$(mktemp -d "${TMPDIR:-/tmp}/burrow-forgejo-nsc.XXXXXX")" +if [[ ! -f "${SSH_KEY}" ]]; then + echo "forge SSH key not found: ${SSH_KEY}" >&2 + exit 1 +fi + +mkdir -p "${REPO_ROOT}/intake" +chmod 700 "${REPO_ROOT}/intake" flake_ref="$(burrow_prepare_flake_ref "${REPO_ROOT}")" if [[ -z "${NSC_BIN}" ]]; then @@ -136,77 +128,16 @@ if [[ ! 
-x "${NSC_BIN}" ]]; then exit 1 fi -token_file="${TMP_DIR}/forgejo_nsc_token.txt" -dispatcher_out="${TMP_DIR}/forgejo_nsc_dispatcher.yaml" -autoscaler_out="${TMP_DIR}/forgejo_nsc_autoscaler.yaml" +token_file="${REPO_ROOT}/intake/forgejo_nsc_token.txt" +dispatcher_out="${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml" +autoscaler_out="${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml" dispatcher_src="${REPO_ROOT}/services/forgejo-nsc/deploy/dispatcher.yaml" autoscaler_src="${REPO_ROOT}/services/forgejo-nsc/deploy/autoscaler.yaml" -token_secret="${REPO_ROOT}/secrets/forgejo/nsc-token.age" -dispatcher_secret="${REPO_ROOT}/secrets/forgejo/nsc-dispatcher-config.age" -autoscaler_secret="${REPO_ROOT}/secrets/forgejo/nsc-autoscaler-config.age" -if [[ "${REFRESH_TOKEN}" -eq 1 ]]; then - ssh \ - -i "${SSH_KEY}" \ - -o IdentitiesOnly=yes \ - -o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \ - -o StrictHostKeyChecking=accept-new \ - "${HOST}" \ - 'sudo -u forgejo-nsc python3 - <<'"'"'PY'"'"' -import json -from pathlib import Path - -payload = {} - -token_json = Path("/var/lib/forgejo-nsc/.config/ns/token.json") -if token_json.exists(): - data = json.loads(token_json.read_text(encoding="utf-8")) - session = str(data.get("session_token", "")).strip() - if session: - payload["session_token"] = session - -token_cache = Path("/var/lib/forgejo-nsc/.config/ns/token.cache") -if token_cache.exists(): - bearer = token_cache.read_text(encoding="utf-8").strip() - if bearer: - payload["bearer_token"] = bearer - -if not payload: - raise SystemExit("forgejo-nsc host does not have a usable Namespace session") - -print(json.dumps(payload, indent=2)) -PY' > "${token_file}" +if [[ "${REFRESH_TOKEN}" -eq 1 || ! 
-s "${token_file}" ]]; then + "${NSC_BIN}" auth check-login --duration 20m >/dev/null + "${NSC_BIN}" auth generate-dev-token --output_to "${token_file}" >/dev/null chmod 600 "${token_file}" -elif [[ -f "${token_secret}" ]]; then - burrow_decrypt_age_secret_to_temp "${REPO_ROOT}" "${token_secret}" > "${token_file}" -fi - -if [[ -s "${token_file}" ]]; then - TOKEN_FILE="${token_file}" python3 - <<'PY' -import json -import os -from pathlib import Path - -path = Path(os.environ["TOKEN_FILE"]) -raw = path.read_text(encoding="utf-8").strip() -if not raw: - raise SystemExit(0) - -try: - parsed = json.loads(raw) -except json.JSONDecodeError: - parsed = None - -if isinstance(parsed, dict): - bearer = parsed.get("bearer_token") - session = parsed.get("session_token") - if isinstance(bearer, str) and bearer.strip(): - raise SystemExit(0) - if isinstance(session, str) and session.strip(): - raise SystemExit(0) - -path.write_text(json.dumps({"bearer_token": raw}, indent=2) + "\n", encoding="utf-8") -PY fi webhook_secret="$(python3 - <<'PY' @@ -302,9 +233,5 @@ PY chmod 600 "${dispatcher_out}" "${autoscaler_out}" -burrow_encrypt_secret_from_file "${REPO_ROOT}" "${token_secret}" "${token_file}" -burrow_encrypt_secret_from_file "${REPO_ROOT}" "${dispatcher_secret}" "${dispatcher_out}" -burrow_encrypt_secret_from_file "${REPO_ROOT}" "${autoscaler_secret}" "${autoscaler_out}" - -echo "Updated secrets/forgejo/{nsc-token,nsc-dispatcher-config,nsc-autoscaler-config}.age." +echo "Rendered intake/forgejo_nsc_token.txt, intake/forgejo_nsc_dispatcher.yaml, and intake/forgejo_nsc_autoscaler.yaml." echo "Minted Forgejo PAT ${token_name} for ${CONTACT_USER} on ${HOST}." diff --git a/Scripts/run-ios-tailnet-ui-tests.sh b/Scripts/run-ios-tailnet-ui-tests.sh new file mode 100755 index 0000000..5170a1e --- /dev/null +++ b/Scripts/run-ios-tailnet-ui-tests.sh @@ -0,0 +1,163 @@ +#!/usr/bin/env bash +set -euo pipefail + +repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +bundle_id="${BURROW_UI_TEST_APP_BUNDLE_ID:-com.hackclub.burrow}" +simulator_name="${BURROW_UI_TEST_SIMULATOR_NAME:-iPhone 17 Pro}" +simulator_os="${BURROW_UI_TEST_SIMULATOR_OS:-26.4}" +simulator_id="${BURROW_UI_TEST_SIMULATOR_ID:-}" +derived_data_path="${BURROW_UI_TEST_DERIVED_DATA_PATH:-/tmp/burrow-ui-tests-deriveddata}" +source_packages_path="${BURROW_UI_TEST_SOURCE_PACKAGES_PATH:-/tmp/burrow-ui-tests-sourcepackages}" +fallback_dir="/tmp/${bundle_id}/SimulatorFallback" +socket_path="${fallback_dir}/burrow.sock" +tailnet_state_root="/tmp/${bundle_id}/SimulatorTailnetState" +daemon_log="${BURROW_UI_TEST_DAEMON_LOG:-/tmp/burrow-ui-test-daemon.log}" +ui_test_config_path="${BURROW_UI_TEST_CONFIG_PATH:-/tmp/burrow-ui-test-config.json}" +ui_test_runner_bundle_id="${bundle_id}.uitests.xctrunner" +ui_test_email="${BURROW_UI_TEST_EMAIL:-ui-test@burrow.net}" +ui_test_username="${BURROW_UI_TEST_USERNAME:-ui-test}" +ui_test_tailnet_mode="${BURROW_UI_TEST_TAILNET_MODE:-tailscale}" +password_secret="${repo_root}/secrets/infra/authentik-ui-test-password.age" +age_identity="${BURROW_UI_TEST_AGE_IDENTITY:-${HOME}/.ssh/id_ed25519}" + +ui_test_password="${BURROW_UI_TEST_PASSWORD:-}" +if [[ -z "$ui_test_password" ]]; then + if [[ -f "$password_secret" && -f "$age_identity" ]]; then + ui_test_password="$(age -d -i "$age_identity" "$password_secret" | tr -d '\r\n')" + else + echo "error: BURROW_UI_TEST_PASSWORD is unset and ${password_secret} could not be decrypted" >&2 + exit 1 + fi +fi + +rm -rf "$fallback_dir" "$tailnet_state_root" +mkdir -p "$fallback_dir" "$tailnet_state_root" "$derived_data_path" "$source_packages_path" +rm -f "$socket_path" + +resolve_simulator_id() { + xcrun simctl list devices available -j | python3 -c ' +import json +import os +import sys + +target_name = sys.argv[1] +target_os = sys.argv[2] +target_runtime = "com.apple.CoreSimulator.SimRuntime.iOS-" + target_os.replace(".", "-") +devices = json.load(sys.stdin).get("devices", {}) +healthy = [] +for 
runtime, entries in devices.items(): + if runtime != target_runtime: + continue + for entry in entries: + if not entry.get("isAvailable", False): + continue + if not os.path.isdir(entry.get("dataPath", "")): + continue + healthy.append(entry) +for entry in healthy: + if entry.get("name") == target_name: + print(entry["udid"]) + raise SystemExit(0) +for entry in healthy: + if target_name in entry.get("name", ""): + print(entry["udid"]) + raise SystemExit(0) +raise SystemExit(1) +' "$simulator_name" "$simulator_os" +} + +if [[ -z "$simulator_id" ]]; then + simulator_id="$(resolve_simulator_id || true)" +fi + +if [[ -n "$simulator_id" ]]; then + xcrun simctl boot "$simulator_id" >/dev/null 2>&1 || true + xcrun simctl bootstatus "$simulator_id" -b + xcrun simctl terminate "$simulator_id" "$bundle_id" >/dev/null 2>&1 || true + xcrun simctl terminate "$simulator_id" "$ui_test_runner_bundle_id" >/dev/null 2>&1 || true + xcrun simctl uninstall "$simulator_id" "$bundle_id" >/dev/null 2>&1 || true + xcrun simctl uninstall "$simulator_id" "$ui_test_runner_bundle_id" >/dev/null 2>&1 || true + destination="id=${simulator_id}" +else + destination="platform=iOS Simulator,name=${simulator_name},OS=${simulator_os}" +fi + +cleanup() { + rm -f "$ui_test_config_path" + if [[ -n "${daemon_pid:-}" ]]; then + kill "$daemon_pid" >/dev/null 2>&1 || true + wait "$daemon_pid" >/dev/null 2>&1 || true + fi +} +trap cleanup EXIT + +umask 077 +python3 - <<'PY' "$ui_test_config_path" "$ui_test_email" "$ui_test_username" "$ui_test_password" "$ui_test_tailnet_mode" +import json +import pathlib +import sys + +config_path = pathlib.Path(sys.argv[1]) +config_path.write_text( + json.dumps( + { + "email": sys.argv[2], + "username": sys.argv[3], + "password": sys.argv[4], + "mode": sys.argv[5], + } + ), + encoding="utf-8", +) +PY + +cargo build -p burrow --bin burrow + +( + cd "$fallback_dir" + RUST_LOG="${BURROW_UI_TEST_RUST_LOG:-info,burrow=debug}" \ + BURROW_SOCKET_PATH="burrow.sock" \ + 
BURROW_TAILSCALE_STATE_ROOT="$tailnet_state_root" \ + "${repo_root}/target/debug/burrow" daemon >"$daemon_log" 2>&1 +) & +daemon_pid=$! + +for _ in $(seq 1 50); do + [[ -S "$socket_path" ]] && break + sleep 0.2 +done + +if [[ ! -S "$socket_path" ]]; then + echo "error: Burrow daemon did not create ${socket_path}" >&2 + [[ -f "$daemon_log" ]] && cat "$daemon_log" >&2 + exit 1 +fi + +common_xcodebuild_args=( + -quiet + -skipPackagePluginValidation + -project "${repo_root}/Apple/Burrow.xcodeproj" + -scheme App + -configuration Debug + -destination "$destination" + -derivedDataPath "$derived_data_path" + -clonedSourcePackagesDirPath "$source_packages_path" + -only-testing:BurrowUITests + -parallel-testing-enabled NO + -maximum-concurrent-test-simulator-destinations 1 + -maximum-parallel-testing-workers 1 + CODE_SIGNING_ALLOWED=NO +) + +xcodebuild \ + "${common_xcodebuild_args[@]}" \ + build-for-testing + +BURROW_UI_TEST_EMAIL="$ui_test_email" \ +BURROW_UI_TEST_USERNAME="$ui_test_username" \ +BURROW_UI_TEST_PASSWORD="$ui_test_password" \ +BURROW_UI_TEST_CONFIG_PATH="$ui_test_config_path" \ +BURROW_UI_TEST_EPHEMERAL_AUTH=1 \ +xcodebuild \ + "${common_xcodebuild_args[@]}" \ + test-without-building diff --git a/Scripts/run-tailnet-connectivity-smoke.sh b/Scripts/run-tailnet-connectivity-smoke.sh new file mode 100755 index 0000000..f3053d3 --- /dev/null +++ b/Scripts/run-tailnet-connectivity-smoke.sh @@ -0,0 +1,186 @@ +#!/usr/bin/env bash +set -euo pipefail + +repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +bundle_id="${BURROW_UI_TEST_APP_BUNDLE_ID:-com.hackclub.burrow}" +smoke_root="${BURROW_TAILNET_SMOKE_ROOT:-/tmp/burrow-tailnet-connectivity}" +socket_path="${smoke_root}/burrow.sock" +db_path="${smoke_root}/burrow.db" +daemon_log="${BURROW_TAILNET_SMOKE_DAEMON_LOG:-${smoke_root}/daemon.log}" +payload_path="${smoke_root}/tailnet.json" +authority="${BURROW_TAILNET_SMOKE_AUTHORITY:-https://ts.burrow.net}" +account_name="${BURROW_TAILNET_SMOKE_ACCOUNT:-ui-test}" +identity_name="${BURROW_TAILNET_SMOKE_IDENTITY:-apple}" +hostname="${BURROW_TAILNET_SMOKE_HOSTNAME:-burrow-apple}" +message="${BURROW_TAILNET_SMOKE_MESSAGE:-burrow-tailnet-smoke}" +timeout_ms="${BURROW_TAILNET_SMOKE_TIMEOUT_MS:-8000}" +remote_ip="${BURROW_TAILNET_SMOKE_REMOTE_IP:-}" +remote_port="${BURROW_TAILNET_SMOKE_REMOTE_PORT:-18081}" +remote_hostname="${BURROW_TAILNET_SMOKE_REMOTE_HOSTNAME:-burrow-echo}" +remote_authkey="${BURROW_TAILNET_SMOKE_REMOTE_AUTHKEY:-}" +helper_bin="${BURROW_TAILNET_SMOKE_HELPER_BIN:-${smoke_root}/tailscale-login-bridge}" +remote_state_root="${BURROW_TAILNET_SMOKE_REMOTE_STATE_ROOT:-${smoke_root}/remote-state}" +remote_stdout="${smoke_root}/remote-helper.stdout" +remote_stderr="${BURROW_TAILNET_SMOKE_REMOTE_LOG:-${smoke_root}/remote-helper.log}" + +if [[ -n "${TS_AUTHKEY:-}" ]]; then + default_tailnet_state_root="${smoke_root}/local-state" +else + default_tailnet_state_root="/tmp/${bundle_id}/SimulatorTailnetState" +fi +tailnet_state_root="${BURROW_TAILNET_STATE_ROOT:-${default_tailnet_state_root}}" + +need_login=0 +if [[ -z "${TS_AUTHKEY:-}" ]] && { [[ ! -d "$tailnet_state_root" ]] || [[ -z "$(find "$tailnet_state_root" -mindepth 1 -maxdepth 2 -print -quit 2>/dev/null)" ]]; }; then + need_login=1 +fi + +if [[ "$need_login" -eq 1 ]]; then + echo "Tailnet state root is empty; running iOS login bootstrap first..." 
+ "${repo_root}/Scripts/run-ios-tailnet-ui-tests.sh" +fi + +rm -rf "$smoke_root" +mkdir -p "$smoke_root" + +cleanup() { + rm -f "$payload_path" + if [[ -n "${daemon_pid:-}" ]]; then + kill "$daemon_pid" >/dev/null 2>&1 || true + wait "$daemon_pid" >/dev/null 2>&1 || true + fi + if [[ -n "${remote_pid:-}" ]]; then + kill "$remote_pid" >/dev/null 2>&1 || true + wait "$remote_pid" >/dev/null 2>&1 || true + fi +} +trap cleanup EXIT + +wait_for_helper_listen() { + python3 - <<'PY' "$1" +import json +import pathlib +import sys +import time + +path = pathlib.Path(sys.argv[1]) +deadline = time.time() + 20 +while time.time() < deadline: + if path.exists(): + with path.open("r", encoding="utf-8") as handle: + line = handle.readline().strip() + if line: + hello = json.loads(line) + print(hello["listen_addr"]) + raise SystemExit(0) + time.sleep(0.1) +raise SystemExit("timed out waiting for helper startup line") +PY +} + +wait_for_helper_ip() { + python3 - <<'PY' "$1" +import json +import sys +import time +import urllib.request + +url = sys.argv[1] +deadline = time.time() + 30 +while time.time() < deadline: + with urllib.request.urlopen(url, timeout=5) as response: + status = json.load(response) + if status.get("running") and status.get("tailscale_ips"): + print(status["tailscale_ips"][0]) + raise SystemExit(0) + time.sleep(0.25) +raise SystemExit("timed out waiting for helper to become ready") +PY +} + +python3 - <<'PY' "$payload_path" "$authority" "$account_name" "$identity_name" "$hostname" +import json +import pathlib +import sys + +path = pathlib.Path(sys.argv[1]) +payload = { + "authority": sys.argv[2], + "account": sys.argv[3], + "identity": sys.argv[4], + "hostname": sys.argv[5], +} +path.write_text(json.dumps(payload, indent=2) + "\n", encoding="utf-8") +PY + +cargo build -p burrow --bin burrow +( + cd "${repo_root}/Tools/tailscale-login-bridge" + GOWORK=off go build -o "$helper_bin" . +) + +if [[ -z "$remote_ip" ]]; then + if [[ -z "$remote_authkey" ]] && { [[ ! 
-d "$remote_state_root" ]] || [[ -z "$(find "$remote_state_root" -mindepth 1 -maxdepth 1 -print -quit 2>/dev/null)" ]]; }; then + echo "error: set BURROW_TAILNET_SMOKE_REMOTE_IP, BURROW_TAILNET_SMOKE_REMOTE_AUTHKEY, or BURROW_TAILNET_SMOKE_REMOTE_STATE_ROOT to an existing logged-in helper state" >&2 + exit 1 + fi + + if [[ -n "$remote_authkey" ]]; then + rm -rf "$remote_state_root" + mkdir -p "$remote_state_root" + fi + + ( + cd "$repo_root" + if [[ -n "$remote_authkey" ]]; then + export TS_AUTHKEY="$remote_authkey" + fi + "$helper_bin" \ + --listen 127.0.0.1:0 \ + --state-dir "$remote_state_root" \ + --hostname "$remote_hostname" \ + --control-url "$authority" \ + --udp-echo-port "$remote_port" \ + >"$remote_stdout" 2>"$remote_stderr" + ) & + remote_pid=$! + + remote_listen_addr="$(wait_for_helper_listen "$remote_stdout")" + remote_ip="$(wait_for_helper_ip "http://${remote_listen_addr}/status")" +fi + +( + cd "$smoke_root" + RUST_LOG="${BURROW_TAILNET_SMOKE_RUST_LOG:-info,burrow=debug}" \ + BURROW_SOCKET_PATH="$socket_path" \ + BURROW_TAILSCALE_STATE_ROOT="$tailnet_state_root" \ + "${repo_root}/target/debug/burrow" daemon >"$daemon_log" 2>&1 +) & +daemon_pid=$! + +for _ in $(seq 1 50); do + [[ -S "$socket_path" ]] && break + sleep 0.2 +done + +if [[ ! -S "$socket_path" ]]; then + echo "error: Burrow daemon did not create ${socket_path}" >&2 + [[ -f "$daemon_log" ]] && cat "$daemon_log" >&2 + exit 1 +fi + +run_burrow() { + BURROW_SOCKET_PATH="$socket_path" \ + BURROW_TAILSCALE_STATE_ROOT="$tailnet_state_root" \ + "${repo_root}/target/debug/burrow" "$@" +} + +run_burrow network-add 1 1 "$payload_path" +run_burrow start +run_burrow tunnel-config +run_burrow tailnet-udp-echo "${remote_ip}:${remote_port}" --message "$message" --timeout-ms "$timeout_ms" + +echo +echo "Tailnet connectivity smoke passed." 
+echo "State root: $tailnet_state_root" +echo "Remote: ${remote_ip}:${remote_port}" diff --git a/Scripts/seal-forgejo-nsc-secrets.sh b/Scripts/seal-forgejo-nsc-secrets.sh new file mode 100755 index 0000000..a6b3918 --- /dev/null +++ b/Scripts/seal-forgejo-nsc-secrets.sh @@ -0,0 +1,112 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" + +usage() { + cat <<'EOF' +Usage: Scripts/seal-forgejo-nsc-secrets.sh [options] + +Encrypt Burrow forgejo-nsc runtime inputs from intake/ into the agenix secrets +consumed by burrow-forge. + +Options: + --provision Re-render the local intake files before sealing. + --host SSH target forwarded to provision-forgejo-nsc.sh. + --ssh-key SSH private key forwarded to provision-forgejo-nsc.sh. + --nsc-bin Override the nsc binary for provisioning. + -h, --help Show this help text. +EOF +} + +PROVISION=0 +HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" +SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}" +NSC_BIN="${NSC_BIN:-}" + +while [[ $# -gt 0 ]]; do + case "$1" in + --provision) + PROVISION=1 + shift + ;; + --host) + HOST="${2:?missing value for --host}" + shift 2 + ;; + --ssh-key) + SSH_KEY="${2:?missing value for --ssh-key}" + shift 2 + ;; + --nsc-bin) + NSC_BIN="${2:?missing value for --nsc-bin}" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "unknown option: $1" >&2 + usage >&2 + exit 64 + ;; + esac +done + +require_cmd() { + if ! 
command -v "$1" >/dev/null 2>&1; then + echo "missing required command: $1" >&2 + exit 1 + fi +} + +require_cmd age +require_cmd nix +require_cmd python3 + +if [[ "${PROVISION}" -eq 1 ]]; then + provision_args=(--host "${HOST}" --ssh-key "${SSH_KEY}") + if [[ -n "${NSC_BIN}" ]]; then + provision_args+=(--nsc-bin "${NSC_BIN}") + fi + "${SCRIPT_DIR}/provision-forgejo-nsc.sh" "${provision_args[@]}" +fi + +tmpdir="$(mktemp -d)" +cleanup() { + rm -rf "${tmpdir}" +} +trap cleanup EXIT + +seal_secret() { + local target="$1" + local source_path="$2" + recipients_file="${tmpdir}/$(basename "${target}").recipients" + if [[ ! -s "${source_path}" ]]; then + echo "required runtime input missing or empty: ${source_path}" >&2 + exit 1 + fi + nix eval --impure --json --expr "let s = import ${REPO_ROOT}/secrets.nix; in s.\"${target}\".publicKeys" \ + | python3 -c 'import json, sys; [print(item) for item in json.load(sys.stdin)]' \ + > "${recipients_file}" + + age -R "${recipients_file}" -o "${REPO_ROOT}/${target}" "${source_path}" +} + +seal_secret "secrets/infra/forgejo-nsc-token.age" "${REPO_ROOT}/intake/forgejo_nsc_token.txt" +seal_secret "secrets/infra/forgejo-nsc-dispatcher-config.age" "${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml" +seal_secret "secrets/infra/forgejo-nsc-autoscaler-config.age" "${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml" + +chmod 600 \ + "${REPO_ROOT}/secrets/infra/forgejo-nsc-token.age" \ + "${REPO_ROOT}/secrets/infra/forgejo-nsc-dispatcher-config.age" \ + "${REPO_ROOT}/secrets/infra/forgejo-nsc-autoscaler-config.age" + +echo "Sealed forgejo-nsc runtime inputs into:" +printf ' %s\n' \ + "${REPO_ROOT}/secrets/infra/forgejo-nsc-token.age" \ + "${REPO_ROOT}/secrets/infra/forgejo-nsc-dispatcher-config.age" \ + "${REPO_ROOT}/secrets/infra/forgejo-nsc-autoscaler-config.age" +echo "Deploy burrow-forge to apply the new CI credentials." 
diff --git a/Scripts/sync-forgejo-nsc-config.sh b/Scripts/sync-forgejo-nsc-config.sh index d6ac48c..2ce7114 100755 --- a/Scripts/sync-forgejo-nsc-config.sh +++ b/Scripts/sync-forgejo-nsc-config.sh @@ -1,109 +1,7 @@ #!/usr/bin/env bash set -euo pipefail -usage() { - cat <<'EOF' -Usage: Scripts/sync-forgejo-nsc-config.sh [options] - -Deploy Burrow forgejo-nsc runtime inputs from age secrets onto the forge host. - -Options: - --host SSH target (default: root@git.burrow.net) - --ssh-key SSH private key (default: secrets/forgejo/agent-ssh-key.age, then intake/) - --rotate-pat Re-render the encrypted runtime inputs before deploying. - --no-restart Validate the encrypted inputs only; do not deploy. - -h, --help Show this help text. -EOF -} - -SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" -REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)" -# shellcheck source=Scripts/_burrow-secrets.sh -source "${SCRIPT_DIR}/_burrow-secrets.sh" - -HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}" -SSH_KEY="${BURROW_FORGE_SSH_KEY:-}" -KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}" -ROTATE_PAT=0 -NO_RESTART=0 -TMP_DIR="" - -cleanup() { - [[ -n "${TMP_DIR}" ]] && rm -rf "${TMP_DIR}" >/dev/null 2>&1 || true - burrow_cleanup_secret_tmpfiles -} -trap cleanup EXIT - -while [[ $# -gt 0 ]]; do - case "$1" in - --host) - HOST="${2:?missing value for --host}" - shift 2 - ;; - --ssh-key) - SSH_KEY="${2:?missing value for --ssh-key}" - shift 2 - ;; - --rotate-pat) - ROTATE_PAT=1 - shift - ;; - --no-restart) - NO_RESTART=1 - shift - ;; - -h|--help) - usage - exit 0 - ;; - *) - echo "unknown option: $1" >&2 - usage >&2 - exit 64 - ;; - esac -done - -mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")" - -burrow_require_cmd() { - if ! 
command -v "$1" >/dev/null 2>&1; then - echo "missing required command: $1" >&2 - exit 1 - fi -} - -burrow_require_cmd ssh - -SSH_KEY="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "${SSH_KEY}" \ - "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" \ - "${REPO_ROOT}/secrets/forgejo/agent-ssh-key.age" \ - "${HOME}/.ssh/agent_at_burrow_net_ed25519" -)" - -if [[ "${ROTATE_PAT}" -eq 1 ]]; then - "${SCRIPT_DIR}/provision-forgejo-nsc.sh" --host "${HOST}" --ssh-key "${SSH_KEY}" -fi - -token_file="${REPO_ROOT}/secrets/forgejo/nsc-token.age" -dispatcher_file="${REPO_ROOT}/secrets/forgejo/nsc-dispatcher-config.age" -autoscaler_file="${REPO_ROOT}/secrets/forgejo/nsc-autoscaler-config.age" - -for path in "${token_file}" "${dispatcher_file}" "${autoscaler_file}"; do - if [[ ! -s "${path}" ]]; then - echo "required runtime input missing or empty: ${path}" >&2 - exit 1 - fi -done - -if [[ "${NO_RESTART}" -eq 0 ]]; then - BURROW_FORGE_HOST="${HOST}" \ - BURROW_FORGE_SSH_KEY="${SSH_KEY}" \ - BURROW_FORGE_KNOWN_HOSTS_FILE="${KNOWN_HOSTS_FILE}" \ - "${SCRIPT_DIR}/forge-deploy.sh" --switch -fi - -echo "forgejo-nsc runtime sync complete (host=${HOST}, deployed=$((1 - NO_RESTART)))." +echo "Scripts/sync-forgejo-nsc-config.sh is obsolete." >&2 +echo "Burrow forgejo-nsc now consumes agenix-backed secrets instead of host-local intake files." >&2 +echo "Use Scripts/seal-forgejo-nsc-secrets.sh and deploy burrow-forge." >&2 +exit 1 diff --git a/Tools/forwardemail-custom-s3.sh b/Tools/forwardemail-custom-s3.sh index 4640bc8..5f39ddd 100755 --- a/Tools/forwardemail-custom-s3.sh +++ b/Tools/forwardemail-custom-s3.sh @@ -3,22 +3,17 @@ set -euo pipefail umask 077 -SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" -REPO_ROOT="$(cd "${SCRIPT_DIR}/.." 
&& pwd)" -# shellcheck source=Scripts/_burrow-secrets.sh -source "${REPO_ROOT}/Scripts/_burrow-secrets.sh" - usage() { cat <<'EOF' Usage: Tools/forwardemail-custom-s3.sh \ --domain burrow.net \ - --api-token-file secrets/forwardemail/api-token.age \ + --api-token-file intake/forwardemail_api_token.txt \ --s3-endpoint https:// \ --s3-region \ --s3-bucket \ - --s3-access-key-file secrets/forwardemail/hetzner-s3-user.age \ - --s3-secret-key-file secrets/forwardemail/hetzner-s3-secret.age + --s3-access-key-file intake/hetzner-s3-user.txt \ + --s3-secret-key-file intake/hetzner-s3-secret.txt Options: --domain Forward Email domain to update. @@ -59,18 +54,13 @@ read_secret() { printf '%s' "$value" } -cleanup() { - burrow_cleanup_secret_tmpfiles -} -trap cleanup EXIT - domain="" -api_token_file="${FORWARDEMAIL_API_TOKEN_FILE:-}" +api_token_file="" s3_endpoint="" s3_region="" s3_bucket="" -s3_access_key_file="${FORWARDEMAIL_S3_ACCESS_KEY_FILE:-}" -s3_secret_key_file="${FORWARDEMAIL_S3_SECRET_KEY_FILE:-}" +s3_access_key_file="" +s3_secret_key_file="" test_only=false while [[ $# -gt 0 ]]; do @@ -118,38 +108,16 @@ while [[ $# -gt 0 ]]; do done [[ -n "$domain" ]] || fail "--domain is required" +[[ -n "$api_token_file" ]] || fail "--api-token-file is required" [[ -n "$s3_endpoint" || "$test_only" == true ]] || fail "--s3-endpoint is required unless --test-only is set" [[ -n "$s3_region" || "$test_only" == true ]] || fail "--s3-region is required unless --test-only is set" [[ -n "$s3_bucket" || "$test_only" == true ]] || fail "--s3-bucket is required unless --test-only is set" -api_token_file="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "${api_token_file}" \ - "${REPO_ROOT}/intake/forwardemail_api_token.txt" \ - "${REPO_ROOT}/secrets/forwardemail/api-token.age" -)" || fail "unable to resolve Forward Email API token file" +[[ -n "$s3_access_key_file" || "$test_only" == true ]] || fail "--s3-access-key-file is required unless --test-only is set" +[[ -n 
"$s3_secret_key_file" || "$test_only" == true ]] || fail "--s3-secret-key-file is required unless --test-only is set" + require_file "$api_token_file" api_token="$(read_secret "$api_token_file")" -if [[ "$test_only" != true ]]; then - s3_access_key_file="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "${s3_access_key_file}" \ - "${REPO_ROOT}/intake/hetzner-s3-user.txt" \ - "${REPO_ROOT}/secrets/forwardemail/hetzner-s3-user.age" - )" || fail "unable to resolve Hetzner S3 access key file" - s3_secret_key_file="$( - burrow_resolve_secret_file \ - "${REPO_ROOT}" \ - "${s3_secret_key_file}" \ - "${REPO_ROOT}/intake/hetzner-s3-secret.txt" \ - "${REPO_ROOT}/secrets/forwardemail/hetzner-s3-secret.age" - )" || fail "unable to resolve Hetzner S3 secret key file" - require_file "$s3_access_key_file" - require_file "$s3_secret_key_file" -fi - if [[ "$test_only" == false ]]; then require_file "$s3_access_key_file" require_file "$s3_secret_key_file" diff --git a/Tools/forwardemail-hetzner-storage.py b/Tools/forwardemail-hetzner-storage.py index 2c5ff82..3a2a941 100755 --- a/Tools/forwardemail-hetzner-storage.py +++ b/Tools/forwardemail-hetzner-storage.py @@ -6,7 +6,6 @@ import argparse import datetime as dt import hashlib import hmac -import subprocess import sys import textwrap from pathlib import Path @@ -14,38 +13,11 @@ from urllib.parse import urlencode, urlparse import requests -REPO_ROOT = Path(__file__).resolve().parent.parent - - -def default_secret_path(age_rel: str, intake_rel: str) -> str: - age_path = REPO_ROOT / age_rel - if age_path.exists(): - return str(age_path) - return intake_rel - def read_secret(path: str) -> str: - file_path = Path(path) - if not file_path.is_absolute(): - file_path = REPO_ROOT / file_path - if file_path.suffix == ".age": - value = subprocess.check_output( - [ - "nix", - "--extra-experimental-features", - "nix-command flakes", - "run", - f"{REPO_ROOT}#agenix", - "--", - "-d", - str(file_path), - ], - text=True, - ).strip() - else: - 
value = file_path.read_text(encoding="utf-8").strip() + value = Path(path).read_text(encoding="utf-8").strip() if not value: - raise SystemExit(f"error: empty secret file: {file_path}") + raise SystemExit(f"error: empty secret file: {path}") return value @@ -240,12 +212,12 @@ def parse_args() -> argparse.Namespace: parser.add_argument("--region", default="hel1", help="S3 region.") parser.add_argument( "--access-key-file", - default=default_secret_path("secrets/forwardemail/hetzner-s3-user.age", "intake/hetzner-s3-user.txt"), + default="intake/hetzner-s3-user.txt", help="File containing the S3 access key id.", ) parser.add_argument( "--secret-key-file", - default=default_secret_path("secrets/forwardemail/hetzner-s3-secret.age", "intake/hetzner-s3-secret.txt"), + default="intake/hetzner-s3-secret.txt", help="File containing the S3 secret key.", ) parser.add_argument( diff --git a/Tools/tailscale-login-bridge/go.mod b/Tools/tailscale-login-bridge/go.mod new file mode 100644 index 0000000..0e19f33 --- /dev/null +++ b/Tools/tailscale-login-bridge/go.mod @@ -0,0 +1,66 @@ +module burrow.dev/tailscale-login-bridge + +go 1.26.1 + +require tailscale.com v1.96.5 + +require ( + filippo.io/edwards25519 v1.2.0 // indirect + github.com/akutz/memconn v0.1.0 // indirect + github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect + github.com/aws/aws-sdk-go-v2 v1.41.0 // indirect + github.com/aws/aws-sdk-go-v2/config v1.29.5 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.58 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect + 
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect + github.com/aws/smithy-go v1.24.0 // indirect + github.com/coder/websocket v1.8.12 // indirect + github.com/creachadair/msync v0.7.1 // indirect + github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/gaissmai/bart v0.26.1 // indirect + github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced // indirect + github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hdevalence/ed25519consensus v0.2.0 // indirect + github.com/huin/goupnp v1.3.0 // indirect + github.com/jsimonetti/rtnetlink v1.4.0 // indirect + github.com/klauspost/compress v1.18.2 // indirect + github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect + github.com/mdlayher/socket v0.5.0 // indirect + github.com/mitchellh/go-ps v1.0.0 // indirect + github.com/pires/go-proxyproto v0.8.1 // indirect + github.com/prometheus-community/pro-bing v0.4.0 // indirect + github.com/safchain/ethtool v0.3.0 // indirect + github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect + github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect + github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a // indirect + github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc // indirect + github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 // indirect + github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da // indirect + github.com/x448/float16 v0.8.4 // indirect + go4.org/mem 
v0.0.0-20240501181205-ae6ca9944745 // indirect + go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect + golang.org/x/crypto v0.46.0 // indirect + golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/oauth2 v0.33.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/term v0.38.0 // indirect + golang.org/x/text v0.32.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect + golang.zx2c4.com/wireguard/windows v0.5.3 // indirect + gvisor.dev/gvisor v0.0.0-20260224225140-573d5e7127a8 // indirect +) diff --git a/Tools/tailscale-login-bridge/go.sum b/Tools/tailscale-login-bridge/go.sum new file mode 100644 index 0000000..5393a62 --- /dev/null +++ b/Tools/tailscale-login-bridge/go.sum @@ -0,0 +1,229 @@ +9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f h1:1C7nZuxUMNz7eiQALRfiqNOm04+m3edWlRff/BYHf0Q= +9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f/go.mod h1:hHyrZRryGqVdqrknjq5OWDLGCTJ2NeEvtrpR96mjraM= +filippo.io/edwards25519 v1.2.0 h1:crnVqOiS4jqYleHd9vaKZ+HKtHfllngJIiOpNpoJsjo= +filippo.io/edwards25519 v1.2.0/go.mod h1:xzAOLCNug/yB62zG1bQ8uziwrIqIuxhctzJT18Q77mc= +filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc= +filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A= +github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod 
h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4= +github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= +github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= +github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= 
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM= +github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= +github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk= +github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= +github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= +github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02 h1:bXAPYSbdYbS5VTy92NIUbeDI1qyggi+JYh5op9IFlcQ= +github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c= +github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= +github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= +github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= +github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= +github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= +github.com/coreos/go-iptables 
v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/creachadair/mds v0.25.9 h1:080Hr8laN2h+l3NeVCGMBpXtIPnl9mz8e4HLraGPqtA= +github.com/creachadair/mds v0.25.9/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs= +github.com/creachadair/msync v0.7.1 h1:SeZmuEBXQPe5GqV/C94ER7QIZPwtvFbeQiykzt/7uho= +github.com/creachadair/msync v0.7.1/go.mod h1:8CcFlLsSujfHE5wWm19uUBLHIPDAUr6LXDwneVMO008= +github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc= +github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g= +github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= +github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= +github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa h1:h8TfIT1xc8FWbwwpmHn1J5i43Y0uZP97GqasGCzSRJk= +github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= +github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= +github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= +github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= +github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod 
h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/gaissmai/bart v0.26.1 h1:+w4rnLGNlA2GDVn382Tfe3jOsK5vOr5n4KmigJ9lbTo= +github.com/gaissmai/bart v0.26.1/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c= +github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I= +github.com/github/fakeca v0.1.0/go.mod h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo= +github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced h1:Q311OHjMh/u5E2TITc++WlTP5We0xNseRMkHDyvhW7I= +github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo= +github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737/go.mod h1:MIS0jDzbU/vuM9MC4YnBITCv+RYuTRq8dJzmCrFsK9g= +github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= +github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I= +github.com/google/go-tpm v0.9.4/go.mod 
h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= +github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI= +github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= +github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/illarion/gonotify/v3 v3.0.2 h1:O7S6vcopHexutmpObkeWsnzMJt/r1hONIEogeVNmJMk= +github.com/illarion/gonotify/v3 v3.0.2/go.mod h1:HWGPdPe817GfvY3w7cx6zkbzNZfi3QjcBm/wgVvEL1U= +github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= +github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= +github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= +github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= +github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= +github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= +github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/kortschak/wol 
v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= +github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= +github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= +github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= +github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= +github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= +github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= +github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ= +github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= +github.com/pierrec/lz4/v4 v4.1.25 h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0= +github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4= +github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0= +github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU= +github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= +github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= +github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4= +github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= +github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= +github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ= +github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4= +github.com/tailscale/go-winio 
v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8Jj4P4c1a3CtQyMaTVCznlkLZI++hok4= +github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg= +github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 h1:SRL6irQkKGQKKLzvQP/ke/2ZuB7Py5+XuqtOgSj+iMM= +github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= +github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= +github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= +github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= +github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= +github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= +github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= +github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= +github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= +github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da h1:jVRUZPRs9sqyKlYHHzHjAqKN+6e/Vog6NpHYeNPJqOw= +github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= 
+github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= +github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= +github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk= +github.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg= +github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1WMluqE= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= +github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= +github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= +golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod 
h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w= +golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 
h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg= +golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI= +golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE= +golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gvisor.dev/gvisor v0.0.0-20260224225140-573d5e7127a8 h1:Zy8IV/+FMLxy6j6p87vk/vQGKcdnbprwjTxc8UiUtsA= +gvisor.dev/gvisor v0.0.0-20260224225140-573d5e7127a8/go.mod h1:QkHjoMIBaYtpVufgwv3keYAbln78mBoCuShZrPrer1Q= +honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 h1:5SXjd4ET5dYijLaf0O3aOenC0Z4ZafIWSpjUzsQaNho= +honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0/go.mod h1:EPDDhEZqVHhWuPI5zPAsjU0U7v9xNIWjoOVyZ5ZcniQ= +howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= +howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +tailscale.com v1.96.5 h1:gNkfA/KSZAl6jCH9cj8urq00HRWItDDTtGsyATI89jA= +tailscale.com v1.96.5/go.mod h1:/3lnZBYb2UEwnN0MNu2SDXUtT06AGd5k0s+OWx3WmcY= diff --git a/Tools/tailscale-login-bridge/main.go b/Tools/tailscale-login-bridge/main.go new file mode 100644 index 0000000..877d0e4 --- /dev/null +++ b/Tools/tailscale-login-bridge/main.go @@ -0,0 +1,523 @@ +package main + +import ( + "context" + "encoding/binary" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "log" + "net" + "net/netip" + "net/http" + "os" + "strconv" + "sync" + "time" + + "github.com/tailscale/wireguard-go/tun" + "tailscale.com/client/local" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + 
"tailscale.com/tailcfg" + "tailscale.com/tsnet" +) + +type statusResponse struct { + BackendState string `json:"backend_state"` + AuthURL string `json:"auth_url,omitempty"` + Running bool `json:"running"` + NeedsLogin bool `json:"needs_login"` + TailnetName string `json:"tailnet_name,omitempty"` + MagicDNSSuffix string `json:"magic_dns_suffix,omitempty"` + SelfDNSName string `json:"self_dns_name,omitempty"` + TailscaleIPs []string `json:"tailscale_ips,omitempty"` + Health []string `json:"health,omitempty"` + Peers []peerSummary `json:"peers,omitempty"` +} + +type peerSummary struct { + Name string `json:"name,omitempty"` + DNSName string `json:"dns_name,omitempty"` + TailscaleIPs []string `json:"tailscale_ips,omitempty"` + Online bool `json:"online"` + Active bool `json:"active"` + Relay string `json:"relay,omitempty"` + CurAddr string `json:"cur_addr,omitempty"` + LastSeenUnix int64 `json:"last_seen_unix,omitempty"` +} + +type pingResponse struct { + Result *ipnstate.PingResult `json:"result,omitempty"` +} + +type helperHello struct { + ListenAddr string `json:"listen_addr"` + PacketSocket string `json:"packet_socket,omitempty"` +} + +type helperState struct { + mu sync.RWMutex + authURL string +} + +func (s *helperState) authURLSnapshot() string { + s.mu.RLock() + defer s.mu.RUnlock() + return s.authURL +} + +func (s *helperState) setAuthURL(url string) { + s.mu.Lock() + defer s.mu.Unlock() + s.authURL = url +} + +func (s *helperState) clearAuthURL() { + s.setAuthURL("") +} + +// chanTUN is a tun.Device backed by channels so another process can feed and +// consume raw IP packets while tsnet handles the Tailnet control/data plane. 
+type chanTUN struct { + Inbound chan []byte + Outbound chan []byte + closed chan struct{} + events chan tun.Event +} + +func newChanTUN() *chanTUN { + t := &chanTUN{ + Inbound: make(chan []byte, 1024), + Outbound: make(chan []byte, 1024), + closed: make(chan struct{}), + events: make(chan tun.Event, 1), + } + t.events <- tun.EventUp + return t +} + +func (t *chanTUN) File() *os.File { return nil } + +func (t *chanTUN) Close() error { + select { + case <-t.closed: + default: + close(t.closed) + close(t.Inbound) + } + return nil +} + +func (t *chanTUN) Read(bufs [][]byte, sizes []int, offset int) (int, error) { + select { + case <-t.closed: + return 0, io.EOF + case pkt, ok := <-t.Outbound: + if !ok { + return 0, io.EOF + } + sizes[0] = copy(bufs[0][offset:], pkt) + return 1, nil + } +} + +func (t *chanTUN) Write(bufs [][]byte, offset int) (int, error) { + for _, buf := range bufs { + pkt := buf[offset:] + if len(pkt) == 0 { + continue + } + select { + case <-t.closed: + return 0, errors.New("closed") + case t.Inbound <- append([]byte(nil), pkt...): + default: + } + } + return len(bufs), nil +} + +func (t *chanTUN) MTU() (int, error) { return 1280, nil } +func (t *chanTUN) Name() (string, error) { return "burrow-tailnet", nil } +func (t *chanTUN) Events() <-chan tun.Event { return t.events } +func (t *chanTUN) BatchSize() int { return 1 } + +func main() { + listen := flag.String("listen", "127.0.0.1:0", "local listen address") + stateDir := flag.String("state-dir", "", "persistent state directory") + hostname := flag.String("hostname", "burrow-apple", "tailnet hostname") + controlURL := flag.String("control-url", "", "optional control URL") + packetSocket := flag.String("packet-socket", "", "optional unix socket path for raw packet bridging") + udpEchoPort := flag.Int("udp-echo-port", 0, "optional tailnet UDP echo port") + flag.Parse() + + if *stateDir == "" { + log.Fatal("--state-dir is required") + } + + if err := os.MkdirAll(*stateDir, 0o755); err != nil { + 
log.Fatalf("create state dir: %v", err) + } + + server := &tsnet.Server{ + Dir: *stateDir, + Hostname: *hostname, + UserLogf: log.Printf, + } + + var tunDevice *chanTUN + var packetListener net.Listener + if *packetSocket != "" { + _ = os.Remove(*packetSocket) + ln, err := net.Listen("unix", *packetSocket) + if err != nil { + log.Fatalf("packet listen: %v", err) + } + packetListener = ln + defer func() { + packetListener.Close() + _ = os.Remove(*packetSocket) + }() + + tunDevice = newChanTUN() + server.Tun = tunDevice + } + if *controlURL != "" { + server.ControlURL = *controlURL + } + defer server.Close() + + if err := server.Start(); err != nil { + log.Fatalf("start tsnet: %v", err) + } + + localClient, err := server.LocalClient() + if err != nil { + log.Fatalf("local client: %v", err) + } + state := &helperState{} + + ln, err := net.Listen("tcp", *listen) + if err != nil { + log.Fatalf("listen: %v", err) + } + defer ln.Close() + + if packetListener != nil { + go servePacketBridge(packetListener, tunDevice) + } + if *udpEchoPort > 0 { + go serveUDPEcho(context.Background(), server, localClient, *udpEchoPort) + } + + hello := helperHello{ + ListenAddr: ln.Addr().String(), + } + if *packetSocket != "" { + hello.PacketSocket = *packetSocket + } + if err := json.NewEncoder(os.Stdout).Encode(hello); err != nil { + log.Fatalf("write hello: %v", err) + } + _ = os.Stdout.Sync() + + mux := http.NewServeMux() + mux.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) { + status, err := snapshot(r.Context(), localClient, state) + if err != nil { + http.Error(w, err.Error(), http.StatusBadGateway) + return + } + w.Header().Set("content-type", "application/json") + _ = json.NewEncoder(w).Encode(status) + }) + mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) { + ip := r.URL.Query().Get("ip") + if ip == "" { + http.Error(w, "missing ip", http.StatusBadRequest) + return + } + target, err := netip.ParseAddr(ip) + if err != nil { + http.Error(w, 
fmt.Sprintf("invalid ip: %v", err), http.StatusBadRequest) + return + } + + pingType := tailcfg.PingTSMP + switch r.URL.Query().Get("type") { + case "", "tsmp", "TSMP": + pingType = tailcfg.PingTSMP + case "icmp", "ICMP": + pingType = tailcfg.PingICMP + case "peerapi": + pingType = tailcfg.PingPeerAPI + default: + http.Error(w, "unsupported ping type", http.StatusBadRequest) + return + } + + result, err := localClient.Ping(r.Context(), target, pingType) + if err != nil { + http.Error(w, err.Error(), http.StatusBadGateway) + return + } + + w.Header().Set("content-type", "application/json") + _ = json.NewEncoder(w).Encode(&pingResponse{Result: result}) + }) + mux.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + go func() { + _ = server.Close() + time.Sleep(100 * time.Millisecond) + os.Exit(0) + }() + }) + + httpServer := &http.Server{ + Handler: mux, + } + log.Fatal(httpServer.Serve(ln)) +} + +func servePacketBridge(listener net.Listener, device *chanTUN) { + for { + conn, err := listener.Accept() + if err != nil { + if errors.Is(err, net.ErrClosed) { + return + } + log.Printf("packet accept: %v", err) + continue + } + log.Printf("packet bridge connected") + if err := bridgePacketConn(conn, device); err != nil && !errors.Is(err, io.EOF) { + log.Printf("packet bridge error: %v", err) + } + _ = conn.Close() + log.Printf("packet bridge disconnected") + } +} + +func bridgePacketConn(conn net.Conn, device *chanTUN) error { + errCh := make(chan error, 2) + + go func() { + for { + pkt, err := readFrame(conn) + if err != nil { + errCh <- err + return + } + select { + case <-device.closed: + errCh <- io.EOF + return + case device.Outbound <- pkt: + } + } + }() + + go func() { + for { + select { + case <-device.closed: + errCh <- io.EOF + return + case pkt, ok := <-device.Inbound: + if !ok { + errCh <- io.EOF + return + } + if err := writeFrame(conn, pkt); err != nil { + errCh <- err + return + } + } + } + }() + + 
return <-errCh +} + +func readFrame(r io.Reader) ([]byte, error) { + var size [4]byte + if _, err := io.ReadFull(r, size[:]); err != nil { + return nil, err + } + length := binary.BigEndian.Uint32(size[:]) + if length == 0 { + return []byte{}, nil + } + packet := make([]byte, length) + if _, err := io.ReadFull(r, packet); err != nil { + return nil, err + } + return packet, nil +} + +func writeFrame(w io.Writer, packet []byte) error { + var size [4]byte + binary.BigEndian.PutUint32(size[:], uint32(len(packet))) + if _, err := w.Write(size[:]); err != nil { + return err + } + if len(packet) == 0 { + return nil + } + _, err := w.Write(packet) + return err +} + +func snapshot(ctx context.Context, localClient *local.Client, state *helperState) (*statusResponse, error) { + status, err := localClient.Status(ctx) + if err != nil { + return nil, err + } + + authURL := status.AuthURL + if authURL == "" { + authURL = state.authURLSnapshot() + } + if status.BackendState == ipn.Running.String() { + state.clearAuthURL() + authURL = "" + } else if (status.BackendState == ipn.NeedsLogin.String() || status.BackendState == ipn.NoState.String()) && authURL == "" { + authURL, err = awaitAuthURL(ctx, localClient, state) + if err != nil { + return nil, err + } + } + + response := &statusResponse{ + BackendState: status.BackendState, + AuthURL: authURL, + Running: status.BackendState == ipn.Running.String(), + NeedsLogin: status.BackendState == ipn.NeedsLogin.String(), + Health: append([]string(nil), status.Health...), + } + + if status.CurrentTailnet != nil { + response.TailnetName = status.CurrentTailnet.Name + response.MagicDNSSuffix = status.CurrentTailnet.MagicDNSSuffix + } + if status.Self != nil { + response.SelfDNSName = status.Self.DNSName + } + for _, ip := range status.TailscaleIPs { + response.TailscaleIPs = append(response.TailscaleIPs, ip.String()) + } + for _, key := range status.Peers() { + peer := status.Peer[key] + if peer == nil { + continue + } + summary := 
peerSummary{ + Name: peer.HostName, + DNSName: peer.DNSName, + Online: peer.Online, + Active: peer.Active, + Relay: peer.Relay, + CurAddr: peer.CurAddr, + LastSeenUnix: peer.LastSeen.Unix(), + } + for _, ip := range peer.TailscaleIPs { + summary.TailscaleIPs = append(summary.TailscaleIPs, ip.String()) + } + response.Peers = append(response.Peers, summary) + } + return response, nil +} + +func serveUDPEcho(ctx context.Context, server *tsnet.Server, localClient *local.Client, port int) { + ip, err := awaitTailscaleIP(ctx, localClient) + if err != nil { + log.Printf("udp echo setup failed: %v", err) + return + } + + listenAddr := net.JoinHostPort(ip.String(), strconv.Itoa(port)) + pc, err := server.ListenPacket("udp", listenAddr) + if err != nil { + log.Printf("udp echo listen failed on %s: %v", listenAddr, err) + return + } + defer pc.Close() + + log.Printf("udp echo listening on %s", pc.LocalAddr()) + buf := make([]byte, 64<<10) + for { + n, addr, err := pc.ReadFrom(buf) + if err != nil { + if errors.Is(err, net.ErrClosed) || errors.Is(err, io.EOF) { + return + } + log.Printf("udp echo read failed: %v", err) + return + } + if _, err := pc.WriteTo(buf[:n], addr); err != nil { + log.Printf("udp echo write failed: %v", err) + return + } + } +} + +func awaitTailscaleIP(ctx context.Context, localClient *local.Client) (netip.Addr, error) { + for range 60 { + status, err := localClient.StatusWithoutPeers(ctx) + if err == nil { + for _, ip := range status.TailscaleIPs { + if ip.Is4() { + return ip, nil + } + } + for _, ip := range status.TailscaleIPs { + if ip.Is6() { + return ip, nil + } + } + } + select { + case <-ctx.Done(): + return netip.Addr{}, ctx.Err() + case <-time.After(250 * time.Millisecond): + } + } + return netip.Addr{}, errors.New("timed out waiting for tailscale IP") +} + +func awaitAuthURL(ctx context.Context, localClient *local.Client, state *helperState) (string, error) { + watchCtx, cancel := context.WithTimeout(ctx, 8*time.Second) + defer cancel() + + 
watcher, err := localClient.WatchIPNBus(watchCtx, ipn.NotifyInitialState) + if err != nil { + return "", err + } + defer watcher.Close() + + if err := localClient.StartLoginInteractive(ctx); err != nil { + return "", err + } + + for { + notify, err := watcher.Next() + if err != nil { + if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { + return state.authURLSnapshot(), nil + } + return "", err + } + if notify.BrowseToURL != nil && *notify.BrowseToURL != "" { + state.setAuthURL(*notify.BrowseToURL) + return *notify.BrowseToURL, nil + } + if notify.State != nil && *notify.State == ipn.Running { + state.clearAuthURL() + return "", nil + } + } +} diff --git a/burrow-gtk/Cargo.toml b/burrow-gtk/Cargo.toml index 21cb52e..b12577a 100644 --- a/burrow-gtk/Cargo.toml +++ b/burrow-gtk/Cargo.toml @@ -11,6 +11,8 @@ relm4 = { version = "0.6", features = ["libadwaita", "gnome_44"]} burrow = { version = "*", path = "../burrow/" } tokio = { version = "1.35.0", features = ["time", "sync"] } gettext-rs = { version = "0.7.0", features = ["gettext-system"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" [build-dependencies] anyhow = "1.0" diff --git a/burrow-gtk/src/account_store.rs b/burrow-gtk/src/account_store.rs new file mode 100644 index 0000000..6aee78b --- /dev/null +++ b/burrow-gtk/src/account_store.rs @@ -0,0 +1,139 @@ +use anyhow::{Context, Result}; +use serde::{Deserialize, Serialize}; +use std::{ + path::PathBuf, + time::{SystemTime, UNIX_EPOCH}, +}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AccountRecord { + pub id: String, + pub kind: AccountKind, + pub title: String, + pub authority: Option, + pub account: String, + pub identity: String, + pub hostname: Option, + pub tailnet: Option, + pub note: Option, + pub created_at: u64, + pub updated_at: u64, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum AccountKind { + WireGuard, 
+ Tor, + Tailnet, +} + +impl AccountKind { + pub fn title(self) -> &'static str { + match self { + Self::WireGuard => "WireGuard", + Self::Tor => "Tor", + Self::Tailnet => "Tailnet", + } + } + + fn sort_rank(self) -> u8 { + match self { + Self::Tailnet => 0, + Self::Tor => 1, + Self::WireGuard => 2, + } + } +} + +pub fn load() -> Result> { + let path = storage_path()?; + if !path.exists() { + return Ok(Vec::new()); + } + let data = + std::fs::read(&path).with_context(|| format!("failed to read {}", path.display()))?; + serde_json::from_slice(&data).with_context(|| format!("failed to parse {}", path.display())) +} + +pub fn upsert(mut record: AccountRecord) -> Result> { + let mut accounts = load()?; + let now = timestamp(); + record.updated_at = now; + if record.created_at == 0 { + record.created_at = now; + } + + if let Some(index) = accounts.iter().position(|account| account.id == record.id) { + accounts[index] = record; + } else { + accounts.push(record); + } + accounts.sort_by(|lhs, rhs| { + lhs.kind + .sort_rank() + .cmp(&rhs.kind.sort_rank()) + .then_with(|| lhs.title.to_lowercase().cmp(&rhs.title.to_lowercase())) + }); + persist(&accounts)?; + Ok(accounts) +} + +pub fn new_record( + kind: AccountKind, + title: String, + authority: Option, + account: String, + identity: String, + hostname: Option, + tailnet: Option, + note: Option, +) -> AccountRecord { + let now = timestamp(); + AccountRecord { + id: format!("{}-{now}", kind.title().to_ascii_lowercase()), + kind, + title, + authority, + account, + identity, + hostname, + tailnet, + note, + created_at: now, + updated_at: now, + } +} + +fn persist(accounts: &[AccountRecord]) -> Result<()> { + let path = storage_path()?; + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent) + .with_context(|| format!("failed to create {}", parent.display()))?; + } + let data = serde_json::to_vec_pretty(accounts).context("failed to encode account store")?; + std::fs::write(&path, data).with_context(|| 
format!("failed to write {}", path.display())) +} + +fn storage_path() -> Result { + if let Some(data_home) = std::env::var_os("XDG_DATA_HOME") { + return Ok(PathBuf::from(data_home) + .join("burrow") + .join("accounts.json")); + } + if let Some(home) = std::env::var_os("HOME") { + return Ok(PathBuf::from(home) + .join(".local") + .join("share") + .join("burrow") + .join("accounts.json")); + } + Ok(std::env::temp_dir().join("burrow-accounts.json")) +} + +fn timestamp() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|duration| duration.as_secs()) + .unwrap_or_default() +} diff --git a/burrow-gtk/src/components/app.rs b/burrow-gtk/src/components/app.rs index 62c98c0..7354825 100644 --- a/burrow-gtk/src/components/app.rs +++ b/burrow-gtk/src/components/app.rs @@ -1,24 +1,19 @@ use super::*; use anyhow::Context; -use std::time::Duration; - -const RECONNECT_POLL_TIME: Duration = Duration::from_secs(5); pub struct App { - daemon_client: Arc>>, - settings_screen: Controller, - switch_screen: AsyncController, + _home_screen: AsyncController, } #[derive(Debug)] pub enum AppMsg { None, - PostInit, } impl App { pub fn run() { let app = RelmApp::new(config::ID); + relm4::set_global_css(APP_CSS); Self::setup_gresources().unwrap(); Self::setup_i18n().unwrap(); @@ -49,7 +44,7 @@ impl AsyncComponent for App { view! 
{ adw::Window { set_title: Some("Burrow"), - set_default_size: (640, 480), + set_default_size: (900, 760), } } @@ -58,100 +53,84 @@ impl AsyncComponent for App { root: Self::Root, sender: AsyncComponentSender, ) -> AsyncComponentParts { - let daemon_client = Arc::new(Mutex::new(DaemonClient::new().await.ok())); - - let switch_screen = switch_screen::SwitchScreen::builder() - .launch(switch_screen::SwitchScreenInit { - daemon_client: Arc::clone(&daemon_client), - }) - .forward(sender.input_sender(), |_| AppMsg::None); - - let settings_screen = settings_screen::SettingsScreen::builder() - .launch(settings_screen::SettingsScreenInit { - daemon_client: Arc::clone(&daemon_client), - }) + let home_screen = home_screen::HomeScreen::builder() + .launch(()) .forward(sender.input_sender(), |_| AppMsg::None); let widgets = view_output!(); - let view_stack = adw::ViewStack::new(); - view_stack.add_titled(switch_screen.widget(), None, "Switch"); - view_stack.add_titled(settings_screen.widget(), None, "Settings"); - - let view_switcher_bar = adw::ViewSwitcherBar::builder().stack(&view_stack).build(); - view_switcher_bar.set_reveal(true); - - // When libadwaita 1.4 support becomes more avaliable, this approach is more appropriate - // - // let toolbar = adw::ToolbarView::new(); - // toolbar.add_top_bar( - // &adw::HeaderBar::builder() - // .title_widget(>k::Label::new(Some("Burrow"))) - // .build(), - // ); - // toolbar.add_bottom_bar(&view_switcher_bar); - // toolbar.set_content(Some(&view_stack)); - // root.set_content(Some(&toolbar)); - let content = gtk::Box::new(gtk::Orientation::Vertical, 0); content.append( &adw::HeaderBar::builder() .title_widget(>k::Label::new(Some("Burrow"))) .build(), ); - content.append(&view_stack); - content.append(&view_switcher_bar); + content.append(home_screen.widget()); root.set_content(Some(&content)); - sender.input(AppMsg::PostInit); - - let model = App { - daemon_client, - switch_screen, - settings_screen, - }; + let model = App { 
_home_screen: home_screen }; AsyncComponentParts { model, widgets } } async fn update( &mut self, - _msg: Self::Input, + msg: Self::Input, _sender: AsyncComponentSender, _root: &Self::Root, ) { - loop { - tokio::time::sleep(RECONNECT_POLL_TIME).await; - { - let mut daemon_client = self.daemon_client.lock().await; - let mut disconnected_daemon_client = false; - - if let Some(daemon_client) = daemon_client.as_mut() { - if let Err(_e) = daemon_client.send_command(DaemonCommand::ServerInfo).await { - disconnected_daemon_client = true; - self.switch_screen - .emit(switch_screen::SwitchScreenMsg::DaemonDisconnect); - self.settings_screen - .emit(settings_screen::SettingsScreenMsg::DaemonStateChange) - } - } - - if disconnected_daemon_client || daemon_client.is_none() { - match DaemonClient::new().await { - Ok(new_daemon_client) => { - *daemon_client = Some(new_daemon_client); - self.switch_screen - .emit(switch_screen::SwitchScreenMsg::DaemonReconnect); - self.settings_screen - .emit(settings_screen::SettingsScreenMsg::DaemonStateChange) - } - Err(_e) => { - // TODO: Handle Error - } - } - } - } + match msg { + AppMsg::None => {} } } } + +const APP_CSS: &str = r#" +.empty-state { + border-radius: 18px; + padding: 22px; + background: alpha(@card_bg_color, 0.72); +} + +.summary-card { + border-radius: 18px; + padding: 14px; + background: alpha(@card_bg_color, 0.72); +} + +.network-card { + border-radius: 10px; + padding: 16px; + box-shadow: 0 2px 6px alpha(black, 0.14); +} + +.wireguard-card { + background: linear-gradient(135deg, #3277d8, #174ea6); +} + +.tailnet-card { + background: linear-gradient(135deg, #31b891, #147d69); +} + +.network-card-kind, +.network-card-title, +.network-card-detail { + color: white; +} + +.network-card-kind { + opacity: 0.86; + font-weight: 700; +} + +.network-card-title { + font-size: 1.22em; + font-weight: 700; +} + +.network-card-detail { + opacity: 0.92; + font-family: monospace; +} +"#; diff --git 
a/burrow-gtk/src/components/home_screen.rs b/burrow-gtk/src/components/home_screen.rs new file mode 100644 index 0000000..0bfdda2 --- /dev/null +++ b/burrow-gtk/src/components/home_screen.rs @@ -0,0 +1,1178 @@ +use super::*; +use crate::account_store::{self, AccountKind, AccountRecord}; +use std::time::Duration; + +pub struct HomeScreen { + daemon_banner: adw::Banner, + network_status: gtk::Label, + network_cards: gtk::Box, + account_status: gtk::Label, + account_rows: gtk::Box, + tunnel_status: gtk::Label, + tunnel_button: gtk::Button, + tunnel_state: Option, + tailnet_session_id: Option, + tailnet_running: bool, +} + +#[derive(Debug)] +pub enum HomeScreenMsg { + EnsureDaemon, + Refresh, + TunnelAction, + OpenWireGuard, + OpenTor, + OpenTailnet, + AddWireGuard { + title: String, + account: String, + identity: String, + config: String, + }, + SaveTor { + title: String, + account: String, + identity: String, + note: String, + }, + DiscoverTailnet(String), + ProbeTailnet(String), + StartTailnetLogin { + authority: String, + account: String, + identity: String, + hostname: Option, + }, + PollTailnetLogin, + CancelTailnetLogin, + AddTailnet { + authority: String, + account: String, + identity: String, + hostname: Option, + tailnet: Option, + }, +} + +#[relm4::component(pub, async)] +impl AsyncComponent for HomeScreen { + type Init = (); + type Input = HomeScreenMsg; + type Output = (); + type CommandOutput = (); + + view! 
{ + gtk::ScrolledWindow { + set_vexpand: true, + + adw::Clamp { + set_maximum_size: 900, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 24, + set_margin_all: 24, + + gtk::Box { + set_orientation: gtk::Orientation::Horizontal, + set_spacing: 16, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 6, + set_hexpand: true, + + gtk::Label { + add_css_class: "title-1", + set_xalign: 0.0, + set_label: "Burrow", + }, + + gtk::Label { + add_css_class: "heading", + add_css_class: "dim-label", + set_xalign: 0.0, + set_label: "Networks and accounts", + }, + }, + + #[name(add_button)] + gtk::MenuButton { + add_css_class: "flat", + set_icon_name: "list-add-symbolic", + set_tooltip_text: Some("Add"), + set_valign: Align::Start, + }, + }, + + #[name(daemon_banner)] + adw::Banner { + set_title: "Starting Burrow daemon", + set_revealed: false, + }, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 12, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 4, + + gtk::Label { + add_css_class: "title-2", + set_xalign: 0.0, + set_label: "Networks", + }, + + #[name(network_status)] + gtk::Label { + add_css_class: "dim-label", + set_xalign: 0.0, + set_wrap: true, + set_label: "Stored daemon networks and their active account selectors", + }, + }, + + gtk::ScrolledWindow { + set_policy: (gtk::PolicyType::Automatic, gtk::PolicyType::Never), + set_min_content_height: 190, + + #[name(network_cards)] + gtk::Box { + set_orientation: gtk::Orientation::Horizontal, + set_spacing: 14, + }, + }, + }, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 12, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 4, + + gtk::Label { + add_css_class: "title-2", + set_xalign: 0.0, + set_label: "Accounts", + }, + + gtk::Label { + add_css_class: "dim-label", + set_xalign: 0.0, + set_wrap: true, + set_label: "Per-network identities and sign-in state", + }, + 
}, + + #[name(account_rows)] + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 8, + set_margin_all: 0, + set_valign: Align::Center, + }, + + #[name(account_status)] + gtk::Label { + add_css_class: "dim-label", + set_xalign: 0.0, + set_wrap: true, + set_label: "", + }, + }, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 8, + + gtk::Box { + set_orientation: gtk::Orientation::Vertical, + set_spacing: 4, + + gtk::Label { + add_css_class: "title-2", + set_xalign: 0.0, + set_label: "Tunnel", + }, + + gtk::Label { + add_css_class: "dim-label", + set_xalign: 0.0, + set_label: "Current daemon tunnel state", + }, + }, + + #[name(tunnel_status)] + gtk::Label { + set_xalign: 0.0, + set_label: "Checking daemon status", + }, + + #[name(tunnel_button)] + gtk::Button { + add_css_class: "suggested-action", + set_label: "Start", + set_halign: Align::Start, + connect_clicked => HomeScreenMsg::TunnelAction, + }, + }, + } + } + } + } + + async fn init( + _: Self::Init, + _root: Self::Root, + sender: AsyncComponentSender, + ) -> AsyncComponentParts { + let widgets = view_output!(); + configure_add_popover(&widgets.add_button, &sender); + + let refresh_sender = sender.input_sender().clone(); + relm4::spawn(async move { + loop { + tokio::time::sleep(Duration::from_secs(5)).await; + refresh_sender.emit(HomeScreenMsg::Refresh); + } + }); + + let model = HomeScreen { + daemon_banner: widgets.daemon_banner.clone(), + network_status: widgets.network_status.clone(), + network_cards: widgets.network_cards.clone(), + account_status: widgets.account_status.clone(), + account_rows: widgets.account_rows.clone(), + tunnel_status: widgets.tunnel_status.clone(), + tunnel_button: widgets.tunnel_button.clone(), + tunnel_state: None, + tailnet_session_id: None, + tailnet_running: false, + }; + + sender.input(HomeScreenMsg::EnsureDaemon); + + AsyncComponentParts { model, widgets } + } + + async fn update( + &mut self, + msg: Self::Input, + sender: 
AsyncComponentSender, + root: &Self::Root, + ) { + match msg { + HomeScreenMsg::EnsureDaemon => self.ensure_daemon().await, + HomeScreenMsg::Refresh => self.refresh().await, + HomeScreenMsg::TunnelAction => self.perform_tunnel_action().await, + HomeScreenMsg::OpenWireGuard => open_wireguard_window(root, &sender), + HomeScreenMsg::OpenTor => open_tor_window(root, &sender), + HomeScreenMsg::OpenTailnet => open_tailnet_window(root, &sender), + HomeScreenMsg::AddWireGuard { + title, + account, + identity, + config, + } => self.add_wireguard(title, account, identity, config).await, + HomeScreenMsg::SaveTor { title, account, identity, note } => { + self.save_tor(title, account, identity, note) + } + HomeScreenMsg::DiscoverTailnet(email) => self.discover_tailnet(email).await, + HomeScreenMsg::ProbeTailnet(authority) => self.probe_tailnet(authority).await, + HomeScreenMsg::StartTailnetLogin { + authority, + account, + identity, + hostname, + } => { + self.start_tailnet_login(authority, account, identity, hostname, sender) + .await; + } + HomeScreenMsg::PollTailnetLogin => self.poll_tailnet_login(sender).await, + HomeScreenMsg::CancelTailnetLogin => self.cancel_tailnet_login().await, + HomeScreenMsg::AddTailnet { + authority, + account, + identity, + hostname, + tailnet, + } => { + self.add_tailnet(authority, account, identity, hostname, tailnet) + .await; + } + } + } +} + +impl HomeScreen { + async fn ensure_daemon(&mut self) { + self.daemon_banner.set_title("Starting Burrow daemon"); + self.daemon_banner.set_revealed(true); + match daemon_api::ensure_daemon().await { + Ok(()) => { + self.daemon_banner.set_revealed(false); + self.refresh().await; + } + Err(error) => { + self.daemon_banner + .set_title(&format!("Burrow daemon is not reachable: {error}")); + self.daemon_banner.set_revealed(true); + self.tunnel_state = None; + self.tunnel_status.set_label("Daemon unavailable"); + self.tunnel_button.set_label("Enable"); + self.tunnel_button.set_sensitive(true); + 
self.network_status + .set_label("Stored daemon networks are unavailable until the daemon starts."); + self.render_networks(&[]); + } + } + } + + async fn refresh(&mut self) { + match daemon_api::tunnel_state().await { + Ok(state) => { + self.daemon_banner.set_revealed(false); + self.tunnel_state = Some(state); + match state { + daemon_api::TunnelState::Running => { + self.tunnel_status.set_label("Connected"); + self.tunnel_button.set_label("Stop"); + } + daemon_api::TunnelState::Stopped => { + self.tunnel_status.set_label("Disconnected"); + self.tunnel_button.set_label("Start"); + } + } + self.tunnel_button.set_sensitive(true); + } + Err(error) => { + self.tunnel_state = None; + self.daemon_banner + .set_title(&format!("Burrow daemon is not reachable: {error}")); + self.daemon_banner.set_revealed(true); + self.tunnel_status.set_label("Unknown"); + self.tunnel_button.set_label("Enable"); + self.tunnel_button.set_sensitive(true); + } + } + + match daemon_api::list_networks().await { + Ok(networks) => { + self.render_networks(&networks); + self.network_status.set_label(if networks.is_empty() { + "Stored daemon networks and their active account selectors" + } else { + "Stored daemon networks and their active account selectors" + }); + } + Err(error) => { + self.render_networks(&[]); + self.network_status + .set_label(&format!("Unable to read daemon networks: {error}")); + } + } + + match account_store::load() { + Ok(accounts) => { + self.account_status.set_label(""); + self.render_accounts(&accounts); + } + Err(error) => { + self.render_accounts(&[]); + self.account_status + .set_label(&format!("Unable to read account store: {error}")); + } + } + } + + async fn perform_tunnel_action(&mut self) { + match self.tunnel_state { + Some(daemon_api::TunnelState::Running) => { + self.tunnel_button.set_sensitive(false); + self.tunnel_status.set_label("Disconnecting..."); + if let Err(error) = daemon_api::stop_tunnel().await { + self.tunnel_status + .set_label(&format!("Stop 
failed: {error}")); + } + self.refresh().await; + } + Some(daemon_api::TunnelState::Stopped) => { + self.tunnel_button.set_sensitive(false); + self.tunnel_status.set_label("Connecting..."); + if let Err(error) = daemon_api::start_tunnel().await { + self.tunnel_status + .set_label(&format!("Start failed: {error}")); + } + self.refresh().await; + } + None => self.ensure_daemon().await, + } + } + + async fn add_wireguard( + &mut self, + title: String, + account: String, + identity: String, + config: String, + ) { + if config.trim().is_empty() { + self.network_status + .set_label("Paste a WireGuard configuration before adding a network."); + return; + } + match daemon_api::add_wireguard(config).await { + Ok(id) => { + let title = daemon_api::normalized(&title, &format!("WireGuard {id}")); + let record = account_store::new_record( + AccountKind::WireGuard, + title, + None, + daemon_api::normalized(&account, "default"), + daemon_api::normalized(&identity, &format!("network-{id}")), + None, + None, + Some(format!("Linked to daemon network #{id}.")), + ); + match account_store::upsert(record) { + Ok(accounts) => self.render_accounts(&accounts), + Err(error) => self + .account_status + .set_label(&format!("WireGuard account save failed: {error}")), + } + self.network_status + .set_label(&format!("Added WireGuard network #{id}.")); + self.refresh().await; + } + Err(error) => self + .network_status + .set_label(&format!("Unable to add WireGuard network: {error}")), + } + } + + fn save_tor(&mut self, title: String, account: String, identity: String, note: String) { + let record = account_store::new_record( + AccountKind::Tor, + daemon_api::normalized( + &title, + &format!("Tor {}", daemon_api::normalized(&identity, "linux")), + ), + Some("arti://local".to_owned()), + daemon_api::normalized(&account, "default"), + daemon_api::normalized(&identity, "linux"), + None, + None, + Some(note), + ); + match account_store::upsert(record) { + Ok(accounts) => { + 
self.account_status.set_label("Saved Tor account."); + self.render_accounts(&accounts); + } + Err(error) => self + .account_status + .set_label(&format!("Unable to save Tor account: {error}")), + } + } + + async fn discover_tailnet(&mut self, email: String) { + let Ok(email) = daemon_api::require_value(&email, "Email address") else { + self.account_status + .set_label("Enter an email address before Tailnet discovery."); + return; + }; + + self.account_status.set_label("Finding Tailnet server..."); + match daemon_api::discover_tailnet(email).await { + Ok(discovery) => { + let kind = if discovery.managed { + "managed authority" + } else { + "custom authority" + }; + let issuer = discovery + .oidc_issuer + .map(|issuer| format!(" OIDC: {issuer}.")) + .unwrap_or_default(); + self.account_status.set_label(&format!( + "Discovered {kind}: {}.{issuer}", + discovery.authority + )); + } + Err(error) => self + .account_status + .set_label(&format!("Tailnet discovery failed: {error}")), + } + } + + async fn probe_tailnet(&mut self, authority: String) { + let Ok(authority) = daemon_api::require_value(&authority, "Tailnet server URL") else { + self.account_status + .set_label("Enter a Tailnet server URL before checking it."); + return; + }; + + self.account_status.set_label("Checking Tailnet server..."); + match daemon_api::probe_tailnet(authority).await { + Ok(probe) => { + let detail = probe + .detail + .unwrap_or_else(|| format!("HTTP {}", probe.status_code)); + self.account_status + .set_label(&format!("{}: {detail}", probe.summary)); + } + Err(error) => self + .account_status + .set_label(&format!("Tailnet probe failed: {error}")), + } + } + + async fn start_tailnet_login( + &mut self, + authority: String, + account: String, + identity: String, + hostname: Option, + sender: AsyncComponentSender, + ) { + let Ok(authority) = daemon_api::require_value(&authority, "Tailnet server URL") else { + self.account_status + .set_label("Enter a Tailnet server URL before sign-in."); + 
return; + }; + + self.account_status.set_label("Starting Tailnet sign-in..."); + match daemon_api::start_tailnet_login(authority, account, identity, hostname).await { + Ok(status) => { + self.apply_login_status(&status); + if let Some(auth_url) = status.auth_url.as_deref() { + if let Err(error) = open_auth_url(auth_url) { + self.account_status.set_label(&format!( + "{} Open this URL manually: {auth_url}. Browser launch failed: {error}", + self.account_status.text() + )); + } + } + if !status.running { + sender.input(HomeScreenMsg::PollTailnetLogin); + } + } + Err(error) => self + .account_status + .set_label(&format!("Tailnet sign-in failed: {error}")), + } + } + + async fn poll_tailnet_login(&mut self, sender: AsyncComponentSender) { + let Some(session_id) = self.tailnet_session_id.clone() else { + return; + }; + if self.tailnet_running { + return; + } + + tokio::time::sleep(Duration::from_secs(1)).await; + match daemon_api::tailnet_login_status(session_id).await { + Ok(status) => { + self.apply_login_status(&status); + if !status.running { + sender.input(HomeScreenMsg::PollTailnetLogin); + } + } + Err(error) => { + self.account_status + .set_label(&format!("Tailnet sign-in status failed: {error}")); + self.tailnet_session_id = None; + } + } + } + + async fn cancel_tailnet_login(&mut self) { + let Some(session_id) = self.tailnet_session_id.clone() else { + self.account_status + .set_label("No Tailnet sign-in is active."); + return; + }; + match daemon_api::cancel_tailnet_login(session_id).await { + Ok(()) => { + self.tailnet_session_id = None; + self.tailnet_running = false; + self.account_status.set_label("Tailnet sign-in cancelled."); + } + Err(error) => self + .account_status + .set_label(&format!("Unable to cancel Tailnet sign-in: {error}")), + } + } + + async fn add_tailnet( + &mut self, + authority: String, + account: String, + identity: String, + hostname: Option, + tailnet: Option, + ) { + let Ok(authority) = daemon_api::require_value(&authority, "Tailnet 
server URL") else { + self.account_status + .set_label("Enter a Tailnet server URL before saving."); + return; + }; + if self.tailnet_session_id.is_some() && !self.tailnet_running { + self.account_status + .set_label("Finish browser sign-in before saving this Tailnet account."); + return; + } + + let stored_authority = daemon_api::normalized_optional(&authority) + .unwrap_or_else(|| daemon_api::default_tailnet_authority().to_owned()); + let stored_account = daemon_api::normalized(&account, "default"); + let stored_identity = daemon_api::normalized(&identity, "linux"); + let stored_hostname = hostname.clone(); + let stored_tailnet = tailnet.clone(); + + match daemon_api::add_tailnet(authority, account, identity, hostname, tailnet).await { + Ok(id) => { + let title = stored_tailnet + .clone() + .or(stored_hostname.clone()) + .unwrap_or_else(|| format!("Tailnet {id}")); + let record = account_store::new_record( + AccountKind::Tailnet, + title, + Some(stored_authority), + stored_account, + stored_identity, + stored_hostname, + stored_tailnet, + Some(format!("Linked to daemon network #{id}.")), + ); + match account_store::upsert(record) { + Ok(accounts) => self.render_accounts(&accounts), + Err(error) => self + .account_status + .set_label(&format!("Tailnet account save failed: {error}")), + } + self.account_status + .set_label(&format!("Saved Tailnet account and network #{id}.")); + self.refresh().await; + } + Err(error) => self + .account_status + .set_label(&format!("Unable to save Tailnet account: {error}")), + } + } + + fn apply_login_status(&mut self, status: &daemon_api::TailnetLoginStatus) { + self.tailnet_session_id = Some(status.session_id.clone()); + self.tailnet_running = status.running; + + let mut parts = Vec::new(); + if status.running { + parts.push("Signed In".to_owned()); + } else if status.needs_login { + parts.push("Browser Sign-In Required".to_owned()); + } else { + parts.push("Checking Sign-In".to_owned()); + } + if !status.backend_state.is_empty() 
{ + parts.push(format!("State: {}", status.backend_state)); + } + if let Some(tailnet_name) = &status.tailnet_name { + parts.push(format!("Tailnet: {tailnet_name}")); + } + if let Some(self_dns_name) = &status.self_dns_name { + parts.push(self_dns_name.clone()); + } + if !status.tailnet_ips.is_empty() { + parts.push(status.tailnet_ips.join(", ")); + } + if !status.health.is_empty() { + parts.push(status.health.join(" / ")); + } + self.account_status.set_label(&parts.join("\n")); + } + + fn render_networks(&self, networks: &[daemon_api::NetworkSummary]) { + while let Some(child) = self.network_cards.first_child() { + self.network_cards.remove(&child); + } + + if networks.is_empty() { + self.network_cards.append(&empty_networks_view()); + return; + } + + for network in networks { + self.network_cards.append(&network_card(network)); + } + } + + fn render_accounts(&self, accounts: &[AccountRecord]) { + while let Some(child) = self.account_rows.first_child() { + self.account_rows.remove(&child); + } + + if accounts.is_empty() { + self.account_rows.append(&empty_accounts_view()); + return; + } + + for account in accounts { + self.account_rows.append(&account_card(account)); + } + } +} + +fn configure_add_popover(button: >k::MenuButton, sender: &AsyncComponentSender) { + let popover = gtk::Popover::new(); + let box_ = gtk::Box::new(gtk::Orientation::Vertical, 4); + box_.set_margin_all(6); + + for (label, msg) in [ + ("Add WireGuard Network", HomeScreenMsg::OpenWireGuard), + ("Save Tor Account", HomeScreenMsg::OpenTor), + ("Add Tailnet Account", HomeScreenMsg::OpenTailnet), + ] { + let item = gtk::Button::with_label(label); + item.add_css_class("flat"); + item.set_halign(Align::Fill); + let input = sender.input_sender().clone(); + item.connect_clicked(move |_| input.emit(msg_from_template(&msg))); + box_.append(&item); + } + + popover.set_child(Some(&box_)); + button.set_popover(Some(&popover)); +} + +fn msg_from_template(msg: &HomeScreenMsg) -> HomeScreenMsg { + match msg 
{ + HomeScreenMsg::OpenWireGuard => HomeScreenMsg::OpenWireGuard, + HomeScreenMsg::OpenTor => HomeScreenMsg::OpenTor, + HomeScreenMsg::OpenTailnet => HomeScreenMsg::OpenTailnet, + _ => unreachable!(), + } +} + +fn network_card(network: &daemon_api::NetworkSummary) -> gtk::Box { + let card = gtk::Box::new(gtk::Orientation::Vertical, 10); + card.add_css_class("network-card"); + if network.title.to_ascii_lowercase().contains("wireguard") { + card.add_css_class("wireguard-card"); + } else { + card.add_css_class("tailnet-card"); + } + card.set_size_request(360, 175); + card.set_margin_bottom(8); + + let kind = if network.title.to_ascii_lowercase().contains("wireguard") { + "WireGuard" + } else { + "Tailnet" + }; + let kind_label = gtk::Label::new(Some(kind)); + kind_label.add_css_class("network-card-kind"); + kind_label.set_xalign(0.0); + + let title = gtk::Label::new(Some(&network.title)); + title.add_css_class("network-card-title"); + title.set_xalign(0.0); + title.set_wrap(true); + + let spacer = gtk::Box::new(gtk::Orientation::Vertical, 0); + spacer.set_vexpand(true); + + let detail = gtk::Label::new(Some(&network.detail)); + detail.add_css_class("network-card-detail"); + detail.set_xalign(0.0); + detail.set_wrap(true); + detail.set_lines(4); + + card.append(&kind_label); + card.append(&title); + card.append(&spacer); + card.append(&detail); + card +} + +fn empty_networks_view() -> gtk::Box { + let box_ = gtk::Box::new(gtk::Orientation::Vertical, 6); + box_.add_css_class("empty-state"); + box_.set_size_request(520, 175); + box_.set_hexpand(true); + + let title = gtk::Label::new(Some("No Networks Yet")); + title.add_css_class("title-3"); + title.set_xalign(0.0); + let detail = gtk::Label::new(Some( + "Add a WireGuard network, or save a Tailnet account so Burrow can store a managed network when the daemon is reachable.", + )); + detail.add_css_class("dim-label"); + detail.set_wrap(true); + detail.set_xalign(0.0); + + box_.append(&title); + box_.append(&detail); + box_ 
+} + +fn empty_accounts_view() -> gtk::Box { + let box_ = gtk::Box::new(gtk::Orientation::Vertical, 6); + box_.add_css_class("empty-state"); + box_.set_hexpand(true); + + let title = gtk::Label::new(Some("No Accounts Yet")); + title.add_css_class("title-3"); + title.set_justify(gtk::Justification::Center); + let detail = gtk::Label::new(Some( + "Save a Tor account or sign in to Tailnet to keep network identities ready on this device.", + )); + detail.add_css_class("dim-label"); + detail.set_wrap(true); + detail.set_justify(gtk::Justification::Center); + + box_.append(&title); + box_.append(&detail); + box_ +} + +fn account_card(account: &AccountRecord) -> gtk::Box { + let card = gtk::Box::new(gtk::Orientation::Vertical, 8); + card.add_css_class("summary-card"); + card.set_hexpand(true); + + let header = gtk::Box::new(gtk::Orientation::Horizontal, 8); + let title = gtk::Label::new(Some(&account.title)); + title.add_css_class("title-3"); + title.set_xalign(0.0); + title.set_hexpand(true); + let kind = gtk::Label::new(Some(account.kind.title())); + kind.add_css_class("dim-label"); + header.append(&title); + header.append(&kind); + card.append(&header); + + append_account_value(&card, "Account", &account.account); + append_account_value(&card, "Identity", &account.identity); + if let Some(authority) = &account.authority { + append_account_value(&card, "Authority", authority); + } + if let Some(hostname) = &account.hostname { + append_account_value(&card, "Hostname", hostname); + } + if let Some(tailnet) = &account.tailnet { + append_account_value(&card, "Tailnet", tailnet); + } + if let Some(note) = &account.note { + let note_label = gtk::Label::new(Some(note)); + note_label.add_css_class("dim-label"); + note_label.set_wrap(true); + note_label.set_xalign(0.0); + card.append(¬e_label); + } + + card +} + +fn append_account_value(card: >k::Box, label: &str, value: &str) { + let row = gtk::Box::new(gtk::Orientation::Horizontal, 8); + let key = gtk::Label::new(Some(label)); 
+ key.add_css_class("dim-label"); + key.set_xalign(0.0); + key.set_width_chars(9); + let value = gtk::Label::new(Some(value)); + value.set_xalign(0.0); + value.set_wrap(true); + value.set_hexpand(true); + row.append(&key); + row.append(&value); + card.append(&row); +} + +fn open_wireguard_window(root: >k::ScrolledWindow, sender: &AsyncComponentSender) { + let window = sheet_window(root, "WireGuard", 560, 620); + let content = sheet_content( + &window, + "Import WireGuard", + "Import a tunnel and optional account metadata.", + ); + + let title = gtk::Entry::new(); + title.set_placeholder_text(Some("Title")); + let account = gtk::Entry::new(); + account.set_placeholder_text(Some("Account")); + let identity = gtk::Entry::new(); + identity.set_placeholder_text(Some("Identity")); + let text = gtk::TextView::new(); + text.set_monospace(true); + text.set_wrap_mode(gtk::WrapMode::WordChar); + + let editor = gtk::ScrolledWindow::new(); + editor.set_min_content_height(220); + editor.set_child(Some(&text)); + + content.append(§ion_label("Identity")); + content.append(&title); + content.append(&account); + content.append(&identity); + content.append(§ion_label("WireGuard Configuration")); + content.append(&editor); + + let add = gtk::Button::with_label("Add Network"); + add.add_css_class("suggested-action"); + let input = sender.input_sender().clone(); + let window_for_click = window.clone(); + add.connect_clicked(move |_| { + input.emit(HomeScreenMsg::AddWireGuard { + title: title.text().to_string(), + account: account.text().to_string(), + identity: identity.text().to_string(), + config: text_view_text(&text), + }); + window_for_click.close(); + }); + content.append(&add); + + window.set_child(Some(&content)); + window.present(); +} + +fn open_tor_window(root: >k::ScrolledWindow, sender: &AsyncComponentSender) { + let window = sheet_window(root, "Tor", 520, 540); + let content = sheet_content( + &window, + "Configure Tor", + "Store Arti account and identity preferences.", + 
); + + let title = entry_with_text("Title", "Default Tor"); + let account = entry_with_text("Account", "default"); + let identity = entry_with_text("Identity", "linux"); + let addresses = entry_with_text("Virtual Addresses", "100.64.0.2/32"); + let dns = entry_with_text("DNS Resolvers", "1.1.1.1, 1.0.0.1"); + let mtu = entry_with_text("MTU", "1400"); + let listen = entry_with_text("Transparent Listener", "127.0.0.1:9040"); + + content.append(§ion_label("Identity")); + content.append(&title); + content.append(&account); + content.append(&identity); + content.append(§ion_label("Tor Preferences")); + content.append(&addresses); + content.append(&dns); + content.append(&mtu); + content.append(&listen); + + let save = gtk::Button::with_label("Save Account"); + save.add_css_class("suggested-action"); + let input = sender.input_sender().clone(); + let window_for_click = window.clone(); + save.connect_clicked(move |_| { + let note = [ + format!( + "Addresses: {}", + normalized_entry(&addresses, "100.64.0.2/32") + ), + format!("DNS: {}", normalized_entry(&dns, "1.1.1.1, 1.0.0.1")), + format!("MTU: {}", normalized_entry(&mtu, "1400")), + format!("Listen: {}", normalized_entry(&listen, "127.0.0.1:9040")), + ] + .join(" - "); + input.emit(HomeScreenMsg::SaveTor { + title: normalized_entry(&title, "Default Tor"), + account: normalized_entry(&account, "default"), + identity: normalized_entry(&identity, "linux"), + note, + }); + window_for_click.close(); + }); + content.append(&save); + + window.set_child(Some(&content)); + window.present(); +} + +fn open_tailnet_window(root: >k::ScrolledWindow, sender: &AsyncComponentSender) { + let window = sheet_window(root, "Tailnet", 560, 680); + let content = sheet_content( + &window, + "Connect Tailnet", + "Save Tailnet authority, identity defaults, and login material.", + ); + + let email = gtk::Entry::new(); + email.set_placeholder_text(Some("Email address")); + let authority = entry_with_text("Server URL", 
daemon_api::default_tailnet_authority()); + let tailnet = gtk::Entry::new(); + tailnet.set_placeholder_text(Some("Tailnet")); + let account = entry_with_text("Account", "default"); + let identity = entry_with_text("Identity", "linux"); + let hostname = entry_with_text("Hostname", &hostname_fallback()); + + content.append(§ion_label("Connection")); + content.append(&email); + content.append(&authority); + content.append(&tailnet); + content.append(§ion_label("Identity")); + content.append(&account); + content.append(&identity); + content.append(&hostname); + + let actions = gtk::Box::new(gtk::Orientation::Horizontal, 8); + let discover = gtk::Button::with_label("Refresh Server Lookup"); + let probe = gtk::Button::with_label("Check Server"); + let sign_in = gtk::Button::with_label("Start Sign-In"); + actions.append(&discover); + actions.append(&probe); + actions.append(&sign_in); + content.append(§ion_label("Authentication")); + content.append(&actions); + + let input = sender.input_sender().clone(); + let email_for_click = email.clone(); + discover.connect_clicked(move |_| { + input.emit(HomeScreenMsg::DiscoverTailnet( + email_for_click.text().to_string(), + )); + }); + + let input = sender.input_sender().clone(); + let authority_for_probe = authority.clone(); + probe.connect_clicked(move |_| { + input.emit(HomeScreenMsg::ProbeTailnet( + authority_for_probe.text().to_string(), + )); + }); + + let input = sender.input_sender().clone(); + let authority_for_login = authority.clone(); + let account_for_login = account.clone(); + let identity_for_login = identity.clone(); + let hostname_for_login = hostname.clone(); + sign_in.connect_clicked(move |_| { + input.emit(HomeScreenMsg::StartTailnetLogin { + authority: authority_for_login.text().to_string(), + account: normalized_entry(&account_for_login, "default"), + identity: normalized_entry(&identity_for_login, "linux"), + hostname: daemon_api::normalized_optional(&hostname_for_login.text()), + }); + }); + + let save = 
gtk::Button::with_label("Save Account"); + save.add_css_class("suggested-action"); + let input = sender.input_sender().clone(); + let window_for_click = window.clone(); + save.connect_clicked(move |_| { + input.emit(HomeScreenMsg::AddTailnet { + authority: authority.text().to_string(), + account: normalized_entry(&account, "default"), + identity: normalized_entry(&identity, "linux"), + hostname: daemon_api::normalized_optional(&hostname.text()), + tailnet: daemon_api::normalized_optional(&tailnet.text()), + }); + window_for_click.close(); + }); + + let cancel = gtk::Button::with_label("Cancel Sign-In"); + let input = sender.input_sender().clone(); + cancel.connect_clicked(move |_| { + input.emit(HomeScreenMsg::CancelTailnetLogin); + }); + + content.append(&save); + content.append(&cancel); + + window.set_child(Some(&content)); + window.present(); +} + +fn sheet_window(root: >k::ScrolledWindow, title: &str, width: i32, height: i32) -> gtk::Window { + let window = gtk::Window::builder() + .title(title) + .default_width(width) + .default_height(height) + .modal(true) + .build(); + if let Some(root) = root.root() { + if let Ok(parent) = root.downcast::() { + window.set_transient_for(Some(&parent)); + } + } + window +} + +fn sheet_content(window: >k::Window, title: &str, detail: &str) -> gtk::Box { + let content = gtk::Box::new(gtk::Orientation::Vertical, 12); + content.set_margin_all(18); + + let summary = gtk::Box::new(gtk::Orientation::Horizontal, 12); + summary.add_css_class("summary-card"); + + let copy = gtk::Box::new(gtk::Orientation::Vertical, 4); + copy.set_hexpand(true); + + let title_label = gtk::Label::new(Some(title)); + title_label.add_css_class("title-3"); + title_label.set_xalign(0.0); + + let detail_label = gtk::Label::new(Some(detail)); + detail_label.add_css_class("dim-label"); + detail_label.set_wrap(true); + detail_label.set_xalign(0.0); + + copy.append(&title_label); + copy.append(&detail_label); + summary.append(©); + + let close = 
gtk::Button::builder() + .icon_name("window-close-symbolic") + .tooltip_text("Close") + .valign(Align::Start) + .build(); + close.add_css_class("flat"); + let window_for_click = window.clone(); + close.connect_clicked(move |_| window_for_click.close()); + summary.append(&close); + + content.append(&summary); + content +} + +fn section_label(label: &str) -> gtk::Label { + let section = gtk::Label::new(Some(label)); + section.add_css_class("heading"); + section.set_xalign(0.0); + section +} + +fn entry_with_text(placeholder: &str, value: &str) -> gtk::Entry { + let entry = gtk::Entry::new(); + entry.set_placeholder_text(Some(placeholder)); + entry.set_text(value); + entry +} + +fn normalized_entry(entry: >k::Entry, fallback: &str) -> String { + daemon_api::normalized(&entry.text(), fallback) +} + +fn hostname_fallback() -> String { + std::env::var("HOSTNAME").unwrap_or_else(|_| "linux".to_owned()) +} + +fn text_view_text(text_view: >k::TextView) -> String { + let buffer = text_view.buffer(); + buffer + .text(&buffer.start_iter(), &buffer.end_iter(), true) + .to_string() +} + +fn open_auth_url(url: &str) -> anyhow::Result<()> { + gtk::gio::AppInfo::launch_default_for_uri(url, None::<>k::gio::AppLaunchContext>) + .map_err(anyhow::Error::from) +} diff --git a/burrow-gtk/src/components/mod.rs b/burrow-gtk/src/components/mod.rs index b134809..8e60fa7 100644 --- a/burrow-gtk/src/components/mod.rs +++ b/burrow-gtk/src/components/mod.rs @@ -1,6 +1,6 @@ use super::*; +use crate::daemon_api; use adw::prelude::*; -use burrow::{DaemonClient, DaemonCommand, DaemonResponseData}; use gtk::Align; use relm4::{ component::{ @@ -9,13 +9,9 @@ use relm4::{ }, prelude::*, }; -use std::sync::Arc; -use tokio::sync::Mutex; mod app; -mod settings; -mod settings_screen; -mod switch_screen; +mod home_screen; pub use app::*; -pub use settings::{DaemonGroupMsg, DiagGroupMsg}; +pub use home_screen::{HomeScreen, HomeScreenMsg}; diff --git a/burrow-gtk/src/daemon_api.rs 
b/burrow-gtk/src/daemon_api.rs new file mode 100644 index 0000000..4ff8bf5 --- /dev/null +++ b/burrow-gtk/src/daemon_api.rs @@ -0,0 +1,420 @@ +use anyhow::{anyhow, Context, Result}; +use burrow::{ + control::{TailnetConfig, TailnetProvider}, + grpc_defs::{ + Empty, Network, NetworkType, State, TailnetDiscoverRequest, TailnetLoginCancelRequest, + TailnetLoginStartRequest, TailnetLoginStatusRequest, TailnetProbeRequest, + }, + BurrowClient, +}; +use std::{path::PathBuf, sync::OnceLock}; +use tokio::time::{timeout, Duration}; + +const RPC_TIMEOUT: Duration = Duration::from_secs(3); +const MANAGED_TAILSCALE_AUTHORITY: &str = "https://controlplane.tailscale.com"; +static EMBEDDED_DAEMON_STARTED: OnceLock<()> = OnceLock::new(); + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TunnelState { + Running, + Stopped, +} + +#[derive(Debug, Clone)] +pub struct NetworkSummary { + pub id: i32, + pub title: String, + pub detail: String, +} + +#[derive(Debug, Clone)] +pub struct TailnetDiscovery { + pub authority: String, + pub managed: bool, + pub oidc_issuer: Option, +} + +#[derive(Debug, Clone)] +pub struct TailnetProbe { + pub summary: String, + pub detail: Option, + pub status_code: i32, +} + +#[derive(Debug, Clone)] +pub struct TailnetLoginStatus { + pub session_id: String, + pub backend_state: String, + pub auth_url: Option, + pub running: bool, + pub needs_login: bool, + pub tailnet_name: Option, + pub self_dns_name: Option, + pub tailnet_ips: Vec, + pub health: Vec, +} + +pub fn default_tailnet_authority() -> &'static str { + MANAGED_TAILSCALE_AUTHORITY +} + +pub fn configure_client_paths() -> Result<()> { + if std::env::var_os("BURROW_SOCKET_PATH").is_none() { + std::env::set_var("BURROW_SOCKET_PATH", default_socket_path()?); + } + Ok(()) +} + +pub async fn ensure_daemon() -> Result<()> { + configure_client_paths()?; + if daemon_available().await { + return Ok(()); + } + + let socket_path = socket_path()?; + let db_path = database_path()?; + 
ensure_parent(&socket_path)?; + ensure_parent(&db_path)?; + + if EMBEDDED_DAEMON_STARTED.get().is_none() { + tokio::task::spawn_blocking(move || { + burrow::spawn_in_process_with_paths(Some(socket_path), Some(db_path)); + }) + .await + .context("failed to join embedded daemon startup")?; + let _ = EMBEDDED_DAEMON_STARTED.set(()); + } + + tunnel_state() + .await + .map(|_| ()) + .context("Burrow daemon started but did not accept tunnel status RPCs") +} + +pub fn infer_tailnet_provider(authority: &str) -> TailnetProvider { + let normalized = authority.trim().trim_end_matches('/').to_ascii_lowercase(); + if normalized == "controlplane.tailscale.com" + || normalized == "http://controlplane.tailscale.com" + || normalized == MANAGED_TAILSCALE_AUTHORITY + { + TailnetProvider::Tailscale + } else { + TailnetProvider::Headscale + } +} + +pub async fn daemon_available() -> bool { + tunnel_state().await.is_ok() +} + +fn socket_path() -> Result { + if let Some(path) = std::env::var_os("BURROW_SOCKET_PATH") { + return Ok(PathBuf::from(path)); + } + default_socket_path() +} + +fn default_socket_path() -> Result { + if let Some(runtime_dir) = std::env::var_os("XDG_RUNTIME_DIR") { + return Ok(PathBuf::from(runtime_dir).join("burrow.sock")); + } + let uid = std::env::var("UID").unwrap_or_else(|_| "1000".to_owned()); + Ok(PathBuf::from(format!("/tmp/burrow-{uid}.sock"))) +} + +fn database_path() -> Result { + if let Some(path) = std::env::var_os("BURROW_DB_PATH") { + return Ok(PathBuf::from(path)); + } + if let Some(data_home) = std::env::var_os("XDG_DATA_HOME") { + return Ok(PathBuf::from(data_home).join("burrow").join("burrow.db")); + } + if let Some(home) = std::env::var_os("HOME") { + return Ok(PathBuf::from(home) + .join(".local") + .join("share") + .join("burrow") + .join("burrow.db")); + } + Ok(std::env::temp_dir().join("burrow.db")) +} + +fn ensure_parent(path: &PathBuf) -> Result<()> { + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent) + 
.with_context(|| format!("failed to create {}", parent.display()))?; + } + Ok(()) +} + +pub async fn tunnel_state() -> Result { + let mut client = BurrowClient::from_uds().await?; + let mut stream = timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_status(Empty {})) + .await + .context("timed out connecting to Burrow daemon")?? + .into_inner(); + let status = timeout(RPC_TIMEOUT, stream.message()) + .await + .context("timed out reading Burrow tunnel status")?? + .context("Burrow daemon ended the status stream without a state")?; + Ok(match status.state() { + State::Running => TunnelState::Running, + State::Stopped => TunnelState::Stopped, + }) +} + +pub async fn start_tunnel() -> Result<()> { + let mut client = BurrowClient::from_uds().await?; + timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_start(Empty {})) + .await + .context("timed out starting Burrow tunnel")??; + Ok(()) +} + +pub async fn stop_tunnel() -> Result<()> { + let mut client = BurrowClient::from_uds().await?; + timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_stop(Empty {})) + .await + .context("timed out stopping Burrow tunnel")??; + Ok(()) +} + +pub async fn list_networks() -> Result> { + let mut client = BurrowClient::from_uds().await?; + let mut stream = timeout(RPC_TIMEOUT, client.networks_client.network_list(Empty {})) + .await + .context("timed out connecting to Burrow network list")?? + .into_inner(); + let response = timeout(RPC_TIMEOUT, stream.message()) + .await + .context("timed out reading Burrow network list")?? 
+ .context("Burrow daemon ended the network stream without a snapshot")?; + Ok(response.network.iter().map(summarize_network).collect()) +} + +pub async fn add_wireguard(config: String) -> Result { + add_network(NetworkType::WireGuard, config.into_bytes()).await +} + +pub async fn add_tailnet( + authority: String, + account: String, + identity: String, + hostname: Option, + tailnet: Option, +) -> Result { + let provider = infer_tailnet_provider(&authority); + let config = TailnetConfig { + provider, + authority: Some(authority), + account: Some(account), + identity: Some(identity), + hostname, + tailnet, + }; + let payload = serde_json::to_vec_pretty(&config)?; + add_network(NetworkType::Tailnet, payload).await +} + +pub async fn discover_tailnet(email: String) -> Result { + let mut client = BurrowClient::from_uds().await?; + let response = timeout( + RPC_TIMEOUT, + client + .tailnet_client + .discover(TailnetDiscoverRequest { email }), + ) + .await + .context("timed out discovering Tailnet authority")?? + .into_inner(); + + Ok(TailnetDiscovery { + authority: response.authority, + managed: response.managed, + oidc_issuer: optional(response.oidc_issuer), + }) +} + +pub async fn probe_tailnet(authority: String) -> Result { + let mut client = BurrowClient::from_uds().await?; + let response = timeout( + RPC_TIMEOUT, + client + .tailnet_client + .probe(TailnetProbeRequest { authority }), + ) + .await + .context("timed out probing Tailnet authority")?? 
+ .into_inner(); + + Ok(TailnetProbe { + summary: response.summary, + detail: optional(response.detail), + status_code: response.status_code, + }) +} + +pub async fn start_tailnet_login( + authority: String, + account_name: String, + identity_name: String, + hostname: Option, +) -> Result { + let mut client = BurrowClient::from_uds().await?; + let response = timeout( + RPC_TIMEOUT, + client.tailnet_client.login_start(TailnetLoginStartRequest { + account_name, + identity_name, + hostname: hostname.unwrap_or_default(), + authority, + }), + ) + .await + .context("timed out starting Tailnet sign-in")?? + .into_inner(); + Ok(decode_tailnet_status(response)) +} + +pub async fn tailnet_login_status(session_id: String) -> Result { + let mut client = BurrowClient::from_uds().await?; + let response = timeout( + RPC_TIMEOUT, + client + .tailnet_client + .login_status(TailnetLoginStatusRequest { session_id }), + ) + .await + .context("timed out reading Tailnet sign-in status")?? + .into_inner(); + Ok(decode_tailnet_status(response)) +} + +pub async fn cancel_tailnet_login(session_id: String) -> Result<()> { + let mut client = BurrowClient::from_uds().await?; + timeout( + RPC_TIMEOUT, + client + .tailnet_client + .login_cancel(TailnetLoginCancelRequest { session_id }), + ) + .await + .context("timed out cancelling Tailnet sign-in")??; + Ok(()) +} + +async fn add_network(network_type: NetworkType, payload: Vec) -> Result { + let id = next_network_id().await?; + let mut client = BurrowClient::from_uds().await?; + timeout( + RPC_TIMEOUT, + client.networks_client.network_add(Network { + id, + r#type: network_type.into(), + payload, + }), + ) + .await + .context("timed out saving network to Burrow daemon")??; + Ok(id) +} + +async fn next_network_id() -> Result { + let networks = list_networks().await?; + Ok(networks.iter().map(|network| network.id).max().unwrap_or(0) + 1) +} + +fn summarize_network(network: &Network) -> NetworkSummary { + match network.r#type() { + 
NetworkType::WireGuard => summarize_wireguard(network), + NetworkType::Tailnet => summarize_tailnet(network), + } +} + +fn summarize_wireguard(network: &Network) -> NetworkSummary { + let payload = String::from_utf8_lossy(&network.payload); + let detail = payload + .lines() + .map(str::trim) + .find(|line| !line.is_empty() && !line.starts_with('[')) + .unwrap_or("Stored WireGuard configuration") + .to_owned(); + NetworkSummary { + id: network.id, + title: format!("WireGuard {}", network.id), + detail, + } +} + +fn summarize_tailnet(network: &Network) -> NetworkSummary { + match TailnetConfig::from_slice(&network.payload) { + Ok(config) => { + let title = config + .tailnet + .clone() + .or(config.hostname.clone()) + .unwrap_or_else(|| "Tailnet".to_owned()); + let authority = config + .authority + .unwrap_or_else(|| "default authority".to_owned()); + let account = config.account.unwrap_or_else(|| "default".to_owned()); + NetworkSummary { + id: network.id, + title, + detail: format!("{authority} - account {account}"), + } + } + Err(error) => NetworkSummary { + id: network.id, + title: "Tailnet".to_owned(), + detail: format!("Unable to read Tailnet payload: {error}"), + }, + } +} + +fn decode_tailnet_status( + response: burrow::grpc_defs::TailnetLoginStatusResponse, +) -> TailnetLoginStatus { + TailnetLoginStatus { + session_id: response.session_id, + backend_state: response.backend_state, + auth_url: optional(response.auth_url), + running: response.running, + needs_login: response.needs_login, + tailnet_name: optional(response.tailnet_name), + self_dns_name: optional(response.self_dns_name), + tailnet_ips: response.tailnet_ips, + health: response.health, + } +} + +fn optional(value: String) -> Option { + let trimmed = value.trim(); + if trimmed.is_empty() { + None + } else { + Some(trimmed.to_owned()) + } +} + +pub fn normalized(value: &str, fallback: &str) -> String { + let trimmed = value.trim(); + if trimmed.is_empty() { + fallback.to_owned() + } else { + 
trimmed.to_owned() + } +} + +pub fn normalized_optional(value: &str) -> Option { + let trimmed = value.trim(); + if trimmed.is_empty() { + None + } else { + Some(trimmed.to_owned()) + } +} + +pub fn require_value(value: &str, label: &str) -> Result { + normalized_optional(value).ok_or_else(|| anyhow!("{label} is required")) +} diff --git a/burrow-gtk/src/main.rs b/burrow-gtk/src/main.rs index 6f91e2a..b47b63e 100644 --- a/burrow-gtk/src/main.rs +++ b/burrow-gtk/src/main.rs @@ -1,11 +1,15 @@ use anyhow::Result; pub mod components; -mod diag; +mod account_store; +mod daemon_api; // Generated using meson mod config; fn main() { + if let Err(error) = daemon_api::configure_client_paths() { + eprintln!("failed to configure Burrow daemon paths: {error}"); + } components::App::run(); } diff --git a/burrow/Cargo.toml b/burrow/Cargo.toml index 15facd1..22f3d25 100644 --- a/burrow/Cargo.toml +++ b/burrow/Cargo.toml @@ -10,11 +10,13 @@ crate-type = ["lib", "staticlib"] [dependencies] anyhow = "1.0" -tokio = { version = "1.50.0", features = [ +tokio = { version = "1.37", features = [ "rt", "macros", "sync", "io-util", + "net", + "process", "rt-multi-thread", "signal", "time", @@ -32,6 +34,7 @@ serde_json = "1.0" blake2 = "0.10" chacha20poly1305 = "0.10" rand = "0.8" +bytes = "1" rand_core = "0.6" aead = "0.5" x25519-dalek = { version = "2.0", features = [ @@ -45,40 +48,46 @@ base64 = "0.21" fehler = "1.0" ip_network_table = "0.2" ip_network = "0.4" +ipnetwork = { version = "0.21", features = ["serde"] } async-channel = "2.1" schemars = "0.8" futures = "0.3.28" once_cell = "1.19" arti-client = "0.40.0" +hickory-proto = "0.25.2" +netstack-smoltcp = "0.2.1" tokio-util = { version = "0.7.18", features = ["compat"] } +tor-rtcompat = "0.40.0" console-subscriber = { version = "0.2.0", optional = true } console = "0.15.8" -axum = "0.8.8" -reqwest = { version = "0.13.2", default-features = false, features = [ +axum = "0.7.4" +argon2 = "0.5" +reqwest = { version = "0.12", 
default-features = false, features = [ "json", - "rustls", + "rustls-tls", ] } rusqlite = { version = "0.38.0", features = ["blob"] } dotenv = "0.15.0" -tonic = "0.14.5" -tonic-prost = "0.14.5" -prost = "0.14.3" -prost-types = "0.14.3" -tokio-stream = "0.1.18" +tonic = "0.12.0" +prost = "0.13.1" +prost-types = "0.13.1" +tokio-stream = "0.1" async-stream = "0.2" -tower = "0.5.3" -hyper-util = "0.1.20" +tower = { version = "0.4.13", features = ["util"] } +hyper-util = "0.1.6" toml = "0.8.15" rust-ini = "0.21.0" +subtle = "2.6" [target.'cfg(target_os = "linux")'.dependencies] caps = "0.5" -libsystemd = "0.7" -tracing-journald = "0.3" libc = "0.2" +libsystemd = "0.7" +nix = { version = "0.27", features = ["fs", "socket", "uio"] } +tracing-journald = "0.3" [target.'cfg(target_vendor = "apple")'.dependencies] -nix = { version = "0.27", features = ["ioctl"] } +nix = { version = "0.27" } rusqlite = { version = "0.38.0", features = ["bundled", "blob"] } [target.'cfg(target_os = "macos")'.dependencies] @@ -86,6 +95,7 @@ tracing-oslog = { git = "https://github.com/Stormshield-robinc/tracing-oslog" } [dev-dependencies] insta = { version = "1.32", features = ["yaml"] } +tempfile = "3.13" [package.metadata.generate-rpm] assets = [ @@ -102,4 +112,4 @@ bundled = ["rusqlite/bundled"] [build-dependencies] -tonic-prost-build = "0.14.5" +tonic-build = "0.12.0" diff --git a/burrow/build.rs b/burrow/build.rs index 9ecd9a8..8eea5dc 100644 --- a/burrow/build.rs +++ b/burrow/build.rs @@ -1,4 +1,4 @@ fn main() -> Result<(), Box> { - tonic_prost_build::compile_protos("../proto/burrow.proto")?; + tonic_build::compile_protos("../proto/burrow.proto")?; Ok(()) } diff --git a/burrow/src/auth/client.rs b/burrow/src/auth/client.rs deleted file mode 100644 index e9721f3..0000000 --- a/burrow/src/auth/client.rs +++ /dev/null @@ -1,24 +0,0 @@ -use std::env::var; - -use anyhow::Result; -use reqwest::Url; - -pub async fn login() -> Result<()> { - let state = "vt :P"; - let nonce = "no"; - - let mut url 
= Url::parse("https://slack.com/openid/connect/authorize")?; - let mut q = url.query_pairs_mut(); - q.append_pair("response_type", "code"); - q.append_pair("scope", "openid profile email"); - q.append_pair("client_id", &var("CLIENT_ID")?); - q.append_pair("state", state); - q.append_pair("team", &var("SLACK_TEAM_ID")?); - q.append_pair("nonce", nonce); - q.append_pair("redirect_uri", "https://burrow.rs/callback"); - drop(q); - - println!("Continue auth in your browser:\n{}", url.as_str()); - - Ok(()) -} diff --git a/burrow/src/auth/mod.rs b/burrow/src/auth/mod.rs index c07f47e..74f47ad 100644 --- a/burrow/src/auth/mod.rs +++ b/burrow/src/auth/mod.rs @@ -1,2 +1 @@ -pub mod client; pub mod server; diff --git a/burrow/src/auth/server/db.rs b/burrow/src/auth/server/db.rs index 995e64b..c31c473 100644 --- a/burrow/src/auth/server/db.rs +++ b/burrow/src/auth/server/db.rs @@ -1,91 +1,627 @@ -use anyhow::Result; +use anyhow::{anyhow, Context, Result}; +use argon2::{ + password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString}, + Argon2, +}; +use base64::{engine::general_purpose, Engine as _}; +use rand::RngCore; +use rusqlite::{params, Connection, OptionalExtension}; -use crate::daemon::rpc::grpc_defs::{Network, NetworkType}; +use crate::control::{ + DnsConfig, Hostinfo, LocalAuthResponse, MapRequest, MapResponse, Node, NodeCapMap, + PacketFilter, PeerCapMap, RegisterRequest, UserProfile, +}; + +const CREATE_SCHEMA: &str = r#" +CREATE TABLE IF NOT EXISTS auth_user ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + email TEXT NOT NULL UNIQUE, + display_name TEXT NOT NULL, + profile_pic_url TEXT, + groups_json TEXT NOT NULL DEFAULT '[]', + created_at TEXT NOT NULL DEFAULT (datetime('now')) +); + +CREATE TABLE IF NOT EXISTS auth_local_credential ( + user_id INTEGER PRIMARY KEY REFERENCES auth_user(id) ON DELETE CASCADE, + username TEXT NOT NULL UNIQUE, + password_hash TEXT NOT NULL, + rotated_at TEXT NOT NULL DEFAULT (datetime('now')) +); + +CREATE TABLE IF NOT 
EXISTS auth_session ( + id TEXT PRIMARY KEY, + user_id INTEGER NOT NULL REFERENCES auth_user(id) ON DELETE CASCADE, + created_at TEXT NOT NULL DEFAULT (datetime('now')), + expires_at TEXT NOT NULL DEFAULT (datetime('now', '+7 days')) +); + +CREATE TABLE IF NOT EXISTS control_node ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + stable_id TEXT NOT NULL UNIQUE, + user_id INTEGER NOT NULL REFERENCES auth_user(id) ON DELETE CASCADE, + name TEXT NOT NULL, + node_key TEXT NOT NULL UNIQUE, + machine_key TEXT, + disco_key TEXT, + addresses_json TEXT NOT NULL, + allowed_ips_json TEXT NOT NULL, + endpoints_json TEXT NOT NULL, + home_derp INTEGER, + hostinfo_json TEXT, + tags_json TEXT NOT NULL DEFAULT '[]', + primary_routes_json TEXT NOT NULL DEFAULT '[]', + cap_version INTEGER NOT NULL DEFAULT 1, + cap_map_json TEXT NOT NULL DEFAULT '{}', + peer_cap_map_json TEXT NOT NULL DEFAULT '{}', + machine_authorized INTEGER NOT NULL DEFAULT 1, + node_key_expired INTEGER NOT NULL DEFAULT 0, + created_at TEXT NOT NULL DEFAULT (datetime('now')), + updated_at TEXT NOT NULL DEFAULT (datetime('now')), + last_seen TEXT, + online INTEGER +); +"#; + +#[derive(Clone, Debug)] +pub struct StoredUser { + pub profile: UserProfile, +} + +pub fn init_db(path: &str) -> Result<()> { + let conn = Connection::open(path)?; + conn.execute_batch(CREATE_SCHEMA)?; + Ok(()) +} + +pub fn ensure_local_identity( + path: &str, + username: &str, + email: &str, + display_name: &str, + password: &str, +) -> Result { + let conn = Connection::open(path)?; + conn.execute( + "INSERT INTO auth_user (email, display_name) VALUES (?, ?) 
+ ON CONFLICT(email) DO UPDATE SET display_name = excluded.display_name", + params![email, display_name], + )?; + let user_id: i64 = + conn.query_row("SELECT id FROM auth_user WHERE email = ?", [email], |row| { + row.get(0) + })?; + + let existing_hash: Option = conn + .query_row( + "SELECT password_hash FROM auth_local_credential WHERE user_id = ?", + [user_id], + |row| row.get(0), + ) + .optional()?; + + let password_hash = match existing_hash { + Some(hash) if verify_password(password, &hash) => hash, + _ => hash_password(password)?, + }; + + conn.execute( + "INSERT INTO auth_local_credential (user_id, username, password_hash) + VALUES (?, ?, ?) + ON CONFLICT(user_id) DO UPDATE SET username = excluded.username, password_hash = excluded.password_hash, rotated_at = datetime('now')", + params![user_id, username, password_hash], + )?; + + load_user_profile(&conn, user_id) +} + +pub fn authenticate_local( + path: &str, + identifier: &str, + password: &str, +) -> Result> { + let conn = Connection::open(path)?; + let record = conn + .query_row( + "SELECT u.id, u.email, u.display_name, u.profile_pic_url, u.groups_json, c.password_hash + FROM auth_user u + JOIN auth_local_credential c ON c.user_id = u.id + WHERE c.username = ? 
OR u.email = ?", + params![identifier, identifier], + |row| { + Ok(( + row.get::<_, i64>(0)?, + row.get::<_, String>(1)?, + row.get::<_, String>(2)?, + row.get::<_, Option>(3)?, + row.get::<_, String>(4)?, + row.get::<_, String>(5)?, + )) + }, + ) + .optional()?; + + let Some((user_id, email, display_name, profile_pic_url, groups_json, password_hash)) = record + else { + return Ok(None); + }; + + if !verify_password(password, &password_hash) { + return Ok(None); + } + + let token = random_token(); + conn.execute( + "INSERT INTO auth_session (id, user_id) VALUES (?, ?)", + params![token, user_id], + )?; + + Ok(Some(LocalAuthResponse { + access_token: token, + user: UserProfile { + id: user_id, + login_name: email, + display_name, + profile_pic_url, + groups: parse_json(&groups_json)?, + }, + })) +} + +pub fn user_for_session(path: &str, token: &str) -> Result> { + let conn = Connection::open(path)?; + let user_id = conn + .query_row( + "SELECT user_id FROM auth_session WHERE id = ? AND expires_at > datetime('now')", + [token], + |row| row.get::<_, i64>(0), + ) + .optional()?; + let Some(user_id) = user_id else { + return Ok(None); + }; + + Ok(Some(load_user(&conn, user_id)?)) +} + +pub fn upsert_node(path: &str, user: &StoredUser, request: &RegisterRequest) -> Result { + let conn = Connection::open(path)?; + let existing = find_existing_node(&conn, user.profile.id, request)?; + let name = Node::preferred_name(request); + let allowed_ips = Node::normalized_allowed_ips(request); + + match existing { + Some((node_id, stable_id, created_at)) => { + conn.execute( + "UPDATE control_node + SET name = ?, node_key = ?, machine_key = ?, disco_key = ?, addresses_json = ?, allowed_ips_json = ?, + endpoints_json = ?, home_derp = ?, hostinfo_json = ?, tags_json = ?, primary_routes_json = ?, + cap_version = ?, cap_map_json = ?, peer_cap_map_json = ?, updated_at = datetime('now'), + last_seen = datetime('now'), online = 1 + WHERE id = ?", + params![ + name, + request.node_key, + 
request.machine_key, + request.disco_key, + to_json(&request.addresses)?, + to_json(&allowed_ips)?, + to_json(&request.endpoints)?, + request.home_derp, + optional_json(&request.hostinfo)?, + to_json(&request.tags)?, + to_json(&request.primary_routes)?, + request.version.max(1), + to_json(&request.cap_map)?, + to_json(&request.peer_cap_map)?, + node_id, + ], + )?; + load_node(&conn, node_id, stable_id, Some(created_at)) + } + None => { + conn.execute( + "INSERT INTO control_node ( + stable_id, user_id, name, node_key, machine_key, disco_key, addresses_json, allowed_ips_json, + endpoints_json, home_derp, hostinfo_json, tags_json, primary_routes_json, cap_version, + cap_map_json, peer_cap_map_json, last_seen, online + ) VALUES ('', ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, datetime('now'), 1)", + params![ + user.profile.id, + name, + request.node_key, + request.machine_key, + request.disco_key, + to_json(&request.addresses)?, + to_json(&allowed_ips)?, + to_json(&request.endpoints)?, + request.home_derp, + optional_json(&request.hostinfo)?, + to_json(&request.tags)?, + to_json(&request.primary_routes)?, + request.version.max(1), + to_json(&request.cap_map)?, + to_json(&request.peer_cap_map)?, + ], + )?; + let node_id = conn.last_insert_rowid(); + let stable_id = format!("bn-{node_id}"); + conn.execute( + "UPDATE control_node SET stable_id = ? WHERE id = ?", + params![stable_id, node_id], + )?; + load_node(&conn, node_id, stable_id, None) + } + } +} + +pub fn map_for_node( + path: &str, + user: &StoredUser, + request: &MapRequest, + domain: &str, +) -> Result { + let conn = Connection::open(path)?; + apply_map_request(&conn, user.profile.id, request)?; + let self_row = conn + .query_row( + "SELECT id, stable_id, created_at FROM control_node WHERE user_id = ? AND node_key = ?", + params![user.profile.id, request.node_key], + |row| { + Ok(( + row.get::<_, i64>(0)?, + row.get::<_, String>(1)?, + row.get::<_, String>(2)?, + )) + }, + ) + .optional()? 
+ .ok_or_else(|| anyhow!("node not registered"))?; + + let node = load_node(&conn, self_row.0, self_row.1, Some(self_row.2))?; + let peers = load_peers(&conn, node.id)?; + Ok(MapResponse { + map_session_handle: Some(format!("map-{}", node.stable_id)), + seq: Some(request.map_session_seq.unwrap_or(0) + 1), + node, + peers, + domain: domain.to_owned(), + dns: Some(DnsConfig { + resolvers: vec!["1.1.1.1".to_owned(), "1.0.0.1".to_owned()], + search_domains: vec![domain.to_owned()], + magic_dns: true, + }), + packet_filters: vec![PacketFilter::default()], + }) +} pub static PATH: &str = "./server.sqlite3"; -pub fn init_db() -> Result<()> { - let conn = rusqlite::Connection::open(PATH)?; +fn apply_map_request(conn: &Connection, user_id: i64, request: &MapRequest) -> Result<()> { + let current = conn + .query_row( + "SELECT id FROM control_node WHERE user_id = ? AND node_key = ?", + params![user_id, request.node_key], + |row| row.get::<_, i64>(0), + ) + .optional()?; + let Some(node_id) = current else { + return Ok(()); + }; + + let hostinfo_json = optional_json(&request.hostinfo)?; + let endpoints_json = to_json(&request.endpoints)?; conn.execute( - "CREATE TABLE IF NOT EXISTS user ( - id PRIMARY KEY, - created_at TEXT NOT NULL - )", - (), + "UPDATE control_node + SET disco_key = COALESCE(?, disco_key), + hostinfo_json = CASE WHEN ? IS NULL THEN hostinfo_json ELSE ? END, + endpoints_json = CASE WHEN ? = '[]' THEN endpoints_json ELSE ? 
END, + updated_at = datetime('now'), + last_seen = datetime('now'), + online = 1 + WHERE id = ?", + params![ + request.disco_key, + hostinfo_json, + hostinfo_json, + endpoints_json, + endpoints_json, + node_id, + ], )?; - - conn.execute( - "CREATE TABLE IF NOT EXISTS user_connection ( - user_id INTEGER REFERENCES user(id) ON DELETE CASCADE, - openid_provider TEXT NOT NULL, - openid_user_id TEXT NOT NULL, - openid_user_name TEXT NOT NULL, - access_token TEXT NOT NULL, - refresh_token TEXT, - PRIMARY KEY (openid_provider, openid_user_id) - )", - (), - )?; - - conn.execute( - "CREATE TABLE IF NOT EXISTS device ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT, - public_key TEXT NOT NULL, - apns_token TEXT UNIQUE, - user_id INT REFERENCES user(id) ON DELETE CASCADE, - created_at TEXT NOT NULL DEFAULT (datetime('now')) CHECK(created_at IS datetime(created_at)), - ipv4 TEXT NOT NULL UNIQUE, - ipv6 TEXT NOT NULL UNIQUE, - access_token TEXT NOT NULL UNIQUE, - refresh_token TEXT NOT NULL UNIQUE, - expires_at TEXT NOT NULL DEFAULT (datetime('now', '+7 days')) CHECK(expires_at IS datetime(expires_at)) - )", - () - ).unwrap(); - Ok(()) } -pub fn store_connection( - openid_user: super::providers::OpenIdUser, - openid_provider: &str, - access_token: &str, - refresh_token: Option<&str>, -) -> Result<()> { - log::debug!("Storing openid user {:#?}", openid_user); - let conn = rusqlite::Connection::open(PATH)?; +fn find_existing_node( + conn: &Connection, + user_id: i64, + request: &RegisterRequest, +) -> Result> { + let mut candidates = vec![request.node_key.as_str()]; + if let Some(old) = request.old_node_key.as_deref() { + if old != request.node_key { + candidates.push(old); + } + } - conn.execute( - "INSERT OR IGNORE INTO user (id, created_at) VALUES (?, datetime('now'))", - (&openid_user.sub,), - )?; - conn.execute( - "INSERT INTO user_connection (user_id, openid_provider, openid_user_id, openid_user_name, access_token, refresh_token) VALUES ( - (SELECT id FROM user WHERE 
id = ?), - ?, - ?, - ?, - ?, - ? - )", - (&openid_user.sub, &openid_provider, &openid_user.sub, &openid_user.name, access_token, refresh_token), - )?; - - Ok(()) + for candidate in candidates { + let hit = conn + .query_row( + "SELECT id, stable_id, created_at FROM control_node WHERE user_id = ? AND node_key = ?", + params![user_id, candidate], + |row| { + Ok(( + row.get::<_, i64>(0)?, + row.get::<_, String>(1)?, + row.get::<_, String>(2)?, + )) + }, + ) + .optional()?; + if hit.is_some() { + return Ok(hit); + } + } + Ok(None) } -pub fn store_device( - openid_user: super::providers::OpenIdUser, - openid_provider: &str, - access_token: &str, - refresh_token: Option<&str>, -) -> Result<()> { - log::debug!("Storing openid user {:#?}", openid_user); - let conn = rusqlite::Connection::open(PATH)?; - - // TODO - - Ok(()) +fn load_peers(conn: &Connection, self_id: i64) -> Result> { + let mut stmt = conn.prepare( + "SELECT id, stable_id, created_at FROM control_node WHERE id != ? AND machine_authorized = 1 ORDER BY id", + )?; + let peers = stmt + .query_map([self_id], |row| { + Ok(( + row.get::<_, i64>(0)?, + row.get::<_, String>(1)?, + row.get::<_, String>(2)?, + )) + })? 
+ .collect::>>()?; + peers + .into_iter() + .map(|(id, stable_id, created_at)| load_node(conn, id, stable_id, Some(created_at))) + .collect() +} + +fn load_node( + conn: &Connection, + id: i64, + stable_id: String, + created_at_hint: Option, +) -> Result { + let row = conn.query_row( + "SELECT user_id, name, node_key, machine_key, disco_key, addresses_json, allowed_ips_json, + endpoints_json, home_derp, hostinfo_json, tags_json, primary_routes_json, cap_version, + cap_map_json, peer_cap_map_json, machine_authorized, node_key_expired, + created_at, updated_at, last_seen, online + FROM control_node WHERE id = ?", + [id], + |row| { + Ok(( + row.get::<_, i64>(0)?, + row.get::<_, String>(1)?, + row.get::<_, String>(2)?, + row.get::<_, Option>(3)?, + row.get::<_, Option>(4)?, + row.get::<_, String>(5)?, + row.get::<_, String>(6)?, + row.get::<_, String>(7)?, + row.get::<_, Option>(8)?, + row.get::<_, Option>(9)?, + row.get::<_, String>(10)?, + row.get::<_, String>(11)?, + row.get::<_, i32>(12)?, + row.get::<_, String>(13)?, + row.get::<_, String>(14)?, + row.get::<_, i64>(15)?, + row.get::<_, i64>(16)?, + row.get::<_, String>(17)?, + row.get::<_, String>(18)?, + row.get::<_, Option>(19)?, + row.get::<_, Option>(20)?, + )) + }, + )?; + Ok(Node { + id, + stable_id, + user_id: row.0, + name: row.1, + node_key: row.2, + machine_key: row.3, + disco_key: row.4, + addresses: parse_json(&row.5)?, + allowed_ips: parse_json(&row.6)?, + endpoints: parse_json(&row.7)?, + home_derp: row.8, + hostinfo: row.9.map(|raw| parse_json::(&raw)).transpose()?, + tags: parse_json(&row.10)?, + primary_routes: parse_json(&row.11)?, + cap_version: row.12, + cap_map: parse_json::(&row.13)?, + peer_cap_map: parse_json::(&row.14)?, + machine_authorized: row.15 != 0, + node_key_expired: row.16 != 0, + created_at: Some(created_at_hint.unwrap_or(row.17)), + updated_at: Some(row.18), + last_seen: row.19, + online: row.20.map(|value| value != 0), + }) +} + +fn load_user(conn: &Connection, user_id: i64) -> 
Result { + let profile = load_user_profile(conn, user_id)?; + Ok(StoredUser { profile }) +} + +fn load_user_profile(conn: &Connection, user_id: i64) -> Result { + let row = conn.query_row( + "SELECT email, display_name, profile_pic_url, groups_json FROM auth_user WHERE id = ?", + [user_id], + |row| { + Ok(( + row.get::<_, String>(0)?, + row.get::<_, String>(1)?, + row.get::<_, Option>(2)?, + row.get::<_, String>(3)?, + )) + }, + )?; + Ok(UserProfile { + id: user_id, + login_name: row.0, + display_name: row.1, + profile_pic_url: row.2, + groups: parse_json(&row.3)?, + }) +} + +fn hash_password(password: &str) -> Result { + let salt = SaltString::generate(&mut argon2::password_hash::rand_core::OsRng); + let hash = Argon2::default() + .hash_password(password.as_bytes(), &salt) + .map_err(|err| anyhow!("failed to hash password: {err}"))?; + Ok(hash.to_string()) +} + +fn verify_password(password: &str, password_hash: &str) -> bool { + PasswordHash::new(password_hash) + .ok() + .and_then(|hash| { + Argon2::default() + .verify_password(password.as_bytes(), &hash) + .ok() + }) + .is_some() +} + +fn random_token() -> String { + let mut bytes = [0u8; 32]; + rand::thread_rng().fill_bytes(&mut bytes); + general_purpose::URL_SAFE_NO_PAD.encode(bytes) +} + +fn to_json(value: &T) -> Result { + serde_json::to_string(value).context("failed to serialize json") +} + +fn optional_json(value: &Option) -> Result> { + value.as_ref().map(to_json).transpose() +} + +fn parse_json(value: &str) -> Result { + serde_json::from_str(value) + .with_context(|| format!("failed to decode json payload from '{value}'")) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::control::{Hostinfo, RegisterRequest}; + use tempfile::TempDir; + + fn temp_db() -> Result<(TempDir, String)> { + let dir = tempfile::tempdir()?; + let db_path = dir.path().join("server.sqlite3"); + Ok((dir, db_path.to_string_lossy().to_string())) + } + + #[test] + fn local_auth_and_map_round_trip() -> Result<()> { + let (_dir, 
db_path) = temp_db()?; + init_db(&db_path)?; + ensure_local_identity( + &db_path, + "contact", + "contact@burrow.net", + "Burrow Contact", + "password-1", + )?; + + let auth = authenticate_local(&db_path, "contact", "password-1")? + .expect("expected login to succeed"); + let user = + user_for_session(&db_path, &auth.access_token)?.expect("expected session to resolve"); + + let node = upsert_node( + &db_path, + &user, + &RegisterRequest { + node_key: "nodekey:aaaa".to_owned(), + machine_key: Some("machinekey:aaaa".to_owned()), + disco_key: Some("discokey:aaaa".to_owned()), + addresses: vec!["100.64.0.1/32".to_owned()], + endpoints: vec!["203.0.113.10:41641".to_owned()], + hostinfo: Some(Hostinfo { + hostname: Some("burrow-dev".to_owned()), + os: Some("linux".to_owned()), + os_version: Some("6.13".to_owned()), + services: vec!["ssh".to_owned()], + request_tags: vec!["tag:dev".to_owned()], + }), + ..RegisterRequest::default() + }, + )?; + assert_eq!(node.name, "burrow-dev"); + assert_eq!(node.allowed_ips, vec!["100.64.0.1/32"]); + + let map = map_for_node( + &db_path, + &user, + &MapRequest { + node_key: "nodekey:aaaa".to_owned(), + stream: true, + endpoints: vec!["203.0.113.10:41641".to_owned()], + ..MapRequest::default() + }, + "burrow.net", + )?; + assert_eq!(map.node.node_key, "nodekey:aaaa"); + assert_eq!(map.domain, "burrow.net"); + assert!(map.dns.expect("dns config").magic_dns); + Ok(()) + } + + #[test] + fn register_can_rotate_node_keys() -> Result<()> { + let (_dir, db_path) = temp_db()?; + init_db(&db_path)?; + ensure_local_identity( + &db_path, + "contact", + "contact@burrow.net", + "Burrow Contact", + "password-1", + )?; + let auth = authenticate_local(&db_path, "contact@burrow.net", "password-1")? 
+ .expect("expected login to succeed"); + let user = + user_for_session(&db_path, &auth.access_token)?.expect("expected session to resolve"); + + upsert_node( + &db_path, + &user, + &RegisterRequest { + node_key: "nodekey:old".to_owned(), + addresses: vec!["100.64.0.2/32".to_owned()], + ..RegisterRequest::default() + }, + )?; + + let rotated = upsert_node( + &db_path, + &user, + &RegisterRequest { + node_key: "nodekey:new".to_owned(), + old_node_key: Some("nodekey:old".to_owned()), + addresses: vec!["100.64.0.3/32".to_owned()], + ..RegisterRequest::default() + }, + )?; + assert_eq!(rotated.node_key, "nodekey:new"); + assert_eq!(rotated.addresses, vec!["100.64.0.3/32"]); + Ok(()) + } } diff --git a/burrow/src/auth/server/mod.rs b/burrow/src/auth/server/mod.rs index 88b3ff3..fdffce3 100644 --- a/burrow/src/auth/server/mod.rs +++ b/burrow/src/auth/server/mod.rs @@ -1,32 +1,297 @@ pub mod db; -pub mod providers; +pub mod tailscale; -use anyhow::Result; -use axum::{http::StatusCode, routing::post, Router}; -use providers::slack::auth; +use std::{env, path::Path}; + +use anyhow::{Context, Result}; +use axum::{ + extract::{Json, Path as AxumPath, Query, State}, + http::{header::AUTHORIZATION, HeaderMap, StatusCode}, + response::IntoResponse, + routing::{get, post}, + Router, +}; +use serde::Deserialize; use tokio::signal; +use crate::control::{ + discovery, LocalAuthRequest, LocalAuthResponse, MapRequest, MapResponse, RegisterRequest, + RegisterResponse, TailnetDiscovery, BURROW_TAILNET_DOMAIN, +}; + +#[derive(Clone, Debug)] +pub struct BootstrapIdentity { + pub username: String, + pub email: String, + pub display_name: String, + pub password_file: String, +} + +impl Default for BootstrapIdentity { + fn default() -> Self { + Self { + username: "contact".to_owned(), + email: "contact@burrow.net".to_owned(), + display_name: "Burrow Contact".to_owned(), + password_file: "intake/forgejo_pass_contact_at_burrow_net.txt".to_owned(), + } + } +} + +#[derive(Clone, Debug)] +pub 
struct AuthServerConfig { + pub listen: String, + pub db_path: String, + pub tailnet_domain: String, + pub bootstrap: BootstrapIdentity, +} + +impl Default for AuthServerConfig { + fn default() -> Self { + Self { + listen: "0.0.0.0:8080".to_owned(), + db_path: db::PATH.to_owned(), + tailnet_domain: BURROW_TAILNET_DOMAIN.to_owned(), + bootstrap: BootstrapIdentity::default(), + } + } +} + +impl AuthServerConfig { + pub fn from_env() -> Self { + let mut config = Self::default(); + if let Ok(value) = env::var("BURROW_AUTH_LISTEN") { + config.listen = value; + } + if let Ok(value) = env::var("BURROW_AUTH_DB_PATH") { + config.db_path = value; + } + if let Ok(value) = env::var("BURROW_AUTH_TAILNET_DOMAIN") { + config.tailnet_domain = value; + } + if let Ok(value) = env::var("BURROW_BOOTSTRAP_USERNAME") { + config.bootstrap.username = value; + } + if let Ok(value) = env::var("BURROW_BOOTSTRAP_EMAIL") { + config.bootstrap.email = value; + } + if let Ok(value) = env::var("BURROW_BOOTSTRAP_DISPLAY_NAME") { + config.bootstrap.display_name = value; + } + if let Ok(value) = env::var("BURROW_BOOTSTRAP_PASSWORD_FILE") { + config.bootstrap.password_file = value; + } + config + } + + fn bootstrap_password(&self) -> Result> { + let path = Path::new(&self.bootstrap.password_file); + if !path.exists() { + return Ok(None); + } + let password = std::fs::read_to_string(path).with_context(|| { + format!("failed to read bootstrap password from {}", path.display()) + })?; + let password = password.trim().to_owned(); + if password.is_empty() { + return Ok(None); + } + Ok(Some(password)) + } +} + +#[derive(Clone)] +struct AppState { + config: AuthServerConfig, + tailscale: tailscale::TailscaleBridgeManager, +} + +#[derive(Debug, Deserialize)] +struct TailnetDiscoveryQuery { + email: String, +} + +type AppResult = Result; + pub async fn serve() -> Result<()> { - db::init_db()?; + serve_with_config(AuthServerConfig::from_env()).await +} - let app = Router::new() - .route("/slack-auth", 
post(auth)) - .route("/device/new", post(device_new)); +pub async fn serve_with_config(config: AuthServerConfig) -> Result<()> { + db::init_db(&config.db_path)?; + if let Some(password) = config.bootstrap_password()? { + db::ensure_local_identity( + &config.db_path, + &config.bootstrap.username, + &config.bootstrap.email, + &config.bootstrap.display_name, + &password, + )?; + } - let listener = tokio::net::TcpListener::bind("0.0.0.0:8080").await.unwrap(); - log::info!("Starting auth server on port 8080"); + let app = build_router(config.clone()); + let listener = tokio::net::TcpListener::bind(&config.listen).await?; + log::info!("Starting auth server on {}", config.listen); axum::serve(listener, app) .with_graceful_shutdown(shutdown_signal()) - .await - .unwrap(); - + .await?; Ok(()) } -async fn device_new() -> StatusCode { +pub fn build_router(config: AuthServerConfig) -> Router { + Router::new() + .route("/healthz", get(healthz)) + .route("/device/new", post(device_new)) + .route("/v1/auth/login", post(login_local)) + .route("/v1/control/register", post(control_register)) + .route("/v1/control/map", post(control_map)) + .route("/v1/tailnet/discover", get(tailnet_discover)) + .route("/v1/tailscale/login/start", post(tailscale_login_start)) + .route("/v1/tailscale/login/:session_id", get(tailscale_login_status)) + .with_state(AppState { + config, + tailscale: tailscale::TailscaleBridgeManager::default(), + }) +} + +async fn login_local( + State(state): State, + Json(request): Json, +) -> AppResult> { + let db_path = state.config.db_path.clone(); + blocking(move || db::authenticate_local(&db_path, &request.identifier, &request.password)) + .await? 
+ .map(Json) + .ok_or_else(|| (StatusCode::UNAUTHORIZED, "invalid credentials".to_owned())) +} + +async fn control_register( + headers: HeaderMap, + State(state): State, + Json(request): Json, +) -> AppResult> { + let token = bearer_token(&headers)?; + let db_path = state.config.db_path.clone(); + let user = blocking({ + let db_path = db_path.clone(); + let token = token.clone(); + move || db::user_for_session(&db_path, &token) + }) + .await? + .ok_or_else(|| (StatusCode::UNAUTHORIZED, "unknown session".to_owned()))?; + + let response_user = user.profile.clone(); + let node = blocking(move || db::upsert_node(&db_path, &user, &request)).await?; + Ok(Json(RegisterResponse { + user: response_user, + machine_authorized: node.machine_authorized, + node_key_expired: node.node_key_expired, + auth_url: None, + error: None, + node, + })) +} + +async fn control_map( + headers: HeaderMap, + State(state): State, + Json(request): Json, +) -> AppResult> { + let token = bearer_token(&headers)?; + let db_path = state.config.db_path.clone(); + let domain = state.config.tailnet_domain.clone(); + let user = blocking({ + let db_path = db_path.clone(); + let token = token.clone(); + move || db::user_for_session(&db_path, &token) + }) + .await? 
+ .ok_or_else(|| (StatusCode::UNAUTHORIZED, "unknown session".to_owned()))?; + + let response = blocking(move || db::map_for_node(&db_path, &user, &request, &domain)).await?; + Ok(Json(response)) +} + +async fn tailnet_discover( + Query(query): Query, +) -> AppResult> { + if query.email.trim().is_empty() { + return Err((StatusCode::BAD_REQUEST, "email is required".to_owned())); + } + + let discovery = discovery::discover_tailnet(&query.email) + .await + .map_err(|err| (StatusCode::BAD_GATEWAY, err.to_string()))?; + Ok(Json(discovery)) +} + +async fn tailscale_login_start( + State(state): State, + Json(request): Json, +) -> AppResult> { + let response = state + .tailscale + .start_login(request) + .await + .map_err(internal_error)?; + Ok(Json(response)) +} + +async fn tailscale_login_status( + AxumPath(session_id): AxumPath, + State(state): State, +) -> AppResult> { + state + .tailscale + .status(&session_id) + .await + .map_err(internal_error)? + .map(Json) + .ok_or_else(|| (StatusCode::NOT_FOUND, "unknown tailscale login session".to_owned())) +} + +async fn healthz() -> impl IntoResponse { StatusCode::OK } +async fn device_new() -> impl IntoResponse { + StatusCode::OK +} + +async fn blocking(work: F) -> AppResult +where + F: FnOnce() -> Result + Send + 'static, + T: Send + 'static, +{ + tokio::task::spawn_blocking(work) + .await + .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))? 
+ .map_err(internal_error) +} + +fn internal_error(err: anyhow::Error) -> (StatusCode, String) { + (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()) +} + +fn bearer_token(headers: &HeaderMap) -> AppResult { + let value = headers.get(AUTHORIZATION).ok_or_else(|| { + ( + StatusCode::UNAUTHORIZED, + "missing authorization header".to_owned(), + ) + })?; + let value = value.to_str().map_err(|_| { + ( + StatusCode::BAD_REQUEST, + "invalid authorization header".to_owned(), + ) + })?; + value + .strip_prefix("Bearer ") + .map(ToOwned::to_owned) + .ok_or_else(|| (StatusCode::UNAUTHORIZED, "expected bearer token".to_owned())) +} + async fn shutdown_signal() { let ctrl_c = async { signal::ctrl_c() @@ -51,12 +316,115 @@ async fn shutdown_signal() { } } -// mod db { -// use rusqlite::{Connection, Result}; +#[cfg(test)] +mod tests { + use super::*; + use axum::{ + body::{to_bytes, Body}, + http::{Request, StatusCode}, + }; + use tempfile::tempdir; + use tower::ServiceExt; -// #[derive(Debug)] -// struct User { -// id: i32, -// created_at: String, -// } -// } + #[tokio::test] + async fn login_register_and_map_round_trip() -> Result<()> { + let dir = tempdir()?; + let password_file = dir.path().join("bootstrap-password.txt"); + std::fs::write(&password_file, "bootstrap-pass\n")?; + let db_path = dir.path().join("server.sqlite3"); + let config = AuthServerConfig { + listen: "127.0.0.1:0".to_owned(), + db_path: db_path.to_string_lossy().to_string(), + tailnet_domain: "burrow.net".to_owned(), + bootstrap: BootstrapIdentity { + password_file: password_file.to_string_lossy().to_string(), + ..BootstrapIdentity::default() + }, + }; + + db::init_db(&config.db_path)?; + let password = config.bootstrap_password()?.expect("bootstrap password"); + db::ensure_local_identity( + &config.db_path, + &config.bootstrap.username, + &config.bootstrap.email, + &config.bootstrap.display_name, + &password, + )?; + + let app = build_router(config); + + let response = app + .clone() + .oneshot( + 
Request::post("/v1/auth/login") + .header("content-type", "application/json") + .body(Body::from(serde_json::to_vec(&LocalAuthRequest { + identifier: "contact".to_owned(), + password: "bootstrap-pass".to_owned(), + })?))?, + ) + .await?; + assert_eq!(response.status(), StatusCode::OK); + let login: LocalAuthResponse = + serde_json::from_slice(&to_bytes(response.into_body(), usize::MAX).await?)?; + + let response = app + .clone() + .oneshot( + Request::post("/v1/control/register") + .header("content-type", "application/json") + .header("authorization", format!("Bearer {}", login.access_token)) + .body(Body::from(serde_json::to_vec(&RegisterRequest { + node_key: "nodekey:1234".to_owned(), + machine_key: Some("machinekey:1234".to_owned()), + addresses: vec!["100.64.0.10/32".to_owned()], + endpoints: vec!["198.51.100.10:41641".to_owned()], + hostinfo: Some(crate::control::Hostinfo { + hostname: Some("devbox".to_owned()), + os: Some("linux".to_owned()), + os_version: Some("6.13".to_owned()), + services: vec!["ssh".to_owned()], + request_tags: vec!["tag:dev".to_owned()], + }), + ..RegisterRequest::default() + })?))?, + ) + .await?; + assert_eq!(response.status(), StatusCode::OK); + + let response = app + .oneshot( + Request::post("/v1/control/map") + .header("content-type", "application/json") + .header("authorization", format!("Bearer {}", login.access_token)) + .body(Body::from(serde_json::to_vec(&MapRequest { + node_key: "nodekey:1234".to_owned(), + stream: true, + endpoints: vec!["198.51.100.10:41641".to_owned()], + ..MapRequest::default() + })?))?, + ) + .await?; + assert_eq!(response.status(), StatusCode::OK); + let map: MapResponse = + serde_json::from_slice(&to_bytes(response.into_body(), usize::MAX).await?)?; + assert_eq!(map.domain, "burrow.net"); + assert_eq!(map.node.name, "devbox"); + assert!(map.dns.expect("dns").magic_dns); + Ok(()) + } + + #[tokio::test] + async fn tailnet_discover_requires_email() -> Result<()> { + let app = 
build_router(AuthServerConfig::default()); + let response = app + .oneshot( + Request::get("/v1/tailnet/discover?email=") + .body(Body::empty())?, + ) + .await?; + assert_eq!(response.status(), StatusCode::BAD_REQUEST); + Ok(()) + } +} diff --git a/burrow/src/auth/server/providers/mod.rs b/burrow/src/auth/server/providers/mod.rs deleted file mode 100644 index 36ff0bd..0000000 --- a/burrow/src/auth/server/providers/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -pub mod slack; -pub use super::db; - -#[derive(serde::Deserialize, Default, Debug)] -pub struct OpenIdUser { - pub sub: String, - pub name: String, -} diff --git a/burrow/src/auth/server/providers/slack.rs b/burrow/src/auth/server/providers/slack.rs deleted file mode 100644 index 581cd1e..0000000 --- a/burrow/src/auth/server/providers/slack.rs +++ /dev/null @@ -1,102 +0,0 @@ -use anyhow::Result; -use axum::{ - extract::Json, - http::StatusCode, - routing::{get, post}, -}; -use reqwest::header::AUTHORIZATION; -use serde::Deserialize; - -use super::db::store_connection; - -#[derive(Deserialize)] -pub struct SlackToken { - slack_token: String, -} -pub async fn auth(Json(payload): Json) -> (StatusCode, String) { - let slack_user = match fetch_slack_user(&payload.slack_token).await { - Ok(user) => user, - Err(e) => { - log::error!("Failed to fetch Slack user: {:?}", e); - return (StatusCode::UNAUTHORIZED, String::new()); - } - }; - - log::info!( - "Slack user {} ({}) logged in.", - slack_user.name, - slack_user.sub - ); - - let conn = match store_connection(slack_user, "slack", &payload.slack_token, None) { - Ok(user) => user, - Err(e) => { - log::error!("Failed to fetch Slack user: {:?}", e); - return (StatusCode::UNAUTHORIZED, String::new()); - } - }; - - (StatusCode::OK, String::new()) -} - -async fn fetch_slack_user(access_token: &str) -> Result { - let client = reqwest::Client::new(); - let res = client - .get("https://slack.com/api/openid.connect.userInfo") - .header(AUTHORIZATION, format!("Bearer {}", access_token)) 
- .send() - .await? - .json::() - .await?; - - let res_ok = res - .get("ok") - .and_then(|v| v.as_bool()) - .ok_or(anyhow::anyhow!("Slack user object not ok!"))?; - - if !res_ok { - return Err(anyhow::anyhow!("Slack user object not ok!")); - } - - Ok(serde_json::from_value(res)?) -} - -// async fn fetch_save_slack_user_data(query: Query) -> anyhow::Result<()> { -// let client = reqwest::Client::new(); -// log::trace!("Code was {}", &query.code); -// let mut url = Url::parse("https://slack.com/api/openid.connect.token")?; - -// { -// let mut q = url.query_pairs_mut(); -// q.append_pair("client_id", &var("CLIENT_ID")?); -// q.append_pair("client_secret", &var("CLIENT_SECRET")?); -// q.append_pair("code", &query.code); -// q.append_pair("grant_type", "authorization_code"); -// q.append_pair("redirect_uri", "https://burrow.rs/callback"); -// } - -// let data = client -// .post(url) -// .send() -// .await? -// .json::() -// .await?; - -// if !data.ok { -// return Err(anyhow::anyhow!("Slack code exchange response not ok!")); -// } - -// if let Some(access_token) = data.access_token { -// log::trace!("Access token is {access_token}"); -// let user = slack::fetch_slack_user(&access_token) -// .await -// .map_err(|err| anyhow::anyhow!("Failed to fetch Slack user info {:#?}", err))?; - -// db::store_user(user, access_token, String::new()) -// .map_err(|_| anyhow::anyhow!("Failed to store user in db"))?; - -// Ok(()) -// } else { -// Err(anyhow::anyhow!("Access token not found in response")) -// } -// } diff --git a/burrow/src/auth/server/tailscale.rs b/burrow/src/auth/server/tailscale.rs new file mode 100644 index 0000000..d08c807 --- /dev/null +++ b/burrow/src/auth/server/tailscale.rs @@ -0,0 +1,519 @@ +use std::{ + collections::HashMap, + env, + path::{Path, PathBuf}, + process::Stdio, + sync::Arc, + time::Duration, +}; + +use anyhow::{anyhow, Context, Result}; +use rand::RngCore; +use reqwest::Client; +use serde::{Deserialize, Serialize}; +use tokio::{ + 
io::{AsyncBufReadExt, BufReader}, + process::{Child, Command}, + sync::Mutex, + task::JoinHandle, +}; + +#[derive(Clone, Debug, Default, Deserialize)] +pub struct TailscaleLoginStartRequest { + pub account_name: String, + pub identity_name: String, + #[serde(default)] + pub hostname: Option, + #[serde(default)] + pub control_url: Option, + #[serde(default)] + pub packet_socket: Option, +} + +#[derive(Clone, Debug, Serialize, Deserialize, Default)] +pub struct TailscaleLoginStatus { + pub backend_state: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub auth_url: Option, + #[serde(default)] + pub running: bool, + #[serde(default)] + pub needs_login: bool, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tailnet_name: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub magic_dns_suffix: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub self_dns_name: Option, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub tailscale_ips: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub health: Vec, +} + +#[derive(Clone, Debug, Serialize)] +pub struct TailscaleLoginStartResponse { + pub session_id: String, + pub status: TailscaleLoginStatus, +} + +pub struct TailscaleLoginSession { + pub session_id: String, + pub helper: Arc, + pub status: TailscaleLoginStatus, +} + +#[derive(Clone, Default)] +pub struct TailscaleBridgeManager { + client: Client, + sessions: Arc>>>, +} + +pub struct TailscaleHelperProcess { + session_id: String, + listen_url: String, + packet_socket: Option, + control_url: Option, + state_dir: PathBuf, + child: Arc>, + _stderr_task: JoinHandle<()>, +} + +type ManagedSession = TailscaleHelperProcess; + +#[derive(Debug, Deserialize)] +struct HelperHello { + listen_addr: String, + #[serde(default)] + packet_socket: Option, +} + +impl TailscaleBridgeManager { + pub async fn start_login( + &self, + request: TailscaleLoginStartRequest, + ) -> Result 
{ + let session = self.ensure_session(request).await?; + Ok(TailscaleLoginStartResponse { + session_id: session.session_id, + status: session.status, + }) + } + + pub async fn ensure_session( + &self, + request: TailscaleLoginStartRequest, + ) -> Result { + let key = session_key_for_request(&request); + let requested_packet_socket = request + .packet_socket + .as_deref() + .map(str::trim) + .filter(|value| !value.is_empty()); + let requested_control_url = request + .control_url + .as_deref() + .map(str::trim) + .filter(|value| !value.is_empty()); + + if let Some(existing) = self.sessions.lock().await.get(&key).cloned() { + let needs_restart_for_socket = match (requested_packet_socket, existing.packet_socket()) + { + (Some(requested), Some(current)) => current != Path::new(requested), + (Some(_), None) => true, + _ => false, + }; + let needs_restart_for_control_url = + requested_control_url != existing.control_url().map(|value| value.trim()); + + if !needs_restart_for_socket && !needs_restart_for_control_url { + match self.fetch_status(existing.as_ref()).await { + Ok(status) => { + return Ok(TailscaleLoginSession { + session_id: existing.session_id.clone(), + helper: existing, + status, + }); + } + Err(err) => { + log::warn!( + "tailscale login session {} is stale, restarting: {err}", + existing.session_id + ); + } + } + } else { + log::info!( + "tailscale login session {} no longer matches requested transport, restarting", + existing.session_id + ); + } + + self.sessions.lock().await.remove(&key); + let _ = self.shutdown_session(existing.as_ref()).await; + } + + let session = Arc::new(spawn_tailscale_helper(&request).await?); + let status = self.wait_for_status(session.as_ref()).await?; + let response = TailscaleLoginSession { + session_id: session.session_id.clone(), + helper: session.clone(), + status, + }; + + self.sessions.lock().await.insert(key, session); + Ok(response) + } + + pub async fn status(&self, session_id: &str) -> Result> { + let session = { + let 
sessions = self.sessions.lock().await; + sessions + .values() + .find(|session| session.session_id == session_id) + .cloned() + }; + + match session { + Some(session) => match self.fetch_status(session.as_ref()).await { + Ok(status) => Ok(Some(status)), + Err(err) => { + self.remove_session_by_id(session_id).await; + Err(err) + } + }, + None => Ok(None), + } + } + + pub async fn cancel(&self, session_id: &str) -> Result { + let session = self.remove_session_by_id(session_id).await; + match session { + Some(session) => { + self.shutdown_session(session.as_ref()).await?; + Ok(true) + } + None => Ok(false), + } + } + + async fn wait_for_status(&self, session: &ManagedSession) -> Result { + let mut last_error = None; + let mut last_status = None; + for _ in 0..40 { + match session.status_with_client(&self.client).await { + Ok(status) if status.running || status.auth_url.is_some() => return Ok(status), + Ok(status) => last_status = Some(status), + Err(err) => last_error = Some(err), + } + tokio::time::sleep(Duration::from_millis(250)).await; + } + if let Some(status) = last_status { + return Ok(status); + } + Err(last_error.unwrap_or_else(|| anyhow!("tailscale helper did not become ready"))) + } + + async fn fetch_status(&self, session: &ManagedSession) -> Result { + session.status_with_client(&self.client).await + } + + async fn remove_session_by_id(&self, session_id: &str) -> Option> { + let mut sessions = self.sessions.lock().await; + let key = sessions + .iter() + .find_map(|(key, session)| (session.session_id == session_id).then(|| key.clone()))?; + sessions.remove(&key) + } + + async fn shutdown_session(&self, session: &ManagedSession) -> Result<()> { + session.shutdown_with_client(&self.client).await + } +} + +impl TailscaleHelperProcess { + pub fn session_id(&self) -> &str { + &self.session_id + } + + pub fn packet_socket(&self) -> Option<&Path> { + self.packet_socket.as_deref() + } + + pub fn control_url(&self) -> Option<&str> { + self.control_url.as_deref() + 
} + + pub fn state_dir(&self) -> &Path { + &self.state_dir + } + + pub async fn status(&self) -> Result { + self.status_with_client(&Client::new()).await + } + + pub async fn shutdown(&self) -> Result<()> { + self.shutdown_with_client(&Client::new()).await + } + + async fn status_with_client(&self, client: &Client) -> Result { + let mut child = self.child.lock().await; + if let Some(status) = child.try_wait()? { + return Err(anyhow!( + "tailscale helper exited with status {status} for {}", + self.state_dir.display() + )); + } + drop(child); + + let response = client + .get(format!("{}/status", self.listen_url)) + .send() + .await + .context("failed to query tailscale helper status")? + .error_for_status() + .context("tailscale helper status request failed")?; + + let status = response + .json::() + .await + .context("invalid tailscale helper status response")?; + + log::info!( + "tailscale helper status session={} backend_state={} running={} needs_login={} auth_url={:?}", + self.session_id, + status.backend_state, + status.running, + status.needs_login, + status.auth_url + ); + Ok(status) + } + + async fn shutdown_with_client(&self, client: &Client) -> Result<()> { + let _ = client.post(format!("{}/shutdown", self.listen_url)).send().await; + + for _ in 0..10 { + let mut child = self.child.lock().await; + if child.try_wait()?.is_some() { + return Ok(()); + } + drop(child); + tokio::time::sleep(Duration::from_millis(100)).await; + } + + let mut child = self.child.lock().await; + child + .start_kill() + .context("failed to kill tailscale helper")?; + let _ = child.wait().await; + Ok(()) + } +} + +pub async fn spawn_tailscale_helper( + request: &TailscaleLoginStartRequest, +) -> Result { + let state_dir = state_root().join(session_dir_name(request)); + tokio::fs::create_dir_all(&state_dir) + .await + .with_context(|| format!("failed to create {}", state_dir.display()))?; + + let mut child = helper_command(request, &state_dir)? 
+ .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .context("failed to spawn tailscale login helper")?; + + let stdout = child + .stdout + .take() + .context("tailscale helper stdout unavailable")?; + let stderr = child + .stderr + .take() + .context("tailscale helper stderr unavailable")?; + + let hello_line = tokio::time::timeout(Duration::from_secs(20), async move { + let mut lines = BufReader::new(stdout).lines(); + lines.next_line().await + }) + .await + .context("timed out waiting for tailscale helper startup")?? + .context("tailscale helper exited before reporting listen address")?; + + let hello: HelperHello = + serde_json::from_str(&hello_line).context("invalid tailscale helper startup line")?; + + let stderr_task = tokio::spawn(async move { + let mut lines = BufReader::new(stderr).lines(); + while let Ok(Some(line)) = lines.next_line().await { + log::info!("tailscale-login-bridge: {line}"); + } + }); + + Ok(TailscaleHelperProcess { + session_id: random_session_id(), + listen_url: format!("http://{}", hello.listen_addr), + packet_socket: hello.packet_socket.map(PathBuf::from), + control_url: request.control_url.clone(), + state_dir, + child: Arc::new(Mutex::new(child)), + _stderr_task: stderr_task, + }) +} + +fn helper_command(request: &TailscaleLoginStartRequest, state_dir: &Path) -> Result { + let mut command = if let Ok(path) = env::var("BURROW_TAILSCALE_HELPER") { + Command::new(path) + } else { + let helper_dir = Path::new(env!("CARGO_MANIFEST_DIR")) + .join("..") + .join("Tools/tailscale-login-bridge"); + let mut command = Command::new("go"); + command.current_dir(helper_dir).arg("run").arg("."); + command.env("GOWORK", "off"); + command + }; + + command + .arg("--listen") + .arg("127.0.0.1:0") + .arg("--state-dir") + .arg(state_dir) + .arg("--hostname") + .arg(default_hostname(request)); + + if let Some(control_url) = request.control_url.as_deref() { + let trimmed = control_url.trim(); + if !trimmed.is_empty() { + 
command.arg("--control-url").arg(trimmed); + } + } + + if let Some(packet_socket) = request.packet_socket.as_deref() { + let trimmed = packet_socket.trim(); + if !trimmed.is_empty() { + command.arg("--packet-socket").arg(trimmed); + } + } + + Ok(command) +} + +pub(crate) fn packet_socket_path(request: &TailscaleLoginStartRequest) -> PathBuf { + state_root().join(session_dir_name(request)).join("packet.sock") +} + +pub(crate) fn state_root() -> PathBuf { + if let Ok(path) = env::var("BURROW_TAILSCALE_STATE_ROOT") { + return PathBuf::from(path); + } + + let home = env::var_os("HOME") + .map(PathBuf::from) + .unwrap_or_else(|| PathBuf::from(".")); + if cfg!(target_vendor = "apple") { + return home + .join("Library") + .join("Application Support") + .join("Burrow") + .join("tailscale"); + } + home.join(".local") + .join("share") + .join("burrow") + .join("tailscale") +} + +pub(crate) fn session_dir_name(request: &TailscaleLoginStartRequest) -> String { + format!( + "{}-{}-{}", + slug(&request.account_name), + slug(&request.identity_name), + slug(control_scope(request)) + ) +} + +fn session_key_for_request(request: &TailscaleLoginStartRequest) -> String { + format!( + "{}:{}:{}", + request.account_name, + request.identity_name, + control_scope(request) + ) +} + +fn control_scope(request: &TailscaleLoginStartRequest) -> &str { + request + .control_url + .as_deref() + .map(str::trim) + .filter(|value| !value.is_empty()) + .unwrap_or("tailscale-managed") +} + +pub(crate) fn default_hostname(request: &TailscaleLoginStartRequest) -> String { + request + .hostname + .as_deref() + .filter(|value| !value.trim().is_empty()) + .map(ToOwned::to_owned) + .unwrap_or_else(|| format!("burrow-{}", slug(&request.identity_name))) +} + +fn random_session_id() -> String { + let mut bytes = [0_u8; 12]; + rand::thread_rng().fill_bytes(&mut bytes); + bytes.iter().map(|byte| format!("{byte:02x}")).collect() +} + +fn slug(input: &str) -> String { + let mut output = 
String::with_capacity(input.len()); + for ch in input.chars() { + if ch.is_ascii_alphanumeric() { + output.push(ch.to_ascii_lowercase()); + } else if ch == '-' || ch == '_' { + output.push('-'); + } + } + if output.is_empty() { + "default".to_owned() + } else { + output + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn slug_sanitizes_input() { + assert_eq!(slug("Apple Phone"), "applephone"); + assert_eq!(slug("default_identity"), "default-identity"); + assert_eq!(slug(""), "default"); + } + + #[test] + fn state_dir_is_scoped_by_account_identity_and_control_plane() { + let request = TailscaleLoginStartRequest { + account_name: "default".to_owned(), + identity_name: "apple".to_owned(), + hostname: None, + control_url: None, + packet_socket: None, + }; + assert_eq!(session_dir_name(&request), "default-apple-tailscale-managed"); + assert_eq!(default_hostname(&request), "burrow-apple"); + + let custom_request = TailscaleLoginStartRequest { + control_url: Some("https://ts.burrow.net".to_owned()), + ..request + }; + assert_eq!( + session_dir_name(&custom_request), + "default-apple-httpstsburrownet" + ); + } +} diff --git a/burrow/src/control/config.rs b/burrow/src/control/config.rs new file mode 100644 index 0000000..3862bcd --- /dev/null +++ b/burrow/src/control/config.rs @@ -0,0 +1,87 @@ +use anyhow::{Context, Result}; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum TailnetProvider { + Tailscale, + Headscale, + Burrow, +} + +impl Default for TailnetProvider { + fn default() -> Self { + Self::Tailscale + } +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct TailnetConfig { + #[serde(default)] + pub provider: TailnetProvider, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub authority: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub account: Option, + #[serde(default, 
skip_serializing_if = "Option::is_none")] + pub identity: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tailnet: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hostname: Option, +} + +impl TailnetConfig { + pub fn from_slice(bytes: &[u8]) -> Result { + let payload = std::str::from_utf8(bytes).context("tailnet payload must be valid UTF-8")?; + Self::from_str(payload) + } + + pub fn from_str(payload: &str) -> Result { + let trimmed = payload.trim(); + if trimmed.starts_with('{') { + return serde_json::from_str(trimmed).context("invalid tailnet JSON payload"); + } + toml::from_str(trimmed).context("invalid tailnet TOML payload") + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parses_json_payload() { + let config = TailnetConfig::from_str( + r#"{ + "provider":"tailscale", + "account":"default", + "identity":"apple", + "tailnet":"example.ts.net", + "hostname":"burrow-phone" + }"#, + ) + .unwrap(); + assert_eq!(config.provider, TailnetProvider::Tailscale); + assert_eq!(config.account.as_deref(), Some("default")); + assert_eq!(config.identity.as_deref(), Some("apple")); + } + + #[test] + fn parses_toml_payload() { + let config = TailnetConfig::from_str( + r#" +provider = "headscale" +authority = "https://headscale.example.com" +account = "default" +identity = "apple" +"#, + ) + .unwrap(); + assert_eq!(config.provider, TailnetProvider::Headscale); + assert_eq!( + config.authority.as_deref(), + Some("https://headscale.example.com") + ); + } +} diff --git a/burrow/src/control/discovery.rs b/burrow/src/control/discovery.rs new file mode 100644 index 0000000..d044a62 --- /dev/null +++ b/burrow/src/control/discovery.rs @@ -0,0 +1,359 @@ +use anyhow::{anyhow, Context, Result}; +use reqwest::{Client, StatusCode, Url}; +use serde::{Deserialize, Serialize}; +use tracing::{debug, info}; + +use super::TailnetProvider; + +pub const TAILNET_DISCOVERY_REL: &str = 
"https://burrow.net/rel/tailnet-control-server"; +const TAILNET_DISCOVERY_PATH: &str = "/.well-known/burrow-tailnet"; +const WEBFINGER_PATH: &str = "/.well-known/webfinger"; +const MANAGED_TAILSCALE_AUTHORITY: &str = "controlplane.tailscale.com"; + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct TailnetDiscovery { + pub domain: String, + pub provider: TailnetProvider, + pub authority: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub oidc_issuer: Option, +} + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct TailnetAuthorityProbe { + pub authority: String, + pub status_code: i32, + pub summary: String, + pub detail: String, + pub reachable: bool, +} + +#[derive(Clone, Debug, Default, Deserialize)] +struct WebFingerDocument { + #[serde(default)] + links: Vec, +} + +#[derive(Clone, Debug, Default, Deserialize)] +struct WebFingerLink { + #[serde(default)] + rel: String, + #[serde(default)] + href: Option, +} + +pub async fn discover_tailnet(email: &str) -> Result { + let domain = email_domain(email)?; + info!(%email, %domain, "tailnet discovery requested"); + let base_url = Url::parse(&format!("https://{domain}")) + .with_context(|| format!("invalid discovery domain {domain}"))?; + let client = Client::builder() + .user_agent("burrow-tailnet-discovery") + .timeout(std::time::Duration::from_secs(10)) + .build() + .context("failed to build tailnet discovery client")?; + discover_tailnet_at(&client, email, &base_url).await +} + +pub fn normalize_authority(authority: &str) -> String { + let trimmed = authority.trim(); + if trimmed.contains("://") { + trimmed.to_owned() + } else { + format!("https://{trimmed}") + } +} + +pub fn is_managed_tailscale_authority(authority: &str) -> bool { + let normalized = normalize_authority(authority) + .trim_end_matches('/') + .to_ascii_lowercase(); + normalized == format!("https://{MANAGED_TAILSCALE_AUTHORITY}") + || normalized == 
format!("http://{MANAGED_TAILSCALE_AUTHORITY}") +} + +pub async fn probe_tailnet_authority(authority: &str) -> Result { + let authority = normalize_authority(authority); + if is_managed_tailscale_authority(&authority) { + return Ok(TailnetAuthorityProbe { + authority, + status_code: 200, + summary: "Tailscale-managed control plane".to_owned(), + detail: "Using Tailscale's default login server.".to_owned(), + reachable: true, + }); + } + + let base_url = + Url::parse(&authority).with_context(|| format!("invalid tailnet authority {authority}"))?; + let client = Client::builder() + .user_agent("burrow-tailnet-probe") + .timeout(std::time::Duration::from_secs(10)) + .build() + .context("failed to build tailnet authority probe client")?; + + if let Some(status) = + probe_url(&client, base_url.join("/health")?, &authority, "Tailnet server reachable").await? + { + return Ok(status); + } + + if let Some(status) = probe_url( + &client, + base_url.clone(), + &authority, + "Tailnet server reachable", + ) + .await? + { + return Ok(status); + } + + Err(anyhow!("could not connect to the server")) +} + +pub async fn discover_tailnet_at( + client: &Client, + email: &str, + base_url: &Url, +) -> Result { + let domain = email_domain(email)?; + debug!(%email, %domain, base_url = %base_url, "starting tailnet domain discovery"); + + if let Some(discovery) = discover_well_known(client, base_url).await? { + info!( + %email, + %domain, + authority = %discovery.authority, + provider = ?discovery.provider, + "resolved tailnet discovery from well-known document" + ); + return Ok(TailnetDiscovery { domain, ..discovery }); + } + + if let Some(authority) = discover_webfinger(client, email, base_url).await? 
{ + info!(%email, %domain, %authority, "resolved tailnet discovery from webfinger"); + return Ok(TailnetDiscovery { + domain, + provider: inferred_provider(Some(&authority), None), + authority, + oidc_issuer: None, + }); + } + + Err(anyhow!("no tailnet discovery metadata found for {domain}")) +} + +pub fn email_domain(email: &str) -> Result { + let trimmed = email.trim(); + let (_, domain) = trimmed + .rsplit_once('@') + .ok_or_else(|| anyhow!("email address must include a domain"))?; + let domain = domain.trim().trim_matches('.').to_ascii_lowercase(); + if domain.is_empty() { + return Err(anyhow!("email address must include a domain")); + } + Ok(domain) +} + +pub fn inferred_provider( + authority: Option<&str>, + explicit: Option<&TailnetProvider>, +) -> TailnetProvider { + if matches!(explicit, Some(TailnetProvider::Burrow)) { + return TailnetProvider::Burrow; + } + if authority.is_some_and(is_managed_tailscale_authority) { + return TailnetProvider::Tailscale; + } + TailnetProvider::Headscale +} + +async fn discover_well_known(client: &Client, base_url: &Url) -> Result> { + let url = base_url + .join(TAILNET_DISCOVERY_PATH) + .context("failed to build tailnet discovery URL")?; + debug!(%url, "requesting tailnet well-known document"); + let response = client + .get(url) + .header("accept", "application/json") + .send() + .await + .context("tailnet well-known request failed")?; + + match response.status() { + StatusCode::OK => response + .json::() + .await + .context("invalid tailnet discovery document") + .map(Some), + StatusCode::NOT_FOUND => Ok(None), + status => Err(anyhow!("tailnet well-known lookup failed with HTTP {status}")), + } +} + +async fn discover_webfinger(client: &Client, email: &str, base_url: &Url) -> Result> { + let mut url = base_url + .join(WEBFINGER_PATH) + .context("failed to build webfinger URL")?; + url.query_pairs_mut() + .append_pair("resource", &format!("acct:{email}")) + .append_pair("rel", TAILNET_DISCOVERY_REL); + debug!(%email, url = 
%url, "requesting tailnet webfinger document"); + + let response = client + .get(url) + .header("accept", "application/jrd+json, application/json") + .send() + .await + .context("tailnet webfinger request failed")?; + + match response.status() { + StatusCode::OK => { + let document = response + .json::() + .await + .context("invalid webfinger document")?; + Ok(document + .links + .into_iter() + .find(|link| link.rel == TAILNET_DISCOVERY_REL) + .and_then(|link| link.href) + .filter(|href| !href.trim().is_empty())) + } + StatusCode::NOT_FOUND => Ok(None), + status => Err(anyhow!("tailnet webfinger lookup failed with HTTP {status}")), + } +} + +async fn probe_url( + client: &Client, + url: Url, + authority: &str, + summary: &str, +) -> Result> { + let response = match client + .get(url) + .header("accept", "application/json") + .send() + .await + { + Ok(response) => response, + Err(_) => return Ok(None), + }; + + let status = response.status(); + if !status.is_success() { + return Ok(None); + } + + let detail = response.text().await.unwrap_or_default().trim().to_owned(); + Ok(Some(TailnetAuthorityProbe { + authority: authority.to_owned(), + status_code: i32::from(status.as_u16()), + summary: summary.to_owned(), + detail, + reachable: true, + })) +} + +#[cfg(test)] +mod tests { + use axum::{routing::get, Router}; + use serde_json::json; + use tokio::net::TcpListener; + + use super::*; + + #[test] + fn extracts_domain_from_email() { + assert_eq!(email_domain("Contact@Burrow.net").unwrap(), "burrow.net"); + assert!(email_domain("contact").is_err()); + } + + #[test] + fn detects_managed_tailscale_authority() { + assert!(is_managed_tailscale_authority("controlplane.tailscale.com")); + assert!(is_managed_tailscale_authority("https://controlplane.tailscale.com/")); + assert!(!is_managed_tailscale_authority("https://ts.burrow.net")); + } + + #[tokio::test] + async fn discovers_from_well_known_document() -> Result<()> { + let router = Router::new().route( + 
TAILNET_DISCOVERY_PATH, + get(|| async { + axum::Json(json!({ + "domain": "burrow.net", + "provider": "headscale", + "authority": "https://ts.burrow.net", + "oidc_issuer": "https://auth.burrow.net/application/o/ts/" + })) + }), + ); + + let listener = TcpListener::bind("127.0.0.1:0").await?; + let base_url = Url::parse(&format!("http://{}", listener.local_addr()?))?; + let server = tokio::spawn(async move { axum::serve(listener, router).await }); + + let client = Client::builder().build()?; + let discovery = discover_tailnet_at(&client, "contact@burrow.net", &base_url).await?; + assert_eq!(discovery.provider, TailnetProvider::Headscale); + assert_eq!(discovery.authority, "https://ts.burrow.net"); + assert_eq!(discovery.domain, "burrow.net"); + + server.abort(); + Ok(()) + } + + #[tokio::test] + async fn falls_back_to_webfinger_authority() -> Result<()> { + let router = Router::new() + .route( + TAILNET_DISCOVERY_PATH, + get(|| async { (StatusCode::NOT_FOUND, "") }), + ) + .route( + WEBFINGER_PATH, + get(|| async { + axum::Json(json!({ + "subject": "acct:contact@burrow.net", + "links": [ + { + "rel": TAILNET_DISCOVERY_REL, + "href": "https://ts.burrow.net" + } + ] + })) + }), + ); + + let listener = TcpListener::bind("127.0.0.1:0").await?; + let base_url = Url::parse(&format!("http://{}", listener.local_addr()?))?; + let server = tokio::spawn(async move { axum::serve(listener, router).await }); + + let client = Client::builder().build()?; + let discovery = discover_tailnet_at(&client, "contact@burrow.net", &base_url).await?; + assert_eq!(discovery.provider, TailnetProvider::Headscale); + assert_eq!(discovery.authority, "https://ts.burrow.net"); + + server.abort(); + Ok(()) + } + + #[tokio::test] + async fn probes_custom_authority() -> Result<()> { + let router = Router::new().route("/health", get(|| async { "ok" })); + let listener = TcpListener::bind("127.0.0.1:0").await?; + let authority = format!("http://{}", listener.local_addr()?); + let server = 
tokio::spawn(async move { axum::serve(listener, router).await }); + + let status = probe_tailnet_authority(&authority).await?; + assert_eq!(status.authority, authority); + assert_eq!(status.status_code, 200); + assert!(status.reachable); + + server.abort(); + Ok(()) + } +} diff --git a/burrow/src/control/mod.rs b/burrow/src/control/mod.rs new file mode 100644 index 0000000..472f673 --- /dev/null +++ b/burrow/src/control/mod.rs @@ -0,0 +1,255 @@ +pub mod config; +pub mod discovery; + +use std::collections::BTreeMap; + +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +pub use config::{TailnetConfig, TailnetProvider}; +pub use discovery::{TailnetDiscovery, TAILNET_DISCOVERY_REL}; + +pub const BURROW_CAPABILITY_VERSION: i32 = 1; +pub const BURROW_TAILNET_DOMAIN: &str = "burrow.net"; + +pub type NodeCapMap = BTreeMap>; +pub type PeerCapMap = BTreeMap>; + +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct Hostinfo { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hostname: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub os: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub os_version: Option, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub services: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub request_tags: Vec, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct UserProfile { + pub id: i64, + pub login_name: String, + pub display_name: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub profile_pic_url: Option, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub groups: Vec, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct RegisterAuth { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub auth_key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub 
oauth_access_token: Option, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] +pub struct Node { + pub id: i64, + pub stable_id: String, + pub name: String, + pub user_id: i64, + pub node_key: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub machine_key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub disco_key: Option, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub addresses: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub allowed_ips: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub endpoints: Vec, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub home_derp: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hostinfo: Option, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub tags: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub primary_routes: Vec, + #[serde(default = "default_capability_version")] + pub cap_version: i32, + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub cap_map: NodeCapMap, + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub peer_cap_map: PeerCapMap, + #[serde(default)] + pub machine_authorized: bool, + #[serde(default)] + pub node_key_expired: bool, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub created_at: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub updated_at: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub last_seen: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub online: Option, +} + +impl Node { + pub fn preferred_name(request: &RegisterRequest) -> String { + if let Some(name) = request.name.as_deref() { + return name.to_owned(); + } + if let Some(hostname) = request + .hostinfo + .as_ref() + .and_then(|hostinfo| hostinfo.hostname.as_deref()) + { + return hostname.to_owned(); + } + 
format!("node-{}", short_key(&request.node_key)) + } + + pub fn normalized_allowed_ips(request: &RegisterRequest) -> Vec { + if request.allowed_ips.is_empty() { + return request.addresses.clone(); + } + request.allowed_ips.clone() + } +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct RegisterRequest { + #[serde(default = "default_capability_version")] + pub version: i32, + pub node_key: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub old_node_key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub machine_key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub disco_key: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub auth: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub expiry: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub followup: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hostinfo: Option, + #[serde(default)] + pub ephemeral: bool, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub tailnet: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub addresses: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub allowed_ips: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub endpoints: Vec, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub home_derp: Option, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub tags: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub primary_routes: Vec, + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub cap_map: NodeCapMap, + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub peer_cap_map: PeerCapMap, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] 
+pub struct RegisterResponse { + pub user: UserProfile, + pub node: Node, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub auth_url: Option, + pub machine_authorized: bool, + pub node_key_expired: bool, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub error: Option, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct MapRequest { + #[serde(default = "default_capability_version")] + pub version: i32, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub compress: Option, + #[serde(default)] + pub keep_alive: bool, + pub node_key: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub disco_key: Option, + #[serde(default)] + pub stream: bool, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub hostinfo: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub map_session_handle: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub map_session_seq: Option, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub endpoints: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub debug_flags: Vec, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub connection_handle: Option, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct DnsConfig { + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub resolvers: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub search_domains: Vec, + #[serde(default)] + pub magic_dns: bool, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct PacketFilter { + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub sources: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub destinations: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub protocols: Vec, +} + +#[derive(Clone, Debug, Default, Serialize, 
Deserialize, PartialEq)] +pub struct MapResponse { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub map_session_handle: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub seq: Option, + pub node: Node, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub peers: Vec, + pub domain: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub dns: Option, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub packet_filters: Vec, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct LocalAuthRequest { + pub identifier: String, + pub password: String, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] +pub struct LocalAuthResponse { + pub access_token: String, + pub user: UserProfile, +} + +fn default_capability_version() -> i32 { + BURROW_CAPABILITY_VERSION +} + +fn short_key(key: &str) -> String { + key.chars().take(8).collect() +} diff --git a/burrow/src/daemon/apple.rs b/burrow/src/daemon/apple.rs index c60f131..f369ea9 100644 --- a/burrow/src/daemon/apple.rs +++ b/burrow/src/daemon/apple.rs @@ -1,11 +1,11 @@ use std::{ ffi::{c_char, CStr}, path::PathBuf, - sync::Arc, + sync::{Arc, Mutex}, thread, }; -use once_cell::sync::OnceCell; +use once_cell::sync::{Lazy, OnceCell}; use tokio::{ runtime::{Builder, Handle}, sync::Notify, @@ -14,25 +14,35 @@ use tracing::error; use crate::daemon::daemon_main; -static BURROW_NOTIFY: OnceCell> = OnceCell::new(); static BURROW_HANDLE: OnceCell = OnceCell::new(); +static BURROW_READY: OnceCell<()> = OnceCell::new(); +static BURROW_SPAWN_LOCK: Lazy> = Lazy::new(|| Mutex::new(())); #[no_mangle] pub unsafe extern "C" fn spawn_in_process(path: *const c_char, db_path: *const c_char) { + let path_buf = if path.is_null() { + None + } else { + Some(PathBuf::from(CStr::from_ptr(path).to_str().unwrap())) + }; + let db_path_buf = if db_path.is_null() { + None + } else { + 
Some(PathBuf::from(CStr::from_ptr(db_path).to_str().unwrap())) + }; + spawn_in_process_with_paths(path_buf, db_path_buf); +} + +pub fn spawn_in_process_with_paths(path_buf: Option, db_path_buf: Option) { crate::tracing::initialize(); - let notify = BURROW_NOTIFY.get_or_init(|| Arc::new(Notify::new())); + let _guard = BURROW_SPAWN_LOCK.lock().unwrap(); + if BURROW_READY.get().is_some() { + return; + } + + let notify = Arc::new(Notify::new()); let handle = BURROW_HANDLE.get_or_init(|| { - let path_buf = if path.is_null() { - None - } else { - Some(PathBuf::from(CStr::from_ptr(path).to_str().unwrap())) - }; - let db_path_buf = if db_path.is_null() { - None - } else { - Some(PathBuf::from(CStr::from_ptr(db_path).to_str().unwrap())) - }; let sender = notify.clone(); let (handle_tx, handle_rx) = tokio::sync::oneshot::channel(); @@ -62,4 +72,5 @@ pub unsafe extern "C" fn spawn_in_process(path: *const c_char, db_path: *const c let receiver = notify.clone(); handle.block_on(async move { receiver.notified().await }); + let _ = BURROW_READY.set(()); } diff --git a/burrow/src/daemon/instance.rs b/burrow/src/daemon/instance.rs index f21678e..9b2e138 100644 --- a/burrow/src/daemon/instance.rs +++ b/burrow/src/daemon/instance.rs @@ -3,32 +3,35 @@ use std::{ sync::Arc, }; -use anyhow::{anyhow, Context, Result}; +use anyhow::Result; use rusqlite::Connection; -use tokio::{ - sync::{mpsc, watch, RwLock}, - task::JoinHandle, -}; +use tokio::sync::{mpsc, watch, RwLock}; use tokio_stream::wrappers::ReceiverStream; use tonic::{Request, Response, Status as RspStatus}; -use tracing::warn; -use tun::{tokio::TunInterface, TunOptions}; +use tracing::{debug, info, warn}; +use tun::tokio::TunInterface; -use super::rpc::{ - grpc_defs::{ - networks_server::Networks, tunnel_server::Tunnel, Empty, Network, NetworkDeleteRequest, - NetworkListResponse, NetworkReorderRequest, NetworkType, State as RPCTunnelState, - TunnelConfigurationResponse, TunnelStatusResponse, +use super::{ + rpc::grpc_defs::{ + 
networks_server::Networks, tailnet_control_server::TailnetControl, tunnel_server::Tunnel, + Empty, Network, NetworkDeleteRequest, NetworkListResponse, NetworkReorderRequest, + State as RPCTunnelState, TailnetDiscoverRequest, TailnetDiscoverResponse, + TailnetProbeRequest, TailnetProbeResponse, TunnelConfigurationResponse, TunnelPacket, + TunnelStatusResponse, }, - ServerConfig, + runtime::{tailnet_helper_request, ActiveTunnel, ResolvedTunnel}, }; use crate::{ + auth::server::tailscale::{ + packet_socket_path, TailscaleBridgeManager, + TailscaleLoginStartRequest as BridgeLoginStartRequest, TailscaleLoginStatus, + }, + control::discovery, + daemon::rpc::ServerConfig, database::{add_network, delete_network, get_connection, list_networks, reorder_network}, - tor::{self, Config as TorConfig, TorHandle}, - wireguard::{Config as WireGuardConfig, Interface as WireGuardInterface}, }; -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone)] enum RunState { Running, Idle, @@ -43,167 +46,25 @@ impl RunState { } } -#[derive(Clone, Debug, PartialEq, Eq)] -enum RuntimeIdentity { - DefaultWireGuard, - Network { id: i32, network_type: NetworkType }, -} - -#[derive(Clone, Debug)] -enum ResolvedTunnel { - WireGuard { - identity: RuntimeIdentity, - config: WireGuardConfig, - }, - Tor { - identity: RuntimeIdentity, - config: TorConfig, - }, -} - -impl ResolvedTunnel { - fn from_networks(networks: &[Network], fallback: &WireGuardConfig) -> Result { - let Some(network) = networks.first() else { - return Ok(Self::WireGuard { - identity: RuntimeIdentity::DefaultWireGuard, - config: fallback.clone(), - }); - }; - - let identity = RuntimeIdentity::Network { - id: network.id, - network_type: network.r#type(), - }; - - match network.r#type() { - NetworkType::WireGuard => { - let payload = String::from_utf8(network.payload.clone()) - .context("wireguard payload must be valid UTF-8")?; - let config = WireGuardConfig::from_content_fmt(&payload, "ini")?; - Ok(Self::WireGuard { identity, 
config }) - } - NetworkType::Tor => { - let config = TorConfig::from_payload(&network.payload)?; - Ok(Self::Tor { identity, config }) - } - NetworkType::HackClub => { - Err(anyhow!("HackClub runtime is not available on this branch")) - } - } - } - - fn identity(&self) -> &RuntimeIdentity { - match self { - Self::WireGuard { identity, .. } | Self::Tor { identity, .. } => identity, - } - } - - fn server_config(&self) -> Result { - match self { - Self::WireGuard { config, .. } => ServerConfig::try_from(config), - Self::Tor { config, .. } => Ok(ServerConfig { - address: config.address.clone(), - name: config.tun_name.clone(), - mtu: config.mtu.map(|mtu| mtu as i32), - }), - } - } - - async fn start(self, tun_interface: Arc>>) -> Result { - match self { - Self::WireGuard { identity, config } => { - let tun = TunOptions::new() - .address(config.interface.address.clone()) - .open()?; - tun_interface.write().await.replace(tun); - - let mut interface: WireGuardInterface = config.try_into()?; - interface.set_tun_ref(tun_interface.clone()).await; - let interface = Arc::new(RwLock::new(interface)); - let run_interface = interface.clone(); - let task = tokio::spawn(async move { - let guard = run_interface.read().await; - guard.run().await - }); - - Ok(ActiveTunnel::WireGuard { identity, interface, task }) - } - Self::Tor { identity, config } => { - let mut tun_options = TunOptions::new().address(config.address.clone()); - if let Some(name) = config.tun_name.as_deref() { - tun_options = tun_options.name(name); - } - let tun = tun_options.open()?; - tun_interface.write().await.replace(tun); - - match tor::spawn(config).await { - Ok(handle) => Ok(ActiveTunnel::Tor { identity, handle }), - Err(err) => { - tun_interface.write().await.take(); - Err(err) - } - } - } - } - } -} - -enum ActiveTunnel { - WireGuard { - identity: RuntimeIdentity, - interface: Arc>, - task: JoinHandle>, - }, - Tor { - identity: RuntimeIdentity, - handle: TorHandle, - }, -} - -impl ActiveTunnel { - fn 
identity(&self) -> &RuntimeIdentity { - match self { - Self::WireGuard { identity, .. } | Self::Tor { identity, .. } => identity, - } - } - - async fn shutdown(self, tun_interface: &Arc>>) -> Result<()> { - match self { - Self::WireGuard { interface, task, .. } => { - interface.read().await.remove_tun().await; - let task_result = task.await; - tun_interface.write().await.take(); - task_result??; - Ok(()) - } - Self::Tor { handle, .. } => { - let result = handle.shutdown().await; - tun_interface.write().await.take(); - result - } - } - } -} - #[derive(Clone)] pub struct DaemonRPCServer { tun_interface: Arc>>, - default_config: Arc>, db_path: Option, wg_state_chan: (watch::Sender, watch::Receiver), network_update_chan: (watch::Sender<()>, watch::Receiver<()>), active_tunnel: Arc>>, + tailnet_login: TailscaleBridgeManager, } impl DaemonRPCServer { - pub fn new(config: Arc>, db_path: Option<&Path>) -> Result { + pub fn new(db_path: Option<&Path>) -> Result { Ok(Self { tun_interface: Arc::new(RwLock::new(None)), - default_config: config, db_path: db_path.map(Path::to_owned), wg_state_chan: watch::channel(RunState::Idle), network_update_chan: watch::channel(()), active_tunnel: Arc::new(RwLock::new(None)), + tailnet_login: TailscaleBridgeManager::default(), }) } @@ -222,20 +83,25 @@ impl DaemonRPCServer { async fn resolve_tunnel(&self) -> Result { let conn = self.get_connection()?; let networks = list_networks(&conn).map_err(proc_err)?; - let fallback = self.default_config.read().await.clone(); - ResolvedTunnel::from_networks(&networks, &fallback).map_err(proc_err) + ResolvedTunnel::from_networks(&networks).map_err(proc_err) } async fn current_tunnel_configuration(&self) -> Result { - let config = self - .resolve_tunnel() - .await? 
- .server_config() - .map_err(proc_err)?; - Ok(TunnelConfigurationResponse { - addresses: config.address, - mtu: config.mtu.unwrap_or(1500), - }) + let config = { + let active = self.active_tunnel.read().await; + active + .as_ref() + .map(|tunnel| tunnel.server_config().clone()) + }; + let config = match config { + Some(config) => config, + None => self + .resolve_tunnel() + .await? + .server_config() + .map_err(proc_err)?, + }; + Ok(configuration_rsp(config)) } async fn stop_active_tunnel(&self) -> Result { @@ -254,8 +120,18 @@ impl DaemonRPCServer { async fn replace_active_tunnel(&self, desired: ResolvedTunnel) -> Result<(), RspStatus> { let _ = self.stop_active_tunnel().await?; + let tailnet_helper = match &desired { + ResolvedTunnel::Tailnet { identity, config } => Some( + self.tailnet_login + .ensure_session(tailnet_helper_request(identity, config)) + .await + .map_err(proc_err)? + .helper, + ), + _ => None, + }; let active = desired - .start(self.tun_interface.clone()) + .start(self.tun_interface.clone(), tailnet_helper) .await .map_err(proc_err)?; self.active_tunnel.write().await.replace(active); @@ -279,11 +155,34 @@ impl DaemonRPCServer { Ok(()) } + + fn tailnet_bridge_request( + account_name: String, + identity_name: String, + hostname: String, + authority: String, + ) -> BridgeLoginStartRequest { + let mut request = BridgeLoginStartRequest { + account_name, + identity_name, + hostname: (!hostname.trim().is_empty()).then_some(hostname), + control_url: Self::tailnet_control_url(&authority), + packet_socket: None, + }; + request.packet_socket = Some(packet_socket_path(&request).display().to_string()); + request + } + + fn tailnet_control_url(authority: &str) -> Option { + let authority = discovery::normalize_authority(authority); + (!discovery::is_managed_tailscale_authority(&authority)).then_some(authority) + } } #[tonic::async_trait] impl Tunnel for DaemonRPCServer { type TunnelConfigurationStream = ReceiverStream>; + type TunnelPacketsStream = 
ReceiverStream>; type TunnelStatusStream = ReceiverStream>; async fn tunnel_configuration( @@ -309,6 +208,62 @@ impl Tunnel for DaemonRPCServer { Ok(Response::new(ReceiverStream::new(rx))) } + async fn tunnel_packets( + &self, + request: Request>, + ) -> Result, RspStatus> { + let (packet_tx, mut packet_rx) = { + let guard = self.active_tunnel.read().await; + let Some(active) = guard.as_ref() else { + return Err(RspStatus::failed_precondition("no active tunnel")); + }; + active.packet_stream().ok_or_else(|| { + RspStatus::failed_precondition( + "active tunnel does not support packet streaming", + ) + })? + }; + + let (tx, rx) = mpsc::channel(128); + tokio::spawn(async move { + loop { + match packet_rx.recv().await { + Ok(payload) => { + if tx.send(Ok(TunnelPacket { payload })).await.is_err() { + break; + } + } + Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => continue, + Err(tokio::sync::broadcast::error::RecvError::Closed) => break, + } + } + }); + + let mut inbound = request.into_inner(); + tokio::spawn(async move { + loop { + match inbound.message().await { + Ok(Some(packet)) => { + debug!( + "daemon tunnel packet stream received {} bytes from client", + packet.payload.len() + ); + if packet_tx.send(packet.payload).await.is_err() { + break; + } + } + Ok(None) => break, + Err(error) => { + warn!("tailnet packet stream receive error: {error}"); + break; + } + } + } + }); + + Ok(Response::new(ReceiverStream::new(rx))) + } + async fn tunnel_start(&self, _request: Request) -> Result, RspStatus> { let desired = self.resolve_tunnel().await?; let already_running = { @@ -418,13 +373,168 @@ impl Networks for DaemonRPCServer { } } +#[tonic::async_trait] +impl TailnetControl for DaemonRPCServer { + async fn discover( + &self, + request: Request, + ) -> Result, RspStatus> { + let request = request.into_inner(); + info!(email = %request.email, "daemon tailnet discover RPC received"); + let discovery = discovery::discover_tailnet(&request.email) + .await + 
.map_err(proc_err)?; + info!( + email = %request.email, + authority = %discovery.authority, + provider = ?discovery.provider, + "daemon tailnet discover RPC resolved" + ); + + Ok(Response::new(TailnetDiscoverResponse { + domain: discovery.domain, + authority: discovery.authority.clone(), + oidc_issuer: discovery.oidc_issuer.unwrap_or_default(), + managed: matches!( + discovery::inferred_provider(Some(&discovery.authority), Some(&discovery.provider)), + crate::control::TailnetProvider::Tailscale + ), + })) + } + + async fn probe( + &self, + request: Request, + ) -> Result, RspStatus> { + let request = request.into_inner(); + let status = discovery::probe_tailnet_authority(&request.authority) + .await + .map_err(proc_err)?; + + Ok(Response::new(TailnetProbeResponse { + authority: status.authority, + status_code: status.status_code, + summary: status.summary, + detail: status.detail, + reachable: status.reachable, + })) + } + + async fn login_start( + &self, + request: Request, + ) -> Result, RspStatus> { + let request = request.into_inner(); + info!( + account = %request.account_name, + identity = %request.identity_name, + authority = %request.authority, + "daemon tailnet login start RPC received" + ); + let response = self + .tailnet_login + .start_login(Self::tailnet_bridge_request( + request.account_name, + request.identity_name, + request.hostname, + request.authority, + )) + .await + .map_err(proc_err)?; + + info!( + session_id = %response.session_id, + backend_state = %response.status.backend_state, + running = response.status.running, + needs_login = response.status.needs_login, + auth_url = ?response.status.auth_url, + "daemon tailnet login start RPC resolved" + ); + + Ok(Response::new(tailnet_login_rsp( + response.session_id, + response.status, + ))) + } + + async fn login_status( + &self, + request: Request, + ) -> Result, RspStatus> { + let request = request.into_inner(); + info!(session_id = %request.session_id, "daemon tailnet login status RPC 
received"); + let status = self + .tailnet_login + .status(&request.session_id) + .await + .map_err(proc_err)?; + let Some(status) = status else { + return Err(RspStatus::not_found("tailnet login session not found")); + }; + info!( + session_id = %request.session_id, + backend_state = %status.backend_state, + running = status.running, + needs_login = status.needs_login, + auth_url = ?status.auth_url, + "daemon tailnet login status RPC resolved" + ); + Ok(Response::new(tailnet_login_rsp(request.session_id, status))) + } + + async fn login_cancel( + &self, + request: Request, + ) -> Result, RspStatus> { + let request = request.into_inner(); + let canceled = self + .tailnet_login + .cancel(&request.session_id) + .await + .map_err(proc_err)?; + if !canceled { + return Err(RspStatus::not_found("tailnet login session not found")); + } + Ok(Response::new(Empty {})) + } +} + fn proc_err(err: impl ToString) -> RspStatus { RspStatus::internal(err.to_string()) } +fn configuration_rsp(config: ServerConfig) -> TunnelConfigurationResponse { + TunnelConfigurationResponse { + addresses: config.address, + mtu: config.mtu.unwrap_or(1000), + routes: config.routes, + dns_servers: config.dns_servers, + search_domains: config.search_domains, + include_default_route: config.include_default_route, + } +} + fn status_rsp(state: RunState) -> TunnelStatusResponse { TunnelStatusResponse { state: state.to_rpc().into(), - start: None, + start: None, // TODO: Add timestamp + } +} + +fn tailnet_login_rsp( + session_id: String, + status: TailscaleLoginStatus, +) -> super::rpc::grpc_defs::TailnetLoginStatusResponse { + super::rpc::grpc_defs::TailnetLoginStatusResponse { + session_id, + backend_state: status.backend_state, + auth_url: status.auth_url.unwrap_or_default(), + running: status.running, + needs_login: status.needs_login, + tailnet_name: status.tailnet_name.unwrap_or_default(), + magic_dns_suffix: status.magic_dns_suffix.unwrap_or_default(), + self_dns_name: 
status.self_dns_name.unwrap_or_default(), + tailnet_ips: status.tailscale_ips, + health: status.health, } } diff --git a/burrow/src/daemon/mod.rs b/burrow/src/daemon/mod.rs index 8ec0ce2..724e3bb 100644 --- a/burrow/src/daemon/mod.rs +++ b/burrow/src/daemon/mod.rs @@ -4,22 +4,23 @@ pub mod apple; mod instance; mod net; pub mod rpc; +mod runtime; use anyhow::{Error as AhError, Result}; use instance::DaemonRPCServer; pub use net::{get_socket_path, DaemonClient}; pub use rpc::{DaemonCommand, DaemonResponseData, DaemonStartOptions}; -use tokio::{ - net::UnixListener, - sync::{Notify, RwLock}, -}; +use tokio::{net::UnixListener, sync::Notify}; use tokio_stream::wrappers::UnixListenerStream; use tonic::transport::Server; use tracing::info; use crate::{ - daemon::rpc::grpc_defs::{networks_server::NetworksServer, tunnel_server::TunnelServer}, - database::{get_connection, load_interface}, + daemon::rpc::grpc_defs::{ + networks_server::NetworksServer, tailnet_control_server::TailnetControlServer, + tunnel_server::TunnelServer, + }, + database::get_connection, }; pub async fn daemon_main( @@ -27,12 +28,8 @@ pub async fn daemon_main( db_path: Option<&Path>, notify_ready: Option>, ) -> Result<()> { - if let Some(n) = notify_ready { - n.notify_one() - } - let conn = get_connection(db_path)?; - let config = load_interface(&conn, "1")?; - let burrow_server = DaemonRPCServer::new(Arc::new(RwLock::new(config)), db_path.clone())?; + let _conn = get_connection(db_path)?; + let burrow_server = DaemonRPCServer::new(db_path)?; let spp = socket_path.clone(); let tmp = get_socket_path(); let sock_path = spp.unwrap_or(Path::new(tmp.as_str())); @@ -42,17 +39,243 @@ pub async fn daemon_main( let uds = UnixListener::bind(sock_path)?; let serve_job = tokio::spawn(async move { let uds_stream = UnixListenerStream::new(uds); + let tailnet_server = burrow_server.clone(); let _srv = Server::builder() .add_service(TunnelServer::new(burrow_server.clone())) 
.add_service(NetworksServer::new(burrow_server)) + .add_service(TailnetControlServer::new(tailnet_server)) .serve_with_incoming(uds_stream) .await?; Ok::<(), AhError>(()) }); + if let Some(n) = notify_ready { + n.notify_one(); + } + info!("Starting daemon..."); tokio::try_join!(serve_job) .map(|_| ()) .map_err(|e| e.into()) } + +#[cfg(test)] +mod tests { + use std::{ + path::PathBuf, + time::{SystemTime, UNIX_EPOCH}, + }; + + use anyhow::{anyhow, Result}; + use tokio::time::{timeout, Duration}; + + use super::*; + use crate::daemon::rpc::{ + client::BurrowClient, + grpc_defs::{ + Empty, Network, NetworkListResponse, NetworkReorderRequest, NetworkType, + TunnelConfigurationResponse, TunnelStatusResponse, + }, + }; + + #[tokio::test] + async fn daemon_tracks_network_priority_via_grpc() -> Result<()> { + let socket_path = temp_path("sock"); + let db_path = temp_path("sqlite3"); + let ready = Arc::new(Notify::new()); + + let daemon_ready = ready.clone(); + let daemon_socket_path = socket_path.clone(); + let daemon_db_path = db_path.clone(); + let daemon_task = tokio::spawn(async move { + daemon_main( + Some(daemon_socket_path.as_path()), + Some(daemon_db_path.as_path()), + Some(daemon_ready), + ) + .await + }); + + timeout(Duration::from_secs(5), ready.notified()).await?; + + let mut client = timeout( + Duration::from_secs(5), + BurrowClient::from_uds_path(&socket_path), + ) + .await??; + let mut config_stream = client + .tunnel_client + .tunnel_configuration(Empty {}) + .await? + .into_inner(); + let mut network_stream = client + .networks_client + .network_list(Empty {}) + .await? + .into_inner(); + let mut status_stream = client + .tunnel_client + .tunnel_status(Empty {}) + .await? 
+ .into_inner(); + + let initial_config = next_configuration(&mut config_stream).await?; + assert!(initial_config.addresses.is_empty()); + assert_eq!(initial_config.mtu, 1500); + + let initial_networks = next_networks(&mut network_stream).await?; + assert!(initial_networks.network.is_empty()); + + let initial_status = next_status(&mut status_stream).await?; + assert_eq!( + initial_status.state(), + crate::daemon::rpc::grpc_defs::State::Stopped + ); + + client.tunnel_client.tunnel_start(Empty {}).await?; + + let passthrough_status = next_status(&mut status_stream).await?; + assert_eq!( + passthrough_status.state(), + crate::daemon::rpc::grpc_defs::State::Running + ); + + client.tunnel_client.tunnel_stop(Empty {}).await?; + + let stopped_status = next_status(&mut status_stream).await?; + assert_eq!( + stopped_status.state(), + crate::daemon::rpc::grpc_defs::State::Stopped + ); + + client + .networks_client + .network_add(Network { + id: 1, + r#type: NetworkType::WireGuard.into(), + payload: sample_wireguard_payload(), + }) + .await?; + + let networks_after_wg = next_networks(&mut network_stream).await?; + assert_eq!( + network_ids(&networks_after_wg), + vec![(1, NetworkType::WireGuard)] + ); + + let wireguard_config = next_configuration(&mut config_stream).await?; + assert_eq!( + wireguard_config.addresses, + vec!["10.8.0.2/32", "fd00::2/128"] + ); + assert_eq!(wireguard_config.mtu, 1420); + + client + .networks_client + .network_add(Network { + id: 2, + r#type: NetworkType::WireGuard.into(), + payload: sample_wireguard_payload_with("10.77.0.2/32", 1380), + }) + .await?; + + let networks_after_second_add = next_networks(&mut network_stream).await?; + assert_eq!( + network_ids(&networks_after_second_add), + vec![(1, NetworkType::WireGuard), (2, NetworkType::WireGuard)] + ); + + let still_wireguard = next_configuration(&mut config_stream).await?; + assert_eq!(still_wireguard.addresses, wireguard_config.addresses); + + client + .networks_client + 
.network_reorder(NetworkReorderRequest { id: 2, index: 0 }) + .await?; + + let networks_after_reorder = next_networks(&mut network_stream).await?; + assert_eq!( + network_ids(&networks_after_reorder), + vec![(2, NetworkType::WireGuard), (1, NetworkType::WireGuard)] + ); + + let second_wireguard_config = next_configuration(&mut config_stream).await?; + assert_eq!(second_wireguard_config.addresses, vec!["10.77.0.2/32"]); + assert_eq!(second_wireguard_config.mtu, 1380); + + daemon_task.abort(); + let _ = daemon_task.await; + cleanup_path(&socket_path); + cleanup_path(&db_path); + + Ok(()) + } + + fn temp_path(ext: &str) -> PathBuf { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("system time is after unix epoch") + .as_nanos(); + std::env::temp_dir().join(format!("burrow-daemon-test-{now}.{ext}")) + } + + fn cleanup_path(path: &Path) { + let _ = std::fs::remove_file(path); + } + + fn sample_wireguard_payload() -> Vec { + br#"[Interface] +PrivateKey = OEPVdomeLTxTIBvv3TYsJRge0Hp9NMiY0sIrhT8OWG8= +Address = 10.8.0.2/32, fd00::2/128 +ListenPort = 51820 +MTU = 1420 + +[Peer] +PublicKey = 8GaFjVO6c4luCHG4ONO+1bFG8tO+Zz5/Gy+Geht1USM= +PresharedKey = ha7j4BjD49sIzyF9SNlbueK0AMHghlj6+u0G3bzC698= +AllowedIPs = 0.0.0.0/0, ::/0 +Endpoint = wg.burrow.rs:51820 +"# + .to_vec() + } + + fn sample_wireguard_payload_with(address: &str, mtu: u16) -> Vec { + format!( + "[Interface]\nPrivateKey = OEPVdomeLTxTIBvv3TYsJRge0Hp9NMiY0sIrhT8OWG8=\nAddress = {address}\nListenPort = 51820\nMTU = {mtu}\n\n[Peer]\nPublicKey = 8GaFjVO6c4luCHG4ONO+1bFG8tO+Zz5/Gy+Geht1USM=\nPresharedKey = ha7j4BjD49sIzyF9SNlbueK0AMHghlj6+u0G3bzC698=\nAllowedIPs = 0.0.0.0/0, ::/0\nEndpoint = wg.burrow.rs:51820\n" + ) + .into_bytes() + } + + async fn next_configuration( + stream: &mut tonic::Streaming, + ) -> Result { + timeout(Duration::from_secs(5), stream.message()) + .await?? 
+ .ok_or_else(|| anyhow!("configuration stream ended unexpectedly")) + } + + async fn next_networks( + stream: &mut tonic::Streaming, + ) -> Result { + timeout(Duration::from_secs(5), stream.message()) + .await?? + .ok_or_else(|| anyhow!("network stream ended unexpectedly")) + } + + async fn next_status( + stream: &mut tonic::Streaming, + ) -> Result { + timeout(Duration::from_secs(5), stream.message()) + .await?? + .ok_or_else(|| anyhow!("status stream ended unexpectedly")) + } + + fn network_ids(response: &NetworkListResponse) -> Vec<(i32, NetworkType)> { + response + .network + .iter() + .map(|network| (network.id, network.r#type())) + .collect() + } +} diff --git a/burrow/src/daemon/net/unix.rs b/burrow/src/daemon/net/unix.rs index 975c470..f7f9433 100644 --- a/burrow/src/daemon/net/unix.rs +++ b/burrow/src/daemon/net/unix.rs @@ -11,11 +11,7 @@ use tokio::{ use tracing::{debug, error, info}; use crate::daemon::rpc::{ - DaemonCommand, - DaemonMessage, - DaemonNotification, - DaemonRequest, - DaemonResponse, + DaemonCommand, DaemonMessage, DaemonNotification, DaemonRequest, DaemonResponse, DaemonResponseData, }; diff --git a/burrow/src/daemon/rpc/client.rs b/burrow/src/daemon/rpc/client.rs index 862e34c..aa84c64 100644 --- a/burrow/src/daemon/rpc/client.rs +++ b/burrow/src/daemon/rpc/client.rs @@ -1,30 +1,45 @@ use anyhow::Result; use hyper_util::rt::TokioIo; +use std::path::Path; use tokio::net::UnixStream; use tonic::transport::{Endpoint, Uri}; use tower::service_fn; -use super::grpc_defs::{networks_client::NetworksClient, tunnel_client::TunnelClient}; +use super::grpc_defs::{ + networks_client::NetworksClient, tailnet_control_client::TailnetControlClient, + tunnel_client::TunnelClient, +}; use crate::daemon::get_socket_path; pub struct BurrowClient { pub networks_client: NetworksClient, + pub tailnet_client: TailnetControlClient, pub tunnel_client: TunnelClient, } impl BurrowClient { #[cfg(any(target_os = "linux", target_vendor = "apple"))] pub async fn 
from_uds() -> Result { + Self::from_uds_path(get_socket_path()).await + } + + #[cfg(any(target_os = "linux", target_vendor = "apple"))] + pub async fn from_uds_path(path: impl AsRef) -> Result { + let socket_path = path.as_ref().to_owned(); let channel = Endpoint::try_from("http://[::]:50051")? // NOTE: this is a hack(?) - .connect_with_connector(service_fn(|_: Uri| async { - let sock_path = get_socket_path(); - Ok::<_, std::io::Error>(TokioIo::new(UnixStream::connect(sock_path).await?)) + .connect_with_connector(service_fn(move |_: Uri| { + let socket_path = socket_path.clone(); + async move { + Ok::<_, std::io::Error>(TokioIo::new(UnixStream::connect(&socket_path).await?)) + } })) .await?; let nw_client = NetworksClient::new(channel.clone()); + let tailnet_client = TailnetControlClient::new(channel.clone()); let tun_client = TunnelClient::new(channel.clone()); Ok(BurrowClient { networks_client: nw_client, + tailnet_client, tunnel_client: tun_client, }) } diff --git a/burrow/src/daemon/rpc/request.rs b/burrow/src/daemon/rpc/request.rs index e9480aa..91562cc 100644 --- a/burrow/src/daemon/rpc/request.rs +++ b/burrow/src/daemon/rpc/request.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use tun::TunOptions; #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] -#[serde(tag="method", content="params")] +#[serde(tag = "method", content = "params")] pub enum DaemonCommand { Start(DaemonStartOptions), ServerInfo, diff --git a/burrow/src/daemon/rpc/response.rs b/burrow/src/daemon/rpc/response.rs index 8948ca4..6d03581 100644 --- a/burrow/src/daemon/rpc/response.rs +++ b/burrow/src/daemon/rpc/response.rs @@ -68,6 +68,14 @@ impl TryFrom<&TunInterface> for ServerInfo { #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] pub struct ServerConfig { pub address: Vec, + #[serde(default)] + pub routes: Vec, + #[serde(default)] + pub dns_servers: Vec, + #[serde(default)] + pub search_domains: Vec, + #[serde(default)] + pub include_default_route: bool, pub 
name: Option, pub mtu: Option, } @@ -78,6 +86,14 @@ impl TryFrom<&Config> for ServerConfig { fn try_from(config: &Config) -> anyhow::Result { Ok(ServerConfig { address: config.interface.address.clone(), + routes: config + .peers + .iter() + .flat_map(|peer| peer.allowed_ips.iter().cloned()) + .collect(), + dns_servers: config.interface.dns.clone(), + search_domains: Vec::new(), + include_default_route: false, name: None, mtu: config.interface.mtu.map(|mtu| mtu as i32), }) @@ -88,6 +104,10 @@ impl Default for ServerConfig { fn default() -> Self { Self { address: vec!["10.13.13.2".to_string()], // Dummy remote address + routes: Vec::new(), + dns_servers: Vec::new(), + search_domains: Vec::new(), + include_default_route: false, name: None, mtu: None, } diff --git a/burrow/src/daemon/rpc/snapshots/burrow__daemon__rpc__response__response_serialization-4.snap b/burrow/src/daemon/rpc/snapshots/burrow__daemon__rpc__response__response_serialization-4.snap index c40db25..68b4195 100644 --- a/burrow/src/daemon/rpc/snapshots/burrow__daemon__rpc__response__response_serialization-4.snap +++ b/burrow/src/daemon/rpc/snapshots/burrow__daemon__rpc__response__response_serialization-4.snap @@ -2,4 +2,4 @@ source: burrow/src/daemon/rpc/response.rs expression: "serde_json::to_string(&DaemonResponse::new(Ok::(DaemonResponseData::ServerConfig(ServerConfig::default()))))?" 
--- -{"result":{"Ok":{"type":"ServerConfig","address":["10.13.13.2"],"name":null,"mtu":null}},"id":0} +{"result":{"Ok":{"type":"ServerConfig","address":["10.13.13.2"],"routes":[],"dns_servers":[],"search_domains":[],"include_default_route":false,"name":null,"mtu":null}},"id":0} diff --git a/burrow/src/daemon/runtime.rs b/burrow/src/daemon/runtime.rs new file mode 100644 index 0000000..31821a2 --- /dev/null +++ b/burrow/src/daemon/runtime.rs @@ -0,0 +1,618 @@ +use std::{path::PathBuf, sync::Arc}; + +use anyhow::{bail, Context, Result}; +use tokio::{ + io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, + net::UnixStream, + sync::{broadcast, mpsc, RwLock}, + task::JoinHandle, + time::{sleep, Duration}, +}; +use tun::{tokio::TunInterface, TunOptions}; + +use super::rpc::{ + grpc_defs::{Network, NetworkType}, + ServerConfig, +}; +use crate::{ + auth::server::tailscale::{ + default_hostname, packet_socket_path, spawn_tailscale_helper, TailscaleHelperProcess, + TailscaleLoginStartRequest, TailscaleLoginStatus, + }, + control::{discovery, TailnetConfig}, + wireguard::{Config, Interface as WireGuardInterface}, +}; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum RuntimeIdentity { + Passthrough, + Network { + id: i32, + network_type: NetworkType, + payload: Vec, + }, +} + +#[derive(Clone, Debug)] +pub enum ResolvedTunnel { + Passthrough { + identity: RuntimeIdentity, + }, + Tailnet { + identity: RuntimeIdentity, + config: TailnetConfig, + }, + WireGuard { + identity: RuntimeIdentity, + config: Config, + }, +} + +impl ResolvedTunnel { + pub fn from_networks(networks: &[Network]) -> Result { + let Some(network) = networks.first() else { + return Ok(Self::Passthrough { + identity: RuntimeIdentity::Passthrough, + }); + }; + + let identity = RuntimeIdentity::Network { + id: network.id, + network_type: network.r#type(), + payload: network.payload.clone(), + }; + + match network.r#type() { + NetworkType::Tailnet => { + let config = 
TailnetConfig::from_slice(&network.payload)?; + Ok(Self::Tailnet { identity, config }) + } + NetworkType::WireGuard => { + let payload = String::from_utf8(network.payload.clone()) + .context("wireguard payload must be valid UTF-8")?; + let config = Config::from_content_fmt(&payload, "ini")?; + Ok(Self::WireGuard { identity, config }) + } + } + } + + pub fn identity(&self) -> &RuntimeIdentity { + match self { + Self::Passthrough { identity } + | Self::Tailnet { identity, .. } + | Self::WireGuard { identity, .. } => identity, + } + } + + pub fn server_config(&self) -> Result { + match self { + Self::Passthrough { .. } => Ok(ServerConfig { + address: Vec::new(), + routes: Vec::new(), + dns_servers: Vec::new(), + search_domains: Vec::new(), + include_default_route: false, + name: None, + mtu: Some(1500), + }), + Self::Tailnet { .. } => Ok(ServerConfig { + address: Vec::new(), + routes: tailnet_routes(), + dns_servers: tailnet_dns_servers(), + search_domains: Vec::new(), + include_default_route: false, + name: None, + mtu: Some(1280), + }), + Self::WireGuard { config, .. 
} => ServerConfig::try_from(config), + } + } + + pub async fn start( + self, + tun_interface: Arc>>, + tailnet_helper: Option>, + ) -> Result { + match self { + Self::Passthrough { identity } => Ok(ActiveTunnel::Passthrough { + identity, + server_config: ServerConfig { + address: Vec::new(), + routes: Vec::new(), + dns_servers: Vec::new(), + search_domains: Vec::new(), + include_default_route: false, + name: None, + mtu: Some(1500), + }, + }), + Self::Tailnet { identity, config } => { + let (helper, shutdown_helper_on_stop) = match tailnet_helper { + Some(helper) => (helper, false), + None => { + let helper_request = tailnet_helper_request(&identity, &config); + let helper = Arc::new(spawn_tailscale_helper(&helper_request).await?); + (helper, true) + } + }; + let status = wait_for_tailnet_ready(helper.as_ref()).await?; + let server_config = tailnet_server_config(&status); + let packet_socket = helper + .packet_socket() + .map(PathBuf::from) + .ok_or_else(|| anyhow::anyhow!("tailnet helper did not report a packet socket"))?; + let packet_bridge = connect_tailnet_packet_bridge(packet_socket).await?; + #[cfg(target_vendor = "apple")] + let tun_task = None; + #[cfg(not(target_vendor = "apple"))] + let tun_task = { + let tun = TunOptions::new().open()?; + tun_interface.write().await.replace(tun); + Some(tokio::spawn(run_tailnet_tun_bridge( + tun_interface.clone(), + packet_bridge.outbound_sender(), + packet_bridge.subscribe(), + ))) + }; + + Ok(ActiveTunnel::Tailnet { + identity, + server_config, + helper, + shutdown_helper_on_stop, + packet_bridge, + tun_task, + }) + } + Self::WireGuard { identity, config } => { + let server_config = ServerConfig::try_from(&config)?; + let tun = TunOptions::new().open()?; + tun_interface.write().await.replace(tun); + + match start_wireguard_runtime(config, tun_interface.clone()).await { + Ok((interface, task)) => Ok(ActiveTunnel::WireGuard { + identity, + server_config, + interface, + task, + }), + Err(err) => { + 
tun_interface.write().await.take(); + Err(err) + } + } + } + } + } +} + +pub enum ActiveTunnel { + Passthrough { + identity: RuntimeIdentity, + server_config: ServerConfig, + }, + Tailnet { + identity: RuntimeIdentity, + server_config: ServerConfig, + helper: Arc, + shutdown_helper_on_stop: bool, + packet_bridge: TailnetPacketBridge, + tun_task: Option>>, + }, + WireGuard { + identity: RuntimeIdentity, + server_config: ServerConfig, + interface: Arc>, + task: JoinHandle>, + }, +} + +impl ActiveTunnel { + pub fn identity(&self) -> &RuntimeIdentity { + match self { + Self::Passthrough { identity, .. } + | Self::Tailnet { identity, .. } + | Self::WireGuard { identity, .. } => identity, + } + } + + pub fn server_config(&self) -> &ServerConfig { + match self { + Self::Passthrough { server_config, .. } + | Self::Tailnet { server_config, .. } + | Self::WireGuard { server_config, .. } => server_config, + } + } + + pub fn packet_stream( + &self, + ) -> Option<(mpsc::Sender>, broadcast::Receiver>)> { + match self { + Self::Tailnet { packet_bridge, .. } => Some(( + packet_bridge.outbound_sender(), + packet_bridge.subscribe(), + )), + _ => None, + } + } + + pub async fn shutdown(self, tun_interface: &Arc>>) -> Result<()> { + match self { + Self::Passthrough { .. } => Ok(()), + Self::Tailnet { + helper, + shutdown_helper_on_stop, + packet_bridge, + tun_task, + .. + } => { + if let Some(tun_task) = tun_task { + tun_task.abort(); + match tun_task.await { + Ok(Ok(())) => {} + Ok(Err(err)) => return Err(err), + Err(err) if err.is_cancelled() => {} + Err(err) => return Err(err.into()), + } + } + packet_bridge.task.abort(); + match packet_bridge.task.await { + Ok(Ok(())) => {} + Ok(Err(err)) => return Err(err), + Err(err) if err.is_cancelled() => {} + Err(err) => return Err(err.into()), + } + tun_interface.write().await.take(); + if shutdown_helper_on_stop { + helper.shutdown().await?; + } + Ok(()) + } + Self::WireGuard { + interface, + task, + .. 
+ } => { + interface.read().await.remove_tun().await; + let task_result = task.await; + tun_interface.write().await.take(); + task_result??; + Ok(()) + } + } + } +} + +pub struct TailnetPacketBridge { + outbound: mpsc::Sender>, + inbound: broadcast::Sender>, + task: JoinHandle>, +} + +impl TailnetPacketBridge { + fn outbound_sender(&self) -> mpsc::Sender> { + self.outbound.clone() + } + + fn subscribe(&self) -> broadcast::Receiver> { + self.inbound.subscribe() + } +} + +async fn start_wireguard_runtime( + config: Config, + tun_interface: Arc>>, +) -> Result<(Arc>, JoinHandle>)> { + let mut interface: WireGuardInterface = config.try_into()?; + interface.set_tun_ref(tun_interface).await; + let interface = Arc::new(RwLock::new(interface)); + let run_interface = interface.clone(); + let task = tokio::spawn(async move { + let guard = run_interface.read().await; + guard.run().await + }); + Ok((interface, task)) +} + +pub(crate) fn tailnet_helper_request( + identity: &RuntimeIdentity, + config: &TailnetConfig, +) -> TailscaleLoginStartRequest { + let account_name = config + .account + .as_deref() + .filter(|value| !value.trim().is_empty()) + .unwrap_or("default") + .to_owned(); + let identity_name = config + .identity + .as_deref() + .filter(|value| !value.trim().is_empty()) + .map(ToOwned::to_owned) + .unwrap_or_else(|| match identity { + RuntimeIdentity::Network { id, .. 
} => format!("network-{id}"), + RuntimeIdentity::Passthrough => "apple".to_owned(), + }); + let control_url = config.authority.as_deref().and_then(|authority| { + let authority = discovery::normalize_authority(authority); + (!discovery::is_managed_tailscale_authority(&authority)).then_some(authority) + }); + + let mut request = TailscaleLoginStartRequest { + account_name, + identity_name, + hostname: config.hostname.clone(), + control_url, + packet_socket: None, + }; + request.packet_socket = Some(packet_socket_path(&request).display().to_string()); + if request + .hostname + .as_deref() + .map(|value| value.trim().is_empty()) + .unwrap_or(true) + { + request.hostname = Some(default_hostname(&request)); + } + request +} + +async fn wait_for_tailnet_ready(helper: &TailscaleHelperProcess) -> Result { + let mut last_status = None; + for _ in 0..120 { + let status = helper.status().await?; + if status.running && !status.tailscale_ips.is_empty() { + return Ok(status); + } + if status.needs_login || status.auth_url.is_some() { + bail!("tailnet runtime requires a completed login before the tunnel can start"); + } + last_status = Some(status); + sleep(Duration::from_millis(250)).await; + } + + if let Some(status) = last_status { + bail!( + "tailnet helper never became ready (backend_state={})", + status.backend_state + ); + } + bail!("tailnet helper never produced a status update") +} + +fn tailnet_server_config(status: &TailscaleLoginStatus) -> ServerConfig { + let mut search_domains = Vec::new(); + if let Some(suffix) = status.magic_dns_suffix.as_deref() { + let suffix = suffix.trim().trim_end_matches('.'); + if !suffix.is_empty() { + search_domains.push(suffix.to_owned()); + } + } + + ServerConfig { + address: status + .tailscale_ips + .iter() + .map(|ip| tailnet_cidr(ip)) + .collect(), + routes: tailnet_routes(), + dns_servers: tailnet_dns_servers(), + search_domains, + include_default_route: false, + name: status.self_dns_name.clone(), + mtu: Some(1280), + } +} + +fn 
tailnet_routes() -> Vec { + vec!["100.64.0.0/10".to_owned(), "fd7a:115c:a1e0::/48".to_owned()] +} + +fn tailnet_dns_servers() -> Vec { + vec!["100.100.100.100".to_owned()] +} + +fn tailnet_cidr(ip: &str) -> String { + if ip.contains('/') { + return ip.to_owned(); + } + if ip.contains(':') { + format!("{ip}/128") + } else { + format!("{ip}/32") + } +} + +async fn connect_tailnet_packet_bridge(packet_socket: PathBuf) -> Result { + let mut last_error = None; + let mut stream = None; + for _ in 0..50 { + match UnixStream::connect(&packet_socket).await { + Ok(connected) => { + stream = Some(connected); + break; + } + Err(err) => { + last_error = Some(err); + sleep(Duration::from_millis(100)).await; + } + } + } + let stream = if let Some(stream) = stream { + stream + } else { + return Err(last_error + .context("failed to connect to tailnet helper packet socket")? + .into()); + }; + + let (outbound_tx, outbound_rx) = mpsc::channel(128); + let (inbound_tx, _) = broadcast::channel(128); + let task = tokio::spawn(run_tailnet_socket_bridge( + stream, + outbound_rx, + inbound_tx.clone(), + )); + + Ok(TailnetPacketBridge { + outbound: outbound_tx, + inbound: inbound_tx, + task, + }) +} + +async fn run_tailnet_socket_bridge( + stream: UnixStream, + mut outbound_rx: mpsc::Receiver>, + inbound_tx: broadcast::Sender>, +) -> Result<()> { + let (mut reader, mut writer) = stream.into_split(); + + let inbound = tokio::spawn(async move { + loop { + let packet = read_packet_frame(&mut reader).await?; + tracing::debug!( + "tailnet packet bridge received {} bytes from helper socket", + packet.len() + ); + let _ = inbound_tx.send(packet); + } + #[allow(unreachable_code)] + Result::<()>::Ok(()) + }); + + let outbound = tokio::spawn(async move { + while let Some(packet) = outbound_rx.recv().await { + tracing::debug!( + "tailnet packet bridge writing {} bytes to helper socket", + packet.len() + ); + write_packet_frame(&mut writer, &packet).await?; + } + Result::<()>::Ok(()) + }); + + let 
(inbound_result, outbound_result) = tokio::try_join!(inbound, outbound)?; + inbound_result?; + outbound_result?; + Ok(()) +} + +#[cfg(not(target_vendor = "apple"))] +async fn run_tailnet_tun_bridge( + tun_interface: Arc>>, + outbound_tx: mpsc::Sender>, + mut inbound_rx: broadcast::Receiver>, +) -> Result<()> { + let inbound_tun = tun_interface.clone(); + let inbound = tokio::spawn(async move { + loop { + let packet = match inbound_rx.recv().await { + Ok(packet) => packet, + Err(broadcast::error::RecvError::Lagged(_)) => continue, + Err(broadcast::error::RecvError::Closed) => break, + }; + let guard = inbound_tun.read().await; + let Some(tun) = guard.as_ref() else { + bail!("tailnet tun interface unavailable"); + }; + tun.send(&packet) + .await + .context("failed to write tailnet packet to tun")?; + } + Result::<()>::Ok(()) + }); + + let outbound_tun = tun_interface.clone(); + let outbound = tokio::spawn(async move { + let mut buf = vec![0u8; 65_535]; + loop { + let len = { + let guard = outbound_tun.read().await; + let Some(tun) = guard.as_ref() else { + bail!("tailnet tun interface unavailable"); + }; + tun.recv(&mut buf) + .await + .context("failed to read packet from tailnet tun")? 
+ }; + outbound_tx + .send(buf[..len].to_vec()) + .await + .context("failed to forward packet to tailnet helper")?; + } + #[allow(unreachable_code)] + Result::<()>::Ok(()) + }); + + let (inbound_result, outbound_result) = tokio::try_join!(inbound, outbound)?; + inbound_result?; + outbound_result?; + Ok(()) +} + +async fn read_packet_frame(reader: &mut R) -> Result> +where + R: AsyncRead + Unpin, +{ + let mut len_buf = [0u8; 4]; + reader + .read_exact(&mut len_buf) + .await + .context("failed to read tailnet packet frame length")?; + let len = u32::from_be_bytes(len_buf) as usize; + let mut packet = vec![0u8; len]; + reader + .read_exact(&mut packet) + .await + .context("failed to read tailnet packet frame payload")?; + Ok(packet) +} + +async fn write_packet_frame(writer: &mut W, packet: &[u8]) -> Result<()> +where + W: AsyncWrite + Unpin, +{ + writer + .write_all(&(packet.len() as u32).to_be_bytes()) + .await + .context("failed to write tailnet packet frame length")?; + writer + .write_all(packet) + .await + .context("failed to write tailnet packet frame payload")?; + writer + .flush() + .await + .context("failed to flush tailnet packet frame") +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn no_networks_resolve_to_passthrough() { + let resolved = ResolvedTunnel::from_networks(&[]).unwrap(); + assert_eq!(resolved.identity(), &RuntimeIdentity::Passthrough); + assert_eq!( + resolved.server_config().unwrap().address, + Vec::::new() + ); + } + + #[test] + fn tailnet_server_config_uses_host_prefixes() { + let status = TailscaleLoginStatus { + running: true, + tailscale_ips: vec!["100.101.102.103".to_owned(), "fd7a:115c:a1e0::123".to_owned()], + ..Default::default() + }; + let config = tailnet_server_config(&status); + assert_eq!( + config.address, + vec!["100.101.102.103/32", "fd7a:115c:a1e0::123/128"] + ); + assert_eq!(config.mtu, Some(1280)); + } +} diff --git a/burrow/src/database.rs b/burrow/src/database.rs index c650d55..fe9a3c7 100644 --- 
a/burrow/src/database.rs +++ b/burrow/src/database.rs @@ -4,11 +4,9 @@ use anyhow::Result; use rusqlite::{params, Connection}; use crate::{ + control::TailnetConfig, daemon::rpc::grpc_defs::{ - Network as RPCNetwork, - NetworkDeleteRequest, - NetworkReorderRequest, - NetworkType, + Network as RPCNetwork, NetworkDeleteRequest, NetworkReorderRequest, NetworkType, }, wireguard::config::{Config, Interface, Peer}, }; @@ -124,35 +122,26 @@ pub fn dump_interface(conn: &Connection, config: &Config) -> Result<()> { pub fn get_connection(path: Option<&Path>) -> Result { let p = path.unwrap_or_else(|| std::path::Path::new(DB_PATH)); - if !p.exists() { - let conn = Connection::open(p)?; - initialize_tables(&conn)?; - dump_interface(&conn, &Config::default())?; - return Ok(conn); - } - Ok(Connection::open(p)?) + let conn = Connection::open(p)?; + initialize_tables(&conn)?; + Ok(conn) } pub fn add_network(conn: &Connection, network: &RPCNetwork) -> Result<()> { + validate_network_payload(network)?; let mut stmt = conn.prepare("INSERT INTO network (id, type, payload) VALUES (?, ?, ?)")?; stmt.execute(params![ network.id, network.r#type().as_str_name(), &network.payload ])?; - if network.r#type() == NetworkType::WireGuard { - let payload_str = String::from_utf8(network.payload.clone())?; - let wg_config = Config::from_content_fmt(&payload_str, "ini")?; - dump_interface(conn, &wg_config)?; - } Ok(()) } pub fn list_networks(conn: &Connection) -> Result> { - let mut stmt = conn.prepare("SELECT id, type, payload FROM network ORDER BY idx")?; + let mut stmt = conn.prepare("SELECT id, type, payload FROM network ORDER BY idx, id")?; let networks: Vec = stmt .query_map([], |row| { - println!("row: {:?}", row); let network_id: i32 = row.get(0)?; let network_type: String = row.get(1)?; let network_type = NetworkType::from_str_name(network_type.as_str()) @@ -169,12 +158,19 @@ pub fn list_networks(conn: &Connection) -> Result> { } pub fn reorder_network(conn: &Connection, req: 
NetworkReorderRequest) -> Result<()> { - let mut stmt = conn.prepare("UPDATE network SET idx = ? WHERE id = ?")?; - let res = stmt.execute(params![req.index, req.id])?; - if res == 0 { + let mut ordered_ids = ordered_network_ids(conn)?; + let Some(current_idx) = ordered_ids.iter().position(|id| *id == req.id) else { return Err(anyhow::anyhow!("No such network exists")); - } - Ok(()) + }; + + let target_idx = usize::try_from(req.index) + .map_err(|_| anyhow::anyhow!("Network index must be non-negative"))?; + + let moved_id = ordered_ids.remove(current_idx); + let target_idx = target_idx.min(ordered_ids.len()); + ordered_ids.insert(target_idx, moved_id); + + renumber_networks(conn, &ordered_ids) } pub fn delete_network(conn: &Connection, req: NetworkDeleteRequest) -> Result<()> { @@ -183,7 +179,8 @@ pub fn delete_network(conn: &Connection, req: NetworkDeleteRequest) -> Result<() if res == 0 { return Err(anyhow::anyhow!("No such network exists")); } - Ok(()) + let ordered_ids = ordered_network_ids(conn)?; + renumber_networks(conn, &ordered_ids) } fn parse_lst(s: &str) -> Vec { @@ -200,9 +197,86 @@ fn to_lst(v: &Vec) -> String { .join(",") } +fn validate_network_payload(network: &RPCNetwork) -> Result<()> { + match network.r#type() { + NetworkType::WireGuard => { + let payload_str = String::from_utf8(network.payload.clone())?; + Config::from_content_fmt(&payload_str, "ini")?; + } + NetworkType::Tailnet => { + TailnetConfig::from_slice(&network.payload)?; + } + } + Ok(()) +} + +fn ordered_network_ids(conn: &Connection) -> Result> { + let mut stmt = conn.prepare("SELECT id FROM network ORDER BY idx, id")?; + let ids = stmt + .query_map([], |row| row.get::<_, i32>(0))? + .collect::>>()?; + Ok(ids) +} + +fn renumber_networks(conn: &Connection, ordered_ids: &[i32]) -> Result<()> { + conn.execute_batch("BEGIN IMMEDIATE")?; + let result = (|| -> Result<()> { + let mut stmt = conn.prepare("UPDATE network SET idx = ? 
WHERE id = ?")?; + for (idx, id) in ordered_ids.iter().enumerate() { + stmt.execute(params![idx as i32, id])?; + } + Ok(()) + })(); + + match result { + Ok(()) => { + conn.execute_batch("COMMIT")?; + Ok(()) + } + Err(err) => { + let _ = conn.execute_batch("ROLLBACK"); + Err(err) + } + } +} + #[cfg(test)] mod tests { use super::*; + use tempfile::tempdir; + + fn sample_wireguard_payload() -> Vec { + br#"[Interface] +PrivateKey = OEPVdomeLTxTIBvv3TYsJRge0Hp9NMiY0sIrhT8OWG8= +Address = 10.13.13.2/24 +ListenPort = 51820 + +[Peer] +PublicKey = 8GaFjVO6c4luCHG4ONO+1bFG8tO+Zz5/Gy+Geht1USM= +PresharedKey = ha7j4BjD49sIzyF9SNlbueK0AMHghlj6+u0G3bzC698= +AllowedIPs = 0.0.0.0/0, 8.8.8.8/32 +Endpoint = wg.burrow.rs:51820 +"# + .to_vec() + } + + fn sample_wireguard_payload_with_address(address: &str, mtu: u16) -> Vec { + format!( + "[Interface]\nPrivateKey = OEPVdomeLTxTIBvv3TYsJRge0Hp9NMiY0sIrhT8OWG8=\nAddress = {address}\nListenPort = 51820\nMTU = {mtu}\n\n[Peer]\nPublicKey = 8GaFjVO6c4luCHG4ONO+1bFG8tO+Zz5/Gy+Geht1USM=\nPresharedKey = ha7j4BjD49sIzyF9SNlbueK0AMHghlj6+u0G3bzC698=\nAllowedIPs = 0.0.0.0/0\nEndpoint = wg.burrow.rs:51820\n" + ) + .into_bytes() + } + + fn sample_tailnet_payload() -> Vec { + br#"{ + "provider":"tailscale", + "account":"default", + "identity":"apple", + "tailnet":"example.ts.net", + "hostname":"burrow-phone" +}"# + .to_vec() + } #[test] fn test_db() { @@ -213,4 +287,123 @@ mod tests { let loaded = load_interface(&conn, "1").unwrap(); assert_eq!(config, loaded); } + + #[test] + fn add_network_validates_payloads() { + let conn = Connection::open_in_memory().unwrap(); + initialize_tables(&conn).unwrap(); + + add_network( + &conn, + &RPCNetwork { + id: 1, + r#type: NetworkType::WireGuard.into(), + payload: sample_wireguard_payload(), + }, + ) + .unwrap(); + + add_network( + &conn, + &RPCNetwork { + id: 2, + r#type: NetworkType::Tailnet.into(), + payload: sample_tailnet_payload(), + }, + ) + .unwrap(); + + add_network( + &conn, + &RPCNetwork { + id: 3, + 
r#type: NetworkType::WireGuard.into(), + payload: sample_wireguard_payload_with_address("10.42.0.2/32", 1380), + }, + ) + .unwrap(); + + assert!(add_network( + &conn, + &RPCNetwork { + id: 4, + r#type: NetworkType::WireGuard.into(), + payload: b"not-a-config".to_vec(), + }, + ) + .is_err()); + + assert!(add_network( + &conn, + &RPCNetwork { + id: 5, + r#type: NetworkType::Tailnet.into(), + payload: b"not-a-tailnet-config".to_vec(), + }, + ) + .is_err()); + + let ids: Vec = list_networks(&conn) + .unwrap() + .into_iter() + .map(|n| n.id) + .collect(); + assert_eq!(ids, vec![1, 2, 3]); + } + + #[test] + fn reorder_and_delete_networks_keep_priority_stable() { + let conn = Connection::open_in_memory().unwrap(); + initialize_tables(&conn).unwrap(); + + for (id, address, mtu) in [ + (1, "10.42.0.2/32", 1380), + (2, "10.42.0.3/32", 1381), + (3, "10.42.0.4/32", 1382), + ] { + add_network( + &conn, + &RPCNetwork { + id, + r#type: NetworkType::WireGuard.into(), + payload: sample_wireguard_payload_with_address(address, mtu), + }, + ) + .unwrap(); + } + + reorder_network(&conn, NetworkReorderRequest { id: 3, index: 0 }).unwrap(); + let ids: Vec = list_networks(&conn) + .unwrap() + .into_iter() + .map(|n| n.id) + .collect(); + assert_eq!(ids, vec![3, 1, 2]); + + delete_network(&conn, NetworkDeleteRequest { id: 1 }).unwrap(); + let ids: Vec = list_networks(&conn) + .unwrap() + .into_iter() + .map(|n| n.id) + .collect(); + assert_eq!(ids, vec![3, 2]); + } + + #[test] + fn get_connection_does_not_seed_a_default_interface() { + let dir = tempdir().unwrap(); + let db_path = dir.path().join("burrow.sqlite3"); + + let conn = get_connection(Some(db_path.as_path())).unwrap(); + + let interface_count: i64 = conn + .query_row("SELECT COUNT(*) FROM wg_interface", [], |row| row.get(0)) + .unwrap(); + let network_count: i64 = conn + .query_row("SELECT COUNT(*) FROM network", [], |row| row.get(0)) + .unwrap(); + + assert_eq!(interface_count, 0); + assert_eq!(network_count, 0); + } } diff 
--git a/burrow/src/lib.rs b/burrow/src/lib.rs index b77ce36..7867d18 100644 --- a/burrow/src/lib.rs +++ b/burrow/src/lib.rs @@ -1,5 +1,6 @@ #[cfg(any(target_os = "linux", target_vendor = "apple"))] -pub mod tor; +pub mod control; + #[cfg(any(target_os = "linux", target_vendor = "apple"))] pub mod wireguard; @@ -9,12 +10,16 @@ mod auth; mod daemon; #[cfg(any(target_os = "linux", target_vendor = "apple"))] pub mod database; +#[cfg(target_os = "linux")] +pub mod tor; pub(crate) mod tracing; +#[cfg(target_os = "linux")] +pub mod usernet; -#[cfg(target_vendor = "apple")] -pub use daemon::apple::spawn_in_process; +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +pub use daemon::apple::{spawn_in_process, spawn_in_process_with_paths}; #[cfg(any(target_os = "linux", target_vendor = "apple"))] pub use daemon::{ - rpc::DaemonResponse, rpc::ServerInfo, DaemonClient, DaemonCommand, DaemonResponseData, - DaemonStartOptions, + rpc::grpc_defs, rpc::BurrowClient, rpc::DaemonResponse, rpc::ServerInfo, DaemonClient, + DaemonCommand, DaemonResponseData, DaemonStartOptions, }; diff --git a/burrow/src/main.rs b/burrow/src/main.rs index db62a7b..cfa2085 100644 --- a/burrow/src/main.rs +++ b/burrow/src/main.rs @@ -2,15 +2,19 @@ use anyhow::Result; use clap::{Args, Parser, Subcommand}; #[cfg(any(target_os = "linux", target_vendor = "apple"))] -mod daemon; +mod control; #[cfg(any(target_os = "linux", target_vendor = "apple"))] -mod tor; +mod daemon; pub(crate) mod tracing; #[cfg(any(target_os = "linux", target_vendor = "apple"))] mod wireguard; #[cfg(any(target_os = "linux", target_vendor = "apple"))] mod auth; +#[cfg(target_os = "linux")] +mod tor; +#[cfg(target_os = "linux")] +mod usernet; #[cfg(any(target_os = "linux", target_vendor = "apple"))] use daemon::{DaemonClient, DaemonCommand}; @@ -68,6 +72,20 @@ enum Commands { NetworkReorder(NetworkReorderArgs), /// Delete Network NetworkDelete(NetworkDeleteArgs), + /// Discover a Tailnet authority through the daemon + 
TailnetDiscover(TailnetDiscoverArgs), + /// Probe a Tailnet authority through the daemon + TailnetProbe(TailnetProbeArgs), + /// Send an ICMP echo probe through the active Tailnet tunnel over daemon packet streaming + TailnetPing(TailnetPingArgs), + /// Send a UDP echo probe through the active Tailnet tunnel over daemon packet streaming + TailnetUdpEcho(TailnetUdpEchoArgs), + #[cfg(target_os = "linux")] + /// Run a command in an unshared Linux namespace using a Burrow backend + Exec(ExecArgs), + #[cfg(target_os = "linux")] + /// Run a command in a Linux user namespace with Tor-backed networking + TorExec(TorExecArgs), } #[derive(Args)] @@ -100,6 +118,55 @@ struct NetworkDeleteArgs { id: i32, } +#[derive(Args)] +struct TailnetDiscoverArgs { + email: String, +} + +#[derive(Args)] +struct TailnetProbeArgs { + authority: String, +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +#[derive(Args)] +struct TailnetPingArgs { + remote: String, + #[arg(long, default_value = "burrow-tailnet-smoke")] + payload: String, + #[arg(long, default_value_t = 5000)] + timeout_ms: u64, +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +#[derive(Args)] +struct TailnetUdpEchoArgs { + remote: String, + #[arg(long, default_value = "burrow-tailnet-smoke")] + message: String, + #[arg(long, default_value_t = 5000)] + timeout_ms: u64, +} + +#[cfg(target_os = "linux")] +#[derive(Args)] +struct TorExecArgs { + payload_path: String, + #[arg(required = true, num_args = 1.., trailing_var_arg = true)] + command: Vec, +} + +#[cfg(target_os = "linux")] +#[derive(Args)] +struct ExecArgs { + #[arg(long, value_enum)] + backend: usernet::ExecBackendKind, + #[arg(long)] + payload: Option, + #[arg(required = true, num_args = 1.., trailing_var_arg = true)] + command: Vec, +} + #[cfg(any(target_os = "linux", target_vendor = "apple"))] async fn try_start() -> Result<()> { let mut client = BurrowClient::from_uds().await?; @@ -211,6 +278,419 @@ async fn try_network_delete(id: i32) -> 
Result<()> { Ok(()) } +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +async fn try_tailnet_discover(email: &str) -> Result<()> { + let mut client = BurrowClient::from_uds().await?; + let response = client + .tailnet_client + .discover(crate::daemon::rpc::grpc_defs::TailnetDiscoverRequest { email: email.to_owned() }) + .await? + .into_inner(); + println!("Tailnet Discover Response: {:?}", response); + Ok(()) +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +async fn try_tailnet_probe(authority: &str) -> Result<()> { + let mut client = BurrowClient::from_uds().await?; + let response = client + .tailnet_client + .probe(crate::daemon::rpc::grpc_defs::TailnetProbeRequest { + authority: authority.to_owned(), + }) + .await? + .into_inner(); + println!("Tailnet Probe Response: {:?}", response); + Ok(()) +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +async fn try_tailnet_ping(remote: &str, payload: &str, timeout_ms: u64) -> Result<()> { + use std::net::IpAddr; + + use anyhow::Context; + use rand::Rng; + use tokio::{ + sync::mpsc, + time::{timeout, Duration}, + }; + use tokio_stream::wrappers::ReceiverStream; + + use crate::daemon::rpc::grpc_defs::{Empty, TunnelPacket}; + + let remote_ip: IpAddr = remote + .parse() + .with_context(|| format!("invalid remote IP address {remote}"))?; + let message = payload.as_bytes().to_vec(); + + let mut client = BurrowClient::from_uds().await?; + client.tunnel_client.tunnel_start(Empty {}).await?; + + let mut config_stream = client + .tunnel_client + .tunnel_configuration(Empty {}) + .await? + .into_inner(); + let config = config_stream + .message() + .await? 
+ .context("tunnel configuration stream ended before yielding a config")?; + let local_ip = select_tailnet_local_ip(&config.addresses, remote_ip)?; + + let identifier = rand::thread_rng().gen::(); + let sequence = 1_u16; + let packet = build_icmp_echo_request(local_ip, remote_ip, identifier, sequence, &message)?; + + let (outbound_tx, outbound_rx) = mpsc::channel::(128); + let mut tunnel_packets = client + .tunnel_client + .tunnel_packets(ReceiverStream::new(outbound_rx)) + .await? + .into_inner(); + + outbound_tx + .send(TunnelPacket { payload: packet }) + .await + .context("failed to send ICMP echo probe into daemon packet stream")?; + log::debug!( + "tailnet ping probe queued from {local_ip} to {remote_ip} identifier={identifier} sequence={sequence}" + ); + drop(outbound_tx); + + let reply = timeout(Duration::from_millis(timeout_ms), async { + loop { + let packet = tunnel_packets + .message() + .await + .context("failed to read packet from daemon packet stream")? + .context("daemon packet stream ended before returning a reply")?; + log::debug!( + "tailnet ping received {} bytes from daemon packet stream", + packet.payload.len() + ); + if let Some(reply) = + parse_icmp_echo_reply(&packet.payload, local_ip, remote_ip, identifier, sequence)? 
+ { + break Ok::<_, anyhow::Error>(reply); + } + } + }) + .await + .with_context(|| format!("timed out waiting for ICMP echo reply from {remote_ip}"))??; + + println!("Tailnet Ping Source: {}", reply.source); + println!("Tailnet Ping Destination: {}", reply.destination); + println!( + "Tailnet Ping Payload: {}", + String::from_utf8_lossy(&reply.payload) + ); + Ok(()) +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +async fn try_tailnet_udp_echo(remote: &str, message: &str, timeout_ms: u64) -> Result<()> { + use std::net::SocketAddr; + + use anyhow::{bail, Context}; + use futures::{SinkExt, StreamExt}; + use netstack_smoltcp::StackBuilder; + use tokio::{ + sync::mpsc, + time::{timeout, Duration}, + }; + use tokio_stream::wrappers::ReceiverStream; + + use crate::daemon::rpc::grpc_defs::{Empty, TunnelPacket}; + + let remote_addr: SocketAddr = remote + .parse() + .with_context(|| format!("invalid remote socket address {remote}"))?; + + let mut client = BurrowClient::from_uds().await?; + client.tunnel_client.tunnel_start(Empty {}).await?; + + let mut config_stream = client + .tunnel_client + .tunnel_configuration(Empty {}) + .await? + .into_inner(); + let config = config_stream + .message() + .await? 
+ .context("tunnel configuration stream ended before yielding a config")?; + let local_addr = select_tailnet_local_socket(&config.addresses, remote_addr.ip())?; + + let (stack, runner, udp_socket, _) = StackBuilder::default() + .enable_udp(true) + .enable_tcp(true) + .build() + .context("failed to build userspace UDP stack")?; + let runner = runner.context("userspace UDP stack runner unavailable")?; + let udp_socket = udp_socket.context("userspace UDP stack socket unavailable")?; + let (mut stack_sink, mut stack_stream) = stack.split(); + let (mut udp_reader, mut udp_writer) = udp_socket.split(); + + let (outbound_tx, outbound_rx) = mpsc::channel::(128); + let mut tunnel_packets = client + .tunnel_client + .tunnel_packets(ReceiverStream::new(outbound_rx)) + .await? + .into_inner(); + + let ingress_task = tokio::spawn(async move { + loop { + match tunnel_packets.message().await? { + Some(packet) => { + log::debug!( + "tailnet udp echo received {} bytes from daemon packet stream", + packet.payload.len() + ); + stack_sink + .send(packet.payload) + .await + .context("failed to feed inbound tailnet packet into userspace stack")?; + } + None => break, + } + } + Result::<()>::Ok(()) + }); + + let egress_task = tokio::spawn(async move { + while let Some(packet) = stack_stream.next().await { + let payload = packet.context("failed to read outbound packet from userspace stack")?; + log::debug!( + "tailnet udp echo sending {} bytes into daemon packet stream", + payload.len() + ); + outbound_tx + .send(TunnelPacket { payload }) + .await + .context("failed to forward outbound tailnet packet to daemon")?; + } + Result::<()>::Ok(()) + }); + + let runner_task = tokio::spawn(async move { runner.await.map_err(anyhow::Error::from) }); + + udp_writer + .send((message.as_bytes().to_vec(), local_addr, remote_addr)) + .await + .context("failed to send UDP echo probe into userspace stack")?; + log::debug!("tailnet udp echo probe queued from {local_addr} to {remote_addr}"); + + let response 
= timeout(Duration::from_millis(timeout_ms), udp_reader.next()) + .await + .with_context(|| format!("timed out waiting for UDP echo from {remote_addr}"))? + .context("userspace UDP stack ended before returning a reply")?; + let (payload, reply_source, reply_destination) = response; + let response_text = String::from_utf8_lossy(&payload); + + ingress_task.abort(); + egress_task.abort(); + runner_task.abort(); + + if reply_source != remote_addr { + bail!("received UDP reply from unexpected source {reply_source}"); + } + if reply_destination != local_addr { + bail!("received UDP reply for unexpected local socket {reply_destination}"); + } + if payload != message.as_bytes() { + bail!("UDP echo payload mismatch"); + } + + println!("Tailnet UDP Echo Source: {reply_source}"); + println!("Tailnet UDP Echo Destination: {reply_destination}"); + println!("Tailnet UDP Echo Payload: {response_text}"); + Ok(()) +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +fn select_tailnet_local_ip( + addresses: &[String], + remote_ip: std::net::IpAddr, +) -> Result { + use anyhow::Context; + + let family_is_v4 = remote_ip.is_ipv4(); + addresses + .iter() + .filter_map(|cidr| cidr.split('/').next()) + .filter_map(|ip| ip.parse::().ok()) + .find(|ip| ip.is_ipv4() == family_is_v4) + .with_context(|| { + format!( + "no local {} tailnet address found in daemon config {:?}", + if family_is_v4 { "IPv4" } else { "IPv6" }, + addresses + ) + }) +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +fn select_tailnet_local_socket( + addresses: &[String], + remote_ip: std::net::IpAddr, +) -> Result { + use rand::Rng; + + let local_ip = select_tailnet_local_ip(addresses, remote_ip)?; + let port = rand::thread_rng().gen_range(40000..50000); + Ok(std::net::SocketAddr::new(local_ip, port)) +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +struct IcmpEchoReply { + source: std::net::IpAddr, + destination: std::net::IpAddr, + payload: Vec, +} + +#[cfg(any(target_os = 
"linux", target_vendor = "apple"))] +fn build_icmp_echo_request( + source: std::net::IpAddr, + destination: std::net::IpAddr, + identifier: u16, + sequence: u16, + payload: &[u8], +) -> Result> { + use anyhow::bail; + + let (source, destination) = match (source, destination) { + (std::net::IpAddr::V4(source), std::net::IpAddr::V4(destination)) => (source, destination), + _ => bail!("tailnet ping currently supports IPv4 only"), + }; + + let mut icmp = Vec::with_capacity(8 + payload.len()); + icmp.push(8); + icmp.push(0); + icmp.extend_from_slice(&[0, 0]); + icmp.extend_from_slice(&identifier.to_be_bytes()); + icmp.extend_from_slice(&sequence.to_be_bytes()); + icmp.extend_from_slice(payload); + let icmp_checksum = internet_checksum(&icmp); + icmp[2..4].copy_from_slice(&icmp_checksum.to_be_bytes()); + + let total_len = 20 + icmp.len(); + let mut packet = Vec::with_capacity(total_len); + packet.push(0x45); + packet.push(0); + packet.extend_from_slice(&(total_len as u16).to_be_bytes()); + packet.extend_from_slice(&0u16.to_be_bytes()); + packet.extend_from_slice(&0u16.to_be_bytes()); + packet.push(64); + packet.push(1); + packet.extend_from_slice(&[0, 0]); + packet.extend_from_slice(&source.octets()); + packet.extend_from_slice(&destination.octets()); + let header_checksum = internet_checksum(&packet); + packet[10..12].copy_from_slice(&header_checksum.to_be_bytes()); + packet.extend_from_slice(&icmp); + Ok(packet) +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +fn parse_icmp_echo_reply( + packet: &[u8], + local_ip: std::net::IpAddr, + remote_ip: std::net::IpAddr, + identifier: u16, + sequence: u16, +) -> Result> { + use anyhow::bail; + + let (local_ip, remote_ip) = match (local_ip, remote_ip) { + (std::net::IpAddr::V4(local_ip), std::net::IpAddr::V4(remote_ip)) => (local_ip, remote_ip), + _ => bail!("tailnet ping currently supports IPv4 only"), + }; + + if packet.len() < 20 { + return Ok(None); + } + let version = packet[0] >> 4; + if version != 4 { + 
return Ok(None); + } + let ihl = (packet[0] & 0x0f) as usize * 4; + if packet.len() < ihl + 8 { + return Ok(None); + } + if packet[9] != 1 { + return Ok(None); + } + + let source = std::net::Ipv4Addr::new(packet[12], packet[13], packet[14], packet[15]); + let destination = std::net::Ipv4Addr::new(packet[16], packet[17], packet[18], packet[19]); + if source != remote_ip || destination != local_ip { + return Ok(None); + } + + let icmp = &packet[ihl..]; + if icmp[0] != 0 || icmp[1] != 0 { + return Ok(None); + } + let reply_identifier = u16::from_be_bytes([icmp[4], icmp[5]]); + let reply_sequence = u16::from_be_bytes([icmp[6], icmp[7]]); + if reply_identifier != identifier || reply_sequence != sequence { + return Ok(None); + } + + Ok(Some(IcmpEchoReply { + source: std::net::IpAddr::V4(source), + destination: std::net::IpAddr::V4(destination), + payload: icmp[8..].to_vec(), + })) +} + +#[cfg(any(target_os = "linux", target_vendor = "apple"))] +fn internet_checksum(bytes: &[u8]) -> u16 { + let mut sum = 0u32; + let mut chunks = bytes.chunks_exact(2); + for chunk in &mut chunks { + sum += u16::from_be_bytes([chunk[0], chunk[1]]) as u32; + } + if let Some(&last) = chunks.remainder().first() { + sum += (last as u32) << 8; + } + while (sum >> 16) != 0 { + sum = (sum & 0xffff) + (sum >> 16); + } + !(sum as u16) +} + +#[cfg(target_os = "linux")] +async fn try_tor_exec(payload_path: &str, command: Vec) -> Result<()> { + let exit_code = usernet::run_exec(usernet::ExecInvocation { + backend: usernet::ExecBackendKind::Tor, + payload_path: Some(payload_path.into()), + command, + }) + .await?; + if exit_code != 0 { + std::process::exit(exit_code); + } + Ok(()) +} + +#[cfg(target_os = "linux")] +async fn try_exec( + backend: usernet::ExecBackendKind, + payload: Option, + command: Vec, +) -> Result<()> { + let exit_code = usernet::run_exec(usernet::ExecInvocation { + backend, + payload_path: payload.map(Into::into), + command, + }) + .await?; + if exit_code != 0 { + 
std::process::exit(exit_code); + } + Ok(()) +} + #[cfg(any(target_os = "linux", target_vendor = "apple"))] fn handle_unexpected(res: Result) { match res { @@ -287,6 +767,25 @@ async fn main() -> Result<()> { Commands::NetworkList => try_network_list().await?, Commands::NetworkReorder(args) => try_network_reorder(args.id, args.index).await?, Commands::NetworkDelete(args) => try_network_delete(args.id).await?, + Commands::TailnetDiscover(args) => try_tailnet_discover(&args.email).await?, + Commands::TailnetProbe(args) => try_tailnet_probe(&args.authority).await?, + Commands::TailnetPing(args) => { + try_tailnet_ping(&args.remote, &args.payload, args.timeout_ms).await? + } + Commands::TailnetUdpEcho(args) => { + try_tailnet_udp_echo(&args.remote, &args.message, args.timeout_ms).await? + } + #[cfg(target_os = "linux")] + Commands::Exec(args) => { + try_exec( + args.backend.clone(), + args.payload.clone(), + args.command.clone(), + ) + .await? + } + #[cfg(target_os = "linux")] + Commands::TorExec(args) => try_tor_exec(&args.payload_path, args.command.clone()).await?, } Ok(()) diff --git a/burrow/src/tor/config.rs b/burrow/src/tor/config.rs index c2e0bc2..d3de9ec 100644 --- a/burrow/src/tor/config.rs +++ b/burrow/src/tor/config.rs @@ -1,10 +1,14 @@ -use std::{net::SocketAddr, str}; +use std::{net::SocketAddr, path::PathBuf, str}; use anyhow::{Context, Result}; use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct Config { + #[serde(default)] + pub account: Option, + #[serde(default)] + pub identity: Option, #[serde(default)] pub address: Vec, #[serde(default)] @@ -78,12 +82,68 @@ impl Config { .with_context(|| format!("invalid system tcp listen address '{}'", config.listen)), } } + + pub fn authority(&self) -> String { + "arti://local".to_owned() + } + + pub fn account_name(&self) -> String { + self.account + .clone() + .filter(|value| !value.trim().is_empty()) + .unwrap_or_else(|| "default".to_owned()) + } + + pub 
fn identity_name(&self, network_id: i32) -> String { + self.identity + .clone() + .filter(|value| !value.trim().is_empty()) + .or_else(|| self.tun_name.clone()) + .unwrap_or_else(|| format!("tor-{network_id}")) + } + + pub fn runtime_dirs(&self, network_id: i32) -> (String, String) { + let authority = sanitize_path_component(&self.authority()); + let account = sanitize_path_component(&self.account_name()); + let identity = sanitize_path_component(&self.identity_name(network_id)); + ( + append_runtime_path(&self.arti.state_dir, &[&authority, &account, &identity]), + append_runtime_path(&self.arti.cache_dir, &[&authority, &account, &identity]), + ) + } } fn default_system_listen() -> String { "127.0.0.1:9040".to_string() } +fn append_runtime_path(base: &str, parts: &[&str]) -> String { + let mut path = PathBuf::from(base); + for part in parts { + path.push(part); + } + path.to_string_lossy().to_string() +} + +fn sanitize_path_component(value: &str) -> String { + let sanitized: String = value + .chars() + .map(|ch| { + if ch.is_ascii_alphanumeric() || ch == '-' || ch == '_' { + ch + } else { + '_' + } + }) + .collect(); + + if sanitized.is_empty() { + "default".to_owned() + } else { + sanitized + } +} + #[cfg(test)] mod tests { use super::*; @@ -100,6 +160,7 @@ mod tests { let config = Config::from_payload(payload).unwrap(); assert_eq!(config.address, vec!["100.64.0.2/32"]); assert_eq!(config.listen_addr().unwrap().to_string(), "127.0.0.1:9150"); + assert!(config.runtime_dirs(7).0.contains("arti___local")); } #[test] @@ -121,5 +182,6 @@ listen = "127.0.0.1:9140" let config = Config::from_payload(payload.as_bytes()).unwrap(); assert_eq!(config.tun_name.as_deref(), Some("burrow-tor")); assert_eq!(config.listen_addr().unwrap().to_string(), "127.0.0.1:9140"); + assert_eq!(config.identity_name(11), "burrow-tor"); } } diff --git a/burrow/src/tor/dns.rs b/burrow/src/tor/dns.rs new file mode 100644 index 0000000..d918fc4 --- /dev/null +++ b/burrow/src/tor/dns.rs @@ -0,0 
+1,177 @@ +use std::{ + net::{IpAddr, SocketAddr}, + sync::Arc, +}; + +use anyhow::{Context, Result}; +use arti_client::TorClient; +use hickory_proto::{ + op::{Message, MessageType, ResponseCode}, + rr::{rdata::A, rdata::AAAA, RData, Record, RecordType}, +}; +use tokio::{net::UdpSocket, sync::watch, task::JoinError}; +use tor_rtcompat::PreferredRuntime; +use tracing::{debug, warn}; + +const DNS_TTL_SECS: u32 = 60; + +#[derive(Debug)] +pub struct TorDnsHandle { + shutdown: watch::Sender, + task: tokio::task::JoinHandle<()>, +} + +impl TorDnsHandle { + pub async fn shutdown(self) -> Result<()> { + let _ = self.shutdown.send(true); + match self.task.await { + Ok(()) => Ok(()), + Err(err) if err.is_cancelled() => Ok(()), + Err(err) => Err(join_error(err)), + } + } +} + +pub async fn spawn( + bind_addr: SocketAddr, + tor_client: Arc>, +) -> Result { + let socket = UdpSocket::bind(bind_addr) + .await + .with_context(|| format!("failed to bind tor dns proxy on {bind_addr}"))?; + let (shutdown_tx, mut shutdown_rx) = watch::channel(false); + let task = tokio::spawn(async move { + let mut buffer = [0u8; 4096]; + loop { + tokio::select! 
{ + changed = shutdown_rx.changed() => { + match changed { + Ok(()) if *shutdown_rx.borrow() => break, + Ok(()) => continue, + Err(_) => break, + } + } + received = socket.recv_from(&mut buffer) => { + let (len, peer_addr) = match received { + Ok(value) => value, + Err(err) => { + warn!(?err, "tor dns proxy recv failed"); + continue; + } + }; + + let response = match build_response(&buffer[..len], tor_client.as_ref()).await { + Ok(message) => message, + Err(err) => { + debug!(?err, "tor dns proxy failed to answer query"); + continue; + } + }; + + if let Err(err) = socket.send_to(&response, peer_addr).await { + warn!(?err, "tor dns proxy send failed"); + } + } + } + } + }); + + Ok(TorDnsHandle { shutdown: shutdown_tx, task }) +} + +pub(crate) async fn build_response( + packet: &[u8], + tor_client: &TorClient, +) -> Result> { + let request = Message::from_vec(packet).context("failed to parse dns packet")?; + let mut response = Message::new(); + response + .set_id(request.id()) + .set_op_code(request.op_code()) + .set_message_type(MessageType::Response) + .set_recursion_desired(request.recursion_desired()) + .set_recursion_available(true) + .set_response_code(ResponseCode::NoError); + + for query in request.queries().iter().cloned() { + response.add_query(query.clone()); + match query.query_type() { + RecordType::A | RecordType::AAAA => { + let hostname = query.name().to_utf8(); + let hostname = hostname.trim_end_matches('.'); + match tor_client.resolve(hostname).await { + Ok(addrs) => { + for addr in addrs { + if let Some(answer) = + record_for_address(query.name().clone(), query.query_type(), addr) + { + response.add_answer(answer); + } + } + } + Err(err) => { + debug!(hostname, ?err, "tor dns lookup failed"); + response.set_response_code(ResponseCode::ServFail); + } + } + } + _ => { + response.set_response_code(ResponseCode::NotImp); + } + } + } + + response.to_vec().context("failed to encode dns response") +} + +fn record_for_address( + name: 
hickory_proto::rr::Name, + record_type: RecordType, + addr: IpAddr, +) -> Option { + match (record_type, addr) { + (RecordType::A, IpAddr::V4(ip)) => Some(Record::from_rdata( + name, + DNS_TTL_SECS, + RData::A(A::from(ip)), + )), + (RecordType::AAAA, IpAddr::V6(ip)) => Some(Record::from_rdata( + name, + DNS_TTL_SECS, + RData::AAAA(AAAA::from(ip)), + )), + _ => None, + } +} + +fn join_error(err: JoinError) -> anyhow::Error { + anyhow::anyhow!("tor dns task failed: {err}") +} + +#[cfg(test)] +mod tests { + use super::*; + use hickory_proto::rr::Name; + use std::net::{Ipv4Addr, Ipv6Addr}; + + #[test] + fn builds_a_record_for_ipv4_answer() { + let record = record_for_address( + Name::from_ascii("example.com.").unwrap(), + RecordType::A, + IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), + ) + .unwrap(); + assert_eq!(record.record_type(), RecordType::A); + } + + #[test] + fn skips_mismatched_record_type() { + let record = record_for_address( + Name::from_ascii("example.com.").unwrap(), + RecordType::A, + IpAddr::V6(Ipv6Addr::LOCALHOST), + ); + assert!(record.is_none()); + } +} diff --git a/burrow/src/tor/exec.rs b/burrow/src/tor/exec.rs new file mode 100644 index 0000000..7f4317d --- /dev/null +++ b/burrow/src/tor/exec.rs @@ -0,0 +1,439 @@ +use std::{ + ffi::{OsStr, OsString}, + fs, + net::{IpAddr, Ipv4Addr, SocketAddr}, + os::unix::process::ExitStatusExt, + path::PathBuf, + process::{Command, ExitStatus, Stdio}, + sync::Arc, + time::Duration, +}; + +use anyhow::{bail, Context, Result}; +use tokio::process::Command as TokioCommand; +use tor_rtcompat::PreferredRuntime; +use tracing::{debug, info}; + +use super::{ + bootstrap_client, + dns::{spawn as spawn_dns, TorDnsHandle}, + runtime::{spawn_with_client, TorHandle}, + Config, SystemTcpStackConfig, TcpStackConfig, +}; + +const CHILD_PREFIX_LEN: u8 = 30; +const CHILD_DNS_PORT: u16 = 53; +const LISTENER_READY_TIMEOUT: Duration = Duration::from_secs(10); +const LISTENER_READY_POLL: Duration = Duration::from_millis(100); + +pub async 
fn run_exec(mut config: Config, command: Vec) -> Result { + if command.is_empty() { + bail!("tor-exec requires a command to run"); + } + ensure_root()?; + ensure_host_tool("ip")?; + ensure_host_tool("iptables")?; + ensure_host_tool("unshare")?; + + let requested_listener = config.listen_addr()?; + if requested_listener.port() == 0 { + bail!("tor-exec requires a fixed listener port"); + } + + let plan = NamespacePlan::new(requested_listener.port()); + let (state_dir, cache_dir) = config.runtime_dirs(std::process::id() as i32); + config.arti.state_dir = state_dir; + config.arti.cache_dir = cache_dir; + config.tcp_stack = TcpStackConfig::System(SystemTcpStackConfig { + listen: format!("{}:{}", plan.host_ip, plan.listener_port), + }); + + let namespace = NamespaceGuard::create(&plan)?; + let tor_client = bootstrap_client(&config).await?; + let tor_handle = spawn_with_client(config, tor_client.clone()).await?; + wait_for_listener(SocketAddr::new( + IpAddr::V4(plan.host_ip), + plan.listener_port, + )) + .await?; + let dns_handle = spawn_dns( + SocketAddr::new(IpAddr::V4(plan.host_ip), CHILD_DNS_PORT), + tor_client, + ) + .await?; + + let status = namespace.run_child(&command).await; + let dns_shutdown = dns_handle.shutdown().await; + let tor_shutdown = tor_handle.shutdown().await; + + let status = status?; + dns_shutdown?; + tor_shutdown?; + child_exit_code(status) +} + +fn ensure_root() -> Result<()> { + if unsafe { libc::geteuid() } != 0 { + bail!("tor-exec currently requires root on linux"); + } + Ok(()) +} + +fn ensure_host_tool(tool: &str) -> Result<()> { + let status = Command::new("sh") + .args(["-lc", &format!("command -v {tool} >/dev/null")]) + .status() + .with_context(|| format!("failed to probe required tool '{tool}'"))?; + if !status.success() { + bail!("required host tool '{tool}' is not available"); + } + Ok(()) +} + +async fn wait_for_listener(addr: SocketAddr) -> Result<()> { + let deadline = tokio::time::Instant::now() + LISTENER_READY_TIMEOUT; + loop { 
+ match tokio::net::TcpStream::connect(addr).await { + Ok(stream) => { + drop(stream); + return Ok(()); + } + Err(err) if tokio::time::Instant::now() < deadline => { + debug!(%addr, ?err, "waiting for tor transparent listener"); + tokio::time::sleep(LISTENER_READY_POLL).await; + } + Err(err) => return Err(err).with_context(|| format!("timed out waiting for {addr}")), + } + } +} + +fn child_exit_code(status: ExitStatus) -> Result { + if let Some(code) = status.code() { + return Ok(code); + } + if let Some(signal) = status.signal() { + return Ok(128 + signal); + } + bail!("child process terminated without an exit code"); +} + +#[derive(Debug, Clone)] +struct NamespacePlan { + netns_name: String, + host_if: String, + child_if: String, + host_ip: Ipv4Addr, + child_ip: Ipv4Addr, + listener_port: u16, +} + +impl NamespacePlan { + fn new(listener_port: u16) -> Self { + let token = std::process::id() % 10_000; + let segment = ((std::process::id() % 200) as u8) + 20; + Self { + netns_name: format!("burrow-tor-{token}"), + host_if: format!("bth{token}"), + child_if: format!("btc{token}"), + host_ip: Ipv4Addr::new(100, 90, segment, 1), + child_ip: Ipv4Addr::new(100, 90, segment, 2), + listener_port, + } + } + + fn host_cidr(&self) -> String { + format!("{}/{}", self.host_ip, CHILD_PREFIX_LEN) + } + + fn child_cidr(&self) -> String { + format!("{}/{}", self.child_ip, CHILD_PREFIX_LEN) + } +} + +struct NamespaceGuard { + plan: NamespacePlan, + resolv_conf: PathBuf, + nat_rule_installed: bool, + forward_rule_installed: bool, + netns_created: bool, + host_link_created: bool, +} + +impl NamespaceGuard { + fn create(plan: &NamespacePlan) -> Result { + let mut guard = Self { + plan: plan.clone(), + resolv_conf: write_resolv_conf(plan.host_ip)?, + nat_rule_installed: false, + forward_rule_installed: false, + netns_created: false, + host_link_created: false, + }; + + let setup = (|| -> Result<()> { + run_host_command(["ip", "netns", "add", &guard.plan.netns_name])?; + 
guard.netns_created = true; + + run_host_command([ + "ip", + "link", + "add", + &guard.plan.host_if, + "type", + "veth", + "peer", + "name", + &guard.plan.child_if, + ])?; + guard.host_link_created = true; + + run_host_command([ + "ip", + "addr", + "add", + &guard.plan.host_cidr(), + "dev", + &guard.plan.host_if, + ])?; + run_host_command(["ip", "link", "set", &guard.plan.host_if, "up"])?; + run_host_command([ + "ip", + "link", + "set", + &guard.plan.child_if, + "netns", + &guard.plan.netns_name, + ])?; + run_host_command([ + "ip", + "netns", + "exec", + &guard.plan.netns_name, + "ip", + "link", + "set", + "lo", + "up", + ])?; + run_host_command([ + "ip", + "netns", + "exec", + &guard.plan.netns_name, + "ip", + "addr", + "add", + &guard.plan.child_cidr(), + "dev", + &guard.plan.child_if, + ])?; + run_host_command([ + "ip", + "netns", + "exec", + &guard.plan.netns_name, + "ip", + "link", + "set", + &guard.plan.child_if, + "up", + ])?; + run_host_command([ + "ip", + "netns", + "exec", + &guard.plan.netns_name, + "ip", + "route", + "add", + "default", + "via", + &guard.plan.host_ip.to_string(), + "dev", + &guard.plan.child_if, + ])?; + run_host_command([ + "iptables", + "-t", + "nat", + "-A", + "PREROUTING", + "-i", + &guard.plan.host_if, + "-p", + "tcp", + "-j", + "DNAT", + "--to-destination", + &format!("{}:{}", guard.plan.host_ip, guard.plan.listener_port), + ])?; + guard.nat_rule_installed = true; + + run_host_command([ + "iptables", + "-A", + "FORWARD", + "-i", + &guard.plan.host_if, + "-j", + "REJECT", + ])?; + guard.forward_rule_installed = true; + Ok(()) + })(); + + if let Err(err) = setup { + guard.cleanup(); + return Err(err); + } + + Ok(guard) + } + + async fn run_child(&self, command: &[String]) -> Result { + let mut args = vec![ + OsString::from("netns"), + OsString::from("exec"), + OsString::from(&self.plan.netns_name), + OsString::from("unshare"), + OsString::from("--user"), + OsString::from("--map-root-user"), + OsString::from("--mount"), + 
OsString::from("--pid"), + OsString::from("--fork"), + OsString::from("--kill-child"), + OsString::from("sh"), + OsString::from("-ceu"), + OsString::from(CHILD_SCRIPT), + OsString::from("sh"), + self.resolv_conf.as_os_str().to_os_string(), + ]; + args.extend(command.iter().map(OsString::from)); + + let status = TokioCommand::new("ip") + .args(args) + .stdin(Stdio::inherit()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()) + .status() + .await + .context("failed to execute child in tor namespace")?; + Ok(status) + } + + fn cleanup(&mut self) { + if self.forward_rule_installed { + let _ = run_host_command([ + "iptables", + "-D", + "FORWARD", + "-i", + &self.plan.host_if, + "-j", + "REJECT", + ]); + self.forward_rule_installed = false; + } + if self.nat_rule_installed { + let _ = run_host_command([ + "iptables", + "-t", + "nat", + "-D", + "PREROUTING", + "-i", + &self.plan.host_if, + "-p", + "tcp", + "-j", + "DNAT", + "--to-destination", + &format!("{}:{}", self.plan.host_ip, self.plan.listener_port), + ]); + self.nat_rule_installed = false; + } + if self.host_link_created { + let _ = run_host_command(["ip", "link", "delete", &self.plan.host_if]); + self.host_link_created = false; + } + if self.netns_created { + let _ = run_host_command(["ip", "netns", "delete", &self.plan.netns_name]); + self.netns_created = false; + } + let _ = fs::remove_file(&self.resolv_conf); + } +} + +impl Drop for NamespaceGuard { + fn drop(&mut self) { + self.cleanup(); + } +} + +fn write_resolv_conf(nameserver: Ipv4Addr) -> Result { + let path = std::env::temp_dir().join(format!("burrow-tor-resolv-{}.conf", std::process::id())); + fs::write(&path, format!("nameserver {nameserver}\noptions ndots:1\n")) + .with_context(|| format!("failed to write {}", path.display()))?; + Ok(path) +} + +fn run_host_command(args: [&str; N]) -> Result<()> { + let (program, rest) = args + .split_first() + .expect("run_host_command requires a program and arguments"); + let status = Command::new(program) + 
.args(rest) + .stdin(Stdio::null()) + .stdout(Stdio::null()) + .stderr(Stdio::piped()) + .status() + .with_context(|| format!("failed to start host command {}", shell_words(&args)))?; + if status.success() { + Ok(()) + } else { + bail!("host command failed: {}", shell_words(&args)); + } +} + +fn shell_words(args: &[&str]) -> String { + args.iter() + .map(|arg| shlex_escape(arg)) + .collect::>() + .join(" ") +} + +fn shlex_escape(value: &str) -> String { + if value + .chars() + .all(|ch| ch.is_ascii_alphanumeric() || "-_./:=+".contains(ch)) + { + value.to_string() + } else { + format!("'{}'", value.replace('\'', "'\\''")) + } +} + +const CHILD_SCRIPT: &str = r#" +mount -t proc proc /proc +mount --bind "$1" /etc/resolv.conf +shift +exec "$@" +"#; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn namespace_plan_uses_short_interface_names() { + let plan = NamespacePlan::new(9040); + assert!(plan.host_if.len() <= 15); + assert!(plan.child_if.len() <= 15); + } + + #[test] + fn signal_exit_code_uses_shell_convention() { + let status = ExitStatus::from_raw(libc::SIGTERM); + assert_eq!(child_exit_code(status).unwrap(), 128 + libc::SIGTERM); + } +} diff --git a/burrow/src/tor/mod.rs b/burrow/src/tor/mod.rs index d275c7e..635c355 100644 --- a/burrow/src/tor/mod.rs +++ b/burrow/src/tor/mod.rs @@ -1,6 +1,9 @@ mod config; +pub(crate) mod dns; +mod exec; mod runtime; mod system; pub use config::{ArtiConfig, Config, SystemTcpStackConfig, TcpStackConfig}; -pub use runtime::{spawn, TorHandle}; +pub use exec::run_exec; +pub use runtime::{bootstrap_client, spawn, spawn_with_client, TorHandle}; diff --git a/burrow/src/tor/runtime.rs b/burrow/src/tor/runtime.rs index a7deb3c..45690ee 100644 --- a/burrow/src/tor/runtime.rs +++ b/burrow/src/tor/runtime.rs @@ -7,6 +7,7 @@ use tokio::{ task::{JoinError, JoinSet}, }; use tokio_util::compat::FuturesAsyncReadCompatExt; +use tor_rtcompat::PreferredRuntime; use tracing::{debug, error, info, warn}; use 
super::{system::SystemTcpStackRuntime, Config, TcpStackConfig}; @@ -28,16 +29,25 @@ impl TorHandle { } } -pub async fn spawn(config: Config) -> Result { +pub async fn bootstrap_client(config: &Config) -> Result>> { let builder = TorClientConfigBuilder::from_directories(&config.arti.state_dir, &config.arti.cache_dir); let tor_config = builder.build().context("failed to build arti config")?; - let tor_client = Arc::new( - TorClient::create_bootstrapped(tor_config) - .await - .context("failed to bootstrap arti client")?, - ); + let tor_client = TorClient::create_bootstrapped(tor_config) + .await + .context("failed to bootstrap arti client")?; + Ok(Arc::new(tor_client)) +} +pub async fn spawn(config: Config) -> Result { + let tor_client = bootstrap_client(&config).await?; + spawn_with_client(config, tor_client).await +} + +pub async fn spawn_with_client( + config: Config, + tor_client: Arc>, +) -> Result { let (shutdown_tx, mut shutdown_rx) = watch::channel(false); let task = match config.tcp_stack.clone() { TcpStackConfig::System(system_config) => tokio::spawn(async move { diff --git a/burrow/src/tor/system.rs b/burrow/src/tor/system.rs index c049835..74f8157 100644 --- a/burrow/src/tor/system.rs +++ b/burrow/src/tor/system.rs @@ -7,8 +7,6 @@ use super::SystemTcpStackConfig; pub struct SystemTcpStackRuntime { listener: TcpListener, - #[cfg(target_vendor = "apple")] - flow_tracker: AppleFlowTracker, } impl SystemTcpStackRuntime { @@ -16,18 +14,7 @@ impl SystemTcpStackRuntime { let listener = TcpListener::bind(&config.listen) .await .with_context(|| format!("failed to bind transparent listener on {}", config.listen))?; - #[cfg(target_vendor = "apple")] - let flow_tracker = AppleFlowTracker::new( - listener - .local_addr() - .expect("listener should always have a local address"), - ) - .context("failed to open /dev/pf for transparent destination lookups")?; - Ok(Self { - listener, - #[cfg(target_vendor = "apple")] - flow_tracker, - }) + Ok(Self { listener }) } pub fn 
local_addr(&self) -> SocketAddr { @@ -42,9 +29,6 @@ impl SystemTcpStackRuntime { .accept() .await .context("failed to accept transparent listener connection")?; - #[cfg(target_vendor = "apple")] - let original_dst = self.flow_tracker.resolve(&stream)?; - #[cfg(not(target_vendor = "apple"))] let original_dst = original_destination(&stream)?; Ok((stream, original_dst)) } @@ -81,708 +65,11 @@ fn original_destination(stream: &TcpStream) -> Result { socket_addr_from_storage(unsafe { &addr.assume_init() }, len as usize) } -#[cfg(all(not(target_os = "linux"), not(target_vendor = "apple")))] +#[cfg(not(target_os = "linux"))] fn original_destination(_stream: &TcpStream) -> Result { anyhow::bail!("system tcp stack transparent destination lookup is only implemented on linux") } -#[cfg(target_vendor = "apple")] -mod apple_pf { - use std::{ - collections::HashMap, - fs::File, - io, - mem::zeroed, - io::Read, - net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, - os::fd::{AsRawFd, RawFd}, - time::{Duration, Instant}, - }; - - use anyhow::{anyhow, bail, Context, Result}; - use nix::{ioctl_readwrite, libc}; - use parking_lot::Mutex; - use tokio::net::TcpStream; - - ioctl_readwrite!(pf_natlook, b'D', 23, PfiocNatlook); - - const FLOW_CACHE_LIMIT: usize = 4096; - const FLOW_CACHE_TTL: Duration = Duration::from_secs(30); - const PF_OUT: u8 = 2; - const PFLOG_RULESET_NAME_SIZE: usize = 16; - const PFLOG_DEVICE: &str = "pflog0"; - const OBSERVER_WAIT_STEPS: usize = 20; - const OBSERVER_WAIT_INTERVAL: Duration = Duration::from_millis(10); - - pub(super) struct AppleFlowTracker { - pf: File, - listener_addr: SocketAddr, - state: Mutex, - } - - impl AppleFlowTracker { - pub(super) fn new(listener_addr: SocketAddr) -> io::Result { - Ok(Self { - pf: File::options().read(true).write(true).open("/dev/pf")?, - listener_addr, - state: Mutex::new(FlowState { - cache: HashMap::new(), - observer: PacketObserver::new(listener_addr).ok(), - }), - }) - } - - pub(super) fn 
resolve(&self, stream: &TcpStream) -> Result { - let key = FlowKey::from_stream(stream)?; - if let Some(original_dst) = self.cached_destination(key) { - return Ok(original_dst); - } - - match lookup_pf_original_destination(self.pf.as_raw_fd(), key.peer, key.local) { - Ok(original_dst) => { - self.remember(key, original_dst); - Ok(original_dst) - } - Err(err) - if matches!( - err.raw_os_error(), - Some(code) if code == libc::EPERM || code == libc::ENOENT - ) => - { - if let Some(original_dst) = self.wait_for_observer(key) { - return Ok(original_dst); - } - match err.raw_os_error() { - Some(code) if code == libc::EPERM => Err(anyhow!( - "PF NAT lookups are denied on this macOS build and no logged redirect flow was observed for {} -> {}", - key.peer, - key.local - )), - Some(code) if code == libc::ENOENT => Err(anyhow!( - "PF did not have a redirect state for {} -> {} and no logged redirect flow was observed; ensure outbound TCP is redirected and logged before Burrow accepts it", - key.peer, - key.local - )), - _ => unreachable!(), - } - } - Err(err) => Err(err).context("DIOCNATLOOK failed"), - } - } - - fn cached_destination(&self, key: FlowKey) -> Option { - let mut state = self.state.lock(); - state.prune(); - state.drain_observer(self.listener_addr); - state.cache.get(&key).map(|entry| entry.original_dst) - } - - fn remember(&self, key: FlowKey, original_dst: SocketAddr) { - let mut state = self.state.lock(); - state.prune(); - remember_flow(&mut state.cache, key, original_dst, Instant::now()); - } - - fn wait_for_observer(&self, key: FlowKey) -> Option { - for _ in 0..OBSERVER_WAIT_STEPS { - if let Some(original_dst) = self.cached_destination(key) { - return Some(original_dst); - } - std::thread::sleep(OBSERVER_WAIT_INTERVAL); - } - None - } - } - - struct FlowState { - cache: HashMap, - observer: Option, - } - - impl FlowState { - fn prune(&mut self) { - let now = Instant::now(); - self.cache.retain(|_, entry| entry.expires_at > now); - } - - fn 
drain_observer(&mut self, listener_addr: SocketAddr) { - let Some(mut observer) = self.observer.take() else { - return; - }; - if observer.drain(listener_addr, &mut self.cache).is_ok() { - self.observer = Some(observer); - } - } - } - - #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] - struct FlowKey { - peer: SocketAddr, - local: SocketAddr, - } - - impl FlowKey { - fn from_stream(stream: &TcpStream) -> Result { - let peer = stream.peer_addr().context("failed to read transparent peer address")?; - let local = stream - .local_addr() - .context("failed to read transparent listener address")?; - match (peer, local) { - (SocketAddr::V4(_), SocketAddr::V4(_)) | (SocketAddr::V6(_), SocketAddr::V6(_)) => { - Ok(Self { peer, local }) - } - _ => bail!("transparent socket had mismatched source/destination address families"), - } - } - } - - #[derive(Clone, Copy, Debug)] - struct FlowEntry { - original_dst: SocketAddr, - expires_at: Instant, - } - - fn remember_flow( - cache: &mut HashMap, - key: FlowKey, - original_dst: SocketAddr, - now: Instant, - ) { - cache.retain(|_, entry| entry.expires_at > now); - if cache.len() >= FLOW_CACHE_LIMIT { - if let Some(oldest) = cache - .iter() - .min_by_key(|(_, entry)| entry.expires_at) - .map(|(flow_key, _)| *flow_key) - { - cache.remove(&oldest); - } - } - cache.insert( - key, - FlowEntry { - original_dst, - expires_at: now + FLOW_CACHE_TTL, - }, - ); - } - - fn lookup_pf_original_destination( - fd: RawFd, - peer: SocketAddr, - local: SocketAddr, - ) -> io::Result { - let mut request = PfiocNatlook::for_flow(peer, local) - .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?; - let ioctl_result = unsafe { pf_natlook(fd, &mut request) }; - if let Err(errno) = ioctl_result { - return Err(io::Error::from(errno)); - } - request - .original_destination() - .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err)) - } - - struct PacketObserver { - file: File, - buffer: Vec, - } - - impl PacketObserver { - fn 
new(listener_addr: SocketAddr) -> io::Result { - if listener_addr.ip().is_unspecified() { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "packet observer requires an explicit listener address", - )); - } - - let file = open_bpf_device()?; - bind_bpf_to_interface(file.as_raw_fd(), PFLOG_DEVICE)?; - set_bpf_flag(file.as_raw_fd(), libc::BIOCIMMEDIATE, 1)?; - set_bpf_flag(file.as_raw_fd(), libc::BIOCSSEESENT, 1)?; - set_nonblocking(file.as_raw_fd())?; - - let mut buffer_len: libc::c_uint = 0; - ioctl_value(file.as_raw_fd(), libc::BIOCGBLEN, &mut buffer_len)?; - Ok(Self { - file, - buffer: vec![0; buffer_len as usize], - }) - } - - fn drain( - &mut self, - listener_addr: SocketAddr, - cache: &mut HashMap, - ) -> io::Result<()> { - loop { - match self.file.read(&mut self.buffer) { - Ok(0) => break, - Ok(read) => self.consume(&self.buffer[..read], listener_addr, cache), - Err(err) if err.kind() == io::ErrorKind::WouldBlock => break, - Err(err) => return Err(err), - } - } - Ok(()) - } - - fn consume( - &self, - buffer: &[u8], - listener_addr: SocketAddr, - cache: &mut HashMap, - ) { - let mut offset = 0usize; - let now = Instant::now(); - while offset + std::mem::size_of::() <= buffer.len() { - let header = unsafe { - std::ptr::read_unaligned(buffer[offset..].as_ptr() as *const libc::bpf_hdr) - }; - let header_len = header.bh_hdrlen as usize; - let captured_len = header.bh_caplen as usize; - let packet_start = offset + header_len; - let packet_end = packet_start + captured_len; - let next_record = offset + bpf_wordalign(header_len + captured_len); - if packet_end > buffer.len() || next_record > buffer.len() { - break; - } - - if let Some((peer, original_dst)) = - parse_logged_syn(&buffer[packet_start..packet_end], listener_addr) - { - remember_flow( - cache, - FlowKey { - peer, - local: listener_addr, - }, - original_dst, - now, - ); - } - - offset = next_record; - } - } - } - - fn open_bpf_device() -> io::Result { - for index in 0..=255 { - match 
File::options() - .read(true) - .open(format!("/dev/bpf{index}")) - { - Ok(file) => return Ok(file), - Err(err) if err.raw_os_error() == Some(libc::EBUSY) => continue, - Err(err) => return Err(err), - } - } - Err(io::Error::new( - io::ErrorKind::NotFound, - "no free /dev/bpf devices were available", - )) - } - - fn bind_bpf_to_interface(fd: RawFd, ifname: &str) -> io::Result<()> { - let mut ifreq = unsafe { zeroed::() }; - let bytes = ifname.as_bytes(); - let max = std::cmp::min(bytes.len(), libc::IFNAMSIZ.saturating_sub(1)); - for (index, byte) in bytes.iter().take(max).enumerate() { - ifreq.ifr_name[index] = *byte as libc::c_char; - } - ioctl_value(fd, libc::BIOCSETIF, &mut ifreq) - } - - fn set_bpf_flag(fd: RawFd, request: libc::c_ulong, value: libc::c_uint) -> io::Result<()> { - let mut flag = value; - ioctl_value(fd, request, &mut flag) - } - - fn set_nonblocking(fd: RawFd) -> io::Result<()> { - let current = unsafe { libc::fcntl(fd, libc::F_GETFL) }; - if current < 0 { - return Err(io::Error::last_os_error()); - } - if unsafe { libc::fcntl(fd, libc::F_SETFL, current | libc::O_NONBLOCK) } != 0 { - return Err(io::Error::last_os_error()); - } - Ok(()) - } - - fn ioctl_value(fd: RawFd, request: libc::c_ulong, value: &mut T) -> io::Result<()> { - if unsafe { libc::ioctl(fd, request, value) } != 0 { - return Err(io::Error::last_os_error()); - } - Ok(()) - } - - fn parse_logged_syn( - record: &[u8], - listener_addr: SocketAddr, - ) -> Option<(SocketAddr, SocketAddr)> { - let header = read_pflog_header(record)?; - if header.dir != PF_OUT { - return None; - } - let packet = record.get(header.length as usize..)?; - match header.af as i32 { - libc::AF_INET => parse_ipv4_syn(packet, listener_addr), - libc::AF_INET6 => parse_ipv6_syn(packet, listener_addr), - _ => None, - } - } - - fn parse_ipv4_syn(packet: &[u8], listener_addr: SocketAddr) -> Option<(SocketAddr, SocketAddr)> { - if !matches!(listener_addr, SocketAddr::V4(_)) || packet.len() < 20 || packet[0] >> 4 != 4 { 
- return None; - } - let header_len = usize::from(packet[0] & 0x0f) * 4; - if header_len < 20 || packet.len() < header_len + 20 || packet[9] != libc::IPPROTO_TCP as u8 { - return None; - } - let tcp = &packet[header_len..]; - let flags = tcp[13]; - if flags & 0x02 == 0 || flags & 0x10 != 0 { - return None; - } - let source_ip = Ipv4Addr::new(packet[12], packet[13], packet[14], packet[15]); - let dest_ip = Ipv4Addr::new(packet[16], packet[17], packet[18], packet[19]); - let source_port = u16::from_be_bytes([tcp[0], tcp[1]]); - let dest_port = u16::from_be_bytes([tcp[2], tcp[3]]); - Some(( - SocketAddr::V4(SocketAddrV4::new(source_ip, source_port)), - SocketAddr::V4(SocketAddrV4::new(dest_ip, dest_port)), - )) - } - - fn parse_ipv6_syn(packet: &[u8], listener_addr: SocketAddr) -> Option<(SocketAddr, SocketAddr)> { - if !matches!(listener_addr, SocketAddr::V6(_)) || packet.len() < 40 || packet[0] >> 4 != 6 { - return None; - } - if packet[6] != libc::IPPROTO_TCP as u8 || packet.len() < 60 { - return None; - } - let tcp = &packet[40..]; - let flags = tcp[13]; - if flags & 0x02 == 0 || flags & 0x10 != 0 { - return None; - } - let source_ip = Ipv6Addr::from(<[u8; 16]>::try_from(&packet[8..24]).ok()?); - let dest_ip = Ipv6Addr::from(<[u8; 16]>::try_from(&packet[24..40]).ok()?); - let source_port = u16::from_be_bytes([tcp[0], tcp[1]]); - let dest_port = u16::from_be_bytes([tcp[2], tcp[3]]); - Some(( - SocketAddr::V6(SocketAddrV6::new(source_ip, source_port, 0, 0)), - SocketAddr::V6(SocketAddrV6::new(dest_ip, dest_port, 0, 0)), - )) - } - - fn read_pflog_header(record: &[u8]) -> Option { - if record.len() < std::mem::size_of::() { - return None; - } - let header = - unsafe { std::ptr::read_unaligned(record.as_ptr() as *const PflogHdr) }; - if header.length as usize > record.len() || (header.length as usize) < PFLOG_REAL_HDRLEN { - return None; - } - Some(header) - } - - const fn bpf_wordalign(len: usize) -> usize { - let alignment = std::mem::size_of::(); - (len + 
(alignment - 1)) & !(alignment - 1) - } - - #[repr(C)] - #[derive(Clone, Copy)] - struct PfiocNatlook { - saddr: PfAddr, - daddr: PfAddr, - rsaddr: PfAddr, - rdaddr: PfAddr, - sxport: PfStateXport, - dxport: PfStateXport, - rsxport: PfStateXport, - rdxport: PfStateXport, - af: libc::sa_family_t, - proto: u8, - proto_variant: u8, - direction: u8, - } - - impl PfiocNatlook { - fn for_flow(peer: SocketAddr, local: SocketAddr) -> Result { - let (saddr, sxport, source_af) = pf_endpoint(peer); - let (daddr, dxport, destination_af) = pf_endpoint(local); - if source_af != destination_af { - bail!("transparent flow key changed address family across redirect"); - } - Ok(Self { - saddr, - daddr, - rsaddr: PfAddr::default(), - rdaddr: PfAddr::default(), - sxport, - dxport, - rsxport: PfStateXport::default(), - rdxport: PfStateXport::default(), - af: source_af, - proto: libc::IPPROTO_TCP as u8, - proto_variant: 0, - direction: PF_OUT, - }) - } - - fn original_destination(&self) -> Result { - socket_addr_from_pf(self.af, self.rdaddr, self.rdxport) - } - } - - fn pf_endpoint(addr: SocketAddr) -> (PfAddr, PfStateXport, libc::sa_family_t) { - let port = PfStateXport { - port: u16::to_be(addr.port()), - }; - match addr { - SocketAddr::V4(addr) => ( - PfAddr::from_ipv4(*addr.ip()), - port, - libc::AF_INET as libc::sa_family_t, - ), - SocketAddr::V6(addr) => ( - PfAddr::from_ipv6(*addr.ip()), - port, - libc::AF_INET6 as libc::sa_family_t, - ), - } - } - - fn socket_addr_from_pf( - af: libc::sa_family_t, - addr: PfAddr, - port: PfStateXport, - ) -> Result { - match af as i32 { - libc::AF_INET => Ok(SocketAddr::V4(SocketAddrV4::new( - Ipv4Addr::from(addr.v4_octets()), - u16::from_be(unsafe { port.port }), - ))), - libc::AF_INET6 => Ok(SocketAddr::V6(SocketAddrV6::new( - Ipv6Addr::from(addr.v6_octets()), - u16::from_be(unsafe { port.port }), - 0, - 0, - ))), - family => bail!("unsupported PF address family {family}"), - } - } - - #[repr(C)] - #[derive(Clone, Copy)] - union PfAddrRepr { - 
v4addr: libc::in_addr, - v6addr: libc::in6_addr, - addr8: [u8; 16], - addr16: [u16; 8], - addr32: [u32; 4], - } - - #[repr(C)] - #[derive(Clone, Copy)] - struct PfAddr { - pfa: PfAddrRepr, - } - - impl Default for PfAddr { - fn default() -> Self { - Self { - pfa: PfAddrRepr { addr32: [0; 4] }, - } - } - } - - impl PfAddr { - fn from_ipv4(ip: Ipv4Addr) -> Self { - let mut bytes = [0u8; 16]; - bytes[..4].copy_from_slice(&ip.octets()); - Self { - pfa: PfAddrRepr { addr8: bytes }, - } - } - - fn from_ipv6(ip: Ipv6Addr) -> Self { - Self { - pfa: PfAddrRepr { - addr8: ip.octets(), - }, - } - } - - fn v4_octets(self) -> [u8; 4] { - let bytes = unsafe { self.pfa.addr8 }; - [bytes[0], bytes[1], bytes[2], bytes[3]] - } - - fn v6_octets(self) -> [u8; 16] { - unsafe { self.pfa.addr8 } - } - } - - #[repr(C)] - #[derive(Clone, Copy)] - union PfStateXport { - port: u16, - call_id: u16, - spi: u32, - } - - #[repr(C)] - #[derive(Clone, Copy)] - struct PflogHdr { - length: u8, - af: libc::sa_family_t, - action: u8, - reason: u8, - ifname: [libc::c_char; libc::IFNAMSIZ], - ruleset: [libc::c_char; PFLOG_RULESET_NAME_SIZE], - rulenr: u32, - subrulenr: u32, - uid: libc::uid_t, - pid: libc::pid_t, - rule_uid: libc::uid_t, - rule_pid: libc::pid_t, - dir: u8, - pad: [u8; 3], - } - - const PFLOG_REAL_HDRLEN: usize = std::mem::offset_of!(PflogHdr, pad); - - impl Default for PfStateXport { - fn default() -> Self { - unsafe { zeroed() } - } - } - - #[cfg(test)] - mod tests { - use super::*; - - #[test] - fn builds_natlook_request_from_redirected_flow() { - let request = PfiocNatlook::for_flow( - "192.0.2.10:41000".parse().unwrap(), - "127.0.0.1:9040".parse().unwrap(), - ) - .unwrap(); - assert_eq!(request.af as i32, libc::AF_INET); - assert_eq!(request.proto, libc::IPPROTO_TCP as u8); - assert_eq!(request.direction, PF_OUT); - assert_eq!(request.saddr.v4_octets(), [192, 0, 2, 10]); - assert_eq!(request.daddr.v4_octets(), [127, 0, 0, 1]); - assert_eq!(u16::from_be(unsafe { request.sxport.port 
}), 41000); - assert_eq!(u16::from_be(unsafe { request.dxport.port }), 9040); - } - - #[test] - fn decodes_original_ipv6_destination() { - let mut request = - PfiocNatlook::for_flow("[::1]:41000".parse().unwrap(), "[::1]:9040".parse().unwrap()) - .unwrap(); - request.rdaddr = PfAddr::from_ipv6("2001:db8::42".parse().unwrap()); - request.rdxport = PfStateXport { - port: u16::to_be(443), - }; - - assert_eq!( - request.original_destination().unwrap(), - "[2001:db8::42]:443".parse::().unwrap() - ); - } - - #[test] - fn parses_logged_ipv4_syn() { - let mut record = Vec::new(); - record.extend_from_slice(&[ - PFLOG_REAL_HDRLEN as u8, - libc::AF_INET as u8, - 0, - 0, - ]); - record.extend_from_slice(&[0; libc::IFNAMSIZ]); - record.extend_from_slice(&[0; PFLOG_RULESET_NAME_SIZE]); - record.extend_from_slice(&0u32.to_ne_bytes()); - record.extend_from_slice(&0u32.to_ne_bytes()); - record.extend_from_slice(&0u32.to_ne_bytes()); - record.extend_from_slice(&0u32.to_ne_bytes()); - record.extend_from_slice(&0u32.to_ne_bytes()); - record.extend_from_slice(&0u32.to_ne_bytes()); - record.push(PF_OUT); - - record.extend_from_slice(&[ - 0x45, 0, 0, 40, 0, 0, 0, 0, 64, libc::IPPROTO_TCP as u8, 0, 0, 192, 0, 2, 10, - 198, 51, 100, 42, - ]); - record.extend_from_slice(&[ - 0x9c, 0x28, 0x01, 0xbb, 0, 0, 0, 0, 0, 0, 0, 0, 0x50, 0x02, 0x20, 0, 0, 0, 0, - 0, - ]); - - assert_eq!( - parse_logged_syn(&record, "127.0.0.1:9040".parse().unwrap()), - Some(( - "192.0.2.10:39976".parse().unwrap(), - "198.51.100.42:443".parse().unwrap(), - )) - ); - } - - #[test] - fn parses_logged_ipv6_syn() { - let mut record = Vec::new(); - record.extend_from_slice(&[ - PFLOG_REAL_HDRLEN as u8, - libc::AF_INET6 as u8, - 0, - 0, - ]); - record.extend_from_slice(&[0; libc::IFNAMSIZ]); - record.extend_from_slice(&[0; PFLOG_RULESET_NAME_SIZE]); - record.extend_from_slice(&0u32.to_ne_bytes()); - record.extend_from_slice(&0u32.to_ne_bytes()); - record.extend_from_slice(&0u32.to_ne_bytes()); - 
record.extend_from_slice(&0u32.to_ne_bytes()); - record.extend_from_slice(&0u32.to_ne_bytes()); - record.extend_from_slice(&0u32.to_ne_bytes()); - record.push(PF_OUT); - - let source_ip = Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0x10).octets(); - let dest_ip = Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0x42).octets(); - record.extend_from_slice(&[ - 0x60, 0, 0, 0, 0, 20, libc::IPPROTO_TCP as u8, 64, - ]); - record.extend_from_slice(&source_ip); - record.extend_from_slice(&dest_ip); - record.extend_from_slice(&[ - 0x9c, 0x28, 0x01, 0xbb, 0, 0, 0, 0, 0, 0, 0, 0, 0x50, 0x02, 0x20, 0, 0, 0, 0, - 0, - ]); - - assert_eq!( - parse_logged_syn(&record, "[::1]:9040".parse().unwrap()), - Some(( - "[2001:db8::10]:39976".parse().unwrap(), - "[2001:db8::42]:443".parse().unwrap(), - )) - ); - } - } -} - -#[cfg(target_vendor = "apple")] -use apple_pf::AppleFlowTracker; - -#[cfg(target_os = "linux")] fn socket_addr_from_storage(addr: &libc::sockaddr_storage, len: usize) -> Result { use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}; @@ -833,7 +120,7 @@ mod tests { let parsed = socket_addr_from_storage(&storage, size_of::()).unwrap(); assert_eq!( parsed, - SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 9040)) + SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 9040)) ); } diff --git a/burrow/src/tracing.rs b/burrow/src/tracing.rs index d48c53b..8a245ef 100644 --- a/burrow/src/tracing.rs +++ b/burrow/src/tracing.rs @@ -3,8 +3,7 @@ use std::sync::Once; use tracing::{error, info}; use tracing_subscriber::{ layer::{Layer, SubscriberExt}, - EnvFilter, - Registry, + EnvFilter, Registry, }; static TRACING: Once = Once::new(); @@ -15,39 +14,55 @@ pub fn initialize() { error!("Failed to initialize LogTracer: {}", e); } - #[cfg(target_os = "windows")] - let system_log = Some(tracing_subscriber::fmt::layer()); - - #[cfg(target_os = "linux")] - let system_log = match tracing_journald::layer() { - Ok(layer) => Some(layer), - Err(e) => { - if e.kind() != 
std::io::ErrorKind::NotFound { - error!("Failed to initialize journald: {}", e); - } - None - } - }; - - #[cfg(target_os = "macos")] - let system_log = Some(tracing_oslog::OsLogger::new( - "com.hackclub.burrow", - "tracing", - )); - - #[cfg(all(target_vendor = "apple", not(target_os = "macos")))] - let system_log = None::; - - let stderr = (console::user_attended_stderr() || system_log.is_none()).then(|| { + let make_stderr = || { tracing_subscriber::fmt::layer() .with_level(true) .with_writer(std::io::stderr) .with_line_number(true) .compact() .with_filter(EnvFilter::from_default_env()) - }); + }; - let subscriber = Registry::default().with(stderr).with(system_log); + #[cfg(target_os = "windows")] + let subscriber = { + let system_log = Some(tracing_subscriber::fmt::layer()); + let stderr = (console::user_attended_stderr() || system_log.is_none()).then(make_stderr); + Registry::default().with(stderr).with(system_log) + }; + + #[cfg(target_os = "linux")] + let subscriber = { + let system_log = match tracing_journald::layer() { + Ok(layer) => Some(layer), + Err(e) => { + if e.kind() != std::io::ErrorKind::NotFound { + error!("Failed to initialize journald: {}", e); + } + None + } + }; + let stderr = (console::user_attended_stderr() || system_log.is_none()).then(make_stderr); + Registry::default().with(stderr).with(system_log) + }; + + #[cfg(target_os = "macos")] + let subscriber = { + // `tracing_oslog` is crashing under Tokio/h2 span churn in the host daemon on + // current macOS. Keep logging on stderr by default and allow opt-in OSLog + // only when explicitly requested for local debugging. 
+ let enable_oslog = matches!( + std::env::var("BURROW_ENABLE_OSLOG").as_deref(), + Ok("1" | "true" | "TRUE" | "yes" | "YES") + ); + let system_log = enable_oslog.then(|| { + tracing_oslog::OsLogger::new("com.hackclub.burrow", "tracing") + }); + let stderr = (console::user_attended_stderr() || system_log.is_none()).then(make_stderr); + Registry::default().with(stderr).with(system_log) + }; + + #[cfg(not(any(target_os = "windows", target_os = "linux", target_os = "macos")))] + let subscriber = Registry::default().with(Some(make_stderr())); #[cfg(feature = "tokio-console")] let subscriber = subscriber.with( diff --git a/burrow/src/usernet/mod.rs b/burrow/src/usernet/mod.rs new file mode 100644 index 0000000..12de810 --- /dev/null +++ b/burrow/src/usernet/mod.rs @@ -0,0 +1,935 @@ +use std::{ + collections::HashMap, + env, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + os::fd::{AsRawFd, FromRawFd, RawFd}, + os::unix::net::UnixStream as StdUnixStream, + os::unix::process::ExitStatusExt, + path::{Path, PathBuf}, + process::{Command as StdCommand, ExitStatus}, + str, + sync::Arc, + time::Duration, +}; + +use anyhow::{anyhow, bail, Context, Result}; +use clap::ValueEnum; +use futures::{SinkExt, StreamExt}; +use ipnetwork::IpNetwork; +use netstack_smoltcp::{ + StackBuilder, TcpListener as StackTcpListener, TcpStream as StackTcpStream, + UdpSocket as StackUdpSocket, +}; +use nix::{ + cmsg_space, + fcntl::{fcntl, FcntlArg, FdFlag}, + sys::socket::{recvmsg, sendmsg, ControlMessage, ControlMessageOwned, MsgFlags}, +}; +use serde::{Deserialize, Serialize}; +use tokio::{ + io::copy_bidirectional, + net::{TcpStream, UdpSocket}, + process::{Child, Command}, + sync::{mpsc, Mutex, RwLock}, + task::JoinSet, +}; +use tokio_util::compat::FuturesAsyncReadCompatExt; +use tracing::{debug, warn}; +use tun::{tokio::TunInterface as TokioTunInterface, TunOptions}; + +use crate::{ + tor::{bootstrap_client, dns::build_response as build_tor_dns_response, Config as TorConfig}, + 
wireguard::{Config as WireGuardConfig, Interface as WireGuardInterface}, +}; + +const INNER_ENV: &str = "BURROW_USERNET_INNER"; +const INNER_CONTROL_FD_ENV: &str = "BURROW_USERNET_CONTROL_FD"; +const INNER_TUN_CONFIG_ENV: &str = "BURROW_USERNET_TUN_CONFIG"; +const DEFAULT_MTU: u32 = 1500; +const DEFAULT_TUN_V4: &str = "100.64.0.2/24"; +const DEFAULT_TUN_V6: &str = "fd00:64::2/64"; +const UDP_IDLE_TIMEOUT: Duration = Duration::from_secs(30); +const READY_ACK: &[u8; 1] = b"1"; + +#[derive(Clone, Debug, Eq, PartialEq, ValueEnum)] +pub enum ExecBackendKind { + Direct, + Tor, + Wireguard, +} + +impl ExecBackendKind { + fn cli_name(&self) -> &'static str { + match self { + Self::Direct => "direct", + Self::Tor => "tor", + Self::Wireguard => "wireguard", + } + } +} + +#[derive(Clone, Debug)] +pub struct ExecInvocation { + pub backend: ExecBackendKind, + pub payload_path: Option, + pub command: Vec, +} + +#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] +pub struct DirectConfig { + #[serde(default)] + pub address: Vec, + #[serde(default)] + pub dns: Vec, + #[serde(default)] + pub mtu: Option, + #[serde(default)] + pub tun_name: Option, +} + +impl DirectConfig { + pub fn from_payload(payload: &[u8]) -> Result { + if payload.is_empty() { + return Ok(Self::default()); + } + + if let Ok(config) = serde_json::from_slice(payload) { + return Ok(config); + } + + let payload = str::from_utf8(payload).context("direct payload must be valid UTF-8")?; + toml::from_str(payload).context("failed to parse direct payload as JSON or TOML") + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct TunNetworkConfig { + tun_name: String, + addresses: Vec, + mtu: u32, +} + +enum PreparedBackend { + Socket { + backend: SocketBackend, + tun_config: TunNetworkConfig, + }, + Wireguard { + config: WireGuardConfig, + tun_config: TunNetworkConfig, + }, +} + +impl PreparedBackend { + fn tun_config(&self) -> &TunNetworkConfig { + match self { + Self::Socket { tun_config, .. 
} => tun_config, + Self::Wireguard { tun_config, .. } => tun_config, + } + } +} + +struct NamespaceChild { + child: Child, + control: StdUnixStream, +} + +#[derive(Clone)] +enum SocketBackend { + Direct, + Tor(Arc>), +} + +#[derive(Debug)] +struct UdpReply { + payload: Vec, + source: SocketAddr, + destination: SocketAddr, +} + +#[derive(Debug, Clone, Eq, Hash, PartialEq)] +struct UdpFlowKey { + local: SocketAddr, + remote: SocketAddr, +} + +pub async fn run_exec(invocation: ExecInvocation) -> Result { + if invocation.command.is_empty() { + bail!("exec requires a command to run"); + } + + if env::var_os(INNER_ENV).is_some() { + run_inner(invocation.command).await + } else { + run_supervisor(invocation).await + } +} + +async fn run_supervisor(invocation: ExecInvocation) -> Result { + let prepared = prepare_backend(&invocation).await?; + let mut child = spawn_namespaced_child(&invocation, prepared.tun_config())?; + let tun = child.receive_tun().await?; + + match prepared { + PreparedBackend::Socket { backend, .. } => run_socket_backend(backend, tun, child).await, + PreparedBackend::Wireguard { config, .. 
} => { + run_wireguard_backend(config, tun, child).await + } + } +} + +async fn prepare_backend(invocation: &ExecInvocation) -> Result { + match invocation.backend { + ExecBackendKind::Direct => { + let payload = read_optional_payload(invocation.payload_path.as_deref()).await?; + let config = DirectConfig::from_payload(&payload)?; + let tun_config = socket_tun_config( + &config.address, + config.mtu, + config.tun_name.as_deref(), + "burrow-direct", + )?; + Ok(PreparedBackend::Socket { + backend: SocketBackend::Direct, + tun_config, + }) + } + ExecBackendKind::Tor => { + let payload = read_required_payload(invocation.payload_path.as_deref(), "tor").await?; + let mut config = TorConfig::from_payload(&payload)?; + let (state_dir, cache_dir) = config.runtime_dirs(std::process::id() as i32); + config.arti.state_dir = state_dir; + config.arti.cache_dir = cache_dir; + let tun_config = socket_tun_config( + &config.address, + config.mtu, + config.tun_name.as_deref(), + "burrow-tor", + )?; + let tor_client = bootstrap_client(&config).await?; + Ok(PreparedBackend::Socket { + backend: SocketBackend::Tor(tor_client), + tun_config, + }) + } + ExecBackendKind::Wireguard => { + let payload = + read_required_payload(invocation.payload_path.as_deref(), "wireguard").await?; + let config = parse_wireguard_payload(&payload, invocation.payload_path.as_deref())?; + let tun_config = wireguard_tun_config(&config)?; + Ok(PreparedBackend::Wireguard { config, tun_config }) + } + } +} + +fn spawn_namespaced_child( + invocation: &ExecInvocation, + tun_config: &TunNetworkConfig, +) -> Result { + ensure_tool("unshare")?; + ensure_tool("ip")?; + + let (parent_control, child_control) = + StdUnixStream::pair().context("failed to create namespace control socket")?; + set_inheritable(child_control.as_raw_fd())?; + + let current_exe = env::current_exe().context("failed to locate current burrow binary")?; + let mut cmd = Command::new("unshare"); + cmd.args([ + "--user", + "--map-root-user", + "--net", + 
"--mount", + "--pid", + "--fork", + "--kill-child", + "--mount-proc", + ]); + cmd.env(INNER_ENV, "1"); + cmd.env(INNER_CONTROL_FD_ENV, child_control.as_raw_fd().to_string()); + cmd.env( + INNER_TUN_CONFIG_ENV, + serde_json::to_string(tun_config).context("failed to encode namespace tun config")?, + ); + cmd.arg(current_exe); + cmd.arg("exec"); + cmd.args(["--backend", invocation.backend.cli_name()]); + if let Some(payload_path) = &invocation.payload_path { + cmd.arg("--payload"); + cmd.arg(payload_path); + } + cmd.arg("--"); + cmd.args(&invocation.command); + + let child = cmd + .spawn() + .context("failed to enter unshared Linux namespace")?; + drop(child_control); + + Ok(NamespaceChild { child, control: parent_control }) +} + +async fn run_inner(command: Vec) -> Result { + run_ip(["link", "set", "lo", "up"])?; + let tun_config = read_inner_tun_config()?; + let tun = open_tun_device(&tun_config)?; + configure_tun_addresses(&tun, &tun_config.addresses, tun_config.mtu)?; + let name = tun.name().context("failed to retrieve tun device name")?; + run_ip(["link", "set", "dev", &name, "up"])?; + install_default_routes(&name, &tun_config.addresses)?; + + let control_fd = env::var(INNER_CONTROL_FD_ENV) + .context("missing namespace control fd")? 
+ .parse::() + .context("invalid namespace control fd")?; + send_tun_fd(control_fd, tun.as_raw_fd())?; + await_parent_ready(control_fd).await?; + drop(tun); + + let status = spawn_child(&command).await?; + child_exit_code(status) +} + +impl NamespaceChild { + async fn receive_tun(&mut self) -> Result { + let control = self + .control + .try_clone() + .context("failed to clone namespace control socket")?; + let fd = tokio::task::spawn_blocking(move || recv_tun_fd(&control)) + .await + .context("failed to join namespace tun receive task")??; + tokio_tun_from_fd(fd) + } + + async fn signal_ready(&self) -> Result<()> { + let mut control = self + .control + .try_clone() + .context("failed to clone namespace control socket")?; + tokio::task::spawn_blocking(move || -> Result<()> { + std::io::Write::write_all(&mut control, READY_ACK) + .context("failed to acknowledge namespace readiness")?; + Ok(()) + }) + .await + .context("failed to join namespace ready task")??; + Ok(()) + } + + async fn wait(mut self) -> Result { + self.child + .wait() + .await + .context("failed to wait for namespace child") + } +} + +async fn run_socket_backend( + backend: SocketBackend, + tun: TokioTunInterface, + child: NamespaceChild, +) -> Result { + let tun = Arc::new(tun); + let (stack, runner, udp_socket, tcp_listener) = StackBuilder::default() + .stack_buffer_size(1024) + .udp_buffer_size(1024) + .tcp_buffer_size(1024) + .enable_udp(true) + .enable_tcp(true) + .enable_icmp(true) + .build() + .context("failed to build userspace netstack")?; + let (mut stack_sink, mut stack_stream) = stack.split(); + + let mut tasks = JoinSet::new(); + if let Some(runner) = runner { + tasks.spawn(async move { runner.await.map_err(anyhow::Error::from) }); + } + + { + let tun = tun.clone(); + tasks.spawn(async move { + let mut buf = vec![0u8; 65_535]; + loop { + let len = tun + .recv(&mut buf) + .await + .context("failed to read packet from tun")?; + if len == 0 { + continue; + } + stack_sink + 
.send(buf[..len].to_vec()) + .await + .context("failed to send tun packet into userspace stack")?; + } + #[allow(unreachable_code)] + Result::<()>::Ok(()) + }); + } + + { + let tun = tun.clone(); + tasks.spawn(async move { + while let Some(packet) = stack_stream.next().await { + let packet = packet.context("failed to receive packet from userspace stack")?; + tun.send(&packet) + .await + .context("failed to write userspace stack packet to tun")?; + } + Result::<()>::Ok(()) + }); + } + + if let Some(tcp_listener) = tcp_listener { + let backend = backend.clone(); + tasks.spawn(async move { tcp_dispatch_loop(tcp_listener, backend).await }); + } + + if let Some(udp_socket) = udp_socket { + tasks.spawn(async move { udp_dispatch_loop(udp_socket, backend).await }); + } + + child.signal_ready().await?; + let status = child.wait().await?; + + tasks.abort_all(); + while let Some(joined) = tasks.join_next().await { + match joined { + Ok(Ok(())) => {} + Ok(Err(err)) => debug!(?err, "usernet background task exited with error"), + Err(err) if err.is_cancelled() => {} + Err(err) => debug!(?err, "usernet background task panicked"), + } + } + + child_exit_code(status) +} + +async fn run_wireguard_backend( + config: WireGuardConfig, + tun: TokioTunInterface, + child: NamespaceChild, +) -> Result { + let interface: WireGuardInterface = config.try_into()?; + interface.set_tun(tun).await; + let interface = Arc::new(interface); + let runner = { + let interface = interface.clone(); + tokio::spawn(async move { interface.run().await }) + }; + + child.signal_ready().await?; + let status = child.wait().await?; + + interface.remove_tun().await; + match runner.await { + Ok(Ok(())) => {} + Ok(Err(err)) => debug!(?err, "wireguard exec runtime exited with error"), + Err(err) if err.is_cancelled() => {} + Err(err) => debug!(?err, "wireguard exec runtime panicked"), + } + + child_exit_code(status) +} + +async fn tcp_dispatch_loop(mut listener: StackTcpListener, backend: SocketBackend) -> Result<()> 
{ + let mut tasks = JoinSet::new(); + loop { + tokio::select! { + Some(result) = tasks.join_next(), if !tasks.is_empty() => { + match result { + Ok(Ok(())) => {} + Ok(Err(err)) => warn!(?err, "tcp bridge task failed"), + Err(err) if err.is_cancelled() => {} + Err(err) => warn!(?err, "tcp bridge task panicked"), + } + } + next = listener.next() => match next { + Some((stream, local_addr, remote_addr)) => { + debug!(%local_addr, %remote_addr, "accepted userspace tcp stream"); + let backend = backend.clone(); + tasks.spawn(async move { + bridge_tcp(backend, stream, local_addr, remote_addr).await + }); + } + None => break, + } + } + } + + tasks.abort_all(); + while let Some(result) = tasks.join_next().await { + match result { + Ok(Ok(())) => {} + Ok(Err(err)) => debug!(?err, "tcp bridge task exited during shutdown"), + Err(err) if err.is_cancelled() => {} + Err(err) => debug!(?err, "tcp bridge task panicked during shutdown"), + } + } + Ok(()) +} + +async fn bridge_tcp( + backend: SocketBackend, + mut inbound: StackTcpStream, + _local_addr: SocketAddr, + remote_addr: SocketAddr, +) -> Result<()> { + match backend { + SocketBackend::Direct => { + debug!(%remote_addr, "dialing direct outbound tcp"); + let mut outbound = TcpStream::connect(remote_addr) + .await + .with_context(|| format!("failed to connect to {remote_addr}"))?; + copy_bidirectional(&mut inbound, &mut outbound) + .await + .with_context(|| format!("failed to bridge tcp stream for {remote_addr}"))?; + } + SocketBackend::Tor(tor_client) => { + debug!(%remote_addr, "dialing tor outbound tcp"); + let tor_stream = tor_client + .connect((remote_addr.ip().to_string(), remote_addr.port())) + .await + .with_context(|| format!("failed to connect to {remote_addr} over tor"))?; + let mut tor_stream = tor_stream.compat(); + copy_bidirectional(&mut inbound, &mut tor_stream) + .await + .with_context(|| format!("failed to bridge tor stream for {remote_addr}"))?; + } + } + Ok(()) +} + +async fn udp_dispatch_loop(socket: 
StackUdpSocket, backend: SocketBackend) -> Result<()> { + let (mut udp_reader, mut udp_writer) = socket.split(); + let (reply_tx, mut reply_rx) = mpsc::channel::(128); + let direct_sessions = Arc::new(Mutex::new( + HashMap::>>::new(), + )); + let mut session_tasks = JoinSet::new(); + + loop { + tokio::select! { + Some(result) = session_tasks.join_next(), if !session_tasks.is_empty() => { + match result { + Ok(Ok(())) => {} + Ok(Err(err)) => warn!(?err, "udp session task failed"), + Err(err) if err.is_cancelled() => {} + Err(err) => warn!(?err, "udp session task panicked"), + } + } + maybe_reply = reply_rx.recv() => match maybe_reply { + Some(reply) => { + udp_writer + .send((reply.payload, reply.source, reply.destination)) + .await + .context("failed to write udp reply into userspace stack")?; + } + None => break, + }, + maybe_datagram = udp_reader.next() => match maybe_datagram { + Some((payload, local_addr, remote_addr)) => { + match &backend { + SocketBackend::Direct => { + dispatch_direct_udp( + payload, + local_addr, + remote_addr, + reply_tx.clone(), + direct_sessions.clone(), + &mut session_tasks, + ).await?; + } + SocketBackend::Tor(tor_client) => { + if remote_addr.port() != 53 { + debug!(%remote_addr, "dropping non-DNS UDP datagram for tor backend"); + continue; + } + let response = build_tor_dns_response(&payload, tor_client.as_ref()).await?; + reply_tx + .send(UdpReply { + payload: response, + source: remote_addr, + destination: local_addr, + }) + .await + .context("failed to enqueue tor dns response")?; + } + } + } + None => break, + } + } + } + + session_tasks.abort_all(); + while let Some(result) = session_tasks.join_next().await { + match result { + Ok(Ok(())) => {} + Ok(Err(err)) => debug!(?err, "udp session task exited during shutdown"), + Err(err) if err.is_cancelled() => {} + Err(err) => debug!(?err, "udp session task panicked during shutdown"), + } + } + Ok(()) +} + +async fn dispatch_direct_udp( + payload: Vec, + local_addr: SocketAddr, + 
remote_addr: SocketAddr, + reply_tx: mpsc::Sender, + sessions: Arc>>>>, + session_tasks: &mut JoinSet>, +) -> Result<()> { + let key = UdpFlowKey { + local: local_addr, + remote: remote_addr, + }; + let existing = { sessions.lock().await.get(&key).cloned() }; + if let Some(sender) = existing { + if sender.send(payload.clone()).await.is_ok() { + return Ok(()); + } + sessions.lock().await.remove(&key); + } + + let (tx, rx) = mpsc::channel::>(32); + tx.send(payload) + .await + .context("failed to enqueue outbound udp payload")?; + sessions.lock().await.insert(key.clone(), tx); + + session_tasks.spawn(async move { run_direct_udp_session(key, rx, reply_tx, sessions).await }); + Ok(()) +} + +async fn run_direct_udp_session( + key: UdpFlowKey, + mut outbound_rx: mpsc::Receiver>, + reply_tx: mpsc::Sender, + sessions: Arc>>>>, +) -> Result<()> { + let bind_addr = match key.remote { + SocketAddr::V4(_) => SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), + SocketAddr::V6(_) => SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0), + }; + let socket = UdpSocket::bind(bind_addr) + .await + .with_context(|| format!("failed to bind udp socket for {}", key.remote))?; + socket + .connect(key.remote) + .await + .with_context(|| format!("failed to connect udp socket to {}", key.remote))?; + + let mut buf = vec![0u8; 65_535]; + loop { + tokio::select! 
{ + maybe_payload = outbound_rx.recv() => match maybe_payload { + Some(payload) => { + socket + .send(&payload) + .await + .with_context(|| format!("failed to send udp payload to {}", key.remote))?; + } + None => break, + }, + recv = tokio::time::timeout(UDP_IDLE_TIMEOUT, socket.recv(&mut buf)) => match recv { + Ok(Ok(len)) => { + reply_tx + .send(UdpReply { + payload: buf[..len].to_vec(), + source: key.remote, + destination: key.local, + }) + .await + .context("failed to enqueue inbound udp reply")?; + } + Ok(Err(err)) => return Err(err).with_context(|| format!("failed to receive udp response from {}", key.remote)), + Err(_) => break, + } + } + } + + sessions.lock().await.remove(&key); + Ok(()) +} + +fn wireguard_tun_config(config: &WireGuardConfig) -> Result { + parse_tun_config( + &config.interface.address, + config.interface.mtu, + Some("burrow-wireguard"), + ) +} + +fn socket_tun_config( + addresses: &[String], + mtu: Option, + tun_name: Option<&str>, + default_name: &str, +) -> Result { + let default_addresses; + let addresses = if addresses.is_empty() { + default_addresses = vec![DEFAULT_TUN_V4.to_string(), DEFAULT_TUN_V6.to_string()]; + default_addresses.as_slice() + } else { + addresses + }; + parse_tun_config(addresses, mtu, Some(tun_name.unwrap_or(default_name))) +} + +fn parse_tun_config( + addresses: &[String], + mtu: Option, + tun_name: Option<&str>, +) -> Result { + let addresses = addresses + .iter() + .map(|addr| { + addr.parse::() + .with_context(|| format!("invalid tunnel address '{addr}'")) + }) + .collect::>>()?; + + Ok(TunNetworkConfig { + tun_name: tun_name.unwrap_or("burrow-exec").to_string(), + addresses, + mtu: mtu.unwrap_or(DEFAULT_MTU), + }) +} + +fn open_tun_device(config: &TunNetworkConfig) -> Result { + let tun = TunOptions::new() + .name(&config.tun_name) + .no_pi(true) + .tun_excl(true) + .open() + .context("failed to create tun device")?; + Ok(tun.inner.into_inner()) +} + +fn tokio_tun_from_fd(fd: RawFd) -> Result { + let tun = 
unsafe { tun::TunInterface::from_raw_fd(fd) }; + TokioTunInterface::new(tun).context("failed to wrap tun fd in tokio interface") +} + +fn read_inner_tun_config() -> Result { + let raw = env::var(INNER_TUN_CONFIG_ENV).context("missing namespace tun config")?; + serde_json::from_str(&raw).context("invalid namespace tun config") +} + +fn configure_tun_addresses( + iface: &tun::TunInterface, + networks: &[IpNetwork], + mtu: u32, +) -> Result<()> { + for network in networks { + match network { + IpNetwork::V4(net) => { + iface.set_ipv4_addr(net.ip())?; + let netmask = prefix_to_netmask_v4(net.prefix()); + iface.set_netmask(netmask)?; + iface.set_broadcast_addr(broadcast_v4(net.ip(), netmask))?; + } + IpNetwork::V6(net) => iface.add_ipv6_addr(net.ip(), net.prefix())?, + } + } + iface.set_mtu(mtu as i32)?; + Ok(()) +} + +fn install_default_routes(name: &str, networks: &[IpNetwork]) -> Result<()> { + if networks + .iter() + .any(|network| matches!(network, IpNetwork::V4(_))) + { + run_ip(["route", "replace", "default", "dev", name])?; + } + if networks + .iter() + .any(|network| matches!(network, IpNetwork::V6(_))) + { + run_ip(["-6", "route", "replace", "default", "dev", name])?; + } + Ok(()) +} + +fn run_ip(args: [&str; N]) -> Result<()> { + let status = StdCommand::new("ip") + .args(args) + .status() + .context("failed to execute ip command")?; + if !status.success() { + bail!("ip {} failed with status {}", args.join(" "), status); + } + Ok(()) +} + +fn set_inheritable(fd: RawFd) -> Result<()> { + let flags = FdFlag::from_bits_truncate( + fcntl(fd, FcntlArg::F_GETFD).context("failed to query descriptor flags")?, + ); + let flags = flags & !FdFlag::FD_CLOEXEC; + fcntl(fd, FcntlArg::F_SETFD(flags)).context("failed to clear close-on-exec")?; + Ok(()) +} + +async fn await_parent_ready(control_fd: RawFd) -> Result<()> { + tokio::task::spawn_blocking(move || -> Result<()> { + let mut control = unsafe { StdUnixStream::from_raw_fd(control_fd) }; + let mut ack = [0u8; 1]; + 
std::io::Read::read_exact(&mut control, &mut ack) + .context("failed to read namespace ready ack")?; + if ack != *READY_ACK { + bail!("unexpected namespace ready ack"); + } + Ok(()) + }) + .await + .context("failed to join namespace ready wait task")??; + Ok(()) +} + +fn send_tun_fd(control_fd: RawFd, tun_fd: RawFd) -> Result<()> { + let buf = [0u8; 1]; + let iov = [std::io::IoSlice::new(&buf)]; + let fds = [tun_fd]; + sendmsg::<()>( + control_fd, + &iov, + &[ControlMessage::ScmRights(&fds)], + MsgFlags::empty(), + None, + ) + .context("failed to send tun fd to parent")?; + Ok(()) +} + +fn recv_tun_fd(control: &StdUnixStream) -> Result { + let mut buf = [0u8; 1]; + let mut iov = [std::io::IoSliceMut::new(&mut buf)]; + let mut cmsgspace = cmsg_space!([RawFd; 1]); + let msg = recvmsg::<()>( + control.as_raw_fd(), + &mut iov, + Some(&mut cmsgspace), + MsgFlags::empty(), + ) + .context("failed to receive tun fd from namespace child")?; + for cmsg in msg.cmsgs() { + if let ControlMessageOwned::ScmRights(fds) = cmsg { + if let Some(fd) = fds.first() { + return Ok(*fd); + } + } + } + bail!("namespace child did not send a tun fd") +} + +fn ensure_tool(tool: &str) -> Result<()> { + let status = StdCommand::new("sh") + .args(["-lc", &format!("command -v {tool} >/dev/null")]) + .status() + .with_context(|| format!("failed to probe required tool '{tool}'"))?; + if !status.success() { + bail!("required host tool '{tool}' is not available"); + } + Ok(()) +} + +async fn read_optional_payload(path: Option<&Path>) -> Result> { + match path { + Some(path) => tokio::fs::read(path) + .await + .with_context(|| format!("failed to read payload from {}", path.display())), + None => Ok(Vec::new()), + } +} + +async fn read_required_payload(path: Option<&Path>, backend: &str) -> Result> { + let path = path.ok_or_else(|| anyhow!("{backend} exec requires --payload"))?; + tokio::fs::read(path) + .await + .with_context(|| format!("failed to read payload from {}", path.display())) +} + +fn 
parse_wireguard_payload(payload: &[u8], path: Option<&Path>) -> Result { + let payload = str::from_utf8(payload).context("wireguard payload must be valid UTF-8")?; + if let Some(path) = path { + if let Some(ext) = path.extension().and_then(|ext| ext.to_str()) { + return WireGuardConfig::from_content_fmt(payload, ext); + } + } + + WireGuardConfig::from_toml(payload).or_else(|_| WireGuardConfig::from_ini(payload)) +} + +async fn spawn_child(command: &[String]) -> Result { + let mut cmd = Command::new(&command[0]); + if command.len() > 1 { + cmd.args(&command[1..]); + } + cmd.stdin(std::process::Stdio::inherit()); + cmd.stdout(std::process::Stdio::inherit()); + cmd.stderr(std::process::Stdio::inherit()); + cmd.kill_on_drop(true); + cmd.status() + .await + .with_context(|| format!("failed to spawn '{}'", command[0])) +} + +fn child_exit_code(status: ExitStatus) -> Result { + if let Some(code) = status.code() { + return Ok(code); + } + if let Some(signal) = status.signal() { + return Ok(128 + signal); + } + bail!("child process terminated without an exit code"); +} + +fn prefix_to_netmask_v4(prefix: u8) -> Ipv4Addr { + if prefix == 0 { + Ipv4Addr::new(0, 0, 0, 0) + } else { + let mask = (!0u32) << (32 - prefix); + Ipv4Addr::from(mask) + } +} + +fn broadcast_v4(ip: Ipv4Addr, netmask: Ipv4Addr) -> Ipv4Addr { + let ip_u32 = u32::from(ip); + let mask = u32::from(netmask); + Ipv4Addr::from(ip_u32 | !mask) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parses_direct_json_payload() { + let payload = br#"{"address":["10.0.0.2/24"],"mtu":1400,"tun_name":"burrow0"}"#; + let config = DirectConfig::from_payload(payload).unwrap(); + assert_eq!(config.address, vec!["10.0.0.2/24"]); + assert_eq!(config.mtu, Some(1400)); + assert_eq!(config.tun_name.as_deref(), Some("burrow0")); + } + + #[test] + fn socket_tun_config_uses_dual_stack_defaults() { + let config = socket_tun_config(&[], None, None, "burrow-test").unwrap(); + assert_eq!(config.tun_name, "burrow-test"); + 
assert!(config + .addresses + .iter() + .any(|network| matches!(network, IpNetwork::V4(_)))); + assert!(config + .addresses + .iter() + .any(|network| matches!(network, IpNetwork::V6(_)))); + } +} diff --git a/burrow/src/wireguard/iface.rs b/burrow/src/wireguard/iface.rs index 321801b..5b61861 100755 --- a/burrow/src/wireguard/iface.rs +++ b/burrow/src/wireguard/iface.rs @@ -148,7 +148,7 @@ impl Interface { debug!("Routing packet to {}", dst_addr); let Some(idx) = pcbs.find(dst_addr) else { - continue + continue; }; debug!("Found peer:{}", idx); diff --git a/burrow/src/wireguard/noise/handshake.rs b/burrow/src/wireguard/noise/handshake.rs index 2ec0c6a..65136bc 100755 --- a/burrow/src/wireguard/noise/handshake.rs +++ b/burrow/src/wireguard/noise/handshake.rs @@ -9,20 +9,15 @@ use std::{ use aead::{Aead, Payload}; use blake2::{ digest::{FixedOutput, KeyInit}, - Blake2s256, - Blake2sMac, - Digest, + Blake2s256, Blake2sMac, Digest, }; use chacha20poly1305::XChaCha20Poly1305; use rand_core::OsRng; use ring::aead::{Aad, LessSafeKey, Nonce, UnboundKey, CHACHA20_POLY1305}; +use subtle::ConstantTimeEq; use super::{ - errors::WireGuardError, - session::Session, - x25519, - HandshakeInit, - HandshakeResponse, + errors::WireGuardError, session::Session, x25519, HandshakeInit, HandshakeResponse, PacketCookieReply, }; @@ -209,7 +204,7 @@ impl Tai64N { /// Parse a timestamp from a 12 byte u8 slice fn parse(buf: &[u8; 12]) -> Result { if buf.len() < 12 { - return Err(WireGuardError::InvalidTai64nTimestamp) + return Err(WireGuardError::InvalidTai64nTimestamp); } let (sec_bytes, nano_bytes) = buf.split_at(std::mem::size_of::()); @@ -534,11 +529,14 @@ impl Handshake { &hash, )?; - ring::constant_time::verify_slices_are_equal( - self.params.peer_static_public.as_bytes(), - &peer_static_public_decrypted, - ) - .map_err(|_| WireGuardError::WrongKey)?; + if !bool::from( + self.params + .peer_static_public + .as_bytes() + .ct_eq(&peer_static_public_decrypted), + ) { + return 
Err(WireGuardError::WrongKey); + } // initiator.hash = HASH(initiator.hash || msg.encrypted_static) hash = b2s_hash(&hash, packet.encrypted_static); @@ -556,19 +554,22 @@ impl Handshake { let timestamp = Tai64N::parse(×tamp)?; if !timestamp.after(&self.last_handshake_timestamp) { // Possibly a replay - return Err(WireGuardError::WrongTai64nTimestamp) + return Err(WireGuardError::WrongTai64nTimestamp); } self.last_handshake_timestamp = timestamp; // initiator.hash = HASH(initiator.hash || msg.encrypted_timestamp) hash = b2s_hash(&hash, packet.encrypted_timestamp); - self.previous = std::mem::replace(&mut self.state, HandshakeState::InitReceived { - chaining_key, - hash, - peer_ephemeral_public, - peer_index, - }); + self.previous = std::mem::replace( + &mut self.state, + HandshakeState::InitReceived { + chaining_key, + hash, + peer_ephemeral_public, + peer_index, + }, + ); self.format_handshake_response(dst) } @@ -669,7 +670,7 @@ impl Handshake { let local_index = self.cookies.index; if packet.receiver_idx != local_index { - return Err(WireGuardError::WrongIndex) + return Err(WireGuardError::WrongIndex); } // msg.encrypted_cookie = XAEAD(HASH(LABEL_COOKIE || responder.static_public), // msg.nonce, cookie, last_received_msg.mac1) @@ -725,7 +726,7 @@ impl Handshake { dst: &'a mut [u8], ) -> Result<&'a mut [u8], WireGuardError> { if dst.len() < super::HANDSHAKE_INIT_SZ { - return Err(WireGuardError::DestinationBufferTooSmall) + return Err(WireGuardError::DestinationBufferTooSmall); } let (message_type, rest) = dst.split_at_mut(4); @@ -808,7 +809,7 @@ impl Handshake { dst: &'a mut [u8], ) -> Result<(&'a mut [u8], Session), WireGuardError> { if dst.len() < super::HANDSHAKE_RESP_SZ { - return Err(WireGuardError::DestinationBufferTooSmall) + return Err(WireGuardError::DestinationBufferTooSmall); } let state = std::mem::replace(&mut self.state, HandshakeState::None); diff --git a/burrow/src/wireguard/noise/mod.rs b/burrow/src/wireguard/noise/mod.rs index aa06652..86bcc73 
100755 --- a/burrow/src/wireguard/noise/mod.rs +++ b/burrow/src/wireguard/noise/mod.rs @@ -133,9 +133,9 @@ pub enum Packet<'a> { impl Tunnel { #[inline(always)] - pub fn parse_incoming_packet(src: &[u8]) -> Result { + pub fn parse_incoming_packet(src: &[u8]) -> Result, WireGuardError> { if src.len() < 4 { - return Err(WireGuardError::InvalidPacket) + return Err(WireGuardError::InvalidPacket); } // Checks the type, as well as the reserved zero fields @@ -177,7 +177,7 @@ impl Tunnel { pub fn dst_address(packet: &[u8]) -> Option { if packet.is_empty() { - return None + return None; } match packet[0] >> 4 { @@ -201,7 +201,7 @@ impl Tunnel { pub fn src_address(packet: &[u8]) -> Option { if packet.is_empty() { - return None + return None; } match packet[0] >> 4 { @@ -296,7 +296,7 @@ impl Tunnel { self.timer_tick(TimerName::TimeLastDataPacketSent); } self.tx_bytes += src.len(); - return TunnResult::WriteToNetwork(packet) + return TunnResult::WriteToNetwork(packet); } // If there is no session, queue the packet for future retry @@ -320,7 +320,7 @@ impl Tunnel { ) -> TunnResult<'a> { if datagram.is_empty() { // Indicates a repeated call - return self.send_queued_packet(dst) + return self.send_queued_packet(dst); } let mut cookie = [0u8; COOKIE_REPLY_SZ]; @@ -331,7 +331,7 @@ impl Tunnel { Ok(packet) => packet, Err(TunnResult::WriteToNetwork(cookie)) => { dst[..cookie.len()].copy_from_slice(cookie); - return TunnResult::WriteToNetwork(&mut dst[..cookie.len()]) + return TunnResult::WriteToNetwork(&mut dst[..cookie.len()]); } Err(TunnResult::Err(e)) => return TunnResult::Err(e), _ => unreachable!(), @@ -435,7 +435,7 @@ impl Tunnel { let cur_idx = self.current; if cur_idx == new_idx { // There is nothing to do, already using this session, this is the common case - return + return; } if self.sessions[cur_idx % N_SESSIONS].is_none() || self.timers.session_timers[new_idx % N_SESSIONS] @@ -481,7 +481,7 @@ impl Tunnel { force_resend: bool, ) -> TunnResult<'a> { if 
self.handshake.is_in_progress() && !force_resend { - return TunnResult::Done + return TunnResult::Done; } if self.handshake.is_expired() { @@ -540,7 +540,7 @@ impl Tunnel { }; if computed_len > packet.len() { - return TunnResult::Err(WireGuardError::InvalidPacket) + return TunnResult::Err(WireGuardError::InvalidPacket); } self.timer_tick(TimerName::TimeLastDataPacketReceived); diff --git a/burrow/src/wireguard/noise/rate_limiter.rs b/burrow/src/wireguard/noise/rate_limiter.rs index ff19efd..e4fde02 100755 --- a/burrow/src/wireguard/noise/rate_limiter.rs +++ b/burrow/src/wireguard/noise/rate_limiter.rs @@ -8,23 +8,13 @@ use aead::{generic_array::GenericArray, AeadInPlace, KeyInit}; use chacha20poly1305::{Key, XChaCha20Poly1305}; use parking_lot::Mutex; use rand_core::{OsRng, RngCore}; -use ring::constant_time::verify_slices_are_equal; +use subtle::ConstantTimeEq; use super::{ handshake::{ - b2s_hash, - b2s_keyed_mac_16, - b2s_keyed_mac_16_2, - b2s_mac_24, - LABEL_COOKIE, - LABEL_MAC1, + b2s_hash, b2s_keyed_mac_16, b2s_keyed_mac_16_2, b2s_mac_24, LABEL_COOKIE, LABEL_MAC1, }, - HandshakeInit, - HandshakeResponse, - Packet, - TunnResult, - Tunnel, - WireGuardError, + HandshakeInit, HandshakeResponse, Packet, TunnResult, Tunnel, WireGuardError, }; const COOKIE_REFRESH: u64 = 128; // Use 128 and not 120 so the compiler can optimize out the division @@ -136,7 +126,7 @@ impl RateLimiter { dst: &'a mut [u8], ) -> Result<&'a mut [u8], WireGuardError> { if dst.len() < super::COOKIE_REPLY_SZ { - return Err(WireGuardError::DestinationBufferTooSmall) + return Err(WireGuardError::DestinationBufferTooSmall); } let (message_type, rest) = dst.split_at_mut(4); @@ -185,8 +175,9 @@ impl RateLimiter { let (mac1, mac2) = macs.split_at(16); let computed_mac1 = b2s_keyed_mac_16(&self.mac1_key, msg); - verify_slices_are_equal(&computed_mac1[..16], mac1) - .map_err(|_| TunnResult::Err(WireGuardError::InvalidMac))?; + if !bool::from(computed_mac1[..16].ct_eq(mac1)) { + return 
Err(TunnResult::Err(WireGuardError::InvalidMac)); + } if self.is_under_load() { let addr = match src_addr { @@ -198,11 +189,11 @@ impl RateLimiter { let cookie = self.current_cookie(addr); let computed_mac2 = b2s_keyed_mac_16_2(&cookie, msg, mac1); - if verify_slices_are_equal(&computed_mac2[..16], mac2).is_err() { + if !bool::from(computed_mac2[..16].ct_eq(mac2)) { let cookie_packet = self .format_cookie_reply(sender_idx, cookie, mac1, dst) .map_err(TunnResult::Err)?; - return Err(TunnResult::WriteToNetwork(cookie_packet)) + return Err(TunnResult::WriteToNetwork(cookie_packet)); } } } diff --git a/burrow/src/wireguard/noise/session.rs b/burrow/src/wireguard/noise/session.rs index 8988728..14c191b 100755 --- a/burrow/src/wireguard/noise/session.rs +++ b/burrow/src/wireguard/noise/session.rs @@ -88,11 +88,11 @@ impl ReceivingKeyCounterValidator { fn will_accept(&self, counter: u64) -> Result<(), WireGuardError> { if counter >= self.next { // As long as the counter is growing no replay took place for sure - return Ok(()) + return Ok(()); } if counter + N_BITS < self.next { // Drop if too far back - return Err(WireGuardError::InvalidCounter) + return Err(WireGuardError::InvalidCounter); } if !self.check_bit(counter) { Ok(()) @@ -107,22 +107,22 @@ impl ReceivingKeyCounterValidator { fn mark_did_receive(&mut self, counter: u64) -> Result<(), WireGuardError> { if counter + N_BITS < self.next { // Drop if too far back - return Err(WireGuardError::InvalidCounter) + return Err(WireGuardError::InvalidCounter); } if counter == self.next { // Usually the packets arrive in order, in that case we simply mark the bit and // increment the counter self.set_bit(counter); self.next += 1; - return Ok(()) + return Ok(()); } if counter < self.next { // A packet arrived out of order, check if it is valid, and mark if self.check_bit(counter) { - return Err(WireGuardError::InvalidCounter) + return Err(WireGuardError::InvalidCounter); } self.set_bit(counter); - return Ok(()) + return 
Ok(()); } // Packets where dropped, or maybe reordered, skip them and mark unused if counter - self.next >= N_BITS { @@ -247,7 +247,7 @@ impl Session { panic!("The destination buffer is too small"); } if packet.receiver_idx != self.receiving_index { - return Err(WireGuardError::WrongIndex) + return Err(WireGuardError::WrongIndex); } // Don't reuse counters, in case this is a replay attack we want to quickly // check the counter without running expensive decryption diff --git a/burrow/src/wireguard/noise/timers.rs b/burrow/src/wireguard/noise/timers.rs index 1d0cf1f..f713e6f 100755 --- a/burrow/src/wireguard/noise/timers.rs +++ b/burrow/src/wireguard/noise/timers.rs @@ -190,7 +190,7 @@ impl Tunnel { { if self.handshake.is_expired() { - return TunnResult::Err(WireGuardError::ConnectionExpired) + return TunnResult::Err(WireGuardError::ConnectionExpired); } // Clear cookie after COOKIE_EXPIRATION_TIME @@ -206,7 +206,7 @@ impl Tunnel { tracing::error!("CONNECTION_EXPIRED(REJECT_AFTER_TIME * 3)"); self.handshake.set_expired(); self.clear_all(); - return TunnResult::Err(WireGuardError::ConnectionExpired) + return TunnResult::Err(WireGuardError::ConnectionExpired); } if let Some(time_init_sent) = self.handshake.timer() { @@ -219,7 +219,7 @@ impl Tunnel { tracing::error!("CONNECTION_EXPIRED(REKEY_ATTEMPT_TIME)"); self.handshake.set_expired(); self.clear_all(); - return TunnResult::Err(WireGuardError::ConnectionExpired) + return TunnResult::Err(WireGuardError::ConnectionExpired); } if time_init_sent.elapsed() >= REKEY_TIMEOUT { @@ -299,11 +299,11 @@ impl Tunnel { } if handshake_initiation_required { - return self.format_handshake_initiation(dst, true) + return self.format_handshake_initiation(dst, true); } if keepalive_required { - return self.encapsulate(&[], dst) + return self.encapsulate(&[], dst); } TunnResult::Done diff --git a/burrow/src/wireguard/pcb.rs b/burrow/src/wireguard/pcb.rs index 974d84e..6e5e6c0 100755 --- a/burrow/src/wireguard/pcb.rs +++ 
b/burrow/src/wireguard/pcb.rs @@ -64,7 +64,7 @@ impl PeerPcb { let guard = self.socket.read().await; let Some(socket) = guard.as_ref() else { self.open_if_closed().await?; - continue + continue; }; let mut res_buf = [0; 1500]; // tracing::debug!("{} : waiting for readability on {:?}", rid, socket); @@ -72,7 +72,7 @@ impl PeerPcb { Ok(l) => l, Err(e) => { log::error!("{}: error reading from socket: {:?}", rid, e); - continue + continue; } }; let mut res_dat = &res_buf[..len]; @@ -88,7 +88,7 @@ impl PeerPcb { TunnResult::Done => break, TunnResult::Err(e) => { tracing::error!(message = "Decapsulate error", error = ?e); - break + break; } TunnResult::WriteToNetwork(packet) => { tracing::debug!("WriteToNetwork: {:?}", packet); @@ -102,17 +102,29 @@ impl PeerPcb { .await?; tracing::debug!("WriteToNetwork done"); res_dat = &[]; - continue + continue; } TunnResult::WriteToTunnelV4(packet, addr) => { tracing::debug!("WriteToTunnelV4: {:?}, {:?}", packet, addr); - tun_interface.read().await.as_ref().ok_or(anyhow::anyhow!("tun interface does not exist"))?.send(packet).await?; - break + tun_interface + .read() + .await + .as_ref() + .ok_or(anyhow::anyhow!("tun interface does not exist"))? + .send(packet) + .await?; + break; } TunnResult::WriteToTunnelV6(packet, addr) => { tracing::debug!("WriteToTunnelV6: {:?}, {:?}", packet, addr); - tun_interface.read().await.as_ref().ok_or(anyhow::anyhow!("tun interface does not exist"))?.send(packet).await?; - break + tun_interface + .read() + .await + .as_ref() + .ok_or(anyhow::anyhow!("tun interface does not exist"))? 
+ .send(packet) + .await?; + break; } } } @@ -134,7 +146,7 @@ impl PeerPcb { let handle = self.socket.read().await; let Some(socket) = handle.as_ref() else { tracing::error!("No socket for peer"); - return Ok(()) + return Ok(()); }; tracing::debug!("Our Encapsulated packet: {:?}", packet); socket.send(packet).await?; @@ -157,7 +169,7 @@ impl PeerPcb { let handle = self.socket.read().await; let Some(socket) = handle.as_ref() else { tracing::error!("No socket for peer"); - return Ok(()) + return Ok(()); }; socket.send(packet).await?; tracing::debug!("Sent Packet for timer update"); diff --git a/contributors.nix b/contributors.nix new file mode 100644 index 0000000..60501d1 --- /dev/null +++ b/contributors.nix @@ -0,0 +1,91 @@ +{ + groups = { + users = "burrow-users"; + admins = "burrow-admins"; + linear = { + owners = "linear-owners"; + admins = "linear-admins"; + guests = "linear-guests"; + }; + }; + + identities = { + contact = { + displayName = "Burrow"; + canonicalEmail = "contact@burrow.net"; + isAdmin = true; + forgeAuthorized = true; + bootstrapAuthentik = true; + sshPublicKeyPath = ./nixos/keys/contact_at_burrow_net.pub; + roles = [ + "operator" + "forge-admin" + ]; + }; + + conrad = { + displayName = "Conrad Kramer"; + canonicalEmail = "conrad@burrow.net"; + isAdmin = true; + forgeAuthorized = false; + bootstrapAuthentik = true; + roles = [ + "operator" + "founder" + ]; + }; + + jett = { + displayName = "Jett"; + canonicalEmail = "jett@burrow.net"; + isAdmin = true; + forgeAuthorized = false; + forgeUnixUser = true; + bootstrapAuthentik = true; + sshPublicKeyPath = ./nixos/keys/jett_at_burrow_net.pub; + roles = [ + "member" + "operator" + "forge-admin" + ]; + }; + + davnotdev = { + displayName = "David"; + canonicalEmail = "davnotdev@burrow.net"; + isAdmin = true; + forgeAuthorized = false; + bootstrapAuthentik = true; + roles = [ + "member" + "operator" + "forge-admin" + ]; + }; + + agent = { + displayName = "Burrow Agent"; + canonicalEmail = 
"agent@burrow.net"; + isAdmin = false; + forgeAuthorized = true; + bootstrapAuthentik = false; + sshPublicKeyPath = ./nixos/keys/agent_at_burrow_net.pub; + roles = [ + "automation" + ]; + }; + + ui-test = { + displayName = "Burrow UI Test"; + canonicalEmail = "ui-test@burrow.net"; + isAdmin = false; + forgeAuthorized = false; + bootstrapAuthentik = true; + authentikPasswordSecret = "burrowAuthentikUiTestPassword"; + roles = [ + "testing" + "apple-ui" + ]; + }; + }; +} diff --git a/docs/FORWARDEMAIL.md b/docs/FORWARDEMAIL.md index d7ffb34..798f3e5 100644 --- a/docs/FORWARDEMAIL.md +++ b/docs/FORWARDEMAIL.md @@ -26,14 +26,11 @@ Forward Email also documents these operational constraints: ## Burrow Secret Layout -Authoritative secrets now live in: +Present in `intake/` today: -- `secrets/forwardemail/api-token.age` -- `secrets/forwardemail/hetzner-s3-user.age` -- `secrets/forwardemail/hetzner-s3-secret.age` - -Legacy plaintext `intake/` files may still exist locally for debugging, but the -tooling now prefers the age-encrypted files above. 
+- `intake/forwardemail_api_token.txt` +- `intake/hetzner-s3-user.txt` +- `intake/hetzner-s3-secret.txt` - Hetzner public S3 endpoint for Forward Email: `https://hel1.your-objectstorage.com` - Hetzner object storage region: `hel1` - Hetzner bucket used for Forward Email backups: `burrow` @@ -72,12 +69,12 @@ Example: ```sh Tools/forwardemail-custom-s3.sh \ --domain burrow.net \ - --api-token-file secrets/forwardemail/api-token.age \ + --api-token-file intake/forwardemail_api_token.txt \ --s3-endpoint https://hel1.your-objectstorage.com \ --s3-region hel1 \ --s3-bucket burrow \ - --s3-access-key-file secrets/forwardemail/hetzner-s3-user.age \ - --s3-secret-key-file secrets/forwardemail/hetzner-s3-secret.age + --s3-access-key-file intake/hetzner-s3-user.txt \ + --s3-secret-key-file intake/hetzner-s3-secret.txt ``` Retest an existing domain configuration without rewriting it: @@ -85,7 +82,7 @@ Retest an existing domain configuration without rewriting it: ```sh Tools/forwardemail-custom-s3.sh \ --domain burrow.net \ - --api-token-file secrets/forwardemail/api-token.age \ + --api-token-file intake/forwardemail_api_token.txt \ --test-only ``` diff --git a/docs/GETTING_STARTED.md b/docs/GETTING_STARTED.md index 764c219..346f7e7 100644 --- a/docs/GETTING_STARTED.md +++ b/docs/GETTING_STARTED.md @@ -98,10 +98,14 @@ code burrow You can run burrow on the command line with cargo: ``` -cargo run +sudo -E cargo run -- start ``` -Cargo will ask for your password because burrow needs permission in order to create a tunnel. +Creating the tunnel requires elevated privileges. Regular checks and tests can run without `sudo`: + +``` +cargo test --workspace --all-features +``` diff --git a/docs/GTK_APP.md b/docs/GTK_APP.md index ef73d2b..582b0a2 100644 --- a/docs/GTK_APP.md +++ b/docs/GTK_APP.md @@ -15,7 +15,7 @@ Note that the flatpak version can compile but will not run properly! 1. 
Install build dependencies ``` - sudo apt install -y clang meson cmake pkg-config libgtk-4-dev libadwaita-1-dev gettext desktop-file-utils + sudo apt install -y clang meson cmake pkg-config libssl-dev libgtk-4-dev libadwaita-1-dev gettext desktop-file-utils ``` 2. Install flatpak builder (Optional) @@ -38,7 +38,7 @@ Note that the flatpak version can compile but will not run properly! 1. Install build dependencies ``` - sudo dnf install -y clang ninja-build cmake meson gtk4-devel glib2-devel libadwaita-devel desktop-file-utils libappstream-glib + sudo dnf install -y clang ninja-build cmake meson openssl-devel gtk4-devel glib2-devel libadwaita-devel desktop-file-utils libappstream-glib ``` 2. Install flatpak builder (Optional) @@ -61,7 +61,7 @@ Note that the flatpak version can compile but will not run properly! 1. Install build dependencies ``` - sudo xbps-install -Sy gcc clang meson cmake pkg-config gtk4-devel gettext desktop-file-utils gtk4-update-icon-cache appstream-glib + sudo xbps-install -Sy gcc clang meson cmake pkg-config openssl-devel gtk4-devel gettext desktop-file-utils gtk4-update-icon-cache appstream-glib ``` 2. Install flatpak builder (Optional) @@ -88,6 +88,12 @@ flatpak install --user \ ## Building +With Nix, enter the focused GTK shell before running the Meson build: + +```bash +nix develop .#gtk +``` +
General @@ -139,6 +145,16 @@ flatpak install --user \ ## Running +The GTK app mirrors the Apple home surface: a Burrow header, Networks carousel, +Accounts section, Tunnel action, and the same add flows for WireGuard, Tor, and +Tailnet. It talks to the daemon over the same gRPC API used by Apple clients for +network storage, tunnel state, Tailnet discovery, authority probing, browser +sign-in, and Tailnet payloads. + +On Linux the GTK app first looks for a daemon on the configured gRPC socket. If +none is reachable, it starts an embedded user-scoped daemon with a socket under +`XDG_RUNTIME_DIR` and a database under `XDG_DATA_HOME` before refreshing the UI. +
General diff --git a/docs/PROTOCOL_ROADMAP.md b/docs/PROTOCOL_ROADMAP.md index 6bfde42..37c7228 100644 --- a/docs/PROTOCOL_ROADMAP.md +++ b/docs/PROTOCOL_ROADMAP.md @@ -3,7 +3,7 @@ Burrow currently has two tunnel paths in-tree: - a WireGuard data plane -- a mesh transport built on `iroh` +- a Tor-backed userspace TCP path What it does not have yet is a transport-neutral control plane that can honestly claim full MASQUE `CONNECT-IP` or full Tailscale-style negotiation parity. This repository now contains the beginnings of that layer: diff --git a/docs/TOR.md b/docs/TOR.md deleted file mode 100644 index 81b8a1a..0000000 --- a/docs/TOR.md +++ /dev/null @@ -1,41 +0,0 @@ -# Tor Transport - -Burrow now has a `Tor` network type that boots an in-process [Arti](https://gitlab.torproject.org/tpo/core/arti) client and exposes a transparent TCP listener for outbound stream forwarding. - -The first implementation is intentionally narrow: - -- `tcp_stack.kind = "system"` is the only supported TCP stack backend. -- transparent destination recovery uses Linux `SO_ORIGINAL_DST` and macOS PF lookups. -- on macOS, Burrow first tries PF `DIOCNATLOOK`, then falls back to a `pflog0` observer backed by an in-memory flow cache keyed by the redirected socket tuple. -- Burrow does not yet install firewall redirect rules for you. -- traffic reaches Arti only if the host already redirects outbound TCP flows to Burrow's local listener. -- the macOS observer fallback only works when the redirect rule is logged to `pflog0` and Burrow listens on an explicit local address such as `127.0.0.1:9040`. -- destination handling is IP-and-port based, so this does not yet capture DNS or `.onion` names before local resolution. -- Burrow still does not install loop-avoidance rules for Arti's own relay connections, so redirect rules must exempt those flows externally for now. - -## Payload format - -`Network.payload` can be JSON or TOML. 
- -```json -{ - "address": ["100.64.0.2/32"], - "tun_name": "burrow-tor", - "mtu": 1400, - "arti": { - "state_dir": "/var/lib/burrow/arti/state", - "cache_dir": "/var/cache/burrow/arti" - }, - "tcp_stack": { - "kind": "system", - "listen": "127.0.0.1:9040" - } -} -``` - -## Next steps - -- teach Burrow to program and tear down redirect rules safely. -- add loop-avoidance for Arti's own relay connections before enabling automatic redirect. -- add DNS capture or hostname-aware forwarding for `.onion` and other unresolved destinations. -- add alternate pure-Rust TCP stack backends behind the same `tcp_stack` enum. diff --git a/docs/WIREGUARD_LINEAGE.md b/docs/WIREGUARD_LINEAGE.md index 15ca67a..63e8839 100644 --- a/docs/WIREGUARD_LINEAGE.md +++ b/docs/WIREGUARD_LINEAGE.md @@ -15,7 +15,7 @@ Burrow does not embed BoringTun unchanged. - The original device layer was replaced with Burrow-specific interface and peer control blocks in `burrow/src/wireguard/iface.rs` and `burrow/src/wireguard/pcb.rs`. - Configuration handling was rewritten around Burrow's own INI parser and config model in `burrow/src/wireguard/config.rs`. - The daemon now resolves the active runtime from the database-backed network list rather than from a single static WireGuard payload. -- Burrow added its own runtime switching path so WireGuard and mesh transports can share one daemon lifecycle. +- Burrow added its own runtime switching path so WireGuard can share one daemon lifecycle with the rest of the managed runtime system. ## What Was Improved @@ -23,7 +23,7 @@ The lifted code has been tightened further in-repo. - Deprecated constant-time comparisons were replaced with `subtle`. - Network ordering and runtime selection are now deterministic and test-covered. -- The Burrow runtime can swap between WireGuard and mesh-backed networks without restarting the daemon process itself. +- The Burrow runtime can swap between WireGuard configurations without restarting the daemon process itself. 
## Why This Matters diff --git a/evolution/README.md b/evolution/README.md index e55a347..794b1fe 100644 --- a/evolution/README.md +++ b/evolution/README.md @@ -58,3 +58,17 @@ evolution/ ``` Use ASCII Markdown. Keep metadata at the top of each proposal so tooling and future agents can parse it quickly. + +## BEP Helper + +Use the `bep` helper under `Scripts/` to browse or list proposals: + +- `Scripts/bep` opens a quick browser for `evolution/`. +- `Scripts/bep list --status Draft` lists proposals by status. +- `Scripts/bep open BEP-0005` opens a proposal in `$EDITOR`. + +Validate proposal metadata with: + +```bash +python3 Scripts/check-bep-metadata.py +``` diff --git a/evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md b/evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md new file mode 100644 index 0000000..a34a609 --- /dev/null +++ b/evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md @@ -0,0 +1,81 @@ +# `BEP-0005` - Daemon IPC and Apple Boundary + +```text +Status: Draft +Proposal: BEP-0005 +Authors: gpt-5.4 +Coordinator: gpt-5.4 +Reviewers: Pending +Constitution Sections: II, III, IV, V +Implementation PRs: Pending +Decision Date: Pending +``` + +## Summary + +Burrow should formalize one Apple/runtime boundary: Apple clients speak only to the daemon over gRPC on the app-group Unix socket, and the daemon owns all external control-plane, helper-process, and runtime coordination work. This prevents UI code from accreting side HTTP paths or ad hoc control-plane integrations that bypass the system Burrow is supposed to own. + +## Motivation + +- The current Tailnet work already showed the failure mode: Swift UI code started reaching around the daemon boundary to talk to helper HTTP endpoints directly. +- Apple-specific process ownership is easy to blur between the app, the network extension, and helper daemons unless the contract is explicit. 
+- If Burrow wants a durable multi-runtime architecture, the daemon must remain the only orchestration boundary between clients and control/data-plane behavior. + +## Detailed Design + +- Apple UI and Apple support libraries may call only daemon gRPC methods over the declared Burrow Unix socket. +- Direct Swift calls to external control-plane HTTP APIs, localhost helper HTTP servers, or runtime-specific subprocesses are forbidden. +- The daemon is responsible for: + - discovery of Tailnet authorities and related metadata + - control-plane session setup and tracking + - login/session lifecycle brokering + - runtime start/stop/reconcile + - translating helper or bridge processes into stable daemon RPCs +- `burrow/src/control/` owns transport-neutral control-plane semantics such as discovery, authority normalization, and request/response shaping. +- Apple UI owns presentation only: + - forms + - local state + - presenting returned auth URLs or statuses + - surfacing daemon availability and errors +- Any new Apple-facing runtime capability requires a daemon RPC first. + +## Security and Operational Considerations + +- Keeping control-plane I/O out of Swift UI reduces accidental secret, token, and callback sprawl across app code. +- The daemon boundary makes testing and kill-switch behavior tractable because runtime integration is localized. +- Apple daemon lifecycle ownership must be explicit: either the app ensures the daemon is running before RPC or the extension owns it and the UI surfaces daemon-unavailable state clearly. +- Non-Apple presentation clients should follow the same daemon-first lifecycle pattern: connect to a managed daemon when present, or start a user-scoped embedded daemon before issuing RPCs, without adding platform-local control-plane paths. + +## Contributor Playbook + +- Before adding a new Apple-side workflow, identify the daemon RPC that should own it. 
+- If the RPC does not exist, add the protocol shape in `proto/burrow.proto`, implement it in the daemon, and only then wire Swift UI. +- Verify that no Swift UI or support code calls external control-plane HTTP endpoints directly. +- For Tailnet and similar flows, test: + - daemon unavailable behavior + - successful RPC path + - error propagation through the UI +- Keep Linux GTK and Apple clients visually and functionally aligned around the same daemon-backed home surface: Networks, Accounts, Tunnel, and add flows should remain corresponding views over the daemon API. + +## Alternatives Considered + +- Let Apple UI call control-plane endpoints directly for convenience. Rejected because it creates parallel orchestration paths and breaks the daemon contract. +- Allow one-off exceptions for login helpers. Rejected because those exceptions become the architecture. + +## Impact on Other Work + +- Governs the Tailnet refactor and future Apple runtime work. +- Governs Linux GTK daemon startup parity where the same daemon API is reused from a user-scoped presentation process. +- Interacts with BEP-0002 control-plane bootstrap and BEP-0003 transport refactoring. + +## Decision + +Pending. + +## References + +- `Apple/UI/` +- `Apple/Core/` +- `Apple/NetworkExtension/` +- `burrow/src/daemon/` +- `burrow/src/control/` diff --git a/evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md b/evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md new file mode 100644 index 0000000..36458ef --- /dev/null +++ b/evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md @@ -0,0 +1,74 @@ +# `BEP-0006` - Tailnet Authority-First Control Plane + +```text +Status: Draft +Proposal: BEP-0006 +Authors: gpt-5.4 +Coordinator: gpt-5.4 +Reviewers: Pending +Constitution Sections: I, II, IV, V +Implementation PRs: Pending +Decision Date: Pending +``` + +## Summary + +Burrow should treat Tailnet as one protocol family. 
Tailscale-managed and self-hosted Headscale-style deployments differ by authority, policy, and auth details, not by a distinct user-facing protocol. Burrow’s config and UI should therefore be authority-first rather than provider-first. + +## Motivation + +- Splitting Tailscale and Headscale into separate user-facing providers causes fake architectural divergence. +- Discovery already naturally returns an authority and optional issuer; that is the stable contract users actually need. +- Future managed or enterprise deployments should fit the same model without requiring another protocol picker. + +## Detailed Design + +- Tailnet configuration is centered on: + - account + - identity + - authority/login server URL + - optional tailnet name + - optional hostname + - auth method/material +- User-facing surfaces should not force a protocol choice between Tailscale and Headscale. +- Provider inference may remain internal metadata for compatibility and diagnostics: + - default managed Tailscale authority + - custom self-hosted authority + - Burrow-owned authority when explicitly applicable +- Discovery returns authority and related metadata; editing the authority is the mechanism that moves a configuration from managed default to custom control server. +- The daemon and control layer own provider inference; the UI should primarily present “Tailnet” plus the selected authority. +- Platform clients consume the same daemon gRPC surface for Tailnet discovery, authority probing, browser sign-in, and saved network payloads. macOS/iOS SwiftUI and Linux GTK may differ in presentation and local credential stores, but neither should introduce a second control-plane path. + +## Security and Operational Considerations + +- Authority-first config reduces UI complexity and makes misconfiguration easier to reason about. +- Provider-specific assumptions must not leak into packet or control-plane semantics unless the authority actually requires them. 
+- Auth material must remain authority-scoped and identity-scoped in daemon storage. + +## Contributor Playbook + +- Remove provider pickers from Tailnet UI unless a concrete protocol difference requires one. +- Store the authority explicitly in payloads and infer provider internally only when needed. +- Keep Linux GTK and Apple clients at functional parity by routing Tailnet add/discover/probe/login through `TailnetControl` and `Networks` RPCs instead of platform-local HTTP or legacy JSON daemon commands. +- Prefer tests that validate authority normalization and discovery behavior over UI-provider branching. + +## Alternatives Considered + +- Keep separate user-facing providers for Tailscale and Headscale. Rejected because it models deployment shape as protocol shape. +- Collapse all control planes into one opaque Burrow provider. Rejected because the authority still matters operationally and diagnostically. + +## Impact on Other Work + +- Refines BEP-0002’s Tailscale-shaped control-plane work. +- Constrains the Tailnet Apple and Linux GTK refactors plus future daemon control-plane storage. + +## Decision + +Pending. + +## References + +- `burrow/src/control/` +- `Apple/UI/Networks/` +- `burrow-gtk/src/` +- `proto/burrow.proto` diff --git a/evolution/proposals/BEP-0007-identity-registry-and-operator-bootstrap.md b/evolution/proposals/BEP-0007-identity-registry-and-operator-bootstrap.md new file mode 100644 index 0000000..1fde0fb --- /dev/null +++ b/evolution/proposals/BEP-0007-identity-registry-and-operator-bootstrap.md @@ -0,0 +1,73 @@ +# `BEP-0007` - Identity Registry and Operator Bootstrap + +```text +Status: Draft +Proposal: BEP-0007 +Authors: gpt-5.4 +Coordinator: gpt-5.4 +Reviewers: Pending +Constitution Sections: II, III, IV, V +Implementation PRs: Pending +Decision Date: Pending +``` + +## Summary + +Burrow should maintain one canonical registry for project identities, aliases, bootstrap users, SSH keys, and admin-group mappings. 
Forgejo, Authentik, and related bootstrap configuration should derive from that registry instead of hardcoding overlapping identity facts in multiple modules. + +## Motivation + +- Burrow currently hardcodes operator and admin/bootstrap user facts directly in host configuration. +- Multi-account and self-hosted identity are becoming core architecture, not incidental infra details. +- A single registry reduces drift across Forgejo, Authentik, Headscale, SSH authorization, and future control-plane bootstrap. + +## Detailed Design + +- Add a root-level identity registry (`contributors.nix`) as the canonical source of truth for: + - usernames + - display names + - canonical emails + - external source emails or aliases + - admin scope + - bootstrap eligibility + - forge authorized SSH keys + - named roles +- Consume that registry from host configuration for: + - Forgejo authorized keys + - Forgejo bootstrap admin defaults + - Authentik bootstrap users + - Burrow user/admin group names +- Future work may derive contributor docs, OIDC bootstrap, and additional runtime configuration from the same registry. + +## Security and Operational Considerations + +- Identity drift is a security bug when it affects admin groups, bootstrap accounts, or SSH authorization. +- The registry stores metadata only; secrets remain in agenix or other declared secret paths. +- Changes to the registry should receive explicit review because they affect access and governance. + +## Contributor Playbook + +- Edit `contributors.nix` first when changing operator, admin, alias, or bootstrap identity state. +- Derive runtime configuration from the registry instead of duplicating the same facts elsewhere. +- Keep secret references separate from identity metadata. + +## Alternatives Considered + +- Continue hardcoding users in module options. Rejected because drift is inevitable once Forgejo, Authentik, and Headscale all depend on the same identities. +- Create separate per-service user lists. 
Rejected because it duplicates governance facts and weakens review. + +## Impact on Other Work + +- Supports forge auth, Authentik group sync, and future multi-account Burrow control-plane work. +- Creates the basis for stronger contributor and operator provenance later. + +## Decision + +Pending. + +## References + +- `contributors.nix` +- `nixos/hosts/burrow-forge/default.nix` +- `nixos/modules/burrow-authentik.nix` +- `nixos/modules/burrow-forge.nix` diff --git a/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md b/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md new file mode 100644 index 0000000..0ce03a6 --- /dev/null +++ b/evolution/proposals/BEP-0008-authentik-backed-team-chat-and-workspace-sso.md @@ -0,0 +1,169 @@ +# `BEP-0008` - Authentik-Backed Team Chat and Workspace Identity + +```text +Status: Draft +Proposal: BEP-0008 +Authors: gpt-5.4 +Coordinator: gpt-5.4 +Reviewers: Pending +Constitution Sections: II, III, V +Implementation PRs: Pending +Decision Date: Pending +``` + +## Summary + +Burrow should add a self-hosted team chat surface at `chat.burrow.net` and +continue the project-wide move toward Authentik as the identity authority for +external work systems. The immediate targets are a self-hosted Zulip +deployment rooted in Authentik SAML, a Linear SAML configuration when the +workspace plan supports it, and a 1Password Unlock-with-SSO deployment rooted +in the same Authentik-backed OIDC authority. + +This keeps Burrow's day-to-day coordination surfaces aligned with the same +admin groups, canonical users, and secret-handling model already used for +Forgejo, Headscale, and Tailscale. It also avoids fragmenting login state +across vendor-native Google auth flows when Burrow already operates an IdP. + +## Motivation + +- Forge, Tailnet, operator identity, and Tailscale custom OIDC are already + rooted in Authentik. 
Team chat, work tracking, and password-manager access + should not become separate authority islands. +- Zulip provides a self-hosted chat system under Burrow's control, which fits + the constitution better than adding another hosted chat dependency. +- Linear remains a SaaS dependency, but its workspace access should still be + derived from Burrow-managed identities and domains when the vendor plan + exposes SAML configuration. +- 1Password Business is another external work surface where Burrow-controlled + identities are preferable to vendor-native Google-only auth. Its current + vendor flow is OIDC-based Unlock with SSO rather than SAML, so the proposal + needs to preserve protocol accuracy instead of flattening everything into + one SAML bucket. +- Burrow already has a canonical public identity registry and a secret-backed + external-email alias map. Reusing that structure is lower-risk than + inventing per-app user bootstrap logic. + +## Detailed Design + +- Add a Burrow-managed Zulip workload on the forge host at `chat.burrow.net`. + The deployment should be repo-owned and rebuildable from Nix, even if the + runtime uses vendor-supported container images internally. +- Prefer host-managed NixOS services for Zulip's stateful dependencies + (PostgreSQL, Redis, RabbitMQ, memcached, backups) so Burrow owns the + operational surface directly rather than composing a container-side service + mesh. +- Zulip should authenticate through Authentik SAML rather than local passwords + as the primary path. Initial bootstrap may still keep an operational escape + hatch while the deployment is being validated. +- Add Authentik-managed SAML applications for: + - Zulip at `chat.burrow.net` + - Linear using Burrow's claimed domains and Authentik metadata +- Add an Authentik-managed SCIM backchannel for Linear so Burrow can push + role groups declaratively instead of hand-maintaining workspace roles. 
+- Add an Authentik-managed OIDC application for 1Password Business under the + Burrow team sign-in address. +- Treat Zulip and Linear as downstream applications of the same identity + authority, and treat 1Password as part of that same authority even though + its vendor protocol is OIDC rather than SAML. The source of truth remains: + - public identities and admin intent in `contributors.nix` + - private alias mappings and external accounts in agenix-encrypted secrets +- Keep app-specific configuration in dedicated reconciliation code or module + options instead of hand-edited UI state. +- Prefer service-specific reconciliation over ad hoc manual setup so rebuilds + and host replacement converge automatically. +- When Burrow wants an external-user launcher surface in Authentik, configure + the brand's `default_application` explicitly instead of relying on + `/if/user/`, which otherwise remains internal-user-only. +- Derive Linear SCIM role groups from Burrow's canonical identity metadata. + If Burrow-wide admin intent says a user is an operator/admin, the repo-owned + configuration should map that intent onto the Linear push group without a + second manual roster. +- Model 1Password according to the vendor's actual integration contract: + - OIDC Authorization Code Flow with PKCE + - public client rather than a confidential client + - no Burrow-side dependence on a stored client secret unless the vendor flow + changes + +## Security and Operational Considerations + +- Do not store external personal email mappings in public registry files. + Public tree data may include Burrow usernames and canonical `@burrow.net` + addresses, but external aliases must stay in encrypted secrets. +- Zulip internal service credentials, Django secret material, and any mail + credentials must have explicit storage and rotation paths. +- Linear SAML must not become Burrow's only admin recovery path. 
At least one + owner login path outside the enforced SAML flow should remain available until + rollout is proven. +- Linear SCIM group push should be role-scoped and explicit. Burrow should + avoid blanket ownership mapping unless that intent is recorded in the repo. +- 1Password Owners cannot be forced onto Unlock with SSO during initial setup. + Burrow should preserve the owner recovery path and treat OIDC rollout as a + scoped migration for non-owner users first. +- If Zulip is deployed without production-grade outbound email at first, that + limitation must be documented and treated as an operational constraint, not a + hidden assumption. +- Rollback should be straightforward: + - disable or stop the Zulip module + - remove the Authentik SAML apps + - remove the Authentik OIDC app used for 1Password if necessary + - leave the underlying Burrow identities unchanged + +## Contributor Playbook + +- Define the app and identity intent in the repository before modifying the + forge host. +- Add or update Nix modules so `burrow-forge` can rebuild Zulip and the + corresponding Authentik SAML configuration from the tree. +- Verify: + - `chat.burrow.net` serves a working Zulip login surface + - Authentik exposes working metadata for Zulip and Linear + - Authentik exposes a working OIDC issuer for 1Password + - users in Burrow admin groups receive the expected access on first login + - external Burrow users landing on `auth.burrow.net` reach the intended + app launcher target instead of the internal-only Authentik user interface +- Record concrete evidence for: + - host deployment generation + - Authentik reconciliation success + - Zulip login success + - Linear SAML configuration state + - 1Password Unlock with SSO configuration state + +## Alternatives Considered + +- Use Zulip Cloud instead of self-hosting. Rejected because the ask is to host + chat under `chat.burrow.net`, and Burrow already operates a forge host with a + self-managed identity plane. 
+- Keep Linear on Google-native login. Rejected because it leaves Burrow work + access outside the project's operator and group model. +- Treat 1Password as a SAML app for consistency. Rejected because the live + vendor flow is OIDC and Burrow should not pretend otherwise in repo-owned + infrastructure. +- Add per-app manual Authentik configuration without repository automation. + Rejected because it violates Burrow's infrastructure-in-repo commitment. + +## Impact on Other Work + +- Extends Burrow's Authentik role from control-plane identity into team-work + surfaces. +- Introduces a persistent chat workload on the forge host, with resource and + monitoring implications. +- Creates a likely follow-up for SCIM or richer group synchronization if Linear + or Zulip role mapping needs to become fully declarative later. +- Adds a second OIDC relying party beyond Forgejo, Headscale, and Tailscale, + which raises the importance of keeping Burrow's Authentik scope mappings and + redirect handling consistent across applications. + +## Decision + +Pending. 
+ +## References + +- `CONSTITUTION.md` +- `contributors.nix` +- `evolution/proposals/BEP-0004-hosted-mail-and-saas-identity.md` +- Authentik docs: SAML provider and metadata endpoints +- Zulip docs: SAML authentication and docker deployment +- Linear docs: SAML and access control +- 1Password docs: Unlock with SSO using OpenID Connect diff --git a/flake.lock b/flake.lock index 6f7f20c..0067dab 100644 --- a/flake.lock +++ b/flake.lock @@ -12,12 +12,15 @@ "locked": { "lastModified": 1770165109, "narHash": "sha256-9VnK6Oqai65puVJ4WYtCTvlJeXxMzAp/69HhQuTdl/I=", - "type": "tarball", - "url": "https://codeload.github.com/ryantm/agenix/tar.gz/main" + "owner": "ryantm", + "repo": "agenix", + "rev": "b027ee29d959fda4b60b57566d64c98a202e0feb", + "type": "github" }, "original": { - "type": "tarball", - "url": "https://codeload.github.com/ryantm/agenix/tar.gz/main" + "owner": "ryantm", + "repo": "agenix", + "type": "github" } }, "darwin": { @@ -49,8 +52,8 @@ ] }, "locked": { - "lastModified": 1773506317, - "narHash": "sha256-qWKbLUJpavIpvOdX1fhHYm0WGerytFHRoh9lVck6Bh0=", + "lastModified": 1773889306, + "narHash": "sha256-PAqwnsBSI9SVC2QugvQ3xeYCB0otOwCacB1ueQj2tgw=", "type": "tarball", "url": "https://codeload.github.com/nix-community/disko/tar.gz/master" }, @@ -120,13 +123,37 @@ "url": "https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable" } }, + "nsc-autoscaler": { + "inputs": { + "flake-utils": [ + "flake-utils" + ], + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1775221037, + "narHash": "sha256-tv6Y3cqn76PEyZpSMMItVW96KKIboovBWTOv5Lt7PXg=", + "ref": "refs/heads/main", + "rev": "2c485752fde28ec3be2f228b571d1906f4bcf917", + "revCount": 10, + "type": "git", + "url": "https://compatible.systems/conrad/nsc-autoscaler.git" + }, + "original": { + "type": "git", + "url": "https://compatible.systems/conrad/nsc-autoscaler.git" + } + }, "root": { "inputs": { "agenix": "agenix", "disko": "disko", "flake-utils": "flake-utils", "hcloud-upload-image-src": 
"hcloud-upload-image-src", - "nixpkgs": "nixpkgs" + "nixpkgs": "nixpkgs", + "nsc-autoscaler": "nsc-autoscaler" } }, "systems": { diff --git a/flake.nix b/flake.nix index ed59619..e842fba 100644 --- a/flake.nix +++ b/flake.nix @@ -5,20 +5,25 @@ nixpkgs.url = "tarball+https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable"; flake-utils.url = "tarball+https://codeload.github.com/numtide/flake-utils/tar.gz/main"; agenix = { - url = "tarball+https://codeload.github.com/ryantm/agenix/tar.gz/main"; + url = "github:ryantm/agenix"; inputs.nixpkgs.follows = "nixpkgs"; }; disko = { url = "tarball+https://codeload.github.com/nix-community/disko/tar.gz/master"; inputs.nixpkgs.follows = "nixpkgs"; }; + nsc-autoscaler = { + url = "git+https://compatible.systems/conrad/nsc-autoscaler.git"; + inputs.nixpkgs.follows = "nixpkgs"; + inputs.flake-utils.follows = "flake-utils"; + }; hcloud-upload-image-src = { url = "tarball+https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0"; flake = false; }; }; - outputs = { self, nixpkgs, flake-utils, agenix, disko, hcloud-upload-image-src }: + outputs = { self, nixpkgs, flake-utils, agenix, disko, nsc-autoscaler, hcloud-upload-image-src }: let supportedSystems = [ "x86_64-linux" @@ -33,10 +38,8 @@ inherit system; }; lib = pkgs.lib; - agenixPkg = agenix.packages.${system}.agenix; commonPackages = with pkgs; [ cargo - sccache rustc rustfmt clippy @@ -54,7 +57,7 @@ nscPkg = if pkgs.stdenv.isLinux || pkgs.stdenv.isDarwin then let - version = "0.0.484"; + version = "0.0.452"; osName = if pkgs.stdenv.isLinux then "linux" @@ -68,18 +71,18 @@ arch = "amd64"; hash = if pkgs.stdenv.isLinux then - "sha256-sT4YWSjQ7dU6/QV+vucm1ARSXf5yIcAtHoCYxbXJpRs=" + "sha256-FBqOJ0UQWTv2r4HWMHrR/aqFzDa0ej/mS8dSoaCe6fY=" else - "sha256-u0pSyUQw0IJcIipkLtm0MemD9BFO2/ZoAlBuFpfX1HI="; + "sha256-3fRKWO0SCCa5PEym5yCB7dtyEx3xSxXSHfJYz8B+/4M="; } else if pkgs.stdenv.hostPlatform.isAarch64 then { arch = "arm64"; hash = if pkgs.stdenv.isLinux then - 
"sha256-n3nOIBjGnHdNUhfWD7QHvGOW+DdrZaNlfatj4o17NvM=" + "sha256-A6twO8Ievbu7Gi5Hqon4ug5rCGOm/uHhlCya3px6+io=" else - "sha256-8k2Jby6HCPClBaSGUrqIKP6MioVFrGD6HwAsjKZSSQA="; + "sha256-n363xLaGhy+a6lw2F+WicQYGXnGYnqRW8aTQCSppwcw="; } else throw "nsc: unsupported host platform ${pkgs.stdenv.hostPlatform.system}"; @@ -91,6 +94,7 @@ pkgs.stdenvNoCC.mkDerivation { pname = "nsc"; inherit version src; + meta.mainProgram = "nsc"; dontConfigure = true; dontBuild = true; unpackPhase = '' @@ -141,13 +145,41 @@ subPackages = [ "./cmd/forgejo-nsc-autoscaler" ]; vendorHash = "sha256-Kpr+5Q7Dy4JiLuJVZbFeJAzLR7PLPYxhtJqfxMEytcs="; }; + burrowSrc = lib.cleanSourceWith { + src = ./.; + filter = path: type: + let + p = toString path; + name = builtins.baseNameOf path; + hasDir = dir: lib.hasInfix "/${dir}/" p || lib.hasSuffix "/${dir}" p; + in + !(hasDir ".git" || hasDir "target" || hasDir "node_modules" || name == "result"); + }; + burrowPkg = pkgs.rustPlatform.buildRustPackage { + pname = "burrow"; + version = "0.1.0"; + src = burrowSrc; + cargoLock = { + lockFile = ./Cargo.lock; + outputHashes = { + "tracing-oslog-0.1.2" = "sha256-DjJDiPCTn43zJmmOfuRnyti8iQf9qoXICMKIx4bAG3I="; + }; + }; + cargoBuildFlags = [ + "-p" + "burrow" + "--bin" + "burrow" + ]; + nativeBuildInputs = [ pkgs.protobuf ]; + meta.mainProgram = "burrow"; + }; in { devShells.default = pkgs.mkShell { packages = commonPackages ++ [ - agenixPkg hcloudUploadImagePkg forgejoNscDispatcher forgejoNscAutoscaler @@ -159,7 +191,6 @@ packages = commonPackages ++ [ - agenixPkg hcloudUploadImagePkg ] ++ lib.optionals (nscPkg != null) [ nscPkg ]; @@ -169,7 +200,8 @@ packages = { - agenix = agenixPkg; + agenix = agenix.packages.${system}.agenix; + burrow = burrowPkg; hcloud-upload-image = hcloudUploadImagePkg; forgejo-nsc-dispatcher = forgejoNscDispatcher; forgejo-nsc-autoscaler = forgejoNscAutoscaler; @@ -179,13 +211,14 @@ // { nixosModules.burrow-forge = import ./nixos/modules/burrow-forge.nix; nixosModules.burrow-forge-runner = 
import ./nixos/modules/burrow-forge-runner.nix; - nixosModules.burrow-forgejo-nsc = import ./nixos/modules/burrow-forgejo-nsc.nix; - + nixosModules.burrow-forgejo-nsc = nsc-autoscaler.nixosModules.default; + nixosModules.burrow-authentik = import ./nixos/modules/burrow-authentik.nix; + nixosModules.burrow-headscale = import ./nixos/modules/burrow-headscale.nix; + nixosModules.burrow-zulip = import ./nixos/modules/burrow-zulip.nix; nixosConfigurations.burrow-forge = nixpkgs.lib.nixosSystem { system = "x86_64-linux"; specialArgs = { inherit self; - agenixPackage = agenix.packages.x86_64-linux.agenix; }; modules = [ agenix.nixosModules.default diff --git a/nixos/README.md b/nixos/README.md index ebdb2dc..23907f3 100644 --- a/nixos/README.md +++ b/nixos/README.md @@ -9,41 +9,48 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B - `hosts/burrow-forge/default.nix`: host entrypoint - `modules/burrow-forge.nix`: Forgejo, Caddy, PostgreSQL, and admin bootstrap module - `modules/burrow-forge-runner.nix`: Forgejo Actions runner and agent identity bootstrap -- `modules/burrow-forgejo-nsc.nix`: Namespace-backed ephemeral Forgejo runner services +- upstream `compatible.systems/conrad/nsc-autoscaler`: Namespace-backed ephemeral Forgejo runner module consumed via the Burrow flake input +- `modules/burrow-authentik.nix`: minimal Authentik IdP for Burrow control planes +- `modules/burrow-headscale.nix`: Headscale control plane rooted in Authentik OIDC +- `../secrets.nix`: agenix recipient map for tracked Burrow forge secrets - `hetzner-cloud-config.yaml`: desired Hetzner host shape - `keys/contact_at_burrow_net.pub`: initial operator SSH public key - `keys/agent_at_burrow_net.pub`: automation SSH public key - `../Scripts/hetzner-forge.sh`: Hetzner inventory and replace workflow - `../Scripts/nsc-build-and-upload-image.sh`: temporary Namespace builder -> raw image -> Hetzner snapshot -- `../Scripts/bootstrap-forge-intake.sh`: legacy intake bootstrap 
helper; current forge runtime secrets should live in `../secrets/forgejo/*.age` -- `../Scripts/check-forge-host.sh`: verify Forgejo, Caddy, the local runner, and optional NSC services after boot +- `../Scripts/bootstrap-forge-intake.sh`: copy the Forgejo bootstrap password and agent SSH key into `/var/lib/burrow/intake/` +- `../Scripts/check-forge-host.sh`: verify Forgejo, Caddy, the local runner, optional NSC services, and optional Tailnet services after boot - `../Scripts/cloudflare-upsert-a-record.sh`: upsert DNS-only Cloudflare `A` records for Burrow host cutovers - `../Scripts/forge-deploy.sh`: remote `nixos-rebuild` entrypoint for the forge host -- `../Scripts/provision-forgejo-nsc.sh`: render Burrow Namespace dispatcher/autoscaler bootstrap inputs and ensure the default Forgejo scope exists -- `../secrets/forgejo/*.age`: authoritative encrypted forge admin password, agent SSH key, and Namespace runtime configs for the forge host +- `../Scripts/provision-forgejo-nsc.sh`: render Burrow Namespace dispatcher/autoscaler runtime inputs and ensure the default Forgejo scope exists +- `../Scripts/seal-forgejo-nsc-secrets.sh`: encrypt forgejo-nsc runtime inputs into the agenix secrets consumed by `burrow-forge` ## Intended Flow 1. Build and upload the raw NixOS image with `Scripts/hetzner-forge.sh build-image` or `Scripts/nsc-build-and-upload-image.sh`. 2. Recreate `burrow-forge` from the latest labeled snapshot with `Scripts/hetzner-forge.sh recreate-from-image --yes`. -3. Encrypt the Forgejo admin password and agent SSH key into `secrets/forgejo/{admin-password,agent-ssh-key}.age`. -4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account from the agenix secret path. +3. Run `Scripts/bootstrap-forge-intake.sh` to place the Forgejo bootstrap password file and automation SSH key under `/var/lib/burrow/intake/`. +4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account. 5. 
Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent `. -6. Run `Scripts/provision-forgejo-nsc.sh` locally to refresh `secrets/forgejo/{nsc-token,nsc-dispatcher-config,nsc-autoscaler-config}.age`, then deploy with `Scripts/forge-deploy.sh` so agenix updates the live forgejo-nsc runtime paths. -7. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME. -8. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace. -9. Configure Forward Email custom S3 backups for `burrow.net` and `burrow.rs` out-of-band with `Tools/forwardemail-custom-s3.sh`. +6. Run `Scripts/provision-forgejo-nsc.sh` locally to refresh `intake/forgejo_nsc_token.txt`, `intake/forgejo_nsc_dispatcher.yaml`, and `intake/forgejo_nsc_autoscaler.yaml`. +7. Run `Scripts/seal-forgejo-nsc-secrets.sh` to encrypt those runtime inputs into the agenix secrets used by `burrow-forge`. +8. Ensure `/var/lib/agenix/agenix.key` exists on the host, encrypt `secrets/infra/authentik.env.age`, `secrets/infra/authentik-google-client-id.age`, `secrets/infra/authentik-google-client-secret.age`, `secrets/infra/forgejo-oidc-client-secret.age`, `secrets/infra/headscale-oidc-client-secret.age`, `secrets/infra/forgejo-nsc-token.age`, `secrets/infra/forgejo-nsc-dispatcher-config.age`, and `secrets/infra/forgejo-nsc-autoscaler-config.age`, and let agenix materialize them under `/run/agenix/`. +9. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, `auth.burrow.net`, `ts.burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME. +10. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace. +11. 
Configure Forward Email custom S3 backups for `burrow.net` and `burrow.rs` out-of-band with `Tools/forwardemail-custom-s3.sh`. ## Current Constraints -- `burrow-forge` is live on NixOS in `hel1` at `89.167.47.21`, and `Scripts/check-forge-host.sh --expect-nsc` passes locally against that host. +- `burrow-forge` is live on NixOS in `hel1` at `89.167.47.21`. +- `services.forgejo-nsc` now expects agenix-backed runtime inputs at `/run/agenix/burrowForgejoNscToken`, `/run/agenix/burrowForgejoNscDispatcherConfig`, and `/run/agenix/burrowForgejoNscAutoscalerConfig`. +- Authentik and Headscale secrets now live in tracked agenix blobs under `secrets/infra/` and decrypt to `/run/agenix/` on the forge host. - Public Burrow forge cutover completed on March 15, 2026: - `burrow.net`, `git.burrow.net`, and `nsc-autoscaler.burrow.net` now publish public `A` records to `89.167.47.21` - HTTP redirects to HTTPS on all three names - `https://burrow.net` returns the root forge landing response - `https://git.burrow.net` returns the live Forgejo front door - `https://nsc-autoscaler.burrow.net` terminates TLS on Caddy and returns the expected application-level `404` for `/` -- The Cloudflare token now lives in `secrets/cloudflare/api-token.age`; the current token is account-scoped: `POST /accounts//tokens/verify` succeeds, while `POST /user/tokens/verify` returns `Invalid API Token`. +- The Cloudflare token currently in `intake/cloudflare-token.txt` is an account-scoped token: `POST /accounts//tokens/verify` succeeds, while `POST /user/tokens/verify` returns `Invalid API Token`. - `burrow.rs` still resolves publicly to a Vercel `DEPLOYMENT_NOT_FOUND` response. - Both domains publish Forward Email MX/TXT records. - Forward Email custom S3 is live on both domains against the Hetzner `burrow` bucket and the public regional endpoint `https://hel1.your-objectstorage.com`. 
diff --git a/nixos/hosts/burrow-forge/default.nix b/nixos/hosts/burrow-forge/default.nix index 0ce7964..c4fc92e 100644 --- a/nixos/hosts/burrow-forge/default.nix +++ b/nixos/hosts/burrow-forge/default.nix @@ -1,4 +1,56 @@ -{ config, self, ... }: +{ config, lib, pkgs, self, ... }: + +let + contributors = import ../../../contributors.nix; + identities = contributors.identities; + linearGroups = contributors.groups.linear; + stripNewline = value: lib.replaceStrings [ "\n" ] [ "" ] value; + authentikPasswordSecretPath = identity: + if identity ? authentikPasswordSecret + then config.age.secrets.${identity.authentikPasswordSecret}.path + else null; + bootstrapUsers = lib.mapAttrsToList + ( + username: identity: { + inherit username; + name = identity.displayName; + email = identity.canonicalEmail; + isAdmin = identity.isAdmin or false; + groups = lib.optionals (identity.isAdmin or false) [ linearGroups.owners ]; + passwordFile = authentikPasswordSecretPath identity; + } + ) + (lib.filterAttrs (_: identity: identity.bootstrapAuthentik or false) identities); + headscaleBootstrapUsers = lib.mapAttrsToList + ( + username: identity: { + name = username; + displayName = identity.displayName; + email = identity.canonicalEmail; + } + ) + (lib.filterAttrs (_: identity: identity.bootstrapAuthentik or false) identities); + forgeUnixUsernames = + builtins.attrNames (lib.filterAttrs (_: identity: identity.forgeUnixUser or false) identities); + forgeUnixUsers = lib.genAttrs forgeUnixUsernames (username: + let + identity = identities.${username}; + sshKeys = lib.optional (identity ? 
sshPublicKeyPath) (stripNewline (builtins.readFile identity.sshPublicKeyPath)); + in + { + isNormalUser = true; + createHome = true; + home = "/home/${username}"; + shell = pkgs.bashInteractive; + extraGroups = lib.optional (identity.isAdmin or false) "wheel"; + openssh.authorizedKeys.keys = sshKeys; + }); + forgeUnixAdminUsernames = + builtins.attrNames (lib.filterAttrs (_: identity: (identity.forgeUnixUser or false) && (identity.isAdmin or false)) identities); + forgeAuthorizedKeys = map + (username: builtins.readFile identities.${username}.sshPublicKeyPath) + (builtins.attrNames (lib.filterAttrs (_: identity: identity.forgeAuthorized or false) identities)); +in { imports = [ @@ -7,6 +59,9 @@ self.nixosModules.burrow-forge self.nixosModules.burrow-forge-runner self.nixosModules.burrow-forgejo-nsc + self.nixosModules.burrow-authentik + self.nixosModules.burrow-headscale + self.nixosModules.burrow-zulip ]; system.stateVersion = "24.11"; @@ -18,64 +73,203 @@ "flakes" ]; + users.users = forgeUnixUsers; + + security.sudo.extraRules = lib.map (username: { + users = [ username ]; + commands = [ + { + command = "ALL"; + options = [ "NOPASSWD" ]; + } + ]; + }) forgeUnixAdminUsernames; + + environment.systemPackages = lib.optionals config.services.forgejo-nsc.enable [ + self.packages.${pkgs.stdenv.hostPlatform.system}.nsc + ]; + + age.identityPaths = [ "/var/lib/agenix/agenix.key" ]; + age.secrets.burrowAuthentikEnv = { + file = ../../../secrets/infra/authentik.env.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + age.secrets.burrowHeadscaleOidcClientSecret = { + file = ../../../secrets/infra/headscale-oidc-client-secret.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + age.secrets.burrowForgejoOidcClientSecret = { + file = ../../../secrets/infra/forgejo-oidc-client-secret.age; + owner = "forgejo"; + group = "forgejo"; + mode = "0440"; + }; + age.secrets.burrowTailscaleOidcClientSecret = { + file = 
../../../secrets/infra/tailscale-oidc-client-secret.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + age.secrets.burrowLinearScimToken = { + file = ../../../secrets/infra/linear-scim-token.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + age.secrets.burrowAuthentikGoogleClientId = { + file = ../../../secrets/infra/authentik-google-client-id.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + age.secrets.burrowAuthentikGoogleClientSecret = { + file = ../../../secrets/infra/authentik-google-client-secret.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + age.secrets.burrowAuthentikGoogleAccountMap = { + file = ../../../secrets/infra/authentik-google-account-map.json.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + age.secrets.burrowAuthentikUiTestPassword = { + file = ../../../secrets/infra/authentik-ui-test-password.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + age.secrets.burrowForgejoNscToken = { + file = ../../../secrets/infra/forgejo-nsc-token.age; + owner = "forgejo-nsc"; + group = "forgejo-nsc"; + mode = "0400"; + }; + age.secrets.burrowForgejoNscDispatcherConfig = { + file = ../../../secrets/infra/forgejo-nsc-dispatcher-config.age; + owner = "forgejo-nsc"; + group = "forgejo-nsc"; + mode = "0400"; + }; + age.secrets.burrowForgejoNscAutoscalerConfig = { + file = ../../../secrets/infra/forgejo-nsc-autoscaler-config.age; + owner = "forgejo-nsc"; + group = "forgejo-nsc"; + mode = "0400"; + }; + + age.secrets.burrowZulipPostgresPassword = { + file = ../../../secrets/infra/zulip-postgres-password.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + + age.secrets.burrowZulipRabbitmqPassword = { + file = ../../../secrets/infra/zulip-rabbitmq-password.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + + age.secrets.burrowZulipRedisPassword = { + file = ../../../secrets/infra/zulip-redis-password.age; + owner = "root"; + group = "root"; + mode = 
"0400"; + }; + + age.secrets.burrowZulipSecretKey = { + file = ../../../secrets/infra/zulip-secret-key.age; + owner = "root"; + group = "root"; + mode = "0400"; + }; + + networking.extraHosts = '' + 127.0.0.1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net chat.burrow.net nsc-autoscaler.burrow.net + ::1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net chat.burrow.net nsc-autoscaler.burrow.net + ''; + services.burrow.forge = { enable = true; - adminPasswordFile = config.age.secrets.forgejoAdminPassword.path; - authorizedKeys = [ - (builtins.readFile ../../keys/contact_at_burrow_net.pub) - (builtins.readFile ../../keys/agent_at_burrow_net.pub) - ]; + contactEmail = identities.contact.canonicalEmail; + adminUsername = "contact"; + adminEmail = identities.contact.canonicalEmail; + adminPasswordFile = "/var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt"; + oidcAdminGroup = contributors.groups.admins; + oidcRestrictedGroup = contributors.groups.users; + oidcClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path; + authorizedKeys = forgeAuthorizedKeys; }; services.burrow.forgeRunner = { enable = true; - sshPrivateKeyFile = config.age.secrets.forgejoAgentSshKey.path; + sshPrivateKeyFile = "/var/lib/burrow/intake/agent_at_burrow_net_ed25519"; + labels = [ + "self-hosted" + "linux" + "x86_64" + "burrow-forge" + ]; }; - age.secrets.forgejoAdminPassword = { - file = ../../../secrets/forgejo/admin-password.age; - mode = "0400"; - owner = "forgejo"; - group = "forgejo"; - }; - - age.secrets.forgejoAgentSshKey = { - file = ../../../secrets/forgejo/agent-ssh-key.age; - mode = "0400"; - owner = "root"; - group = "root"; - }; - - age.secrets.forgejoNscToken = { - file = ../../../secrets/forgejo/nsc-token.age; - mode = "0400"; - owner = "forgejo-nsc"; - group = "forgejo-nsc"; - }; - - age.secrets.forgejoNscDispatcherConfig = { - file = ../../../secrets/forgejo/nsc-dispatcher-config.age; - mode = "0400"; - owner = "forgejo-nsc"; - group = 
"forgejo-nsc"; - }; - - age.secrets.forgejoNscAutoscalerConfig = { - file = ../../../secrets/forgejo/nsc-autoscaler-config.age; - mode = "0400"; - owner = "forgejo-nsc"; - group = "forgejo-nsc"; - }; - - services.burrow.forgejoNsc = { + services.forgejo-nsc = { enable = true; - nscTokenFile = config.age.secrets.forgejoNscToken.path; + nscTokenFile = config.age.secrets.burrowForgejoNscToken.path; dispatcher = { - configFile = config.age.secrets.forgejoNscDispatcherConfig.path; + configFile = config.age.secrets.burrowForgejoNscDispatcherConfig.path; }; autoscaler = { enable = true; - configFile = config.age.secrets.forgejoNscAutoscalerConfig.path; + configFile = config.age.secrets.burrowForgejoNscAutoscalerConfig.path; }; }; + + services.burrow.authentik = { + enable = true; + envFile = config.age.secrets.burrowAuthentikEnv.path; + forgejoClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path; + headscaleClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path; + tailscaleClientSecretFile = config.age.secrets.burrowTailscaleOidcClientSecret.path; + defaultExternalApplicationSlug = "tailscale"; + googleClientIDFile = config.age.secrets.burrowAuthentikGoogleClientId.path; + googleClientSecretFile = config.age.secrets.burrowAuthentikGoogleClientSecret.path; + googleAccountMapFile = config.age.secrets.burrowAuthentikGoogleAccountMap.path; + googleLoginMode = "redirect"; + userGroupName = contributors.groups.users; + adminGroupName = contributors.groups.admins; + tailscaleAccessGroupName = contributors.groups.users; + bootstrapUsers = bootstrapUsers; + linearAcsUrl = "https://api.linear.app/auth/sso/d0ca13dc-ac41-4824-8aab-e0ca352fc3de/acs"; + linearAudience = "https://auth.linear.app/sso/d0ca13dc-ac41-4824-8aab-e0ca352fc3de"; + linearDefaultRelayState = "https://linear.app/auth/sso/d0ca13dc-ac41-4824-8aab-e0ca352fc3de"; + linearScimUrl = "https://api.linear.app/auth/scim/d0ca13dc-ac41-4824-8aab-e0ca352fc3de"; + linearScimTokenFile = 
config.age.secrets.burrowLinearScimToken.path; + linearScimUserIdentifier = "email"; + linearOwnerGroupName = linearGroups.owners; + linearAdminGroupName = linearGroups.admins; + linearGuestGroupName = linearGroups.guests; + zulipAccessGroupName = contributors.groups.users; + }; + + services.burrow.headscale = { + enable = true; + oidcClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path; + bootstrapUsers = headscaleBootstrapUsers; + }; + + services.burrow.zulip = { + enable = true; + administratorEmail = identities.contact.canonicalEmail; + postgresPasswordFile = config.age.secrets.burrowZulipPostgresPassword.path; + rabbitmqPasswordFile = config.age.secrets.burrowZulipRabbitmqPassword.path; + redisPasswordFile = config.age.secrets.burrowZulipRedisPassword.path; + secretKeyFile = config.age.secrets.burrowZulipSecretKey.path; + }; } diff --git a/nixos/keys/jett_at_burrow_net.pub b/nixos/keys/jett_at_burrow_net.pub new file mode 100644 index 0000000..36c85ee --- /dev/null +++ b/nixos/keys/jett_at_burrow_net.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMe960j6TC869F6RvElpICxlBauIT3E0uLyy0m7n70ZC diff --git a/nixos/modules/burrow-authentik.nix b/nixos/modules/burrow-authentik.nix new file mode 100644 index 0000000..977b641 --- /dev/null +++ b/nixos/modules/burrow-authentik.nix @@ -0,0 +1,1072 @@ +{ config, lib, pkgs, ... 
}: + +let + cfg = config.services.burrow.authentik; + runtimeDir = "/run/burrow-authentik"; + envFile = "${runtimeDir}/authentik.env"; + blueprintDir = "${runtimeDir}/blueprints"; + blueprintFile = "${blueprintDir}/burrow-authentik.yaml"; + postgresVolume = "burrow-authentik-postgresql:/var/lib/postgresql/data"; + dataVolume = "burrow-authentik-data:/data"; + directorySyncScript = ../../Scripts/authentik-sync-burrow-directory.sh; + forgejoOidcSyncScript = ../../Scripts/authentik-sync-forgejo-oidc.sh; + tailscaleOidcSyncScript = ../../Scripts/authentik-sync-tailscale-oidc.sh; + onePasswordOidcSyncScript = ../../Scripts/authentik-sync-1password-oidc.sh; + zulipSamlSyncScript = ../../Scripts/authentik-sync-zulip-saml.sh; + linearSamlSyncScript = ../../Scripts/authentik-sync-linear-saml.sh; + linearScimSyncScript = ../../Scripts/authentik-sync-linear-scim.sh; + googleSourceSyncScript = ../../Scripts/authentik-sync-google-source.sh; + tailnetAuthFlowSyncScript = ../../Scripts/authentik-sync-tailnet-auth-flow.sh; + authentikBlueprint = pkgs.writeText "burrow-authentik-blueprint.yaml" '' + version: 1 + metadata: + name: Burrow Authentik + labels: + blueprints.goauthentik.io/description: Minimal Burrow Authentik applications + entries: + - model: authentik_providers_oauth2.scopemapping + id: burrow-oidc-email + identifiers: + name: Burrow OIDC Email + attrs: + name: Burrow OIDC Email + scope_name: email + description: Verified email mapping for Burrow + expression: | + return { + "email": request.user.email, + "email_verified": True, + } + + - model: authentik_providers_oauth2.scopemapping + id: burrow-oidc-groups + identifiers: + name: Burrow OIDC Groups + attrs: + name: Burrow OIDC Groups + scope_name: groups + description: Group membership mapping for Burrow + expression: | + return { + "groups": [group.name for group in request.user.ak_groups.all()], + } + + - model: authentik_providers_oauth2.oauth2provider + id: burrow-oidc-provider-ts + identifiers: + name: Burrow 
Tailnet + attrs: + authorization_flow: !Find [authentik_flows.flow, [slug, default-provider-authorization-implicit-consent]] + invalidation_flow: !Find [authentik_flows.flow, [slug, default-provider-invalidation-flow]] + issuer_mode: per_provider + slug: ${cfg.headscaleProviderSlug} + client_type: confidential + client_id: ${cfg.headscaleDomain} + client_secret: !Env [AUTHENTIK_BURROW_TS_CLIENT_SECRET, ""] + include_claims_in_id_token: true + redirect_uris: + - matching_mode: strict + url: https://${cfg.headscaleDomain}/oidc/callback + property_mappings: + - !Find [authentik_providers_oauth2.scopemapping, [managed, goauthentik.io/providers/oauth2/scope-openid]] + - !KeyOf burrow-oidc-email + - !KeyOf burrow-oidc-groups + - !Find [authentik_providers_oauth2.scopemapping, [managed, goauthentik.io/providers/oauth2/scope-profile]] + signing_key: !Find [authentik_crypto.certificatekeypair, [name, authentik Self-signed Certificate]] + + - model: authentik_core.application + identifiers: + slug: ${cfg.headscaleProviderSlug} + attrs: + name: Burrow Tailnet + slug: ${cfg.headscaleProviderSlug} + provider: !KeyOf burrow-oidc-provider-ts + meta_launch_url: https://${cfg.headscaleDomain}/ + ''; +in +{ + options.services.burrow.authentik = { + enable = lib.mkEnableOption "the Burrow Authentik identity provider"; + + domain = lib.mkOption { + type = lib.types.str; + default = "auth.burrow.net"; + description = "Public Authentik domain."; + }; + + port = lib.mkOption { + type = lib.types.port; + default = 9002; + description = "Local Authentik HTTP listen port."; + }; + + image = lib.mkOption { + type = lib.types.str; + default = "ghcr.io/goauthentik/server:2026.2.1"; + description = "Authentik container image reference."; + }; + + envFile = lib.mkOption { + type = lib.types.str; + default = "/var/lib/burrow/intake/authentik.env"; + description = "Host-local Authentik bootstrap environment file."; + }; + + headscaleDomain = lib.mkOption { + type = lib.types.str; + default = 
"ts.burrow.net"; + description = "Headscale public domain used for the bundled OIDC client."; + }; + + headscaleProviderSlug = lib.mkOption { + type = lib.types.str; + default = "ts"; + description = "Authentik provider slug for Headscale."; + }; + + forgejoDomain = lib.mkOption { + type = lib.types.str; + default = "git.burrow.net"; + description = "Forgejo public domain used for the bundled OIDC client."; + }; + + forgejoProviderSlug = lib.mkOption { + type = lib.types.str; + default = "git"; + description = "Authentik application slug for Forgejo."; + }; + + tailscaleProviderSlug = lib.mkOption { + type = lib.types.str; + default = "tailscale"; + description = "Authentik application slug for Tailscale custom OIDC sign-in."; + }; + + tailscaleClientId = lib.mkOption { + type = lib.types.str; + default = "tailscale.burrow.net"; + description = "Client ID Authentik should present to Tailscale."; + }; + + tailscaleClientSecretFile = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Host-local file containing the Authentik Tailscale OIDC client secret."; + }; + + tailscaleAccessGroupName = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Authentik group that should be allowed to launch the Tailscale application."; + }; + + defaultExternalApplicationSlug = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Authentik application slug that external users should land on instead of /if/user/."; + }; + + onePasswordDomain = lib.mkOption { + type = lib.types.str; + default = "burrow-team.1password.com"; + description = "1Password team sign-in domain used for Burrow Unlock with SSO."; + }; + + onePasswordProviderSlug = lib.mkOption { + type = lib.types.str; + default = "onepassword"; + description = "Authentik application slug for 1Password Unlock with SSO."; + }; + + onePasswordClientId = lib.mkOption { + type = lib.types.str; + default = 
"1password.burrow.net"; + description = "Public OIDC client ID Authentik should present to 1Password."; + }; + + onePasswordRedirectUris = lib.mkOption { + type = lib.types.listOf lib.types.str; + default = [ + "https://burrow-team.1password.com/sso/oidc/redirect/" + "onepassword://sso/oidc/redirect" + ]; + description = "Allowed 1Password OIDC redirect URIs."; + }; + + linearProviderSlug = lib.mkOption { + type = lib.types.str; + default = "linear"; + description = "Authentik application slug for Linear SAML."; + }; + + zulipDomain = lib.mkOption { + type = lib.types.str; + default = "chat.burrow.net"; + description = "Public Zulip domain exposed through Authentik SAML."; + }; + + zulipProviderSlug = lib.mkOption { + type = lib.types.str; + default = "zulip"; + description = "Authentik application slug for Zulip SAML."; + }; + + zulipAcsUrl = lib.mkOption { + type = lib.types.str; + default = "https://${config.services.burrow.authentik.zulipDomain}/complete/saml/"; + description = "Zulip SAML ACS URL."; + }; + + zulipAudience = lib.mkOption { + type = lib.types.str; + default = "https://${config.services.burrow.authentik.zulipDomain}"; + description = "Zulip SAML audience/entity identifier."; + }; + + zulipLaunchUrl = lib.mkOption { + type = lib.types.str; + default = "https://${config.services.burrow.authentik.zulipDomain}/"; + description = "Zulip URL exposed in Authentik."; + }; + + zulipAccessGroupName = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Authentik group allowed to launch Zulip from Burrow SSO surfaces."; + }; + + linearAcsUrl = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Linear SAML ACS URL."; + }; + + linearAudience = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Linear SAML audience/entity identifier."; + }; + + linearLaunchUrl = lib.mkOption { + type = lib.types.str; + default = "https://linear.app/burrownet"; 
+ description = "Linear workspace URL exposed in Authentik."; + }; + + linearDefaultRelayState = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Optional Linear relay state or login URL for IdP-initiated launches."; + }; + + linearScimUrl = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Linear SCIM base connector URL."; + }; + + linearScimTokenFile = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Host-local file containing the Linear SCIM bearer token."; + }; + + linearScimUserIdentifier = lib.mkOption { + type = lib.types.str; + default = "email"; + description = "Linear SCIM unique identifier field for users."; + }; + + linearOwnerGroupName = lib.mkOption { + type = lib.types.str; + default = "linear-owners"; + description = "Authentik group name that should map to Linear owners."; + }; + + linearAdminGroupName = lib.mkOption { + type = lib.types.str; + default = "linear-admins"; + description = "Authentik group name that should map to Linear admins."; + }; + + linearGuestGroupName = lib.mkOption { + type = lib.types.str; + default = "linear-guests"; + description = "Authentik group name that should map to Linear guests."; + }; + + forgejoClientId = lib.mkOption { + type = lib.types.str; + default = "git.burrow.net"; + description = "Client ID Authentik should present to Forgejo."; + }; + + forgejoClientSecretFile = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Host-local file containing the Authentik Forgejo OIDC client secret."; + }; + + headscaleClientSecretFile = lib.mkOption { + type = lib.types.str; + default = "/var/lib/burrow/intake/authentik_headscale_client_secret.txt"; + description = "Host-local file containing the Authentik Headscale OIDC client secret."; + }; + + googleClientIDFile = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = 
"Host-local file containing the Google OAuth client ID for the Authentik source."; + }; + + googleClientSecretFile = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Host-local file containing the Google OAuth client secret for the Authentik source."; + }; + + googleAccountMapFile = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Optional host-local JSON file mapping external Google accounts onto Burrow Authentik users."; + }; + + googleSourceSlug = lib.mkOption { + type = lib.types.str; + default = "google"; + description = "Authentik OAuth source slug used for Google login."; + }; + + googleLoginMode = lib.mkOption { + type = lib.types.enum [ + "promoted" + "redirect" + ]; + default = "redirect"; + description = "Identification-stage behavior for the Google Authentik source."; + }; + + headscaleAuthenticationFlowSlug = lib.mkOption { + type = lib.types.str; + default = "burrow-tailnet-authentication"; + description = "Authentik authentication flow slug used for Burrow Tailnet sign-in."; + }; + + headscaleAuthenticationFlowName = lib.mkOption { + type = lib.types.str; + default = "Burrow Tailnet Authentication"; + description = "Authentik authentication flow name used for Burrow Tailnet sign-in."; + }; + + headscaleIdentificationStageName = lib.mkOption { + type = lib.types.str; + default = "burrow-tailnet-identification-stage"; + description = "Authentik identification stage used for Burrow Tailnet sign-in."; + }; + + headscalePasswordStageName = lib.mkOption { + type = lib.types.str; + default = "burrow-tailnet-password-stage"; + description = "Authentik password stage used for Burrow Tailnet sign-in."; + }; + + headscaleUserLoginStageName = lib.mkOption { + type = lib.types.str; + default = "burrow-tailnet-user-login-stage"; + description = "Authentik user-login stage used for Burrow Tailnet sign-in."; + }; + + userGroupName = lib.mkOption { + type = lib.types.str; + default = 
"burrow-users"; + description = "Authentik group granted baseline Burrow access."; + }; + + adminGroupName = lib.mkOption { + type = lib.types.str; + default = "burrow-admins"; + description = "Authentik group granted Burrow administrator access."; + }; + + bootstrapUsers = lib.mkOption { + type = with lib.types; listOf (submodule { + options = { + username = lib.mkOption { + type = str; + description = "Authentik username."; + }; + name = lib.mkOption { + type = str; + description = "Display name for the user."; + }; + email = lib.mkOption { + type = str; + description = "Canonical email stored in Authentik."; + }; + sourceEmail = lib.mkOption { + type = nullOr str; + default = null; + description = "External Google account email that should map onto this Authentik user."; + }; + groups = lib.mkOption { + type = listOf str; + default = [ ]; + description = "Additional Authentik groups for this user."; + }; + isAdmin = lib.mkOption { + type = bool; + default = false; + description = "Whether this user should be in the Burrow admin group."; + }; + passwordFile = lib.mkOption { + type = nullOr str; + default = null; + description = "Optional host-local file containing a bootstrap password for this user."; + }; + }; + }); + default = [ ]; + description = "Declarative Burrow users to create in Authentik."; + }; + }; + + config = lib.mkIf cfg.enable { + virtualisation.podman.enable = true; + + systemd.tmpfiles.rules = [ + "d ${runtimeDir} 0750 root root -" + "d ${blueprintDir} 0750 root root -" + ]; + + systemd.services.burrow-authentik-runtime = { + description = "Render the Burrow Authentik runtime environment"; + before = [ + "podman-burrow-authentik-postgresql.service" + "podman-burrow-authentik-server.service" + "podman-burrow-authentik-worker.service" + ]; + wantedBy = [ + "podman-burrow-authentik-postgresql.service" + "podman-burrow-authentik-server.service" + "podman-burrow-authentik-worker.service" + ]; + after = lib.optionals 
config.services.burrow.headscale.enable [ + "burrow-headscale-client-secret.service" + ]; + wants = lib.optionals config.services.burrow.headscale.enable [ + "burrow-headscale-client-secret.service" + ]; + path = [ pkgs.coreutils ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + RemainAfterExit = true; + }; + script = '' + set -euo pipefail + + if [ ! -s ${lib.escapeShellArg cfg.envFile} ]; then + echo "Authentik env file missing: ${cfg.envFile}" >&2 + exit 1 + fi + + if [ ! -s ${lib.escapeShellArg cfg.headscaleClientSecretFile} ]; then + echo "Headscale client secret missing: ${cfg.headscaleClientSecretFile}" >&2 + exit 1 + fi + + ${lib.optionalString (cfg.forgejoClientSecretFile != null) '' + if [ ! -s ${lib.escapeShellArg cfg.forgejoClientSecretFile} ]; then + echo "Forgejo client secret missing: ${cfg.forgejoClientSecretFile}" >&2 + exit 1 + fi + ''} + + ${lib.optionalString (cfg.tailscaleClientSecretFile != null) '' + if [ ! -s ${lib.escapeShellArg cfg.tailscaleClientSecretFile} ]; then + echo "Tailscale client secret missing: ${cfg.tailscaleClientSecretFile}" >&2 + exit 1 + fi + ''} + + install -d -m 0750 -o root -g root ${runtimeDir} ${blueprintDir} + install -m 0644 -o root -g root ${authentikBlueprint} ${blueprintFile} + + source ${lib.escapeShellArg cfg.envFile} + + read_secret() { + tr -d '\r\n' < "$1" + } + + cat > ${envFile} </dev/null; then + exit 0 + fi + sleep 2 + done + + echo "Authentik did not become ready on ${cfg.domain}" >&2 + exit 1 + ''; + }; + + systemd.services.burrow-authentik-google-source = lib.mkIf ( + cfg.googleClientIDFile != null && cfg.googleClientSecretFile != null + ) { + description = "Reconcile the Burrow Authentik Google OAuth source"; + after = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + googleSourceSyncScript + cfg.envFile + 
cfg.googleClientIDFile + cfg.googleClientSecretFile + ] ++ lib.optional (cfg.googleAccountMapFile != null) cfg.googleAccountMapFile; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_GOOGLE_SOURCE_SLUG=${lib.escapeShellArg cfg.googleSourceSlug} + export AUTHENTIK_GOOGLE_LOGIN_MODE=${lib.escapeShellArg cfg.googleLoginMode} + export AUTHENTIK_GOOGLE_USER_MATCHING_MODE=email_link + export AUTHENTIK_GOOGLE_CLIENT_ID="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.googleClientIDFile})" + export AUTHENTIK_GOOGLE_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.googleClientSecretFile})" + if [ -n ${lib.escapeShellArg (cfg.googleAccountMapFile or "")} ]; then + export AUTHENTIK_GOOGLE_ACCOUNT_MAP_JSON="$(tr -d '\n' < ${lib.escapeShellArg (cfg.googleAccountMapFile or "/dev/null")})" + else + export AUTHENTIK_GOOGLE_ACCOUNT_MAP_JSON='${builtins.toJSON (map (user: { + source_email = user.sourceEmail; + username = user.username; + email = user.email; + name = user.name; + }) (lib.filter (user: user.sourceEmail != null) cfg.bootstrapUsers))}' + fi + + ${pkgs.bash}/bin/bash ${googleSourceSyncScript} + ''; + }; + + systemd.services.burrow-authentik-directory = lib.mkIf (cfg.bootstrapUsers != [ ]) { + description = "Reconcile Burrow Authentik users and groups"; + after = + [ + "burrow-authentik-ready.service" + "network-online.target" + ] + ++ lib.optionals (cfg.forgejoClientSecretFile != null) [ "burrow-authentik-forgejo-oidc.service" ]; + wants = + [ + "burrow-authentik-ready.service" + "network-online.target" + ] + ++ lib.optionals (cfg.forgejoClientSecretFile != null) [ "burrow-authentik-forgejo-oidc.service" ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + directorySyncScript + cfg.envFile + ] ++ 
lib.concatMap (user: lib.optional (user.passwordFile != null) user.passwordFile) cfg.bootstrapUsers; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_BURROW_USERS_GROUP=${lib.escapeShellArg cfg.userGroupName} + export AUTHENTIK_BURROW_ADMINS_GROUP=${lib.escapeShellArg cfg.adminGroupName} + export AUTHENTIK_FORGEJO_APPLICATION_SLUG=${lib.escapeShellArg cfg.forgejoProviderSlug} + export AUTHENTIK_BURROW_DIRECTORY_JSON='${builtins.toJSON (map (user: { + inherit (user) username name email isAdmin passwordFile; + groups = user.groups; + }) cfg.bootstrapUsers)}' + + ${pkgs.bash}/bin/bash ${directorySyncScript} + ''; + }; + + systemd.services.burrow-authentik-tailnet-auth-flow = { + description = "Reconcile the Burrow Tailnet authentication flow"; + after = + [ + "burrow-authentik-ready.service" + "network-online.target" + ] + ++ lib.optionals ( + cfg.googleClientIDFile != null && cfg.googleClientSecretFile != null + ) [ "burrow-authentik-google-source.service" ]; + wants = + [ + "burrow-authentik-ready.service" + "network-online.target" + ] + ++ lib.optionals ( + cfg.googleClientIDFile != null && cfg.googleClientSecretFile != null + ) [ "burrow-authentik-google-source.service" ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + tailnetAuthFlowSyncScript + cfg.envFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_TAILNET_PROVIDER_SLUG=${lib.escapeShellArg cfg.headscaleProviderSlug} + export 
AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON='["${cfg.headscaleProviderSlug}","${cfg.tailscaleProviderSlug}"]' + export AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME=${lib.escapeShellArg cfg.headscaleAuthenticationFlowName} + export AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG=${lib.escapeShellArg cfg.headscaleAuthenticationFlowSlug} + export AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME=${lib.escapeShellArg cfg.headscaleIdentificationStageName} + export AUTHENTIK_TAILNET_PASSWORD_STAGE_NAME=${lib.escapeShellArg cfg.headscalePasswordStageName} + export AUTHENTIK_TAILNET_USER_LOGIN_STAGE_NAME=${lib.escapeShellArg cfg.headscaleUserLoginStageName} + export AUTHENTIK_TAILNET_GOOGLE_SOURCE_SLUG=${lib.escapeShellArg cfg.googleSourceSlug} + + ${pkgs.bash}/bin/bash ${tailnetAuthFlowSyncScript} + ''; + }; + + systemd.services.burrow-authentik-forgejo-oidc = lib.mkIf (cfg.forgejoClientSecretFile != null) { + description = "Reconcile the Burrow Authentik Forgejo OIDC application"; + after = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + forgejoOidcSyncScript + cfg.envFile + cfg.forgejoClientSecretFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_FORGEJO_APPLICATION_SLUG=${lib.escapeShellArg cfg.forgejoProviderSlug} + export AUTHENTIK_FORGEJO_APPLICATION_NAME=burrow.net + export AUTHENTIK_FORGEJO_PROVIDER_NAME=burrow.net + export AUTHENTIK_FORGEJO_CLIENT_ID=${lib.escapeShellArg cfg.forgejoClientId} + export AUTHENTIK_FORGEJO_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.forgejoClientSecretFile})" + export 
AUTHENTIK_FORGEJO_LAUNCH_URL=https://${cfg.forgejoDomain}/ + export AUTHENTIK_FORGEJO_REDIRECT_URIS_JSON='["https://${cfg.forgejoDomain}/user/oauth2/burrow.net/callback","https://${cfg.forgejoDomain}/user/oauth2/authentik/callback","https://${cfg.forgejoDomain}/user/oauth2/GitHub/callback"]' + + ${pkgs.bash}/bin/bash ${forgejoOidcSyncScript} + ''; + }; + + systemd.services.burrow-authentik-tailscale-oidc = lib.mkIf (cfg.tailscaleClientSecretFile != null) { + description = "Reconcile the Burrow Authentik Tailscale OIDC application"; + after = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + tailscaleOidcSyncScript + cfg.envFile + cfg.tailscaleClientSecretFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_TAILSCALE_APPLICATION_SLUG=${lib.escapeShellArg cfg.tailscaleProviderSlug} + export AUTHENTIK_TAILSCALE_APPLICATION_NAME=Tailscale + export AUTHENTIK_TAILSCALE_PROVIDER_NAME=Tailscale + export AUTHENTIK_TAILSCALE_TEMPLATE_SLUG=${lib.escapeShellArg cfg.headscaleProviderSlug} + export AUTHENTIK_TAILSCALE_CLIENT_ID=${lib.escapeShellArg cfg.tailscaleClientId} + export AUTHENTIK_TAILSCALE_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.tailscaleClientSecretFile})" + export AUTHENTIK_TAILSCALE_LAUNCH_URL=https://login.tailscale.com/start/oidc + export AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON='["https://login.tailscale.com/a/oauth_response"]' + ${lib.optionalString (cfg.tailscaleAccessGroupName != null) '' + export AUTHENTIK_TAILSCALE_ACCESS_GROUP=${lib.escapeShellArg cfg.tailscaleAccessGroupName} + ''} + ${lib.optionalString 
(cfg.defaultExternalApplicationSlug != null) '' + export AUTHENTIK_DEFAULT_EXTERNAL_APPLICATION_SLUG=${lib.escapeShellArg cfg.defaultExternalApplicationSlug} + ''} + + ${pkgs.bash}/bin/bash ${tailscaleOidcSyncScript} + ''; + }; + + systemd.services.burrow-authentik-1password-oidc = { + description = "Reconcile the Burrow Authentik 1Password OIDC application"; + after = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + onePasswordOidcSyncScript + cfg.envFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_ONEPASSWORD_APPLICATION_SLUG=${lib.escapeShellArg cfg.onePasswordProviderSlug} + export AUTHENTIK_ONEPASSWORD_APPLICATION_NAME=1Password + export AUTHENTIK_ONEPASSWORD_PROVIDER_NAME=1Password + export AUTHENTIK_ONEPASSWORD_TEMPLATE_SLUG=${lib.escapeShellArg cfg.headscaleProviderSlug} + export AUTHENTIK_ONEPASSWORD_CLIENT_ID=${lib.escapeShellArg cfg.onePasswordClientId} + export AUTHENTIK_ONEPASSWORD_LAUNCH_URL=https://${cfg.onePasswordDomain}/ + export AUTHENTIK_ONEPASSWORD_REDIRECT_URIS_JSON='${builtins.toJSON cfg.onePasswordRedirectUris}' + + ${pkgs.bash}/bin/bash ${onePasswordOidcSyncScript} + ''; + }; + + systemd.services.burrow-authentik-zulip-saml = { + description = "Reconcile the Burrow Authentik Zulip SAML application"; + after = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + zulipSamlSyncScript + cfg.envFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + 
serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_ZULIP_APPLICATION_SLUG=${lib.escapeShellArg cfg.zulipProviderSlug} + export AUTHENTIK_ZULIP_APPLICATION_NAME=Zulip + export AUTHENTIK_ZULIP_PROVIDER_NAME=Zulip + export AUTHENTIK_ZULIP_ACS_URL=${lib.escapeShellArg cfg.zulipAcsUrl} + export AUTHENTIK_ZULIP_AUDIENCE=${lib.escapeShellArg cfg.zulipAudience} + export AUTHENTIK_ZULIP_LAUNCH_URL=${lib.escapeShellArg cfg.zulipLaunchUrl} + ${lib.optionalString (cfg.zulipAccessGroupName != null) '' + export AUTHENTIK_ZULIP_ACCESS_GROUP=${lib.escapeShellArg cfg.zulipAccessGroupName} + ''} + export AUTHENTIK_ZULIP_ADMIN_GROUP=${lib.escapeShellArg cfg.adminGroupName} + + ${pkgs.bash}/bin/bash ${zulipSamlSyncScript} + ''; + }; + + systemd.services.burrow-authentik-linear-saml = lib.mkIf ( + cfg.linearAcsUrl != null && cfg.linearAudience != null + ) { + description = "Reconcile the Burrow Authentik Linear SAML application"; + after = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "network-online.target" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + linearSamlSyncScript + cfg.envFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_LINEAR_APPLICATION_SLUG=${lib.escapeShellArg cfg.linearProviderSlug} + export AUTHENTIK_LINEAR_APPLICATION_NAME=Linear + export AUTHENTIK_LINEAR_PROVIDER_NAME=Linear + export AUTHENTIK_LINEAR_ACS_URL=${lib.escapeShellArg cfg.linearAcsUrl} + export AUTHENTIK_LINEAR_AUDIENCE=${lib.escapeShellArg cfg.linearAudience} + 
export AUTHENTIK_LINEAR_LAUNCH_URL=${lib.escapeShellArg cfg.linearLaunchUrl} + ${lib.optionalString (cfg.linearDefaultRelayState != null) '' + export AUTHENTIK_LINEAR_DEFAULT_RELAY_STATE=${lib.escapeShellArg cfg.linearDefaultRelayState} + ''} + + ${pkgs.bash}/bin/bash ${linearSamlSyncScript} + ''; + }; + + systemd.services.burrow-authentik-linear-scim = lib.mkIf ( + cfg.linearScimUrl != null && cfg.linearScimTokenFile != null + ) { + description = "Reconcile the Burrow Authentik Linear SCIM provider"; + after = [ + "burrow-authentik-ready.service" + "burrow-authentik-directory.service" + "burrow-authentik-linear-saml.service" + "network-online.target" + ]; + wants = [ + "burrow-authentik-ready.service" + "burrow-authentik-directory.service" + "burrow-authentik-linear-saml.service" + "network-online.target" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + linearScimSyncScript + cfg.envFile + cfg.linearScimTokenFile + ]; + path = [ + pkgs.bash + pkgs.coreutils + pkgs.curl + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + set -a + source ${lib.escapeShellArg cfg.envFile} + set +a + + export AUTHENTIK_URL=https://${cfg.domain} + export AUTHENTIK_LINEAR_APPLICATION_SLUG=${lib.escapeShellArg cfg.linearProviderSlug} + export AUTHENTIK_LINEAR_SCIM_PROVIDER_NAME="Linear SCIM" + export AUTHENTIK_LINEAR_SCIM_URL=${lib.escapeShellArg cfg.linearScimUrl} + export AUTHENTIK_LINEAR_SCIM_TOKEN_FILE=${lib.escapeShellArg cfg.linearScimTokenFile} + export AUTHENTIK_LINEAR_SCIM_USER_IDENTIFIER=${lib.escapeShellArg cfg.linearScimUserIdentifier} + export AUTHENTIK_LINEAR_OWNER_GROUP=${lib.escapeShellArg cfg.linearOwnerGroupName} + export AUTHENTIK_LINEAR_ADMIN_GROUP=${lib.escapeShellArg cfg.linearAdminGroupName} + export AUTHENTIK_LINEAR_GUEST_GROUP=${lib.escapeShellArg cfg.linearGuestGroupName} + + ${pkgs.bash}/bin/bash ${linearScimSyncScript} + ''; + }; + + 
services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' + encode gzip zstd + reverse_proxy 127.0.0.1:${toString cfg.port} + ''; + }; +} diff --git a/nixos/modules/burrow-forge-runner.nix b/nixos/modules/burrow-forge-runner.nix index 1e183d2..034fb38 100644 --- a/nixos/modules/burrow-forge-runner.nix +++ b/nixos/modules/burrow-forge-runner.nix @@ -5,8 +5,10 @@ let runnerPkg = pkgs.forgejo-runner; stateDir = cfg.stateDir; runnerFile = "${stateDir}/.runner"; + registrationFingerprintFile = "${stateDir}/.runner-registration-fingerprint"; configFile = "${stateDir}/runner.yaml"; labelsCsv = lib.concatStringsSep "," (map (label: "${label}:host") cfg.labels); + registrationFingerprint = builtins.hashString "sha256" "${cfg.instanceUrl}\n${cfg.name}\n${labelsCsv}"; sshPrivateKeyFile = cfg.sshPrivateKeyFile or ""; in { @@ -141,6 +143,17 @@ EOF chown ${cfg.user}:${cfg.group} ${configFile} chmod 0640 ${configFile} + expected_fingerprint=${lib.escapeShellArg registrationFingerprint} + if [ -s ${runnerFile} ]; then + current_fingerprint="" + if [ -s ${registrationFingerprintFile} ]; then + current_fingerprint="$(tr -d '\r\n' < ${registrationFingerprintFile})" + fi + if [ "${"$"}current_fingerprint" != "${"$"}expected_fingerprint" ]; then + rm -f ${runnerFile} ${registrationFingerprintFile} + fi + fi + install -d -m 0700 -o ${cfg.user} -g ${cfg.group} ${stateDir}/.ssh ${pkgs.util-linux}/bin/runuser -u ${cfg.user} -- \ ${pkgs.git}/bin/git config --global user.name ${lib.escapeShellArg cfg.gitUserName} @@ -177,6 +190,10 @@ EOF --name ${lib.escapeShellArg cfg.name} \ --labels ${lib.escapeShellArg labelsCsv} \ --config ${configFile} + + printf '%s\n' "${"$"}expected_fingerprint" > ${registrationFingerprintFile} + chown ${cfg.user}:${cfg.group} ${registrationFingerprintFile} + chmod 0640 ${registrationFingerprintFile} fi ''; }; @@ -191,6 +208,7 @@ EOF User = cfg.user; Group = cfg.group; WorkingDirectory = stateDir; + Environment = [ 
"BURROW_RUNNER_REGISTRATION_FINGERPRINT=${registrationFingerprint}" ]; Restart = "on-failure"; RestartSec = 2; ExecStart = pkgs.writeShellScript "burrow-forgejo-runner" '' diff --git a/nixos/modules/burrow-forge.nix b/nixos/modules/burrow-forge.nix index e02475f..d733135 100644 --- a/nixos/modules/burrow-forge.nix +++ b/nixos/modules/burrow-forge.nix @@ -68,6 +68,77 @@ in description = "Host-local path to the plaintext bootstrap password file for the initial Forgejo admin."; }; + oidcDisplayName = lib.mkOption { + type = lib.types.str; + default = "burrow.net"; + description = "Login button label for the Forgejo OIDC provider."; + }; + + oidcClientId = lib.mkOption { + type = lib.types.str; + default = "git.burrow.net"; + description = "OIDC client ID that Forgejo should use against Authentik."; + }; + + oidcClientSecretFile = lib.mkOption { + type = lib.types.nullOr lib.types.str; + default = null; + description = "Host-local path to the Forgejo OIDC client secret."; + }; + + oidcDiscoveryUrl = lib.mkOption { + type = lib.types.str; + default = "https://auth.burrow.net/application/o/git/.well-known/openid-configuration"; + description = "OpenID Connect discovery URL for the Forgejo login source."; + }; + + oidcScopes = lib.mkOption { + type = with lib.types; listOf str; + default = [ + "openid" + "profile" + "email" + "groups" + ]; + description = "OIDC scopes requested from Authentik."; + }; + + oidcGroupClaimName = lib.mkOption { + type = lib.types.str; + default = "groups"; + description = "OIDC claim name that carries group membership."; + }; + + oidcAdminGroup = lib.mkOption { + type = lib.types.str; + default = "burrow-admins"; + description = "OIDC group that should grant Forgejo admin access."; + }; + + oidcRestrictedGroup = lib.mkOption { + type = lib.types.str; + default = "burrow-users"; + description = "OIDC group that is required to log into Forgejo."; + }; + + oidcAutoRegistration = lib.mkOption { + type = lib.types.bool; + default = true; + 
description = "Whether Forgejo should automatically create users for new OIDC sign-ins."; + }; + + oidcAccountLinking = lib.mkOption { + type = lib.types.enum [ "disabled" "login" "auto" ]; + default = "auto"; + description = "How Forgejo should link existing local accounts for OIDC sign-ins."; + }; + + oidcUsernameSource = lib.mkOption { + type = lib.types.enum [ "userid" "nickname" "email" ]; + default = "email"; + description = "Which OIDC claim Forgejo should use to derive usernames for auto-registration."; + }; + authorizedKeys = lib.mkOption { type = with lib.types; listOf str; default = [ ]; @@ -132,6 +203,9 @@ in service = { DISABLE_REGISTRATION = true; + ENABLE_INTERNAL_SIGNIN = false; + ENABLE_BASIC_AUTHENTICATION = false; + SHOW_REGISTRATION_BUTTON = false; REQUIRE_SIGNIN_VIEW = false; DEFAULT_ALLOW_CREATE_ORGANIZATION = false; ENABLE_NOTIFY_MAIL = false; @@ -148,6 +222,13 @@ in ENABLE_OPENID_SIGNUP = false; }; + oauth2_client = { + OPENID_CONNECT_SCOPES = lib.concatStringsSep " " (lib.subtractLists [ "openid" ] cfg.oidcScopes); + ENABLE_AUTO_REGISTRATION = cfg.oidcAutoRegistration; + ACCOUNT_LINKING = cfg.oidcAccountLinking; + USERNAME = cfg.oidcUsernameSource; + }; + actions = { ENABLED = true; }; @@ -175,13 +256,22 @@ in reverse_proxy 127.0.0.1:${toString config.services.forgejo.settings.server.HTTP_PORT} ''; "${cfg.siteDomain}".extraConfig = '' + encode gzip zstd + @oidcConfig path /.well-known/openid-configuration + redir @oidcConfig https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.tailscaleProviderSlug}/.well-known/openid-configuration 308 + @tailnetConfig path /.well-known/burrow-tailnet + header @tailnetConfig Content-Type application/json + respond @tailnetConfig 
"{\"domain\":\"${cfg.siteDomain}\",\"provider\":\"headscale\",\"authority\":\"https://${config.services.burrow.headscale.domain}\",\"oidc_issuer\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.headscaleProviderSlug}/\"}" 200 + @webfinger path /.well-known/webfinger + header @webfinger Content-Type application/jrd+json + respond @webfinger "{\"subject\":\"{query.resource}\",\"links\":[{\"rel\":\"http://openid.net/specs/connect/1.0/issuer\",\"href\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.tailscaleProviderSlug}/\"},{\"rel\":\"https://burrow.net/rel/tailnet-control-server\",\"href\":\"https://${config.services.burrow.headscale.domain}\"}]}" 200 @root path / redir @root ${homeRepoUrl} 308 respond 404 ''; } // lib.optionalAttrs ( - config.services.burrow.forgejoNsc.enable && config.services.burrow.forgejoNsc.autoscaler.enable + config.services.forgejo-nsc.enable && config.services.forgejo-nsc.autoscaler.enable ) { "${cfg.nscAutoscalerDomain}".extraConfig = '' encode gzip zstd @@ -243,5 +333,117 @@ in fi ''; }; + + systemd.services.burrow-forgejo-oidc-bootstrap = lib.mkIf (cfg.oidcClientSecretFile != null) { + description = "Seed the Burrow Forgejo OIDC login source"; + after = [ + "forgejo.service" + "postgresql.service" + ] ++ lib.optionals config.services.burrow.authentik.enable [ + "burrow-authentik-ready.service" + ]; + wants = lib.optionals config.services.burrow.authentik.enable [ + "burrow-authentik-ready.service" + ]; + requires = [ + "forgejo.service" + "postgresql.service" + ]; + wantedBy = [ "multi-user.target" ]; + restartTriggers = [ + cfg.oidcClientSecretFile + ]; + path = [ + pkgs.coreutils + pkgs.gnugrep + pkgs.jq + pkgs.postgresql + ]; + serviceConfig = { + Type = "oneshot"; + User = forgejoCfg.user; + Group = forgejoCfg.group; + WorkingDirectory = forgejoCfg.stateDir; + }; + script = '' + set -euo pipefail + + if [ ! 
-s ${lib.escapeShellArg cfg.oidcClientSecretFile} ]; then + echo "Forgejo OIDC client secret missing: ${cfg.oidcClientSecretFile}" >&2 + exit 1 + fi + + ready=0 + for attempt in $(seq 1 60); do + if ${pkgs.postgresql}/bin/psql -h /run/postgresql -U forgejo forgejo -tAc \ + "SELECT 1 FROM pg_tables WHERE schemaname='public' AND tablename='login_source';" \ + | grep -q 1; then + ready=1 + break + fi + sleep 1 + done + + if [ "$ready" -ne 1 ]; then + echo "Forgejo login_source table did not become ready" >&2 + exit 1 + fi + + oidc_secret="$(${pkgs.coreutils}/bin/tr -d '\r\n' < ${lib.escapeShellArg cfg.oidcClientSecretFile})" + if [ -z "$oidc_secret" ]; then + echo "Forgejo OIDC client secret is empty" >&2 + exit 1 + fi + + cfg_json="$(${pkgs.jq}/bin/jq -nc \ + --arg client_id ${lib.escapeShellArg cfg.oidcClientId} \ + --arg client_secret "$oidc_secret" \ + --arg discovery_url ${lib.escapeShellArg cfg.oidcDiscoveryUrl} \ + --argjson scopes '${builtins.toJSON cfg.oidcScopes}' \ + --arg group_claim_name ${lib.escapeShellArg cfg.oidcGroupClaimName} \ + --arg admin_group ${lib.escapeShellArg cfg.oidcAdminGroup} \ + --arg restricted_group ${lib.escapeShellArg cfg.oidcRestrictedGroup} \ + '{ + Provider: "openidConnect", + ClientID: $client_id, + ClientSecret: $client_secret, + OpenIDConnectAutoDiscoveryURL: $discovery_url, + CustomURLMapping: null, + IconURL: "", + Scopes: $scopes, + AttributeSSHPublicKey: "", + RequiredClaimName: "", + RequiredClaimValue: "", + GroupClaimName: $group_claim_name, + AdminGroup: $admin_group, + GroupTeamMap: "", + GroupTeamMapRemoval: false, + RestrictedGroup: $restricted_group + }')" + + ${pkgs.postgresql}/bin/psql -v ON_ERROR_STOP=1 \ + -h /run/postgresql -U forgejo forgejo \ + -v oidc_name=${lib.escapeShellArg cfg.oidcDisplayName} \ + -v cfg_json="$cfg_json" <<'SQL' + INSERT INTO login_source ( + type, name, is_active, is_sync_enabled, cfg, created_unix, updated_unix + ) VALUES ( + 6, + :'oidc_name', + TRUE, + FALSE, + :'cfg_json', + 
EXTRACT(EPOCH FROM NOW())::BIGINT, + EXTRACT(EPOCH FROM NOW())::BIGINT + ) + ON CONFLICT (name) DO UPDATE SET + type = EXCLUDED.type, + is_active = TRUE, + is_sync_enabled = FALSE, + cfg = EXCLUDED.cfg, + updated_unix = EXCLUDED.updated_unix; + SQL + ''; + }; }; } diff --git a/nixos/modules/burrow-forgejo-nsc.nix b/nixos/modules/burrow-forgejo-nsc.nix deleted file mode 100644 index e05b2ae..0000000 --- a/nixos/modules/burrow-forgejo-nsc.nix +++ /dev/null @@ -1,296 +0,0 @@ -{ config, lib, pkgs, self, ... }: - -let - inherit (lib) - mkEnableOption - mkIf - mkOption - types - mkAfter - mkDefault - optional - optionalAttrs - optionalString - ; - - cfg = config.services.burrow.forgejoNsc; - dispatcherRuntimeConfig = "${cfg.stateDir}/dispatcher.yaml"; - autoscalerRuntimeConfig = "${cfg.stateDir}/autoscaler.yaml"; - - pendingCheck = configPath: pkgs.writeShellScript "forgejo-nsc-check-pending" '' - set -euo pipefail - if ${pkgs.gnugrep}/bin/grep -q 'PENDING-' '${configPath}'; then - echo "forgejo-nsc config still contains placeholder values (PENDING-); update ${configPath} before starting." 
>&2 - exit 1 - fi - ''; - - nscTokenPath = "${cfg.stateDir}/nsc.token"; - tokenSync = optionalString (cfg.nscTokenFile != null) '' - install -m 600 ${lib.escapeShellArg cfg.nscTokenFile} ${lib.escapeShellArg nscTokenPath} - chown ${cfg.user}:${cfg.group} ${nscTokenPath} - chmod 600 ${nscTokenPath} - ''; - dispatcherConfigSync = optionalString (cfg.dispatcher.configFile != null) '' - install -m 400 ${lib.escapeShellArg cfg.dispatcher.configFile} ${lib.escapeShellArg dispatcherRuntimeConfig} - chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg dispatcherRuntimeConfig} - chmod 400 ${lib.escapeShellArg dispatcherRuntimeConfig} - ''; - autoscalerConfigSync = optionalString (cfg.autoscaler.configFile != null) '' - install -m 400 ${lib.escapeShellArg cfg.autoscaler.configFile} ${lib.escapeShellArg autoscalerRuntimeConfig} - chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg autoscalerRuntimeConfig} - chmod 400 ${lib.escapeShellArg autoscalerRuntimeConfig} - ''; - - dispatcherEnv = - cfg.extraEnv - // optionalAttrs (cfg.nscTokenFile != null) { NSC_TOKEN_FILE = nscTokenPath; } - // optionalAttrs (cfg.nscTokenSpecFile != null) { NSC_TOKEN_SPEC_FILE = cfg.nscTokenSpecFile; } - // optionalAttrs (cfg.nscEndpoint != null) { NSC_ENDPOINT = cfg.nscEndpoint; }; -in { - options.services.burrow.forgejoNsc = { - enable = mkEnableOption "Forgejo Namespace Cloud runner dispatcher"; - - user = mkOption { - type = types.str; - default = "forgejo-nsc"; - description = "System user that runs the forgejo-nsc services."; - }; - - group = mkOption { - type = types.str; - default = "forgejo-nsc"; - description = "System group for the forgejo-nsc services."; - }; - - stateDir = mkOption { - type = types.str; - default = "/var/lib/forgejo-nsc"; - description = "State directory for the dispatcher/autoscaler."; - }; - - nscTokenFile = mkOption { - type = types.nullOr types.str; - default = null; - description = "Optional NSC token file (exported as NSC_TOKEN_FILE)."; - }; - - nscTokenSpecFile = 
mkOption { - type = types.nullOr types.str; - default = null; - description = "Optional NSC token spec file (exported as NSC_TOKEN_SPEC_FILE)."; - }; - - nscEndpoint = mkOption { - type = types.nullOr types.str; - default = null; - description = "Optional NSC endpoint override (exported as NSC_ENDPOINT)."; - }; - - extraEnv = mkOption { - type = types.attrsOf types.str; - default = { }; - description = "Extra environment variables injected into the services."; - }; - - nscPackage = mkOption { - type = types.nullOr types.package; - default = self.packages.${pkgs.stdenv.hostPlatform.system}.nsc or null; - description = "Optional nsc CLI package added to the service PATH."; - }; - - dispatcher = { - enable = mkOption { - type = types.bool; - default = true; - description = "Enable the forgejo-nsc dispatcher service."; - }; - - package = mkOption { - type = types.package; - default = self.packages.${pkgs.stdenv.hostPlatform.system}.forgejo-nsc-dispatcher; - description = "Package providing the forgejo-nsc dispatcher binary."; - }; - - configFile = mkOption { - type = types.nullOr types.str; - default = null; - description = "Host-local YAML config file for the dispatcher."; - }; - - allowPending = mkOption { - type = types.bool; - default = false; - description = "Allow placeholder values (PENDING-) in the dispatcher config."; - }; - }; - - autoscaler = { - enable = mkOption { - type = types.bool; - default = false; - description = "Enable the forgejo-nsc autoscaler service."; - }; - - package = mkOption { - type = types.package; - default = self.packages.${pkgs.stdenv.hostPlatform.system}.forgejo-nsc-autoscaler; - description = "Package providing the forgejo-nsc autoscaler binary."; - }; - - configFile = mkOption { - type = types.nullOr types.str; - default = null; - description = "Host-local YAML config file for the autoscaler."; - }; - - allowPending = mkOption { - type = types.bool; - default = false; - description = "Allow placeholder values (PENDING-) in the 
autoscaler config."; - }; - }; - - pruneRunners = { - enable = mkOption { - type = types.bool; - default = true; - description = "Enable periodic pruning of stale Forgejo action runners."; - }; - - ttlSeconds = mkOption { - type = types.ints.positive; - default = 3600; - description = "Age threshold in seconds before offline runners are marked deleted."; - }; - - onBootSec = mkOption { - type = types.str; - default = "15m"; - description = "How long after boot to wait before the first prune run."; - }; - - onUnitActiveSec = mkOption { - type = types.str; - default = "1h"; - description = "How often to rerun stale runner pruning."; - }; - - randomizedDelaySec = mkOption { - type = types.str; - default = "10m"; - description = "Randomized delay applied to the prune timer."; - }; - }; - }; - - config = mkIf cfg.enable { - assertions = [ - { - assertion = (!cfg.dispatcher.enable) || cfg.dispatcher.configFile != null; - message = "services.burrow.forgejoNsc.dispatcher.configFile must be set when the dispatcher is enabled."; - } - { - assertion = (!cfg.autoscaler.enable) || cfg.autoscaler.configFile != null; - message = "services.burrow.forgejoNsc.autoscaler.configFile must be set when the autoscaler is enabled."; - } - ]; - - users.groups.${cfg.group} = { }; - users.users.${cfg.user} = { - uid = mkDefault 2011; - isSystemUser = true; - group = cfg.group; - description = "Forgejo Namespace Cloud runner services"; - home = cfg.stateDir; - createHome = true; - shell = pkgs.bashInteractive; - }; - - systemd.tmpfiles.rules = mkAfter [ - "d ${cfg.stateDir} 0750 ${cfg.user} ${cfg.group} - -" - ]; - - systemd.services.forgejo-nsc-dispatcher = mkIf cfg.dispatcher.enable { - description = "Forgejo Namespace Cloud dispatcher"; - wantedBy = [ "multi-user.target" ]; - after = [ "network-online.target" ]; - wants = [ "network-online.target" ]; - unitConfig.ConditionPathExists = - optional (cfg.dispatcher.configFile != null) cfg.dispatcher.configFile - ++ optional (cfg.nscTokenFile != 
null) cfg.nscTokenFile; - serviceConfig = { - Type = "simple"; - User = cfg.user; - Group = cfg.group; - WorkingDirectory = cfg.stateDir; - ExecStart = "${cfg.dispatcher.package}/bin/forgejo-nsc-dispatcher --config ${dispatcherRuntimeConfig}"; - Restart = "on-failure"; - RestartSec = 5; - }; - path = lib.optional (cfg.nscPackage != null) cfg.nscPackage; - environment = dispatcherEnv; - preStart = lib.concatStringsSep "\n" (lib.filter (s: s != "") [ - (optionalString (!cfg.dispatcher.allowPending) (pendingCheck cfg.dispatcher.configFile)) - dispatcherConfigSync - tokenSync - ]); - }; - - systemd.services.forgejo-nsc-autoscaler = mkIf cfg.autoscaler.enable { - description = "Forgejo Namespace Cloud autoscaler"; - wantedBy = [ "multi-user.target" ]; - after = [ "network-online.target" "forgejo-nsc-dispatcher.service" ]; - wants = [ "network-online.target" ]; - unitConfig.ConditionPathExists = - optional (cfg.autoscaler.configFile != null) cfg.autoscaler.configFile - ++ optional (cfg.nscTokenFile != null) cfg.nscTokenFile; - serviceConfig = { - Type = "simple"; - User = cfg.user; - Group = cfg.group; - WorkingDirectory = cfg.stateDir; - ExecStart = "${cfg.autoscaler.package}/bin/forgejo-nsc-autoscaler --config ${autoscalerRuntimeConfig}"; - Restart = "on-failure"; - RestartSec = 5; - }; - path = lib.optional (cfg.nscPackage != null) cfg.nscPackage; - environment = dispatcherEnv; - preStart = lib.concatStringsSep "\n" (lib.filter (s: s != "") [ - (optionalString (!cfg.autoscaler.allowPending) (pendingCheck cfg.autoscaler.configFile)) - autoscalerConfigSync - tokenSync - ]); - }; - - systemd.services.forgejo-prune-runners = mkIf cfg.pruneRunners.enable { - description = "Prune offline Forgejo action runners"; - after = [ "forgejo.service" ]; - requires = [ "forgejo.service" ]; - serviceConfig = { - Type = "oneshot"; - User = "forgejo"; - Group = "forgejo"; - }; - environment = { - FORGEJO_PRUNE_DB = "1"; - FORGEJO_RUNNER_TTL_SEC = toString cfg.pruneRunners.ttlSeconds; - 
}; - path = [ pkgs.python3 pkgs.postgresql ]; - script = '' - ${pkgs.python3}/bin/python3 ${self}/Scripts/forgejo-prune-runners.py - ''; - }; - - systemd.timers.forgejo-prune-runners = mkIf cfg.pruneRunners.enable { - description = "Periodic Forgejo runner cleanup"; - wantedBy = [ "timers.target" ]; - timerConfig = { - OnBootSec = cfg.pruneRunners.onBootSec; - OnUnitActiveSec = cfg.pruneRunners.onUnitActiveSec; - RandomizedDelaySec = cfg.pruneRunners.randomizedDelaySec; - Unit = "forgejo-prune-runners.service"; - }; - }; - }; -} diff --git a/nixos/modules/burrow-headscale-policy.hujson b/nixos/modules/burrow-headscale-policy.hujson new file mode 100644 index 0000000..8f0bcd2 --- /dev/null +++ b/nixos/modules/burrow-headscale-policy.hujson @@ -0,0 +1,11 @@ +{ + // Bootstrap with a simple allow-all policy; Burrow-specific lane segmentation + // can be layered on once the control plane is live. + "acls": [ + { + "action": "accept", + "src": ["*"], + "dst": ["*:*"], + }, + ], +} diff --git a/nixos/modules/burrow-headscale.nix b/nixos/modules/burrow-headscale.nix new file mode 100644 index 0000000..ad5ec68 --- /dev/null +++ b/nixos/modules/burrow-headscale.nix @@ -0,0 +1,227 @@ +{ config, lib, pkgs, ... 
}: + +let + cfg = config.services.burrow.headscale; + policyFile = ./burrow-headscale-policy.hujson; +in +{ + options.services.burrow.headscale = { + enable = lib.mkEnableOption "the Burrow Headscale control plane"; + + domain = lib.mkOption { + type = lib.types.str; + default = "ts.burrow.net"; + description = "Public Headscale control-plane domain."; + }; + + tailDomain = lib.mkOption { + type = lib.types.str; + default = "tail.burrow.net"; + description = "MagicDNS suffix served by Headscale."; + }; + + port = lib.mkOption { + type = lib.types.port; + default = 8413; + description = "Local Headscale listen port."; + }; + + oidcIssuer = lib.mkOption { + type = lib.types.str; + default = "https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.headscaleProviderSlug}/"; + description = "OIDC issuer URL used by Headscale."; + }; + + oidcClientSecretFile = lib.mkOption { + type = lib.types.str; + default = config.services.burrow.authentik.headscaleClientSecretFile; + description = "Host-local file containing the OIDC client secret used by Headscale."; + }; + + bootstrapUsers = lib.mkOption { + type = with lib.types; listOf (submodule { + options = { + name = lib.mkOption { + type = str; + description = "Headscale username."; + }; + displayName = lib.mkOption { + type = str; + description = "Friendly display name."; + }; + email = lib.mkOption { + type = str; + description = "User email address."; + }; + }; + }); + default = [ + { + name = "contact"; + displayName = "Burrow"; + email = "contact@burrow.net"; + } + { + name = "conrad"; + displayName = "Conrad"; + email = "conrad@burrow.net"; + } + { + name = "agent"; + displayName = "Agent"; + email = "agent@burrow.net"; + } + { + name = "infra"; + displayName = "Infrastructure"; + email = "infra@burrow.net"; + } + ]; + description = "Users to create or reconcile inside Headscale."; + }; + }; + + config = lib.mkIf cfg.enable { + environment.systemPackages = [ pkgs.headscale ]; 
+ + systemd.services.burrow-headscale-client-secret = { + description = "Ensure the Burrow Headscale OIDC client secret exists"; + before = + [ "headscale.service" ] + ++ lib.optionals config.services.burrow.authentik.enable [ "burrow-authentik-runtime.service" ]; + wantedBy = + [ "headscale.service" ] + ++ lib.optionals config.services.burrow.authentik.enable [ "burrow-authentik-runtime.service" ]; + path = [ + pkgs.coreutils + pkgs.openssl + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + RemainAfterExit = true; + }; + script = '' + set -euo pipefail + + install -d -m 0755 /var/lib/burrow/intake + + if [ ! -s ${lib.escapeShellArg cfg.oidcClientSecretFile} ]; then + umask 077 + ${pkgs.openssl}/bin/openssl rand -base64 48 > ${lib.escapeShellArg cfg.oidcClientSecretFile} + chown root:root ${lib.escapeShellArg cfg.oidcClientSecretFile} + chmod 0400 ${lib.escapeShellArg cfg.oidcClientSecretFile} + fi + ''; + }; + + services.headscale = { + enable = true; + address = "127.0.0.1"; + port = cfg.port; + settings = { + server_url = "https://${cfg.domain}"; + dns = { + magic_dns = true; + base_domain = cfg.tailDomain; + nameservers.global = [ + "1.1.1.1" + "1.0.0.1" + "2606:4700:4700::1111" + "2606:4700:4700::1001" + ]; + search_domains = [ cfg.tailDomain ]; + }; + database.sqlite.write_ahead_log = true; + log.level = "info"; + policy = { + mode = "file"; + path = policyFile; + }; + oidc = { + only_start_if_oidc_is_available = true; + issuer = cfg.oidcIssuer; + client_id = cfg.domain; + client_secret_path = "\${CREDENTIALS_DIRECTORY}/oidc_client_secret"; + scope = [ + "openid" + "profile" + "email" + ]; + pkce = { + enabled = true; + method = "S256"; + }; + }; + }; + }; + + systemd.services.headscale = { + after = + [ "burrow-headscale-client-secret.service" ] + ++ lib.optionals config.services.burrow.authentik.enable [ "burrow-authentik-ready.service" ]; + wants = + [ "burrow-headscale-client-secret.service" ] + ++ lib.optionals 
config.services.burrow.authentik.enable [ "burrow-authentik-ready.service" ]; + requires = + [ "burrow-headscale-client-secret.service" ] + ++ lib.optionals config.services.burrow.authentik.enable [ "burrow-authentik-ready.service" ]; + serviceConfig.LoadCredential = [ + "oidc_client_secret:${cfg.oidcClientSecretFile}" + ]; + }; + + systemd.services.headscale-bootstrap = { + description = "Bootstrap Burrow Headscale users"; + after = [ "headscale.service" ]; + requires = [ "headscale.service" ]; + wantedBy = [ "multi-user.target" ]; + path = [ + pkgs.coreutils + pkgs.headscale + pkgs.jq + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + + list_users() { + local users_json + users_json="$(${pkgs.headscale}/bin/headscale users list -o json)" + printf '%s\n' "$users_json" | ${pkgs.jq}/bin/jq -c 'if type == "array" then . else [] end' + } + + ensure_user() { + local name="$1" + local display_name="$2" + local email="$3" + if list_users | ${pkgs.jq}/bin/jq -e --arg name "$name" 'map(select(.name == $name)) | length > 0' >/dev/null; then + return 0 + fi + ${pkgs.headscale}/bin/headscale users create "$name" --display-name "$display_name" --email "$email" >/dev/null + } + + for _ in $(seq 1 60); do + if list_users >/dev/null 2>&1; then + break + fi + sleep 1 + done + + ${lib.concatMapStringsSep "\n" (user: '' + ensure_user ${lib.escapeShellArg user.name} ${lib.escapeShellArg user.displayName} ${lib.escapeShellArg user.email} + '') cfg.bootstrapUsers} + ''; + }; + + services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' + encode gzip zstd + reverse_proxy 127.0.0.1:${toString cfg.port} + ''; + }; +} diff --git a/nixos/modules/burrow-zulip.nix b/nixos/modules/burrow-zulip.nix new file mode 100644 index 0000000..9670694 --- /dev/null +++ b/nixos/modules/burrow-zulip.nix @@ -0,0 +1,587 @@ +{ config, lib, pkgs, ... 
}: + +let + cfg = config.services.burrow.zulip; + realmSignupDomain = + let + parts = lib.splitString "@" cfg.administratorEmail; + in + if builtins.length parts == 2 then builtins.elemAt parts 1 else cfg.domain; + yamlFormat = pkgs.formats.yaml { }; + composeFile = yamlFormat.generate "burrow-zulip-compose.yaml" { + services = { + zulip = { + image = "ghcr.io/zulip/zulip-server:11.6-1"; + restart = "unless-stopped"; + network_mode = "host"; + secrets = [ + "zulip__postgres_password" + "zulip__rabbitmq_password" + "zulip__redis_password" + "zulip__secret_key" + "zulip__email_password" + ]; + environment = { + SETTING_REMOTE_POSTGRES_HOST = "127.0.0.1"; + SETTING_MEMCACHED_LOCATION = "127.0.0.1:11211"; + SETTING_RABBITMQ_HOST = "127.0.0.1"; + SETTING_REDIS_HOST = "127.0.0.1"; + }; + volumes = [ "${cfg.dataDir}/data:/data:rw" ]; + ulimits.nofile = { + soft = 1000000; + hard = 1048576; + }; + }; + }; + }; +in +{ + options.services.burrow.zulip = { + enable = lib.mkEnableOption "the Burrow Zulip deployment"; + + domain = lib.mkOption { + type = lib.types.str; + default = "chat.burrow.net"; + description = "Public Zulip domain."; + }; + + port = lib.mkOption { + type = lib.types.port; + default = 18090; + description = "Local loopback port Caddy should proxy to."; + }; + + dataDir = lib.mkOption { + type = lib.types.str; + default = "/var/lib/burrow/zulip"; + description = "Host directory storing Zulip compose state and generated runtime files."; + }; + + administratorEmail = lib.mkOption { + type = lib.types.str; + default = "contact@burrow.net"; + description = "Operational Zulip administrator email."; + }; + + realmName = lib.mkOption { + type = lib.types.str; + default = "Burrow"; + description = "Initial Zulip organization name for single-tenant bootstrap."; + }; + + realmOwnerName = lib.mkOption { + type = lib.types.str; + default = "Burrow"; + description = "Display name used for the initial Zulip organization owner."; + }; + + authentikDomain = lib.mkOption { + 
type = lib.types.str; + default = config.services.burrow.authentik.domain; + description = "Authentik domain Zulip should trust as its SAML IdP."; + }; + + authentikProviderSlug = lib.mkOption { + type = lib.types.str; + default = config.services.burrow.authentik.zulipProviderSlug; + description = "Authentik SAML application slug used for Zulip."; + }; + + postgresPasswordFile = lib.mkOption { + type = lib.types.str; + description = "File containing the Zulip PostgreSQL password."; + }; + + rabbitmqPasswordFile = lib.mkOption { + type = lib.types.str; + description = "File containing the Zulip RabbitMQ password."; + }; + + redisPasswordFile = lib.mkOption { + type = lib.types.str; + description = "File containing the Zulip Redis password."; + }; + + secretKeyFile = lib.mkOption { + type = lib.types.str; + description = "File containing the Zulip Django secret key."; + }; + }; + + config = lib.mkIf cfg.enable { + environment.systemPackages = [ + pkgs.podman + pkgs.podman-compose + ]; + + services.postgresql = { + ensureDatabases = [ "zulip" ]; + ensureUsers = [ + { + name = "zulip"; + ensureDBOwnership = true; + } + ]; + settings = { + listen_addresses = lib.mkDefault "127.0.0.1"; + password_encryption = lib.mkDefault "scram-sha-256"; + }; + authentication = lib.mkAfter '' + host zulip zulip 127.0.0.1/32 scram-sha-256 + ''; + }; + + services.postgresqlBackup = { + enable = true; + backupAll = false; + databases = [ "zulip" ]; + }; + + services.memcached = { + enable = true; + listen = "127.0.0.1"; + port = 11211; + extraOptions = [ "-U 0" ]; + }; + + services.redis.servers.zulip = { + enable = true; + bind = "127.0.0.1"; + port = 6379; + requirePassFile = cfg.redisPasswordFile; + }; + + services.rabbitmq = { + enable = true; + listenAddress = "127.0.0.1"; + port = 5672; + }; + + services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' + encode gzip zstd + reverse_proxy 127.0.0.1:${toString cfg.port} + ''; + + systemd.tmpfiles.rules = [ + "d ${cfg.dataDir} 0755 
root root - -" + "d ${cfg.dataDir}/data 0755 root root - -" + "d ${cfg.dataDir}/data/logs 0755 root root - -" + "d ${cfg.dataDir}/data/logs/emails 0755 root root - -" + "d ${cfg.dataDir}/data/secrets 0700 root root - -" + "d ${cfg.dataDir}/secrets 0700 root root - -" + "d ${cfg.dataDir}/logs 0755 root root - -" + ]; + + systemd.services.burrow-zulip-postgres-bootstrap = { + description = "Bootstrap PostgreSQL role for Burrow Zulip"; + after = [ "postgresql.service" ]; + wants = [ "postgresql.service" ]; + requiredBy = [ "burrow-zulip.service" ]; + before = [ "burrow-zulip.service" ]; + path = [ + config.services.postgresql.package + pkgs.bash + pkgs.coreutils + pkgs.python3 + pkgs.util-linux + ]; + serviceConfig = { + Type = "oneshot"; + User = "root"; + Group = "root"; + }; + script = '' + set -euo pipefail + + db_password="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.postgresPasswordFile})" + db_password_sql="$(printf '%s' "$db_password" | python3 -c "import sys; print(sys.stdin.read().replace(chr(39), chr(39) * 2), end=\"\")")" + setup_sql="$(mktemp)" + trap 'rm -f "$setup_sql"' EXIT + + cat > "$setup_sql" < ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} + chmod 0600 ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"} + + metadata_xml="$(${pkgs.curl}/bin/curl -fsSL https://${cfg.authentikDomain}/application/saml/${cfg.authentikProviderSlug}/metadata/)" + saml_cert="$(printf '%s' "$metadata_xml" | ${pkgs.python3}/bin/python3 -c ' +import xml.etree.ElementTree as ET, sys +xml = sys.stdin.read() +root = ET.fromstring(xml) +ns = {"ds": "http://www.w3.org/2000/09/xmldsig#"} +node = root.find(".//ds:X509Certificate", ns) +if node is None or not (node.text or "").strip(): + raise SystemExit("missing X509 certificate in Authentik metadata") +print((node.text or "").strip()) +')" + + cat > ${lib.escapeShellArg "${cfg.dataDir}/compose.override.yaml"} < "$zulip_data_dir/secrets/bootstrap-owner-password" + fi + chown 1000:1000 
"$zulip_data_dir/secrets/bootstrap-owner-password" + chmod 0600 "$zulip_data_dir/secrets/bootstrap-owner-password" + } + + wait_for_zulip_supervisor() { + local attempts=0 + while ! podman exec burrow-zulip_zulip_1 supervisorctl status >/dev/null 2>&1; do + attempts=$((attempts + 1)) + if [ "$attempts" -ge 90 ]; then + echo "error: Zulip supervisor did not become ready" >&2 + exit 1 + fi + sleep 2 + done + } + + patch_uwsgi_scheme_handling() { + wait_for_zulip_supervisor + podman exec burrow-zulip_zulip_1 bash -lc "cat > /etc/nginx/zulip-include/trusted-proto <<'EOF' +map \$remote_addr \$trusted_x_forwarded_proto { + default \$scheme; + 127.0.0.1 \$http_x_forwarded_proto; + ::1 \$http_x_forwarded_proto; + 172.31.1.1 \$http_x_forwarded_proto; +} +map \$remote_addr \$trusted_x_forwarded_for { + default \"\"; + 127.0.0.1 \$http_x_forwarded_for; + ::1 \$http_x_forwarded_for; + 172.31.1.1 \$http_x_forwarded_for; +} +map \$remote_addr \$x_proxy_misconfiguration { + default \"\"; +} +EOF +cat > /etc/nginx/uwsgi_params <<'EOF' +uwsgi_param QUERY_STRING \$query_string; +uwsgi_param REQUEST_METHOD \$request_method; +uwsgi_param CONTENT_TYPE \$content_type; +uwsgi_param CONTENT_LENGTH \$content_length; +uwsgi_param REQUEST_URI \$request_uri; +uwsgi_param PATH_INFO \$document_uri; +uwsgi_param DOCUMENT_ROOT \$document_root; +uwsgi_param SERVER_PROTOCOL \$server_protocol; +uwsgi_param REQUEST_SCHEME \$trusted_x_forwarded_proto; +uwsgi_param HTTPS on; +uwsgi_param REMOTE_ADDR \$remote_addr; +uwsgi_param REMOTE_PORT \$remote_port; +uwsgi_param SERVER_ADDR \$server_addr; +uwsgi_param SERVER_PORT \$server_port; +uwsgi_param SERVER_NAME \$server_name; +uwsgi_param HTTP_X_REAL_IP \$remote_addr; +uwsgi_param HTTP_X_FORWARDED_PROTO \$trusted_x_forwarded_proto; +uwsgi_param HTTP_X_FORWARDED_SSL \"\"; +uwsgi_param HTTP_X_PROXY_MISCONFIGURATION \$x_proxy_misconfiguration; + +# This value is the default, and is provided for explicitness; it must +# be longer than the configured 55s 
harakiri timeout in uwsgi +uwsgi_read_timeout 60s; + +uwsgi_pass django; +EOF +supervisorctl restart nginx zulip-django >/dev/null" + } + + bootstrap_realm_if_needed() { + wait_for_zulip_supervisor + local realm_exists + + realm_exists="$( + podman exec burrow-zulip_zulip_1 bash -lc \ + "su zulip -c '/home/zulip/deployments/current/manage.py list_realms'" \ + | awk '$NF == "https://${cfg.domain}" { print "yes" }' + )" + + if [ -n "$realm_exists" ]; then + return 0 + fi + + local realm_name=${lib.escapeShellArg cfg.realmName} + local admin_email=${lib.escapeShellArg cfg.administratorEmail} + local owner_name=${lib.escapeShellArg cfg.realmOwnerName} + local create_realm_cmd + + printf -v create_realm_cmd '%q ' \ + /home/zulip/deployments/current/manage.py \ + create_realm \ + --string-id= \ + --password-file /data/secrets/bootstrap-owner-password \ + --automated \ + "$realm_name" \ + "$admin_email" \ + "$owner_name" + + podman exec burrow-zulip_zulip_1 su zulip -c "$create_realm_cmd" + } + + reconcile_realm_policy() { + wait_for_zulip_supervisor + local realm_id + realm_id="$( + podman exec burrow-zulip_zulip_1 bash -lc \ + "su zulip -c '/home/zulip/deployments/current/manage.py list_realms'" \ + | awk '$NF == "https://${cfg.domain}" { print $1 }' + )" + + podman exec burrow-zulip_zulip_1 su zulip -c \ + "/home/zulip/deployments/current/manage.py realm_domain --op add -r $realm_id ${realmSignupDomain} --allow-subdomains --automated" \ + >/dev/null 2>&1 || true + + podman exec burrow-zulip_zulip_1 su zulip -c \ + "/home/zulip/deployments/current/manage.py shell -c 'from zerver.models import Realm; realm = Realm.objects.get(id=$realm_id); realm.invite_required = False; realm.save(update_fields=[\"invite_required\"])'" + } + + if [ ! 
-e .initialized ]; then + compose pull + compose run --rm -T zulip app:init + touch .initialized + fi + + ensure_zulip_data_layout + compose up -d zulip + bootstrap_realm_if_needed + reconcile_realm_policy + patch_uwsgi_scheme_handling + ''; + }; + }; +} diff --git a/proto/burrow.proto b/proto/burrow.proto index efbb064..ed1f89e 100644 --- a/proto/burrow.proto +++ b/proto/burrow.proto @@ -5,6 +5,7 @@ import "google/protobuf/timestamp.proto"; service Tunnel { rpc TunnelConfiguration (Empty) returns (stream TunnelConfigurationResponse); + rpc TunnelPackets (stream TunnelPacket) returns (stream TunnelPacket); rpc TunnelStart (Empty) returns (Empty); rpc TunnelStop (Empty) returns (Empty); rpc TunnelStatus (Empty) returns (stream TunnelStatusResponse); @@ -17,6 +18,14 @@ service Networks { rpc NetworkDelete (NetworkDeleteRequest) returns (Empty); } +service TailnetControl { + rpc Discover (TailnetDiscoverRequest) returns (TailnetDiscoverResponse); + rpc Probe (TailnetProbeRequest) returns (TailnetProbeResponse); + rpc LoginStart (TailnetLoginStartRequest) returns (TailnetLoginStatusResponse); + rpc LoginStatus (TailnetLoginStatusRequest) returns (TailnetLoginStatusResponse); + rpc LoginCancel (TailnetLoginCancelRequest) returns (Empty); +} + message NetworkReorderRequest { int32 id = 1; int32 index = 2; @@ -45,8 +54,7 @@ message Network { enum NetworkType { WireGuard = 0; - HackClub = 1; - Tor = 2; + Tailnet = 1; } message NetworkListResponse { @@ -57,6 +65,57 @@ message Empty { } +message TailnetDiscoverRequest { + string email = 1; +} + +message TailnetDiscoverResponse { + string domain = 1; + string authority = 2; + string oidc_issuer = 3; + bool managed = 4; +} + +message TailnetProbeRequest { + string authority = 1; +} + +message TailnetProbeResponse { + string authority = 1; + int32 status_code = 2; + string summary = 3; + string detail = 4; + bool reachable = 5; +} + +message TailnetLoginStartRequest { + string account_name = 1; + string identity_name = 2; + 
string hostname = 3; + string authority = 4; +} + +message TailnetLoginStatusRequest { + string session_id = 1; +} + +message TailnetLoginCancelRequest { + string session_id = 1; +} + +message TailnetLoginStatusResponse { + string session_id = 1; + string backend_state = 2; + string auth_url = 3; + bool running = 4; + bool needs_login = 5; + string tailnet_name = 6; + string magic_dns_suffix = 7; + string self_dns_name = 8; + repeated string tailnet_ips = 9; + repeated string health = 10; +} + enum State { Stopped = 0; Running = 1; @@ -70,4 +129,12 @@ message TunnelStatusResponse { message TunnelConfigurationResponse { repeated string addresses = 1; int32 mtu = 2; + repeated string routes = 3; + repeated string dns_servers = 4; + repeated string search_domains = 5; + bool include_default_route = 6; +} + +message TunnelPacket { + bytes payload = 1; } diff --git a/secrets.nix b/secrets.nix index 1e49f5d..3f9bba4 100644 --- a/secrets.nix +++ b/secrets.nix @@ -1 +1,33 @@ -import ./secrets/secrets.nix +let + conradev = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBueQxNbP2246pxr/m7au4zNVm+ShC96xuOcfEcpIjWZ"; + contact = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa"; + agent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net"; + jett = builtins.replaceStrings [ "\n" ] [ "" ] (builtins.readFile ./nixos/keys/jett_at_burrow_net.pub); + burrowForgeHost = "age1quxf27gnun0xghlnxf3jrmqr3h3a3fzd8qxpallsaztd2u74pdfq9e7w9l"; + burrowForgeRecipients = [ + contact + agent + jett + burrowForgeHost + ]; + uiTestRecipients = burrowForgeRecipients ++ [ conradev ]; +in +{ + "secrets/infra/authentik.env.age".publicKeys = burrowForgeRecipients; + "secrets/infra/authentik-google-client-id.age".publicKeys = burrowForgeRecipients; + "secrets/infra/authentik-google-client-secret.age".publicKeys = burrowForgeRecipients; + "secrets/infra/authentik-google-account-map.json.age".publicKeys = burrowForgeRecipients; + 
"secrets/infra/authentik-ui-test-password.age".publicKeys = uiTestRecipients; + "secrets/infra/forgejo-oidc-client-secret.age".publicKeys = burrowForgeRecipients; + "secrets/infra/forgejo-nsc-autoscaler-config.age".publicKeys = burrowForgeRecipients; + "secrets/infra/forgejo-nsc-dispatcher-config.age".publicKeys = burrowForgeRecipients; + "secrets/infra/forgejo-nsc-token.age".publicKeys = burrowForgeRecipients; + "secrets/infra/headscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients; + "secrets/infra/linear-scim-token.age".publicKeys = burrowForgeRecipients; + "secrets/infra/tailscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients; + "secrets/infra/zulip-postgres-password.age".publicKeys = burrowForgeRecipients; + "secrets/infra/zulip-memcached-password.age".publicKeys = burrowForgeRecipients; + "secrets/infra/zulip-rabbitmq-password.age".publicKeys = burrowForgeRecipients; + "secrets/infra/zulip-redis-password.age".publicKeys = burrowForgeRecipients; + "secrets/infra/zulip-secret-key.age".publicKeys = burrowForgeRecipients; +} diff --git a/secrets/README.md b/secrets/README.md deleted file mode 100644 index 706b374..0000000 --- a/secrets/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Secrets - -Burrow secrets live in `secrets/.age` and are managed with `agenix`. 
- -For the Forgejo Namespace Cloud runtime: - -- `secrets/forgejo/admin-password.age` -- `secrets/forgejo/agent-ssh-key.age` -- `secrets/forgejo/nsc-token.age` -- `secrets/forgejo/nsc-dispatcher-config.age` -- `secrets/forgejo/nsc-autoscaler-config.age` -- `secrets/cloudflare/api-token.age` -- `secrets/hetzner/api-token.age` -- `secrets/forwardemail/api-token.age` -- `secrets/forwardemail/hetzner-s3-user.age` -- `secrets/forwardemail/hetzner-s3-secret.age` - -Use: - -- `make secret name=forgejo/nsc-token` -- `make secret-file name=forgejo/agent-ssh-key file=/path/to/source` -- `Scripts/provision-forgejo-nsc.sh` to refresh the Forgejo Namespace token and runtime configs in `secrets/forgejo/*.age` -- `make secret-file name=cloudflare/api-token file=/path/to/cloudflare-token.txt` -- `make secret-file name=hetzner/api-token file=/path/to/hetzner-api-token.txt` - -The forge host decrypts these files at activation time and feeds the resulting -paths into `services.burrow.forge`, `services.burrow.forgeRunner`, and -`services.burrow.forgejoNsc`. 
diff --git a/secrets/cloudflare/api-token.age b/secrets/cloudflare/api-token.age deleted file mode 100644 index caf8135..0000000 --- a/secrets/cloudflare/api-token.age +++ /dev/null @@ -1,7 +0,0 @@ -age-encryption.org/v1 --> ssh-ed25519 ux4N8Q rX5+bmtxyHNgD+xNdHkB1fKdjUlrX275DaKTIHssYyA -KwbfKHx14QXRKBIGWwJDR8+DONyCdVssh8Ti8mdajyQ --> ssh-ed25519 IrZmAg SOG/KvURA6PrxVhtZyIbazFGNQZyp0BR4MH+YInHGB4 -79pENXhtLwlCQVnqkPEzoFgrXMmTqRsfs4ULluTevWA ---- gDA64KNbgN+eGHsQbIbKvhOg1T/Nqui6I/wy2MK8VWE -[|V{['E .{CǶ {ha \ No newline at end of file diff --git a/secrets/forgejo/admin-password.age b/secrets/forgejo/admin-password.age deleted file mode 100644 index 53cfa83..0000000 --- a/secrets/forgejo/admin-password.age +++ /dev/null @@ -1,11 +0,0 @@ -age-encryption.org/v1 --> ssh-ed25519 ux4N8Q nmGFzw38TKiVVuA9CM8wHQDVib0RddB+M/UjQnD45jk -iZNLNBlS32zR+TNfcK27T1V3w27sFKJkWfuOzHwcOL0 --> ssh-ed25519 IrZmAg Y53DC0wGX8mjaXkD3+jZn2DviO5iSXsnZDBNCBTmLgA -XLz+YXzT4fYb7q0xuZMKgv88lAd0gGKaquSMcA6Yu3c --> ssh-ed25519 JzXUWA EDAXBKEvHccJ4KKtHjUTA+KA+wN9bBu9v+kzRTFt9AI -JNADezBCxx26+QPD2tIpz5O8cncrJwnqaYQEWY56VGY ---- RpjdftRPUGT80IMYKFDFuHkKEr1heJOvqrqYLufhc10 -_ -F( -((0ɉ',8d]d%T[MKRQxiIf0 \ No newline at end of file diff --git a/secrets/forgejo/agent-ssh-key.age b/secrets/forgejo/agent-ssh-key.age deleted file mode 100644 index 44ce114..0000000 Binary files a/secrets/forgejo/agent-ssh-key.age and /dev/null differ diff --git a/secrets/forgejo/nsc-autoscaler-config.age b/secrets/forgejo/nsc-autoscaler-config.age deleted file mode 100644 index 94e1535..0000000 Binary files a/secrets/forgejo/nsc-autoscaler-config.age and /dev/null differ diff --git a/secrets/forgejo/nsc-dispatcher-config.age b/secrets/forgejo/nsc-dispatcher-config.age deleted file mode 100644 index ab4bff7..0000000 Binary files a/secrets/forgejo/nsc-dispatcher-config.age and /dev/null differ diff --git a/secrets/forgejo/nsc-token.age b/secrets/forgejo/nsc-token.age deleted file mode 100644 index 2f26639..0000000 Binary files 
a/secrets/forgejo/nsc-token.age and /dev/null differ diff --git a/secrets/forwardemail/api-token.age b/secrets/forwardemail/api-token.age deleted file mode 100644 index 4d4ea15..0000000 --- a/secrets/forwardemail/api-token.age +++ /dev/null @@ -1,7 +0,0 @@ -age-encryption.org/v1 --> ssh-ed25519 ux4N8Q ICuXuDsZiw1ShfUX9qjq8bCkeNdsbHWnG4e+3ZOC3jg -wswxqzQtf7jumSYB8ZeQzRBpMrBPVsUnWOYsmlDvpSs --> ssh-ed25519 IrZmAg Xrvp/tXzXrHF1+NxgTZs9nNufyxtTq5NoYT5gaW6p1M -UWGlhZpV19CWMR9abp30vkQwZUMb/ylvInGEBlDdjjE ---- qhAaAECwhmAY4g3/e+Dz9RvL1MBQkHGWyoe1NkdTuqA -d?)<36F:a˝ ųֲ \ No newline at end of file diff --git a/secrets/forwardemail/hetzner-s3-secret.age b/secrets/forwardemail/hetzner-s3-secret.age deleted file mode 100644 index 55b5be3..0000000 --- a/secrets/forwardemail/hetzner-s3-secret.age +++ /dev/null @@ -1,7 +0,0 @@ -age-encryption.org/v1 --> ssh-ed25519 ux4N8Q jwJzvmXUV5rCB6ku7ILLQUDInuQJL2gN+pjmX/ccXWE -q9OSyVhTuzERRRZZOCQzbwAwLOvOFIT/l9MxJ0V3UTo --> ssh-ed25519 IrZmAg 8IutYG3CnNP9gw5fTFOaXm1Ue4i/cVs1apA88bNs9mo -daaf+6HoE3bmUEKR8/zu9jKTstVFCXqBlBxBdNVpQ90 ---- gRGNkWqoh+lZWpDG7yNLd4fjoX2jCyHTWbzImzoFGko -R@+fu9RBX2 [I \ No newline at end of file diff --git a/secrets/forwardemail/hetzner-s3-user.age b/secrets/forwardemail/hetzner-s3-user.age deleted file mode 100644 index 733d6e8..0000000 --- a/secrets/forwardemail/hetzner-s3-user.age +++ /dev/null @@ -1,7 +0,0 @@ -age-encryption.org/v1 --> ssh-ed25519 ux4N8Q jwyFpeVX18Q/1vnK2A1gwETTTH/QDUmW7vhCA+E/1lc -vtG1Ra+hR0cc/o9oJw7YTWMc2+JmrehzBE5QkIHQMKY --> ssh-ed25519 IrZmAg KljcDNRlBmn7ElVfXq/E2prFHnRQD2TkQY9Vto+OQUA -T37sFc3xVrhky6e0n4KbsX18/fBqP3VjS/mNbxX6bfI ---- lvSjWGriUCYC14eI2eH9MdO2cB76Pe3gWD7pidw8Qjo -s&x*4}z&F \ No newline at end of file diff --git a/secrets/hetzner/api-token.age b/secrets/hetzner/api-token.age deleted file mode 100644 index a409a7d..0000000 --- a/secrets/hetzner/api-token.age +++ /dev/null @@ -1,7 +0,0 @@ -age-encryption.org/v1 --> ssh-ed25519 ux4N8Q pEJA2VJkPC+NzA9yFvBrpXHD8qFMTD9iIHYSkx8P2RI 
-AGE1QJya77d92ERA1yQYylvZPNAJEQKoCL32BY5XBzo --> ssh-ed25519 IrZmAg VMpoTBpNG/TAlnbJ2APwc4VMt2CX5rQwlrrihtmojFo -caOwayLgVDGPrjqLLH8hHHQ3Fy2WeRI2tf+R02HFqx0 ---- Ey1DYpyA4lnVqPaabNsEuSihl4fvZ2vpSc/IRGZwYBw -U2Q*mFޞ|^EV" \ No newline at end of file diff --git a/secrets/infra/authentik-google-account-map.json.age b/secrets/infra/authentik-google-account-map.json.age new file mode 100644 index 0000000..158814a Binary files /dev/null and b/secrets/infra/authentik-google-account-map.json.age differ diff --git a/secrets/infra/authentik-google-client-id.age b/secrets/infra/authentik-google-client-id.age new file mode 100644 index 0000000..344c73b --- /dev/null +++ b/secrets/infra/authentik-google-client-id.age @@ -0,0 +1,11 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q Cmf/vgRBGrP8KGwpc9XCXKo5H23Gcgi6dN688oazITQ +poYU28mmvkWFdciOiWLQ+powQcsHzof3Gyzq61V2olY +-> ssh-ed25519 IrZmAg mowUPV3BbYR1IupBoT1o3KB+Fo7Q3E3DT0wRx82f4ic +TZ4r/L5EdHP9wwIbJWBjIITja2L2Pd4AX/U7JSfLm/Y +-> ssh-ed25519 0kWPgQ v9NoFxsRERSgK5cgCHSdtZpn4EcPhvj4JCRR1axGqUM +ogDiLkSFr8i39b3y2WlnbTMprXiVJPG5KNHGKJIagLo +-> X25519 4xouhPGq8wCmbbjLQsfZeGabsXxc4f74e2gXd+13kB4 +UM7/P0RZyu3PoU5mMY0aoGCdoqrOTgDshGuVjagoaEc +--- r6gIEDysfaqsHMaFF/vuLVaJv85uShPlNNTktMdpUvw +2TXěM"T뇵S=_U.=w -\Y/x*|tjZ'#uOcqL_hA$)ic{L @F \ No newline at end of file diff --git a/secrets/infra/authentik-google-client-secret.age b/secrets/infra/authentik-google-client-secret.age new file mode 100644 index 0000000..9a841c7 --- /dev/null +++ b/secrets/infra/authentik-google-client-secret.age @@ -0,0 +1,11 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q Q3rYrGroJXarMLdatYCHVERefWDyGwM0Ii/kOp5m3Fs +W3tgHNXLSVfGU5p8MhBj0mX72SNgMl8nf8sQX29yvBw +-> ssh-ed25519 IrZmAg fyFQQkd51GthNZ4R+W5Al266LnlKbr4ZoMERlCM1OTQ +rNjnHTGCfF8LkqU8mzTrHlL5G4az1k62gvH4gW8zmjc +-> ssh-ed25519 0kWPgQ OWokv9XAphqbkDi1cznb9V09VcM6Li1eIh0JpcIlVTY +TnPVlqKB78y7NPYp02UJmuRXdBMKJKCngpvo8TjpFZ8 +-> X25519 HWaWhyejjo4IjDrNsBYxU1JaGU0899FqiBYgstInuiU 
+enbBGnhH+uJKY3NBD6mmy09Uos+in6ytRQ5BakvTUvI +--- gOBrh88hnvlUSmnRiowJiUIwgIz5zzVKH8YCRb8Ckdw +xokPn8v򵄙HRʏoMË9&Tb]ĉ'|<Pbe \ No newline at end of file diff --git a/secrets/infra/authentik-ui-test-password.age b/secrets/infra/authentik-ui-test-password.age new file mode 100644 index 0000000..773833e Binary files /dev/null and b/secrets/infra/authentik-ui-test-password.age differ diff --git a/secrets/infra/authentik.env.age b/secrets/infra/authentik.env.age new file mode 100644 index 0000000..dbada85 Binary files /dev/null and b/secrets/infra/authentik.env.age differ diff --git a/secrets/infra/forgejo-nsc-autoscaler-config.age b/secrets/infra/forgejo-nsc-autoscaler-config.age new file mode 100644 index 0000000..5b5da65 Binary files /dev/null and b/secrets/infra/forgejo-nsc-autoscaler-config.age differ diff --git a/secrets/infra/forgejo-nsc-dispatcher-config.age b/secrets/infra/forgejo-nsc-dispatcher-config.age new file mode 100644 index 0000000..4ab9cc0 Binary files /dev/null and b/secrets/infra/forgejo-nsc-dispatcher-config.age differ diff --git a/secrets/infra/forgejo-nsc-token.age b/secrets/infra/forgejo-nsc-token.age new file mode 100644 index 0000000..68b6572 Binary files /dev/null and b/secrets/infra/forgejo-nsc-token.age differ diff --git a/secrets/infra/forgejo-oidc-client-secret.age b/secrets/infra/forgejo-oidc-client-secret.age new file mode 100644 index 0000000..68c35e9 Binary files /dev/null and b/secrets/infra/forgejo-oidc-client-secret.age differ diff --git a/secrets/infra/headscale-oidc-client-secret.age b/secrets/infra/headscale-oidc-client-secret.age new file mode 100644 index 0000000..81cff1c --- /dev/null +++ b/secrets/infra/headscale-oidc-client-secret.age @@ -0,0 +1,11 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q 8QtHVR8defharS9ppAsLOnwX1A3T5CqNLhaLDG41q0E +tQBUL3Wzh4lIwmIBGjLH5gjsvChWo6GJ4YxXc+cNddU +-> ssh-ed25519 IrZmAg TgL3trgA3+4ivxpIpv/rEegjmZakSEx7B6e2sc4xhRw +NW4OgVJZhVJUXMBHaajk06CxEJjzrumqTNI2/6RDM4A +-> ssh-ed25519 0kWPgQ 
uBosFXj4NCXBw5X+h/zr2QLCHnkhtgVZEYOHEBBGoFY +LTrparOr5iwAEEPM+rTZyDxJFJX/nQsTYpNdGSgKTes +-> X25519 zbO7ax9E3Fya7mvNP/ueB/XL2UN1sHe8Is+2g6hM8WA +PnjKLk/ZQFrJ0mGIbX8fc9pqw3T2FTT0WSUaDjN1C+w +--- Aknf9dPdr3qD+tu5HyT74L2JMtg46ClYL0FBDhiLrxI +3_:޿rbA~vn™G/->5K\닊|iX~sX!EF'cv9>ԦrHxEP`> \ No newline at end of file diff --git a/secrets/infra/linear-scim-token.age b/secrets/infra/linear-scim-token.age new file mode 100644 index 0000000..5bed53e --- /dev/null +++ b/secrets/infra/linear-scim-token.age @@ -0,0 +1,11 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q Tb3hxc6ZscCQpr7s8raup25FA8YAmq30jHZfOQp28Xs +L9YhaX9IVinud0IOs5K55ldGx82wjXHxnVBHZnRjiTA +-> ssh-ed25519 IrZmAg etIe6hWDP9YkqDFCWybnvsOh7h8YO+z3tKc95pG64lU +BT3rH5a+LJZWv2xtWPbMJGS2oM9v4mOI9WPmnHebiew +-> ssh-ed25519 0kWPgQ YpCf5m16VaKp7d+C3oF9MJQB/0xzCNtD7ODsTiV8t1o +xG8G/kSM+7VrWHm299A7fG/kBFnoiWZPiDZuldvimLw +-> X25519 ETltnMPR7lWbBWJvJKmNZhS7wqX0WCa4aNu8UKzxMVE +Ys57VNuclgvN1nJIrLjNrwekbosa7KK9lFt0PTpr/MQ +--- ZeUmSOf8+NycQAFRGCJHYcQvTJqSBIGKEOEdCnNfJbE +<q1.O_դ7A۷_@%/5l7JɵčA xb "B \ No newline at end of file diff --git a/secrets/infra/tailscale-oidc-client-secret.age b/secrets/infra/tailscale-oidc-client-secret.age new file mode 100644 index 0000000..3c3c074 Binary files /dev/null and b/secrets/infra/tailscale-oidc-client-secret.age differ diff --git a/secrets/infra/zulip-memcached-password.age b/secrets/infra/zulip-memcached-password.age new file mode 100644 index 0000000..0769512 --- /dev/null +++ b/secrets/infra/zulip-memcached-password.age @@ -0,0 +1,11 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q x0r1UHgSibFIvKU34kP0+mnvQa5xXnac3P5fyqb7qFc +MfKnr5N0DV2NIoo4MFVFV0ULMayy0zzZqIq4FDzgDGc +-> ssh-ed25519 IrZmAg rzoR8knGrsTGuh9Hqg/NB0NQKI1vx1WI0ZRyrLIPwVY +7gV/d1slrIT+W0+iX5YK/uUWjHGJfee6vA+f9a35nEY +-> ssh-ed25519 0kWPgQ SyuEAfqmBAqLcuuQUHM5OzAv2hoquMMYtVdbKpBVhjI +7QqXens2363ln0euoormMh9a3Csh+nS2eBkHuQJmOWc +-> X25519 qDjNNkYBUhWTYyBhrw9tYl8a7G6TCkVZbR4aPcP+J0c +QF33V6hFUuYRj0B8Eo4jqyyvCpBbpD2ViVWoS8A8f3E 
+--- 1/Jb0nvWlcszMmxI0yVr6kfexDN0sSk1p+wsTUL4WvU +9a5IكV[f,Db \v&LZ7!?4=JxFeV \ No newline at end of file diff --git a/secrets/infra/zulip-postgres-password.age b/secrets/infra/zulip-postgres-password.age new file mode 100644 index 0000000..b03556c Binary files /dev/null and b/secrets/infra/zulip-postgres-password.age differ diff --git a/secrets/infra/zulip-rabbitmq-password.age b/secrets/infra/zulip-rabbitmq-password.age new file mode 100644 index 0000000..9b1f6ec --- /dev/null +++ b/secrets/infra/zulip-rabbitmq-password.age @@ -0,0 +1,11 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q s1hLIWvkXmlIv/VeHXpDSCe+dh09mE+iZd7xJiQccy0 +8WosTJQLGRPhTR06SIDjgtXNebcf+H/pFzY/lBCjXcs +-> ssh-ed25519 IrZmAg zBNlK+o/RCTCyp8BRkoAYqsDn//kIKtYk3SICkMu3BA +EhBQy8QdSnCZKkdGzQho7zEMmAbJVoU5jZOMPN6tHG0 +-> ssh-ed25519 0kWPgQ hv06idPXqAATkLeUC5vILdEO2NXNWPczlWnwMFvOdkA +3EeajviunGlcfcF1QlRJrVA9bwPT+fJZFX0uneYVs0c +-> X25519 vm9rPYnQB16VSidi7+nr70lFaH0W/jIGY8zwUObZUV8 +jFgPy/w4j0/p1USKGjQY+coo1OUFXiIjJ5apIZCrZVI +--- Cf2c6WzLYOi8xE/sIn7ZtUqBy5AToASDUNpAxyjrI9M +:,+!ϨϬB4DmH|(9l9LPZ^zed=imz? 
\ No newline at end of file diff --git a/secrets/infra/zulip-redis-password.age b/secrets/infra/zulip-redis-password.age new file mode 100644 index 0000000..2aff8b6 --- /dev/null +++ b/secrets/infra/zulip-redis-password.age @@ -0,0 +1,11 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q DqDE3ZZlPUWUyyLA185xsOmfGi146SNk+hENMQXaiFY +D6FhZgynbdccPJQiFRJ18EYvCyDLz3cak0YuQa4f5p4 +-> ssh-ed25519 IrZmAg lXgVeADmgjeHeVOOIS5oHqrhkN59ZWDemMOBJo3ubH8 +AQ24P+DnxNoHEguNnLaROIW4/Sq96w/UxzzQwEOyGRc +-> ssh-ed25519 0kWPgQ 8x0pMohdACYueLY6jbNwg7MYVaZcjwBU4axthvDoFx4 +SgUVnd6MK1MccWVYOu9R3PtoMCBBNGKQ7jt5MSA+KkI +-> X25519 UaO5huJPx8d8eMUnGhbI77tZjsFlIPWEffT4fgoO22w +DVz016ibRxJoa4TDmb2m0Qu9Dn8jpjWEBVtdm2TZx0c +--- 5+MHuvC26SjEBFSmRm0kXjiI27QnJGxvPl2w13EkMrw +FoQ]ȟeU//no.XGJ Э|+ž \ No newline at end of file diff --git a/secrets/infra/zulip-secret-key.age b/secrets/infra/zulip-secret-key.age new file mode 100644 index 0000000..d903d66 --- /dev/null +++ b/secrets/infra/zulip-secret-key.age @@ -0,0 +1,11 @@ +age-encryption.org/v1 +-> ssh-ed25519 ux4N8Q ml+kmLmuRb2nMXJyhKigby2+lPddxM/U7tjhGGQ/JGk +B3UCv/3+4GHeKR964o/m0CoicHwDgWQGEarPW94tb3I +-> ssh-ed25519 IrZmAg AO0ELOuGGj+WanDZFRkHKUEJyZqJYFdhWbqmUfwbpiM +5RZMxVBvW5+TzCBFnn66ry3o5V5cJykweyoYMVBgczY +-> ssh-ed25519 0kWPgQ gqQ/S33Re2OYLz1D9LoSAoqOKxuL4aUes8r6+NyAoXw +NHo2xFsxxJO1ZjnG9r3oxMuvjOUsCyyPvcar2ejZp9w +-> X25519 vUAjBCE197YsckVNM4SYVIPBEESTWnBPCWnUlEwYs1I +L3l85DXFoAVm2ssHfjBeqRpWGlo1UGbmcNkEgoUB9fM +--- X/2O8ufjbTGrt2zCm4gSRqqoxT5v6a+13XjH4dpRsHs +Mkf"(qxF2BdMRYji ܴ<ґb_.!r+<Ussu?gD\V am(Ȉ&.& c/|w(WH4rѠ+j"B  \ No newline at end of file diff --git a/secrets/secrets.nix b/secrets/secrets.nix deleted file mode 100644 index 4a78a69..0000000 --- a/secrets/secrets.nix +++ /dev/null @@ -1,19 +0,0 @@ -let - contact = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa"; - agent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net"; - forge = "ssh-ed25519 
AAAAC3NzaC1lZDI1NTE5AAAAIAlkGo4lwpwIIZ0J01KjTuJuf/U/wGgy4/aKwPIUzutL root@burrow-forge"; - - operatorSecrets = [ contact agent ]; - forgeAutomation = [ contact agent forge ]; -in { - "secrets/forgejo/admin-password.age".publicKeys = forgeAutomation; - "secrets/forgejo/agent-ssh-key.age".publicKeys = forgeAutomation; - "secrets/forgejo/nsc-token.age".publicKeys = forgeAutomation; - "secrets/forgejo/nsc-dispatcher-config.age".publicKeys = forgeAutomation; - "secrets/forgejo/nsc-autoscaler-config.age".publicKeys = forgeAutomation; - "secrets/cloudflare/api-token.age".publicKeys = operatorSecrets; - "secrets/hetzner/api-token.age".publicKeys = operatorSecrets; - "secrets/forwardemail/api-token.age".publicKeys = operatorSecrets; - "secrets/forwardemail/hetzner-s3-user.age".publicKeys = operatorSecrets; - "secrets/forwardemail/hetzner-s3-secret.age".publicKeys = operatorSecrets; -} diff --git a/services/forgejo-nsc/README.md b/services/forgejo-nsc/README.md index 95167c1..79058bb 100644 --- a/services/forgejo-nsc/README.md +++ b/services/forgejo-nsc/README.md @@ -45,15 +45,6 @@ profile. The important knobs are: - `namespace.machine_type` / `namespace.duration` – shape + TTL for the ephemeral Namespace environment. The dispatcher destroys the instance after a job so the TTL acts as a hard cap, not an idle timeout. -- macOS fallback launches still use `nsc create`. Bootstrap prefers the - Compute SSH config endpoint, and falls back to keychain-backed `nsc ssh` - only when the Compute bearer is rejected. That keeps the fast path on direct - TCP while preserving a working fallback when tenant auth drifts. -- `namespace.linux_cache_*` / `namespace.macos_cache_*` – persistent cache - volumes mounted into runners so Linux can keep `/nix` plus shared build - caches warm and macOS can reuse Rust toolchains, Xcode package caches, and - lane-local derived data. 
If Namespace keeps reusing an older undersized cache - volume, bump the cache tag name to force a fresh allocation at the new size. ### Running locally @@ -136,7 +127,7 @@ instances: token: "PENDING-FORGEJO-PAT" scope: level: "repository" - owner: "hackclub" + owner: "burrow" name: "burrow" disable_polling: true # webhook-only mode poll_interval: "30s" @@ -150,10 +141,6 @@ instances: - labels: ["namespace-profile-linux-medium"] min_idle: 0 # set to 0 to scale-to-zero between jobs ttl: "20m" - - labels: ["namespace-profile-macos-large"] - min_idle: 0 - ttl: "90m" - machine_type: "6x14" - labels: ["namespace-profile-windows-large"] min_idle: 0 ttl: "45m" @@ -161,28 +148,17 @@ instances: ``` For Burrow, use `Scripts/provision-forgejo-nsc.sh` to mint the Forgejo PAT, -generate a Namespace token from the logged-in Namespace account, and refresh -`secrets/forgejo/{nsc-token,nsc-dispatcher-config,nsc-autoscaler-config}.age`. -The token file is emitted as JSON with a long-lived `session_token` plus the -current `bearer_token`. The `nsc` CLI paths use the session-backed login flow, -while the Compute API path can consume the bearer token directly. The forge -host consumes the encrypted secrets through agenix; avoid keeping local -plaintext `intake/` copies around. +generate a Namespace token from the logged-in namespace account, and render the +dispatcher/autoscaler configs into `intake/forgejo_nsc_{dispatcher,autoscaler}.yaml` +plus `intake/forgejo_nsc_token.txt`. 
-Long-lived runtime state is now sourced from age-encrypted files: +For ongoing operations, use `Scripts/sync-forgejo-nsc-config.sh`: -- `secrets/forgejo/admin-password.age` -- `secrets/forgejo/agent-ssh-key.age` -- `secrets/forgejo/nsc-token.age` -- `secrets/forgejo/nsc-dispatcher-config.age` -- `secrets/forgejo/nsc-autoscaler-config.age` - -After refreshing the encrypted secrets, deploy the forge host so -`config.age.secrets.*` updates the live paths for `services.burrow.forge`, -`services.burrow.forgeRunner`, and `services.burrow.forgejoNsc`. -The Nix host module also installs a periodic `forgejo-prune-runners` timer that -marks stale offline runners deleted in Forgejo's database so wedged instances do -not leave the queue polluted indefinitely. +- `Scripts/sync-forgejo-nsc-config.sh` copies the intake-backed configs and + Namespace token onto `/var/lib/burrow/intake/` on the forge host, reapplies + file ownership for `forgejo-nsc`, and restarts the dispatcher/autoscaler. +- `Scripts/sync-forgejo-nsc-config.sh --rotate-pat` additionally mints a new + Forgejo PAT on the Burrow forge host and refreshes the local intake files. 
Run it next to the dispatcher: diff --git a/services/forgejo-nsc/autoscaler.example.yaml b/services/forgejo-nsc/autoscaler.example.yaml index 2185469..866d3b5 100644 --- a/services/forgejo-nsc/autoscaler.example.yaml +++ b/services/forgejo-nsc/autoscaler.example.yaml @@ -9,7 +9,7 @@ instances: token: "PENDING-FORGEJO-PAT" scope: level: "repository" - owner: "hackclub" + owner: "burrow" name: "burrow" disable_polling: true poll_interval: "30s" @@ -23,11 +23,7 @@ instances: - labels: ["namespace-profile-linux-medium"] min_idle: 1 ttl: "20m" - machine_type: "4x8" - - labels: ["namespace-profile-macos-large"] - min_idle: 0 - ttl: "90m" - machine_type: "6x14" + machine_type: "8x16" - labels: ["namespace-profile-windows-large"] min_idle: 0 ttl: "45m" diff --git a/services/forgejo-nsc/cmd/forgejo-nsc-dispatcher/main.go b/services/forgejo-nsc/cmd/forgejo-nsc-dispatcher/main.go index 3a04a26..9dcbfb1 100644 --- a/services/forgejo-nsc/cmd/forgejo-nsc-dispatcher/main.go +++ b/services/forgejo-nsc/cmd/forgejo-nsc-dispatcher/main.go @@ -43,23 +43,19 @@ func main() { } dispatcher, err := nsc.NewDispatcher(nsc.Options{ - BinaryPath: cfg.Namespace.NSCBinary, - ComputeBaseURL: cfg.Namespace.ComputeBaseURL, - DefaultImage: cfg.Namespace.Image, - DefaultMachine: cfg.Namespace.MachineType, - MacosBaseImageID: cfg.Namespace.MacosBaseImageID, - MacosMachineArch: cfg.Namespace.MacosMachineArch, - DefaultDuration: cfg.Namespace.Duration.Duration, - WorkDir: cfg.Namespace.WorkDir, - MaxParallel: cfg.Namespace.MaxParallel, - LinuxCachePath: cfg.Namespace.LinuxCachePath, - LinuxCacheVolumes: toNSCCacheVolumes(cfg.Namespace.LinuxCacheVolumes), - MacosCachePath: cfg.Namespace.MacosCachePath, - MacosCacheVolumes: toNSCCacheVolumes(cfg.Namespace.MacosCacheVolumes), - RunnerNamePrefix: cfg.Runner.NamePrefix, - Executor: cfg.Runner.Executor, - Network: cfg.Namespace.Network, - Logger: logger, + BinaryPath: cfg.Namespace.NSCBinary, + ComputeBaseURL: cfg.Namespace.ComputeBaseURL, + DefaultImage: 
cfg.Namespace.Image, + DefaultMachine: cfg.Namespace.MachineType, + MacosBaseImageID: cfg.Namespace.MacosBaseImageID, + MacosMachineArch: cfg.Namespace.MacosMachineArch, + DefaultDuration: cfg.Namespace.Duration.Duration, + WorkDir: cfg.Namespace.WorkDir, + MaxParallel: cfg.Namespace.MaxParallel, + RunnerNamePrefix: cfg.Runner.NamePrefix, + Executor: cfg.Runner.Executor, + Network: cfg.Namespace.Network, + Logger: logger, }) if err != nil { logger.Error("failed to create dispatcher", "error", err) @@ -92,15 +88,3 @@ func main() { defer cancel() _ = srv.Shutdown(ctx) } - -func toNSCCacheVolumes(volumes []config.CacheVolumeConfig) []nsc.CacheVolume { - out := make([]nsc.CacheVolume, 0, len(volumes)) - for _, volume := range volumes { - out = append(out, nsc.CacheVolume{ - Tag: volume.Tag, - MountPoint: volume.MountPoint, - SizeGb: volume.SizeGb, - }) - } - return out -} diff --git a/services/forgejo-nsc/config.example.yaml b/services/forgejo-nsc/config.example.yaml index b45234f..5dc7551 100644 --- a/services/forgejo-nsc/config.example.yaml +++ b/services/forgejo-nsc/config.example.yaml @@ -11,35 +11,16 @@ forgejo: timeout: "30s" namespace: - nsc_binary: "nsc" + nsc_binary: "/app/bin/nsc" compute_base_url: "https://ord4.compute.namespaceapis.com" - image: "code.forgejo.org/forgejo/runner:11" - machine_type: "4x8" + image: "ghcr.io/forgejo/runner:3" + machine_type: "8x16" macos_base_image_id: "tahoe" macos_machine_arch: "arm64" duration: "30m" workdir: "/var/lib/forgejo-runner" max_parallel: 4 network: "" - linux_cache_path: "/var/cache/burrow" - linux_cache_volumes: - - tag: "burrow-forgejo-linux-nix-v2" - mount_point: "/nix" - size_gb: 80 - - tag: "burrow-forgejo-linux-cache-v2" - mount_point: "/var/cache/burrow" - size_gb: 80 - macos_cache_path: "/Users/runner/.cache/burrow" - macos_cache_volumes: - - tag: "burrow-forgejo-macos-shared-v1" - mount_point: "/Users/runner/.cache/burrow/shared" - size_gb: 80 - - tag: "burrow-forgejo-macos-macos-v1" - mount_point: 
"/Users/runner/.cache/burrow/lane/macos" - size_gb: 80 - - tag: "burrow-forgejo-macos-ios-simulator-v1" - mount_point: "/Users/runner/.cache/burrow/lane/ios-simulator" - size_gb: 80 runner: name_prefix: "nscloud-" diff --git a/services/forgejo-nsc/deploy/autoscaler.yaml b/services/forgejo-nsc/deploy/autoscaler.yaml index 30b2729..084b076 100644 --- a/services/forgejo-nsc/deploy/autoscaler.yaml +++ b/services/forgejo-nsc/deploy/autoscaler.yaml @@ -10,7 +10,7 @@ instances: token: "PENDING-FORGEJO-PAT" scope: level: "repository" - owner: "hackclub" + owner: "burrow" name: "burrow" disable_polling: false poll_interval: "30s" @@ -24,11 +24,7 @@ instances: - labels: ["namespace-profile-linux-medium"] min_idle: 0 ttl: "20m" - machine_type: "4x8" - - labels: ["namespace-profile-macos-large"] - min_idle: 0 - ttl: "90m" - machine_type: "6x14" + machine_type: "8x16" - labels: ["namespace-profile-windows-large"] min_idle: 0 ttl: "45m" diff --git a/services/forgejo-nsc/deploy/dispatcher.yaml b/services/forgejo-nsc/deploy/dispatcher.yaml index 0f183e1..6d2aac5 100644 --- a/services/forgejo-nsc/deploy/dispatcher.yaml +++ b/services/forgejo-nsc/deploy/dispatcher.yaml @@ -6,50 +6,23 @@ forgejo: token: "PENDING-FORGEJO-PAT" default_scope: level: "repository" - owner: "hackclub" + owner: "burrow" name: "burrow" default_labels: - namespace-profile-linux-medium timeout: "30s" namespace: - nsc_binary: "nsc" + nsc_binary: "/run/current-system/sw/bin/nsc" compute_base_url: "https://ord4.compute.namespaceapis.com" - image: "code.forgejo.org/forgejo/runner:11" - machine_type: "4x8" + image: "code.forgejo.org/forgejo/runner:3" + machine_type: "8x16" macos_base_image_id: "tahoe" macos_machine_arch: "arm64" duration: "30m" workdir: "/var/lib/forgejo-runner" max_parallel: 4 - allow_labels: - - namespace-profile-linux-medium - - namespace-profile-macos-large - - namespace-profile-windows-large - allow_scopes: - - "repository:hackclub/burrow" - instance_tags: - - "burrow" network: "" - 
linux_cache_path: "/var/cache/burrow" - linux_cache_volumes: - - tag: "burrow-forgejo-linux-nix-v2" - mount_point: "/nix" - size_gb: 80 - - tag: "burrow-forgejo-linux-cache-v2" - mount_point: "/var/cache/burrow" - size_gb: 80 - macos_cache_path: "/Users/runner/.cache/burrow" - macos_cache_volumes: - - tag: "burrow-forgejo-macos-shared-v1" - mount_point: "/Users/runner/.cache/burrow/shared" - size_gb: 80 - - tag: "burrow-forgejo-macos-macos-v1" - mount_point: "/Users/runner/.cache/burrow/lane/macos" - size_gb: 80 - - tag: "burrow-forgejo-macos-ios-simulator-v1" - mount_point: "/Users/runner/.cache/burrow/lane/ios-simulator" - size_gb: 80 runner: name_prefix: "nscloud-" diff --git a/services/forgejo-nsc/internal/app/service.go b/services/forgejo-nsc/internal/app/service.go index 10639a5..45b66eb 100644 --- a/services/forgejo-nsc/internal/app/service.go +++ b/services/forgejo-nsc/internal/app/service.go @@ -94,17 +94,6 @@ type RunnerHandle struct { Name string `json:"name"` } -func launchContext(ttl time.Duration) (context.Context, context.CancelFunc) { - if ttl <= 0 { - return context.WithTimeout(context.Background(), 2*time.Hour) - } - // Provisioning can legitimately take several minutes before the runner starts - // processing the actual Forgejo job. Keep the launch context independent from - // the caller's HTTP timeout so autoscaler/webhook requests don't kill active - // bootstraps mid-flight. 
- return context.WithTimeout(context.Background(), ttl+30*time.Minute) -} - func (s *Service) Dispatch(ctx context.Context, req DispatchRequest) (DispatchResponse, error) { count := req.Count if count <= 0 { @@ -145,10 +134,7 @@ func (s *Service) Dispatch(ctx context.Context, req DispatchRequest) (DispatchRe return fmt.Errorf("fetching registration token: %w", err) } - launchCtx, cancel := launchContext(ttl) - defer cancel() - - name, err := s.dispatcher.LaunchRunner(launchCtx, nsc.LaunchRequest{ + name, err := s.dispatcher.LaunchRunner(egCtx, nsc.LaunchRequest{ Token: token, InstanceURL: s.instanceURL, Labels: labels, diff --git a/services/forgejo-nsc/internal/config/config.go b/services/forgejo-nsc/internal/config/config.go index 5ef8a7a..264cbd0 100644 --- a/services/forgejo-nsc/internal/config/config.go +++ b/services/forgejo-nsc/internal/config/config.go @@ -49,14 +49,8 @@ type Config struct { Runner RunnerConfig `yaml:"runner"` } -type CacheVolumeConfig struct { - Tag string `yaml:"tag"` - MountPoint string `yaml:"mount_point"` - SizeGb int64 `yaml:"size_gb"` -} - type ForgejoConfig struct { - BaseURL string `yaml:"base_url"` + BaseURL string `yaml:"base_url"` // InstanceURL is the URL runners should use when registering with Forgejo. // This must be reachable from the spawned runner (e.g. the public URL like // https://git.burrow.net), and may differ from BaseURL (which can be a local @@ -86,19 +80,15 @@ type NamespaceConfig struct { // MacosBaseImageID selects which macOS base image to use (e.g. "tahoe"). MacosBaseImageID string `yaml:"macos_base_image_id"` // MacosMachineArch is the architecture used for macOS instances (typically "arm64"). 
- MacosMachineArch string `yaml:"macos_machine_arch"` - Duration Duration `yaml:"duration"` - WorkDir string `yaml:"workdir"` - MaxParallel int64 `yaml:"max_parallel"` - Environment []string `yaml:"environment"` - AllowLabels []string `yaml:"allow_labels"` - AllowScopes []string `yaml:"allow_scopes"` - Network string `yaml:"network"` - InstanceTags []string `yaml:"instance_tags"` - LinuxCachePath string `yaml:"linux_cache_path"` - LinuxCacheVolumes []CacheVolumeConfig `yaml:"linux_cache_volumes"` - MacosCachePath string `yaml:"macos_cache_path"` - MacosCacheVolumes []CacheVolumeConfig `yaml:"macos_cache_volumes"` + MacosMachineArch string `yaml:"macos_machine_arch"` + Duration Duration `yaml:"duration"` + WorkDir string `yaml:"workdir"` + MaxParallel int64 `yaml:"max_parallel"` + Environment []string `yaml:"environment"` + AllowLabels []string `yaml:"allow_labels"` + AllowScopes []string `yaml:"allow_scopes"` + Network string `yaml:"network"` + InstanceTags []string `yaml:"instance_tags"` } type RunnerConfig struct { @@ -170,56 +160,6 @@ func (c *Config) Validate() error { if c.Namespace.MaxParallel <= 0 { c.Namespace.MaxParallel = 4 } - if c.Namespace.LinuxCachePath == "" { - c.Namespace.LinuxCachePath = "/var/cache/burrow" - } - if len(c.Namespace.LinuxCacheVolumes) == 0 { - c.Namespace.LinuxCacheVolumes = []CacheVolumeConfig{ - { - Tag: "burrow-forgejo-linux-nix-v2", - MountPoint: "/nix", - SizeGb: 80, - }, - { - Tag: "burrow-forgejo-linux-cache-v2", - MountPoint: c.Namespace.LinuxCachePath, - SizeGb: 80, - }, - } - } - if c.Namespace.MacosCachePath == "" { - c.Namespace.MacosCachePath = "/Users/runner/.cache/burrow" - } - if len(c.Namespace.MacosCacheVolumes) == 0 { - c.Namespace.MacosCacheVolumes = []CacheVolumeConfig{ - { - Tag: "burrow-forgejo-macos-shared-v1", - MountPoint: c.Namespace.MacosCachePath + "/shared", - SizeGb: 80, - }, - { - Tag: "burrow-forgejo-macos-macos-v1", - MountPoint: c.Namespace.MacosCachePath + "/lane/macos", - SizeGb: 80, - }, - { - 
Tag: "burrow-forgejo-macos-ios-simulator-v1", - MountPoint: c.Namespace.MacosCachePath + "/lane/ios-simulator", - SizeGb: 80, - }, - } - } - for _, volume := range append(append([]CacheVolumeConfig{}, c.Namespace.LinuxCacheVolumes...), c.Namespace.MacosCacheVolumes...) { - if strings.TrimSpace(volume.Tag) == "" { - return errors.New("namespace cache volume tag is required") - } - if strings.TrimSpace(volume.MountPoint) == "" { - return fmt.Errorf("namespace cache volume %q mount_point is required", volume.Tag) - } - if volume.SizeGb <= 0 { - return fmt.Errorf("namespace cache volume %q size_gb must be positive", volume.Tag) - } - } return nil } diff --git a/services/forgejo-nsc/internal/nsc/dispatcher.go b/services/forgejo-nsc/internal/nsc/dispatcher.go index 4a579a6..49cb4ec 100644 --- a/services/forgejo-nsc/internal/nsc/dispatcher.go +++ b/services/forgejo-nsc/internal/nsc/dispatcher.go @@ -17,29 +17,19 @@ import ( ) type Options struct { - BinaryPath string - DefaultImage string - DefaultMachine string - DefaultDuration time.Duration - WorkDir string - MaxParallel int64 - RunnerNamePrefix string - Executor string - Network string - ComputeBaseURL string - MacosBaseImageID string - MacosMachineArch string - LinuxCachePath string - LinuxCacheVolumes []CacheVolume - MacosCachePath string - MacosCacheVolumes []CacheVolume - Logger *slog.Logger -} - -type CacheVolume struct { - Tag string - MountPoint string - SizeGb int64 + BinaryPath string + DefaultImage string + DefaultMachine string + DefaultDuration time.Duration + WorkDir string + MaxParallel int64 + RunnerNamePrefix string + Executor string + Network string + ComputeBaseURL string + MacosBaseImageID string + MacosMachineArch string + Logger *slog.Logger } type LaunchRequest struct { @@ -83,12 +73,6 @@ func NewDispatcher(opts Options) (*Dispatcher, error) { if opts.DefaultDuration == 0 { opts.DefaultDuration = 30 * time.Minute } - if opts.LinuxCachePath == "" { - opts.LinuxCachePath = "/var/cache/burrow" - } - 
if opts.MacosCachePath == "" { - opts.MacosCachePath = "/Users/runner/.cache/burrow" - } logger := opts.Logger if logger == nil { logger = slog.New(slog.NewTextHandler(io.Discard, nil)) @@ -120,9 +104,6 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin } machineType := choose(req.MachineType, d.opts.DefaultMachine) image := choose(req.Image, d.opts.DefaultImage) - if req.ExtraEnv == nil { - req.ExtraEnv = make(map[string]string) - } if hasWindowsLabel(req.Labels) { if err := d.launchWindowsRunnerViaWinRM(ctx, runnerName, req, duration, machineType); err != nil { @@ -132,13 +113,10 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin } if hasMacOSLabel(req.Labels) { - if _, ok := req.ExtraEnv["NSC_CACHE_PATH"]; !ok { - req.ExtraEnv["NSC_CACHE_PATH"] = d.opts.MacosCachePath - } // Compute macOS shapes differ from the Linux "run" defaults. If the request // didn't specify a machine type, ensure we pick a macOS-valid default. if machineType == "" || machineType == d.opts.DefaultMachine { - machineType = "6x14" + machineType = "12x28" } // Prefer the Compute API path because it uses the service token (NSC_TOKEN_FILE) @@ -151,9 +129,6 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin } return runnerName, nil } - if _, ok := req.ExtraEnv["NSC_CACHE_PATH"]; !ok { - req.ExtraEnv["NSC_CACHE_PATH"] = d.opts.LinuxCachePath - } env := map[string]string{ "FORGEJO_INSTANCE_URL": req.InstanceURL, @@ -165,6 +140,9 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin for k, v := range req.ExtraEnv { env[k] = v } + if _, ok := env["NSC_CACHE_PATH"]; !ok { + env["NSC_CACHE_PATH"] = "/nix/store" + } script := d.bootstrapScript() args := []string{ @@ -183,7 +161,6 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin if d.opts.Network != "" { args = append(args, "--network", d.opts.Network) } - args = appendVolumeArgs(args, 
d.opts.LinuxCacheVolumes) for key, value := range env { if value == "" { continue @@ -197,10 +174,6 @@ func (d *Dispatcher) LaunchRunner(ctx context.Context, req LaunchRequest) (strin args = append(args, "--", "/bin/sh", "-c", script) cmd := exec.CommandContext(ctx, d.opts.BinaryPath, args...) - // The Linux `nsc run` path uses the CLI auth flow. Keep using the service - // account's refreshed Namespace login session instead of forcing the - // short-lived NSC_TOKEN_FILE bearer token into CLI requests. - cmd.Env = nscCLIEnv() var buf bytes.Buffer cmd.Stdout = &buf cmd.Stderr = &buf @@ -397,21 +370,9 @@ func choose(values ...string) string { return "" } -func appendVolumeArgs(args []string, volumes []CacheVolume) []string { - for _, volume := range volumes { - if strings.TrimSpace(volume.Tag) == "" || strings.TrimSpace(volume.MountPoint) == "" || volume.SizeGb <= 0 { - continue - } - args = append(args, "--volume", fmt.Sprintf("cache:%s:%s:%d", volume.Tag, volume.MountPoint, volume.SizeGb)) - } - return args -} - func (d *Dispatcher) bootstrapScript() string { var builder strings.Builder builder.WriteString(`set -euo pipefail -export HOME=/root -export USER=root mkdir -p "${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}" cd "${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}" @@ -427,23 +388,8 @@ fi if ! command -v xz >/dev/null 2>&1; then apk add --no-cache xz >/dev/null fi -if ! command -v nix >/dev/null 2>&1; then - apk add --no-cache nix >/dev/null -fi export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" -if [ -f /etc/profile.d/nix.sh ]; then - # shellcheck disable=SC1091 - . /etc/profile.d/nix.sh -fi -if [ -f /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]; then - # shellcheck disable=SC1091 - . 
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh -fi -export PATH="/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}" -export NIX_CONFIG="experimental-features = nix-command flakes -accept-flake-config = true" node --version >/dev/null -nix --version >/dev/null cat > runner.yaml <<'EOF' log: @@ -467,7 +413,13 @@ for label in ${FORGEJO_RUNNER_LABELS//,/ } ; do fi case "${label}" in *:*) resolved="${label}" ;; - *) resolved="${label}:${runner_exec}" ;; + *) + if [ "$runner_exec" = "host" ]; then + resolved="${label}:host" + else + resolved="${label}:${runner_exec}" + fi + ;; esac echo " - ${resolved}" >> runner.yaml if [ -z "${resolved_labels}" ]; then diff --git a/services/forgejo-nsc/internal/nsc/macos.go b/services/forgejo-nsc/internal/nsc/macos.go index 0b1e39a..9bf3837 100644 --- a/services/forgejo-nsc/internal/nsc/macos.go +++ b/services/forgejo-nsc/internal/nsc/macos.go @@ -125,16 +125,6 @@ func macosComputeBaseImageID(baseImageID string) string { } } -func macosWorkDir(workdir string) string { - workdir = strings.TrimSpace(workdir) - switch workdir { - case "", "/var/lib/forgejo-runner": - return "/tmp/forgejo-runner" - default: - return workdir - } -} - type nscBearerTokenFile struct { BearerToken string `json:"bearer_token"` } @@ -193,7 +183,10 @@ func (d *Dispatcher) launchMacOSRunner(ctx context.Context, runnerName string, r httpClient := &http.Client{Timeout: 60 * time.Second} client := computev1betaconnect.NewComputeServiceClient(httpClient, d.opts.ComputeBaseURL) - workdir := macosWorkDir(d.opts.WorkDir) + workdir := d.opts.WorkDir + if strings.TrimSpace(workdir) == "" { + workdir = "/tmp/forgejo-runner" + } env := map[string]string{ "FORGEJO_INSTANCE_URL": req.InstanceURL, @@ -206,8 +199,12 @@ func (d *Dispatcher) launchMacOSRunner(ctx context.Context, runnerName string, r for k, v := range req.ExtraEnv { env[k] = v } + // Best-effort caching: workflows call Scripts/nscloud-cache.sh, which is a + // no-op unless 
NSC_CACHE_PATH is set. This may still be skipped if spacectl + // lacks credentials, but setting the path is harmless and keeps behavior + // consistent across macOS / Linux runners. if _, ok := env["NSC_CACHE_PATH"]; !ok { - env["NSC_CACHE_PATH"] = d.opts.MacosCachePath + env["NSC_CACHE_PATH"] = "/Users/runner/.cache/nscloud" } deadline := timestamppb.New(time.Now().Add(ttl)) @@ -239,15 +236,10 @@ func (d *Dispatcher) launchMacOSRunner(ctx context.Context, runnerName string, r }, }, } - experimental := &computev1beta.CreateInstanceRequest_ExperimentalFeatures{} if imageID := macosComputeBaseImageID(d.opts.MacosBaseImageID); imageID != "" { - experimental.MacosBaseImageId = imageID - } - if volumes := computeCacheVolumeRequests(d.opts.MacosCacheVolumes); len(volumes) > 0 { - experimental.Volumes = volumes - } - if experimental.MacosBaseImageId != "" || len(experimental.Volumes) > 0 { - createReq.Experimental = experimental + createReq.Experimental = &computev1beta.CreateInstanceRequest_ExperimentalFeatures{ + MacosBaseImageId: imageID, + } } d.log.Info("launching Namespace macos runner", @@ -573,22 +565,6 @@ func (d *Dispatcher) destroyComputeInstance(ctx context.Context, client computev d.log.Info("macos runner destroyed", "runner", runnerName, "instance", instanceID) } -func computeCacheVolumeRequests(volumes []CacheVolume) []*computev1beta.VolumeRequest { - var out []*computev1beta.VolumeRequest - for _, volume := range volumes { - if strings.TrimSpace(volume.Tag) == "" || strings.TrimSpace(volume.MountPoint) == "" || volume.SizeGb <= 0 { - continue - } - out = append(out, &computev1beta.VolumeRequest{ - MountPoint: volume.MountPoint, - Tag: volume.Tag, - SizeMb: volume.SizeGb * 1024, - PersistencyKind: computev1beta.VolumeRequest_CACHE, - }) - } - return out -} - func macosBootstrapScript() string { // Keep this script self-contained: it runs on a fresh macOS VM base image. 
var b strings.Builder @@ -597,70 +573,64 @@ func macosBootstrapScript() string { workdir="${FORGEJO_RUNNER_WORKDIR:-/tmp/forgejo-runner}" mkdir -p "${workdir}" cd "${workdir}" -if ! mkdir -p "/Users/runner/.cache/act" 2>/dev/null; then - sudo install -d -m 0775 -o "$(id -un)" -g "$(id -gn)" /Users/runner/.cache /Users/runner/.cache/act -fi export PATH="/usr/local/bin:/opt/homebrew/bin:/usr/bin:/bin:/usr/sbin:/sbin:${PATH}" -cache_base="${NSC_CACHE_PATH:-$HOME/.cache/burrow}" -cache_root="${NSC_SHARED_CACHE_PATH:-${cache_base}/shared}" -cache_owner="$(id -un)" -cache_group="$(id -gn)" -if ! install -d -m 0775 -o "${cache_owner}" -g "${cache_group}" \ - "${cache_root}" \ - "${cache_root}/bin" \ - "${cache_root}/downloads" \ - "${cache_root}/go/path" \ - "${cache_root}/go/mod" \ - "${cache_root}/go/build" \ - "${cache_root}/homebrew" 2>/dev/null; then - sudo install -d -m 0775 -o "${cache_owner}" -g "${cache_group}" \ - "${cache_root}" \ - "${cache_root}/bin" \ - "${cache_root}/downloads" \ - "${cache_root}/go/path" \ - "${cache_root}/go/mod" \ - "${cache_root}/go/build" \ - "${cache_root}/homebrew" -fi -export HOMEBREW_CACHE="${cache_root}/homebrew" -export GOPATH="${cache_root}/go/path" -export GOMODCACHE="${cache_root}/go/mod" -export GOCACHE="${cache_root}/go/build" if ! command -v curl >/dev/null 2>&1; then echo "curl is required" >&2 exit 1 fi -# Apple build workflows do not require Nix just to bootstrap the Forgejo runner. -# If Nix is already present on the base image, keep it on PATH; otherwise leave -# installation to the job itself. +if ! command -v nix >/dev/null 2>&1; then + echo "Installing nix (Determinate Systems installer)..." 
+ installer="/tmp/nix-installer.$$" + curl -fsSL -o "${installer}" https://install.determinate.systems/nix + chmod +x "${installer}" + + if command -v sudo >/dev/null 2>&1; then + if sudo -n true 2>/dev/null; then + sudo -n sh "${installer}" install --no-confirm + else + sudo sh "${installer}" install --no-confirm + fi + else + sh "${installer}" install --no-confirm + fi + + rm -f "${installer}" +fi + if [[ -f /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]]; then # shellcheck disable=SC1091 . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh - export PATH="/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}" fi +export PATH="/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}" + +# Flake builds need nix-command + flakes enabled. Workflows may layer additional +# config, but ensure a sane default exists. +mkdir -p "${XDG_CONFIG_HOME:-$HOME/.config}/nix" +cat > "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" <<'EOF' +experimental-features = nix-command flakes +sandbox = true +fallback = true +substituters = https://cache.nixos.org +trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= +EOF + mkdir -p bin export PATH="${PWD}/bin:${PATH}" -# Keep the ad-hoc macOS bootstrap on the same Forgejo runner major line as the -# Linux runner image. Forgejo runner 11.x is currently published as v11.3.1. -runner_version="v11.3.1" +runner_version="v12.6.4" runner_src_tgz="forgejo-runner-${runner_version}.tar.gz" -runner_src_tgz_path="${cache_root}/downloads/${runner_src_tgz}" runner_src_url="https://code.forgejo.org/forgejo/runner/archive/${runner_version}.tar.gz" runner_src_dir="forgejo-runner-src" -runner_bin_cache="${cache_root}/bin/forgejo-runner-${runner_version}" -if [[ ! -x "${runner_bin_cache}" ]]; then +if ! command -v forgejo-runner >/dev/null 2>&1; then rm -rf "${runner_src_dir}" mkdir -p "${runner_src_dir}" - if [[ ! 
-f "${runner_src_tgz_path}" ]]; then - curl -fsSL "${runner_src_url}" -o "${runner_src_tgz_path}" - fi - tar -xzf "${runner_src_tgz_path}" -C "${runner_src_dir}" --strip-components=1 + curl -fsSL "${runner_src_url}" -o "${runner_src_tgz}" + tar -xzf "${runner_src_tgz}" -C "${runner_src_dir}" --strip-components=1 toolchain="$(grep -E '^toolchain ' "${runner_src_dir}/go.mod" | awk '{print $2}' | head -n 1 || true)" if [ -z "${toolchain}" ]; then @@ -670,23 +640,21 @@ if [[ ! -x "${runner_bin_cache}" ]]; then if ! command -v go >/dev/null 2>&1; then go_tgz="${toolchain}.darwin-arm64.tar.gz" go_url="https://go.dev/dl/${go_tgz}" - go_tgz_path="${cache_root}/downloads/${go_tgz}" - if [[ ! -f "${go_tgz_path}" ]]; then - curl -fsSL "${go_url}" -o "${go_tgz_path}" - fi - tar -xzf "${go_tgz_path}" + curl -fsSL "${go_url}" -o "${go_tgz}" + tar -xzf "${go_tgz}" export GOROOT="${PWD}/go" export PATH="${GOROOT}/bin:${PATH}" fi + export GOPATH="${PWD}/.gopath" + export GOMODCACHE="${PWD}/.gomodcache" + export GOCACHE="${PWD}/.gocache" mkdir -p "${GOPATH}" "${GOMODCACHE}" "${GOCACHE}" - (cd "${runner_src_dir}" && go build -o "${runner_bin_cache}" .) - chmod +x "${runner_bin_cache}" + (cd "${runner_src_dir}" && go build -o "${workdir}/bin/forgejo-runner" .) 
+ chmod +x "${workdir}/bin/forgejo-runner" fi -ln -sf "${runner_bin_cache}" "${workdir}/bin/forgejo-runner" - cat > runner.yaml <<'EOF' log: level: info @@ -709,7 +677,9 @@ for label in ${FORGEJO_RUNNER_LABELS//,/ } ; do fi case "${label}" in *:*) resolved="${label}" ;; - *) resolved="${label}:${runner_exec}" ;; + *) + resolved="${label}:host" + ;; esac echo " - ${resolved}" >> runner.yaml if [ -z "${resolved_labels}" ]; then diff --git a/services/forgejo-nsc/internal/nsc/macos_nsc.go b/services/forgejo-nsc/internal/nsc/macos_nsc.go index 159634a..c22fadb 100644 --- a/services/forgejo-nsc/internal/nsc/macos_nsc.go +++ b/services/forgejo-nsc/internal/nsc/macos_nsc.go @@ -12,22 +12,8 @@ import ( "path/filepath" "strings" "time" - - "connectrpc.com/connect" ) -func nscCLIEnv() []string { - env := os.Environ() - out := env[:0] - for _, entry := range env { - if strings.HasPrefix(entry, "NSC_TOKEN_FILE=") { - continue - } - out = append(out, entry) - } - return out -} - func normalizeMacOSNSCMachineType(machineType string) (normalized string, changed bool, err error) { vcpu, memoryMB, err := parseMachineTypeCPUxMemGB(machineType) if err != nil { @@ -66,17 +52,14 @@ func normalizeMacOSNSCMachineType(machineType string) (normalized string, change return normalized, changed, nil } -type macosNSCSSHOutcome int - -const ( - macosNSCSSHCompleted macosNSCSSHOutcome = iota - macosNSCSSHHandoff -) - func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName string, req LaunchRequest, ttl time.Duration, machineType string) error { if machineType == "" { return errors.New("machine_type is required for macos runners") } + if strings.TrimSpace(os.Getenv("NSC_TOKEN_FILE")) == "" { + // The Burrow forge host feeds NSC_TOKEN_FILE from the intake-backed runtime token. 
+ return errors.New("NSC_TOKEN_FILE is required for macos runners") + } selectors := macosSelectorsArg(d.opts.MacosBaseImageID) if selectors == "" { @@ -153,13 +136,11 @@ func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName str "--wait_timeout", a.waitTimeout.String(), } args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL) - args = appendVolumeArgs(args, d.opts.MacosCacheVolumes) createCtx, cancel := context.WithTimeout(ctx, a.createTimeout) defer cancel() cmd := exec.CommandContext(createCtx, d.opts.BinaryPath, args...) - cmd.Env = nscCLIEnv() var buf bytes.Buffer cmd.Stdout = &buf cmd.Stderr = &buf @@ -225,38 +206,14 @@ func (d *Dispatcher) launchMacOSRunnerViaNSC(ctx context.Context, runnerName str return fmt.Errorf("nsc create failed without producing an instance id\n%s", lastOut) } - destroyOnReturn := true - defer func() { - if destroyOnReturn { - d.destroyNSCInstance(context.Background(), runnerName, instanceID) - } - }() + // Always attempt cleanup even if the runner fails. + defer d.destroyNSCInstance(context.Background(), runnerName, instanceID) script := macosBootstrapWrapperScript(runnerName, req, d.opts.Executor, d.opts.WorkDir) // Use the Compute SSH config endpoint (direct TCP) instead of `nsc ssh`, which - // relies on a websocket-based SSH proxy that is less reliable under the - // revokable tenant token flow used by the dispatcher. + // relies on a websocket-based SSH proxy that is not supported by the + // revokable tenant token we run the dispatcher with. 
if err := d.runMacOSComputeSSHScript(ctx, runnerName, instanceID, script); err != nil { - if shouldFallbackToNSCSSH(err) { - d.log.Warn("compute ssh bootstrap failed; falling back to nsc ssh", - "runner", runnerName, - "instance", instanceID, - "err", err, - ) - outcome, sshErr := d.runMacOSNSCSSHScript(ctx, runnerName, instanceID, script) - if sshErr != nil { - return sshErr - } - if outcome == macosNSCSSHHandoff { - destroyOnReturn = false - d.log.Info("leaving macos nsc instance running until TTL after runner handoff", - "runner", runnerName, - "instance", instanceID, - "ttl", ttl.String(), - ) - } - return nil - } return err } return nil @@ -328,7 +285,6 @@ func (d *Dispatcher) destroyNSCInstance(ctx context.Context, runnerName, instanc args := []string{"destroy", "--force", instanceID} args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL) cmd := exec.CommandContext(ctx, d.opts.BinaryPath, args...) - cmd.Env = nscCLIEnv() var buf bytes.Buffer cmd.Stdout = &buf cmd.Stderr = &buf @@ -340,7 +296,9 @@ func (d *Dispatcher) destroyNSCInstance(ctx context.Context, runnerName, instanc } func macosBootstrapWrapperScript(runnerName string, req LaunchRequest, executor, workdir string) string { - workdir = macosWorkDir(workdir) + if strings.TrimSpace(workdir) == "" { + workdir = "/tmp/forgejo-runner" + } // Pass all values via stdin script so secrets do not appear in the nsc ssh argv. 
env := map[string]string{ @@ -378,75 +336,6 @@ func shellSingleQuote(value string) string { return "'" + strings.ReplaceAll(value, "'", `'\"'\"'`) + "'" } -func shouldFallbackToNSCSSH(err error) bool { - if err == nil { - return false - } - - switch connect.CodeOf(err) { - case connect.CodeUnauthenticated, connect.CodePermissionDenied, connect.CodeUnimplemented: - return true - } - - errText := strings.ToLower(err.Error()) - return strings.Contains(errText, "compute get ssh config failed") && - (strings.Contains(errText, "unauthenticated") || - strings.Contains(errText, "permission_denied") || - strings.Contains(errText, "permission denied") || - strings.Contains(errText, "unimplemented")) -} - -func (d *Dispatcher) runMacOSNSCSSHScript(ctx context.Context, runnerName, instanceID, script string) (macosNSCSSHOutcome, error) { - sshCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) - defer cancel() - - args := []string{"ssh", "--disable-pty", instanceID, "/bin/bash"} - args = prependNSCRegionArgs(args, d.opts.ComputeBaseURL) - - cmd := exec.CommandContext(sshCtx, d.opts.BinaryPath, args...) 
- cmd.Env = nscCLIEnv() - cmd.Stdin = strings.NewReader(script) - - var buf bytes.Buffer - cmd.Stdout = &buf - cmd.Stderr = &buf - - if err := cmd.Run(); err != nil { - if errors.Is(sshCtx.Err(), context.DeadlineExceeded) { - return macosNSCSSHCompleted, fmt.Errorf("nsc ssh timed out after %s\n%s", 5*time.Minute, strings.TrimSpace(buf.String())) - } - if nscSSHBootstrapLikelySucceeded(err, buf.String()) { - d.log.Warn("nsc ssh exited after runner handoff; treating bootstrap as successful", - "runner", runnerName, - "instance", instanceID, - "err", err, - ) - d.log.Info("macos runner bootstrap completed via nsc ssh", "runner", runnerName, "instance", instanceID) - return macosNSCSSHHandoff, nil - } - return macosNSCSSHCompleted, fmt.Errorf("nsc ssh runner bootstrap failed: %w\n%s", err, strings.TrimSpace(buf.String())) - } - - d.log.Info("macos runner bootstrap completed via nsc ssh", "runner", runnerName, "instance", instanceID) - return macosNSCSSHCompleted, nil -} - -func nscSSHBootstrapLikelySucceeded(err error, output string) bool { - if err == nil { - return false - } - - errText := strings.ToLower(err.Error()) - if !strings.Contains(errText, "remote command exited without exit status or exit signal") { - return false - } - - output = strings.ToLower(output) - return strings.Contains(output, "runner registered successfully") && - strings.Contains(output, "starting job") && - strings.Contains(output, "task ") -} - func prependNSCRegionArgs(args []string, computeBaseURL string) []string { region := strings.TrimSpace(os.Getenv("NSC_REGION")) if region == "" { diff --git a/services/forgejo-nsc/internal/nsc/macos_nsc_test.go b/services/forgejo-nsc/internal/nsc/macos_nsc_test.go deleted file mode 100644 index d2aabc6..0000000 --- a/services/forgejo-nsc/internal/nsc/macos_nsc_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package nsc - -import ( - "errors" - "testing" -) - -func TestNormalizeMacOSNSCMachineTypeRoundsUp(t *testing.T) { - t.Parallel() - - got, changed, err := 
normalizeMacOSNSCMachineType("5x10") - if err != nil { - t.Fatalf("normalizeMacOSNSCMachineType: %v", err) - } - if !changed { - t.Fatal("expected machine type to be normalized") - } - if got != "6x14" { - t.Fatalf("expected 6x14, got %q", got) - } -} - -func TestNormalizeMacOSNSCMachineTypeKeepsAllowedShape(t *testing.T) { - t.Parallel() - - got, changed, err := normalizeMacOSNSCMachineType("6x14") - if err != nil { - t.Fatalf("normalizeMacOSNSCMachineType: %v", err) - } - if changed { - t.Fatal("expected allowed machine type to remain unchanged") - } - if got != "6x14" { - t.Fatalf("expected 6x14, got %q", got) - } -} - -func TestShouldFallbackToNSCSSHFallbackForComputeAuthErrors(t *testing.T) { - t.Parallel() - - err := errors.New("compute get ssh config failed: unauthenticated: invalid tenant credentials") - if !shouldFallbackToNSCSSH(err) { - t.Fatal("expected compute auth error to fall back to nsc ssh") - } -} - -func TestShouldFallbackToNSCSSHRejectsOtherErrors(t *testing.T) { - t.Parallel() - - err := errors.New("compute ssh runner bootstrap failed: exit status 1") - if shouldFallbackToNSCSSH(err) { - t.Fatal("expected unrelated bootstrap errors to remain fatal") - } -} - -func TestNSCSSHBootstrapLikelySucceeded(t *testing.T) { - t.Parallel() - - err := errors.New("wait: remote command exited without exit status or exit signal") - output := ` -level=info msg="Runner registered successfully." 
-time="2026-03-19T11:29:49Z" level=info msg="Starting job" -time="2026-03-19T11:29:50Z" level=info msg="task 124 repo is hackclub/burrow" -` - - if !nscSSHBootstrapLikelySucceeded(err, output) { - t.Fatal("expected handoff success heuristic to match") - } -} diff --git a/site/layout/layout.tsx b/site/layout/layout.tsx index 28ff24d..057aa68 100644 --- a/site/layout/layout.tsx +++ b/site/layout/layout.tsx @@ -1,20 +1,5 @@ -import { Space_Mono, Poppins } from "next/font/google"; import localFont from "next/font/local"; -const space_mono = Space_Mono({ - weight: ["400", "700"], - subsets: ["latin"], - display: "swap", - variable: "--font-space-mono", -}); - -const poppins = Poppins({ - weight: ["400", "500", "600", "700", "800", "900"], - subsets: ["latin"], - display: "swap", - variable: "--font-poppins", -}); - const phantomSans = localFont({ src: [ { @@ -36,10 +21,18 @@ const phantomSans = localFont({ variable: "--font-phantom-sans", }); +const fallbackFontVariables = { + "--font-space-mono": + '"SFMono-Regular", "SF Mono", ui-monospace, Menlo, Monaco, "Cascadia Mono", "Segoe UI Mono", "Roboto Mono", monospace', + "--font-poppins": + 'var(--font-phantom-sans), -apple-system, BlinkMacSystemFont, "Segoe UI", sans-serif', +} as React.CSSProperties; + export default function Layout({ children }: { children: React.ReactNode }) { return (
{children}
diff --git a/site/package-lock.json b/site/package-lock.json new file mode 100644 index 0000000..e1357f9 --- /dev/null +++ b/site/package-lock.json @@ -0,0 +1,3907 @@ +{ + "name": "burrow", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "burrow", + "version": "0.1.0", + "dependencies": { + "@fortawesome/fontawesome-free": "^6.4.2", + "@fortawesome/fontawesome-svg-core": "^6.4.2", + "@fortawesome/free-brands-svg-icons": "^6.4.2", + "@fortawesome/free-solid-svg-icons": "^6.4.2", + "@fortawesome/react-fontawesome": "^0.2.0", + "@headlessui/react": "^1.7.17", + "@headlessui/tailwindcss": "^0.2.0", + "@types/node": "20.5.8", + "@types/react": "18.2.21", + "@types/react-dom": "18.2.7", + "autoprefixer": "10.4.15", + "eslint": "8.48.0", + "eslint-config-next": "13.4.19", + "next": "13.4.19", + "postcss": "8.4.29", + "react": "18.2.0", + "react-dom": "18.2.0", + "tailwindcss": "3.3.3", + "typescript": "5.2.2" + }, + "devDependencies": { + "prettier": "^3.0.3", + "prettier-plugin-tailwindcss": "^0.5.4" + } + }, + "node_modules/@aashutoshrathi/word-wrap": { + "version": "1.2.6", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@babel/runtime": { + "version": "7.22.11", + "license": "MIT", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.8.0", + "license": "MIT", + "engines": { + "node": "^12.0.0 || 
^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.2", + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/js": { + "version": "8.48.0", + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@fortawesome/fontawesome-common-types": { + "version": "6.5.1", + "hasInstallScript": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/@fortawesome/fontawesome-free": { + "version": "6.5.1", + "hasInstallScript": true, + "license": "(CC-BY-4.0 AND OFL-1.1 AND MIT)", + "engines": { + "node": ">=6" + } + }, + "node_modules/@fortawesome/fontawesome-svg-core": { + "version": "6.7.2", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.7.2.tgz", + "integrity": "sha512-yxtOBWDrdi5DD5o1pmVdq3WMCvnobT0LU6R8RyyVXPvFRd2o79/0NCuQoCjNTeZz9EzA9xS3JxNWfv54RIHFEA==", + "license": "MIT", + "dependencies": { + "@fortawesome/fontawesome-common-types": "6.7.2" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@fortawesome/fontawesome-svg-core/node_modules/@fortawesome/fontawesome-common-types": { + "version": "6.7.2", + "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.7.2.tgz", + "integrity": "sha512-Zs+YeHUC5fkt7Mg1l6XTniei3k4bwG/yo3iFUtZWd/pMx9g3fdvkSK9E0FOC+++phXOka78uJcYb8JaFkW52Xg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/@fortawesome/free-brands-svg-icons": { + "version": "6.5.1", + "hasInstallScript": true, + "license": "(CC-BY-4.0 AND MIT)", + 
"dependencies": { + "@fortawesome/fontawesome-common-types": "6.5.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@fortawesome/free-solid-svg-icons": { + "version": "6.5.1", + "hasInstallScript": true, + "license": "(CC-BY-4.0 AND MIT)", + "dependencies": { + "@fortawesome/fontawesome-common-types": "6.5.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@fortawesome/react-fontawesome": { + "version": "0.2.0", + "license": "MIT", + "dependencies": { + "prop-types": "^15.8.1" + }, + "peerDependencies": { + "@fortawesome/fontawesome-svg-core": "~1 || ~6", + "react": ">=16.3" + } + }, + "node_modules/@headlessui/react": { + "version": "1.7.18", + "license": "MIT", + "dependencies": { + "@tanstack/react-virtual": "^3.0.0-beta.60", + "client-only": "^0.0.1" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "react": "^16 || ^17 || ^18", + "react-dom": "^16 || ^17 || ^18" + } + }, + "node_modules/@headlessui/tailwindcss": { + "version": "0.2.0", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "tailwindcss": "^3.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.11.11", + "license": "Apache-2.0", + "dependencies": { + "@humanwhocodes/object-schema": "^1.2.1", + "debug": "^4.1.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "1.2.1", + "license": "BSD-3-Clause" + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.3", + "license": "MIT", + "dependencies": { + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + 
"node_modules/@jridgewell/resolve-uri": { + "version": "3.1.1", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.1.2", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.19", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@next/env": { + "version": "13.4.19", + "license": "MIT" + }, + "node_modules/@next/eslint-plugin-next": { + "version": "13.4.19", + "license": "MIT", + "dependencies": { + "glob": "7.1.7" + } + }, + "node_modules/@next/swc-darwin-arm64": { + "version": "13.4.19", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-darwin-x64": { + "version": "13.4.19", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.4.19.tgz", + "integrity": "sha512-jyzO6wwYhx6F+7gD8ddZfuqO4TtpJdw3wyOduR4fxTUCm3aLw7YmHGYNjS0xRSYGAkLpBkH1E0RcelyId6lNsw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-gnu": { + "version": "13.4.19", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.4.19.tgz", + "integrity": "sha512-vdlnIlaAEh6H+G6HrKZB9c2zJKnpPVKnA6LBwjwT2BTjxI7e0Hx30+FoWCgi50e+YO49p6oPOtesP9mXDRiiUg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-arm64-musl": { + "version": "13.4.19", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.4.19.tgz", + "integrity": 
"sha512-aU0HkH2XPgxqrbNRBFb3si9Ahu/CpaR5RPmN2s9GiM9qJCiBBlZtRTiEca+DC+xRPyCThTtWYgxjWHgU7ZkyvA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-gnu": { + "version": "13.4.19", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.4.19.tgz", + "integrity": "sha512-htwOEagMa/CXNykFFeAHHvMJeqZfNQEoQvHfsA4wgg5QqGNqD5soeCer4oGlCol6NGUxknrQO6VEustcv+Md+g==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-linux-x64-musl": { + "version": "13.4.19", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.4.19.tgz", + "integrity": "sha512-4Gj4vvtbK1JH8ApWTT214b3GwUh9EKKQjY41hH/t+u55Knxi/0wesMzwQRhppK6Ddalhu0TEttbiJ+wRcoEj5Q==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-arm64-msvc": { + "version": "13.4.19", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.4.19.tgz", + "integrity": "sha512-bUfDevQK4NsIAHXs3/JNgnvEY+LRyneDN788W2NYiRIIzmILjba7LaQTfihuFawZDhRtkYCv3JDC3B4TwnmRJw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-ia32-msvc": { + "version": "13.4.19", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.4.19.tgz", + "integrity": "sha512-Y5kikILFAr81LYIFaw6j/NrOtmiM4Sf3GtOc0pn50ez2GCkr+oejYuKGcwAwq3jiTKuzF6OF4iT2INPoxRycEA==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@next/swc-win32-x64-msvc": { + "version": "13.4.19", + "resolved": 
"https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.4.19.tgz", + "integrity": "sha512-YzA78jBDXMYiINdPdJJwGgPNT3YqBNNGhsthsDoWHL9p24tEJn9ViQf/ZqTbwSpX/RrkPupLfuuTH2sf73JBAw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 10" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@rushstack/eslint-patch": { + "version": "1.3.3", + "license": "MIT" + }, + "node_modules/@swc/helpers": { + "version": "0.5.1", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@tanstack/react-virtual": { + "version": "3.2.0", + "license": "MIT", + "dependencies": { + "@tanstack/virtual-core": "3.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/@tanstack/virtual-core": { + "version": "3.2.0", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@types/json5": { + "version": "0.0.29", + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "20.5.8", + "license": "MIT" + }, + "node_modules/@types/prop-types": { + "version": "15.7.5", + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "18.2.21", + "license": "MIT", + "dependencies": { + "@types/prop-types": "*", + 
"@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.2.7", + "license": "MIT", + "dependencies": { + "@types/react": "*" + } + }, + "node_modules/@types/scheduler": { + "version": "0.16.3", + "license": "MIT" + }, + "node_modules/@typescript-eslint/parser": { + "version": "6.5.0", + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/scope-manager": "6.5.0", + "@typescript-eslint/types": "6.5.0", + "@typescript-eslint/typescript-estree": "6.5.0", + "@typescript-eslint/visitor-keys": "6.5.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "6.5.0", + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "6.5.0", + "@typescript-eslint/visitor-keys": "6.5.0" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "6.5.0", + "license": "MIT", + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "6.5.0", + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/types": "6.5.0", + "@typescript-eslint/visitor-keys": "6.5.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + 
"peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { + "version": "7.5.4", + "license": "ISC", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "6.5.0", + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "6.5.0", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/acorn": { + "version": "8.10.0", + "license": "MIT", + "peer": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "license": 
"MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "license": "Python-2.0" + }, + "node_modules/aria-query": { + "version": "5.3.0", + "license": "Apache-2.0", + "dependencies": { + "dequal": "^2.0.3" + } + }, + "node_modules/array-buffer-byte-length": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "is-array-buffer": "^3.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-includes": { + "version": "3.1.6", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4", + "get-intrinsic": "^1.1.3", + "is-string": "^1.0.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-union": { + "version": "2.1.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/array.prototype.findlastindex": { + "version": "1.2.3", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0", + "get-intrinsic": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flat": { + "version": "1.3.1", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4", + "es-shim-unscopables": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.flatmap": { + "version": "1.3.1", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4", + "es-shim-unscopables": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + 
"node_modules/array.prototype.tosorted": { + "version": "1.1.1", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4", + "es-shim-unscopables": "^1.0.0", + "get-intrinsic": "^1.1.3" + } + }, + "node_modules/arraybuffer.prototype.slice": { + "version": "1.0.1", + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.0", + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "get-intrinsic": "^1.2.1", + "is-array-buffer": "^3.0.2", + "is-shared-array-buffer": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ast-types-flow": { + "version": "0.0.7", + "license": "ISC" + }, + "node_modules/asynciterator.prototype": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + } + }, + "node_modules/autoprefixer": { + "version": "10.4.15", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.21.10", + "caniuse-lite": "^1.0.30001520", + "fraction.js": "^4.2.0", + "normalize-range": "^0.1.2", + "picocolors": "^1.0.0", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/available-typed-arrays": { + "version": "1.0.5", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/axe-core": { + "version": "4.7.2", + "license": "MPL-2.0", + "engines": { + "node": ">=4" + } + }, + "node_modules/axobject-query": { + "version": "3.2.1", + "license": 
"Apache-2.0", + "dependencies": { + "dequal": "^2.0.3" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "license": "MIT" + }, + "node_modules/binary-extensions": { + "version": "2.2.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "license": "MIT", + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.21.10", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "caniuse-lite": "^1.0.30001517", + "electron-to-chromium": "^1.4.477", + "node-releases": "^2.0.13", + "update-browserslist-db": "^1.0.11" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/busboy": { + "version": "1.6.0", + "dependencies": { + "streamsearch": "^1.1.0" + }, + "engines": { + "node": ">=10.16.0" + } + }, + "node_modules/call-bind": { + "version": "1.0.2", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001525", + "funding": [ + { + "type": "opencollective", + "url": 
"https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chalk": { + "version": "4.1.2", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chokidar": { + "version": "3.5.3", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/client-only": { + "version": "0.0.1", + "license": "MIT" + }, + "node_modules/color-convert": { + "version": "2.0.1", + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "license": "MIT" + }, + "node_modules/commander": { + "version": "4.1.1", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" 
+ }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.1.2", + "license": "MIT" + }, + "node_modules/damerau-levenshtein": { + "version": "1.0.8", + "license": "BSD-2-Clause" + }, + "node_modules/debug": { + "version": "4.3.4", + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "license": "MIT" + }, + "node_modules/define-properties": { + "version": "1.2.0", + "license": "MIT", + "dependencies": { + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/didyoumean": { + "version": "1.2.2", + "license": "Apache-2.0" + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "license": "MIT", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dlv": { + "version": "1.1.3", + "license": "MIT" + }, + "node_modules/doctrine": { + "version": "3.0.0", + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.4.508", + "license": "ISC" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "license": "MIT" + }, + "node_modules/enhanced-resolve": { + "version": "5.15.0", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/es-abstract": { + "version": "1.22.1", + "license": "MIT", + "dependencies": { + "array-buffer-byte-length": "^1.0.0", + "arraybuffer.prototype.slice": "^1.0.1", + "available-typed-arrays": "^1.0.5", + "call-bind": "^1.0.2", + 
"es-set-tostringtag": "^2.0.1", + "es-to-primitive": "^1.2.1", + "function.prototype.name": "^1.1.5", + "get-intrinsic": "^1.2.1", + "get-symbol-description": "^1.0.0", + "globalthis": "^1.0.3", + "gopd": "^1.0.1", + "has": "^1.0.3", + "has-property-descriptors": "^1.0.0", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.5", + "is-array-buffer": "^3.0.2", + "is-callable": "^1.2.7", + "is-negative-zero": "^2.0.2", + "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.2", + "is-string": "^1.0.7", + "is-typed-array": "^1.1.10", + "is-weakref": "^1.0.2", + "object-inspect": "^1.12.3", + "object-keys": "^1.1.1", + "object.assign": "^4.1.4", + "regexp.prototype.flags": "^1.5.0", + "safe-array-concat": "^1.0.0", + "safe-regex-test": "^1.0.0", + "string.prototype.trim": "^1.2.7", + "string.prototype.trimend": "^1.0.6", + "string.prototype.trimstart": "^1.0.6", + "typed-array-buffer": "^1.0.0", + "typed-array-byte-length": "^1.0.0", + "typed-array-byte-offset": "^1.0.0", + "typed-array-length": "^1.0.4", + "unbox-primitive": "^1.0.2", + "which-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-iterator-helpers": { + "version": "1.0.14", + "license": "MIT", + "dependencies": { + "asynciterator.prototype": "^1.0.0", + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-set-tostringtag": "^2.0.1", + "function-bind": "^1.1.1", + "get-intrinsic": "^1.2.1", + "globalthis": "^1.0.3", + "has-property-descriptors": "^1.0.0", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.5", + "iterator.prototype": "^1.1.0", + "safe-array-concat": "^1.0.0" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.0.1", + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.1.3", + "has": "^1.0.3", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + 
"node_modules/es-shim-unscopables": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "has": "^1.0.3" + } + }, + "node_modules/es-to-primitive": { + "version": "1.2.1", + "license": "MIT", + "dependencies": { + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/escalade": { + "version": "3.1.1", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.48.0", + "license": "MIT", + "peer": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.2", + "@eslint/js": "8.48.0", + "@humanwhocodes/config-array": "^0.11.10", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 
|| >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-config-next": { + "version": "13.4.19", + "license": "MIT", + "dependencies": { + "@next/eslint-plugin-next": "13.4.19", + "@rushstack/eslint-patch": "^1.1.3", + "@typescript-eslint/parser": "^5.4.2 || ^6.0.0", + "eslint-import-resolver-node": "^0.3.6", + "eslint-import-resolver-typescript": "^3.5.2", + "eslint-plugin-import": "^2.26.0", + "eslint-plugin-jsx-a11y": "^6.5.1", + "eslint-plugin-react": "^7.31.7", + "eslint-plugin-react-hooks": "^4.5.0 || 5.0.0-canary-7118f5dd7-20230705" + }, + "peerDependencies": { + "eslint": "^7.23.0 || ^8.0.0", + "typescript": ">=3.3.1" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/eslint-import-resolver-node": { + "version": "0.3.9", + "license": "MIT", + "dependencies": { + "debug": "^3.2.7", + "is-core-module": "^2.13.0", + "resolve": "^1.22.4" + } + }, + "node_modules/eslint-import-resolver-node/node_modules/debug": { + "version": "3.2.7", + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-import-resolver-typescript": { + "version": "3.6.0", + "license": "ISC", + "dependencies": { + "debug": "^4.3.4", + "enhanced-resolve": "^5.12.0", + "eslint-module-utils": "^2.7.4", + "fast-glob": "^3.3.1", + "get-tsconfig": "^4.5.0", + "is-core-module": "^2.11.0", + "is-glob": "^4.0.3" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/unts/projects/eslint-import-resolver-ts" + }, + "peerDependencies": { + "eslint": "*", + "eslint-plugin-import": "*" + } + }, + "node_modules/eslint-module-utils": { + "version": "2.8.0", + "license": "MIT", + "dependencies": { + "debug": "^3.2.7" + }, + "engines": { + "node": ">=4" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + } + } + }, + "node_modules/eslint-module-utils/node_modules/debug": { + "version": "3.2.7", + "license": 
"MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-plugin-import": { + "version": "2.28.1", + "license": "MIT", + "peer": true, + "dependencies": { + "array-includes": "^3.1.6", + "array.prototype.findlastindex": "^1.2.2", + "array.prototype.flat": "^1.3.1", + "array.prototype.flatmap": "^1.3.1", + "debug": "^3.2.7", + "doctrine": "^2.1.0", + "eslint-import-resolver-node": "^0.3.7", + "eslint-module-utils": "^2.8.0", + "has": "^1.0.3", + "is-core-module": "^2.13.0", + "is-glob": "^4.0.3", + "minimatch": "^3.1.2", + "object.fromentries": "^2.0.6", + "object.groupby": "^1.0.0", + "object.values": "^1.1.6", + "semver": "^6.3.1", + "tsconfig-paths": "^3.14.2" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8" + } + }, + "node_modules/eslint-plugin-import/node_modules/debug": { + "version": "3.2.7", + "license": "MIT", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/eslint-plugin-import/node_modules/doctrine": { + "version": "2.1.0", + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eslint-plugin-jsx-a11y": { + "version": "6.7.1", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.20.7", + "aria-query": "^5.1.3", + "array-includes": "^3.1.6", + "array.prototype.flatmap": "^1.3.1", + "ast-types-flow": "^0.0.7", + "axe-core": "^4.6.2", + "axobject-query": "^3.1.1", + "damerau-levenshtein": "^1.0.8", + "emoji-regex": "^9.2.2", + "has": "^1.0.3", + "jsx-ast-utils": "^3.3.3", + "language-tags": "=1.0.5", + "minimatch": "^3.1.2", + "object.entries": "^1.1.6", + "object.fromentries": "^2.0.6", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=4.0" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8" + } + }, + "node_modules/eslint-plugin-jsx-a11y/node_modules/semver": { + "version": "6.3.1", + "license": "ISC", + "bin": { + "semver": 
"bin/semver.js" + } + }, + "node_modules/eslint-plugin-react": { + "version": "7.33.2", + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.6", + "array.prototype.flatmap": "^1.3.1", + "array.prototype.tosorted": "^1.1.1", + "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.0.12", + "estraverse": "^5.3.0", + "jsx-ast-utils": "^2.4.1 || ^3.0.0", + "minimatch": "^3.1.2", + "object.entries": "^1.1.6", + "object.fromentries": "^2.0.6", + "object.hasown": "^1.1.2", + "object.values": "^1.1.6", + "prop-types": "^15.8.1", + "resolve": "^2.0.0-next.4", + "semver": "^6.3.1", + "string.prototype.matchall": "^4.0.8" + }, + "engines": { + "node": ">=4" + }, + "peerDependencies": { + "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8" + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "4.6.0", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/eslint-plugin-react/node_modules/doctrine": { + "version": "2.1.0", + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/eslint-plugin-react/node_modules/resolve": { + "version": "2.0.0-next.4", + "license": "MIT", + "dependencies": { + "is-core-module": "^2.9.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/eslint-plugin-react/node_modules/semver": { + "version": "6.3.1", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + 
"node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.5.0", + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.1", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "license": "MIT" + }, + "node_modules/fastq": { + "version": "1.15.0", + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + 
"node_modules/file-entry-cache": { + "version": "6.0.1", + "license": "MIT", + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.1.0", + "license": "MIT", + "dependencies": { + "flatted": "^3.2.7", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.2.7", + "license": "ISC" + }, + "node_modules/for-each": { + "version": "0.3.3", + "license": "MIT", + "dependencies": { + "is-callable": "^1.1.3" + } + }, + "node_modules/fraction.js": { + "version": "4.3.6", + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.1", + "license": "MIT" + }, + "node_modules/function.prototype.name": { + "version": "1.1.6", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "functions-have-names": "^1.2.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/functions-have-names": { + "version": "1.2.3", + "license": "MIT", + "funding": { 
+ "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.1", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-symbol-description": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-tsconfig": { + "version": "4.7.0", + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/glob": { + "version": "7.1.7", + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "license": "BSD-2-Clause" + }, + "node_modules/globals": { + "version": "13.21.0", + "license": "MIT", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globalthis": { + "version": "1.0.3", + "license": "MIT", + "dependencies": { + "define-properties": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "license": "MIT", + "dependencies": { + "array-union": 
"^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.0.1", + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "license": "ISC" + }, + "node_modules/graphemer": { + "version": "1.4.0", + "license": "MIT" + }, + "node_modules/has": { + "version": "1.0.3", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.1" + }, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/has-bigints": { + "version": "1.0.2", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.1.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.1", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ignore": { + "version": "5.2.4", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "license": 
"MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "license": "ISC" + }, + "node_modules/internal-slot": { + "version": "1.0.5", + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.0", + "has": "^1.0.3", + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/is-array-buffer": { + "version": "3.0.2", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.2.0", + "is-typed-array": "^1.1.10" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-async-function": { + "version": "2.0.0", + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-bigint": { + "version": "1.0.4", + "license": "MIT", + "dependencies": { + "has-bigints": "^1.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-boolean-object": { + "version": "1.1.2", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-callable": { + "version": "1.2.7", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, 
+ "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-core-module": { + "version": "2.13.0", + "license": "MIT", + "dependencies": { + "has": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-date-object": { + "version": "1.0.5", + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-finalizationregistry": { + "version": "1.0.2", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-generator-function": { + "version": "1.0.10", + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-map": { + "version": "2.0.2", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-negative-zero": { + "version": "2.0.2", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-number-object": { + "version": "1.0.7", + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "license": 
"MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-regex": { + "version": "1.1.4", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-set": { + "version": "2.0.2", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-shared-array-buffer": { + "version": "1.0.2", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-string": { + "version": "1.0.7", + "license": "MIT", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-symbol": { + "version": "1.0.4", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.12", + "license": "MIT", + "dependencies": { + "which-typed-array": "^1.1.11" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakmap": { + "version": "2.0.1", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakref": { + "version": "1.0.2", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakset": { + "version": "2.0.2", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/isarray": { + "version": "2.0.5", + "license": "MIT" + }, + 
"node_modules/isexe": { + "version": "2.0.0", + "license": "ISC" + }, + "node_modules/iterator.prototype": { + "version": "1.1.1", + "license": "MIT", + "dependencies": { + "define-properties": "^1.2.0", + "get-intrinsic": "^1.2.1", + "has-symbols": "^1.0.3", + "reflect.getprototypeof": "^1.0.3" + } + }, + "node_modules/jiti": { + "version": "1.19.3", + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "license": "MIT" + }, + "node_modules/json5": { + "version": "1.0.2", + "license": "MIT", + "dependencies": { + "minimist": "^1.2.0" + }, + "bin": { + "json5": "lib/cli.js" + } + }, + "node_modules/jsx-ast-utils": { + "version": "3.3.5", + "license": "MIT", + "dependencies": { + "array-includes": "^3.1.6", + "array.prototype.flat": "^1.3.1", + "object.assign": "^4.1.4", + "object.values": "^1.1.6" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/keyv": { + "version": "4.5.3", + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/language-subtag-registry": { + "version": "0.3.22", + "license": "CC0-1.0" + }, + "node_modules/language-tags": { + "version": "1.0.5", + "license": "MIT", + "dependencies": { + "language-subtag-registry": "~0.3.2" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lilconfig": { + "version": "2.1.0", + "license": "MIT", + "engines": { + 
"node": ">=10" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "license": "MIT" + }, + "node_modules/locate-path": { + "version": "6.0.0", + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "license": "MIT" + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lru-cache": { + "version": "6.0.0", + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.5", + "license": "MIT", + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "license": "MIT" + }, + "node_modules/mz": { + "version": "2.7.0", + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.6", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "license": "MIT" + }, + "node_modules/next": { + 
"version": "13.4.19", + "license": "MIT", + "dependencies": { + "@next/env": "13.4.19", + "@swc/helpers": "0.5.1", + "busboy": "1.6.0", + "caniuse-lite": "^1.0.30001406", + "postcss": "8.4.14", + "styled-jsx": "5.1.1", + "watchpack": "2.4.0", + "zod": "3.21.4" + }, + "bin": { + "next": "dist/bin/next" + }, + "engines": { + "node": ">=16.8.0" + }, + "optionalDependencies": { + "@next/swc-darwin-arm64": "13.4.19", + "@next/swc-darwin-x64": "13.4.19", + "@next/swc-linux-arm64-gnu": "13.4.19", + "@next/swc-linux-arm64-musl": "13.4.19", + "@next/swc-linux-x64-gnu": "13.4.19", + "@next/swc-linux-x64-musl": "13.4.19", + "@next/swc-win32-arm64-msvc": "13.4.19", + "@next/swc-win32-ia32-msvc": "13.4.19", + "@next/swc-win32-x64-msvc": "13.4.19" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.1.0", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "sass": "^1.3.0" + }, + "peerDependenciesMeta": { + "@opentelemetry/api": { + "optional": true + }, + "sass": { + "optional": true + } + } + }, + "node_modules/next/node_modules/postcss": { + "version": "8.4.14", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.4", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/node-releases": { + "version": "2.0.13", + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-range": { + "version": "0.1.2", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "license": "MIT", + "engines": { + "node": ">= 6" + } + 
}, + "node_modules/object-inspect": { + "version": "1.12.3", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.4", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "has-symbols": "^1.0.3", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.entries": { + "version": "1.1.7", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.fromentries": { + "version": "2.0.7", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.groupby": { + "version": "1.0.1", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1" + } + }, + "node_modules/object.hasown": { + "version": "1.1.3", + "license": "MIT", + "dependencies": { + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.values": { + "version": "1.1.7", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/once": { + "version": "1.4.0", + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + 
"node_modules/optionator": { + "version": "0.9.3", + "license": "MIT", + "dependencies": { + "@aashutoshrathi/word-wrap": "^1.2.3", + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "license": "MIT" + }, + "node_modules/path-type": { + "version": "4.0.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pirates": { + "version": "4.0.6", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/postcss": { + 
"version": "8.4.29", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-import": { + "version": "15.1.0", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-js": { + "version": "4.0.1", + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-load-config": { + "version": "4.0.1", + "license": "MIT", + "dependencies": { + "lilconfig": "^2.0.5", + "yaml": "^2.1.1" + }, + "engines": { + "node": ">= 14" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": ">=8.0.9", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "postcss": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/postcss-nested": { + "version": "6.0.1", + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.0.11" + }, + "engines": { + "node": ">=12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.0.13", + "license": "MIT", + "dependencies": 
{ + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "license": "MIT" + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prettier": { + "version": "3.2.5", + "dev": true, + "license": "MIT", + "peer": true, + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/prettier-plugin-tailwindcss": { + "version": "0.5.13", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.21.3" + }, + "peerDependencies": { + "@ianvs/prettier-plugin-sort-imports": "*", + "@prettier/plugin-pug": "*", + "@shopify/prettier-plugin-liquid": "*", + "@trivago/prettier-plugin-sort-imports": "*", + "@zackad/prettier-plugin-twig-melody": "*", + "prettier": "^3.0", + "prettier-plugin-astro": "*", + "prettier-plugin-css-order": "*", + "prettier-plugin-import-sort": "*", + "prettier-plugin-jsdoc": "*", + "prettier-plugin-marko": "*", + "prettier-plugin-organize-attributes": "*", + "prettier-plugin-organize-imports": "*", + "prettier-plugin-sort-imports": "*", + "prettier-plugin-style-order": "*", + "prettier-plugin-svelte": "*" + }, + "peerDependenciesMeta": { + "@ianvs/prettier-plugin-sort-imports": { + "optional": true + }, + "@prettier/plugin-pug": { + "optional": true + }, + "@shopify/prettier-plugin-liquid": { + "optional": true + }, + "@trivago/prettier-plugin-sort-imports": { + "optional": true + }, + "@zackad/prettier-plugin-twig-melody": { + "optional": true + }, + "prettier-plugin-astro": { + "optional": true + }, + "prettier-plugin-css-order": { + "optional": true + }, + "prettier-plugin-import-sort": { + "optional": true + }, + "prettier-plugin-jsdoc": { + "optional": true + }, + "prettier-plugin-marko": { + "optional": true + }, + 
"prettier-plugin-organize-attributes": { + "optional": true + }, + "prettier-plugin-organize-imports": { + "optional": true + }, + "prettier-plugin-sort-imports": { + "optional": true + }, + "prettier-plugin-style-order": { + "optional": true + }, + "prettier-plugin-svelte": { + "optional": true + } + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/punycode": { + "version": "2.3.0", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/react": { + "version": "18.2.0", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.2.0", + "license": "MIT", + "peer": true, + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.0" + }, + "peerDependencies": { + "react": "^18.2.0" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "license": "MIT" + }, + "node_modules/read-cache": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/reflect.getprototypeof": { + "version": "1.0.4", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1", + "globalthis": "^1.0.3", + "which-builtin-type": "^1.1.3" + }, + "engines": { + "node": 
">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.14.0", + "license": "MIT" + }, + "node_modules/regexp.prototype.flags": { + "version": "1.5.0", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "functions-have-names": "^1.2.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve": { + "version": "1.22.4", + "license": "MIT", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-array-concat": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.2.0", + "has-symbols": "^1.0.3", + 
"isarray": "^2.0.5" + }, + "engines": { + "node": ">=0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-regex-test": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.3", + "is-regex": "^1.1.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/scheduler": { + "version": "0.23.0", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "6.3.1", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.0.4", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map-js": { + "version": "1.0.2", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/streamsearch": { + "version": "1.1.0", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/string.prototype.matchall": { + "version": "4.0.9", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.5", + "regexp.prototype.flags": "^1.5.0", + "side-channel": "^1.0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trim": { + "version": "1.2.7", + 
"license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimend": { + "version": "1.0.6", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimstart": { + "version": "1.0.6", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.4", + "es-abstract": "^1.20.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "3.0.0", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/styled-jsx": { + "version": "5.1.1", + "license": "MIT", + "dependencies": { + "client-only": "0.0.1" + }, + "engines": { + "node": ">= 12.0.0" + }, + "peerDependencies": { + "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/sucrase": { + "version": "3.34.0", + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "glob": "7.1.6", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + 
"node": ">=8" + } + }, + "node_modules/sucrase/node_modules/glob": { + "version": "7.1.6", + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tailwindcss": { + "version": "3.3.3", + "license": "MIT", + "peer": true, + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.5.3", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.2.12", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.18.2", + "lilconfig": "^2.1.0", + "micromatch": "^4.0.5", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.0.0", + "postcss": "^8.4.23", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.1", + "postcss-nested": "^6.0.1", + "postcss-selector-parser": "^6.0.11", + "resolve": "^1.22.2", + "sucrase": "^3.32.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tapable": { + "version": "2.2.1", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "license": "MIT" + }, + "node_modules/thenify": { + "version": "3.3.1", + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "license": "MIT", + "dependencies": { + "thenify": ">= 
3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-api-utils": { + "version": "1.0.2", + "license": "MIT", + "engines": { + "node": ">=16.13.0" + }, + "peerDependencies": { + "typescript": ">=4.2.0" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "license": "Apache-2.0" + }, + "node_modules/tsconfig-paths": { + "version": "3.14.2", + "license": "MIT", + "dependencies": { + "@types/json5": "^0.0.29", + "json5": "^1.0.2", + "minimist": "^1.2.6", + "strip-bom": "^3.0.0" + } + }, + "node_modules/tslib": { + "version": "2.6.2", + "license": "0BSD" + }, + "node_modules/type-check": { + "version": "0.4.0", + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typed-array-buffer": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.2.1", + "is-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typed-array-byte-length": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "has-proto": "^1.0.1", + "is-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-byte-offset": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.5", + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "has-proto": "^1.0.1", + "is-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + }, + 
"funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-length": { + "version": "1.0.4", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "is-typed-array": "^1.1.9" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typescript": { + "version": "5.2.2", + "license": "Apache-2.0", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/unbox-primitive": { + "version": "1.0.2", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "has-bigints": "^1.0.2", + "has-symbols": "^1.0.3", + "which-boxed-primitive": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.0.11", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "license": "MIT" + }, + "node_modules/watchpack": { + "version": "2.4.0", + "license": "MIT", + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + 
"node_modules/which-boxed-primitive": { + "version": "1.0.2", + "license": "MIT", + "dependencies": { + "is-bigint": "^1.0.1", + "is-boolean-object": "^1.1.0", + "is-number-object": "^1.0.4", + "is-string": "^1.0.5", + "is-symbol": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-builtin-type": { + "version": "1.1.3", + "license": "MIT", + "dependencies": { + "function.prototype.name": "^1.1.5", + "has-tostringtag": "^1.0.0", + "is-async-function": "^2.0.0", + "is-date-object": "^1.0.5", + "is-finalizationregistry": "^1.0.2", + "is-generator-function": "^1.0.10", + "is-regex": "^1.1.4", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.0.2", + "which-collection": "^1.0.1", + "which-typed-array": "^1.1.9" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-collection": { + "version": "1.0.1", + "license": "MIT", + "dependencies": { + "is-map": "^2.0.1", + "is-set": "^2.0.1", + "is-weakmap": "^2.0.1", + "is-weakset": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-typed-array": { + "version": "1.1.11", + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.5", + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "license": "ISC" + }, + "node_modules/yallist": { + "version": "4.0.0", + "license": "ISC" + }, + "node_modules/yaml": { + "version": "2.3.2", + "license": "ISC", + "engines": { + "node": ">= 14" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/zod": { + "version": "3.21.4", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + } + } +} diff --git a/site/pages/index.tsx b/site/pages/index.tsx index 73fbc33..20d7f1b 100644 --- a/site/pages/index.tsx +++ b/site/pages/index.tsx @@ -1,13 +1,36 @@ -import { faGithub } from "@fortawesome/free-brands-svg-icons"; -import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import Head from "next/head"; -import { - faChevronDown, - faChevronUp, - faUpRightFromSquare, -} from "@fortawesome/free-solid-svg-icons"; import { Menu, Transition } from "@headlessui/react"; import { useState, useRef, useEffect } from "react"; + +function ChevronIcon({ open }: { open: boolean }) { + return ( + + ); +} + +function ExternalLinkIcon() { + return ( + + ); +} + +function GithubIcon() { + return ( + + ); +} + export default function Page() { const [chevron, setChevron] = useState(false); const menuButtonRef = useRef(null); @@ -71,17 +94,7 @@ export default function Page() { className="w-50 h-12 rounded-2xl bg-hackClubRed px-3 font-SpaceMono hover:scale-105 md:h-12 md:w-auto md:rounded-3xl md:text-xl 2xl:h-16 2xl:text-2xl " > Install for Linux - {chevron ? ( - - ) : ( - - )} +
diff --git a/tun/Cargo.toml b/tun/Cargo.toml index 019439d..1b07833 100644 --- a/tun/Cargo.toml +++ b/tun/Cargo.toml @@ -8,7 +8,7 @@ libc = "0.2" fehler = "1.0" nix = { version = "0.26", features = ["ioctl"] } socket2 = "0.5" -tokio = { version = "1.50.0", default-features = false, optional = true } +tokio = { version = "1.37", default-features = false, optional = true } byteorder = "1.4" tracing = "0.1" log = "0.4" @@ -19,7 +19,7 @@ futures = { version = "0.3.28", optional = true } [features] serde = ["dep:serde", "dep:schemars"] -tokio = ["tokio/macros", "tokio/net", "tokio/rt", "dep:tokio", "dep:futures"] +tokio = ["tokio/net", "dep:tokio", "dep:futures"] [target.'cfg(windows)'.dependencies] lazy_static = "1.4" @@ -34,7 +34,7 @@ windows = { version = "0.48", features = [ [target.'cfg(windows)'.build-dependencies] anyhow = "1.0" bindgen = "0.65" -reqwest = { version = "0.13.2" } +reqwest = { version = "0.11" } ssri = { version = "9.0", default-features = false } -tokio = { version = "1.50.0", features = ["rt", "macros"] } +tokio = { version = "1.28", features = ["rt", "macros"] } zip = { version = "0.6", features = ["deflate"] } diff --git a/tun/build.rs b/tun/build.rs index 8da8a40..03ee131 100644 --- a/tun/build.rs +++ b/tun/build.rs @@ -26,7 +26,7 @@ async fn generate(out_dir: &std::path::Path) -> anyhow::Result<()> { println!("cargo:rerun-if-changed={}", binary_path.to_str().unwrap()); if let (Ok(..), Ok(..)) = (File::open(&bindings_path), File::open(&binary_path)) { - return Ok(()) + return Ok(()); }; let archive = download(out_dir) diff --git a/tun/src/tokio/mod.rs b/tun/src/tokio/mod.rs index bd27109..f56f3d2 100644 --- a/tun/src/tokio/mod.rs +++ b/tun/src/tokio/mod.rs @@ -33,7 +33,7 @@ impl TunInterface { Ok(result) => return result, Err(_would_block) => { tracing::debug!("WouldBlock"); - continue + continue; } } } diff --git a/tun/src/unix/apple/mod.rs b/tun/src/unix/apple/mod.rs index 0fc701e..66a2f15 100644 --- a/tun/src/unix/apple/mod.rs +++ 
b/tun/src/unix/apple/mod.rs @@ -114,6 +114,10 @@ impl TunInterface { ifname_to_string(buf) } + pub(crate) fn packet_information_size(&self) -> usize { + 4 + } + #[throws] #[instrument] fn ifreq(&self) -> sys::ifreq { diff --git a/tun/src/unix/linux/mod.rs b/tun/src/unix/linux/mod.rs index 03b6f09..9fc963a 100644 --- a/tun/src/unix/linux/mod.rs +++ b/tun/src/unix/linux/mod.rs @@ -73,6 +73,21 @@ impl TunInterface { ifname_to_string(iff.ifr_name) } + pub(crate) fn packet_information_size(&self) -> usize { + let mut iff = unsafe { mem::zeroed::() }; + match unsafe { sys::tun_get_iff(self.socket.as_raw_fd(), &mut iff) } { + Ok(_) => { + let flags = unsafe { iff.ifr_ifru.ifru_flags }; + if flags & libc::IFF_NO_PI as i16 != 0 { + 0 + } else { + 4 + } + } + Err(_) => 4, + } + } + #[throws] #[instrument] fn ifreq(&self) -> sys::ifreq { @@ -283,6 +298,16 @@ impl TunInterface { #[throws] #[instrument] pub fn send(&self, buf: &[u8]) -> usize { - self.socket.send(buf)? + let len = unsafe { + libc::write( + self.as_raw_fd(), + buf.as_ptr().cast::(), + buf.len(), + ) + }; + if len < 0 { + Err(Error::last_os_error())?; + } + len as usize } } diff --git a/tun/src/unix/mod.rs b/tun/src/unix/mod.rs index f1d7da1..ad25667 100644 --- a/tun/src/unix/mod.rs +++ b/tun/src/unix/mod.rs @@ -48,12 +48,26 @@ impl TunInterface { #[throws] #[instrument] pub fn recv(&self, buf: &mut [u8]) -> usize { - // Use IoVec to read directly into target buffer - let mut tmp_buf = [MaybeUninit::uninit(); 1500]; - let len = self.socket.recv(&mut tmp_buf)?; - let result_buf = unsafe { assume_init(&tmp_buf[4..len]) }; - buf[..len - 4].copy_from_slice(result_buf); - len - 4 + let packet_information_size = self.packet_information_size(); + let mut tmp_buf = [MaybeUninit::uninit(); 1504]; + let len = unsafe { + libc::read( + self.as_raw_fd(), + tmp_buf.as_mut_ptr().cast::(), + tmp_buf.len(), + ) + }; + if len < 0 { + Err(Error::last_os_error())?; + } + let len = len as usize; + if len < packet_information_size { + 
return 0; + } + + let result_buf = unsafe { assume_init(&tmp_buf[packet_information_size..len]) }; + buf[..len - packet_information_size].copy_from_slice(result_buf); + len - packet_information_size } #[throws] diff --git a/tun/tests/configure.rs b/tun/tests/configure.rs index e5cef80..bfa56ef 100644 --- a/tun/tests/configure.rs +++ b/tun/tests/configure.rs @@ -1,34 +1,33 @@ -use std::{ - io::{Error, ErrorKind}, - net::Ipv4Addr, -}; +use std::{io::Error, net::Ipv4Addr}; +use fehler::throws; use tun::TunInterface; -fn open_test_tun() -> Result, Error> { +fn open_tun() -> Result, Error> { match TunInterface::new() { Ok(tun) => Ok(Some(tun)), - Err(error) if matches!(error.kind(), ErrorKind::NotFound | ErrorKind::PermissionDenied) => { - eprintln!("skipping test: {}", error); + Err(err) + if err.kind() == std::io::ErrorKind::PermissionDenied + || matches!(err.raw_os_error(), Some(1 | 13)) => + { + eprintln!("skipping tun test without tunnel privileges: {err}"); Ok(None) } - Err(error) => Err(error), + Err(err) => Err(err), } } #[test] -fn test_create() -> Result<(), Error> { - if open_test_tun()?.is_none() { - return Ok(()); - } - - Ok(()) +#[throws] +fn test_create() { + let _ = open_tun()?; } #[test] +#[throws] #[cfg(not(any(target_os = "windows", target_vendor = "apple")))] -fn test_set_get_broadcast_addr() -> Result<(), Error> { - let Some(tun) = open_test_tun()? else { +fn test_set_get_broadcast_addr() { + let Some(tun) = open_tun()? else { return Ok(()); }; let addr = Ipv4Addr::new(10, 0, 0, 1); @@ -39,14 +38,13 @@ fn test_set_get_broadcast_addr() -> Result<(), Error> { let result = tun.broadcast_addr()?; assert_eq!(broadcast_addr, result); - - Ok(()) } #[test] +#[throws] #[cfg(not(target_os = "windows"))] -fn test_set_get_ipv4() -> Result<(), Error> { - let Some(tun) = open_test_tun()? else { +fn test_set_get_ipv4() { + let Some(tun) = open_tun()? 
else { return Ok(()); }; @@ -55,16 +53,15 @@ fn test_set_get_ipv4() -> Result<(), Error> { let result = tun.ipv4_addr()?; assert_eq!(addr, result); - - Ok(()) } #[test] +#[throws] #[cfg(not(any(target_os = "windows", target_vendor = "apple")))] -fn test_set_get_ipv6() -> Result<(), Error> { +fn test_set_get_ipv6() { use std::net::Ipv6Addr; - let Some(tun) = open_test_tun()? else { + let Some(tun) = open_tun()? else { return Ok(()); }; @@ -73,28 +70,26 @@ fn test_set_get_ipv6() -> Result<(), Error> { // let result = tun.ipv6_addr()?; // assert_eq!(addr, result); - - Ok(()) } #[test] +#[throws] #[cfg(not(target_os = "windows"))] -fn test_set_get_mtu() -> Result<(), Error> { - let Some(interf) = open_test_tun()? else { +fn test_set_get_mtu() { + let Some(interf) = open_tun()? else { return Ok(()); }; interf.set_mtu(500)?; assert_eq!(interf.mtu().unwrap(), 500); - - Ok(()) } #[test] +#[throws] #[cfg(not(target_os = "windows"))] -fn test_set_get_netmask() -> Result<(), Error> { - let Some(interf) = open_test_tun()? else { +fn test_set_get_netmask() { + let Some(interf) = open_tun()? 
else { return Ok(()); }; @@ -105,6 +100,4 @@ fn test_set_get_netmask() -> Result<(), Error> { interf.set_netmask(netmask)?; assert_eq!(interf.netmask()?, netmask); - - Ok(()) } diff --git a/tun/tests/tokio.rs b/tun/tests/tokio.rs index ddec6b3..3b89777 100644 --- a/tun/tests/tokio.rs +++ b/tun/tests/tokio.rs @@ -1,25 +1,25 @@ #[cfg(all(feature = "tokio", not(target_os = "windows")))] -use std::{ - io::ErrorKind, - net::Ipv4Addr, -}; +use std::net::Ipv4Addr; #[cfg(all(feature = "tokio", not(target_os = "windows")))] -fn open_test_tun() -> Option { +fn open_tun() -> Option { match tun::TunInterface::new() { Ok(tun) => Some(tun), - Err(error) if matches!(error.kind(), ErrorKind::NotFound | ErrorKind::PermissionDenied) => { - eprintln!("skipping test: {}", error); + Err(err) + if err.kind() == std::io::ErrorKind::PermissionDenied + || matches!(err.raw_os_error(), Some(1 | 13)) => + { + eprintln!("skipping tokio tun test without tunnel privileges: {err}"); None } - Err(error) => panic!("failed to create tun interface: {error}"), + Err(err) => panic!("failed to create tun interface: {err}"), } } #[tokio::test] #[cfg(all(feature = "tokio", not(target_os = "windows")))] async fn test_create() { - let Some(tun) = open_test_tun() else { + let Some(tun) = open_tun() else { return; }; let _ = tun::tokio::TunInterface::new(tun).unwrap(); @@ -29,7 +29,7 @@ async fn test_create() { #[ignore = "requires interactivity"] #[cfg(all(feature = "tokio", not(target_os = "windows")))] async fn test_write() { - let Some(tun) = open_test_tun() else { + let Some(tun) = open_tun() else { return; }; tun.set_ipv4_addr(Ipv4Addr::from([192, 168, 1, 10]))