Compare commits

..

No commits in common. "7f280c08cfee17c6330c24979a4d4f48f9d75e7b" and "3fb0269d7c6ef6882396e4e8f7dc090d82ceec00" have entirely different histories.

145 changed files with 1129 additions and 21908 deletions

View file

@ -1,3 +1,6 @@
[target.'cfg(unix)']
runner = "sudo -E"
[alias] # command aliases [alias] # command aliases
rr = "run --release" rr = "run --release"
bb = "build --release" bb = "build --release"

View file

@ -1,31 +0,0 @@
name: Build Rust
on:
push:
branches:
- main
pull_request:
branches:
- "**"
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
rust:
name: Cargo Test
runs-on: [self-hosted, linux, x86_64, burrow-forge]
steps:
- name: Checkout
uses: https://code.forgejo.org/actions/checkout@v4
with:
token: ${{ github.token }}
fetch-depth: 0
- name: Test
shell: bash
run: |
set -euo pipefail
nix develop .#ci -c cargo test --workspace --all-features

View file

@ -1,31 +0,0 @@
name: Build Site
on:
push:
branches:
- main
pull_request:
branches:
- "**"
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
site:
name: Next.js Build
runs-on: [self-hosted, linux, x86_64, burrow-forge]
steps:
- name: Checkout
uses: https://code.forgejo.org/actions/checkout@v4
with:
token: ${{ github.token }}
fetch-depth: 0
- name: Build
shell: bash
run: |
set -euo pipefail
nix develop .#ci -c bash -lc 'cd site && npm install && npm run build'

View file

@ -54,7 +54,6 @@ jobs:
- name: Install Rust - name: Install Rust
uses: dtolnay/rust-toolchain@stable uses: dtolnay/rust-toolchain@stable
with: with:
toolchain: 1.85.0
targets: ${{ join(matrix.rust-targets, ', ') }} targets: ${{ join(matrix.rust-targets, ', ') }}
- name: Install Protobuf - name: Install Protobuf
shell: bash shell: bash

View file

@ -6,9 +6,6 @@ on:
pull_request: pull_request:
branches: branches:
- "*" - "*"
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs: jobs:
build: build:
name: Build Crate (${{ matrix.platform }}) name: Build Crate (${{ matrix.platform }})
@ -75,14 +72,14 @@ jobs:
- name: Install Rust - name: Install Rust
uses: dtolnay/rust-toolchain@stable uses: dtolnay/rust-toolchain@stable
with: with:
toolchain: 1.85.0 toolchain: stable
components: rustfmt components: rustfmt
targets: ${{ join(matrix.targets, ', ') }} targets: ${{ join(matrix.targets, ', ') }}
- name: Setup Rust Cache - name: Setup Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
- name: Build - name: Build
shell: bash shell: bash
run: cargo build --locked --verbose --workspace --all-features --target ${{ join(matrix.targets, ' --target ') }} --target ${{ join(matrix.test-targets, ' --target ') }} run: cargo build --verbose --workspace --all-features --target ${{ join(matrix.targets, ' --target ') }} --target ${{ join(matrix.test-targets, ' --target ') }}
- name: Test - name: Test
shell: bash shell: bash
run: cargo test --locked --verbose --workspace --all-features --target ${{ join(matrix.test-targets, ' --target ') }} run: cargo test --verbose --workspace --all-features --target ${{ join(matrix.test-targets, ' --target ') }}

View file

@ -47,7 +47,6 @@ jobs:
- name: Install Rust - name: Install Rust
uses: dtolnay/rust-toolchain@stable uses: dtolnay/rust-toolchain@stable
with: with:
toolchain: 1.85.0
targets: ${{ join(matrix.rust-targets, ', ') }} targets: ${{ join(matrix.rust-targets, ', ') }}
- name: Install Protobuf - name: Install Protobuf
shell: bash shell: bash

3
.gitignore vendored
View file

@ -1,6 +1,5 @@
# Xcode # Xcode
xcuserdata xcuserdata
Apple/build/
# Swift # Swift
Apple/Package/.swiftpm/ Apple/Package/.swiftpm/
@ -13,8 +12,6 @@ target/
.idea/ .idea/
tmp/ tmp/
intake/
*.db *.db
*.sqlite3
*.sock *.sock

View file

@ -6,8 +6,6 @@ import SwiftUI
@main @main
@MainActor @MainActor
class AppDelegate: NSObject, NSApplicationDelegate { class AppDelegate: NSObject, NSApplicationDelegate {
private var windowController: NSWindowController?
private let quitItem: NSMenuItem = { private let quitItem: NSMenuItem = {
let quitItem = NSMenuItem( let quitItem = NSMenuItem(
title: "Quit Burrow", title: "Quit Burrow",
@ -19,17 +17,6 @@ class AppDelegate: NSObject, NSApplicationDelegate {
return quitItem return quitItem
}() }()
private lazy var openItem: NSMenuItem = {
let item = NSMenuItem(
title: "Open Burrow",
action: #selector(openWindow),
keyEquivalent: "o"
)
item.target = self
item.keyEquivalentModifierMask = .command
return item
}()
private let toggleItem: NSMenuItem = { private let toggleItem: NSMenuItem = {
let toggleView = NSHostingView(rootView: MenuItemToggleView()) let toggleView = NSHostingView(rootView: MenuItemToggleView())
toggleView.frame.size = CGSize(width: 300, height: 32) toggleView.frame.size = CGSize(width: 300, height: 32)
@ -44,7 +31,6 @@ class AppDelegate: NSObject, NSApplicationDelegate {
let menu = NSMenu() let menu = NSMenu()
menu.items = [ menu.items = [
toggleItem, toggleItem,
openItem,
.separator(), .separator(),
quitItem quitItem
] ]
@ -63,28 +49,5 @@ class AppDelegate: NSObject, NSApplicationDelegate {
func applicationDidFinishLaunching(_ notification: Notification) { func applicationDidFinishLaunching(_ notification: Notification) {
statusItem.menu = menu statusItem.menu = menu
} }
@objc
private func openWindow() {
if let window = windowController?.window {
window.makeKeyAndOrderFront(nil)
NSApplication.shared.activate(ignoringOtherApps: true)
return
}
let contentView = BurrowView()
let hostingController = NSHostingController(rootView: contentView)
let window = NSWindow(contentViewController: hostingController)
window.title = "Burrow"
window.setContentSize(NSSize(width: 820, height: 720))
window.styleMask.insert([.titled, .closable, .miniaturizable, .resizable])
window.center()
let controller = NSWindowController(window: window)
controller.shouldCascadeWindows = true
controller.showWindow(nil)
windowController = controller
NSApplication.shared.activate(ignoringOtherApps: true)
}
} }
#endif #endif

View file

@ -23,6 +23,7 @@
D0D4E53A2C8D996F007F820A /* BurrowCore.framework in Embed Frameworks */ = {isa = PBXBuildFile; fileRef = D0D4E5312C8D996F007F820A /* BurrowCore.framework */; settings = {ATTRIBUTES = (CodeSignOnCopy, RemoveHeadersOnCopy, ); }; }; D0D4E53A2C8D996F007F820A /* BurrowCore.framework in Embed Frameworks */ = {isa = PBXBuildFile; fileRef = D0D4E5312C8D996F007F820A /* BurrowCore.framework */; settings = {ATTRIBUTES = (CodeSignOnCopy, RemoveHeadersOnCopy, ); }; };
D0D4E56B2C8D9C2F007F820A /* Logging.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E49A2C8D921A007F820A /* Logging.swift */; }; D0D4E56B2C8D9C2F007F820A /* Logging.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E49A2C8D921A007F820A /* Logging.swift */; };
D0D4E5702C8D9C62007F820A /* BurrowCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D0D4E5312C8D996F007F820A /* BurrowCore.framework */; }; D0D4E5702C8D9C62007F820A /* BurrowCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D0D4E5312C8D996F007F820A /* BurrowCore.framework */; };
D0D4E5712C8D9C6F007F820A /* HackClub.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E49D2C8D921A007F820A /* HackClub.swift */; };
D0D4E5722C8D9C6F007F820A /* Network.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E49E2C8D921A007F820A /* Network.swift */; }; D0D4E5722C8D9C6F007F820A /* Network.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E49E2C8D921A007F820A /* Network.swift */; };
D0D4E5732C8D9C6F007F820A /* WireGuard.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E49F2C8D921A007F820A /* WireGuard.swift */; }; D0D4E5732C8D9C6F007F820A /* WireGuard.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E49F2C8D921A007F820A /* WireGuard.swift */; };
D0D4E5742C8D9C6F007F820A /* BurrowView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4A22C8D921A007F820A /* BurrowView.swift */; }; D0D4E5742C8D9C6F007F820A /* BurrowView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4A22C8D921A007F820A /* BurrowView.swift */; };
@ -32,6 +33,7 @@
D0D4E5782C8D9C6F007F820A /* NetworkExtension+Async.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4A62C8D921A007F820A /* NetworkExtension+Async.swift */; }; D0D4E5782C8D9C6F007F820A /* NetworkExtension+Async.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4A62C8D921A007F820A /* NetworkExtension+Async.swift */; };
D0D4E5792C8D9C6F007F820A /* NetworkExtensionTunnel.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4A72C8D921A007F820A /* NetworkExtensionTunnel.swift */; }; D0D4E5792C8D9C6F007F820A /* NetworkExtensionTunnel.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4A72C8D921A007F820A /* NetworkExtensionTunnel.swift */; };
D0D4E57A2C8D9C6F007F820A /* NetworkView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4A82C8D921A007F820A /* NetworkView.swift */; }; D0D4E57A2C8D9C6F007F820A /* NetworkView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4A82C8D921A007F820A /* NetworkView.swift */; };
D0D4E57B2C8D9C6F007F820A /* OAuth2.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4A92C8D921A007F820A /* OAuth2.swift */; };
D0D4E57C2C8D9C6F007F820A /* Tunnel.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4AA2C8D921A007F820A /* Tunnel.swift */; }; D0D4E57C2C8D9C6F007F820A /* Tunnel.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4AA2C8D921A007F820A /* Tunnel.swift */; };
D0D4E57D2C8D9C6F007F820A /* TunnelButton.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4AB2C8D921A007F820A /* TunnelButton.swift */; }; D0D4E57D2C8D9C6F007F820A /* TunnelButton.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4AB2C8D921A007F820A /* TunnelButton.swift */; };
D0D4E57E2C8D9C6F007F820A /* TunnelStatusView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4AC2C8D921A007F820A /* TunnelStatusView.swift */; }; D0D4E57E2C8D9C6F007F820A /* TunnelStatusView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4AC2C8D921A007F820A /* TunnelStatusView.swift */; };
@ -158,6 +160,7 @@
D0D4E4972C8D921A007F820A /* swift-protobuf-config.json */ = {isa = PBXFileReference; lastKnownFileType = text.json; path = "swift-protobuf-config.json"; sourceTree = "<group>"; }; D0D4E4972C8D921A007F820A /* swift-protobuf-config.json */ = {isa = PBXFileReference; lastKnownFileType = text.json; path = "swift-protobuf-config.json"; sourceTree = "<group>"; };
D0D4E4992C8D921A007F820A /* Client.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Client.swift; sourceTree = "<group>"; }; D0D4E4992C8D921A007F820A /* Client.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Client.swift; sourceTree = "<group>"; };
D0D4E49A2C8D921A007F820A /* Logging.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Logging.swift; sourceTree = "<group>"; }; D0D4E49A2C8D921A007F820A /* Logging.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Logging.swift; sourceTree = "<group>"; };
D0D4E49D2C8D921A007F820A /* HackClub.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = HackClub.swift; sourceTree = "<group>"; };
D0D4E49E2C8D921A007F820A /* Network.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Network.swift; sourceTree = "<group>"; }; D0D4E49E2C8D921A007F820A /* Network.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Network.swift; sourceTree = "<group>"; };
D0D4E49F2C8D921A007F820A /* WireGuard.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WireGuard.swift; sourceTree = "<group>"; }; D0D4E49F2C8D921A007F820A /* WireGuard.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WireGuard.swift; sourceTree = "<group>"; };
D0D4E4A12C8D921A007F820A /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = "<group>"; }; D0D4E4A12C8D921A007F820A /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = "<group>"; };
@ -168,6 +171,7 @@
D0D4E4A62C8D921A007F820A /* NetworkExtension+Async.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "NetworkExtension+Async.swift"; sourceTree = "<group>"; }; D0D4E4A62C8D921A007F820A /* NetworkExtension+Async.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "NetworkExtension+Async.swift"; sourceTree = "<group>"; };
D0D4E4A72C8D921A007F820A /* NetworkExtensionTunnel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = NetworkExtensionTunnel.swift; sourceTree = "<group>"; }; D0D4E4A72C8D921A007F820A /* NetworkExtensionTunnel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = NetworkExtensionTunnel.swift; sourceTree = "<group>"; };
D0D4E4A82C8D921A007F820A /* NetworkView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = NetworkView.swift; sourceTree = "<group>"; }; D0D4E4A82C8D921A007F820A /* NetworkView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = NetworkView.swift; sourceTree = "<group>"; };
D0D4E4A92C8D921A007F820A /* OAuth2.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = OAuth2.swift; sourceTree = "<group>"; };
D0D4E4AA2C8D921A007F820A /* Tunnel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Tunnel.swift; sourceTree = "<group>"; }; D0D4E4AA2C8D921A007F820A /* Tunnel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Tunnel.swift; sourceTree = "<group>"; };
D0D4E4AB2C8D921A007F820A /* TunnelButton.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TunnelButton.swift; sourceTree = "<group>"; }; D0D4E4AB2C8D921A007F820A /* TunnelButton.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TunnelButton.swift; sourceTree = "<group>"; };
D0D4E4AC2C8D921A007F820A /* TunnelStatusView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TunnelStatusView.swift; sourceTree = "<group>"; }; D0D4E4AC2C8D921A007F820A /* TunnelStatusView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TunnelStatusView.swift; sourceTree = "<group>"; };
@ -336,6 +340,7 @@
D0D4E4A02C8D921A007F820A /* Networks */ = { D0D4E4A02C8D921A007F820A /* Networks */ = {
isa = PBXGroup; isa = PBXGroup;
children = ( children = (
D0D4E49D2C8D921A007F820A /* HackClub.swift */,
D0D4E49E2C8D921A007F820A /* Network.swift */, D0D4E49E2C8D921A007F820A /* Network.swift */,
D0D4E49F2C8D921A007F820A /* WireGuard.swift */, D0D4E49F2C8D921A007F820A /* WireGuard.swift */,
); );
@ -353,6 +358,7 @@
D0D4E4A62C8D921A007F820A /* NetworkExtension+Async.swift */, D0D4E4A62C8D921A007F820A /* NetworkExtension+Async.swift */,
D0D4E4A72C8D921A007F820A /* NetworkExtensionTunnel.swift */, D0D4E4A72C8D921A007F820A /* NetworkExtensionTunnel.swift */,
D0D4E4A82C8D921A007F820A /* NetworkView.swift */, D0D4E4A82C8D921A007F820A /* NetworkView.swift */,
D0D4E4A92C8D921A007F820A /* OAuth2.swift */,
D0D4E4AA2C8D921A007F820A /* Tunnel.swift */, D0D4E4AA2C8D921A007F820A /* Tunnel.swift */,
D0D4E4AB2C8D921A007F820A /* TunnelButton.swift */, D0D4E4AB2C8D921A007F820A /* TunnelButton.swift */,
D0D4E4AC2C8D921A007F820A /* TunnelStatusView.swift */, D0D4E4AC2C8D921A007F820A /* TunnelStatusView.swift */,
@ -628,6 +634,7 @@
isa = PBXSourcesBuildPhase; isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647; buildActionMask = 2147483647;
files = ( files = (
D0D4E5712C8D9C6F007F820A /* HackClub.swift in Sources */,
D0D4E5722C8D9C6F007F820A /* Network.swift in Sources */, D0D4E5722C8D9C6F007F820A /* Network.swift in Sources */,
D0D4E5732C8D9C6F007F820A /* WireGuard.swift in Sources */, D0D4E5732C8D9C6F007F820A /* WireGuard.swift in Sources */,
D0D4E5742C8D9C6F007F820A /* BurrowView.swift in Sources */, D0D4E5742C8D9C6F007F820A /* BurrowView.swift in Sources */,
@ -637,6 +644,7 @@
D0D4E5782C8D9C6F007F820A /* NetworkExtension+Async.swift in Sources */, D0D4E5782C8D9C6F007F820A /* NetworkExtension+Async.swift in Sources */,
D0D4E5792C8D9C6F007F820A /* NetworkExtensionTunnel.swift in Sources */, D0D4E5792C8D9C6F007F820A /* NetworkExtensionTunnel.swift in Sources */,
D0D4E57A2C8D9C6F007F820A /* NetworkView.swift in Sources */, D0D4E57A2C8D9C6F007F820A /* NetworkView.swift in Sources */,
D0D4E57B2C8D9C6F007F820A /* OAuth2.swift in Sources */,
D0D4E57C2C8D9C6F007F820A /* Tunnel.swift in Sources */, D0D4E57C2C8D9C6F007F820A /* Tunnel.swift in Sources */,
D0D4E57D2C8D9C6F007F820A /* TunnelButton.swift in Sources */, D0D4E57D2C8D9C6F007F820A /* TunnelButton.swift in Sources */,
D0D4E57E2C8D9C6F007F820A /* TunnelStatusView.swift in Sources */, D0D4E57E2C8D9C6F007F820A /* TunnelStatusView.swift in Sources */,

View file

@ -40,4 +40,5 @@ APP_GROUP_IDENTIFIER = group.$(APP_BUNDLE_IDENTIFIER)
APP_GROUP_IDENTIFIER[sdk=macosx*] = $(DEVELOPMENT_TEAM).$(APP_BUNDLE_IDENTIFIER) APP_GROUP_IDENTIFIER[sdk=macosx*] = $(DEVELOPMENT_TEAM).$(APP_BUNDLE_IDENTIFIER)
NETWORK_EXTENSION_BUNDLE_IDENTIFIER = $(APP_BUNDLE_IDENTIFIER).network NETWORK_EXTENSION_BUNDLE_IDENTIFIER = $(APP_BUNDLE_IDENTIFIER).network
OTHER_SWIFT_FLAGS = $(inherited) // https://github.com/grpc/grpc-swift/issues/683#issuecomment-1130118953
OTHER_SWIFT_FLAGS = $(inherited) -Xcc -fmodule-map-file=$(GENERATED_MODULEMAP_DIR)/CNIOAtomics.modulemap -Xcc -fmodule-map-file=$(GENERATED_MODULEMAP_DIR)/CNIODarwin.modulemap -Xcc -fmodule-map-file=$(GENERATED_MODULEMAP_DIR)/CGRPCZlib.modulemap

View file

@ -1,5 +1,4 @@
@_implementationOnly import CConstants @_implementationOnly import CConstants
import Foundation
import OSLog import OSLog
public enum Constants { public enum Constants {
@ -28,30 +27,9 @@ public enum Constants {
private static let _groupContainerURL: Result<URL, Error> = { private static let _groupContainerURL: Result<URL, Error> = {
switch FileManager.default.containerURL(forSecurityApplicationGroupIdentifier: appGroupIdentifier) { switch FileManager.default.containerURL(forSecurityApplicationGroupIdentifier: appGroupIdentifier) {
case .some(let url): .success(url) case .some(let url): .success(url)
case .none: case .none: .failure(.invalidAppGroupIdentifier)
fallbackContainerURL().mapError { _ in .invalidAppGroupIdentifier }
} }
}() }()
private static func fallbackContainerURL() -> Result<URL, any Swift.Error> {
#if targetEnvironment(simulator)
Result {
let baseURL = try FileManager.default.url(
for: .applicationSupportDirectory,
in: .userDomainMask,
appropriateFor: nil,
create: true
)
let url = baseURL
.appending(component: bundleIdentifier, directoryHint: .isDirectory)
.appending(component: "SimulatorFallback", directoryHint: .isDirectory)
try FileManager.default.createDirectory(at: url, withIntermediateDirectories: true)
return url
}
#else
.failure(Error.invalidAppGroupIdentifier)
#endif
}
} }
extension Logger { extension Logger {

View file

@ -1,64 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option cc_enable_arenas = true;
option go_package = "google.golang.org/protobuf/types/known/timestamppb";
option java_package = "com.google.protobuf";
option java_outer_classname = "TimestampProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
// A Timestamp represents a point in time independent of any time zone or local
// calendar, encoded as a count of seconds and fractions of seconds at
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
// January 1, 1970, in the proleptic Gregorian calendar which extends the
// Gregorian calendar backwards to year one.
//
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
// second table is needed for interpretation, using a 24-hour linear smear.
//
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
// restricting to that range, we ensure that we can convert to and from RFC
// 3339 date strings.
message Timestamp {
// Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z.
// Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.
int64 seconds = 1;
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999 inclusive.
int32 nanos = 2;
}

View file

@ -5,15 +5,7 @@ import libburrow
import NetworkExtension import NetworkExtension
import os import os
private final class SendableCallbackBox<Callback>: @unchecked Sendable { class PacketTunnelProvider: NEPacketTunnelProvider {
let callback: Callback
init(_ callback: Callback) {
self.callback = callback
}
}
final class PacketTunnelProvider: NEPacketTunnelProvider, @unchecked Sendable {
enum Error: Swift.Error { enum Error: Swift.Error {
case missingTunnelConfiguration case missingTunnelConfiguration
} }
@ -38,41 +30,27 @@ final class PacketTunnelProvider: NEPacketTunnelProvider, @unchecked Sendable {
} }
} }
override func startTunnel( override func startTunnel(options: [String: NSObject]? = nil) async throws {
options: [String: NSObject]?, do {
completionHandler: @escaping (Swift.Error?) -> Void let configuration = try await Array(client.tunnelConfiguration(.init()).prefix(1)).first
) { guard let settings = configuration?.settings else {
let completion = SendableCallbackBox(completionHandler) throw Error.missingTunnelConfiguration
Task {
do {
let configuration = try await Array(client.tunnelConfiguration(.init()).prefix(1)).first
guard let settings = configuration?.settings else {
throw Error.missingTunnelConfiguration
}
try await setTunnelNetworkSettings(settings)
_ = try await client.tunnelStart(.init())
logger.log("Started tunnel with network settings: \(settings)")
completion.callback(nil)
} catch {
logger.error("Failed to start tunnel: \(error)")
completion.callback(error)
} }
try await setTunnelNetworkSettings(settings)
_ = try await client.tunnelStart(.init())
logger.log("Started tunnel with network settings: \(settings)")
} catch {
logger.error("Failed to start tunnel: \(error)")
throw error
} }
} }
override func stopTunnel( override func stopTunnel(with reason: NEProviderStopReason) async {
with reason: NEProviderStopReason, do {
completionHandler: @escaping () -> Void _ = try await client.tunnelStop(.init())
) { logger.log("Stopped client")
let completion = SendableCallbackBox(completionHandler) } catch {
Task { logger.error("Failed to stop tunnel: \(error)")
do {
_ = try await client.tunnelStop(.init())
logger.log("Stopped client")
} catch {
logger.error("Failed to stop tunnel: \(error)")
}
completion.callback()
} }
} }
} }

View file

@ -73,21 +73,7 @@ CARGO_PATH="$(dirname $PROTOC):$CARGO_PATH"
# Run cargo without the various environment variables set by Xcode. # Run cargo without the various environment variables set by Xcode.
# Those variables can confuse cargo and the build scripts it runs. # Those variables can confuse cargo and the build scripts it runs.
CARGO_ENV=( env -i PATH="$CARGO_PATH" PROTOC="$PROTOC" CARGO_TARGET_DIR="${CONFIGURATION_TEMP_DIR}/target" IPHONEOS_DEPLOYMENT_TARGET="$IPHONEOS_DEPLOYMENT_TARGET" MACOSX_DEPLOYMENT_TARGET="$MACOSX_DEPLOYMENT_TARGET" cargo build "${CARGO_ARGS[@]}"
"PATH=$CARGO_PATH"
"PROTOC=$PROTOC"
"CARGO_TARGET_DIR=${CONFIGURATION_TEMP_DIR}/target"
)
if [[ -n "$IPHONEOS_DEPLOYMENT_TARGET" ]]; then
CARGO_ENV+=("IPHONEOS_DEPLOYMENT_TARGET=$IPHONEOS_DEPLOYMENT_TARGET")
fi
if [[ -n "$MACOSX_DEPLOYMENT_TARGET" ]]; then
CARGO_ENV+=("MACOSX_DEPLOYMENT_TARGET=$MACOSX_DEPLOYMENT_TARGET")
fi
env -i "${CARGO_ENV[@]}" cargo build "${CARGO_ARGS[@]}"
mkdir -p "${BUILT_PRODUCTS_DIR}" mkdir -p "${BUILT_PRODUCTS_DIR}"

View file

@ -0,0 +1,20 @@
{
"colors" : [
{
"color" : {
"color-space" : "srgb",
"components" : {
"alpha" : "1.000",
"blue" : "0x50",
"green" : "0x37",
"red" : "0xEC"
}
},
"idiom" : "universal"
}
],
"info" : {
"author" : "xcode",
"version" : 1
}
}

View file

@ -0,0 +1,12 @@
{
"images" : [
{
"filename" : "flag-standalone-wtransparent.pdf",
"idiom" : "universal"
}
],
"info" : {
"author" : "xcode",
"version" : 1
}
}

File diff suppressed because it is too large Load diff

View file

@ -1,61 +1,39 @@
import SwiftUI import SwiftUI
struct NetworkCarouselView: View { struct NetworkCarouselView: View {
var networks: [NetworkCardModel] var networks: [any Network] = [
HackClub(id: 1),
HackClub(id: 2),
WireGuard(id: 4),
HackClub(id: 5)
]
var body: some View { var body: some View {
Group { ScrollView(.horizontal) {
if networks.isEmpty { LazyHStack {
#if os(iOS) ForEach(networks, id: \.id) { network in
VStack(alignment: .leading, spacing: 6) { NetworkView(network: network)
Text("No stored networks yet") .containerRelativeFrame(.horizontal, count: 10, span: 7, spacing: 0, alignment: .center)
.font(.headline) .scrollTransition(.interactive, axis: .horizontal) { content, phase in
Text("WireGuard and Tailnet networks show up here as soon as you add one.") content
.font(.footnote) .scaleEffect(1.0 - abs(phase.value) * 0.1)
.foregroundStyle(.secondary)
}
.frame(maxWidth: .infinity, alignment: .leading)
.padding()
.background(
RoundedRectangle(cornerRadius: 18)
.fill(.thinMaterial)
)
#else
ContentUnavailableView(
"No Networks Yet",
systemImage: "network.slash",
description: Text("Add a WireGuard network, or save a Tailnet account so Burrow can store a managed network when the daemon is reachable.")
)
.frame(maxWidth: .infinity, minHeight: 175)
#endif
} else {
ScrollView(.horizontal) {
LazyHStack {
ForEach(networks) { network in
NetworkView(network: network)
.containerRelativeFrame(.horizontal, count: 10, span: 7, spacing: 0, alignment: .center)
.scrollTransition(.interactive, axis: .horizontal) { content, phase in
content
.scaleEffect(1.0 - abs(phase.value) * 0.1)
}
} }
}
} }
.scrollTargetLayout()
.scrollClipDisabled()
.scrollIndicators(.hidden)
.defaultScrollAnchor(.center)
.scrollTargetBehavior(.viewAligned)
.containerRelativeFrame(.horizontal)
} }
} }
.scrollTargetLayout()
.scrollClipDisabled()
.scrollIndicators(.hidden)
.defaultScrollAnchor(.center)
.scrollTargetBehavior(.viewAligned)
.containerRelativeFrame(.horizontal)
} }
} }
#if DEBUG #if DEBUG
struct NetworkCarouselView_Previews: PreviewProvider { struct NetworkCarouselView_Previews: PreviewProvider {
static var previews: some View { static var previews: some View {
NetworkCarouselView(networks: [WireGuardCard(id: 1, detail: "10.13.13.2/24 · wg.burrow.rs:51820").card]) NetworkCarouselView()
} }
} }
#endif #endif

View file

@ -105,7 +105,7 @@ public final class NetworkExtensionTunnel: Tunnel {
let proto = NETunnelProviderProtocol() let proto = NETunnelProviderProtocol()
proto.providerBundleIdentifier = bundleIdentifier proto.providerBundleIdentifier = bundleIdentifier
proto.serverAddress = "burrow.rs" proto.serverAddress = "hackclub.com"
manager.protocolConfiguration = proto manager.protocolConfiguration = proto
try await manager.save() try await manager.save()

View file

@ -31,8 +31,8 @@ struct NetworkView<Content: View>: View {
} }
extension NetworkView where Content == AnyView { extension NetworkView where Content == AnyView {
init(network: NetworkCardModel) { init(network: any Network) {
color = network.backgroundColor color = network.backgroundColor
content = { network.label } content = { AnyView(network.label) }
} }
} }

View file

@ -0,0 +1,27 @@
import BurrowCore
import SwiftUI
struct HackClub: Network {
typealias NetworkType = Burrow_WireGuardNetwork
static let type: Burrow_NetworkType = .hackClub
var id: Int32
var backgroundColor: Color { .init("HackClub") }
@MainActor var label: some View {
GeometryReader { reader in
VStack(alignment: .leading) {
Image("HackClub")
.resizable()
.aspectRatio(contentMode: .fit)
.frame(height: reader.size.height / 4)
Spacer()
Text("@conradev")
.foregroundStyle(.white)
.font(.body.monospaced())
}
.padding()
.frame(maxWidth: .infinity)
}
}
}

View file

@ -1,608 +1,36 @@
import BurrowConfiguration import Atomics
import BurrowCore import BurrowCore
import Foundation
import Security
import SwiftProtobuf import SwiftProtobuf
import SwiftUI import SwiftUI
struct NetworkCardModel: Identifiable { protocol Network {
let id: Int32 associatedtype NetworkType: Message
let backgroundColor: Color associatedtype Label: View
let label: AnyView
}
struct TailnetNetworkPayload: Codable, Sendable { static var type: Burrow_NetworkType { get }
var provider: TailnetProvider
var authority: String?
var account: String
var identity: String
var tailnet: String?
var hostname: String?
func encoded() throws -> Data { var id: Int32 { get }
let encoder = JSONEncoder() var backgroundColor: Color { get }
encoder.outputFormatting = [.prettyPrinted, .sortedKeys]
return try encoder.encode(self)
}
}
struct TailnetLoginStartRequest: Codable, Sendable { @MainActor var label: Label { get }
var accountName: String
var identityName: String
var hostname: String?
var controlURL: String?
}
struct TailnetLoginStatus: Codable, Sendable {
var backendState: String
var authURL: String?
var running: Bool
var needsLogin: Bool
var tailnetName: String?
var magicDNSSuffix: String?
var selfDNSName: String?
var tailscaleIPs: [String]
var health: [String]
}
struct TailnetLoginStartResponse: Codable, Sendable {
var sessionID: String
var status: TailnetLoginStatus
}
struct TailnetAuthorityProbeStatus: Sendable {
var authority: String
var statusCode: Int
var summary: String
var detail: String?
}
/// Minimal HTTP client for the local tailnet login bridge.
/// All endpoints live under a fixed loopback base URL.
enum TailnetBridgeClient {
    private static let baseURL = URL(string: "http://127.0.0.1:8080")!

    /// Begins an interactive login session on the bridge.
    /// - Parameter request: Account/identity parameters for the session.
    /// - Returns: The new session identifier plus the initial status snapshot.
    /// - Throws: `TailnetBridgeError.server` for non-2xx responses.
    static func startLogin(_ request: TailnetLoginStartRequest) async throws -> TailnetLoginStartResponse {
        let endpoint = baseURL.appendingPathComponent("v1/tailscale/login/start")
        var urlRequest = URLRequest(url: endpoint)
        urlRequest.httpMethod = "POST"
        urlRequest.setValue("application/json", forHTTPHeaderField: "Content-Type")
        let encoder = JSONEncoder()
        encoder.keyEncodingStrategy = .convertToSnakeCase
        urlRequest.httpBody = try encoder.encode(request)
        let (data, response) = try await URLSession.shared.data(for: urlRequest)
        try validate(response: response, data: data)
        return try makeDecoder().decode(TailnetLoginStartResponse.self, from: data)
    }

    /// Polls the bridge for the current status of an in-flight login session.
    static func status(sessionID: String) async throws -> TailnetLoginStatus {
        let url = baseURL
            .appendingPathComponent("v1/tailscale/login")
            .appendingPathComponent(sessionID)
        let (data, response) = try await URLSession.shared.data(from: url)
        try validate(response: response, data: data)
        return try makeDecoder().decode(TailnetLoginStatus.self, from: data)
    }

    /// JSON decoder configured for the bridge's snake_case payloads.
    private static func makeDecoder() -> JSONDecoder {
        let decoder = JSONDecoder()
        decoder.keyDecodingStrategy = .convertFromSnakeCase
        return decoder
    }

    /// Throws `TailnetBridgeError.server` for any non-2xx response, using the
    /// trimmed response body as the message when it is non-empty.
    private static func validate(response: URLResponse, data: Data) throws {
        guard let http = response as? HTTPURLResponse else {
            throw URLError(.badServerResponse)
        }
        if (200..<300).contains(http.statusCode) { return }
        let fallback = "HTTP \(http.statusCode)"
        let body = String(data: data, encoding: .utf8)?.trimmingCharacters(
            in: .whitespacesAndNewlines
        )
        throw TailnetBridgeError.server(body?.ifEmpty(fallback) ?? fallback)
    }
}
/// Probes a control-plane authority's health endpoint to confirm reachability.
enum TailnetAuthorityProbeClient {
    /// Sends a GET to the provider-specific health endpoint of `authority`.
    /// - Returns: A probe status carrying the normalized authority, the HTTP
    ///   status code, and the trimmed response body (when non-empty) as detail.
    /// - Throws: `TailnetBridgeError.server` for invalid URLs or non-2xx responses.
    static func probe(provider: TailnetProvider, authority: String) async throws -> TailnetAuthorityProbeStatus {
        let normalizedAuthority = normalizeAuthority(authority)
        let baseURL = try validatedBaseURL(normalizedAuthority)
        var request = URLRequest(url: probeURL(for: provider, baseURL: baseURL))
        request.timeoutInterval = 10
        request.setValue("application/json", forHTTPHeaderField: "Accept")
        let (data, response) = try await URLSession.shared.data(for: request)
        guard let http = response as? HTTPURLResponse else {
            throw URLError(.badServerResponse)
        }
        let fallback = "HTTP \(http.statusCode)"
        let body = String(data: data, encoding: .utf8)?
            .trimmingCharacters(in: .whitespacesAndNewlines)
        guard (200..<300).contains(http.statusCode) else {
            throw TailnetBridgeError.server(body?.ifEmpty(fallback) ?? fallback)
        }
        return TailnetAuthorityProbeStatus(
            authority: normalizedAuthority,
            statusCode: http.statusCode,
            summary: "\(provider.title) reachable",
            detail: body.flatMap { $0.isEmpty ? nil : $0 }
        )
    }

    /// Defaults to HTTPS when the caller omitted a scheme.
    private static func normalizeAuthority(_ authority: String) -> String {
        let trimmed = authority.trimmingCharacters(in: .whitespacesAndNewlines)
        return trimmed.contains("://") ? trimmed : "https://\(trimmed)"
    }

    /// Rejects strings that do not parse into a URL with a host.
    private static func validatedBaseURL(_ authority: String) throws -> URL {
        guard let url = URL(string: authority), url.host != nil else {
            throw TailnetBridgeError.server("Invalid server URL")
        }
        return url
    }

    /// Maps each provider to its health-check path; Tailscale has no such path.
    private static func probeURL(for provider: TailnetProvider, baseURL: URL) -> URL {
        switch provider {
        case .headscale: baseURL.appendingPathComponent("health")
        case .burrow: baseURL.appendingPathComponent("healthz")
        case .tailscale: baseURL
        }
    }
}
enum TailnetBridgeError: LocalizedError {
case server(String)
var errorDescription: String? {
switch self {
case .server(let message):
message
}
}
} }
@Observable @Observable
@MainActor @MainActor
final class NetworkViewModel: Sendable { final class NetworkViewModel: Sendable {
private(set) var networks: [Burrow_Network] = [] private(set) var networks: [Burrow_Network] = []
private(set) var connectionError: String?
private let socketURLResult: Result<URL, Error>
nonisolated(unsafe) private var task: Task<Void, Never>? private var task: Task<Void, Error>!
init(socketURLResult: Result<URL, Error>) { init(socketURL: URL) {
self.socketURLResult = socketURLResult
startStreaming()
}
deinit {
task?.cancel()
}
var cards: [NetworkCardModel] {
networks.map(Self.makeCard(for:))
}
var nextNetworkID: Int32 {
(networks.map(\.id).max() ?? 0) + 1
}
func addWireGuardNetwork(configText: String) async throws -> Int32 {
try await addNetwork(type: .wireGuard, payload: Data(configText.utf8))
}
func addTailnetNetwork(payload: TailnetNetworkPayload) async throws -> Int32 {
try await addNetwork(type: .tailnet, payload: payload.encoded())
}
private func addNetwork(type: Burrow_NetworkType, payload: Data) async throws -> Int32 {
let socketURL = try socketURLResult.get()
let networkID = nextNetworkID
let request = Burrow_Network.with {
$0.id = networkID
$0.type = type
$0.payload = payload
}
let client = NetworksClient.unix(socketURL: socketURL)
_ = try await client.networkAdd(request)
return networkID
}
private func startStreaming() {
task?.cancel()
let socketURLResult = self.socketURLResult
task = Task { [weak self] in task = Task { [weak self] in
do { let client = NetworksClient.unix(socketURL: socketURL)
let socketURL = try socketURLResult.get() for try await networks in client.networkList(.init()) {
let client = NetworksClient.unix(socketURL: socketURL) guard let viewModel = self else { continue }
for try await response in client.networkList(.init()) { Task { @MainActor in
guard !Task.isCancelled else { return } viewModel.networks = networks.network
await MainActor.run {
guard let self else { return }
self.networks = response.network
self.connectionError = nil
}
}
} catch {
guard !Task.isCancelled else { return }
await MainActor.run {
guard let self else { return }
self.connectionError = error.localizedDescription
} }
} }
} }
} }
/// Converts a daemon-provided network record into a renderable card model.
/// Dispatches on the protobuf network type; unknown or unrecognized types
/// produce a generic placeholder card instead of being dropped.
private static func makeCard(for network: Burrow_Network) -> NetworkCardModel {
    switch network.type {
    case .wireGuard:
        WireGuardCard(network: network).card
    case .tailnet:
        TailnetCard(network: network).card
    case .UNRECOGNIZED(let rawValue):
        // Raw enum value came from a newer (or corrupt) daemon; surface it.
        unsupportedCard(
            id: network.id,
            title: "Unknown Network",
            detail: "Type \(rawValue) is not recognized by this build."
        )
    @unknown default:
        // Future cases added to the generated enum land here at runtime.
        unsupportedCard(
            id: network.id,
            title: "Unsupported Network",
            detail: "Update Burrow to view this network."
        )
    }
}
/// Builds a gray placeholder card for network types this build cannot render.
/// - Parameters:
///   - id: Daemon-assigned network identifier, shown in the card footer.
///   - title: Short headline for the card.
///   - detail: One-line explanation shown under the title.
private static func unsupportedCard(id: Int32, title: String, detail: String) -> NetworkCardModel {
    NetworkCardModel(
        id: id,
        backgroundColor: .gray.opacity(0.85),
        label: AnyView(
            VStack(alignment: .leading, spacing: 12) {
                Text(title)
                    .font(.title3.weight(.semibold))
                    .foregroundStyle(.white)
                Text(detail)
                    .font(.body)
                    .foregroundStyle(.white.opacity(0.9))
                Spacer()
                Text("Network #\(id)")
                    .font(.footnote.monospaced())
                    .foregroundStyle(.white.opacity(0.8))
            }
            .padding()
            .frame(maxWidth: .infinity, alignment: .leading)
        )
    )
}
}
/// The control-plane flavors a tailnet account can target.
enum TailnetProvider: String, CaseIterable, Codable, Identifiable, Sendable {
    case tailscale
    case headscale
    case burrow

    var id: String { rawValue }

    /// Human-readable provider name.
    var title: String {
        switch self {
        case .tailscale: "Tailscale"
        case .headscale: "Headscale"
        case .burrow: "Burrow"
        }
    }

    /// Only the hosted Tailscale service uses the browser-based login flow.
    var usesWebLogin: Bool {
        if case .tailscale = self { return true }
        return false
    }

    /// Self-hosted providers must supply their own control URL.
    var requiresControlURL: Bool {
        !usesWebLogin
    }

    /// Pre-filled control-plane endpoint, when the provider has a well-known one.
    var defaultAuthority: String? {
        switch self {
        case .tailscale: "https://controlplane.tailscale.com"
        case .headscale: "https://ts.burrow.net"
        case .burrow: nil
        }
    }

    /// Caption shown under the provider name in pickers.
    var subtitle: String {
        switch self {
        case .tailscale:
            "Use Tailscale's real browser login flow."
        case .headscale:
            "Store a Headscale control-plane endpoint and credentials."
        case .burrow:
            "Store Burrow control-plane credentials."
        }
    }
}
/// The kinds of network accounts the app can store locally.
enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable {
    case wireGuard
    case tor
    case headscale

    var id: String { rawValue }

    /// Display name for the account kind.
    var title: String {
        switch self {
        case .wireGuard: "WireGuard"
        case .tor: "Tor"
        case .headscale: "Tailnet"
        }
    }

    /// Caption describing what saving this kind of account does.
    var subtitle: String {
        switch self {
        case .wireGuard: "Import a tunnel and optional account metadata."
        case .tor: "Store Arti account and identity preferences."
        case .headscale: "Save Tailscale, Headscale, or Burrow control-plane identities."
        }
    }

    /// Tint used for this kind's cards and buttons.
    var accentColor: Color {
        switch self {
        case .wireGuard: Color("WireGuard")
        case .tor: .orange
        case .headscale: .mint
        }
    }

    /// Label for the primary action button; only WireGuard creates a network directly.
    var actionTitle: String {
        self == .wireGuard ? "Add Network" : "Save Account"
    }

    /// Explains partial platform support; `nil` when fully available.
    var availabilityNote: String? {
        switch self {
        case .wireGuard:
            nil
        case .tor:
            "Tor account preferences are stored on Apple now. The managed Tor runtime is not wired on Apple in this branch yet."
        case .headscale:
            "Tailnet accounts can sign in from Apple now. The managed Apple runtime is still pending, but Tailnet networks can be stored in the daemon."
        }
    }
}
/// How an account authenticates against its control plane.
enum AccountAuthMode: String, CaseIterable, Codable, Identifiable, Sendable {
    case none
    case web
    case password
    case preauthKey

    var id: String { rawValue }

    // Picker labels, one entry per case.
    private static let displayNames: [AccountAuthMode: String] = [
        .none: "None",
        .web: "Web Login",
        .password: "Password",
        .preauthKey: "Preauth Key",
    ]

    /// Display name shown in pickers.
    var title: String {
        // Every case is present in `displayNames`; the fallback is unreachable.
        Self.displayNames[self] ?? rawValue
    }
}
/// A locally stored account for one network kind. Persisted as JSON in the
/// shared defaults by `NetworkAccountStore`; any associated secret is kept in
/// the Keychain, keyed by `id`, never in this record.
struct NetworkAccountRecord: Codable, Identifiable, Hashable, Sendable {
    let id: UUID
    var kind: AccountNetworkKind
    // Display name shown in lists.
    var title: String
    // Control-plane endpoint, when the kind uses one.
    var authority: String?
    // Tailnet provider; nil for non-tailnet kinds.
    var provider: TailnetProvider?
    var accountName: String
    var identityName: String
    var hostname: String?
    var username: String?
    var tailnet: String?
    var authMode: AccountAuthMode
    // Free-form user note.
    var note: String?
    var createdAt: Date
    var updatedAt: Date
}
/// Card presentation for a stored tailnet network.
/// Decodes the daemon payload as `TailnetNetworkPayload`; a missing or
/// malformed payload degrades to generic "Tailnet" placeholder text.
struct TailnetCard {
    var id: Int32
    var provider: String
    var title: String
    var detail: String

    init(network: Burrow_Network) {
        // Best-effort decode: failures yield nil and the fallbacks below.
        let payload = (try? JSONDecoder().decode(TailnetNetworkPayload.self, from: network.payload))
        id = network.id
        provider = payload?.provider.title ?? "Tailnet"
        title = payload?.tailnet ?? payload?.hostname ?? "Tailnet"
        // Join whatever metadata is present with a middle dot.
        detail = [
            payload?.provider.title,
            payload?.authority,
            payload.map { "Account: \($0.account)" },
        ]
        .compactMap { $0 }
        .joined(separator: " · ")
        .ifEmpty("Stored Tailnet configuration")
    }

    /// Mint-tinted card view: provider/title header, metadata footer.
    var card: NetworkCardModel {
        NetworkCardModel(
            id: id,
            backgroundColor: .mint,
            label: AnyView(
                VStack(alignment: .leading, spacing: 12) {
                    HStack {
                        VStack(alignment: .leading, spacing: 4) {
                            Text(provider)
                                .font(.headline)
                                .foregroundStyle(.white.opacity(0.85))
                            Text(title)
                                .font(.title3.weight(.semibold))
                                .foregroundStyle(.white)
                        }
                        Spacer()
                    }
                    Spacer()
                    Text(detail)
                        .font(.body.monospaced())
                        .foregroundStyle(.white.opacity(0.92))
                        .lineLimit(4)
                }
                .padding()
                .frame(maxWidth: .infinity, alignment: .leading)
            )
        )
    }
}
/// Observable store of `NetworkAccountRecord`s, persisted as a single JSON
/// blob in `UserDefaults`. Secrets are delegated to the Keychain via
/// `AccountSecretStore` and are never written to defaults.
@Observable
@MainActor
final class NetworkAccountStore {
    private static let storageKey = "burrow.network-accounts"
    private let defaults: UserDefaults
    private(set) var accounts: [NetworkAccountRecord] = []

    /// Loads any previously persisted records immediately.
    /// - Parameter defaults: Falls back to `.standard` when the shared
    ///   app-group suite cannot be opened.
    init(defaults: UserDefaults = UserDefaults(suiteName: Constants.appGroupIdentifier) ?? .standard) {
        self.defaults = defaults
        load()
    }

    /// Inserts or replaces `record` (matched by `id`), keeps the list sorted
    /// by kind then case-insensitive title, persists, and syncs the Keychain:
    /// a nil or whitespace-only secret removes any stored secret.
    func upsert(_ record: NetworkAccountRecord, secret: String?) throws {
        if let index = accounts.firstIndex(where: { $0.id == record.id }) {
            accounts[index] = record
        } else {
            accounts.append(record)
        }
        accounts.sort {
            if $0.kind == $1.kind {
                return $0.title.localizedCaseInsensitiveCompare($1.title) == .orderedAscending
            }
            return $0.kind.rawValue < $1.kind.rawValue
        }
        try persist()
        if let secret, !secret.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty {
            // NOTE(review): the untrimmed secret is what gets stored; trimming
            // is only used for the emptiness check — confirm that is intended.
            try AccountSecretStore.store(secret, for: record.id)
        } else {
            try AccountSecretStore.removeSecret(for: record.id)
        }
    }

    /// Removes the record, persists, and deletes its Keychain secret.
    func delete(_ record: NetworkAccountRecord) throws {
        accounts.removeAll { $0.id == record.id }
        try persist()
        try AccountSecretStore.removeSecret(for: record.id)
    }

    /// Whether a Keychain secret exists for this record.
    func hasStoredSecret(for record: NetworkAccountRecord) -> Bool {
        AccountSecretStore.hasSecret(for: record.id)
    }

    /// Reads the persisted blob. Decode failures deliberately reset to an
    /// empty list rather than surfacing an error for a corrupt store.
    private func load() {
        guard let data = defaults.data(forKey: Self.storageKey) else {
            accounts = []
            return
        }
        do {
            accounts = try JSONDecoder().decode([NetworkAccountRecord].self, from: data)
        } catch {
            accounts = []
        }
    }

    /// Serializes the full account list back into defaults.
    private func persist() throws {
        let data = try JSONEncoder().encode(accounts)
        defaults.set(data, forKey: Self.storageKey)
    }
}
/// Keychain-backed storage for account secrets: one generic-password item
/// per account UUID under the app's service namespace.
private enum AccountSecretStore {
    private static let service = "\(Constants.bundleIdentifier).accounts"

    /// Whether a Keychain item exists for the account.
    static func hasSecret(for accountID: UUID) -> Bool {
        let query = baseQuery(for: accountID)
        return SecItemCopyMatching(query as CFDictionary, nil) == errSecSuccess
    }

    /// Stores `secret`, updating in place when an item already exists (so
    /// existing item attributes are preserved), adding a new item otherwise.
    /// - Throws: `AccountSecretStoreError.osStatus` on any Keychain failure.
    static func store(_ secret: String, for accountID: UUID) throws {
        let data = Data(secret.utf8)
        let query = baseQuery(for: accountID)
        let status = SecItemCopyMatching(query as CFDictionary, nil)
        if status == errSecSuccess {
            let updateStatus = SecItemUpdate(
                query as CFDictionary,
                [kSecValueData as String: data] as CFDictionary
            )
            guard updateStatus == errSecSuccess else {
                throw AccountSecretStoreError.osStatus(updateStatus)
            }
            return
        }
        // No existing item: add one readable after first unlock so it is
        // available without further user interaction.
        var item = query
        item[kSecValueData as String] = data
        item[kSecAttrAccessible as String] = kSecAttrAccessibleAfterFirstUnlock
        let addStatus = SecItemAdd(item as CFDictionary, nil)
        guard addStatus == errSecSuccess else {
            throw AccountSecretStoreError.osStatus(addStatus)
        }
    }

    /// Deletes the account's item; a missing item is not treated as an error.
    static func removeSecret(for accountID: UUID) throws {
        let status = SecItemDelete(baseQuery(for: accountID) as CFDictionary)
        guard status == errSecSuccess || status == errSecItemNotFound else {
            throw AccountSecretStoreError.osStatus(status)
        }
    }

    /// Class/service/account triple identifying this account's item.
    private static func baseQuery(for accountID: UUID) -> [String: Any] {
        [
            kSecClass as String: kSecClassGenericPassword,
            kSecAttrService as String: service,
            kSecAttrAccount as String: accountID.uuidString,
        ]
    }
}
/// Wraps Keychain `OSStatus` failures for user-facing error reporting.
private enum AccountSecretStoreError: LocalizedError {
    case osStatus(OSStatus)

    /// Human-readable description from Security.framework, with a generic
    /// fallback when the framework has no message for the status code.
    var errorDescription: String? {
        switch self {
        case .osStatus(let status):
            guard let message = SecCopyErrorMessageString(status, nil) as String? else {
                return "Keychain error \(status)"
            }
            return message
        }
    }
}
private extension String {
func ifEmpty(_ fallback: @autoclosure () -> String) -> String {
isEmpty ? fallback() : self
}
} }

View file

@ -1,40 +1,14 @@
import BurrowCore import BurrowCore
import Foundation
import SwiftUI import SwiftUI
struct WireGuardCard { struct WireGuard: Network {
typealias NetworkType = Burrow_WireGuardNetwork
static let type: BurrowCore.Burrow_NetworkType = .wireGuard
var id: Int32 var id: Int32
var title: String var backgroundColor: Color { .init("WireGuard") }
var detail: String
init(id: Int32, title: String = "WireGuard", detail: String = "Stored configuration") { @MainActor var label: some View {
self.id = id
self.title = title
self.detail = detail
}
init(network: Burrow_Network) {
let payload = String(data: network.payload, encoding: .utf8) ?? ""
let address = Self.firstValue(for: "Address", in: payload)
let endpoint = Self.firstValue(for: "Endpoint", in: payload)
self.id = network.id
self.title = "WireGuard"
self.detail = [address, endpoint]
.compactMap { $0 }
.filter { !$0.isEmpty }
.joined(separator: " · ")
.ifEmpty("Stored configuration")
}
var card: NetworkCardModel {
NetworkCardModel(
id: id,
backgroundColor: .init("WireGuard"),
label: AnyView(label)
)
}
private var label: some View {
GeometryReader { reader in GeometryReader { reader in
VStack(alignment: .leading) { VStack(alignment: .leading) {
HStack { HStack {
@ -49,29 +23,12 @@ struct WireGuardCard {
} }
.frame(maxWidth: .infinity, maxHeight: reader.size.height / 4) .frame(maxWidth: .infinity, maxHeight: reader.size.height / 4)
Spacer() Spacer()
Text(detail) Text("@conradev")
.foregroundStyle(.white) .foregroundStyle(.white)
.font(.body.monospaced()) .font(.body.monospaced())
.lineLimit(3)
} }
.padding() .padding()
.frame(maxWidth: .infinity) .frame(maxWidth: .infinity)
} }
} }
private static func firstValue(for key: String, in config: String) -> String? {
config
.split(whereSeparator: \.isNewline)
.map(String.init)
.first(where: { $0.hasPrefix("\(key) = ") })?
.split(separator: "=", maxSplits: 1)
.last
.map { $0.trimmingCharacters(in: .whitespaces) }
}
}
private extension String {
func ifEmpty(_ fallback: @autoclosure () -> String) -> String {
isEmpty ? fallback() : self
}
} }

293
Apple/UI/OAuth2.swift Normal file
View file

@ -0,0 +1,293 @@
import AuthenticationServices
import Foundation
import os
import SwiftUI
/// Minimal OAuth 2.0 authorization-code client used for browser-based logins.
enum OAuth2 {
    /// Failures produced while building URLs or interpreting callbacks.
    enum Error: Swift.Error {
        case unknown
        case invalidAuthorizationURL
        case invalidCallbackURL
        case invalidRedirectURI
    }

    /// Tokens produced by a successful token-endpoint exchange.
    struct Credential {
        var accessToken: String
        var refreshToken: String?
        // Absolute expiry computed from the server's relative `expires_in`.
        var expirationDate: Date?
    }

    /// One configured authorization-code session against a provider.
    struct Session {
        var authorizationEndpoint: URL
        var tokenEndpoint: URL
        var redirectURI: URL
        var responseType = OAuth2.ResponseType.code
        var scopes: Set<String>
        var clientID: String
        var clientSecret: String

        // Registry of continuations waiting for an external URL callback,
        // keyed by a random token. Shared across all sessions.
        fileprivate static let queue: OSAllocatedUnfairLock<[Int: CheckedContinuation<URL, Swift.Error>]> = {
            .init(initialState: [:])
        }()

        /// Delivers an incoming callback URL to every waiting continuation
        /// and empties the registry.
        // NOTE(review): every waiter receives the same URL regardless of which
        // session initiated the flow — confirm only one login runs at a time.
        fileprivate static func handle(url: URL) {
            let continuations = queue.withLock { continuations in
                let copy = continuations
                continuations.removeAll()
                return copy
            }
            for (_, continuation) in continuations {
                continuation.resume(returning: url)
            }
        }

        init(
            authorizationEndpoint: URL,
            tokenEndpoint: URL,
            redirectURI: URL,
            scopes: Set<String>,
            clientID: String,
            clientSecret: String
        ) {
            self.authorizationEndpoint = authorizationEndpoint
            self.tokenEndpoint = tokenEndpoint
            self.redirectURI = redirectURI
            self.scopes = scopes
            self.clientID = clientID
            self.clientSecret = clientSecret
        }

        /// Authorization endpoint with client/response/redirect query items.
        private var authorizationURL: URL {
            get throws {
                var queryItems: [URLQueryItem] = [
                    .init(name: "client_id", value: clientID),
                    .init(name: "response_type", value: responseType.rawValue),
                    .init(name: "redirect_uri", value: redirectURI.absoluteString)
                ]
                if !scopes.isEmpty {
                    // NOTE(review): RFC 6749 §3.3 defines scope as
                    // space-delimited; a comma separator may be rejected by
                    // strict providers — confirm against the target provider.
                    queryItems.append(.init(name: "scope", value: scopes.joined(separator: ",")))
                }
                guard var components = URLComponents(url: authorizationEndpoint, resolvingAgainstBaseURL: false) else {
                    throw OAuth2.Error.invalidAuthorizationURL
                }
                components.queryItems = queryItems
                guard let authorizationURL = components.url else { throw OAuth2.Error.invalidAuthorizationURL }
                return authorizationURL
            }
        }

        /// Parses the provider's redirect back into a token response.
        /// Only the `code` response type is supported.
        private func handle(callbackURL: URL) async throws -> OAuth2.AccessTokenResponse {
            switch responseType {
            case .code:
                guard let components = URLComponents(url: callbackURL, resolvingAgainstBaseURL: false) else {
                    throw OAuth2.Error.invalidCallbackURL
                }
                return try await handle(response: try components.decode(OAuth2.CodeResponse.self))
            default:
                throw OAuth2.Error.invalidCallbackURL
            }
        }

        /// Exchanges an authorization code for tokens at the token endpoint.
        private func handle(response: OAuth2.CodeResponse) async throws -> OAuth2.AccessTokenResponse {
            var components = URLComponents()
            components.queryItems = [
                .init(name: "client_id", value: clientID),
                .init(name: "client_secret", value: clientSecret),
                .init(name: "grant_type", value: GrantType.authorizationCode.rawValue),
                .init(name: "code", value: response.code),
                .init(name: "redirect_uri", value: redirectURI.absoluteString)
            ]
            // NOTE(review): force-unwrap assumes setting queryItems always
            // yields a percent-encoded query; holds for the items built above.
            let httpBody = Data(components.percentEncodedQuery!.utf8)
            var request = URLRequest(url: tokenEndpoint)
            request.setValue("application/x-www-form-urlencoded", forHTTPHeaderField: "Content-Type")
            request.httpMethod = "POST"
            request.httpBody = httpBody
            // Ephemeral session: no cookies or caches persist the exchange.
            let session = URLSession(configuration: .ephemeral)
            let (data, _) = try await session.data(for: request)
            return try OAuth2.decoder.decode(OAuth2.AccessTokenResponse.self, from: data)
        }

        /// Runs the full browser flow: opens the authorization URL, waits for
        /// the redirect, then exchanges the code for a credential.
        func authorize(_ session: WebAuthenticationSession) async throws -> Credential {
            let authorizationURL = try authorizationURL
            let callbackURL = try await session.start(
                url: authorizationURL,
                redirectURI: redirectURI
            )
            return try await handle(callbackURL: callbackURL).credential
        }
    }

    /// Query payload of an authorization-code redirect.
    private struct CodeResponse: Codable {
        var code: String
        var state: String?
    }

    /// Token-endpoint JSON body (snake_case handled by `OAuth2.decoder`).
    private struct AccessTokenResponse: Codable {
        var accessToken: String
        var tokenType: TokenType
        var expiresIn: Double?
        var refreshToken: String?

        // Converts the wire format into the public credential, resolving the
        // relative expiry into an absolute date at access time.
        var credential: Credential {
            .init(
                accessToken: accessToken,
                refreshToken: refreshToken,
                expirationDate: expiresIn.map { Date(timeIntervalSinceNow: $0) }
            )
        }
    }

    /// `token_type` values; unknown strings are preserved round-trip.
    enum TokenType: Codable, RawRepresentable {
        case bearer
        case unknown(String)

        init(rawValue: String) {
            self = switch rawValue.lowercased() {
            case "bearer": .bearer
            default: .unknown(rawValue)
            }
        }

        var rawValue: String {
            switch self {
            case .bearer: "bearer"
            case .unknown(let type): type
            }
        }
    }

    /// `grant_type` values; unknown strings are preserved round-trip.
    enum GrantType: Codable, RawRepresentable {
        case authorizationCode
        case unknown(String)

        init(rawValue: String) {
            self = switch rawValue.lowercased() {
            case "authorization_code": .authorizationCode
            default: .unknown(rawValue)
            }
        }

        var rawValue: String {
            switch self {
            case .authorizationCode: "authorization_code"
            case .unknown(let type): type
            }
        }
    }

    /// `response_type` values; unknown strings are preserved round-trip.
    enum ResponseType: Codable, RawRepresentable {
        case code
        case idToken
        case unknown(String)

        init(rawValue: String) {
            self = switch rawValue.lowercased() {
            case "code": .code
            case "id_token": .idToken
            default: .unknown(rawValue)
            }
        }

        var rawValue: String {
            switch self {
            case .code: "code"
            case .idToken: "id_token"
            case .unknown(let type): type
            }
        }
    }

    // Shared coders translating between Swift camelCase and OAuth snake_case.
    fileprivate static var decoder: JSONDecoder {
        let decoder = JSONDecoder()
        decoder.keyDecodingStrategy = .convertFromSnakeCase
        return decoder
    }

    fileprivate static var encoder: JSONEncoder {
        let encoder = JSONEncoder()
        encoder.keyEncodingStrategy = .convertToSnakeCase
        return encoder
    }
}
// NOTE(review): `@unchecked Sendable` is asserted here without compiler
// verification for a framework type; confirm WebAuthenticationSession is
// actually safe to pass across concurrency domains before relying on this.
extension WebAuthenticationSession: @unchecked @retroactive Sendable {
}
extension WebAuthenticationSession {
    #if canImport(BrowserEngineKit)
    /// Builds the modern `ASWebAuthenticationSession.Callback` for a redirect
    /// URI: HTTPS redirects become host/path callbacks, custom schemes pass
    /// through, and plain HTTP or scheme-less URIs are rejected.
    @available(iOS 17.4, macOS 14.4, tvOS 17.4, watchOS 10.4, *)
    fileprivate static func callback(for redirectURI: URL) throws -> ASWebAuthenticationSession.Callback {
        switch redirectURI.scheme {
        case "https":
            guard let host = redirectURI.host else { throw OAuth2.Error.invalidRedirectURI }
            return .https(host: host, path: redirectURI.path)
        case "http":
            // Insecure redirects are never accepted.
            throw OAuth2.Error.invalidRedirectURI
        case .some(let scheme):
            return .customScheme(scheme)
        case .none:
            throw OAuth2.Error.invalidRedirectURI
        }
    }
    #endif

    /// Starts a web authentication flow and returns the callback URL.
    /// On OS versions with the callback API it is used directly; otherwise the
    /// flow races the legacy scheme-based API against a continuation resumed
    /// by `OAuth2.Session.handle(url:)` (fed from `onOpenURL`).
    fileprivate func start(url: URL, redirectURI: URL) async throws -> URL {
        #if canImport(BrowserEngineKit)
        if #available(iOS 17.4, macOS 14.4, tvOS 17.4, watchOS 10.4, *) {
            return try await authenticate(
                using: url,
                callback: try Self.callback(for: redirectURI),
                additionalHeaderFields: [:]
            )
        }
        #endif
        return try await withThrowingTaskGroup(of: URL.self) { group in
            group.addTask {
                return try await authenticate(using: url, callbackURLScheme: redirectURI.scheme ?? "")
            }
            let id = Int.random(in: 0..<Int.max)
            group.addTask {
                // NOTE(review): this continuation is only ever resumed by
                // OAuth2.Session.handle(url:); cancellation does not resume
                // it, so the losing child may never complete after
                // cancelAll() below — consider withTaskCancellationHandler.
                return try await withCheckedThrowingContinuation { continuation in
                    OAuth2.Session.queue.withLock { $0[id] = continuation }
                }
            }
            guard let url = try await group.next() else { throw OAuth2.Error.invalidCallbackURL }
            group.cancelAll()
            OAuth2.Session.queue.withLock { $0[id] = nil }
            return url
        }
    }
}
extension View {
    /// Routes URLs opened into the app to any pending OAuth2 login flows.
    func handleOAuth2Callback() -> some View {
        onOpenURL { url in OAuth2.Session.handle(url: url) }
    }
}
extension URLComponents {
    /// Decodes the receiver's query items into `T` by round-tripping them
    /// through JSON with the shared OAuth2 coders.
    /// - Throws: `DecodingError` when query items are absent or duplicated.
    fileprivate func decode<T: Decodable>(_ type: T.Type) throws -> T {
        guard let queryItems else {
            throw DecodingError.valueNotFound(
                T.self,
                .init(codingPath: [], debugDescription: "Missing query items")
            )
        }
        let data = try OAuth2.encoder.encode(try queryItems.values)
        return try OAuth2.decoder.decode(T.self, from: data)
    }
}
extension Sequence where Element == URLQueryItem {
    /// The query items as a name-to-value dictionary.
    /// - Throws: `DecodingError.dataCorrupted` when two items share a name.
    fileprivate var values: [String: String?] {
        get throws {
            try Dictionary(map { ($0.name, $0.value) }) { _, _ in
                throw DecodingError.dataCorrupted(.init(codingPath: [], debugDescription: "Duplicate query items"))
            }
        }
    }
}

View file

@ -1,38 +0,0 @@
# Burrow Constitution
1. Mission
Burrow exists to build a proper VPN: fast, inspectable, deployable on infrastructure the project controls, and legible enough that future contributors can extend it without guesswork.
2. Commitments
- Protocol work must favor correctness over novelty. Burrow does not claim support for a transport or control-plane feature until the wire format, state handling, and recovery behavior are implemented and tested.
- Security is a design constraint, not a cleanup phase. Key material, bootstrap credentials, control-plane tokens, and routing policy must have explicit storage and rotation paths.
- Performance matters. Burrow should avoid needless copies, hidden blocking, and ad hoc process graphs that make packet forwarding or control-plane convergence harder to reason about.
- Source, infrastructure, and release logic live in the repository. If the forge cannot be rebuilt from the tree, the work is incomplete.
- Non-trivial changes require a Burrow Evolution Proposal. Durable rationale belongs in the repository, not only in chat.
3. Infrastructure
Burrow controls its own forge, runners, deployment automation, and edge configuration for `burrow.net` and `burrow.rs`.
- Dedicated compute is preferred over SaaS dependencies when the dependency would hold release, source, or identity authority.
- Secrets may be bootstrapped from local intake for initial bring-up, but long-lived operation must converge on encrypted, versioned secret handling.
- Production access must be attributable. Automation identities, SSH keys, and service accounts must be named and documented.
4. Contributors
- Read this constitution before drafting product, protocol, or infrastructure changes.
- Capture intent, testing expectations, and rollback procedures in proposals.
- Prefer reversible migrations. If a change is destructive, document the preconditions and teardown plan first.
- Security-sensitive work requires explicit reviewer attention, even when the implementation is performed by an agent.
5. Governance
- Burrow Evolution Proposals (BEPs) are the primary design record for architectural, protocol, forge, and deployment changes.
- Accepted proposals are authoritative until superseded.
- Constitutional changes require a dedicated proposal that quotes the affected text and records the decision.
6. Origin
Burrow started as a firewall-burrowing client and now carries its own transport, daemon, mesh, and control-plane work. This constitution exists so the project can finish that evolution coherently.

3964
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,4 @@
FROM docker.io/library/rust:1.85-slim-bookworm AS builder FROM docker.io/library/rust:1.79-slim-bookworm AS builder
ARG TARGETPLATFORM ARG TARGETPLATFORM
ARG LLVM_VERSION=16 ARG LLVM_VERSION=16

View file

@ -1,23 +1,21 @@
tun := $(shell ifconfig -l | sed 's/ /\n/g' | grep utun | tail -n 1) tun := $(shell ifconfig -l | sed 's/ /\n/g' | grep utun | tail -n 1)
cargo_console := env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features -- cargo_console := RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features
cargo_norm := env RUST_BACKTRACE=1 RUST_LOG=debug cargo run -- cargo_norm := RUST_BACKTRACE=1 RUST_LOG=debug cargo run
sudo_cargo_console := sudo -E env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features --
sudo_cargo_norm := sudo -E env RUST_BACKTRACE=1 RUST_LOG=debug cargo run --
check: check:
@cargo check @cargo check
build: build:
@cargo build @cargo run build
daemon-console: daemon-console:
@$(sudo_cargo_console) daemon @$(cargo_console) daemon
daemon: daemon:
@$(sudo_cargo_norm) daemon @$(cargo_norm) daemon
start: start:
@$(sudo_cargo_norm) start @$(cargo_norm) start
stop: stop:
@$(cargo_norm) stop @$(cargo_norm) stop

View file

@ -5,19 +5,10 @@
Burrow is an open source tool for burrowing through firewalls, built by teenagers at [Hack Club](https://hackclub.com/). Burrow is an open source tool for burrowing through firewalls, built by teenagers at [Hack Club](https://hackclub.com/).
`burrow` provides a simple command-line tool to open virtual interfaces and direct traffic through them. `burrow` provides a simple command-line tool to open virtual interfaces and direct traffic through them.
Routine verification now runs unprivileged with `cargo test --workspace --all-features`; only tunnel startup needs elevation.
The repository now carries its own design and deployment record:
- [Constitution](./CONSTITUTION.md)
- [Burrow Evolution](./evolution/README.md)
- [WireGuard Rust Lineage](./docs/WIREGUARD_LINEAGE.md)
- [Protocol Roadmap](./docs/PROTOCOL_ROADMAP.md)
- [Forward Email Runbook](./docs/FORWARDEMAIL.md)
## Contributing ## Contributing
Burrow is fully open source, you can fork the repo and start contributing easily. For more information and in-depth discussions, visit the `#burrow` channel on the [Hack Club Slack](https://hackclub.com/slack/), here you can ask for help and talk with other people interested in burrow. Checkout [GETTING_STARTED.md](./docs/GETTING_STARTED.md) for build instructions and [GTK_APP.md](./docs/GTK_APP.md) for the Linux app. Forge and deployment scaffolding live in [`flake.nix`](./flake.nix), [`nixos/`](./nixos), and [`.forgejo/workflows/`](./.forgejo/workflows/). Hosted mail backup operations live in [`docs/FORWARDEMAIL.md`](./docs/FORWARDEMAIL.md) and [`Tools/forwardemail-custom-s3.sh`](./Tools/forwardemail-custom-s3.sh). Burrow is fully open source, you can fork the repo and start contributing easily. For more information and in-depth discussions, visit the `#burrow` channel on the [Hack Club Slack](https://hackclub.com/slack/), here you can ask for help and talk with other people interested in burrow! Checkout [GETTING_STARTED.md](./docs/GETTING_STARTED.md) for build instructions and [GTK_APP.md](./docs/GTK_APP.md) for the Linux app.
The project structure is divided in the following folders: The project structure is divided in the following folders:

View file

@ -1,95 +0,0 @@
#!/usr/bin/env bash
# Abort the script (exit 1, message on stderr) unless command $1 is on PATH.
burrow_require_cmd() {
  command -v "$1" >/dev/null 2>&1 && return 0
  echo "missing required command: $1" >&2
  exit 1
}
# Remove every temp dir recorded in BURROW_FLAKE_TMPDIRS (populated by
# burrow_prepare_flake_ref). Safe when the array is unset or empty: the
# ${VAR+set} guard avoids a nounset error under `set -u` (expanding
# ${#ARRAY[@]} on an unset array fails in bash < 4.4).
burrow_cleanup_flake_tmpdirs() {
  if [[ -z "${BURROW_FLAKE_TMPDIRS+set}" || "${#BURROW_FLAKE_TMPDIRS[@]}" -eq 0 ]]; then
    return
  fi
  rm -rf "${BURROW_FLAKE_TMPDIRS[@]}"
}
# Turn a local path into a clean flake ref (explicit flake URLs pass through).
# A filesystem path is copied — minus VCS state and build artifacts — into a
# temp dir under ~/.cache/burrow so Nix sees a pristine tree, and a
# "path:<copy>/repo" ref is printed.
# Side effect: appends the temp dir to BURROW_FLAKE_TMPDIRS so the caller can
# later run burrow_cleanup_flake_tmpdirs.
burrow_prepare_flake_ref() {
  local input="${1:-.}"
  # Already a flake-style URL? Emit unchanged.
  case "${input}" in
    path:*|git+*|github:*|tarball+*|http://*|https://*)
      printf '%s\n' "${input}"
      return 0
      ;;
  esac
  # Resolve the path to an absolute directory.
  local resolved
  resolved="$(cd "${input}" && pwd)"
  local cache_root="${HOME}/.cache/burrow"
  mkdir -p "${cache_root}"
  local copy_root
  copy_root="$(mktemp -d "${cache_root}/flake-XXXXXX")"
  mkdir -p "${copy_root}/repo"
  # Mirror the tree, excluding VCS metadata and heavyweight build outputs.
  rsync -a \
    --delete \
    --exclude '.git' \
    --exclude '.direnv' \
    --exclude 'result' \
    --exclude 'burrow.sock' \
    --exclude 'node_modules' \
    --exclude 'target' \
    --exclude 'build' \
    "${resolved}/" "${copy_root}/repo/"
  # Record the copy for later cleanup.
  BURROW_FLAKE_TMPDIRS+=("${copy_root}")
  printf 'path:%s/repo\n' "${copy_root}"
}
# Locate the usable disk-image file for a build output.
# $1 may be the image file itself or a store directory containing one; the
# first matching *.raw/*.img (plus compressed variants), sorted for
# determinism, is printed. Exits 1 with a stderr message when nothing matches.
burrow_resolve_image_artifact() {
  local target="$1"

  # A plain file is already the artifact.
  if [[ -f "${target}" ]]; then
    printf '%s\n' "${target}"
    return 0
  fi

  # Otherwise search the directory tree for image-like files.
  if [[ -d "${target}" ]]; then
    local match
    match="$(find "${target}" -type f \( -name '*.raw' -o -name '*.raw.*' -o -name '*.img' -o -name '*.img.*' \) | sort | head -n1)"
    if [[ -n "${match}" ]]; then
      printf '%s\n' "${match}"
      return 0
    fi
  fi

  echo "unable to locate disk image artifact under ${target}" >&2
  exit 1
}
# Infer a compression label from an artifact's file extension.
# Prints one of: bz2, xz, zstd — or an empty line for uncompressed images.
burrow_detect_compression() {
  case "$1" in
    *.bz2)        printf 'bz2\n' ;;
    *.xz)         printf 'xz\n' ;;
    *.zst|*.zstd) printf 'zstd\n' ;;
    *)            printf '\n' ;;
  esac
}

View file

@ -1,284 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Sync a Google OAuth source into Authentik and attach it to the default
# identification stage. Every knob is environment-driven with a default.
authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}"
bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}"
google_client_id="${AUTHENTIK_GOOGLE_CLIENT_ID:-}"
google_client_secret="${AUTHENTIK_GOOGLE_CLIENT_SECRET:-}"
source_slug="${AUTHENTIK_GOOGLE_SOURCE_SLUG:-google}"
source_name="${AUTHENTIK_GOOGLE_SOURCE_NAME:-Google}"
identification_stage_name="${AUTHENTIK_GOOGLE_IDENTIFICATION_STAGE_NAME:-default-authentication-identification}"
authentication_flow_slug="${AUTHENTIK_GOOGLE_AUTHENTICATION_FLOW_SLUG:-default-source-authentication}"
enrollment_flow_slug="${AUTHENTIK_GOOGLE_ENROLLMENT_FLOW_SLUG:-default-source-enrollment}"
login_mode="${AUTHENTIK_GOOGLE_LOGIN_MODE:-redirect}"
user_matching_mode="${AUTHENTIK_GOOGLE_USER_MATCHING_MODE:-email_link}"
policy_engine_mode="${AUTHENTIK_GOOGLE_POLICY_ENGINE_MODE:-any}"
google_account_map_json="${AUTHENTIK_GOOGLE_ACCOUNT_MAP_JSON:-[]}"
property_mapping_name="${AUTHENTIK_GOOGLE_PROPERTY_MAPPING_NAME:-Burrow Google Account Map}"
# Print usage; the heredoc is quoted so nothing inside expands.
usage() {
cat <<'EOF'
Usage: Scripts/authentik-sync-google-source.sh
Required environment:
AUTHENTIK_BOOTSTRAP_TOKEN
AUTHENTIK_GOOGLE_CLIENT_ID
AUTHENTIK_GOOGLE_CLIENT_SECRET
Optional environment:
AUTHENTIK_URL
AUTHENTIK_GOOGLE_SOURCE_SLUG
AUTHENTIK_GOOGLE_SOURCE_NAME
AUTHENTIK_GOOGLE_IDENTIFICATION_STAGE_NAME
AUTHENTIK_GOOGLE_AUTHENTICATION_FLOW_SLUG
AUTHENTIK_GOOGLE_ENROLLMENT_FLOW_SLUG
AUTHENTIK_GOOGLE_LOGIN_MODE promoted|redirect
AUTHENTIK_GOOGLE_USER_MATCHING_MODE identifier|email_link|email_deny|username_link|username_deny
AUTHENTIK_GOOGLE_POLICY_ENGINE_MODE all|any
AUTHENTIK_GOOGLE_ACCOUNT_MAP_JSON JSON array of alias mappings
AUTHENTIK_GOOGLE_PROPERTY_MAPPING_NAME
EOF
}
if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
usage
exit 0
fi
# The API bootstrap token is the only hard requirement.
if [[ -z "$bootstrap_token" ]]; then
echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2
exit 1
fi
# Missing or PENDING* placeholder Google credentials mean "not configured
# yet": exit 0 so provisioning pipelines do not fail on a fresh host.
if [[ -z "$google_client_id" || -z "$google_client_secret" || "$google_client_id" == PENDING* || "$google_client_secret" == PENDING* ]]; then
echo "Google OAuth credentials are not configured; skipping Authentik Google source sync." >&2
echo "Set Authorized redirect URI in Google to ${authentik_url}/source/oauth/callback/${source_slug}/" >&2
exit 0
fi
# The alias map must parse as a JSON array (it may be empty).
if ! printf '%s' "$google_account_map_json" | jq -e 'type == "array"' >/dev/null; then
echo "error: AUTHENTIK_GOOGLE_ACCOUNT_MAP_JSON must be a JSON array" >&2
exit 1
fi
# Only two login modes are understood; anything else degrades to redirect.
case "$login_mode" in
promoted|redirect) ;;
*)
echo "warning: unsupported AUTHENTIK_GOOGLE_LOGIN_MODE=$login_mode; falling back to redirect" >&2
login_mode="redirect"
;;
esac
# Call the Authentik API with the bootstrap token. An optional third
# argument is sent as a JSON request body.
api() {
  local method="$1" path="$2" payload="${3:-}"
  local curl_args=(-fsS -X "$method" -H "Authorization: Bearer ${bootstrap_token}")
  if [[ -n "$payload" ]]; then
    curl_args+=(-H "Content-Type: application/json" -d "$payload")
  fi
  curl "${curl_args[@]}" "${authentik_url}${path}"
}
# Poll the readiness endpoint every 2s, giving up after 90 attempts
# (roughly three minutes) with a fatal error.
wait_for_authentik() {
  local tries=0
  while (( tries < 90 )); do
    curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1 && return 0
    sleep 2
    tries=$((tries + 1))
  done
  echo "error: Authentik did not become ready at ${authentik_url}" >&2
  exit 1
}
# GET a list endpoint and print only the first line produced by the
# supplied jq filter (empty output when nothing matches).
lookup_single_result() {
  local endpoint="$1" filter="$2"
  api GET "$endpoint" | jq -r "$filter" | head -n1
}
# Block until the API answers, then resolve the flows and the
# identification stage the Google source will be wired into.
wait_for_authentik
flow_pk="$(
lookup_single_result \
"/api/v3/flows/instances/?slug=${authentication_flow_slug}" \
'.results[] | select(.slug != null) | .pk // empty'
)"
if [[ -z "$flow_pk" ]]; then
echo "error: could not resolve Authentik authentication flow slug ${authentication_flow_slug}" >&2
exit 1
fi
enrollment_flow_pk="$(
lookup_single_result \
"/api/v3/flows/instances/?slug=${enrollment_flow_slug}" \
'.results[] | select(.slug != null) | .pk // empty'
)"
if [[ -z "$enrollment_flow_pk" ]]; then
echo "error: could not resolve Authentik enrollment flow slug ${enrollment_flow_slug}" >&2
exit 1
fi
# Fetch the identification stage object by name; its pk is needed for the
# PATCH that attaches the source at the end of the script.
identification_stage="$(
api GET "/api/v3/stages/identification/" \
| jq -c --arg name "$identification_stage_name" '.results[] | select(.name == $name)'
)"
if [[ -z "$identification_stage" ]]; then
echo "error: could not resolve Authentik identification stage ${identification_stage_name}" >&2
exit 1
fi
stage_pk="$(printf '%s\n' "$identification_stage" | jq -r '.pk')"
# When an alias map is configured, create or update an OAuth property
# mapping whose expression rewrites username/email/name for known
# Google accounts; otherwise leave the mapping list empty.
property_mapping_payload='[]'
if [[ "$(printf '%s' "$google_account_map_json" | jq 'length')" -gt 0 ]]; then
# Turn the alias array into a dict literal keyed by lower-cased source
# email. jq's JSON object syntax is also valid Python, so the compact
# output can be interpolated straight into the expression below.
alias_map_python="$(
printf '%s' "$google_account_map_json" \
| jq -c '
map({
key: (.source_email | ascii_downcase),
value: {
username: .username,
email: .email,
name: .name
}
})
| from_entries
'
)"
# Unquoted heredoc: ${alias_map_python} expands here; the rest is the
# literal expression Authentik evaluates per login.
oauth_property_mapping_expression="$(
cat <<EOF
email = (info.get("email") or "").strip().lower()
alias_map = ${alias_map_python}
mapped = alias_map.get(email)
if not mapped:
return {}
result = {}
for key in ("username", "email", "name"):
value = mapped.get(key)
if value:
result[key] = value
return result
EOF
)"
oauth_property_mapping_payload="$(
jq -n \
--arg name "$property_mapping_name" \
--arg expression "$oauth_property_mapping_expression" \
'{
name: $name,
expression: $expression
}'
)"
# Upsert by name: PATCH the existing mapping or POST a new one.
existing_property_mapping="$(
api GET "/api/v3/propertymappings/source/oauth/?page_size=200" \
| jq -c --arg name "$property_mapping_name" '.results[]? | select(.name == $name)'
)"
if [[ -n "$existing_property_mapping" ]]; then
property_mapping_pk="$(printf '%s\n' "$existing_property_mapping" | jq -r '.pk')"
api PATCH "/api/v3/propertymappings/source/oauth/${property_mapping_pk}/" "$oauth_property_mapping_payload" >/dev/null
else
property_mapping_pk="$(
api POST "/api/v3/propertymappings/source/oauth/" "$oauth_property_mapping_payload" \
| jq -r '.pk // empty'
)"
fi
if [[ -z "${property_mapping_pk:-}" ]]; then
echo "error: Google OAuth property mapping did not return a primary key" >&2
exit 1
fi
# The source payload expects a JSON array of mapping pks.
property_mapping_payload="$(jq -cn --arg property_mapping_pk "$property_mapping_pk" '[$property_mapping_pk]')"
fi
# Build the full OAuth source definition for the Google provider, wiring
# in the flows, matching mode, and any property mappings resolved above.
oauth_source_payload="$(
jq -n \
--arg name "$source_name" \
--arg slug "$source_slug" \
--arg authentication_flow "$flow_pk" \
--arg enrollment_flow "$enrollment_flow_pk" \
--arg user_matching_mode "$user_matching_mode" \
--arg policy_engine_mode "$policy_engine_mode" \
--argjson user_property_mappings "$property_mapping_payload" \
--arg consumer_key "$google_client_id" \
--arg consumer_secret "$google_client_secret" \
'{
name: $name,
slug: $slug,
enabled: true,
promoted: true,
authentication_flow: $authentication_flow,
enrollment_flow: $enrollment_flow,
user_property_mappings: $user_property_mappings,
group_property_mappings: [],
policy_engine_mode: $policy_engine_mode,
user_matching_mode: $user_matching_mode,
provider_type: "google",
consumer_key: $consumer_key,
consumer_secret: $consumer_secret
}'
)"
# Upsert the source by slug: PATCH when one exists, otherwise POST.
existing_source="$(
api GET "/api/v3/sources/oauth/?slug=${source_slug}" \
| jq -c '.results[]?'
)"
if [[ -n "$existing_source" ]]; then
source_pk="$(printf '%s\n' "$existing_source" | jq -r '.pk')"
api PATCH "/api/v3/sources/oauth/${source_slug}/" "$oauth_source_payload" >/dev/null
else
source_pk="$(
api POST "/api/v3/sources/oauth/" "$oauth_source_payload" \
| jq -r '.pk // empty'
)"
fi
if [[ -z "$source_pk" ]]; then
echo "error: Google OAuth source did not return a primary key" >&2
exit 1
fi
# Patch the identification stage: in redirect mode the source replaces
# the whole source list and user fields are cleared (forced SSO); in
# promoted mode it is prepended while existing sources are kept.
stage_patch="$(
printf '%s\n' "$identification_stage" \
| jq -c \
--arg source_pk "$source_pk" \
--arg login_mode "$login_mode" '
.sources = (
if $login_mode == "redirect" then
[$source_pk]
else
([ $source_pk ] + ((.sources // []) | map(select(. != $source_pk))))
end
)
| .show_source_labels = true
| if $login_mode == "redirect" then
.user_fields = []
else
.
end
| {
sources,
show_source_labels,
user_fields
}'
)"
api PATCH "/api/v3/stages/identification/${stage_pk}/" "$stage_patch" >/dev/null
echo "Synced Authentik Google source ${source_slug} (${source_pk}) in ${login_mode} mode."

View file

@ -1,113 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# Print usage; the heredoc is quoted so nothing inside expands.
usage() {
cat <<'EOF'
Usage: Scripts/bootstrap-forge-intake.sh [options]
Copy the minimum Burrow forge bootstrap secrets onto the target host under
/var/lib/burrow/intake with the ownership expected by the NixOS services.
Options:
--host <user@host> SSH target (default: root@git.burrow.net)
--ssh-key <path> SSH private key used to reach the host
(default: intake/agent_at_burrow_net_ed25519)
--password-file <path> Forgejo admin bootstrap password file
(default: intake/forgejo_pass_contact_at_burrow_net.txt)
--agent-key-file <path> Agent SSH private key copied for runner bootstrap
(default: intake/agent_at_burrow_net_ed25519)
--no-verify Skip remote ls/stat verification after install
-h, --help Show this help text
EOF
}
# Defaults; every value is overridable via environment or CLI flags.
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
PASSWORD_FILE="${BURROW_FORGE_PASSWORD_FILE:-${REPO_ROOT}/intake/forgejo_pass_contact_at_burrow_net.txt}"
AGENT_KEY_FILE="${BURROW_FORGE_AGENT_KEY_FILE:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
VERIFY=1
# CLI parsing; unknown flags exit 64 (EX_USAGE).
while [[ $# -gt 0 ]]; do
case "$1" in
--host)
HOST="${2:?missing value for --host}"
shift 2
;;
--ssh-key)
SSH_KEY="${2:?missing value for --ssh-key}"
shift 2
;;
--password-file)
PASSWORD_FILE="${2:?missing value for --password-file}"
shift 2
;;
--agent-key-file)
AGENT_KEY_FILE="${2:?missing value for --agent-key-file}"
shift 2
;;
--no-verify)
VERIFY=0
shift
;;
-h|--help)
usage
exit 0
;;
*)
echo "unknown option: $1" >&2
usage >&2
exit 64
;;
esac
done
mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"
# Every local secret file must exist and be non-empty before we touch
# the remote host.
for path in "${SSH_KEY}" "${PASSWORD_FILE}" "${AGENT_KEY_FILE}"; do
if [[ ! -s "${path}" ]]; then
echo "required file missing or empty: ${path}" >&2
exit 1
fi
done
# Shared SSH options: pinned identity plus a dedicated known_hosts file so
# first contact is auto-accepted without polluting the user's defaults.
ssh_opts=(
-i "${SSH_KEY}"
-o IdentitiesOnly=yes
-o UserKnownHostsFile="${KNOWN_HOSTS_FILE}"
-o StrictHostKeyChecking=accept-new
)
# Stage the secrets in a remote temp dir that is always cleaned up, even
# on failure, via the EXIT trap.
remote_tmp="$(ssh "${ssh_opts[@]}" "${HOST}" "mktemp -d")"
cleanup() {
if [[ -n "${remote_tmp:-}" ]]; then
ssh "${ssh_opts[@]}" "${HOST}" "rm -rf '${remote_tmp}'" >/dev/null 2>&1 || true
fi
}
trap cleanup EXIT
scp "${ssh_opts[@]}" \
"${PASSWORD_FILE}" \
"${AGENT_KEY_FILE}" \
"${HOST}:${remote_tmp}/"
# Install with the ownership/modes the NixOS services expect. Note the
# $(basename ...) calls expand locally before the script is sent.
ssh "${ssh_opts[@]}" "${HOST}" "
set -euo pipefail
install -d -m 0755 /var/lib/burrow/intake
install -m 0400 -o forgejo -g forgejo '${remote_tmp}/$(basename "${PASSWORD_FILE}")' /var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt
install -m 0400 -o root -g root '${remote_tmp}/$(basename "${AGENT_KEY_FILE}")' /var/lib/burrow/intake/agent_at_burrow_net_ed25519
"
# Optional sanity listing of the installed files (disabled by --no-verify).
if [[ "${VERIFY}" -eq 1 ]]; then
ssh "${ssh_opts[@]}" "${HOST}" "
set -euo pipefail
ls -l \
/var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt \
/var/lib/burrow/intake/agent_at_burrow_net_ed25519
"
fi
echo "Burrow forge bootstrap intake sync complete (host=${HOST})."

View file

@ -1,177 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# Print usage; the heredoc is quoted so nothing inside expands.
usage() {
cat <<'EOF'
Usage: Scripts/check-forge-host.sh [options]
Run a post-boot verification pass against the Burrow forge host.
Options:
--host <user@host> SSH target (default: root@git.burrow.net)
--ssh-key <path> SSH private key (default: intake/agent_at_burrow_net_ed25519)
--expect-nsc Fail if forgejo-nsc services are not active
--expect-tailnet Fail if Authentik and Headscale services are not active
-h, --help Show this help text
EOF
}
# Defaults; the EXPECT_* flags tighten the remote checks below.
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
EXPECT_NSC=0
EXPECT_TAILNET=0
# CLI parsing; unknown flags exit 64 (EX_USAGE).
while [[ $# -gt 0 ]]; do
case "$1" in
--host)
HOST="${2:?missing value for --host}"
shift 2
;;
--ssh-key)
SSH_KEY="${2:?missing value for --ssh-key}"
shift 2
;;
--expect-nsc)
EXPECT_NSC=1
shift
;;
--expect-tailnet)
EXPECT_TAILNET=1
shift
;;
-h|--help)
usage
exit 0
;;
*)
echo "unknown option: $1" >&2
usage >&2
exit 64
;;
esac
done
mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"
if [[ ! -f "${SSH_KEY}" ]]; then
echo "forge SSH key not found: ${SSH_KEY}" >&2
exit 1
fi
# Run the verification script remotely over SSH. EXPECT_NSC/EXPECT_TAILNET
# are passed as environment assignments on the remote command line; the
# script itself is fed on stdin via a quoted heredoc (nothing expands
# locally). Remotely it: requires the base Forgejo/Caddy/bootstrap units
# to be healthy (active, or successful oneshots); reports NSC and tailnet
# units and fails on them only when the matching EXPECT flag is 1; lists
# /var/lib/burrow/intake; with EXPECT_TAILNET also checks the agenix
# secrets exist; and probes the local HTTP endpoints when curl is present.
ssh \
-i "${SSH_KEY}" \
-o IdentitiesOnly=yes \
-o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \
-o StrictHostKeyChecking=accept-new \
"${HOST}" \
EXPECT_NSC="${EXPECT_NSC}" \
EXPECT_TAILNET="${EXPECT_TAILNET}" \
'bash -s' <<'EOF'
set -euo pipefail
base_services=(
forgejo.service
caddy.service
burrow-forgejo-bootstrap.service
burrow-forgejo-runner-bootstrap.service
burrow-forgejo-runner.service
)
nsc_services=(
forgejo-nsc-dispatcher.service
forgejo-nsc-autoscaler.service
)
tailnet_services=(
burrow-authentik-runtime.service
burrow-authentik-ready.service
headscale.service
headscale-bootstrap.service
)
show_service() {
local service="$1"
systemctl show \
--no-pager \
--property Id \
--property LoadState \
--property UnitFileState \
--property ActiveState \
--property SubState \
--property Result \
"${service}"
}
service_is_healthy() {
local service="$1"
local active_state
local result
local unit_type
active_state="$(systemctl show --property ActiveState --value "${service}")"
result="$(systemctl show --property Result --value "${service}")"
unit_type="$(systemctl show --property Type --value "${service}")"
if [[ "${active_state}" == "active" ]]; then
return 0
fi
if [[ "${unit_type}" == "oneshot" && "${active_state}" == "inactive" && "${result}" == "success" ]]; then
return 0
fi
return 1
}
for service in "${base_services[@]}"; do
echo "== ${service} =="
show_service "${service}"
if ! service_is_healthy "${service}"; then
echo "required service is not active: ${service}" >&2
exit 1
fi
done
for service in "${nsc_services[@]}"; do
echo "== ${service} =="
show_service "${service}" || true
if [[ "${EXPECT_NSC}" == "1" && "$(systemctl is-active "${service}" 2>/dev/null || true)" != "active" ]]; then
echo "required NSC service is not active: ${service}" >&2
exit 1
fi
done
for service in "${tailnet_services[@]}"; do
echo "== ${service} =="
show_service "${service}" || true
if [[ "${EXPECT_TAILNET}" == "1" ]] && ! service_is_healthy "${service}"; then
echo "required tailnet service is not active: ${service}" >&2
exit 1
fi
done
echo "== intake =="
ls -l /var/lib/burrow/intake || true
if [[ "${EXPECT_TAILNET}" == "1" ]]; then
echo "== agenix =="
ls -l /run/agenix || true
test -s /run/agenix/burrowAuthentikEnv
test -s /run/agenix/burrowHeadscaleOidcClientSecret
fi
if command -v curl >/dev/null 2>&1; then
echo "== http-local =="
curl -fsS -o /dev/null -w 'forgejo_login %{http_code}\n' http://127.0.0.1:3000/user/login
curl -fsS -o /dev/null -H 'Host: burrow.net' -w 'burrow_root %{http_code}\n' http://127.0.0.1/
curl -fsS -o /dev/null -H 'Host: git.burrow.net' -w 'git_login %{http_code}\n' http://127.0.0.1/user/login
if [[ "${EXPECT_TAILNET}" == "1" ]]; then
curl -fsS -o /dev/null -H 'Host: auth.burrow.net' -w 'authentik_ready %{http_code}\n' http://127.0.0.1/-/health/ready/
curl -sS -o /dev/null -H 'Host: ts.burrow.net' -w 'headscale_root %{http_code}\n' http://127.0.0.1/ || true
fi
fi
EOF

View file

@ -1,165 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Print usage; the heredoc is quoted so nothing inside expands.
usage() {
cat <<'EOF'
Usage: Scripts/cloudflare-upsert-a-record.sh --zone <zone> --name <fqdn> --ipv4 <address> [options]
Upsert a DNS-only or proxied Cloudflare A record without putting the API token on
the process list.
Options:
--zone <zone> Cloudflare zone name, for example burrow.net
--name <fqdn> Fully-qualified DNS record name
--ipv4 <address> IPv4 address for the A record
--token-file <path> Cloudflare API token file
default: intake/cloudflare-token.txt
--ttl <seconds|auto> Record TTL, or auto
default: auto
--proxied <true|false> Whether to proxy through Cloudflare
default: false
-h, --help Show this help
EOF
}
# Defaults; --zone/--name/--ipv4 are mandatory (checked after parsing).
ZONE_NAME=""
RECORD_NAME=""
IPV4=""
TOKEN_FILE="intake/cloudflare-token.txt"
TTL_VALUE="auto"
PROXIED="false"
# CLI parsing; unknown flags exit 2.
while [[ $# -gt 0 ]]; do
case "$1" in
--zone)
ZONE_NAME="${2:?missing value for --zone}"
shift 2
;;
--name)
RECORD_NAME="${2:?missing value for --name}"
shift 2
;;
--ipv4)
IPV4="${2:?missing value for --ipv4}"
shift 2
;;
--token-file)
TOKEN_FILE="${2:?missing value for --token-file}"
shift 2
;;
--ttl)
TTL_VALUE="${2:?missing value for --ttl}"
shift 2
;;
--proxied)
PROXIED="${2:?missing value for --proxied}"
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
# Require the three identifying arguments.
if [[ -z "${ZONE_NAME}" || -z "${RECORD_NAME}" || -z "${IPV4}" ]]; then
  usage >&2
  exit 2
fi
if [[ ! -f "${TOKEN_FILE}" ]]; then
  echo "Cloudflare token file not found: ${TOKEN_FILE}" >&2
  exit 1
fi
# Validate the IPv4 shape AND each octet's range. The shape check alone
# accepted out-of-range addresses such as 999.1.1.1, which only failed
# later with a less helpful Cloudflare API error.
if [[ ! "${IPV4}" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
  echo "Invalid IPv4 address: ${IPV4}" >&2
  exit 1
fi
IFS='.' read -r -a ipv4_octets <<<"${IPV4}"
for octet in "${ipv4_octets[@]}"; do
  # 10# forces base-10 so leading zeros are not parsed as octal.
  if (( 10#${octet} > 255 )); then
    echo "Invalid IPv4 address: ${IPV4}" >&2
    exit 1
  fi
done
case "${PROXIED}" in
  true|false)
    ;;
  *)
    echo "--proxied must be true or false" >&2
    exit 1
    ;;
esac
# TTL is either the literal "auto" (Cloudflare API value 1) or a plain
# number of seconds.
case "${TTL_VALUE}" in
  auto)
    TTL_JSON=1
    ;;
  ''|*[!0-9]*)
    echo "--ttl must be a number of seconds or auto" >&2
    exit 1
    ;;
  *)
    TTL_JSON="${TTL_VALUE}"
    ;;
esac
# Read the token from the file so it never appears in `ps` output.
TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")"
if [[ -z "${TOKEN}" ]]; then
  echo "Cloudflare token file is empty: ${TOKEN_FILE}" >&2
  exit 1
fi
# Perform an authenticated Cloudflare v4 API call. An optional third
# argument is sent as the JSON request body.
cf_api() {
  local method="$1" path="$2" body="${3-}"
  local args=(
    -fsS
    -X "${method}"
    -H "Authorization: Bearer ${TOKEN}"
    -H "Content-Type: application/json"
  )
  if [[ -n "${body}" ]]; then
    args+=(--data "${body}")
  fi
  curl "${args[@]}" "https://api.cloudflare.com/client/v4${path}"
}
# Resolve the active zone id for the requested zone name.
zone_lookup="$(cf_api GET "/zones?name=${ZONE_NAME}&status=active")"
zone_id="$(jq -r '.result[0].id // empty' <<<"${zone_lookup}")"
if [[ -z "${zone_id}" ]]; then
echo "Active Cloudflare zone not found: ${ZONE_NAME}" >&2
exit 1
fi
# Build the record body once; used for both create (POST) and update (PUT).
payload="$(jq -cn \
--arg type "A" \
--arg name "${RECORD_NAME}" \
--arg content "${IPV4}" \
--argjson proxied "${PROXIED}" \
--argjson ttl "${TTL_JSON}" \
'{type: $type, name: $name, content: $content, proxied: $proxied, ttl: $ttl}')"
# Upsert: update the first existing A record for this name, else create.
record_lookup="$(cf_api GET "/zones/${zone_id}/dns_records?type=A&name=${RECORD_NAME}")"
record_id="$(jq -r '.result[0].id // empty' <<<"${record_lookup}")"
if [[ -n "${record_id}" ]]; then
result="$(cf_api PUT "/zones/${zone_id}/dns_records/${record_id}" "${payload}")"
action="updated"
else
result="$(cf_api POST "/zones/${zone_id}/dns_records" "${payload}")"
action="created"
fi
# Report the outcome, failing (halt_error) when the API says success=false.
jq -r --arg action "${action}" '
if .success != true then
.errors | tostring | halt_error(1)
else
"Cloudflare DNS " + $action + ": " + .result.name + " -> " + .result.content +
" (proxied=" + (.result.proxied | tostring) + ", ttl=" + (.result.ttl | tostring) + ")"
end
' <<<"${result}"

View file

@ -1,100 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# shellcheck source=Scripts/_burrow-flake.sh
# Shared helpers: flake-ref preparation and tmpdir cleanup.
source "${SCRIPT_DIR}/_burrow-flake.sh"
# Print usage; the heredoc is quoted so nothing inside expands.
usage() {
cat <<'EOF'
Usage: Scripts/forge-deploy.sh [--test|--switch] [--flake-attr <attr>] [--allow-dirty]
Standardized remote deploy path for the Burrow forge host.
Defaults:
--switch
--flake-attr burrow-forge
Environment:
BURROW_FORGE_HOST root@git.burrow.net
BURROW_FORGE_SSH_KEY intake/agent_at_burrow_net_ed25519
EOF
}
MODE="switch"
FLAKE_ATTR="burrow-forge"
ALLOW_DIRTY=0
# Array is populated by burrow_prepare_flake_ref; cleaned on exit.
BURROW_FLAKE_TMPDIRS=()
cleanup() {
burrow_cleanup_flake_tmpdirs
}
trap cleanup EXIT
# CLI parsing; unknown flags exit 2.
while [[ $# -gt 0 ]]; do
case "$1" in
--test)
MODE="test"
shift
;;
--switch)
MODE="switch"
shift
;;
--flake-attr)
FLAKE_ATTR="${2:?missing value for --flake-attr}"
shift 2
;;
--allow-dirty)
ALLOW_DIRTY=1
shift
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
# Deploy from the repo root; refuse dirty checkouts unless explicitly
# overridden (incident use only).
REPO_ROOT="$(git rev-parse --show-toplevel)"
cd "${REPO_ROOT}"
if [[ ${ALLOW_DIRTY} -ne 1 ]] && [[ -n "$(git status --short)" ]]; then
echo "Refusing to deploy from a dirty checkout. Commit first, or pass --allow-dirty for incident-only work." >&2
exit 1
fi
# Resolve target host and SSH key: env var first, then the intake copy,
# then the user's ~/.ssh fallback.
FORGE_HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
FORGE_SSH_KEY="${BURROW_FORGE_SSH_KEY:-}"
if [[ -z "${FORGE_SSH_KEY}" ]]; then
if [[ -f "${REPO_ROOT}/intake/agent_at_burrow_net_ed25519" ]]; then
FORGE_SSH_KEY="${REPO_ROOT}/intake/agent_at_burrow_net_ed25519"
else
FORGE_SSH_KEY="${HOME}/.ssh/agent_at_burrow_net_ed25519"
fi
fi
if [[ ! -f "${FORGE_SSH_KEY}" ]]; then
echo "Forge SSH key not found at ${FORGE_SSH_KEY}." >&2
echo "Set BURROW_FORGE_SSH_KEY or place the agent key in intake/." >&2
exit 1
fi
FORGE_KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
mkdir -p "$(dirname "${FORGE_KNOWN_HOSTS_FILE}")"
# NIX_SSHOPTS is how nixos-rebuild passes SSH options to build/target hosts.
export NIX_SSHOPTS="-i ${FORGE_SSH_KEY} -o IdentitiesOnly=yes -o UserKnownHostsFile=${FORGE_KNOWN_HOSTS_FILE} -o StrictHostKeyChecking=accept-new"
# Build on the forge host itself and activate there (test or switch).
flake_ref="$(burrow_prepare_flake_ref "${REPO_ROOT}")"
nix --extra-experimental-features "nix-command flakes" shell nixpkgs#nixos-rebuild -c \
nixos-rebuild "${MODE}" \
--flake "${flake_ref}#${FLAKE_ATTR}" \
--build-host "${FORGE_HOST}" \
--target-host "${FORGE_HOST}"

View file

@ -1,327 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# shellcheck source=Scripts/_burrow-flake.sh
# Shared helpers: flake-ref prep, artifact/compression detection, cleanup.
source "${SCRIPT_DIR}/_burrow-flake.sh"
DEFAULT_CONFIG="burrow-forge"
DEFAULT_FLAKE="."
DEFAULT_LOCATION="hel1"
DEFAULT_ARCHITECTURE="x86"
DEFAULT_TOKEN_FILE="${REPO_ROOT}/intake/hetzner-api-token.txt"
# Every knob is overridable via HCLOUD_* environment variables or flags.
CONFIG="${HCLOUD_IMAGE_CONFIG:-${DEFAULT_CONFIG}}"
FLAKE="${HCLOUD_IMAGE_FLAKE:-${DEFAULT_FLAKE}}"
LOCATION="${HCLOUD_IMAGE_LOCATION:-${DEFAULT_LOCATION}}"
ARCHITECTURE="${HCLOUD_IMAGE_ARCHITECTURE:-${DEFAULT_ARCHITECTURE}}"
TOKEN_FILE="${HCLOUD_TOKEN_FILE:-${DEFAULT_TOKEN_FILE}}"
DESCRIPTION="${HCLOUD_IMAGE_DESCRIPTION:-}"
UPLOAD_SERVER_TYPE="${HCLOUD_IMAGE_UPLOAD_SERVER_TYPE:-}"
UPLOAD_VERBOSE="${HCLOUD_IMAGE_UPLOAD_VERBOSE:-0}"
ARTIFACT_PATH_INPUT=""
OUTPUT_HASH=""
NO_UPDATE=0
BUILDER_SPEC="${HCLOUD_IMAGE_BUILDER_SPEC:-}"
EXTRA_LABELS=()
NIX_BUILD_FLAGS=()
BURROW_FLAKE_TMPDIRS=()
LOCAL_STORE_DIR=""
# Print usage; the heredoc is quoted so nothing inside expands.
usage() {
cat <<'EOF'
Usage: Scripts/hcloud-upload-nixos-image.sh [options]
Build a raw Burrow NixOS image and upload it into Hetzner Cloud as a snapshot.
Options:
--config <name> images.<name>-raw output to build (default: burrow-forge)
--flake <path> Flake path to build from (default: .)
--location <code> Hetzner location for the temporary upload server (default: hel1)
--architecture <x86|arm> CPU architecture of the image (default: x86)
--server-type <name> Hetzner server type for the temporary upload server
--token-file <path> Hetzner API token file (default: intake/hetzner-api-token.txt)
--artifact-path <path> Prebuilt raw image artifact to upload directly
--output-hash <hash> Stable hash label for --artifact-path uploads
--builder-spec <string> Complete builders string passed to nix build
--description <text> Description for the resulting snapshot
--upload-verbose <n> Pass -v N times to hcloud-upload-image
--label key=value Extra Hetzner image label (repeatable)
--nix-flag <arg> Extra argument passed to nix build (repeatable)
--no-update Reuse an existing snapshot with the same config/output hash
-h, --help Show this help text
EOF
}
# CLI parsing; repeatable flags append to arrays, unknown flags exit 64.
while [[ $# -gt 0 ]]; do
case "$1" in
--config)
CONFIG="${2:?missing value for --config}"
shift 2
;;
--flake)
FLAKE="${2:?missing value for --flake}"
shift 2
;;
--location)
LOCATION="${2:?missing value for --location}"
shift 2
;;
--architecture)
ARCHITECTURE="${2:?missing value for --architecture}"
shift 2
;;
--server-type)
UPLOAD_SERVER_TYPE="${2:?missing value for --server-type}"
shift 2
;;
--token-file)
TOKEN_FILE="${2:?missing value for --token-file}"
shift 2
;;
--artifact-path)
ARTIFACT_PATH_INPUT="${2:?missing value for --artifact-path}"
shift 2
;;
--output-hash)
OUTPUT_HASH="${2:?missing value for --output-hash}"
shift 2
;;
--builder-spec)
BUILDER_SPEC="${2:?missing value for --builder-spec}"
shift 2
;;
--description)
DESCRIPTION="${2:?missing value for --description}"
shift 2
;;
--upload-verbose)
UPLOAD_VERBOSE="${2:?missing value for --upload-verbose}"
shift 2
;;
--label)
EXTRA_LABELS+=("${2:?missing value for --label}")
shift 2
;;
--nix-flag)
NIX_BUILD_FLAGS+=("${2:?missing value for --nix-flag}")
shift 2
;;
--no-update)
NO_UPDATE=1
shift
;;
-h|--help)
usage
exit 0
;;
*)
echo "unknown option: $1" >&2
usage >&2
exit 64
;;
esac
done
# Remove flake copies and the throwaway local nix store on any exit.
cleanup() {
burrow_cleanup_flake_tmpdirs
if [[ -n "${LOCAL_STORE_DIR}" && -d "${LOCAL_STORE_DIR}" ]]; then
rm -rf "${LOCAL_STORE_DIR}" >/dev/null 2>&1 || true
fi
}
trap cleanup EXIT
# Fail fast on missing tooling before any expensive work starts.
burrow_require_cmd nix
burrow_require_cmd curl
burrow_require_cmd python3
burrow_require_cmd rsync
if [[ ! -f "${TOKEN_FILE}" ]]; then
echo "Hetzner API token file not found: ${TOKEN_FILE}" >&2
exit 1
fi
# Read the token from the file so it never appears on the process list.
HCLOUD_TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")"
if [[ -z "${HCLOUD_TOKEN}" ]]; then
echo "Hetzner API token file is empty: ${TOKEN_FILE}" >&2
exit 1
fi
flake_ref="$(burrow_prepare_flake_ref "${FLAKE}")"
if [[ -z "${DESCRIPTION}" ]]; then
DESCRIPTION="Burrow ${CONFIG} $(date -u +%Y-%m-%dT%H:%M:%SZ)"
fi
printf 'Building raw image for %s from %s\n' "${CONFIG}" "${flake_ref}" >&2
# When a remote builder spec is used without an explicit store, build into
# a scratch local store so the system store is not required to be writable.
if [[ -z "${ARTIFACT_PATH_INPUT}" && -n "${BUILDER_SPEC}" && -z "${NIX_BUILD_STORE:-}" ]]; then
mkdir -p "${HOME}/.cache/burrow"
LOCAL_STORE_DIR="$(mktemp -d "${HOME}/.cache/burrow/local-store-XXXXXX")"
fi
# Resolve the artifact to upload: either a prebuilt file passed via
# --artifact-path (hashed with sha256 unless --output-hash was given), or
# a fresh `nix build` of images.<CONFIG>-raw whose store-path hash becomes
# the stable output hash label.
artifact_path=""
compression=""
output_hash="${OUTPUT_HASH}"
if [[ -n "${ARTIFACT_PATH_INPUT}" ]]; then
artifact_path="${ARTIFACT_PATH_INPUT}"
if [[ ! -f "${artifact_path}" ]]; then
echo "artifact path does not exist: ${artifact_path}" >&2
exit 1
fi
compression="$(burrow_detect_compression "${artifact_path}")"
if [[ -z "${output_hash}" ]]; then
# sha256sum on Linux, shasum fallback on macOS.
if command -v sha256sum >/dev/null 2>&1; then
output_hash="$(sha256sum "${artifact_path}" | awk '{print $1}')"
else
output_hash="$(shasum -a 256 "${artifact_path}" | awk '{print $1}')"
fi
fi
else
nix_build_cmd=(
nix
--extra-experimental-features
"nix-command flakes"
build
"${flake_ref}#images.${CONFIG}-raw"
--no-link
--print-out-paths
)
if [[ -n "${BUILDER_SPEC}" ]]; then
nix_build_cmd+=(--builders "${BUILDER_SPEC}")
fi
if [[ -n "${NIX_BUILD_STORE:-}" ]]; then
nix_build_cmd+=(--store "${NIX_BUILD_STORE}")
elif [[ -n "${LOCAL_STORE_DIR}" ]]; then
nix_build_cmd+=(--store "${LOCAL_STORE_DIR}")
fi
if [[ "${#NIX_BUILD_FLAGS[@]}" -gt 0 ]]; then
nix_build_cmd+=("${NIX_BUILD_FLAGS[@]}")
fi
build_output=""
if ! build_output="$("${nix_build_cmd[@]}" 2>&1)"; then
printf '%s\n' "${build_output}" >&2
exit 1
fi
# The last line of --print-out-paths output is the built store path.
store_path="$(printf '%s\n' "${build_output}" | tail -n1)"
if [[ -z "${store_path}" ]]; then
echo "nix build did not return a store path" >&2
printf '%s\n' "${build_output}" >&2
exit 1
fi
artifact_path="$(burrow_resolve_image_artifact "${store_path}")"
compression="$(burrow_detect_compression "${artifact_path}")"
# /nix/store/<hash>-name → keep only the <hash> prefix as the label.
output_hash="$(basename "${store_path}")"
output_hash="${output_hash%%-*}"
fi
# Assemble the Hetzner image labels: the burrow identity labels first,
# then any user-supplied --label pairs, joined comma-separated.
label_args=("burrow.nixos-config=${CONFIG}" "burrow.nixos-output-hash=${output_hash}")
if [[ "${#EXTRA_LABELS[@]}" -gt 0 ]]; then
  label_args+=("${EXTRA_LABELS[@]}")
fi
label_csv=""
for label in "${label_args[@]}"; do
  if [[ -n "${label_csv}" ]]; then
    label_csv+=","
  fi
  label_csv+="${label}"
done
# Print the id of the newest Hetzner snapshot labeled with this config and
# output hash (empty when none). Token and selector are passed via the
# environment so they stay off the process list; the quoted PY heredoc is
# the exact script fed to python3.
find_existing_image() {
HCLOUD_TOKEN="${HCLOUD_TOKEN}" \
BURROW_LABEL_SELECTOR="burrow.nixos-config=${CONFIG},burrow.nixos-output-hash=${output_hash}" \
python3 - <<'PY'
import json
import os
import sys
import urllib.parse
import urllib.request
selector = urllib.parse.quote(os.environ["BURROW_LABEL_SELECTOR"], safe=",=")
req = urllib.request.Request(
    f"https://api.hetzner.cloud/v1/images?type=snapshot&label_selector={selector}",
    headers={"Authorization": f"Bearer {os.environ['HCLOUD_TOKEN']}"},
)
with urllib.request.urlopen(req, timeout=30) as resp:
    data = json.load(resp)
images = sorted(data.get("images", []), key=lambda item: item.get("created") or "")
if images:
    print(images[-1]["id"])
PY
}
# With --no-update, short-circuit when a snapshot with the same
# config/output hash already exists; print its id and stop.
if [[ "${NO_UPDATE}" -eq 1 ]]; then
existing_id="$(find_existing_image || true)"
if [[ -n "${existing_id}" ]]; then
printf 'Reusing existing Hetzner snapshot %s for %s\n' "${existing_id}" "${CONFIG}" >&2
printf '%s\n' "${existing_id}"
exit 0
fi
fi
# Resolve the hcloud-upload-image binary: explicit override first,
# otherwise build it from the flake.
uploader_bin="${HCLOUD_UPLOAD_IMAGE_BIN:-}"
if [[ -z "${uploader_bin}" ]]; then
uploader_build_output="$(
nix --extra-experimental-features "nix-command flakes" build \
"${flake_ref}#hcloud-upload-image" \
--no-link \
--print-out-paths 2>&1
)" || {
printf '%s\n' "${uploader_build_output}" >&2
exit 1
}
uploader_bin="$(printf '%s\n' "${uploader_build_output}" | tail -n1)/bin/hcloud-upload-image"
fi
if [[ ! -x "${uploader_bin}" ]]; then
echo "unable to resolve an executable hcloud-upload-image binary; set HCLOUD_UPLOAD_IMAGE_BIN explicitly" >&2
exit 1
fi
# Assemble the upload command: optional -v flags, then either an explicit
# upload server type or just the target architecture, plus compression
# when the artifact name implied one.
upload_cmd=(
"${uploader_bin}"
)
if [[ "${UPLOAD_VERBOSE}" =~ ^[0-9]+$ ]] && [[ "${UPLOAD_VERBOSE}" -gt 0 ]]; then
for _ in $(seq 1 "${UPLOAD_VERBOSE}"); do
upload_cmd+=(-v)
done
fi
upload_cmd+=(
upload
--image-path "${artifact_path}"
--location "${LOCATION}"
--description "${DESCRIPTION}"
--labels "${label_csv}"
)
if [[ -n "${UPLOAD_SERVER_TYPE}" ]]; then
upload_cmd+=(--server-type "${UPLOAD_SERVER_TYPE}")
else
upload_cmd+=(--architecture "${ARCHITECTURE}")
fi
if [[ -n "${compression}" ]]; then
upload_cmd+=(--compression "${compression}")
fi
printf 'Uploading %s to Hetzner Cloud via %s\n' "${artifact_path}" "${uploader_bin}" >&2
HCLOUD_TOKEN="${HCLOUD_TOKEN}" "${upload_cmd[@]}" >&2
# Poll (up to ~2 minutes) for the snapshot to become visible by label,
# then print its id as the script's sole stdout output.
image_id=""
for _ in $(seq 1 24); do
image_id="$(find_existing_image || true)"
if [[ -n "${image_id}" ]]; then
break
fi
sleep 5
done
if [[ -z "${image_id}" ]]; then
echo "failed to locate uploaded Hetzner snapshot after upload completed" >&2
exit 1
fi
printf '%s\n' "${image_id}"

View file

@ -1,284 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# Print usage; the heredoc is quoted so nothing inside expands.
usage() {
cat <<'EOF'
Usage: Scripts/hetzner-forge.sh [show|create|delete|recreate|build-image|create-from-image|recreate-from-image] [options]
Manage the Burrow forge server and its Hetzner snapshot lifecycle.
Defaults:
action: show
server-name: burrow-forge
server-type: ccx23
location: hel1
image: ubuntu-24.04
ssh keys: contact@burrow.net,agent@burrow.net
Options:
--server-name <name> Server name to manage.
--server-type <type> Hetzner server type.
--location <code> Hetzner location.
--image <name|id> Image used at create time.
--config <name> Burrow image config name for snapshot lookup/build (default: burrow-forge).
--ssh-key <name> SSH key name to attach. Repeatable.
--token-file <path> Hetzner API token file.
--flake <path> Flake path used by image-build actions (default: .)
--upload-location <code> Hetzner location used for image upload (default: same as --location)
--yes Required for delete and recreate.
-h, --help Show this help text.
Environment:
HCLOUD_TOKEN_FILE Defaults to intake/hetzner-api-token.txt
EOF
}
# Defaults; destructive actions additionally require --yes (checked later).
ACTION="show"
SERVER_NAME="burrow-forge"
SERVER_TYPE="ccx23"
LOCATION="hel1"
IMAGE="ubuntu-24.04"
CONFIG="burrow-forge"
FLAKE="."
UPLOAD_LOCATION=""
TOKEN_FILE="${HCLOUD_TOKEN_FILE:-intake/hetzner-api-token.txt}"
YES=0
SSH_KEYS=("contact@burrow.net" "agent@burrow.net")
# An optional leading positional argument selects the action.
if [[ $# -gt 0 ]]; then
case "$1" in
show|create|delete|recreate|build-image|create-from-image|recreate-from-image)
ACTION="$1"
shift
;;
esac
fi
# CLI parsing; --ssh-key appends, unknown flags exit 2.
while [[ $# -gt 0 ]]; do
case "$1" in
--server-name)
SERVER_NAME="${2:?missing value for --server-name}"
shift 2
;;
--server-type)
SERVER_TYPE="${2:?missing value for --server-type}"
shift 2
;;
--location)
LOCATION="${2:?missing value for --location}"
shift 2
;;
--image)
IMAGE="${2:?missing value for --image}"
shift 2
;;
--config)
CONFIG="${2:?missing value for --config}"
shift 2
;;
--ssh-key)
SSH_KEYS+=("${2:?missing value for --ssh-key}")
shift 2
;;
--token-file)
TOKEN_FILE="${2:?missing value for --token-file}"
shift 2
;;
--flake)
FLAKE="${2:?missing value for --flake}"
shift 2
;;
--upload-location)
UPLOAD_LOCATION="${2:?missing value for --upload-location}"
shift 2
;;
--yes)
YES=1
shift
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
if [[ ! -f "${TOKEN_FILE}" ]]; then
echo "Hetzner API token file not found: ${TOKEN_FILE}" >&2
exit 1
fi
# Upload location defaults to the server's location.
if [[ -z "${UPLOAD_LOCATION}" ]]; then
UPLOAD_LOCATION="${LOCATION}"
fi
# Destructive actions require an explicit --yes confirmation.
if [[ "${ACTION}" == "delete" || "${ACTION}" == "recreate" || "${ACTION}" == "recreate-from-image" ]] && [[ ${YES} -ne 1 ]]; then
echo "--yes is required for ${ACTION}" >&2
exit 1
fi
# Print the id of the newest Hetzner snapshot labeled with this Burrow
# config (empty when none). Token and config are passed via the
# environment so they stay off the process list; the quoted PY heredoc is
# the exact script fed to python3.
latest_snapshot_id() {
HCLOUD_TOKEN="$(tr -d '\r\n' < "${TOKEN_FILE}")" \
BURROW_CONFIG="${CONFIG}" \
python3 - <<'PY'
import json
import os
import urllib.parse
import urllib.request
selector = urllib.parse.quote(f"burrow.nixos-config={os.environ['BURROW_CONFIG']}", safe=",=")
req = urllib.request.Request(
    f"https://api.hetzner.cloud/v1/images?type=snapshot&label_selector={selector}",
    headers={"Authorization": f"Bearer {os.environ['HCLOUD_TOKEN']}"},
)
with urllib.request.urlopen(req, timeout=30) as resp:
    data = json.load(resp)
images = sorted(data.get("images", []), key=lambda item: item.get("created") or "")
if images:
    print(images[-1]["id"])
PY
}
# build-image delegates entirely to the Namespace build/upload helper and
# never returns (exec replaces this process).
if [[ "${ACTION}" == "build-image" ]]; then
exec "${SCRIPT_DIR}/nsc-build-and-upload-image.sh" \
--config "${CONFIG}" \
--flake "${FLAKE}" \
--location "${UPLOAD_LOCATION}" \
--upload-server-type "${SERVER_TYPE}" \
--token-file "${TOKEN_FILE}"
fi
if [[ "${ACTION}" == "create-from-image" || "${ACTION}" == "recreate-from-image" ]]; then
if [[ "${IMAGE}" == "ubuntu-24.04" ]]; then
IMAGE="$(latest_snapshot_id)"
fi
if [[ -z "${IMAGE}" ]]; then
echo "No Burrow snapshot found for config ${CONFIG}. Run build-image first." >&2
exit 1
fi
if [[ "${ACTION}" == "create-from-image" ]]; then
ACTION="create"
else
ACTION="recreate"
fi
fi
# Join the configured SSH key names into the comma-separated list expected
# by the Hetzner API payload (BURROW_HCLOUD_SSH_KEYS below).
ssh_keys_csv=""
for entry in "${SSH_KEYS[@]}"; do
  if [[ -z "${ssh_keys_csv}" ]]; then
    ssh_keys_csv="${entry}"
  else
    ssh_keys_csv="${ssh_keys_csv},${entry}"
  fi
done
# Hand the remaining server management work to an inline Python program;
# all parameters cross the boundary via BURROW_HCLOUD_* environment
# variables (the 'PY' delimiter is quoted, so nothing expands locally).
export BURROW_HCLOUD_ACTION="${ACTION}"
export BURROW_HCLOUD_SERVER_NAME="${SERVER_NAME}"
export BURROW_HCLOUD_SERVER_TYPE="${SERVER_TYPE}"
export BURROW_HCLOUD_LOCATION="${LOCATION}"
export BURROW_HCLOUD_IMAGE="${IMAGE}"
export BURROW_HCLOUD_TOKEN_FILE="${TOKEN_FILE}"
export BURROW_HCLOUD_SSH_KEYS="${ssh_keys_csv}"
# The Python program implements show/create/delete/recreate against the
# Hetzner Cloud API and prints a JSON summary of the affected server.
python3 - <<'PY'
import json
import os
import sys
from pathlib import Path
import requests
base = "https://api.hetzner.cloud/v1"
action = os.environ["BURROW_HCLOUD_ACTION"]
server_name = os.environ["BURROW_HCLOUD_SERVER_NAME"]
server_type = os.environ["BURROW_HCLOUD_SERVER_TYPE"]
location = os.environ["BURROW_HCLOUD_LOCATION"]
image = os.environ["BURROW_HCLOUD_IMAGE"]
token = Path(os.environ["BURROW_HCLOUD_TOKEN_FILE"]).read_text(encoding="utf-8").strip()
ssh_keys = [key for key in os.environ["BURROW_HCLOUD_SSH_KEYS"].split(",") if key]
session = requests.Session()
session.headers.update({"Authorization": f"Bearer {token}", "Content-Type": "application/json"})
def request(method: str, path: str, **kwargs) -> requests.Response:
    response = session.request(method, f"{base}{path}", timeout=30, **kwargs)
    response.raise_for_status()
    return response
def find_server():
    response = request("GET", "/servers", params={"name": server_name})
    data = response.json()
    for server in data.get("servers", []):
        if server.get("name") == server_name:
            return server
    return None
def summarize(server):
    ipv4 = (((server.get("public_net") or {}).get("ipv4")) or {}).get("ip")
    image_name = ((server.get("image") or {}).get("name")) or ""
    summary = {
        "id": server.get("id"),
        "name": server.get("name"),
        "status": server.get("status"),
        "server_type": ((server.get("server_type") or {}).get("name")),
        "location": ((server.get("location") or {}).get("name")),
        "image": image_name,
        "ipv4": ipv4,
        "created": server.get("created"),
    }
    print(json.dumps(summary, indent=2))
server = find_server()
if action == "show":
    if server is None:
        print(json.dumps({"name": server_name, "present": False}, indent=2))
    else:
        summarize(server)
    sys.exit(0)
if action == "delete":
    if server is None:
        print(json.dumps({"name": server_name, "deleted": False, "reason": "not found"}, indent=2))
        sys.exit(0)
    request("DELETE", f"/servers/{server['id']}")
    print(json.dumps({"name": server_name, "deleted": True, "id": server["id"]}, indent=2))
    sys.exit(0)
if action == "recreate" and server is not None:
    request("DELETE", f"/servers/{server['id']}")
    server = None
if action in {"create", "recreate"}:
    if server is not None:
        summarize(server)
        sys.exit(0)
    payload = {
        "name": server_name,
        "server_type": server_type,
        "location": location,
        "image": image,
        "ssh_keys": ssh_keys,
        "labels": {
            "project": "burrow",
            "role": "forge",
        },
    }
    response = request("POST", "/servers", json=payload)
    created = response.json()["server"]
    summarize(created)
    sys.exit(0)
raise SystemExit(f"unsupported action: {action}")
PY

View file

@ -1,542 +0,0 @@
#!/usr/bin/env bash
# Build the Burrow raw disk image on a temporary Namespace builder and
# upload the artifact to Hetzner Cloud as a snapshot.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# shellcheck source=Scripts/_burrow-flake.sh
source "${SCRIPT_DIR}/_burrow-flake.sh"
# Build parameters; each is overridable via the listed environment variable.
CONFIG="${HCLOUD_IMAGE_CONFIG:-burrow-forge}"
FLAKE="${HCLOUD_IMAGE_FLAKE:-.}"
LOCATION="${HCLOUD_IMAGE_LOCATION:-hel1}"
TOKEN_FILE="${HCLOUD_TOKEN_FILE:-${REPO_ROOT}/intake/hetzner-api-token.txt}"
# Namespace (nsc) builder parameters.
NSC_SSH_HOST="${NSC_SSH_HOST:-ssh.ord2.namespace.so}"
NSC_MACHINE_TYPE="${NSC_MACHINE_TYPE:-linux/amd64:32x64}"
NSC_BUILDER_DURATION="${NSC_BUILDER_DURATION:-4h}"
NSC_BUILDER_JOBS="${NSC_BUILDER_JOBS:-32}"
NSC_BUILDER_FEATURES="${NSC_BUILDER_FEATURES:-kvm,big-parallel}"
NSC_BIN="${NSC_BIN:-}"
REMOTE_COMPRESSION="${HCLOUD_IMAGE_REMOTE_COMPRESSION:-auto}"
UPLOAD_SERVER_TYPE="${HCLOUD_IMAGE_UPLOAD_SERVER_TYPE:-}"
KEEP_TMPDIR="${HCLOUD_IMAGE_KEEP_TMPDIR:-0}"
# Accumulators filled by the option parser and helpers below.
NO_UPDATE=0
NIX_BUILD_FLAGS=()
EXTRA_LABELS=()
BURROW_FLAKE_TMPDIRS=()
BUILDER_ID=""
# Print CLI usage to stdout.
usage() {
cat <<'EOF'
Usage: Scripts/nsc-build-and-upload-image.sh [options]
Create a temporary Namespace Linux builder, build the Burrow raw image on it,
and upload the resulting artifact to Hetzner Cloud.
Options:
--config <name> images.<name>-raw output to build (default: burrow-forge)
--flake <path> Flake path to build from (default: .)
--location <code> Hetzner upload location (default: hel1)
--token-file <path> Hetzner API token file (default: intake/hetzner-api-token.txt)
--machine-type <type> Namespace machine type (default: linux/amd64:32x64)
--ssh-host <host> Namespace SSH endpoint (default: ssh.ord2.namespace.so)
--duration <ttl> Namespace builder lifetime (default: 4h)
--builder-jobs <n> Nix builder job count advertised to the local client
--builder-features <s> Comma-separated Nix system features (default: "kvm,big-parallel")
--remote-compression <mode>
Compress raw/image artifacts on the Namespace builder
before copy-back. Modes: auto, none, xz, zstd
(default: auto)
--upload-server-type <name>
Hetzner server type for the temporary upload host
--label key=value Extra Hetzner snapshot label (repeatable)
--nix-flag <arg> Extra argument passed to nix build (repeatable)
--no-update Reuse an existing snapshot with the same config/output hash
-h, --help Show this help text
EOF
}
# Parse options; value-taking flags abort via ${2:?...} when the value is
# missing. --label and --nix-flag are repeatable and accumulate.
while [[ $# -gt 0 ]]; do
case "$1" in
--config)
CONFIG="${2:?missing value for --config}"
shift 2
;;
--flake)
FLAKE="${2:?missing value for --flake}"
shift 2
;;
--location)
LOCATION="${2:?missing value for --location}"
shift 2
;;
--token-file)
TOKEN_FILE="${2:?missing value for --token-file}"
shift 2
;;
--machine-type)
NSC_MACHINE_TYPE="${2:?missing value for --machine-type}"
shift 2
;;
--ssh-host)
NSC_SSH_HOST="${2:?missing value for --ssh-host}"
shift 2
;;
--duration)
NSC_BUILDER_DURATION="${2:?missing value for --duration}"
shift 2
;;
--builder-jobs)
NSC_BUILDER_JOBS="${2:?missing value for --builder-jobs}"
shift 2
;;
--builder-features)
NSC_BUILDER_FEATURES="${2:?missing value for --builder-features}"
shift 2
;;
--remote-compression)
REMOTE_COMPRESSION="${2:?missing value for --remote-compression}"
shift 2
;;
--upload-server-type)
UPLOAD_SERVER_TYPE="${2:?missing value for --upload-server-type}"
shift 2
;;
--label)
EXTRA_LABELS+=("${2:?missing value for --label}")
shift 2
;;
--nix-flag)
NIX_BUILD_FLAGS+=("${2:?missing value for --nix-flag}")
shift 2
;;
--no-update)
NO_UPDATE=1
shift
;;
-h|--help)
usage
exit 0
;;
*)
echo "unknown option: $1" >&2
usage >&2
exit 64
;;
esac
done
# EXIT handler: best-effort teardown of the Namespace builder, temporary
# flake checkouts, and (unless kept) the local scratch directory.
cleanup() {
  if [[ -n "${NSC_BIN}" && -n "${BUILDER_ID}" ]]; then
    "${NSC_BIN}" destroy "${BUILDER_ID}" --force >/dev/null 2>&1 || true
  fi
  burrow_cleanup_flake_tmpdirs
  # Preserve the scratch directory when the caller asked to keep it.
  if [[ "${KEEP_TMPDIR}" == "1" ]]; then
    return 0
  fi
  if [[ -n "${TMPDIR_BURROW_NSC:-}" && -d "${TMPDIR_BURROW_NSC}" ]]; then
    rm -rf "${TMPDIR_BURROW_NSC}"
  fi
}
trap cleanup EXIT
# Verify required tooling before doing anything expensive.
burrow_require_cmd nix
burrow_require_cmd curl
burrow_require_cmd python3
burrow_require_cmd ssh
burrow_require_cmd ssh-keygen
burrow_require_cmd ssh-keyscan
burrow_require_cmd tar
# Normalise the flake argument into a reference usable by nix build.
flake_ref="$(burrow_prepare_flake_ref "${FLAKE}")"
# Resolve the nsc binary: honour an explicit NSC_BIN, otherwise build the
# flake's #nsc output and use the binary from its store path.
if [[ -z "${NSC_BIN}" ]]; then
nsc_build_output="$(
nix --extra-experimental-features "nix-command flakes" build \
"${flake_ref}#nsc" \
--no-link \
--print-out-paths 2>&1
)" || {
printf '%s\n' "${nsc_build_output}" >&2
exit 1
}
# The store path is the last line of the captured build output.
NSC_BIN="$(printf '%s\n' "${nsc_build_output}" | tail -n1)/bin/nsc"
fi
if [[ ! -x "${NSC_BIN}" ]]; then
echo "unable to resolve an executable nsc binary; set NSC_BIN explicitly" >&2
exit 1
fi
# Persist a Namespace session token handed in via the environment (CI use).
if [[ -n "${NSC_SESSION:-}" && ! -f "${HOME}/.ns/session" ]]; then
mkdir -p "${HOME}/.ns"
printf '%s\n' "${NSC_SESSION}" > "${HOME}/.ns/session"
chmod 600 "${HOME}/.ns/session"
fi
# Confirm we are logged in before creating billable resources.
"${NSC_BIN}" auth check-login --duration 20m >/dev/null
"${NSC_BIN}" version >/dev/null || true
# Scratch directory for the throwaway SSH identity and builder id.
# mktemp does not create parent directories, so ensure the cache directory
# exists first (otherwise this fails on machines without ~/.cache/burrow).
mkdir -p "${HOME}/.cache/burrow"
TMPDIR_BURROW_NSC="$(mktemp -d "${HOME}/.cache/burrow/nsc-XXXXXX")"
ssh_key="${TMPDIR_BURROW_NSC}/builder"
known_hosts="${TMPDIR_BURROW_NSC}/known_hosts"
id_file="${TMPDIR_BURROW_NSC}/builder.id"
# Dedicated ed25519 keypair used only for this builder.
ssh-keygen -q -t ed25519 -N "" -f "${ssh_key}"
# Pin the Namespace SSH endpoint's host key up front so ssh_base below can
# use strict host-key checking.
ssh-keyscan -H "${NSC_SSH_HOST}" > "${known_hosts}"
ssh_base=(
ssh
-i "${ssh_key}"
-o UserKnownHostsFile="${known_hosts}"
-o StrictHostKeyChecking=yes
)
# Poll SSH reachability of the builder: up to 30 attempts, 5 seconds apart.
# Returns 0 as soon as a connection succeeds, 1 after exhausting attempts.
wait_for_ssh() {
  local instance_id="$1"
  local attempt=0
  while (( attempt < 30 )); do
    if "${ssh_base[@]}" -q "${instance_id}@${NSC_SSH_HOST}" true >/dev/null 2>&1; then
      return 0
    fi
    sleep 5
    attempt=$((attempt + 1))
  done
  return 1
}
# Install and start Nix on the freshly created builder (idempotent), then
# wait for the nix-daemon socket to appear before returning. The remote
# script runs verbatim (heredoc delimiter is quoted).
configure_builder() {
local instance_id="$1"
"${ssh_base[@]}" "${instance_id}@${NSC_SSH_HOST}" <<'EOF'
set -euo pipefail
if ! command -v nix >/dev/null 2>&1; then
curl -fsSL https://install.determinate.systems/nix | sh -s -- install linux --determinate --init none --no-confirm
fi
if [ -e /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh ]; then
. /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
fi
mkdir -p /etc/nix
cat <<CFG >/etc/nix/nix.conf
build-users-group =
trusted-users = root $USER
auto-optimise-store = true
substituters = https://cache.nixos.org
builders-use-substitutes = true
CFG
mkdir -p /nix/var/nix/daemon-socket
if ! pgrep -x nix-daemon >/dev/null 2>&1; then
nohup nix-daemon >/dev/null 2>&1 </dev/null &
fi
for _ in $(seq 1 120); do
if [ -S /nix/var/nix/daemon-socket/socket ]; then
exit 0
fi
if ! pgrep -x nix-daemon >/dev/null 2>&1; then
nohup nix-daemon >/dev/null 2>&1 </dev/null &
fi
sleep 1
done
echo "nix-daemon socket never appeared" >&2
exit 1
EOF
}
printf 'Creating temporary Namespace builder (%s)\n' "${NSC_MACHINE_TYPE}" >&2
# Create the builder; its instance id is written to id_file by --output_to.
"${NSC_BIN}" create \
--bare \
--machine_type "${NSC_MACHINE_TYPE}" \
--ssh_key "${ssh_key}.pub" \
--duration "${NSC_BUILDER_DURATION}" \
--label "burrow=true" \
--label "purpose=hetzner-image-build" \
--output_to "${id_file}" \
>/dev/null
BUILDER_ID="$(tr -d '\r\n' < "${id_file}")"
if [[ -z "${BUILDER_ID}" ]]; then
echo "nsc create did not return a builder id" >&2
exit 1
fi
printf 'Waiting for Namespace builder %s\n' "${BUILDER_ID}" >&2
wait_for_ssh "${BUILDER_ID}"
configure_builder "${BUILDER_ID}" >&2
# Remote paths used by the build helpers below.
remote_root="burrow-image-build-${BUILDER_ID}"
remote_flake_path="./${remote_root}"
local_flake_dir="${flake_ref#path:}"
remote_build_stdout="/tmp/burrow-image-build-${BUILDER_ID}.stdout"
remote_build_stderr="/tmp/burrow-image-build-${BUILDER_ID}.stderr"
printf 'Syncing flake to Namespace builder %s\n' "${BUILDER_ID}" >&2
# Stream the flake checkout to the builder as a tar pipe over SSH.
tar -C "${local_flake_dir}" -cf - . \
| "${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" "rm -rf '${remote_root}' && mkdir -p '${remote_root}' && tar -C '${remote_root}' -xf -"
# Run the nix image build on the builder. Extra NIX_BUILD_FLAGS travel as
# positional arguments to the remote bash -s; parameters travel via env.
# stdout/stderr of the build are captured to files for later inspection.
run_remote_build() {
local remote_cmd=(
env
"CONFIG=${CONFIG}"
"REMOTE_FLAKE_PATH=${remote_flake_path}"
"REMOTE_BUILD_STDOUT=${remote_build_stdout}"
"REMOTE_BUILD_STDERR=${remote_build_stderr}"
bash
-s
--
)
if [[ "${#NIX_BUILD_FLAGS[@]}" -gt 0 ]]; then
remote_cmd+=("${NIX_BUILD_FLAGS[@]}")
fi
"${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" "${remote_cmd[@]}" <<'EOF'
set -euo pipefail
config="${CONFIG}"
remote_flake_path="${REMOTE_FLAKE_PATH}"
remote_build_stdout="${REMOTE_BUILD_STDOUT}"
remote_build_stderr="${REMOTE_BUILD_STDERR}"
nix_build_cmd=(
nix
--extra-experimental-features
"nix-command flakes"
build
"path:${remote_flake_path}#images.${config}-raw"
--no-link
--print-out-paths
)
if [[ "$#" -gt 0 ]]; then
nix_build_cmd+=("$@")
fi
rm -f "${remote_build_stdout}" "${remote_build_stderr}"
if ! "${nix_build_cmd[@]}" >"${remote_build_stdout}" 2>"${remote_build_stderr}"; then
cat "${remote_build_stderr}" >&2
exit 1
fi
EOF
}
# Echo the nix store path recorded by run_remote_build (last line of the
# captured stdout); surfaces the remote stderr log if the capture is empty.
resolve_remote_store_path() {
"${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
env "REMOTE_BUILD_STDOUT=${remote_build_stdout}" "REMOTE_BUILD_STDERR=${remote_build_stderr}" bash -s <<'EOF'
set -euo pipefail
remote_build_stdout="${REMOTE_BUILD_STDOUT}"
remote_build_stderr="${REMOTE_BUILD_STDERR}"
if [[ ! -s "${remote_build_stdout}" ]]; then
echo "remote build stdout file is missing or empty: ${remote_build_stdout}" >&2
if [[ -s "${remote_build_stderr}" ]]; then
cat "${remote_build_stderr}" >&2
fi
exit 1
fi
tail -n1 "${remote_build_stdout}"
EOF
}
# Given a store path, echo the concrete image file inside it. Directories
# are searched for the first *.raw/*.img (optionally compressed) file.
resolve_remote_artifact_path() {
local store_path="$1"
"${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
env "REMOTE_STORE_PATH=${store_path}" bash -s <<'EOF'
set -euo pipefail
store_path="${REMOTE_STORE_PATH}"
artifact_path="${store_path}"
if [[ -d "${artifact_path}" ]]; then
artifact_path="$(find "${artifact_path}" -type f \( -name '*.raw' -o -name '*.raw.*' -o -name '*.img' -o -name '*.img.*' \) | sort | head -n1)"
fi
if [[ -z "${artifact_path}" || ! -f "${artifact_path}" ]]; then
echo "unable to locate image artifact under ${store_path}" >&2
exit 1
fi
printf '%s\n' "${artifact_path}"
EOF
}
# Decide how the artifact travels back and print "<local filename>\t<mode>".
# Already-compressed artifacts pass through unchanged; otherwise the mode
# follows --remote-compression (auto prefers zstd, then xz, then none).
# NOTE: the selection logic is duplicated in stream_remote_artifact and the
# two must stay in sync.
plan_remote_artifact_transfer() {
local artifact_path="$1"
local compression_mode="$2"
"${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
env "REMOTE_ARTIFACT_PATH=${artifact_path}" "REMOTE_COMPRESSION=${compression_mode}" bash -s <<'EOF'
set -euo pipefail
artifact_path="${REMOTE_ARTIFACT_PATH}"
compression_mode="${REMOTE_COMPRESSION}"
case "${artifact_path}" in
*.bz2)
printf '%s\tbz2\n' "$(basename "${artifact_path}")"
exit 0
;;
*.xz)
printf '%s\txz\n' "$(basename "${artifact_path}")"
exit 0
;;
*.zst|*.zstd)
printf '%s\tzstd\n' "$(basename "${artifact_path}")"
exit 0
;;
esac
select_compression() {
case "${compression_mode}" in
auto)
if command -v zstd >/dev/null 2>&1; then
printf 'zstd\n'
return 0
fi
if command -v xz >/dev/null 2>&1; then
printf 'xz\n'
return 0
fi
printf 'none\n'
;;
none|xz|zstd)
printf '%s\n' "${compression_mode}"
;;
*)
echo "unsupported remote compression mode: ${compression_mode}" >&2
exit 1
;;
esac
}
mode="$(select_compression)"
case "${mode}" in
none)
printf '%s\tnone\n' "$(basename "${artifact_path}")"
;;
zstd)
printf '%s.zst\tzstd\n' "$(basename "${artifact_path}")"
;;
xz)
printf '%s.xz\txz\n' "$(basename "${artifact_path}")"
;;
esac
EOF
}
# Stream the artifact bytes from the builder into a local destination file,
# compressing in transit when required. The compression choice mirrors
# plan_remote_artifact_transfer and must stay in sync with it.
stream_remote_artifact() {
local artifact_path="$1"
local compression_mode="$2"
local destination="$3"
"${ssh_base[@]}" "${BUILDER_ID}@${NSC_SSH_HOST}" \
env "REMOTE_ARTIFACT_PATH=${artifact_path}" "REMOTE_COMPRESSION=${compression_mode}" bash -s <<'EOF' > "${destination}"
set -euo pipefail
artifact_path="${REMOTE_ARTIFACT_PATH}"
compression_mode="${REMOTE_COMPRESSION}"
case "${artifact_path}" in
*.bz2|*.xz|*.zst|*.zstd)
cat "${artifact_path}"
exit 0
;;
esac
select_compression() {
case "${compression_mode}" in
auto)
if command -v zstd >/dev/null 2>&1; then
printf 'zstd\n'
return 0
fi
if command -v xz >/dev/null 2>&1; then
printf 'xz\n'
return 0
fi
printf 'none\n'
;;
none|xz|zstd)
printf '%s\n' "${compression_mode}"
;;
*)
echo "unsupported remote compression mode: ${compression_mode}" >&2
exit 1
;;
esac
}
mode="$(select_compression)"
case "${mode}" in
none)
cat "${artifact_path}"
;;
zstd)
if ! command -v zstd >/dev/null 2>&1; then
echo "zstd requested but not available on Namespace builder" >&2
exit 1
fi
zstd -T0 -19 -c "${artifact_path}"
;;
xz)
if ! command -v xz >/dev/null 2>&1; then
echo "xz requested but not available on Namespace builder" >&2
exit 1
fi
xz -T0 -c "${artifact_path}"
;;
esac
EOF
}
printf 'Building raw image on Namespace builder %s\n' "${BUILDER_ID}" >&2
# Build remotely, then resolve the store path and the image file inside it.
run_remote_build
remote_store_path="$(resolve_remote_store_path)"
if [[ -z "${remote_store_path}" ]]; then
echo "remote build did not return a store path" >&2
exit 1
fi
remote_artifact_path="$(resolve_remote_artifact_path "${remote_store_path}")"
if [[ -z "${remote_artifact_path}" ]]; then
echo "remote build did not return an artifact path" >&2
exit 1
fi
# Plan the transfer: tab-separated "<local filename>\t<compression mode>".
transfer_plan="$(plan_remote_artifact_transfer "${remote_artifact_path}" "${REMOTE_COMPRESSION}")"
local_artifact_name="$(printf '%s\n' "${transfer_plan}" | cut -f1)"
transfer_compression="$(printf '%s\n' "${transfer_plan}" | cut -f2)"
if [[ -z "${local_artifact_name}" || -z "${transfer_compression}" ]]; then
echo "unable to determine artifact transfer plan for ${remote_artifact_path}" >&2
exit 1
fi
# The nix output hash (store path basename before the first dash) is used
# for snapshot dedup by the upload script.
output_hash="$(basename "${remote_store_path}")"
output_hash="${output_hash%%-*}"
local_artifact="${TMPDIR_BURROW_NSC}/${local_artifact_name}"
printf 'Streaming built artifact back from Namespace builder %s (%s)\n' "${BUILDER_ID}" "${transfer_compression}" >&2
stream_remote_artifact "${remote_artifact_path}" "${REMOTE_COMPRESSION}" "${local_artifact}"
# Hand the downloaded artifact to the Hetzner upload helper.
cmd=(
"${SCRIPT_DIR}/hcloud-upload-nixos-image.sh"
--config "${CONFIG}"
--flake "${FLAKE}"
--location "${LOCATION}"
--token-file "${TOKEN_FILE}"
--artifact-path "${local_artifact}"
--output-hash "${output_hash}"
)
if [[ -n "${UPLOAD_SERVER_TYPE}" ]]; then
cmd+=(--server-type "${UPLOAD_SERVER_TYPE}")
fi
if [[ "${NO_UPDATE}" -eq 1 ]]; then
cmd+=(--no-update)
fi
if [[ "${#EXTRA_LABELS[@]}" -gt 0 ]]; then
for label in "${EXTRA_LABELS[@]}"; do
cmd+=(--label "${label}")
done
fi
"${cmd[@]}"

View file

@ -1,237 +0,0 @@
#!/usr/bin/env bash
# Generate the forgejo-nsc runtime inputs under intake/ and mint the
# Forgejo PAT / Namespace token they embed.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# shellcheck source=Scripts/_burrow-flake.sh
source "${SCRIPT_DIR}/_burrow-flake.sh"
# Print CLI usage to stdout.
usage() {
cat <<'EOF'
Usage: Scripts/provision-forgejo-nsc.sh [options]
Generate Burrow forgejo-nsc runtime inputs in intake/ and optionally refresh the
Namespace token from the currently logged-in namespace account.
Options:
--host <user@host> SSH target used to mint the Forgejo PAT.
Default: root@git.burrow.net
--ssh-key <path> SSH private key for the forge host.
Default: intake/agent_at_burrow_net_ed25519
--nsc-bin <path> Override the nsc binary.
--no-refresh-token Reuse intake/forgejo_nsc_token.txt if it already exists.
--token-name <name> Forgejo PAT name prefix (default: forgejo-nsc)
--contact-user <name> Forgejo username used for PAT creation (default: contact)
--scope-owner <name> Forgejo org/user owner for the default NSC scope (default: burrow)
--scope-name <name> Forgejo repository name for the default NSC scope (default: burrow)
-h, --help Show this help text.
EOF
}
# Defaults, each overridable through the listed environment variable.
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
NSC_BIN="${NSC_BIN:-}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
REFRESH_TOKEN=1
TOKEN_NAME_PREFIX="${FORGEJO_PAT_NAME:-forgejo-nsc}"
CONTACT_USER="${FORGEJO_CONTACT_USER:-contact}"
SCOPE_OWNER="${FORGEJO_SCOPE_OWNER:-burrow}"
SCOPE_NAME="${FORGEJO_SCOPE_NAME:-burrow}"
BURROW_FLAKE_TMPDIRS=()
# Remove any temporary flake checkouts created by _burrow-flake.sh.
cleanup() {
burrow_cleanup_flake_tmpdirs
}
trap cleanup EXIT
# Parse options; value-taking flags abort via ${2:?...} when no value given.
while [[ $# -gt 0 ]]; do
case "$1" in
--host)
HOST="${2:?missing value for --host}"
shift 2
;;
--ssh-key)
SSH_KEY="${2:?missing value for --ssh-key}"
shift 2
;;
--nsc-bin)
NSC_BIN="${2:?missing value for --nsc-bin}"
shift 2
;;
--no-refresh-token)
REFRESH_TOKEN=0
shift
;;
--token-name)
TOKEN_NAME_PREFIX="${2:?missing value for --token-name}"
shift 2
;;
--contact-user)
CONTACT_USER="${2:?missing value for --contact-user}"
shift 2
;;
--scope-owner)
SCOPE_OWNER="${2:?missing value for --scope-owner}"
shift 2
;;
--scope-name)
SCOPE_NAME="${2:?missing value for --scope-name}"
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "unknown option: $1" >&2
usage >&2
exit 64
;;
esac
done
mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"
# Tooling and input sanity checks.
burrow_require_cmd nix
burrow_require_cmd ssh
burrow_require_cmd python3
if [[ ! -f "${SSH_KEY}" ]]; then
echo "forge SSH key not found: ${SSH_KEY}" >&2
exit 1
fi
# intake/ holds secrets; keep it owner-only.
mkdir -p "${REPO_ROOT}/intake"
chmod 700 "${REPO_ROOT}/intake"
flake_ref="$(burrow_prepare_flake_ref "${REPO_ROOT}")"
# Resolve nsc: explicit --nsc-bin, then PATH, then a nix build of #nsc.
if [[ -z "${NSC_BIN}" ]]; then
if command -v nsc >/dev/null 2>&1; then
NSC_BIN="$(command -v nsc)"
else
nsc_build_output="$(
nix --extra-experimental-features "nix-command flakes" build \
"${flake_ref}#nsc" \
--no-link \
--print-out-paths 2>&1
)" || {
printf '%s\n' "${nsc_build_output}" >&2
exit 1
}
NSC_BIN="$(printf '%s\n' "${nsc_build_output}" | tail -n1)/bin/nsc"
fi
fi
if [[ ! -x "${NSC_BIN}" ]]; then
echo "unable to resolve an executable nsc binary; set NSC_BIN explicitly" >&2
exit 1
fi
# Output/input locations for the rendered runtime files.
token_file="${REPO_ROOT}/intake/forgejo_nsc_token.txt"
dispatcher_out="${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml"
autoscaler_out="${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml"
dispatcher_src="${REPO_ROOT}/services/forgejo-nsc/deploy/dispatcher.yaml"
autoscaler_src="${REPO_ROOT}/services/forgejo-nsc/deploy/autoscaler.yaml"
# Mint a fresh Namespace dev token unless an existing one is being reused.
if [[ "${REFRESH_TOKEN}" -eq 1 || ! -s "${token_file}" ]]; then
"${NSC_BIN}" auth check-login --duration 20m >/dev/null
"${NSC_BIN}" auth generate-dev-token --output_to "${token_file}" >/dev/null
chmod 600 "${token_file}"
fi
# Random shared secret for the Forgejo webhook.
webhook_secret="$(python3 - <<'PY'
import secrets
print(secrets.token_hex(32))
PY
)"
# Timestamped PAT name so repeated runs never collide.
token_name="${TOKEN_NAME_PREFIX}-$(date -u +%Y%m%dT%H%M%SZ)"
# Mint a Forgejo personal access token on the forge host by running the
# forgejo admin CLI as the forgejo service user over SSH.
forgejo_pat="$(
ssh \
-i "${SSH_KEY}" \
-o IdentitiesOnly=yes \
-o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \
-o StrictHostKeyChecking=accept-new \
"${HOST}" \
"set -euo pipefail; forgejo_bin=\$(systemctl show -p ExecStart forgejo.service --value | sed -E 's/^\\{ path=([^ ;]+).*/\\1/'); sudo -u forgejo \"\${forgejo_bin}\" --config /var/lib/forgejo/custom/conf/app.ini --custom-path /var/lib/forgejo/custom --work-path /var/lib/forgejo admin user generate-access-token --username '${CONTACT_USER}' --scopes all --raw --token-name '${token_name}'" \
| tr -d '\r\n'
)"
if [[ -z "${forgejo_pat}" ]]; then
echo "failed to mint Forgejo PAT on ${HOST}" >&2
exit 1
fi
# Ensure the default scope org and repo exist via the Forgejo API on the
# forge host. NOTE: the heredoc delimiter is unquoted, so ${forgejo_pat}
# and the SCOPE_* values expand locally before the script runs remotely —
# values containing single quotes would break the remote script.
ssh \
-i "${SSH_KEY}" \
-o IdentitiesOnly=yes \
-o UserKnownHostsFile="${KNOWN_HOSTS_FILE}" \
-o StrictHostKeyChecking=accept-new \
"${HOST}" \
'bash -s' <<EOF
set -euo pipefail
base_url='http://127.0.0.1:3000'
token='${forgejo_pat}'
scope_owner='${SCOPE_OWNER}'
scope_name='${SCOPE_NAME}'
api() {
curl -sS -o /tmp/forgejo-provision-response.json -w '%{http_code}' \
-H "Authorization: token \${token}" \
-H 'Content-Type: application/json' \
"\$@"
}
org_code="\$(api "\${base_url}/api/v1/orgs/\${scope_owner}")"
if [[ "\${org_code}" == "404" ]]; then
cat >/tmp/forgejo-provision-org.json <<JSON
{"username":"${SCOPE_OWNER}","full_name":"${SCOPE_OWNER}","visibility":"public"}
JSON
org_code="\$(api -X POST --data @/tmp/forgejo-provision-org.json "\${base_url}/api/v1/orgs")"
if [[ "\${org_code}" != "201" ]]; then
echo "failed to create Forgejo org ${SCOPE_OWNER} (HTTP \${org_code})" >&2
cat /tmp/forgejo-provision-response.json >&2
exit 1
fi
fi
repo_code="\$(api "\${base_url}/api/v1/repos/\${scope_owner}/\${scope_name}")"
if [[ "\${repo_code}" == "404" ]]; then
cat >/tmp/forgejo-provision-repo.json <<JSON
{"name":"${SCOPE_NAME}","description":"Burrow forge bootstrap repository","private":false,"default_branch":"main","auto_init":false}
JSON
repo_code="\$(api -X POST --data @/tmp/forgejo-provision-repo.json "\${base_url}/api/v1/orgs/\${scope_owner}/repos")"
if [[ "\${repo_code}" != "201" ]]; then
echo "failed to create Forgejo repo ${SCOPE_OWNER}/${SCOPE_NAME} (HTTP \${repo_code})" >&2
cat /tmp/forgejo-provision-response.json >&2
exit 1
fi
fi
EOF
# Render the dispatcher/autoscaler configs by substituting the PENDING-*
# placeholders; secrets cross into Python via the environment only.
FORGEJO_PAT="${forgejo_pat}" \
WEBHOOK_SECRET="${webhook_secret}" \
DISPATCHER_SRC="${dispatcher_src}" \
AUTOSCALER_SRC="${autoscaler_src}" \
DISPATCHER_OUT="${dispatcher_out}" \
AUTOSCALER_OUT="${autoscaler_out}" \
python3 - <<'PY'
import os
from pathlib import Path
def render(src: str, dst: str) -> None:
    text = Path(src).read_text(encoding="utf-8")
    text = text.replace("PENDING-FORGEJO-PAT", os.environ["FORGEJO_PAT"])
    text = text.replace("PENDING-WEBHOOK-SECRET", os.environ["WEBHOOK_SECRET"])
    Path(dst).write_text(text, encoding="utf-8")
render(os.environ["DISPATCHER_SRC"], os.environ["DISPATCHER_OUT"])
render(os.environ["AUTOSCALER_SRC"], os.environ["AUTOSCALER_OUT"])
PY
# The rendered files embed secrets; keep them owner-only.
chmod 600 "${dispatcher_out}" "${autoscaler_out}"
echo "Rendered intake/forgejo_nsc_token.txt, intake/forgejo_nsc_dispatcher.yaml, and intake/forgejo_nsc_autoscaler.yaml."
echo "Minted Forgejo PAT ${token_name} for ${CONTACT_USER} on ${HOST}."

View file

@ -1,132 +0,0 @@
#!/usr/bin/env bash
# Copy the forgejo-nsc runtime inputs to the forge host and restart the
# dispatcher/autoscaler services.
set -euo pipefail
# Print CLI usage to stdout.
usage() {
cat <<'EOF'
Usage: Scripts/sync-forgejo-nsc-config.sh [options]
Copy Burrow forgejo-nsc runtime inputs from intake/ onto the forge host and
restart the dispatcher/autoscaler units.
Options:
--host <user@host> SSH target (default: root@git.burrow.net)
--ssh-key <path> SSH private key (default: intake/agent_at_burrow_net_ed25519)
--rotate-pat Re-render the intake files before syncing.
--no-restart Copy files only.
-h, --help Show this help text.
EOF
}
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# Defaults, each overridable through the listed environment variable.
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
ROTATE_PAT=0
NO_RESTART=0
# Parse options; value-taking flags abort via ${2:?...} when no value given.
while [[ $# -gt 0 ]]; do
case "$1" in
--host)
HOST="${2:?missing value for --host}"
shift 2
;;
--ssh-key)
SSH_KEY="${2:?missing value for --ssh-key}"
shift 2
;;
--rotate-pat)
ROTATE_PAT=1
shift
;;
--no-restart)
NO_RESTART=1
shift
;;
-h|--help)
usage
exit 0
;;
*)
echo "unknown option: $1" >&2
usage >&2
exit 64
;;
esac
done
# Ensure the directory holding the pinned known_hosts file exists.
mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"
# Abort the script unless command "$1" is resolvable on PATH.
burrow_require_cmd() {
  if command -v "$1" >/dev/null 2>&1; then
    return 0
  fi
  echo "missing required command: $1" >&2
  exit 1
}
burrow_require_cmd ssh
burrow_require_cmd scp
if [[ ! -f "${SSH_KEY}" ]]; then
echo "forge SSH key not found: ${SSH_KEY}" >&2
exit 1
fi
# Optionally re-render the intake files first (mints a fresh Forgejo PAT).
if [[ "${ROTATE_PAT}" -eq 1 ]]; then
"${SCRIPT_DIR}/provision-forgejo-nsc.sh" --host "${HOST}" --ssh-key "${SSH_KEY}"
fi
# All three runtime inputs must exist and be non-empty before syncing.
token_file="${REPO_ROOT}/intake/forgejo_nsc_token.txt"
dispatcher_file="${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml"
autoscaler_file="${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml"
for path in "${token_file}" "${dispatcher_file}" "${autoscaler_file}"; do
if [[ ! -s "${path}" ]]; then
echo "required runtime input missing or empty: ${path}" >&2
exit 1
fi
done
# Shared SSH options: pinned identity plus a dedicated known_hosts file.
ssh_opts=(
-i "${SSH_KEY}"
-o IdentitiesOnly=yes
-o UserKnownHostsFile="${KNOWN_HOSTS_FILE}"
-o StrictHostKeyChecking=accept-new
)
# Stage files in a remote temp directory that is removed again on exit.
remote_tmp="$(ssh "${ssh_opts[@]}" "${HOST}" "mktemp -d")"
cleanup() {
if [[ -n "${remote_tmp:-}" ]]; then
ssh "${ssh_opts[@]}" "${HOST}" "rm -rf '${remote_tmp}'" >/dev/null 2>&1 || true
fi
}
trap cleanup EXIT
# Copy all three runtime inputs into the staging directory in one scp.
scp "${ssh_opts[@]}" \
"${token_file}" \
"${dispatcher_file}" \
"${autoscaler_file}" \
"${HOST}:${remote_tmp}/"
# Atomically install them with owner-only permissions for forgejo-nsc.
ssh "${ssh_opts[@]}" "${HOST}" "
set -euo pipefail
install -d -m 0755 /var/lib/burrow/intake
install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${token_file}")' /var/lib/burrow/intake/forgejo_nsc_token.txt
install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${dispatcher_file}")' /var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml
install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${autoscaler_file}")' /var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml
"
# Restart and verify the services unless --no-restart was requested.
if [[ "${NO_RESTART}" -eq 0 ]]; then
ssh "${ssh_opts[@]}" "${HOST}" "
set -euo pipefail
systemctl restart forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service
systemctl is-active forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service
ls -l \
/var/lib/burrow/intake/forgejo_nsc_token.txt \
/var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml \
/var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml
"
fi
echo "forgejo-nsc runtime sync complete (host=${HOST}, restarted=$((1 - NO_RESTART)))."

View file

@ -1,171 +0,0 @@
#!/usr/bin/env bash
# Configure Forward Email's custom S3 backup storage for a domain and test
# the saved connection.
set -euo pipefail
# Owner-only files for anything we create (the curl config holds secrets).
umask 077
# Print CLI usage to stdout.
usage() {
cat <<'EOF'
Usage:
Tools/forwardemail-custom-s3.sh \
--domain burrow.net \
--api-token-file intake/forwardemail_api_token.txt \
--s3-endpoint https://<endpoint> \
--s3-region <region> \
--s3-bucket <bucket> \
--s3-access-key-file intake/hetzner-s3-user.txt \
--s3-secret-key-file intake/hetzner-s3-secret.txt
Options:
--domain <domain> Forward Email domain to update.
--api-token-file <path> File containing the Forward Email API token.
--s3-endpoint <url> S3-compatible endpoint URL.
--s3-region <region> S3 region string expected by Forward Email.
--s3-bucket <name> Bucket used for alias backup uploads.
--s3-access-key-file <path> File containing the S3 access key id.
--s3-secret-key-file <path> File containing the S3 secret access key.
--test-only Skip the update call and only test the saved connection.
--help Show this help text.
Notes:
- Secrets are passed to curl through a temporary config file to avoid putting
them in the process list.
- By default the script updates the domain settings and then calls
/test-s3-connection.
- For Hetzner Object Storage, use the regional S3 endpoint such as
https://hel1.your-objectstorage.com, not an account alias endpoint.
EOF
}
# Emit "error: <message>" on stderr and terminate with status 1.
fail() {
  local message="$*"
  printf 'error: %s\n' "${message}" >&2
  exit 1
}
# Abort via fail() when the given path is not an existing regular file.
require_file() {
  local candidate="$1"
  if [[ ! -f "${candidate}" ]]; then
    fail "missing file: ${candidate}"
  fi
}
# Print the file's contents stripped of CR/LF; abort via fail() when the
# result is empty.
read_secret() {
  local secret_path="$1"
  local contents
  contents="$(tr -d '\r\n' < "${secret_path}")"
  if [[ -z "${contents}" ]]; then
    fail "empty secret file: ${secret_path}"
  fi
  printf '%s' "${contents}"
}
# Option values, populated by the argument parser below.
domain=""
api_token_file=""
s3_endpoint=""
s3_region=""
s3_bucket=""
s3_access_key_file=""
s3_secret_key_file=""
test_only=false
# Parse options; required values are enforced after the loop, so empty
# values here (${2:-}) are tolerated and checked later.
while [[ $# -gt 0 ]]; do
case "$1" in
--domain)
domain="${2:-}"
shift 2
;;
--api-token-file)
api_token_file="${2:-}"
shift 2
;;
--s3-endpoint)
s3_endpoint="${2:-}"
shift 2
;;
--s3-region)
s3_region="${2:-}"
shift 2
;;
--s3-bucket)
s3_bucket="${2:-}"
shift 2
;;
--s3-access-key-file)
s3_access_key_file="${2:-}"
shift 2
;;
--s3-secret-key-file)
s3_secret_key_file="${2:-}"
shift 2
;;
--test-only)
test_only=true
shift
;;
--help|-h)
usage
exit 0
;;
*)
fail "unknown argument: $1"
;;
esac
done
# Required arguments; the S3 parameters become optional when only testing
# the previously saved connection (--test-only).
[[ -n "$domain" ]] || fail "--domain is required"
[[ -n "$api_token_file" ]] || fail "--api-token-file is required"
[[ -n "$s3_endpoint" || "$test_only" == true ]] || fail "--s3-endpoint is required unless --test-only is set"
[[ -n "$s3_region" || "$test_only" == true ]] || fail "--s3-region is required unless --test-only is set"
[[ -n "$s3_bucket" || "$test_only" == true ]] || fail "--s3-bucket is required unless --test-only is set"
[[ -n "$s3_access_key_file" || "$test_only" == true ]] || fail "--s3-access-key-file is required unless --test-only is set"
[[ -n "$s3_secret_key_file" || "$test_only" == true ]] || fail "--s3-secret-key-file is required unless --test-only is set"
require_file "$api_token_file"
api_token="$(read_secret "$api_token_file")"
# S3 credentials are only read (and the endpoint validated) for the update.
if [[ "$test_only" == false ]]; then
require_file "$s3_access_key_file"
require_file "$s3_secret_key_file"
s3_access_key_id="$(read_secret "$s3_access_key_file")"
s3_secret_access_key="$(read_secret "$s3_secret_key_file")"
case "$s3_endpoint" in
http://*|https://*)
;;
*)
fail "--s3-endpoint must start with http:// or https://"
;;
esac
fi
# Secrets go to curl through a temp config file (created 0600 thanks to the
# umask above) so they never appear in the process list.
curl_config="$(mktemp)"
trap 'rm -f "$curl_config"' EXIT
# Update the domain's custom S3 settings unless --test-only was given.
if [[ "$test_only" == false ]]; then
cat >"$curl_config" <<EOF
silent
show-error
fail-with-body
url = "https://api.forwardemail.net/v1/domains/${domain}"
request = "PUT"
user = "${api_token}:"
data = "has_custom_s3=true"
data-urlencode = "s3_endpoint=${s3_endpoint}"
data-urlencode = "s3_access_key_id=${s3_access_key_id}"
data-urlencode = "s3_secret_access_key=${s3_secret_access_key}"
data-urlencode = "s3_region=${s3_region}"
data-urlencode = "s3_bucket=${s3_bucket}"
EOF
printf 'Configuring Forward Email custom S3 for %s\n' "$domain" >&2
curl --config "$curl_config"
printf '\n' >&2
fi
# Reuse the same config file for the connection-test call.
cat >"$curl_config" <<EOF
silent
show-error
fail-with-body
url = "https://api.forwardemail.net/v1/domains/${domain}/test-s3-connection"
request = "POST"
user = "${api_token}:"
EOF
printf 'Testing Forward Email custom S3 for %s\n' "$domain" >&2
curl --config "$curl_config"
printf '\n' >&2
View file

@ -1,261 +0,0 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import datetime as dt
import hashlib
import hmac
import sys
import textwrap
from pathlib import Path
from urllib.parse import urlencode, urlparse
import requests
def read_secret(path: str) -> str:
    """Return the whitespace-stripped contents of a secret file.

    Exits the program (SystemExit) when the file is empty or contains only
    whitespace, since an empty credential is never usable.
    """
    secret = Path(path).read_text(encoding="utf-8").strip()
    if secret:
        return secret
    raise SystemExit(f"error: empty secret file: {path}")
def sign(key: bytes, msg: str) -> bytes:
    """One HMAC-SHA256 step of the AWS SigV4 key-derivation chain."""
    mac = hmac.new(key, msg.encode("utf-8"), hashlib.sha256)
    return mac.digest()
def request(
    *,
    method: str,
    endpoint: str,
    region: str,
    access_key: str,
    secret_key: str,
    bucket: str,
    query: dict[str, str] | None = None,
    body: bytes = b"",
    content_type: str | None = None,
) -> requests.Response:
    """Send one AWS-SigV4-signed S3 request to ``https://<host>/<bucket>``.

    Implements the Signature Version 4 flow by hand: canonical request ->
    string-to-sign -> derived signing key -> Authorization header. Raises
    SystemExit for non-https endpoints; network errors propagate as
    requests exceptions for the caller to handle.
    """
    parsed = urlparse(endpoint)
    if parsed.scheme != "https":
        raise SystemExit("error: endpoint must use https")
    host = parsed.netloc
    # Path-style addressing: the bucket is the first (and only) path segment.
    canonical_uri = f"/{bucket}"
    query = query or {}
    # NOTE(review): urlencode defaults to quote_plus, which encodes spaces as
    # '+' rather than the '%20' SigV4 canonicalization expects, and `safe`
    # applies to values only. The queries used here (e.g. {"lifecycle": ""})
    # are unaffected — confirm before passing keys/values with spaces or
    # reserved characters.
    canonical_querystring = urlencode(sorted(query.items()), doseq=True, safe="~")
    now = dt.datetime.now(dt.timezone.utc)
    amz_date = now.strftime("%Y%m%dT%H%M%SZ")  # e.g. 20240101T000000Z
    date_stamp = now.strftime("%Y%m%d")
    payload_hash = hashlib.sha256(body).hexdigest()
    # Headers included in the signature; SigV4 requires lowercase names and
    # the same sorted order in both canonical_headers and signed_headers.
    headers = {
        "host": host,
        "x-amz-content-sha256": payload_hash,
        "x-amz-date": amz_date,
    }
    if content_type:
        headers["content-type"] = content_type
    signed_headers = ";".join(sorted(headers.keys()))
    canonical_headers = "".join(f"{name}:{headers[name]}\n" for name in sorted(headers.keys()))
    canonical_request = "\n".join(
        [
            method,
            canonical_uri,
            canonical_querystring,
            canonical_headers,
            signed_headers,
            payload_hash,
        ]
    )
    algorithm = "AWS4-HMAC-SHA256"
    credential_scope = f"{date_stamp}/{region}/s3/aws4_request"
    string_to_sign = "\n".join(
        [
            algorithm,
            amz_date,
            credential_scope,
            hashlib.sha256(canonical_request.encode("utf-8")).hexdigest(),
        ]
    )
    # Derive the signing key: HMAC chain over date, region, service ("s3"),
    # and the fixed "aws4_request" terminator.
    k_date = sign(("AWS4" + secret_key).encode("utf-8"), date_stamp)
    k_region = sign(k_date, region)
    k_service = sign(k_region, "s3")
    signing_key = sign(k_service, "aws4_request")
    signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
    auth_header = (
        f"{algorithm} Credential={access_key}/{credential_scope}, "
        f"SignedHeaders={signed_headers}, Signature={signature}"
    )
    # The request URL must match the canonical URI/query used for signing.
    url = f"{parsed.scheme}://{host}{canonical_uri}"
    if canonical_querystring:
        url = f"{url}?{canonical_querystring}"
    response = requests.request(
        method,
        url,
        headers={**headers, "Authorization": auth_header},
        data=body,
        timeout=30,
    )
    return response
def ensure_bucket(args: argparse.Namespace, bucket: str) -> None:
    """Create ``bucket`` if it does not already exist (idempotent).

    HEAD 200 -> bucket exists, nothing to do; HEAD 404 -> create it via PUT
    with a region LocationConstraint; any other HEAD or PUT status aborts
    the program with a truncated error body.
    """
    head = request(
        method="HEAD",
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
    )
    if head.status_code == 200:
        print(f"{bucket}: exists")
        return
    if head.status_code != 404:
        # Anything other than "exists" or "not found" (e.g. 403) is fatal.
        raise SystemExit(f"error: HEAD {bucket} returned {head.status_code}: {head.text[:200]}")
    body = textwrap.dedent(
        f"""\
        <?xml version="1.0" encoding="UTF-8"?>
        <CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
          <LocationConstraint>{args.region}</LocationConstraint>
        </CreateBucketConfiguration>
        """
    ).encode("utf-8")
    create = request(
        method="PUT",
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
        body=body,
        content_type="application/xml",
    )
    # Some S3 implementations answer 200, others 204, on successful create.
    if create.status_code not in (200, 204):
        raise SystemExit(f"error: PUT {bucket} returned {create.status_code}: {create.text[:200]}")
    print(f"{bucket}: created")
def put_lifecycle(args: argparse.Namespace, bucket: str) -> None:
    """Install a single lifecycle rule expiring all objects after N days.

    The empty ``<Prefix>`` filter applies the rule to every object in the
    bucket. Aborts the program on any non-200/204 response.
    """
    body = textwrap.dedent(
        f"""\
        <?xml version="1.0" encoding="UTF-8"?>
        <LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
          <Rule>
            <ID>expire-forwardemail-backups-after-{args.expire_days}-days</ID>
            <Status>Enabled</Status>
            <Filter>
              <Prefix></Prefix>
            </Filter>
            <Expiration>
              <Days>{args.expire_days}</Days>
            </Expiration>
          </Rule>
        </LifecycleConfiguration>
        """
    ).encode("utf-8")
    # "?lifecycle" subresource: an empty-valued query key selects it.
    response = request(
        method="PUT",
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
        query={"lifecycle": ""},
        body=body,
        content_type="application/xml",
    )
    if response.status_code not in (200, 204):
        raise SystemExit(
            f"error: PUT lifecycle for {bucket} returned {response.status_code}: {response.text[:200]}"
        )
    print(f"{bucket}: lifecycle set to {args.expire_days} days")
def get_lifecycle(args: argparse.Namespace, bucket: str) -> None:
    """Fetch the bucket's current lifecycle configuration and print it."""
    reply = request(
        method="GET",
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
        query={"lifecycle": ""},
    )
    if reply.status_code == 200:
        print(f"=== {bucket} lifecycle ===")
        print(reply.text.strip())
        return
    raise SystemExit(
        f"error: GET lifecycle for {bucket} returned {reply.status_code}: {reply.text[:200]}"
    )
def parse_args() -> argparse.Namespace:
    """Define and parse the command-line interface for this tool."""
    cli = argparse.ArgumentParser(
        description="Provision Hetzner object-storage buckets for Forward Email backups."
    )
    # Connection settings.
    cli.add_argument(
        "--endpoint",
        default="https://hel1.your-objectstorage.com",
        help="Public S3-compatible endpoint URL. For Hetzner, use the regional endpoint, not the account alias.",
    )
    cli.add_argument("--region", default="hel1", help="S3 region.")
    # Credentials come from files so they never appear on the command line.
    cli.add_argument(
        "--access-key-file",
        default="intake/hetzner-s3-user.txt",
        help="File containing the S3 access key id.",
    )
    cli.add_argument(
        "--secret-key-file",
        default="intake/hetzner-s3-secret.txt",
        help="File containing the S3 secret key.",
    )
    # What to provision and how.
    cli.add_argument(
        "--bucket",
        action="append",
        required=True,
        help="Bucket to provision. Repeat for multiple buckets.",
    )
    cli.add_argument(
        "--expire-days",
        type=int,
        default=90,
        help="Lifecycle expiry window in days.",
    )
    cli.add_argument(
        "--verify-only",
        action="store_true",
        help="Skip create/update and only read the current lifecycle.",
    )
    return cli.parse_args()
def main() -> None:
    """Entry point: load credentials, then provision or verify each bucket."""
    args = parse_args()
    # Attach the secrets to the namespace so the worker functions can reach
    # them through the single ``args`` parameter.
    args.access_key = read_secret(args.access_key_file)
    args.secret_key = read_secret(args.secret_key_file)
    for bucket in args.bucket:
        if not args.verify_only:
            ensure_bucket(args, bucket)
            put_lifecycle(args, bucket)
        # Always finish by showing the lifecycle that is actually in effect.
        get_lifecycle(args, bucket)
if __name__ == "__main__":
try:
main()
except requests.RequestException as err:
raise SystemExit(f"error: request failed: {err}") from err

View file

@ -1,66 +0,0 @@
module burrow.dev/tailscale-login-bridge
go 1.26.1
require tailscale.com v1.96.5
require (
filippo.io/edwards25519 v1.2.0 // indirect
github.com/akutz/memconn v0.1.0 // indirect
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
github.com/aws/aws-sdk-go-v2 v1.41.0 // indirect
github.com/aws/aws-sdk-go-v2/config v1.29.5 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.58 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect
github.com/aws/smithy-go v1.24.0 // indirect
github.com/coder/websocket v1.8.12 // indirect
github.com/creachadair/msync v0.7.1 // indirect
github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/gaissmai/bart v0.26.1 // indirect
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced // indirect
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/jsimonetti/rtnetlink v1.4.0 // indirect
github.com/klauspost/compress v1.18.2 // indirect
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect
github.com/mdlayher/socket v0.5.0 // indirect
github.com/mitchellh/go-ps v1.0.0 // indirect
github.com/pires/go-proxyproto v0.8.1 // indirect
github.com/prometheus-community/pro-bing v0.4.0 // indirect
github.com/safchain/ethtool v0.3.0 // indirect
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a // indirect
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc // indirect
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 // indirect
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da // indirect
github.com/x448/float16 v0.8.4 // indirect
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 // indirect
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect
golang.org/x/crypto v0.46.0 // indirect
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
golang.org/x/net v0.48.0 // indirect
golang.org/x/oauth2 v0.33.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/term v0.38.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
gvisor.dev/gvisor v0.0.0-20260224225140-573d5e7127a8 // indirect
)

View file

@ -1,229 +0,0 @@
9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f h1:1C7nZuxUMNz7eiQALRfiqNOm04+m3edWlRff/BYHf0Q=
9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f/go.mod h1:hHyrZRryGqVdqrknjq5OWDLGCTJ2NeEvtrpR96mjraM=
filippo.io/edwards25519 v1.2.0 h1:crnVqOiS4jqYleHd9vaKZ+HKtHfllngJIiOpNpoJsjo=
filippo.io/edwards25519 v1.2.0/go.mod h1:xzAOLCNug/yB62zG1bQ8uziwrIqIuxhctzJT18Q77mc=
filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc=
filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A=
github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4=
github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k=
github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg=
github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y=
github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM=
github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE=
github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02 h1:bXAPYSbdYbS5VTy92NIUbeDI1qyggi+JYh5op9IFlcQ=
github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c=
github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok=
github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE=
github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo=
github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/creachadair/mds v0.25.9 h1:080Hr8laN2h+l3NeVCGMBpXtIPnl9mz8e4HLraGPqtA=
github.com/creachadair/mds v0.25.9/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs=
github.com/creachadair/msync v0.7.1 h1:SeZmuEBXQPe5GqV/C94ER7QIZPwtvFbeQiykzt/7uho=
github.com/creachadair/msync v0.7.1/go.mod h1:8CcFlLsSujfHE5wWm19uUBLHIPDAUr6LXDwneVMO008=
github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=
github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g=
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa h1:h8TfIT1xc8FWbwwpmHn1J5i43Y0uZP97GqasGCzSRJk=
github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ=
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8=
github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q=
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A=
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/gaissmai/bart v0.26.1 h1:+w4rnLGNlA2GDVn382Tfe3jOsK5vOr5n4KmigJ9lbTo=
github.com/gaissmai/bart v0.26.1/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c=
github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I=
github.com/github/fakeca v0.1.0/go.mod h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo=
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced h1:Q311OHjMh/u5E2TITc++WlTP5We0xNseRMkHDyvhW7I=
github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo=
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737/go.mod h1:MIS0jDzbU/vuM9MC4YnBITCv+RYuTRq8dJzmCrFsK9g=
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg=
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I=
github.com/google/go-tpm v0.9.4/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI=
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU=
github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/illarion/gonotify/v3 v3.0.2 h1:O7S6vcopHexutmpObkeWsnzMJt/r1hONIEogeVNmJMk=
github.com/illarion/gonotify/v3 v3.0.2/go.mod h1:HWGPdPe817GfvY3w7cx6zkbzNZfi3QjcBm/wgVvEL1U=
github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA=
github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI=
github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g=
github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I=
github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E=
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ=
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw=
github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o=
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg=
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o=
github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c=
github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE=
github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI=
github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI=
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
github.com/pierrec/lz4/v4 v4.1.25 h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0=
github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4=
github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0=
github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU=
github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4=
github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0=
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ=
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4=
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8Jj4P4c1a3CtQyMaTVCznlkLZI++hok4=
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg=
github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 h1:SRL6irQkKGQKKLzvQP/ke/2ZuB7Py5+XuqtOgSj+iMM=
github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ=
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw=
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8=
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU=
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0=
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA=
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc=
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14=
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da h1:jVRUZPRs9sqyKlYHHzHjAqKN+6e/Vog6NpHYeNPJqOw=
github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek=
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg=
github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA=
github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk=
github.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg=
github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1WMluqE=
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM=
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA=
github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek=
go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g=
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o=
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w=
golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gvisor.dev/gvisor v0.0.0-20260224225140-573d5e7127a8 h1:Zy8IV/+FMLxy6j6p87vk/vQGKcdnbprwjTxc8UiUtsA=
gvisor.dev/gvisor v0.0.0-20260224225140-573d5e7127a8/go.mod h1:QkHjoMIBaYtpVufgwv3keYAbln78mBoCuShZrPrer1Q=
honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 h1:5SXjd4ET5dYijLaf0O3aOenC0Z4ZafIWSpjUzsQaNho=
honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0/go.mod h1:EPDDhEZqVHhWuPI5zPAsjU0U7v9xNIWjoOVyZ5ZcniQ=
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
tailscale.com v1.96.5 h1:gNkfA/KSZAl6jCH9cj8urq00HRWItDDTtGsyATI89jA=
tailscale.com v1.96.5/go.mod h1:/3lnZBYb2UEwnN0MNu2SDXUtT06AGd5k0s+OWx3WmcY=

View file

@ -1,133 +0,0 @@
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"log"
"net"
"net/http"
"os"
"time"
"tailscale.com/client/local"
"tailscale.com/ipn"
"tailscale.com/tsnet"
)
// statusResponse is the JSON payload served by the /status endpoint,
// summarizing the embedded tsnet backend's login and connectivity state
// for the parent process that polls this sidecar.
type statusResponse struct {
	BackendState   string   `json:"backend_state"`             // raw ipn backend state string (e.g. "Running", "NeedsLogin")
	AuthURL        string   `json:"auth_url,omitempty"`        // interactive login URL, when the backend is waiting for auth
	Running        bool     `json:"running"`                   // true when BackendState == ipn.Running
	NeedsLogin     bool     `json:"needs_login"`               // true when BackendState == ipn.NeedsLogin
	TailnetName    string   `json:"tailnet_name,omitempty"`    // from status.CurrentTailnet, when present
	MagicDNSSuffix string   `json:"magic_dns_suffix,omitempty"` // from status.CurrentTailnet, when present
	SelfDNSName    string   `json:"self_dns_name,omitempty"`   // from status.Self, when present
	TailscaleIPs   []string `json:"tailscale_ips,omitempty"`   // stringified addresses assigned to this node
	Health         []string `json:"health,omitempty"`          // backend health messages, copied from the status
}
// main runs a sidecar process that embeds a tsnet (Tailscale) node and
// exposes a small loopback HTTP control API (/status, /shutdown) for a
// parent process.
//
// Flags:
//
//	--listen       local TCP address to serve the control API on
//	--state-dir    required; directory for persistent tsnet state
//	--hostname     hostname to register on the tailnet
//	--control-url  optional alternative control-plane URL
//
// After binding, it prints exactly one JSON line {"listen_addr":...} to
// stdout so the parent can discover the (possibly ephemeral) port; the
// parent presumably reads stdout for this handshake — keep it the first
// thing written there.
func main() {
	listen := flag.String("listen", "127.0.0.1:0", "local listen address")
	stateDir := flag.String("state-dir", "", "persistent state directory")
	hostname := flag.String("hostname", "burrow-apple", "tailnet hostname")
	controlURL := flag.String("control-url", "", "optional control URL")
	flag.Parse()

	if *stateDir == "" {
		log.Fatal("--state-dir is required")
	}
	if err := os.MkdirAll(*stateDir, 0o755); err != nil {
		log.Fatalf("create state dir: %v", err)
	}

	server := &tsnet.Server{
		Dir:      *stateDir,
		Hostname: *hostname,
		UserLogf: log.Printf,
	}
	if *controlURL != "" {
		server.ControlURL = *controlURL
	}
	// NOTE(review): log.Fatal/log.Fatalf call os.Exit, which skips deferred
	// calls — this defer (and the ln.Close below) will not run on the fatal
	// exit paths, including the final log.Fatal(httpServer.Serve(ln)).
	defer server.Close()

	if err := server.Start(); err != nil {
		log.Fatalf("start tsnet: %v", err)
	}
	localClient, err := server.LocalClient()
	if err != nil {
		log.Fatalf("local client: %v", err)
	}

	ln, err := net.Listen("tcp", *listen)
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	defer ln.Close()

	// Handshake line for the parent process: the actual bound address
	// (resolves the ":0" ephemeral port). Sync is best-effort.
	fmt.Printf("{\"listen_addr\":%q}\n", ln.Addr().String())
	_ = os.Stdout.Sync()

	mux := http.NewServeMux()
	// GET /status — snapshot of the backend state as JSON.
	mux.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) {
		status, err := snapshot(r.Context(), localClient)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadGateway)
			return
		}
		w.Header().Set("content-type", "application/json")
		_ = json.NewEncoder(w).Encode(status)
	})
	// POST-or-any /shutdown — acknowledge immediately, then close the tsnet
	// server and exit the whole process shortly after. The request method
	// and body are intentionally ignored; the brief sleep gives the 204
	// response a chance to flush before os.Exit.
	mux.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
		go func() {
			_ = server.Close()
			time.Sleep(100 * time.Millisecond)
			os.Exit(0)
		}()
	})

	httpServer := &http.Server{
		Handler: mux,
	}
	// Serve blocks until the listener fails or the process exits via
	// /shutdown; its error is reported through log.Fatal.
	log.Fatal(httpServer.Serve(ln))
}
// snapshot reads the current backend status from the tsnet LocalAPI and
// converts it into a statusResponse.
//
// If the backend is in NeedsLogin or NoState and no auth URL has been
// issued yet, it starts an interactive login and re-reads the status so
// the response can carry the freshly minted auth URL.
func snapshot(ctx context.Context, localClient *local.Client) (*statusResponse, error) {
	st, err := localClient.StatusWithoutPeers(ctx)
	if err != nil {
		return nil, err
	}

	awaitingAuthURL := st.AuthURL == "" &&
		(st.BackendState == ipn.NeedsLogin.String() || st.BackendState == ipn.NoState.String())
	if awaitingAuthURL {
		if err = localClient.StartLoginInteractive(ctx); err != nil {
			return nil, err
		}
		if st, err = localClient.StatusWithoutPeers(ctx); err != nil {
			return nil, err
		}
	}

	out := &statusResponse{
		BackendState: st.BackendState,
		AuthURL:      st.AuthURL,
		Running:      st.BackendState == ipn.Running.String(),
		NeedsLogin:   st.BackendState == ipn.NeedsLogin.String(),
		// Copy so later mutations of the status value cannot alias ours.
		Health: append([]string(nil), st.Health...),
	}
	if tailnet := st.CurrentTailnet; tailnet != nil {
		out.TailnetName = tailnet.Name
		out.MagicDNSSuffix = tailnet.MagicDNSSuffix
	}
	if self := st.Self; self != nil {
		out.SelfDNSName = self.DNSName
	}
	for _, addr := range st.TailscaleIPs {
		out.TailscaleIPs = append(out.TailscaleIPs, addr.String())
	}
	return out, nil
}

View file

@ -15,8 +15,6 @@ tokio = { version = "1.37", features = [
"macros", "macros",
"sync", "sync",
"io-util", "io-util",
"net",
"process",
"rt-multi-thread", "rt-multi-thread",
"signal", "signal",
"time", "time",
@ -27,6 +25,7 @@ tun = { version = "0.1", path = "../tun", features = ["serde", "tokio"] }
clap = { version = "4.4", features = ["derive"] } clap = { version = "4.4", features = ["derive"] }
tracing = "0.1" tracing = "0.1"
tracing-log = "0.1" tracing-log = "0.1"
tracing-oslog = { git = "https://github.com/Stormshield-robinc/tracing-oslog" }
tracing-subscriber = { version = "0.3", features = ["std", "env-filter"] } tracing-subscriber = { version = "0.3", features = ["std", "env-filter"] }
log = "0.4" log = "0.4"
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
@ -34,7 +33,6 @@ serde_json = "1.0"
blake2 = "0.10" blake2 = "0.10"
chacha20poly1305 = "0.10" chacha20poly1305 = "0.10"
rand = "0.8" rand = "0.8"
bytes = "1"
rand_core = "0.6" rand_core = "0.6"
aead = "0.5" aead = "0.5"
x25519-dalek = { version = "2.0", features = [ x25519-dalek = { version = "2.0", features = [
@ -48,54 +46,40 @@ base64 = "0.21"
fehler = "1.0" fehler = "1.0"
ip_network_table = "0.2" ip_network_table = "0.2"
ip_network = "0.4" ip_network = "0.4"
ipnetwork = { version = "0.21", features = ["serde"] }
async-channel = "2.1" async-channel = "2.1"
schemars = "0.8" schemars = "0.8"
futures = "0.3.28" futures = "0.3.28"
once_cell = "1.19" once_cell = "1.19"
arti-client = "0.40.0"
hickory-proto = "0.25.2"
netstack-smoltcp = "0.2.1"
tokio-util = { version = "0.7.18", features = ["compat"] }
tor-rtcompat = "0.40.0"
console-subscriber = { version = "0.2.0", optional = true } console-subscriber = { version = "0.2.0", optional = true }
console = "0.15.8" console = "0.15.8"
axum = "0.7.4" axum = "0.7.4"
argon2 = "0.5"
reqwest = { version = "0.12", default-features = false, features = [ reqwest = { version = "0.12", default-features = false, features = [
"json", "json",
"rustls-tls", "rustls-tls",
] } ] }
rusqlite = { version = "0.38.0", features = ["blob"] } rusqlite = { version = "0.31.0", features = ["blob"] }
dotenv = "0.15.0" dotenv = "0.15.0"
tonic = "0.12.0" tonic = "0.12.0"
prost = "0.13.1" prost = "0.13.1"
prost-types = "0.13.1" prost-types = "0.13.1"
tokio-stream = "0.1" tokio-stream = "0.1"
async-stream = "0.2" async-stream = "0.2"
tower = { version = "0.4.13", features = ["util"] } tower = "0.4.13"
hyper-util = "0.1.6" hyper-util = "0.1.6"
toml = "0.8.15" toml = "0.8.15"
rust-ini = "0.21.0" rust-ini = "0.21.0"
subtle = "2.6"
[target.'cfg(target_os = "linux")'.dependencies] [target.'cfg(target_os = "linux")'.dependencies]
caps = "0.5" caps = "0.5"
libc = "0.2"
libsystemd = "0.7" libsystemd = "0.7"
nix = { version = "0.27", features = ["fs", "socket", "uio"] }
tracing-journald = "0.3" tracing-journald = "0.3"
[target.'cfg(target_vendor = "apple")'.dependencies] [target.'cfg(target_vendor = "apple")'.dependencies]
nix = { version = "0.27" } nix = { version = "0.27" }
rusqlite = { version = "0.38.0", features = ["bundled", "blob"] } rusqlite = { version = "0.31.0", features = ["bundled", "blob"] }
[target.'cfg(target_os = "macos")'.dependencies]
tracing-oslog = { git = "https://github.com/Stormshield-robinc/tracing-oslog" }
[dev-dependencies] [dev-dependencies]
insta = { version = "1.32", features = ["yaml"] } insta = { version = "1.32", features = ["yaml"] }
tempfile = "3.13"
[package.metadata.generate-rpm] [package.metadata.generate-rpm]
assets = [ assets = [

24
burrow/src/auth/client.rs Normal file
View file

@ -0,0 +1,24 @@
use std::env::var;
use anyhow::Result;
use reqwest::Url;
/// Builds the Slack OpenID Connect authorization URL and prompts the user
/// to finish the flow in their browser.
///
/// Reads `CLIENT_ID` and `SLACK_TEAM_ID` from the environment and returns
/// an error if either is missing or the base URL fails to parse.
///
/// NOTE(review): `state` and `nonce` are hard-coded constants here. In a
/// real OIDC flow both must be unpredictable per-request values (CSRF and
/// replay protection) — confirm before this ships beyond development.
pub async fn login() -> Result<()> {
    const STATE: &str = "vt :P";
    const NONCE: &str = "no";

    let mut url = Url::parse("https://slack.com/openid/connect/authorize")?;
    {
        // Scope the query-pair builder so its mutable borrow of `url` ends
        // before the URL is read back out below.
        let mut pairs = url.query_pairs_mut();
        pairs
            .append_pair("response_type", "code")
            .append_pair("scope", "openid profile email")
            .append_pair("client_id", &var("CLIENT_ID")?)
            .append_pair("state", STATE)
            .append_pair("team", &var("SLACK_TEAM_ID")?)
            .append_pair("nonce", NONCE)
            .append_pair("redirect_uri", "https://burrow.rs/callback");
    }

    println!("Continue auth in your browser:\n{}", url.as_str());
    Ok(())
}

View file

@ -1 +1,2 @@
pub mod client;
pub mod server; pub mod server;

View file

@ -1,627 +1,91 @@
use anyhow::{anyhow, Context, Result}; use anyhow::Result;
use argon2::{
password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
Argon2,
};
use base64::{engine::general_purpose, Engine as _};
use rand::RngCore;
use rusqlite::{params, Connection, OptionalExtension};
use crate::control::{ use crate::daemon::rpc::grpc_defs::{Network, NetworkType};
DnsConfig, Hostinfo, LocalAuthResponse, MapRequest, MapResponse, Node, NodeCapMap,
PacketFilter, PeerCapMap, RegisterRequest, UserProfile,
};
const CREATE_SCHEMA: &str = r#"
CREATE TABLE IF NOT EXISTS auth_user (
id INTEGER PRIMARY KEY AUTOINCREMENT,
email TEXT NOT NULL UNIQUE,
display_name TEXT NOT NULL,
profile_pic_url TEXT,
groups_json TEXT NOT NULL DEFAULT '[]',
created_at TEXT NOT NULL DEFAULT (datetime('now'))
);
CREATE TABLE IF NOT EXISTS auth_local_credential (
user_id INTEGER PRIMARY KEY REFERENCES auth_user(id) ON DELETE CASCADE,
username TEXT NOT NULL UNIQUE,
password_hash TEXT NOT NULL,
rotated_at TEXT NOT NULL DEFAULT (datetime('now'))
);
CREATE TABLE IF NOT EXISTS auth_session (
id TEXT PRIMARY KEY,
user_id INTEGER NOT NULL REFERENCES auth_user(id) ON DELETE CASCADE,
created_at TEXT NOT NULL DEFAULT (datetime('now')),
expires_at TEXT NOT NULL DEFAULT (datetime('now', '+7 days'))
);
CREATE TABLE IF NOT EXISTS control_node (
id INTEGER PRIMARY KEY AUTOINCREMENT,
stable_id TEXT NOT NULL UNIQUE,
user_id INTEGER NOT NULL REFERENCES auth_user(id) ON DELETE CASCADE,
name TEXT NOT NULL,
node_key TEXT NOT NULL UNIQUE,
machine_key TEXT,
disco_key TEXT,
addresses_json TEXT NOT NULL,
allowed_ips_json TEXT NOT NULL,
endpoints_json TEXT NOT NULL,
home_derp INTEGER,
hostinfo_json TEXT,
tags_json TEXT NOT NULL DEFAULT '[]',
primary_routes_json TEXT NOT NULL DEFAULT '[]',
cap_version INTEGER NOT NULL DEFAULT 1,
cap_map_json TEXT NOT NULL DEFAULT '{}',
peer_cap_map_json TEXT NOT NULL DEFAULT '{}',
machine_authorized INTEGER NOT NULL DEFAULT 1,
node_key_expired INTEGER NOT NULL DEFAULT 0,
created_at TEXT NOT NULL DEFAULT (datetime('now')),
updated_at TEXT NOT NULL DEFAULT (datetime('now')),
last_seen TEXT,
online INTEGER
);
"#;
#[derive(Clone, Debug)]
pub struct StoredUser {
pub profile: UserProfile,
}
pub fn init_db(path: &str) -> Result<()> {
let conn = Connection::open(path)?;
conn.execute_batch(CREATE_SCHEMA)?;
Ok(())
}
pub fn ensure_local_identity(
path: &str,
username: &str,
email: &str,
display_name: &str,
password: &str,
) -> Result<UserProfile> {
let conn = Connection::open(path)?;
conn.execute(
"INSERT INTO auth_user (email, display_name) VALUES (?, ?)
ON CONFLICT(email) DO UPDATE SET display_name = excluded.display_name",
params![email, display_name],
)?;
let user_id: i64 =
conn.query_row("SELECT id FROM auth_user WHERE email = ?", [email], |row| {
row.get(0)
})?;
let existing_hash: Option<String> = conn
.query_row(
"SELECT password_hash FROM auth_local_credential WHERE user_id = ?",
[user_id],
|row| row.get(0),
)
.optional()?;
let password_hash = match existing_hash {
Some(hash) if verify_password(password, &hash) => hash,
_ => hash_password(password)?,
};
conn.execute(
"INSERT INTO auth_local_credential (user_id, username, password_hash)
VALUES (?, ?, ?)
ON CONFLICT(user_id) DO UPDATE SET username = excluded.username, password_hash = excluded.password_hash, rotated_at = datetime('now')",
params![user_id, username, password_hash],
)?;
load_user_profile(&conn, user_id)
}
pub fn authenticate_local(
path: &str,
identifier: &str,
password: &str,
) -> Result<Option<LocalAuthResponse>> {
let conn = Connection::open(path)?;
let record = conn
.query_row(
"SELECT u.id, u.email, u.display_name, u.profile_pic_url, u.groups_json, c.password_hash
FROM auth_user u
JOIN auth_local_credential c ON c.user_id = u.id
WHERE c.username = ? OR u.email = ?",
params![identifier, identifier],
|row| {
Ok((
row.get::<_, i64>(0)?,
row.get::<_, String>(1)?,
row.get::<_, String>(2)?,
row.get::<_, Option<String>>(3)?,
row.get::<_, String>(4)?,
row.get::<_, String>(5)?,
))
},
)
.optional()?;
let Some((user_id, email, display_name, profile_pic_url, groups_json, password_hash)) = record
else {
return Ok(None);
};
if !verify_password(password, &password_hash) {
return Ok(None);
}
let token = random_token();
conn.execute(
"INSERT INTO auth_session (id, user_id) VALUES (?, ?)",
params![token, user_id],
)?;
Ok(Some(LocalAuthResponse {
access_token: token,
user: UserProfile {
id: user_id,
login_name: email,
display_name,
profile_pic_url,
groups: parse_json(&groups_json)?,
},
}))
}
pub fn user_for_session(path: &str, token: &str) -> Result<Option<StoredUser>> {
let conn = Connection::open(path)?;
let user_id = conn
.query_row(
"SELECT user_id FROM auth_session WHERE id = ? AND expires_at > datetime('now')",
[token],
|row| row.get::<_, i64>(0),
)
.optional()?;
let Some(user_id) = user_id else {
return Ok(None);
};
Ok(Some(load_user(&conn, user_id)?))
}
pub fn upsert_node(path: &str, user: &StoredUser, request: &RegisterRequest) -> Result<Node> {
let conn = Connection::open(path)?;
let existing = find_existing_node(&conn, user.profile.id, request)?;
let name = Node::preferred_name(request);
let allowed_ips = Node::normalized_allowed_ips(request);
match existing {
Some((node_id, stable_id, created_at)) => {
conn.execute(
"UPDATE control_node
SET name = ?, node_key = ?, machine_key = ?, disco_key = ?, addresses_json = ?, allowed_ips_json = ?,
endpoints_json = ?, home_derp = ?, hostinfo_json = ?, tags_json = ?, primary_routes_json = ?,
cap_version = ?, cap_map_json = ?, peer_cap_map_json = ?, updated_at = datetime('now'),
last_seen = datetime('now'), online = 1
WHERE id = ?",
params![
name,
request.node_key,
request.machine_key,
request.disco_key,
to_json(&request.addresses)?,
to_json(&allowed_ips)?,
to_json(&request.endpoints)?,
request.home_derp,
optional_json(&request.hostinfo)?,
to_json(&request.tags)?,
to_json(&request.primary_routes)?,
request.version.max(1),
to_json(&request.cap_map)?,
to_json(&request.peer_cap_map)?,
node_id,
],
)?;
load_node(&conn, node_id, stable_id, Some(created_at))
}
None => {
conn.execute(
"INSERT INTO control_node (
stable_id, user_id, name, node_key, machine_key, disco_key, addresses_json, allowed_ips_json,
endpoints_json, home_derp, hostinfo_json, tags_json, primary_routes_json, cap_version,
cap_map_json, peer_cap_map_json, last_seen, online
) VALUES ('', ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, datetime('now'), 1)",
params![
user.profile.id,
name,
request.node_key,
request.machine_key,
request.disco_key,
to_json(&request.addresses)?,
to_json(&allowed_ips)?,
to_json(&request.endpoints)?,
request.home_derp,
optional_json(&request.hostinfo)?,
to_json(&request.tags)?,
to_json(&request.primary_routes)?,
request.version.max(1),
to_json(&request.cap_map)?,
to_json(&request.peer_cap_map)?,
],
)?;
let node_id = conn.last_insert_rowid();
let stable_id = format!("bn-{node_id}");
conn.execute(
"UPDATE control_node SET stable_id = ? WHERE id = ?",
params![stable_id, node_id],
)?;
load_node(&conn, node_id, stable_id, None)
}
}
}
pub fn map_for_node(
path: &str,
user: &StoredUser,
request: &MapRequest,
domain: &str,
) -> Result<MapResponse> {
let conn = Connection::open(path)?;
apply_map_request(&conn, user.profile.id, request)?;
let self_row = conn
.query_row(
"SELECT id, stable_id, created_at FROM control_node WHERE user_id = ? AND node_key = ?",
params![user.profile.id, request.node_key],
|row| {
Ok((
row.get::<_, i64>(0)?,
row.get::<_, String>(1)?,
row.get::<_, String>(2)?,
))
},
)
.optional()?
.ok_or_else(|| anyhow!("node not registered"))?;
let node = load_node(&conn, self_row.0, self_row.1, Some(self_row.2))?;
let peers = load_peers(&conn, node.id)?;
Ok(MapResponse {
map_session_handle: Some(format!("map-{}", node.stable_id)),
seq: Some(request.map_session_seq.unwrap_or(0) + 1),
node,
peers,
domain: domain.to_owned(),
dns: Some(DnsConfig {
resolvers: vec!["1.1.1.1".to_owned(), "1.0.0.1".to_owned()],
search_domains: vec![domain.to_owned()],
magic_dns: true,
}),
packet_filters: vec![PacketFilter::default()],
})
}
pub static PATH: &str = "./server.sqlite3"; pub static PATH: &str = "./server.sqlite3";
fn apply_map_request(conn: &Connection, user_id: i64, request: &MapRequest) -> Result<()> { pub fn init_db() -> Result<()> {
let current = conn let conn = rusqlite::Connection::open(PATH)?;
.query_row(
"SELECT id FROM control_node WHERE user_id = ? AND node_key = ?",
params![user_id, request.node_key],
|row| row.get::<_, i64>(0),
)
.optional()?;
let Some(node_id) = current else {
return Ok(());
};
let hostinfo_json = optional_json(&request.hostinfo)?;
let endpoints_json = to_json(&request.endpoints)?;
conn.execute( conn.execute(
"UPDATE control_node "CREATE TABLE IF NOT EXISTS user (
SET disco_key = COALESCE(?, disco_key), id PRIMARY KEY,
hostinfo_json = CASE WHEN ? IS NULL THEN hostinfo_json ELSE ? END, created_at TEXT NOT NULL
endpoints_json = CASE WHEN ? = '[]' THEN endpoints_json ELSE ? END, )",
updated_at = datetime('now'), (),
last_seen = datetime('now'),
online = 1
WHERE id = ?",
params![
request.disco_key,
hostinfo_json,
hostinfo_json,
endpoints_json,
endpoints_json,
node_id,
],
)?; )?;
conn.execute(
"CREATE TABLE IF NOT EXISTS user_connection (
user_id INTEGER REFERENCES user(id) ON DELETE CASCADE,
openid_provider TEXT NOT NULL,
openid_user_id TEXT NOT NULL,
openid_user_name TEXT NOT NULL,
access_token TEXT NOT NULL,
refresh_token TEXT,
PRIMARY KEY (openid_provider, openid_user_id)
)",
(),
)?;
conn.execute(
"CREATE TABLE IF NOT EXISTS device (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
public_key TEXT NOT NULL,
apns_token TEXT UNIQUE,
user_id INT REFERENCES user(id) ON DELETE CASCADE,
created_at TEXT NOT NULL DEFAULT (datetime('now')) CHECK(created_at IS datetime(created_at)),
ipv4 TEXT NOT NULL UNIQUE,
ipv6 TEXT NOT NULL UNIQUE,
access_token TEXT NOT NULL UNIQUE,
refresh_token TEXT NOT NULL UNIQUE,
expires_at TEXT NOT NULL DEFAULT (datetime('now', '+7 days')) CHECK(expires_at IS datetime(expires_at))
)",
()
).unwrap();
Ok(()) Ok(())
} }
fn find_existing_node( pub fn store_connection(
conn: &Connection, openid_user: super::providers::OpenIdUser,
user_id: i64, openid_provider: &str,
request: &RegisterRequest, access_token: &str,
) -> Result<Option<(i64, String, String)>> { refresh_token: Option<&str>,
let mut candidates = vec![request.node_key.as_str()]; ) -> Result<()> {
if let Some(old) = request.old_node_key.as_deref() { log::debug!("Storing openid user {:#?}", openid_user);
if old != request.node_key { let conn = rusqlite::Connection::open(PATH)?;
candidates.push(old);
}
}
for candidate in candidates { conn.execute(
let hit = conn "INSERT OR IGNORE INTO user (id, created_at) VALUES (?, datetime('now'))",
.query_row( (&openid_user.sub,),
"SELECT id, stable_id, created_at FROM control_node WHERE user_id = ? AND node_key = ?",
params![user_id, candidate],
|row| {
Ok((
row.get::<_, i64>(0)?,
row.get::<_, String>(1)?,
row.get::<_, String>(2)?,
))
},
)
.optional()?;
if hit.is_some() {
return Ok(hit);
}
}
Ok(None)
}
fn load_peers(conn: &Connection, self_id: i64) -> Result<Vec<Node>> {
let mut stmt = conn.prepare(
"SELECT id, stable_id, created_at FROM control_node WHERE id != ? AND machine_authorized = 1 ORDER BY id",
)?; )?;
let peers = stmt conn.execute(
.query_map([self_id], |row| { "INSERT INTO user_connection (user_id, openid_provider, openid_user_id, openid_user_name, access_token, refresh_token) VALUES (
Ok(( (SELECT id FROM user WHERE id = ?),
row.get::<_, i64>(0)?, ?,
row.get::<_, String>(1)?, ?,
row.get::<_, String>(2)?, ?,
)) ?,
})? ?
.collect::<rusqlite::Result<Vec<_>>>()?; )",
peers (&openid_user.sub, &openid_provider, &openid_user.sub, &openid_user.name, access_token, refresh_token),
.into_iter()
.map(|(id, stable_id, created_at)| load_node(conn, id, stable_id, Some(created_at)))
.collect()
}
fn load_node(
conn: &Connection,
id: i64,
stable_id: String,
created_at_hint: Option<String>,
) -> Result<Node> {
let row = conn.query_row(
"SELECT user_id, name, node_key, machine_key, disco_key, addresses_json, allowed_ips_json,
endpoints_json, home_derp, hostinfo_json, tags_json, primary_routes_json, cap_version,
cap_map_json, peer_cap_map_json, machine_authorized, node_key_expired,
created_at, updated_at, last_seen, online
FROM control_node WHERE id = ?",
[id],
|row| {
Ok((
row.get::<_, i64>(0)?,
row.get::<_, String>(1)?,
row.get::<_, String>(2)?,
row.get::<_, Option<String>>(3)?,
row.get::<_, Option<String>>(4)?,
row.get::<_, String>(5)?,
row.get::<_, String>(6)?,
row.get::<_, String>(7)?,
row.get::<_, Option<i32>>(8)?,
row.get::<_, Option<String>>(9)?,
row.get::<_, String>(10)?,
row.get::<_, String>(11)?,
row.get::<_, i32>(12)?,
row.get::<_, String>(13)?,
row.get::<_, String>(14)?,
row.get::<_, i64>(15)?,
row.get::<_, i64>(16)?,
row.get::<_, String>(17)?,
row.get::<_, String>(18)?,
row.get::<_, Option<String>>(19)?,
row.get::<_, Option<i64>>(20)?,
))
},
)?; )?;
Ok(Node {
id, Ok(())
stable_id,
user_id: row.0,
name: row.1,
node_key: row.2,
machine_key: row.3,
disco_key: row.4,
addresses: parse_json(&row.5)?,
allowed_ips: parse_json(&row.6)?,
endpoints: parse_json(&row.7)?,
home_derp: row.8,
hostinfo: row.9.map(|raw| parse_json::<Hostinfo>(&raw)).transpose()?,
tags: parse_json(&row.10)?,
primary_routes: parse_json(&row.11)?,
cap_version: row.12,
cap_map: parse_json::<NodeCapMap>(&row.13)?,
peer_cap_map: parse_json::<PeerCapMap>(&row.14)?,
machine_authorized: row.15 != 0,
node_key_expired: row.16 != 0,
created_at: Some(created_at_hint.unwrap_or(row.17)),
updated_at: Some(row.18),
last_seen: row.19,
online: row.20.map(|value| value != 0),
})
} }
fn load_user(conn: &Connection, user_id: i64) -> Result<StoredUser> { pub fn store_device(
let profile = load_user_profile(conn, user_id)?; openid_user: super::providers::OpenIdUser,
Ok(StoredUser { profile }) openid_provider: &str,
} access_token: &str,
refresh_token: Option<&str>,
fn load_user_profile(conn: &Connection, user_id: i64) -> Result<UserProfile> { ) -> Result<()> {
let row = conn.query_row( log::debug!("Storing openid user {:#?}", openid_user);
"SELECT email, display_name, profile_pic_url, groups_json FROM auth_user WHERE id = ?", let conn = rusqlite::Connection::open(PATH)?;
[user_id],
|row| { // TODO
Ok((
row.get::<_, String>(0)?, Ok(())
row.get::<_, String>(1)?,
row.get::<_, Option<String>>(2)?,
row.get::<_, String>(3)?,
))
},
)?;
Ok(UserProfile {
id: user_id,
login_name: row.0,
display_name: row.1,
profile_pic_url: row.2,
groups: parse_json(&row.3)?,
})
}
fn hash_password(password: &str) -> Result<String> {
let salt = SaltString::generate(&mut argon2::password_hash::rand_core::OsRng);
let hash = Argon2::default()
.hash_password(password.as_bytes(), &salt)
.map_err(|err| anyhow!("failed to hash password: {err}"))?;
Ok(hash.to_string())
}
fn verify_password(password: &str, password_hash: &str) -> bool {
PasswordHash::new(password_hash)
.ok()
.and_then(|hash| {
Argon2::default()
.verify_password(password.as_bytes(), &hash)
.ok()
})
.is_some()
}
fn random_token() -> String {
let mut bytes = [0u8; 32];
rand::thread_rng().fill_bytes(&mut bytes);
general_purpose::URL_SAFE_NO_PAD.encode(bytes)
}
fn to_json<T: serde::Serialize>(value: &T) -> Result<String> {
serde_json::to_string(value).context("failed to serialize json")
}
fn optional_json<T: serde::Serialize>(value: &Option<T>) -> Result<Option<String>> {
value.as_ref().map(to_json).transpose()
}
fn parse_json<T: serde::de::DeserializeOwned>(value: &str) -> Result<T> {
serde_json::from_str(value)
.with_context(|| format!("failed to decode json payload from '{value}'"))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::control::{Hostinfo, RegisterRequest};
use tempfile::TempDir;
fn temp_db() -> Result<(TempDir, String)> {
let dir = tempfile::tempdir()?;
let db_path = dir.path().join("server.sqlite3");
Ok((dir, db_path.to_string_lossy().to_string()))
}
#[test]
fn local_auth_and_map_round_trip() -> Result<()> {
let (_dir, db_path) = temp_db()?;
init_db(&db_path)?;
ensure_local_identity(
&db_path,
"contact",
"contact@burrow.net",
"Burrow Contact",
"password-1",
)?;
let auth = authenticate_local(&db_path, "contact", "password-1")?
.expect("expected login to succeed");
let user =
user_for_session(&db_path, &auth.access_token)?.expect("expected session to resolve");
let node = upsert_node(
&db_path,
&user,
&RegisterRequest {
node_key: "nodekey:aaaa".to_owned(),
machine_key: Some("machinekey:aaaa".to_owned()),
disco_key: Some("discokey:aaaa".to_owned()),
addresses: vec!["100.64.0.1/32".to_owned()],
endpoints: vec!["203.0.113.10:41641".to_owned()],
hostinfo: Some(Hostinfo {
hostname: Some("burrow-dev".to_owned()),
os: Some("linux".to_owned()),
os_version: Some("6.13".to_owned()),
services: vec!["ssh".to_owned()],
request_tags: vec!["tag:dev".to_owned()],
}),
..RegisterRequest::default()
},
)?;
assert_eq!(node.name, "burrow-dev");
assert_eq!(node.allowed_ips, vec!["100.64.0.1/32"]);
let map = map_for_node(
&db_path,
&user,
&MapRequest {
node_key: "nodekey:aaaa".to_owned(),
stream: true,
endpoints: vec!["203.0.113.10:41641".to_owned()],
..MapRequest::default()
},
"burrow.net",
)?;
assert_eq!(map.node.node_key, "nodekey:aaaa");
assert_eq!(map.domain, "burrow.net");
assert!(map.dns.expect("dns config").magic_dns);
Ok(())
}
#[test]
fn register_can_rotate_node_keys() -> Result<()> {
let (_dir, db_path) = temp_db()?;
init_db(&db_path)?;
ensure_local_identity(
&db_path,
"contact",
"contact@burrow.net",
"Burrow Contact",
"password-1",
)?;
let auth = authenticate_local(&db_path, "contact@burrow.net", "password-1")?
.expect("expected login to succeed");
let user =
user_for_session(&db_path, &auth.access_token)?.expect("expected session to resolve");
upsert_node(
&db_path,
&user,
&RegisterRequest {
node_key: "nodekey:old".to_owned(),
addresses: vec!["100.64.0.2/32".to_owned()],
..RegisterRequest::default()
},
)?;
let rotated = upsert_node(
&db_path,
&user,
&RegisterRequest {
node_key: "nodekey:new".to_owned(),
old_node_key: Some("nodekey:old".to_owned()),
addresses: vec!["100.64.0.3/32".to_owned()],
..RegisterRequest::default()
},
)?;
assert_eq!(rotated.node_key, "nodekey:new");
assert_eq!(rotated.addresses, vec!["100.64.0.3/32"]);
Ok(())
}
} }

View file

@ -1,277 +1,32 @@
pub mod db; pub mod db;
pub mod tailscale; pub mod providers;
use std::{env, path::Path}; use anyhow::Result;
use axum::{http::StatusCode, routing::post, Router};
use anyhow::{Context, Result}; use providers::slack::auth;
use axum::{
extract::{Json, Path as AxumPath, State},
http::{header::AUTHORIZATION, HeaderMap, StatusCode},
response::IntoResponse,
routing::{get, post},
Router,
};
use tokio::signal; use tokio::signal;
use crate::control::{
LocalAuthRequest, LocalAuthResponse, MapRequest, MapResponse, RegisterRequest,
RegisterResponse, BURROW_TAILNET_DOMAIN,
};
#[derive(Clone, Debug)]
pub struct BootstrapIdentity {
pub username: String,
pub email: String,
pub display_name: String,
pub password_file: String,
}
impl Default for BootstrapIdentity {
fn default() -> Self {
Self {
username: "contact".to_owned(),
email: "contact@burrow.net".to_owned(),
display_name: "Burrow Contact".to_owned(),
password_file: "intake/forgejo_pass_contact_at_burrow_net.txt".to_owned(),
}
}
}
#[derive(Clone, Debug)]
pub struct AuthServerConfig {
pub listen: String,
pub db_path: String,
pub tailnet_domain: String,
pub bootstrap: BootstrapIdentity,
}
impl Default for AuthServerConfig {
fn default() -> Self {
Self {
listen: "0.0.0.0:8080".to_owned(),
db_path: db::PATH.to_owned(),
tailnet_domain: BURROW_TAILNET_DOMAIN.to_owned(),
bootstrap: BootstrapIdentity::default(),
}
}
}
impl AuthServerConfig {
pub fn from_env() -> Self {
let mut config = Self::default();
if let Ok(value) = env::var("BURROW_AUTH_LISTEN") {
config.listen = value;
}
if let Ok(value) = env::var("BURROW_AUTH_DB_PATH") {
config.db_path = value;
}
if let Ok(value) = env::var("BURROW_AUTH_TAILNET_DOMAIN") {
config.tailnet_domain = value;
}
if let Ok(value) = env::var("BURROW_BOOTSTRAP_USERNAME") {
config.bootstrap.username = value;
}
if let Ok(value) = env::var("BURROW_BOOTSTRAP_EMAIL") {
config.bootstrap.email = value;
}
if let Ok(value) = env::var("BURROW_BOOTSTRAP_DISPLAY_NAME") {
config.bootstrap.display_name = value;
}
if let Ok(value) = env::var("BURROW_BOOTSTRAP_PASSWORD_FILE") {
config.bootstrap.password_file = value;
}
config
}
fn bootstrap_password(&self) -> Result<Option<String>> {
let path = Path::new(&self.bootstrap.password_file);
if !path.exists() {
return Ok(None);
}
let password = std::fs::read_to_string(path).with_context(|| {
format!("failed to read bootstrap password from {}", path.display())
})?;
let password = password.trim().to_owned();
if password.is_empty() {
return Ok(None);
}
Ok(Some(password))
}
}
#[derive(Clone)]
struct AppState {
config: AuthServerConfig,
tailscale: tailscale::TailscaleBridgeManager,
}
type AppResult<T> = Result<T, (StatusCode, String)>;
pub async fn serve() -> Result<()> { pub async fn serve() -> Result<()> {
serve_with_config(AuthServerConfig::from_env()).await db::init_db()?;
}
pub async fn serve_with_config(config: AuthServerConfig) -> Result<()> { let app = Router::new()
db::init_db(&config.db_path)?; .route("/slack-auth", post(auth))
if let Some(password) = config.bootstrap_password()? { .route("/device/new", post(device_new));
db::ensure_local_identity(
&config.db_path,
&config.bootstrap.username,
&config.bootstrap.email,
&config.bootstrap.display_name,
&password,
)?;
}
let app = build_router(config.clone()); let listener = tokio::net::TcpListener::bind("0.0.0.0:8080").await.unwrap();
let listener = tokio::net::TcpListener::bind(&config.listen).await?; log::info!("Starting auth server on port 8080");
log::info!("Starting auth server on {}", config.listen);
axum::serve(listener, app) axum::serve(listener, app)
.with_graceful_shutdown(shutdown_signal()) .with_graceful_shutdown(shutdown_signal())
.await?; .await
.unwrap();
Ok(()) Ok(())
} }
pub fn build_router(config: AuthServerConfig) -> Router { async fn device_new() -> StatusCode {
Router::new()
.route("/healthz", get(healthz))
.route("/device/new", post(device_new))
.route("/v1/auth/login", post(login_local))
.route("/v1/control/register", post(control_register))
.route("/v1/control/map", post(control_map))
.route("/v1/tailscale/login/start", post(tailscale_login_start))
.route("/v1/tailscale/login/:session_id", get(tailscale_login_status))
.with_state(AppState {
config,
tailscale: tailscale::TailscaleBridgeManager::default(),
})
}
async fn login_local(
State(state): State<AppState>,
Json(request): Json<LocalAuthRequest>,
) -> AppResult<Json<LocalAuthResponse>> {
let db_path = state.config.db_path.clone();
blocking(move || db::authenticate_local(&db_path, &request.identifier, &request.password))
.await?
.map(Json)
.ok_or_else(|| (StatusCode::UNAUTHORIZED, "invalid credentials".to_owned()))
}
async fn control_register(
headers: HeaderMap,
State(state): State<AppState>,
Json(request): Json<RegisterRequest>,
) -> AppResult<Json<RegisterResponse>> {
let token = bearer_token(&headers)?;
let db_path = state.config.db_path.clone();
let user = blocking({
let db_path = db_path.clone();
let token = token.clone();
move || db::user_for_session(&db_path, &token)
})
.await?
.ok_or_else(|| (StatusCode::UNAUTHORIZED, "unknown session".to_owned()))?;
let response_user = user.profile.clone();
let node = blocking(move || db::upsert_node(&db_path, &user, &request)).await?;
Ok(Json(RegisterResponse {
user: response_user,
machine_authorized: node.machine_authorized,
node_key_expired: node.node_key_expired,
auth_url: None,
error: None,
node,
}))
}
async fn control_map(
headers: HeaderMap,
State(state): State<AppState>,
Json(request): Json<MapRequest>,
) -> AppResult<Json<MapResponse>> {
let token = bearer_token(&headers)?;
let db_path = state.config.db_path.clone();
let domain = state.config.tailnet_domain.clone();
let user = blocking({
let db_path = db_path.clone();
let token = token.clone();
move || db::user_for_session(&db_path, &token)
})
.await?
.ok_or_else(|| (StatusCode::UNAUTHORIZED, "unknown session".to_owned()))?;
let response = blocking(move || db::map_for_node(&db_path, &user, &request, &domain)).await?;
Ok(Json(response))
}
async fn tailscale_login_start(
State(state): State<AppState>,
Json(request): Json<tailscale::TailscaleLoginStartRequest>,
) -> AppResult<Json<tailscale::TailscaleLoginStartResponse>> {
let response = state
.tailscale
.start_login(request)
.await
.map_err(internal_error)?;
Ok(Json(response))
}
// GET handler: reports the current status of a previously started Tailscale
// login session; 404 when the session id is unknown, 500 on helper errors.
async fn tailscale_login_status(
    AxumPath(session_id): AxumPath<String>,
    State(state): State<AppState>,
) -> AppResult<Json<tailscale::TailscaleLoginStatus>> {
    state
        .tailscale
        .status(&session_id)
        .await
        .map_err(internal_error)?
        .map(Json)
        .ok_or_else(|| (StatusCode::NOT_FOUND, "unknown tailscale login session".to_owned()))
}
async fn healthz() -> impl IntoResponse {
StatusCode::OK StatusCode::OK
} }
// Placeholder device-registration endpoint; currently always replies 200 OK.
async fn device_new() -> impl IntoResponse {
    StatusCode::OK
}
/// Runs synchronous `work` (typically SQLite access) on tokio's blocking
/// thread pool, mapping both join failures and `work`'s own error into the
/// handler-friendly `(StatusCode, String)` error type.
async fn blocking<F, T>(work: F) -> AppResult<T>
where
    F: FnOnce() -> Result<T> + Send + 'static,
    T: Send + 'static,
{
    let joined = tokio::task::spawn_blocking(work).await;
    // A join error (panic/cancellation inside the pool) is a server fault.
    let outcome = joined.map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
    outcome.map_err(internal_error)
}
// Maps any internal failure to a 500 response whose body is the error text.
fn internal_error(err: anyhow::Error) -> (StatusCode, String) {
    (StatusCode::INTERNAL_SERVER_ERROR, err.to_string())
}
/// Extracts the bearer token from the `Authorization` header.
///
/// Returns 401 when the header is absent, is not a bearer credential, or the
/// token is empty, and 400 when the header value is not valid ASCII/UTF-8.
///
/// Fix: RFC 7235 defines the auth-scheme as case-insensitive, so this now
/// accepts `bearer`/`BEARER` as well as `Bearer` (the old `strip_prefix`
/// match was case-sensitive), and rejects an empty token.
fn bearer_token(headers: &HeaderMap) -> AppResult<String> {
    let value = headers.get(AUTHORIZATION).ok_or_else(|| {
        (
            StatusCode::UNAUTHORIZED,
            "missing authorization header".to_owned(),
        )
    })?;
    let value = value.to_str().map_err(|_| {
        (
            StatusCode::BAD_REQUEST,
            "invalid authorization header".to_owned(),
        )
    })?;
    let unauthorized = || (StatusCode::UNAUTHORIZED, "expected bearer token".to_owned());
    let (scheme, token) = value.split_once(' ').ok_or_else(unauthorized)?;
    if !scheme.eq_ignore_ascii_case("Bearer") || token.is_empty() {
        return Err(unauthorized());
    }
    Ok(token.to_owned())
}
async fn shutdown_signal() { async fn shutdown_signal() {
let ctrl_c = async { let ctrl_c = async {
signal::ctrl_c() signal::ctrl_c()
@ -296,102 +51,12 @@ async fn shutdown_signal() {
} }
} }
#[cfg(test)]
mod tests {
    use super::*;
    use axum::{
        body::{to_bytes, Body},
        http::{Request, StatusCode},
    };
    use tempfile::tempdir;
    use tower::ServiceExt;

    // End-to-end exercise of the auth server router: bootstrap a local
    // identity, log in, register a node, then request a network map.
    #[tokio::test]
    async fn login_register_and_map_round_trip() -> Result<()> {
        // Write a bootstrap password file for the config to point at.
        let dir = tempdir()?;
        let password_file = dir.path().join("bootstrap-password.txt");
        std::fs::write(&password_file, "bootstrap-pass\n")?;
        let db_path = dir.path().join("server.sqlite3");
        let config = AuthServerConfig {
            listen: "127.0.0.1:0".to_owned(),
            db_path: db_path.to_string_lossy().to_string(),
            tailnet_domain: "burrow.net".to_owned(),
            bootstrap: BootstrapIdentity {
                password_file: password_file.to_string_lossy().to_string(),
                ..BootstrapIdentity::default()
            },
        };
        db::init_db(&config.db_path)?;
        let password = config.bootstrap_password()?.expect("bootstrap password");
        db::ensure_local_identity(
            &config.db_path,
            &config.bootstrap.username,
            &config.bootstrap.email,
            &config.bootstrap.display_name,
            &password,
        )?;
        let app = build_router(config);
        // Log in with the bootstrap credentials to obtain an access token.
        let response = app
            .clone()
            .oneshot(
                Request::post("/v1/auth/login")
                    .header("content-type", "application/json")
                    .body(Body::from(serde_json::to_vec(&LocalAuthRequest {
                        identifier: "contact".to_owned(),
                        password: "bootstrap-pass".to_owned(),
                    })?))?,
            )
            .await?;
        assert_eq!(response.status(), StatusCode::OK);
        let login: LocalAuthResponse =
            serde_json::from_slice(&to_bytes(response.into_body(), usize::MAX).await?)?;
        // Register a node using the session token from the login step.
        let response = app
            .clone()
            .oneshot(
                Request::post("/v1/control/register")
                    .header("content-type", "application/json")
                    .header("authorization", format!("Bearer {}", login.access_token))
                    .body(Body::from(serde_json::to_vec(&RegisterRequest {
                        node_key: "nodekey:1234".to_owned(),
                        machine_key: Some("machinekey:1234".to_owned()),
                        addresses: vec!["100.64.0.10/32".to_owned()],
                        endpoints: vec!["198.51.100.10:41641".to_owned()],
                        hostinfo: Some(crate::control::Hostinfo {
                            hostname: Some("devbox".to_owned()),
                            os: Some("linux".to_owned()),
                            os_version: Some("6.13".to_owned()),
                            services: vec!["ssh".to_owned()],
                            request_tags: vec!["tag:dev".to_owned()],
                        }),
                        ..RegisterRequest::default()
                    })?))?,
            )
            .await?;
        assert_eq!(response.status(), StatusCode::OK);
        // Fetch the network map and verify domain, node name, and MagicDNS.
        let response = app
            .oneshot(
                Request::post("/v1/control/map")
                    .header("content-type", "application/json")
                    .header("authorization", format!("Bearer {}", login.access_token))
                    .body(Body::from(serde_json::to_vec(&MapRequest {
                        node_key: "nodekey:1234".to_owned(),
                        stream: true,
                        endpoints: vec!["198.51.100.10:41641".to_owned()],
                        ..MapRequest::default()
                    })?))?,
            )
            .await?;
        assert_eq!(response.status(), StatusCode::OK);
        let map: MapResponse =
            serde_json::from_slice(&to_bytes(response.into_body(), usize::MAX).await?)?;
        assert_eq!(map.domain, "burrow.net");
        assert_eq!(map.node.name, "devbox");
        assert!(map.dns.expect("dns").magic_dns);
        Ok(())
    }
}

View file

@ -0,0 +1,8 @@
pub mod slack;

pub use super::db;

/// Minimal OpenID Connect user payload; deserialized from the provider's
/// userinfo response (`sub` is the stable subject id, `name` the display name).
#[derive(serde::Deserialize, Default, Debug)]
pub struct OpenIdUser {
    pub sub: String,
    pub name: String,
}

View file

@ -0,0 +1,102 @@
use anyhow::Result;
use axum::{
extract::Json,
http::StatusCode,
routing::{get, post},
};
use reqwest::header::AUTHORIZATION;
use serde::Deserialize;
use super::db::store_connection;
/// Request body for the Slack auth endpoint: an OAuth access token obtained
/// by the client, to be validated against Slack's OpenID userinfo endpoint.
#[derive(Deserialize)]
pub struct SlackToken {
    slack_token: String,
}
pub async fn auth(Json(payload): Json<SlackToken>) -> (StatusCode, String) {
let slack_user = match fetch_slack_user(&payload.slack_token).await {
Ok(user) => user,
Err(e) => {
log::error!("Failed to fetch Slack user: {:?}", e);
return (StatusCode::UNAUTHORIZED, String::new());
}
};
log::info!(
"Slack user {} ({}) logged in.",
slack_user.name,
slack_user.sub
);
let conn = match store_connection(slack_user, "slack", &payload.slack_token, None) {
Ok(user) => user,
Err(e) => {
log::error!("Failed to fetch Slack user: {:?}", e);
return (StatusCode::UNAUTHORIZED, String::new());
}
};
(StatusCode::OK, String::new())
}
/// Calls Slack's OpenID userinfo endpoint with the given bearer token and
/// deserializes the result into an `OpenIdUser`.
///
/// Slack wraps errors in a JSON object with `"ok": false`; any missing,
/// non-boolean, or false `ok` field is treated as failure.
async fn fetch_slack_user(access_token: &str) -> Result<super::OpenIdUser> {
    let response = reqwest::Client::new()
        .get("https://slack.com/api/openid.connect.userInfo")
        .header(AUTHORIZATION, format!("Bearer {}", access_token))
        .send()
        .await?
        .json::<serde_json::Value>()
        .await?;
    match response.get("ok").and_then(serde_json::Value::as_bool) {
        Some(true) => Ok(serde_json::from_value(response)?),
        Some(false) => Err(anyhow::anyhow!("Slack user object not ok!")),
        None => Err(anyhow::anyhow!("Slack user object not ok!")),
    }
}
// async fn fetch_save_slack_user_data(query: Query<CallbackQuery>) -> anyhow::Result<()> {
// let client = reqwest::Client::new();
// log::trace!("Code was {}", &query.code);
// let mut url = Url::parse("https://slack.com/api/openid.connect.token")?;
// {
// let mut q = url.query_pairs_mut();
// q.append_pair("client_id", &var("CLIENT_ID")?);
// q.append_pair("client_secret", &var("CLIENT_SECRET")?);
// q.append_pair("code", &query.code);
// q.append_pair("grant_type", "authorization_code");
// q.append_pair("redirect_uri", "https://burrow.rs/callback");
// }
// let data = client
// .post(url)
// .send()
// .await?
// .json::<slack::CodeExchangeResponse>()
// .await?;
// if !data.ok {
// return Err(anyhow::anyhow!("Slack code exchange response not ok!"));
// }
// if let Some(access_token) = data.access_token {
// log::trace!("Access token is {access_token}");
// let user = slack::fetch_slack_user(&access_token)
// .await
// .map_err(|err| anyhow::anyhow!("Failed to fetch Slack user info {:#?}", err))?;
// db::store_user(user, access_token, String::new())
// .map_err(|_| anyhow::anyhow!("Failed to store user in db"))?;
// Ok(())
// } else {
// Err(anyhow::anyhow!("Access token not found in response"))
// }
// }

View file

@ -1,320 +0,0 @@
use std::{
collections::HashMap,
env,
path::{Path, PathBuf},
process::Stdio,
sync::Arc,
time::Duration,
};
use anyhow::{anyhow, Context, Result};
use rand::RngCore;
use reqwest::Client;
use serde::{Deserialize, Serialize};
use tokio::{
io::{AsyncBufReadExt, BufReader},
process::{Child, Command},
sync::Mutex,
task::JoinHandle,
};
/// Parameters for starting a Tailscale login helper session; account and
/// identity names key the session, hostname/control URL are optional overrides.
#[derive(Clone, Debug, Default, Deserialize)]
pub struct TailscaleLoginStartRequest {
    pub account_name: String,
    pub identity_name: String,
    #[serde(default)]
    pub hostname: Option<String>,
    #[serde(default)]
    pub control_url: Option<String>,
}
/// Status snapshot reported by the login helper's `/status` endpoint
/// (backend state, interactive-auth URL, tailnet details, health warnings).
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
pub struct TailscaleLoginStatus {
    pub backend_state: String,
    // URL the user must visit to complete interactive login, when pending.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub auth_url: Option<String>,
    #[serde(default)]
    pub running: bool,
    #[serde(default)]
    pub needs_login: bool,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tailnet_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub magic_dns_suffix: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub self_dns_name: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub tailscale_ips: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub health: Vec<String>,
}
/// Response to a login-start request: the session id for later status polling
/// plus the helper's status at startup.
#[derive(Clone, Debug, Serialize)]
pub struct TailscaleLoginStartResponse {
    pub session_id: String,
    pub status: TailscaleLoginStatus,
}
/// Owns helper subprocess sessions keyed by "account:identity"; `Clone`
/// shares the same session map via `Arc`.
#[derive(Clone, Default)]
pub struct TailscaleBridgeManager {
    client: Client,
    sessions: Arc<Mutex<HashMap<String, Arc<ManagedSession>>>>,
}
/// A running login-helper subprocess: its public id, local HTTP base URL,
/// state directory, child handle, and the task forwarding its stderr to logs.
struct ManagedSession {
    session_id: String,
    listen_url: String,
    state_dir: PathBuf,
    child: Arc<Mutex<Child>>,
    // Held so the stderr-forwarding task lives as long as the session.
    _stderr_task: JoinHandle<()>,
}
/// First JSON line the helper prints on startup, announcing where it listens.
#[derive(Debug, Deserialize)]
struct HelperHello {
    listen_addr: String,
}
impl TailscaleBridgeManager {
    /// Starts (or reuses) a login helper for the given account/identity pair
    /// and returns the session id plus the helper's current status.
    pub async fn start_login(
        &self,
        request: TailscaleLoginStartRequest,
    ) -> Result<TailscaleLoginStartResponse> {
        // One helper per account/identity: reuse an existing session if present.
        let key = session_key(&request.account_name, &request.identity_name);
        if let Some(existing) = self.sessions.lock().await.get(&key).cloned() {
            let status = self.fetch_status(existing.as_ref()).await?;
            return Ok(TailscaleLoginStartResponse {
                session_id: existing.session_id.clone(),
                status,
            });
        }
        // NOTE(review): the sessions lock is released between the lookup above
        // and the insert at the bottom, so two concurrent calls for the same
        // key can each spawn a helper and one will overwrite the other —
        // confirm callers serialize start_login per identity.
        let state_dir = state_root().join(session_dir_name(&request));
        tokio::fs::create_dir_all(&state_dir)
            .await
            .with_context(|| format!("failed to create {}", state_dir.display()))?;
        let mut child = helper_command(&request, &state_dir)?
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .context("failed to spawn tailscale login helper")?;
        let stdout = child
            .stdout
            .take()
            .context("tailscale helper stdout unavailable")?;
        let stderr = child
            .stderr
            .take()
            .context("tailscale helper stderr unavailable")?;
        // The helper prints a single JSON "hello" line with its listen address.
        let hello_line = tokio::time::timeout(Duration::from_secs(20), async move {
            let mut lines = BufReader::new(stdout).lines();
            lines.next_line().await
        })
        .await
        .context("timed out waiting for tailscale helper startup")??
        .context("tailscale helper exited before reporting listen address")?;
        let hello: HelperHello =
            serde_json::from_str(&hello_line).context("invalid tailscale helper startup line")?;
        // Forward helper stderr into our logs for the lifetime of the process.
        let stderr_task = tokio::spawn(async move {
            let mut lines = BufReader::new(stderr).lines();
            while let Ok(Some(line)) = lines.next_line().await {
                log::info!("tailscale-login-bridge: {line}");
            }
        });
        let session = Arc::new(ManagedSession {
            session_id: random_session_id(),
            listen_url: format!("http://{}", hello.listen_addr),
            state_dir,
            child: Arc::new(Mutex::new(child)),
            _stderr_task: stderr_task,
        });
        let status = self.wait_for_status(session.as_ref()).await?;
        let response = TailscaleLoginStartResponse {
            session_id: session.session_id.clone(),
            status,
        };
        self.sessions.lock().await.insert(key, session);
        Ok(response)
    }

    /// Looks up a session by its public id and fetches its current status;
    /// `None` when the id is unknown.
    pub async fn status(&self, session_id: &str) -> Result<Option<TailscaleLoginStatus>> {
        // Clone the Arc out so the sessions lock isn't held across the HTTP call.
        let session = {
            let sessions = self.sessions.lock().await;
            sessions
                .values()
                .find(|session| session.session_id == session_id)
                .cloned()
        };
        match session {
            Some(session) => self.fetch_status(session.as_ref()).await.map(Some),
            None => Ok(None),
        }
    }

    /// Polls the helper until it is running or exposes an auth URL, falling
    /// back to the last observed status, then the last error.
    async fn wait_for_status(&self, session: &ManagedSession) -> Result<TailscaleLoginStatus> {
        let mut last_error = None;
        let mut last_status = None;
        // 40 attempts x 250ms = up to ~10 seconds of polling.
        for _ in 0..40 {
            match self.fetch_status(session).await {
                Ok(status) if status.running || status.auth_url.is_some() => return Ok(status),
                Ok(status) => last_status = Some(status),
                Err(err) => last_error = Some(err),
            }
            tokio::time::sleep(Duration::from_millis(250)).await;
        }
        if let Some(status) = last_status {
            return Ok(status);
        }
        Err(last_error.unwrap_or_else(|| anyhow!("tailscale helper did not become ready")))
    }

    /// Queries the helper's /status endpoint, failing fast if the helper
    /// process has already exited.
    async fn fetch_status(&self, session: &ManagedSession) -> Result<TailscaleLoginStatus> {
        let mut child = session.child.lock().await;
        if let Some(status) = child.try_wait()? {
            return Err(anyhow!(
                "tailscale helper exited with status {status} for {}",
                session.state_dir.display()
            ));
        }
        // Release the child lock before doing network I/O.
        drop(child);
        let response = self
            .client
            .get(format!("{}/status", session.listen_url))
            .send()
            .await
            .context("failed to query tailscale helper status")?
            .error_for_status()
            .context("tailscale helper status request failed")?;
        response
            .json::<TailscaleLoginStatus>()
            .await
            .context("invalid tailscale helper status response")
    }
}
/// Builds the command used to launch the Tailscale login helper.
///
/// Prefers an explicit binary from `BURROW_TAILSCALE_HELPER`; otherwise
/// `go run`s the bundled tool from the repository's Tools directory.
/// Appends listen/state-dir/hostname flags, plus `--control-url` when a
/// non-empty control URL was supplied.
fn helper_command(request: &TailscaleLoginStartRequest, state_dir: &Path) -> Result<Command> {
    let mut command = match env::var("BURROW_TAILSCALE_HELPER") {
        Ok(path) => Command::new(path),
        Err(_) => {
            let helper_dir = Path::new(env!("CARGO_MANIFEST_DIR"))
                .join("..")
                .join("Tools/tailscale-login-bridge");
            let mut go = Command::new("go");
            go.current_dir(helper_dir).arg("run").arg(".");
            // Keep `go run` out of any enclosing workspace.
            go.env("GOWORK", "off");
            go
        }
    };
    command
        .arg("--listen")
        .arg("127.0.0.1:0")
        .arg("--state-dir")
        .arg(state_dir)
        .arg("--hostname")
        .arg(default_hostname(request));
    match request.control_url.as_deref().map(str::trim) {
        Some(url) if !url.is_empty() => {
            command.arg("--control-url").arg(url);
        }
        _ => {}
    }
    Ok(command)
}
/// Root directory for helper state: the `BURROW_TAILSCALE_STATE_ROOT`
/// override if set, otherwise a platform-conventional per-user data dir
/// (Application Support on Apple targets, XDG-style `~/.local/share` elsewhere).
fn state_root() -> PathBuf {
    if let Ok(path) = env::var("BURROW_TAILSCALE_STATE_ROOT") {
        return PathBuf::from(path);
    }
    // Fall back to the current directory when HOME is unset.
    let home = match env::var_os("HOME") {
        Some(dir) => PathBuf::from(dir),
        None => PathBuf::from("."),
    };
    if cfg!(target_vendor = "apple") {
        home.join("Library")
            .join("Application Support")
            .join("Burrow")
            .join("tailscale")
    } else {
        home.join(".local")
            .join("share")
            .join("burrow")
            .join("tailscale")
    }
}
/// Stable, filesystem-safe directory name for a session, derived from the
/// slugged account and identity names ("account-identity").
fn session_dir_name(request: &TailscaleLoginStartRequest) -> String {
    let account = slug(&request.account_name);
    let identity = slug(&request.identity_name);
    format!("{account}-{identity}")
}
/// Map key identifying a session: raw (unslugged) "account:identity".
fn session_key(account_name: &str, identity_name: &str) -> String {
    [account_name, identity_name].join(":")
}
/// Hostname passed to the helper: the caller's non-blank hostname if given,
/// otherwise "burrow-" plus the slugged identity name.
fn default_hostname(request: &TailscaleLoginStartRequest) -> String {
    match request.hostname.as_deref() {
        Some(name) if !name.trim().is_empty() => name.to_owned(),
        _ => format!("burrow-{}", slug(&request.identity_name)),
    }
}
/// Generates a random 24-hex-character session id (12 random bytes).
fn random_session_id() -> String {
    let mut bytes = [0_u8; 12];
    rand::thread_rng().fill_bytes(&mut bytes);
    let mut id = String::with_capacity(bytes.len() * 2);
    for byte in bytes {
        id.push_str(&format!("{byte:02x}"));
    }
    id
}
/// Lowercases ASCII alphanumerics, maps '-'/'_' to '-', and drops every
/// other character; returns "default" when nothing survives.
fn slug(input: &str) -> String {
    let cleaned: String = input
        .chars()
        .filter_map(|ch| {
            if ch.is_ascii_alphanumeric() {
                Some(ch.to_ascii_lowercase())
            } else if ch == '-' || ch == '_' {
                Some('-')
            } else {
                None
            }
        })
        .collect();
    if cleaned.is_empty() {
        "default".to_owned()
    } else {
        cleaned
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn slug_sanitizes_input() {
        // Spaces are dropped, '_' maps to '-', empty input falls back to "default".
        assert_eq!(slug("Apple Phone"), "applephone");
        assert_eq!(slug("default_identity"), "default-identity");
        assert_eq!(slug(""), "default");
    }

    #[test]
    fn state_dir_is_stable_by_account_and_identity() {
        let request = TailscaleLoginStartRequest {
            account_name: "default".to_owned(),
            identity_name: "apple".to_owned(),
            hostname: None,
            control_url: None,
        };
        assert_eq!(session_dir_name(&request), "default-apple");
        assert_eq!(default_hostname(&request), "burrow-apple");
    }
}

View file

@ -1,87 +0,0 @@
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
/// Which control-plane backend a tailnet configuration targets; serialized
/// in snake_case ("tailscale", "headscale", "burrow").
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum TailnetProvider {
    Tailscale,
    Headscale,
    Burrow,
}

impl Default for TailnetProvider {
    // Tailscale is assumed when no provider is specified.
    fn default() -> Self {
        Self::Tailscale
    }
}
/// User-supplied tailnet configuration; all fields beyond the provider are
/// optional and omitted from serialized output when absent.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct TailnetConfig {
    #[serde(default)]
    pub provider: TailnetProvider,
    // Control server base URL (e.g. a Headscale instance).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub authority: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub account: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tailnet: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub hostname: Option<String>,
}
impl TailnetConfig {
    /// Parses a tailnet payload from raw bytes; the bytes must be valid UTF-8.
    pub fn from_slice(bytes: &[u8]) -> Result<Self> {
        let payload = std::str::from_utf8(bytes).context("tailnet payload must be valid UTF-8")?;
        Self::from_str(payload)
    }

    /// Parses a payload as JSON when it looks like a JSON object (leading
    /// '{' after trimming), otherwise as TOML.
    pub fn from_str(payload: &str) -> Result<Self> {
        let trimmed = payload.trim();
        match trimmed.starts_with('{') {
            true => serde_json::from_str(trimmed).context("invalid tailnet JSON payload"),
            false => toml::from_str(trimmed).context("invalid tailnet TOML payload"),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // JSON payloads are detected via the leading '{'.
    #[test]
    fn parses_json_payload() {
        let config = TailnetConfig::from_str(
            r#"{
            "provider":"tailscale",
            "account":"default",
            "identity":"apple",
            "tailnet":"example.ts.net",
            "hostname":"burrow-phone"
        }"#,
        )
        .unwrap();
        assert_eq!(config.provider, TailnetProvider::Tailscale);
        assert_eq!(config.account.as_deref(), Some("default"));
        assert_eq!(config.identity.as_deref(), Some("apple"));
    }

    // Anything not starting with '{' is parsed as TOML.
    #[test]
    fn parses_toml_payload() {
        let config = TailnetConfig::from_str(
            r#"
            provider = "headscale"
            authority = "https://headscale.example.com"
            account = "default"
            identity = "apple"
        "#,
        )
        .unwrap();
        assert_eq!(config.provider, TailnetProvider::Headscale);
        assert_eq!(
            config.authority.as_deref(),
            Some("https://headscale.example.com")
        );
    }
}

View file

@ -1,253 +0,0 @@
pub mod config;

use std::collections::BTreeMap;

use serde::{Deserialize, Serialize};
use serde_json::Value;

pub use config::{TailnetConfig, TailnetProvider};

// Capability version advertised by this control-plane implementation.
pub const BURROW_CAPABILITY_VERSION: i32 = 1;
// Default tailnet domain used by the burrow control plane.
pub const BURROW_TAILNET_DOMAIN: &str = "burrow.net";

// Capability name -> opaque JSON capability payloads (node-wide / per-peer).
pub type NodeCapMap = BTreeMap<String, Vec<Value>>;
pub type PeerCapMap = BTreeMap<String, Vec<Value>>;
/// Host metadata a node reports at registration/map time.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct Hostinfo {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub hostname: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub os: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub os_version: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub services: Vec<String>,
    // Tags the node requests for itself during registration.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub request_tags: Vec<String>,
}
/// Public profile of a tailnet user, embedded in register/map responses.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct UserProfile {
    pub id: i64,
    pub login_name: String,
    pub display_name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub profile_pic_url: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub groups: Vec<String>,
}
/// Optional credentials a node may present at registration time (pre-shared
/// auth key or an OAuth access token).
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct RegisterAuth {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub auth_key: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub oauth_access_token: Option<String>,
}
/// A registered device on the tailnet as stored by the control plane and
/// returned in register/map responses.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct Node {
    pub id: i64,
    // Stable external identifier, unlike the numeric row id.
    pub stable_id: String,
    pub name: String,
    pub user_id: i64,
    pub node_key: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub machine_key: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub disco_key: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub addresses: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub allowed_ips: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub endpoints: Vec<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub home_derp: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub hostinfo: Option<Hostinfo>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub tags: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub primary_routes: Vec<String>,
    // Defaults to BURROW_CAPABILITY_VERSION when absent from the wire.
    #[serde(default = "default_capability_version")]
    pub cap_version: i32,
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub cap_map: NodeCapMap,
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub peer_cap_map: PeerCapMap,
    #[serde(default)]
    pub machine_authorized: bool,
    #[serde(default)]
    pub node_key_expired: bool,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub updated_at: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub last_seen: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub online: Option<bool>,
}
impl Node {
    /// Chooses a display name for a registering node: the explicit request
    /// name, then the hostinfo hostname, then "node-" plus a key prefix.
    pub fn preferred_name(request: &RegisterRequest) -> String {
        if let Some(name) = request.name.as_deref() {
            return name.to_owned();
        }
        let hostname = request
            .hostinfo
            .as_ref()
            .and_then(|hostinfo| hostinfo.hostname.as_deref());
        match hostname {
            Some(hostname) => hostname.to_owned(),
            None => format!("node-{}", short_key(&request.node_key)),
        }
    }

    /// Falls back to the node's addresses when no allowed IPs were supplied.
    pub fn normalized_allowed_ips(request: &RegisterRequest) -> Vec<String> {
        if request.allowed_ips.is_empty() {
            request.addresses.clone()
        } else {
            request.allowed_ips.clone()
        }
    }
}
/// Body of a node registration call; most fields are optional and omitted
/// from serialized output when empty/absent.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct RegisterRequest {
    #[serde(default = "default_capability_version")]
    pub version: i32,
    pub node_key: String,
    // Set when rotating an existing node key.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub old_node_key: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub machine_key: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub disco_key: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub auth: Option<RegisterAuth>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub expiry: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub followup: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub hostinfo: Option<Hostinfo>,
    #[serde(default)]
    pub ephemeral: bool,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tailnet: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub addresses: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub allowed_ips: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub endpoints: Vec<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub home_derp: Option<i32>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub tags: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub primary_routes: Vec<String>,
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub cap_map: NodeCapMap,
    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
    pub peer_cap_map: PeerCapMap,
}
/// Result of a registration: the owning user, the stored node, and whether
/// the machine is authorized / the node key has expired.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct RegisterResponse {
    pub user: UserProfile,
    pub node: Node,
    // Interactive-auth URL, when further authorization is required.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub auth_url: Option<String>,
    pub machine_authorized: bool,
    pub node_key_expired: bool,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}
/// Body of a network-map request from a node.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct MapRequest {
    #[serde(default = "default_capability_version")]
    pub version: i32,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub compress: Option<String>,
    #[serde(default)]
    pub keep_alive: bool,
    pub node_key: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub disco_key: Option<String>,
    // Whether the client wants a long-lived streaming map session.
    #[serde(default)]
    pub stream: bool,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub hostinfo: Option<Hostinfo>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub map_session_handle: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub map_session_seq: Option<i64>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub endpoints: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub debug_flags: Vec<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub connection_handle: Option<String>,
}
/// DNS settings pushed to nodes in a map response.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct DnsConfig {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub resolvers: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub search_domains: Vec<String>,
    #[serde(default)]
    pub magic_dns: bool,
}
/// One packet-filter rule: allowed sources, destinations, and protocols.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct PacketFilter {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub sources: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub destinations: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub protocols: Vec<String>,
}
/// Network map returned to a node: its own record, its peers, the tailnet
/// domain, and optional DNS/filter configuration.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct MapResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub map_session_handle: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub seq: Option<i64>,
    pub node: Node,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub peers: Vec<Node>,
    pub domain: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub dns: Option<DnsConfig>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub packet_filters: Vec<PacketFilter>,
}
/// Credentials for local (username/email + password) login.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct LocalAuthRequest {
    pub identifier: String,
    pub password: String,
}
/// Successful local login: a bearer access token plus the user's profile.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct LocalAuthResponse {
    pub access_token: String,
    pub user: UserProfile,
}
// Serde default for capability-version fields absent from the wire.
fn default_capability_version() -> i32 {
    BURROW_CAPABILITY_VERSION
}
/// Returns at most the first eight characters of `key`; used to build
/// compact fallback node names.
fn short_key(key: &str) -> String {
    let mut prefix = String::new();
    for ch in key.chars().take(8) {
        prefix.push(ch);
    }
    prefix
}

View file

@ -1,27 +1,48 @@
use std::{ use std::{
ops::Deref,
path::{Path, PathBuf}, path::{Path, PathBuf},
sync::Arc, sync::Arc,
time::Duration,
}; };
use anyhow::Result; use anyhow::Result;
use rusqlite::Connection; use rusqlite::Connection;
use tokio::sync::{mpsc, watch, RwLock}; use tokio::sync::{mpsc, watch, Notify, RwLock};
use tokio_stream::wrappers::ReceiverStream; use tokio_stream::wrappers::ReceiverStream;
use tonic::{Request, Response, Status as RspStatus}; use tonic::{Request, Response, Status as RspStatus};
use tracing::warn; use tracing::{debug, info, warn};
use tun::tokio::TunInterface; use tun::{tokio::TunInterface, TunOptions};
use super::{ use super::rpc::grpc_defs::{
rpc::grpc_defs::{ networks_server::Networks,
networks_server::Networks, tunnel_server::Tunnel, Empty, Network, NetworkDeleteRequest, tunnel_server::Tunnel,
NetworkListResponse, NetworkReorderRequest, State as RPCTunnelState, Empty,
TunnelConfigurationResponse, TunnelStatusResponse, Network,
}, NetworkDeleteRequest,
runtime::{ActiveTunnel, ResolvedTunnel}, NetworkListResponse,
NetworkReorderRequest,
State as RPCTunnelState,
TunnelConfigurationResponse,
TunnelStatusResponse,
}; };
use crate::{ use crate::{
daemon::rpc::ServerConfig, daemon::rpc::{
database::{add_network, delete_network, get_connection, list_networks, reorder_network}, DaemonCommand,
DaemonNotification,
DaemonResponse,
DaemonResponseData,
ServerConfig,
ServerInfo,
},
database::{
add_network,
delete_network,
get_connection,
list_networks,
load_interface,
reorder_network,
},
wireguard::{Config, Interface},
}; };
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@ -31,10 +52,10 @@ enum RunState {
} }
impl RunState { impl RunState {
fn to_rpc(&self) -> RPCTunnelState { pub fn to_rpc(&self) -> RPCTunnelState {
match self { match self {
Self::Running => RPCTunnelState::Running, RunState::Running => RPCTunnelState::Running,
Self::Idle => RPCTunnelState::Stopped, RunState::Idle => RPCTunnelState::Stopped,
} }
} }
} }
@ -42,24 +63,30 @@ impl RunState {
#[derive(Clone)] #[derive(Clone)]
pub struct DaemonRPCServer { pub struct DaemonRPCServer {
tun_interface: Arc<RwLock<Option<TunInterface>>>, tun_interface: Arc<RwLock<Option<TunInterface>>>,
wg_interface: Arc<RwLock<Interface>>,
config: Arc<RwLock<Config>>,
db_path: Option<PathBuf>, db_path: Option<PathBuf>,
wg_state_chan: (watch::Sender<RunState>, watch::Receiver<RunState>), wg_state_chan: (watch::Sender<RunState>, watch::Receiver<RunState>),
network_update_chan: (watch::Sender<()>, watch::Receiver<()>), network_update_chan: (watch::Sender<()>, watch::Receiver<()>),
active_tunnel: Arc<RwLock<Option<ActiveTunnel>>>,
} }
impl DaemonRPCServer { impl DaemonRPCServer {
pub fn new(db_path: Option<&Path>) -> Result<Self> { pub fn new(
wg_interface: Arc<RwLock<Interface>>,
config: Arc<RwLock<Config>>,
db_path: Option<&Path>,
) -> Result<Self> {
Ok(Self { Ok(Self {
tun_interface: Arc::new(RwLock::new(None)), tun_interface: Arc::new(RwLock::new(None)),
db_path: db_path.map(Path::to_owned), wg_interface,
config,
db_path: db_path.map(|p| p.to_owned()),
wg_state_chan: watch::channel(RunState::Idle), wg_state_chan: watch::channel(RunState::Idle),
network_update_chan: watch::channel(()), network_update_chan: watch::channel(()),
active_tunnel: Arc::new(RwLock::new(None)),
}) })
} }
fn get_connection(&self) -> Result<Connection, RspStatus> { pub fn get_connection(&self) -> Result<Connection, RspStatus> {
get_connection(self.db_path.as_deref()).map_err(proc_err) get_connection(self.db_path.as_deref()).map_err(proc_err)
} }
@ -67,66 +94,13 @@ impl DaemonRPCServer {
self.wg_state_chan.0.send(state).map_err(proc_err) self.wg_state_chan.0.send(state).map_err(proc_err)
} }
async fn get_wg_state(&self) -> RunState {
self.wg_state_chan.1.borrow().to_owned()
}
async fn notify_network_update(&self) -> Result<(), RspStatus> { async fn notify_network_update(&self) -> Result<(), RspStatus> {
self.network_update_chan.0.send(()).map_err(proc_err) self.network_update_chan.0.send(()).map_err(proc_err)
} }
async fn resolve_tunnel(&self) -> Result<ResolvedTunnel, RspStatus> {
let conn = self.get_connection()?;
let networks = list_networks(&conn).map_err(proc_err)?;
ResolvedTunnel::from_networks(&networks).map_err(proc_err)
}
async fn current_tunnel_configuration(&self) -> Result<TunnelConfigurationResponse, RspStatus> {
let config = self
.resolve_tunnel()
.await?
.server_config()
.map_err(proc_err)?;
Ok(configuration_rsp(config))
}
async fn stop_active_tunnel(&self) -> Result<bool, RspStatus> {
let current = { self.active_tunnel.write().await.take() };
let Some(current) = current else {
return Ok(false);
};
current
.shutdown(&self.tun_interface)
.await
.map_err(proc_err)?;
self.set_wg_state(RunState::Idle).await?;
Ok(true)
}
async fn replace_active_tunnel(&self, desired: ResolvedTunnel) -> Result<(), RspStatus> {
let _ = self.stop_active_tunnel().await?;
let active = desired
.start(self.tun_interface.clone())
.await
.map_err(proc_err)?;
self.active_tunnel.write().await.replace(active);
self.set_wg_state(RunState::Running).await?;
Ok(())
}
async fn reconcile_runtime(&self) -> Result<(), RspStatus> {
let desired = self.resolve_tunnel().await?;
let needs_restart = {
let guard = self.active_tunnel.read().await;
guard
.as_ref()
.map(|active| active.identity() != desired.identity())
.unwrap_or(false)
};
if needs_restart {
self.replace_active_tunnel(desired).await?;
}
Ok(())
}
} }
#[tonic::async_trait] #[tonic::async_trait]
@ -139,46 +113,55 @@ impl Tunnel for DaemonRPCServer {
_request: Request<Empty>, _request: Request<Empty>,
) -> Result<Response<Self::TunnelConfigurationStream>, RspStatus> { ) -> Result<Response<Self::TunnelConfigurationStream>, RspStatus> {
let (tx, rx) = mpsc::channel(10); let (tx, rx) = mpsc::channel(10);
let server = self.clone();
let mut sub = self.network_update_chan.1.clone();
tokio::spawn(async move { tokio::spawn(async move {
loop { let serv_config = ServerConfig::default();
let response = server.current_tunnel_configuration().await; tx.send(Ok(TunnelConfigurationResponse {
if tx.send(response).await.is_err() { mtu: serv_config.mtu.unwrap_or(1000),
break; addresses: serv_config.address,
} }))
if sub.changed().await.is_err() { .await
break;
}
}
}); });
Ok(Response::new(ReceiverStream::new(rx))) Ok(Response::new(ReceiverStream::new(rx)))
} }
async fn tunnel_start(&self, _request: Request<Empty>) -> Result<Response<Empty>, RspStatus> { async fn tunnel_start(&self, _request: Request<Empty>) -> Result<Response<Empty>, RspStatus> {
let desired = self.resolve_tunnel().await?; let wg_state = self.get_wg_state().await;
let already_running = { match wg_state {
let guard = self.active_tunnel.read().await; RunState::Idle => {
guard let tun_if = TunOptions::new().open()?;
.as_ref() debug!("Setting tun on wg_interface");
.map(|active| active.identity() == desired.identity()) self.tun_interface.write().await.replace(tun_if);
.unwrap_or(false) self.wg_interface
}; .write()
.await
.set_tun_ref(self.tun_interface.clone())
.await;
debug!("tun set on wg_interface");
if already_running { debug!("Setting tun_interface");
warn!("Got start, but active tunnel already matches desired network."); debug!("tun_interface set: {:?}", self.tun_interface);
return Ok(Response::new(Empty {}));
debug!("Cloning wg_interface");
let tmp_wg = self.wg_interface.clone();
let run_task = tokio::spawn(async move {
let twlock = tmp_wg.read().await;
twlock.run().await
});
self.set_wg_state(RunState::Running).await?;
}
RunState::Running => {
warn!("Got start, but tun interface already up.");
}
} }
self.replace_active_tunnel(desired).await?; return Ok(Response::new(Empty {}));
Ok(Response::new(Empty {}))
} }
async fn tunnel_stop(&self, _request: Request<Empty>) -> Result<Response<Empty>, RspStatus> { async fn tunnel_stop(&self, _request: Request<Empty>) -> Result<Response<Empty>, RspStatus> {
let _ = self.stop_active_tunnel().await?; self.wg_interface.write().await.remove_tun().await;
Ok(Response::new(Empty {})) self.set_wg_state(RunState::Idle).await?;
return Ok(Response::new(Empty {}));
} }
async fn tunnel_status( async fn tunnel_status(
@ -189,16 +172,13 @@ impl Tunnel for DaemonRPCServer {
let mut state_rx = self.wg_state_chan.1.clone(); let mut state_rx = self.wg_state_chan.1.clone();
tokio::spawn(async move { tokio::spawn(async move {
let cur = state_rx.borrow_and_update().to_owned(); let cur = state_rx.borrow_and_update().to_owned();
if tx.send(Ok(status_rsp(cur))).await.is_err() { tx.send(Ok(status_rsp(cur))).await;
return;
}
loop { loop {
if state_rx.changed().await.is_err() { state_rx.changed().await.unwrap();
break;
}
let cur = state_rx.borrow().to_owned(); let cur = state_rx.borrow().to_owned();
if tx.send(Ok(status_rsp(cur))).await.is_err() { let res = tx.send(Ok(status_rsp(cur))).await;
if res.is_err() {
eprintln!("Tunnel status channel closed");
break; break;
} }
} }
@ -216,7 +196,6 @@ impl Networks for DaemonRPCServer {
let network = request.into_inner(); let network = request.into_inner();
add_network(&conn, &network).map_err(proc_err)?; add_network(&conn, &network).map_err(proc_err)?;
self.notify_network_update().await?; self.notify_network_update().await?;
self.reconcile_runtime().await?;
Ok(Response::new(Empty {})) Ok(Response::new(Empty {}))
} }
@ -224,6 +203,7 @@ impl Networks for DaemonRPCServer {
&self, &self,
_request: Request<Empty>, _request: Request<Empty>,
) -> Result<Response<Self::NetworkListStream>, RspStatus> { ) -> Result<Response<Self::NetworkListStream>, RspStatus> {
debug!("Mock network_list called");
let (tx, rx) = mpsc::channel(10); let (tx, rx) = mpsc::channel(10);
let conn = self.get_connection()?; let conn = self.get_connection()?;
let mut sub = self.network_update_chan.1.clone(); let mut sub = self.network_update_chan.1.clone();
@ -232,12 +212,12 @@ impl Networks for DaemonRPCServer {
let networks = list_networks(&conn) let networks = list_networks(&conn)
.map(|res| NetworkListResponse { network: res }) .map(|res| NetworkListResponse { network: res })
.map_err(proc_err); .map_err(proc_err);
if tx.send(networks).await.is_err() { let res = tx.send(networks).await;
break; if res.is_err() {
} eprintln!("Network list channel closed");
if sub.changed().await.is_err() {
break; break;
} }
sub.changed().await.unwrap();
} }
}); });
Ok(Response::new(ReceiverStream::new(rx))) Ok(Response::new(ReceiverStream::new(rx)))
@ -250,7 +230,6 @@ impl Networks for DaemonRPCServer {
let conn = self.get_connection()?; let conn = self.get_connection()?;
reorder_network(&conn, request.into_inner()).map_err(proc_err)?; reorder_network(&conn, request.into_inner()).map_err(proc_err)?;
self.notify_network_update().await?; self.notify_network_update().await?;
self.reconcile_runtime().await?;
Ok(Response::new(Empty {})) Ok(Response::new(Empty {}))
} }
@ -261,7 +240,6 @@ impl Networks for DaemonRPCServer {
let conn = self.get_connection()?; let conn = self.get_connection()?;
delete_network(&conn, request.into_inner()).map_err(proc_err)?; delete_network(&conn, request.into_inner()).map_err(proc_err)?;
self.notify_network_update().await?; self.notify_network_update().await?;
self.reconcile_runtime().await?;
Ok(Response::new(Empty {})) Ok(Response::new(Empty {}))
} }
} }
@ -270,13 +248,6 @@ fn proc_err(err: impl ToString) -> RspStatus {
RspStatus::internal(err.to_string()) RspStatus::internal(err.to_string())
} }
fn configuration_rsp(config: ServerConfig) -> TunnelConfigurationResponse {
TunnelConfigurationResponse {
mtu: config.mtu.unwrap_or(1000),
addresses: config.address,
}
}
fn status_rsp(state: RunState) -> TunnelStatusResponse { fn status_rsp(state: RunState) -> TunnelStatusResponse {
TunnelStatusResponse { TunnelStatusResponse {
state: state.to_rpc().into(), state: state.to_rpc().into(),

View file

@ -4,20 +4,23 @@ pub mod apple;
mod instance; mod instance;
mod net; mod net;
pub mod rpc; pub mod rpc;
mod runtime;
use anyhow::{Error as AhError, Result}; use anyhow::{Error as AhError, Result};
use instance::DaemonRPCServer; use instance::DaemonRPCServer;
pub use net::{get_socket_path, DaemonClient}; pub use net::{get_socket_path, DaemonClient};
pub use rpc::{DaemonCommand, DaemonResponseData, DaemonStartOptions}; pub use rpc::{DaemonCommand, DaemonResponseData, DaemonStartOptions};
use tokio::{net::UnixListener, sync::Notify}; use tokio::{
net::UnixListener,
sync::{Notify, RwLock},
};
use tokio_stream::wrappers::UnixListenerStream; use tokio_stream::wrappers::UnixListenerStream;
use tonic::transport::Server; use tonic::transport::Server;
use tracing::info; use tracing::{error, info};
use crate::{ use crate::{
daemon::rpc::grpc_defs::{networks_server::NetworksServer, tunnel_server::TunnelServer}, daemon::rpc::grpc_defs::{networks_server::NetworksServer, tunnel_server::TunnelServer},
database::get_connection, database::{get_connection, load_interface},
wireguard::Interface,
}; };
pub async fn daemon_main( pub async fn daemon_main(
@ -25,8 +28,16 @@ pub async fn daemon_main(
db_path: Option<&Path>, db_path: Option<&Path>,
notify_ready: Option<Arc<Notify>>, notify_ready: Option<Arc<Notify>>,
) -> Result<()> { ) -> Result<()> {
let _conn = get_connection(db_path)?; if let Some(n) = notify_ready {
let burrow_server = DaemonRPCServer::new(db_path)?; n.notify_one()
}
let conn = get_connection(db_path)?;
let config = load_interface(&conn, "1")?;
let burrow_server = DaemonRPCServer::new(
Arc::new(RwLock::new(config.clone().try_into()?)),
Arc::new(RwLock::new(config)),
db_path.clone(),
)?;
let spp = socket_path.clone(); let spp = socket_path.clone();
let tmp = get_socket_path(); let tmp = get_socket_path();
let sock_path = spp.unwrap_or(Path::new(tmp.as_str())); let sock_path = spp.unwrap_or(Path::new(tmp.as_str()));
@ -44,233 +55,9 @@ pub async fn daemon_main(
Ok::<(), AhError>(()) Ok::<(), AhError>(())
}); });
if let Some(n) = notify_ready {
n.notify_one();
}
info!("Starting daemon..."); info!("Starting daemon...");
tokio::try_join!(serve_job) tokio::try_join!(serve_job)
.map(|_| ()) .map(|_| ())
.map_err(|e| e.into()) .map_err(|e| e.into())
} }
#[cfg(test)]
mod tests {
use std::{
path::PathBuf,
time::{SystemTime, UNIX_EPOCH},
};
use anyhow::{anyhow, Result};
use tokio::time::{timeout, Duration};
use super::*;
use crate::daemon::rpc::{
client::BurrowClient,
grpc_defs::{
Empty, Network, NetworkListResponse, NetworkReorderRequest, NetworkType,
TunnelConfigurationResponse, TunnelStatusResponse,
},
};
#[tokio::test]
async fn daemon_tracks_network_priority_via_grpc() -> Result<()> {
let socket_path = temp_path("sock");
let db_path = temp_path("sqlite3");
let ready = Arc::new(Notify::new());
let daemon_ready = ready.clone();
let daemon_socket_path = socket_path.clone();
let daemon_db_path = db_path.clone();
let daemon_task = tokio::spawn(async move {
daemon_main(
Some(daemon_socket_path.as_path()),
Some(daemon_db_path.as_path()),
Some(daemon_ready),
)
.await
});
timeout(Duration::from_secs(5), ready.notified()).await?;
let mut client = timeout(
Duration::from_secs(5),
BurrowClient::from_uds_path(&socket_path),
)
.await??;
let mut config_stream = client
.tunnel_client
.tunnel_configuration(Empty {})
.await?
.into_inner();
let mut network_stream = client
.networks_client
.network_list(Empty {})
.await?
.into_inner();
let mut status_stream = client
.tunnel_client
.tunnel_status(Empty {})
.await?
.into_inner();
let initial_config = next_configuration(&mut config_stream).await?;
assert!(initial_config.addresses.is_empty());
assert_eq!(initial_config.mtu, 1500);
let initial_networks = next_networks(&mut network_stream).await?;
assert!(initial_networks.network.is_empty());
let initial_status = next_status(&mut status_stream).await?;
assert_eq!(
initial_status.state(),
crate::daemon::rpc::grpc_defs::State::Stopped
);
client.tunnel_client.tunnel_start(Empty {}).await?;
let passthrough_status = next_status(&mut status_stream).await?;
assert_eq!(
passthrough_status.state(),
crate::daemon::rpc::grpc_defs::State::Running
);
client.tunnel_client.tunnel_stop(Empty {}).await?;
let stopped_status = next_status(&mut status_stream).await?;
assert_eq!(
stopped_status.state(),
crate::daemon::rpc::grpc_defs::State::Stopped
);
client
.networks_client
.network_add(Network {
id: 1,
r#type: NetworkType::WireGuard.into(),
payload: sample_wireguard_payload(),
})
.await?;
let networks_after_wg = next_networks(&mut network_stream).await?;
assert_eq!(
network_ids(&networks_after_wg),
vec![(1, NetworkType::WireGuard)]
);
let wireguard_config = next_configuration(&mut config_stream).await?;
assert_eq!(
wireguard_config.addresses,
vec!["10.8.0.2/32", "fd00::2/128"]
);
assert_eq!(wireguard_config.mtu, 1420);
client
.networks_client
.network_add(Network {
id: 2,
r#type: NetworkType::WireGuard.into(),
payload: sample_wireguard_payload_with("10.77.0.2/32", 1380),
})
.await?;
let networks_after_second_add = next_networks(&mut network_stream).await?;
assert_eq!(
network_ids(&networks_after_second_add),
vec![(1, NetworkType::WireGuard), (2, NetworkType::WireGuard)]
);
let still_wireguard = next_configuration(&mut config_stream).await?;
assert_eq!(still_wireguard.addresses, wireguard_config.addresses);
client
.networks_client
.network_reorder(NetworkReorderRequest { id: 2, index: 0 })
.await?;
let networks_after_reorder = next_networks(&mut network_stream).await?;
assert_eq!(
network_ids(&networks_after_reorder),
vec![(2, NetworkType::WireGuard), (1, NetworkType::WireGuard)]
);
let second_wireguard_config = next_configuration(&mut config_stream).await?;
assert_eq!(second_wireguard_config.addresses, vec!["10.77.0.2/32"]);
assert_eq!(second_wireguard_config.mtu, 1380);
daemon_task.abort();
let _ = daemon_task.await;
cleanup_path(&socket_path);
cleanup_path(&db_path);
Ok(())
}
fn temp_path(ext: &str) -> PathBuf {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("system time is after unix epoch")
.as_nanos();
std::env::temp_dir().join(format!("burrow-daemon-test-{now}.{ext}"))
}
fn cleanup_path(path: &Path) {
let _ = std::fs::remove_file(path);
}
fn sample_wireguard_payload() -> Vec<u8> {
br#"[Interface]
PrivateKey = OEPVdomeLTxTIBvv3TYsJRge0Hp9NMiY0sIrhT8OWG8=
Address = 10.8.0.2/32, fd00::2/128
ListenPort = 51820
MTU = 1420
[Peer]
PublicKey = 8GaFjVO6c4luCHG4ONO+1bFG8tO+Zz5/Gy+Geht1USM=
PresharedKey = ha7j4BjD49sIzyF9SNlbueK0AMHghlj6+u0G3bzC698=
AllowedIPs = 0.0.0.0/0, ::/0
Endpoint = wg.burrow.rs:51820
"#
.to_vec()
}
fn sample_wireguard_payload_with(address: &str, mtu: u16) -> Vec<u8> {
format!(
"[Interface]\nPrivateKey = OEPVdomeLTxTIBvv3TYsJRge0Hp9NMiY0sIrhT8OWG8=\nAddress = {address}\nListenPort = 51820\nMTU = {mtu}\n\n[Peer]\nPublicKey = 8GaFjVO6c4luCHG4ONO+1bFG8tO+Zz5/Gy+Geht1USM=\nPresharedKey = ha7j4BjD49sIzyF9SNlbueK0AMHghlj6+u0G3bzC698=\nAllowedIPs = 0.0.0.0/0, ::/0\nEndpoint = wg.burrow.rs:51820\n"
)
.into_bytes()
}
async fn next_configuration(
stream: &mut tonic::Streaming<TunnelConfigurationResponse>,
) -> Result<TunnelConfigurationResponse> {
timeout(Duration::from_secs(5), stream.message())
.await??
.ok_or_else(|| anyhow!("configuration stream ended unexpectedly"))
}
async fn next_networks(
stream: &mut tonic::Streaming<NetworkListResponse>,
) -> Result<NetworkListResponse> {
timeout(Duration::from_secs(5), stream.message())
.await??
.ok_or_else(|| anyhow!("network stream ended unexpectedly"))
}
async fn next_status(
stream: &mut tonic::Streaming<TunnelStatusResponse>,
) -> Result<TunnelStatusResponse> {
timeout(Duration::from_secs(5), stream.message())
.await??
.ok_or_else(|| anyhow!("status stream ended unexpectedly"))
}
fn network_ids(response: &NetworkListResponse) -> Vec<(i32, NetworkType)> {
response
.network
.iter()
.map(|network| (network.id, network.r#type()))
.collect()
}
}

View file

@ -11,7 +11,11 @@ use tokio::{
use tracing::{debug, error, info}; use tracing::{debug, error, info};
use crate::daemon::rpc::{ use crate::daemon::rpc::{
DaemonCommand, DaemonMessage, DaemonNotification, DaemonRequest, DaemonResponse, DaemonCommand,
DaemonMessage,
DaemonNotification,
DaemonRequest,
DaemonResponse,
DaemonResponseData, DaemonResponseData,
}; };

View file

@ -1,6 +1,5 @@
use anyhow::Result; use anyhow::Result;
use hyper_util::rt::TokioIo; use hyper_util::rt::TokioIo;
use std::path::Path;
use tokio::net::UnixStream; use tokio::net::UnixStream;
use tonic::transport::{Endpoint, Uri}; use tonic::transport::{Endpoint, Uri};
use tower::service_fn; use tower::service_fn;
@ -16,18 +15,10 @@ pub struct BurrowClient<T> {
impl BurrowClient<tonic::transport::Channel> { impl BurrowClient<tonic::transport::Channel> {
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub async fn from_uds() -> Result<Self> { pub async fn from_uds() -> Result<Self> {
Self::from_uds_path(get_socket_path()).await
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub async fn from_uds_path(path: impl AsRef<Path>) -> Result<Self> {
let socket_path = path.as_ref().to_owned();
let channel = Endpoint::try_from("http://[::]:50051")? // NOTE: this is a hack(?) let channel = Endpoint::try_from("http://[::]:50051")? // NOTE: this is a hack(?)
.connect_with_connector(service_fn(move |_: Uri| { .connect_with_connector(service_fn(|_: Uri| async {
let socket_path = socket_path.clone(); let sock_path = get_socket_path();
async move { Ok::<_, std::io::Error>(TokioIo::new(UnixStream::connect(sock_path).await?))
Ok::<_, std::io::Error>(TokioIo::new(UnixStream::connect(&socket_path).await?))
}
})) }))
.await?; .await?;
let nw_client = NetworksClient::new(channel.clone()); let nw_client = NetworksClient::new(channel.clone());

View file

@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize};
use tun::TunOptions; use tun::TunOptions;
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "method", content = "params")] #[serde(tag="method", content="params")]
pub enum DaemonCommand { pub enum DaemonCommand {
Start(DaemonStartOptions), Start(DaemonStartOptions),
ServerInfo, ServerInfo,

View file

@ -1,182 +0,0 @@
use std::sync::Arc;
use anyhow::{Context, Result};
use tokio::{sync::RwLock, task::JoinHandle};
use tun::{tokio::TunInterface, TunOptions};
use super::rpc::{
grpc_defs::{Network, NetworkType},
ServerConfig,
};
use crate::{
control::TailnetConfig,
wireguard::{Config, Interface as WireGuardInterface},
};
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum RuntimeIdentity {
Passthrough,
Network {
id: i32,
network_type: NetworkType,
payload: Vec<u8>,
},
}
#[derive(Clone, Debug)]
pub enum ResolvedTunnel {
Passthrough {
identity: RuntimeIdentity,
},
Tailnet {
identity: RuntimeIdentity,
config: TailnetConfig,
},
WireGuard {
identity: RuntimeIdentity,
config: Config,
},
}
impl ResolvedTunnel {
pub fn from_networks(networks: &[Network]) -> Result<Self> {
let Some(network) = networks.first() else {
return Ok(Self::Passthrough {
identity: RuntimeIdentity::Passthrough,
});
};
let identity = RuntimeIdentity::Network {
id: network.id,
network_type: network.r#type(),
payload: network.payload.clone(),
};
match network.r#type() {
NetworkType::Tailnet => {
let config = TailnetConfig::from_slice(&network.payload)?;
Ok(Self::Tailnet { identity, config })
}
NetworkType::WireGuard => {
let payload = String::from_utf8(network.payload.clone())
.context("wireguard payload must be valid UTF-8")?;
let config = Config::from_content_fmt(&payload, "ini")?;
Ok(Self::WireGuard { identity, config })
}
}
}
pub fn identity(&self) -> &RuntimeIdentity {
match self {
Self::Passthrough { identity }
| Self::Tailnet { identity, .. }
| Self::WireGuard { identity, .. } => identity,
}
}
pub fn server_config(&self) -> Result<ServerConfig> {
match self {
Self::Passthrough { .. } => Ok(ServerConfig {
address: Vec::new(),
name: None,
mtu: Some(1500),
}),
Self::Tailnet { .. } => Ok(ServerConfig {
address: Vec::new(),
name: None,
mtu: Some(1280),
}),
Self::WireGuard { config, .. } => ServerConfig::try_from(config),
}
}
pub async fn start(
self,
tun_interface: Arc<RwLock<Option<TunInterface>>>,
) -> Result<ActiveTunnel> {
match self {
Self::Passthrough { identity } => Ok(ActiveTunnel::Passthrough { identity }),
Self::Tailnet { config, .. } => Err(anyhow::anyhow!(
"tailnet runtime is not wired in this checkout yet ({:?})",
config.provider
)),
Self::WireGuard { identity, config } => {
let tun = TunOptions::new().open()?;
tun_interface.write().await.replace(tun);
match start_wireguard_runtime(config, tun_interface.clone()).await {
Ok((interface, task)) => {
Ok(ActiveTunnel::WireGuard { identity, interface, task })
}
Err(err) => {
tun_interface.write().await.take();
Err(err)
}
}
}
}
}
}
pub enum ActiveTunnel {
Passthrough {
identity: RuntimeIdentity,
},
WireGuard {
identity: RuntimeIdentity,
interface: Arc<RwLock<WireGuardInterface>>,
task: JoinHandle<Result<()>>,
},
}
impl ActiveTunnel {
pub fn identity(&self) -> &RuntimeIdentity {
match self {
Self::Passthrough { identity }
| Self::WireGuard { identity, .. } => identity,
}
}
pub async fn shutdown(self, tun_interface: &Arc<RwLock<Option<TunInterface>>>) -> Result<()> {
match self {
Self::Passthrough { .. } => Ok(()),
Self::WireGuard { interface, task, .. } => {
interface.read().await.remove_tun().await;
let task_result = task.await;
tun_interface.write().await.take();
task_result??;
Ok(())
}
}
}
}
async fn start_wireguard_runtime(
config: Config,
tun_interface: Arc<RwLock<Option<TunInterface>>>,
) -> Result<(Arc<RwLock<WireGuardInterface>>, JoinHandle<Result<()>>)> {
let mut interface: WireGuardInterface = config.try_into()?;
interface.set_tun_ref(tun_interface).await;
let interface = Arc::new(RwLock::new(interface));
let run_interface = interface.clone();
let task = tokio::spawn(async move {
let guard = run_interface.read().await;
guard.run().await
});
Ok((interface, task))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn no_networks_resolve_to_passthrough() {
let resolved = ResolvedTunnel::from_networks(&[]).unwrap();
assert_eq!(resolved.identity(), &RuntimeIdentity::Passthrough);
assert_eq!(
resolved.server_config().unwrap().address,
Vec::<String>::new()
);
}
}

View file

@ -4,9 +4,11 @@ use anyhow::Result;
use rusqlite::{params, Connection}; use rusqlite::{params, Connection};
use crate::{ use crate::{
control::TailnetConfig,
daemon::rpc::grpc_defs::{ daemon::rpc::grpc_defs::{
Network as RPCNetwork, NetworkDeleteRequest, NetworkReorderRequest, NetworkType, Network as RPCNetwork,
NetworkDeleteRequest,
NetworkReorderRequest,
NetworkType,
}, },
wireguard::config::{Config, Interface, Peer}, wireguard::config::{Config, Interface, Peer},
}; };
@ -54,7 +56,7 @@ END;
pub fn initialize_tables(conn: &Connection) -> Result<()> { pub fn initialize_tables(conn: &Connection) -> Result<()> {
conn.execute(CREATE_WG_INTERFACE_TABLE, [])?; conn.execute(CREATE_WG_INTERFACE_TABLE, [])?;
conn.execute(CREATE_WG_PEER_TABLE, [])?; conn.execute(CREATE_WG_PEER_TABLE, [])?;
conn.execute_batch(CREATE_NETWORK_TABLE)?; conn.execute(CREATE_NETWORK_TABLE, [])?;
Ok(()) Ok(())
} }
@ -122,26 +124,35 @@ pub fn dump_interface(conn: &Connection, config: &Config) -> Result<()> {
pub fn get_connection(path: Option<&Path>) -> Result<Connection> { pub fn get_connection(path: Option<&Path>) -> Result<Connection> {
let p = path.unwrap_or_else(|| std::path::Path::new(DB_PATH)); let p = path.unwrap_or_else(|| std::path::Path::new(DB_PATH));
let conn = Connection::open(p)?; if !p.exists() {
initialize_tables(&conn)?; let conn = Connection::open(p)?;
Ok(conn) initialize_tables(&conn)?;
dump_interface(&conn, &Config::default())?;
return Ok(conn);
}
Ok(Connection::open(p)?)
} }
pub fn add_network(conn: &Connection, network: &RPCNetwork) -> Result<()> { pub fn add_network(conn: &Connection, network: &RPCNetwork) -> Result<()> {
validate_network_payload(network)?;
let mut stmt = conn.prepare("INSERT INTO network (id, type, payload) VALUES (?, ?, ?)")?; let mut stmt = conn.prepare("INSERT INTO network (id, type, payload) VALUES (?, ?, ?)")?;
stmt.execute(params![ stmt.execute(params![
network.id, network.id,
network.r#type().as_str_name(), network.r#type().as_str_name(),
&network.payload &network.payload
])?; ])?;
if network.r#type() == NetworkType::WireGuard {
let payload_str = String::from_utf8(network.payload.clone())?;
let wg_config = Config::from_content_fmt(&payload_str, "ini")?;
dump_interface(conn, &wg_config)?;
}
Ok(()) Ok(())
} }
pub fn list_networks(conn: &Connection) -> Result<Vec<RPCNetwork>> { pub fn list_networks(conn: &Connection) -> Result<Vec<RPCNetwork>> {
let mut stmt = conn.prepare("SELECT id, type, payload FROM network ORDER BY idx, id")?; let mut stmt = conn.prepare("SELECT id, type, payload FROM network ORDER BY idx")?;
let networks: Vec<RPCNetwork> = stmt let networks: Vec<RPCNetwork> = stmt
.query_map([], |row| { .query_map([], |row| {
println!("row: {:?}", row);
let network_id: i32 = row.get(0)?; let network_id: i32 = row.get(0)?;
let network_type: String = row.get(1)?; let network_type: String = row.get(1)?;
let network_type = NetworkType::from_str_name(network_type.as_str()) let network_type = NetworkType::from_str_name(network_type.as_str())
@ -158,19 +169,12 @@ pub fn list_networks(conn: &Connection) -> Result<Vec<RPCNetwork>> {
} }
pub fn reorder_network(conn: &Connection, req: NetworkReorderRequest) -> Result<()> { pub fn reorder_network(conn: &Connection, req: NetworkReorderRequest) -> Result<()> {
let mut ordered_ids = ordered_network_ids(conn)?; let mut stmt = conn.prepare("UPDATE network SET idx = ? WHERE id = ?")?;
let Some(current_idx) = ordered_ids.iter().position(|id| *id == req.id) else { let res = stmt.execute(params![req.index, req.id])?;
if res == 0 {
return Err(anyhow::anyhow!("No such network exists")); return Err(anyhow::anyhow!("No such network exists"));
}; }
Ok(())
let target_idx = usize::try_from(req.index)
.map_err(|_| anyhow::anyhow!("Network index must be non-negative"))?;
let moved_id = ordered_ids.remove(current_idx);
let target_idx = target_idx.min(ordered_ids.len());
ordered_ids.insert(target_idx, moved_id);
renumber_networks(conn, &ordered_ids)
} }
pub fn delete_network(conn: &Connection, req: NetworkDeleteRequest) -> Result<()> { pub fn delete_network(conn: &Connection, req: NetworkDeleteRequest) -> Result<()> {
@ -179,8 +183,7 @@ pub fn delete_network(conn: &Connection, req: NetworkDeleteRequest) -> Result<()
if res == 0 { if res == 0 {
return Err(anyhow::anyhow!("No such network exists")); return Err(anyhow::anyhow!("No such network exists"));
} }
let ordered_ids = ordered_network_ids(conn)?; Ok(())
renumber_networks(conn, &ordered_ids)
} }
fn parse_lst(s: &str) -> Vec<String> { fn parse_lst(s: &str) -> Vec<String> {
@ -197,86 +200,9 @@ fn to_lst<T: ToString>(v: &Vec<T>) -> String {
.join(",") .join(",")
} }
fn validate_network_payload(network: &RPCNetwork) -> Result<()> {
match network.r#type() {
NetworkType::WireGuard => {
let payload_str = String::from_utf8(network.payload.clone())?;
Config::from_content_fmt(&payload_str, "ini")?;
}
NetworkType::Tailnet => {
TailnetConfig::from_slice(&network.payload)?;
}
}
Ok(())
}
fn ordered_network_ids(conn: &Connection) -> Result<Vec<i32>> {
let mut stmt = conn.prepare("SELECT id FROM network ORDER BY idx, id")?;
let ids = stmt
.query_map([], |row| row.get::<_, i32>(0))?
.collect::<rusqlite::Result<Vec<i32>>>()?;
Ok(ids)
}
fn renumber_networks(conn: &Connection, ordered_ids: &[i32]) -> Result<()> {
conn.execute_batch("BEGIN IMMEDIATE")?;
let result = (|| -> Result<()> {
let mut stmt = conn.prepare("UPDATE network SET idx = ? WHERE id = ?")?;
for (idx, id) in ordered_ids.iter().enumerate() {
stmt.execute(params![idx as i32, id])?;
}
Ok(())
})();
match result {
Ok(()) => {
conn.execute_batch("COMMIT")?;
Ok(())
}
Err(err) => {
let _ = conn.execute_batch("ROLLBACK");
Err(err)
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use tempfile::tempdir;
fn sample_wireguard_payload() -> Vec<u8> {
br#"[Interface]
PrivateKey = OEPVdomeLTxTIBvv3TYsJRge0Hp9NMiY0sIrhT8OWG8=
Address = 10.13.13.2/24
ListenPort = 51820
[Peer]
PublicKey = 8GaFjVO6c4luCHG4ONO+1bFG8tO+Zz5/Gy+Geht1USM=
PresharedKey = ha7j4BjD49sIzyF9SNlbueK0AMHghlj6+u0G3bzC698=
AllowedIPs = 0.0.0.0/0, 8.8.8.8/32
Endpoint = wg.burrow.rs:51820
"#
.to_vec()
}
fn sample_wireguard_payload_with_address(address: &str, mtu: u16) -> Vec<u8> {
format!(
"[Interface]\nPrivateKey = OEPVdomeLTxTIBvv3TYsJRge0Hp9NMiY0sIrhT8OWG8=\nAddress = {address}\nListenPort = 51820\nMTU = {mtu}\n\n[Peer]\nPublicKey = 8GaFjVO6c4luCHG4ONO+1bFG8tO+Zz5/Gy+Geht1USM=\nPresharedKey = ha7j4BjD49sIzyF9SNlbueK0AMHghlj6+u0G3bzC698=\nAllowedIPs = 0.0.0.0/0\nEndpoint = wg.burrow.rs:51820\n"
)
.into_bytes()
}
fn sample_tailnet_payload() -> Vec<u8> {
br#"{
"provider":"tailscale",
"account":"default",
"identity":"apple",
"tailnet":"example.ts.net",
"hostname":"burrow-phone"
}"#
.to_vec()
}
#[test] #[test]
fn test_db() { fn test_db() {
@ -287,123 +213,4 @@ Endpoint = wg.burrow.rs:51820
let loaded = load_interface(&conn, "1").unwrap(); let loaded = load_interface(&conn, "1").unwrap();
assert_eq!(config, loaded); assert_eq!(config, loaded);
} }
#[test]
fn add_network_validates_payloads() {
let conn = Connection::open_in_memory().unwrap();
initialize_tables(&conn).unwrap();
add_network(
&conn,
&RPCNetwork {
id: 1,
r#type: NetworkType::WireGuard.into(),
payload: sample_wireguard_payload(),
},
)
.unwrap();
add_network(
&conn,
&RPCNetwork {
id: 2,
r#type: NetworkType::Tailnet.into(),
payload: sample_tailnet_payload(),
},
)
.unwrap();
add_network(
&conn,
&RPCNetwork {
id: 3,
r#type: NetworkType::WireGuard.into(),
payload: sample_wireguard_payload_with_address("10.42.0.2/32", 1380),
},
)
.unwrap();
assert!(add_network(
&conn,
&RPCNetwork {
id: 4,
r#type: NetworkType::WireGuard.into(),
payload: b"not-a-config".to_vec(),
},
)
.is_err());
assert!(add_network(
&conn,
&RPCNetwork {
id: 5,
r#type: NetworkType::Tailnet.into(),
payload: b"not-a-tailnet-config".to_vec(),
},
)
.is_err());
let ids: Vec<i32> = list_networks(&conn)
.unwrap()
.into_iter()
.map(|n| n.id)
.collect();
assert_eq!(ids, vec![1, 2, 3]);
}
#[test]
fn reorder_and_delete_networks_keep_priority_stable() {
let conn = Connection::open_in_memory().unwrap();
initialize_tables(&conn).unwrap();
for (id, address, mtu) in [
(1, "10.42.0.2/32", 1380),
(2, "10.42.0.3/32", 1381),
(3, "10.42.0.4/32", 1382),
] {
add_network(
&conn,
&RPCNetwork {
id,
r#type: NetworkType::WireGuard.into(),
payload: sample_wireguard_payload_with_address(address, mtu),
},
)
.unwrap();
}
reorder_network(&conn, NetworkReorderRequest { id: 3, index: 0 }).unwrap();
let ids: Vec<i32> = list_networks(&conn)
.unwrap()
.into_iter()
.map(|n| n.id)
.collect();
assert_eq!(ids, vec![3, 1, 2]);
delete_network(&conn, NetworkDeleteRequest { id: 1 }).unwrap();
let ids: Vec<i32> = list_networks(&conn)
.unwrap()
.into_iter()
.map(|n| n.id)
.collect();
assert_eq!(ids, vec![3, 2]);
}
#[test]
fn get_connection_does_not_seed_a_default_interface() {
let dir = tempdir().unwrap();
let db_path = dir.path().join("burrow.sqlite3");
let conn = get_connection(Some(db_path.as_path())).unwrap();
let interface_count: i64 = conn
.query_row("SELECT COUNT(*) FROM wg_interface", [], |row| row.get(0))
.unwrap();
let network_count: i64 = conn
.query_row("SELECT COUNT(*) FROM network", [], |row| row.get(0))
.unwrap();
assert_eq!(interface_count, 0);
assert_eq!(network_count, 0);
}
} }

View file

@ -1,25 +1,22 @@
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub mod control;
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub mod wireguard; pub mod wireguard;
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
mod auth;
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
mod daemon; mod daemon;
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub mod database; pub mod database;
#[cfg(target_os = "linux")] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub mod tor; mod auth;
pub(crate) mod tracing; pub(crate) mod tracing;
#[cfg(target_os = "linux")]
pub mod usernet;
#[cfg(target_vendor = "apple")] #[cfg(target_vendor = "apple")]
pub use daemon::apple::spawn_in_process; pub use daemon::apple::spawn_in_process;
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub use daemon::{ pub use daemon::{
rpc::DaemonResponse, rpc::ServerInfo, DaemonClient, DaemonCommand, DaemonResponseData, rpc::DaemonResponse,
rpc::ServerInfo,
DaemonClient,
DaemonCommand,
DaemonResponseData,
DaemonStartOptions, DaemonStartOptions,
}; };

View file

@ -1,8 +1,6 @@
use anyhow::Result; use anyhow::Result;
use clap::{Args, Parser, Subcommand}; use clap::{Args, Parser, Subcommand};
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
mod control;
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
mod daemon; mod daemon;
pub(crate) mod tracing; pub(crate) mod tracing;
@ -11,10 +9,6 @@ mod wireguard;
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
mod auth; mod auth;
#[cfg(target_os = "linux")]
mod tor;
#[cfg(target_os = "linux")]
mod usernet;
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
use daemon::{DaemonClient, DaemonCommand}; use daemon::{DaemonClient, DaemonCommand};
@ -72,12 +66,6 @@ enum Commands {
NetworkReorder(NetworkReorderArgs), NetworkReorder(NetworkReorderArgs),
/// Delete Network /// Delete Network
NetworkDelete(NetworkDeleteArgs), NetworkDelete(NetworkDeleteArgs),
#[cfg(target_os = "linux")]
/// Run a command in an unshared Linux namespace using a Burrow backend
Exec(ExecArgs),
#[cfg(target_os = "linux")]
/// Run a command in a Linux user namespace with Tor-backed networking
TorExec(TorExecArgs),
} }
#[derive(Args)] #[derive(Args)]
@ -110,25 +98,6 @@ struct NetworkDeleteArgs {
id: i32, id: i32,
} }
#[cfg(target_os = "linux")]
#[derive(Args)]
struct TorExecArgs {
payload_path: String,
#[arg(required = true, num_args = 1.., trailing_var_arg = true)]
command: Vec<String>,
}
#[cfg(target_os = "linux")]
#[derive(Args)]
struct ExecArgs {
#[arg(long, value_enum)]
backend: usernet::ExecBackendKind,
#[arg(long)]
payload: Option<String>,
#[arg(required = true, num_args = 1.., trailing_var_arg = true)]
command: Vec<String>,
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
async fn try_start() -> Result<()> { async fn try_start() -> Result<()> {
let mut client = BurrowClient::from_uds().await?; let mut client = BurrowClient::from_uds().await?;
@ -240,38 +209,6 @@ async fn try_network_delete(id: i32) -> Result<()> {
Ok(()) Ok(())
} }
#[cfg(target_os = "linux")]
async fn try_tor_exec(payload_path: &str, command: Vec<String>) -> Result<()> {
let exit_code = usernet::run_exec(usernet::ExecInvocation {
backend: usernet::ExecBackendKind::Tor,
payload_path: Some(payload_path.into()),
command,
})
.await?;
if exit_code != 0 {
std::process::exit(exit_code);
}
Ok(())
}
#[cfg(target_os = "linux")]
async fn try_exec(
backend: usernet::ExecBackendKind,
payload: Option<String>,
command: Vec<String>,
) -> Result<()> {
let exit_code = usernet::run_exec(usernet::ExecInvocation {
backend,
payload_path: payload.map(Into::into),
command,
})
.await?;
if exit_code != 0 {
std::process::exit(exit_code);
}
Ok(())
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
fn handle_unexpected(res: Result<DaemonResponseData, String>) { fn handle_unexpected(res: Result<DaemonResponseData, String>) {
match res { match res {
@ -348,17 +285,6 @@ async fn main() -> Result<()> {
Commands::NetworkList => try_network_list().await?, Commands::NetworkList => try_network_list().await?,
Commands::NetworkReorder(args) => try_network_reorder(args.id, args.index).await?, Commands::NetworkReorder(args) => try_network_reorder(args.id, args.index).await?,
Commands::NetworkDelete(args) => try_network_delete(args.id).await?, Commands::NetworkDelete(args) => try_network_delete(args.id).await?,
#[cfg(target_os = "linux")]
Commands::Exec(args) => {
try_exec(
args.backend.clone(),
args.payload.clone(),
args.command.clone(),
)
.await?
}
#[cfg(target_os = "linux")]
Commands::TorExec(args) => try_tor_exec(&args.payload_path, args.command.clone()).await?,
} }
Ok(()) Ok(())

View file

@ -1,187 +0,0 @@
use std::{net::SocketAddr, path::PathBuf, str};
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
/// Top-level configuration for the tor-backed networking payload.
///
/// Deserialized from either JSON or TOML (see `Config::from_payload`); every
/// field is defaulted so an empty payload is valid.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Config {
    // Logical account name; blank/absent falls back to "default" (account_name).
    #[serde(default)]
    pub account: Option<String>,
    // Identity override; blank/absent falls back to tun_name, then "tor-<id>".
    #[serde(default)]
    pub identity: Option<String>,
    // Interface addresses in CIDR notation (e.g. "100.64.0.2/32").
    #[serde(default)]
    pub address: Vec<String>,
    // DNS server addresses for the tunnel.
    #[serde(default)]
    pub dns: Vec<String>,
    // Optional interface MTU.
    #[serde(default)]
    pub mtu: Option<u32>,
    // Optional TUN device name; also used as an identity-name fallback.
    #[serde(default)]
    pub tun_name: Option<String>,
    // Arti (tor client) state/cache directory roots.
    #[serde(default)]
    pub arti: ArtiConfig,
    // TCP stack used for the transparent proxy listener.
    #[serde(default)]
    pub tcp_stack: TcpStackConfig,
}
/// Filesystem roots for arti's persistent state and cache.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ArtiConfig {
    pub state_dir: String,
    pub cache_dir: String,
}

impl Default for ArtiConfig {
    // Defaults follow the conventional system-wide /var layout.
    fn default() -> Self {
        Self {
            state_dir: "/var/lib/burrow/arti/state".to_string(),
            cache_dir: "/var/cache/burrow/arti".to_string(),
        }
    }
}

/// Selects the TCP stack implementation; payloads tag it with `kind`
/// (currently only `kind = "system"`).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "kind", rename_all = "snake_case")]
pub enum TcpStackConfig {
    System(SystemTcpStackConfig),
}

impl Default for TcpStackConfig {
    fn default() -> Self {
        Self::System(SystemTcpStackConfig::default())
    }
}

/// Settings for the host-network ("system") TCP stack.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SystemTcpStackConfig {
    // Transparent proxy listen address (defaults to "127.0.0.1:9040").
    #[serde(default = "default_system_listen")]
    pub listen: String,
}

impl Default for SystemTcpStackConfig {
    fn default() -> Self {
        Self {
            listen: default_system_listen(),
        }
    }
}
impl Config {
    /// Parses a payload as JSON first, then falls back to TOML.
    ///
    /// # Errors
    /// Fails when the bytes are not valid UTF-8 or parse as neither format.
    pub fn from_payload(payload: &[u8]) -> Result<Self> {
        if let Ok(config) = serde_json::from_slice(payload) {
            return Ok(config);
        }
        let payload = str::from_utf8(payload).context("tor payload must be valid UTF-8")?;
        toml::from_str(payload).context("failed to parse tor payload as JSON or TOML")
    }

    /// Returns the parsed listener address of the configured TCP stack.
    pub fn listen_addr(&self) -> Result<SocketAddr> {
        match &self.tcp_stack {
            TcpStackConfig::System(config) => config
                .listen
                .parse()
                .with_context(|| format!("invalid system tcp listen address '{}'", config.listen)),
        }
    }

    /// Authority component used to namespace runtime directories.
    pub fn authority(&self) -> String {
        "arti://local".to_owned()
    }

    /// Account name, defaulting to "default" when unset or blank.
    pub fn account_name(&self) -> String {
        self.account
            .clone()
            .filter(|value| !value.trim().is_empty())
            .unwrap_or_else(|| "default".to_owned())
    }

    /// Identity name, falling back to `tun_name` and then "tor-<network_id>".
    pub fn identity_name(&self, network_id: i32) -> String {
        self.identity
            .clone()
            .filter(|value| !value.trim().is_empty())
            // Fix: reject blank tun_name values too, matching the treatment of
            // `identity` and `account`; previously a whitespace-only tun_name
            // became an empty identity (and an empty runtime path component).
            .or_else(|| {
                self.tun_name
                    .clone()
                    .filter(|value| !value.trim().is_empty())
            })
            .unwrap_or_else(|| format!("tor-{network_id}"))
    }

    /// Computes per-(authority, account, identity) state and cache dirs.
    pub fn runtime_dirs(&self, network_id: i32) -> (String, String) {
        let authority = sanitize_path_component(&self.authority());
        let account = sanitize_path_component(&self.account_name());
        let identity = sanitize_path_component(&self.identity_name(network_id));
        (
            append_runtime_path(&self.arti.state_dir, &[&authority, &account, &identity]),
            append_runtime_path(&self.arti.cache_dir, &[&authority, &account, &identity]),
        )
    }
}
/// Default transparent-proxy listen address for the system TCP stack.
fn default_system_listen() -> String {
    String::from("127.0.0.1:9040")
}
/// Joins each element of `parts` onto `base` with platform path joining and
/// returns the result as a (lossily converted) string.
fn append_runtime_path(base: &str, parts: &[&str]) -> String {
    parts
        .iter()
        .fold(PathBuf::from(base), |mut joined, part| {
            joined.push(part);
            joined
        })
        .to_string_lossy()
        .into_owned()
}
/// Maps `value` to a filesystem-safe path component: ASCII alphanumerics,
/// '-' and '_' pass through, every other char becomes '_'; an empty input
/// yields "default".
fn sanitize_path_component(value: &str) -> String {
    // The mapping is length-preserving, so the result is empty iff the
    // input is empty; handle that case up front.
    if value.is_empty() {
        return "default".to_owned();
    }
    value
        .chars()
        .map(|ch| match ch {
            'a'..='z' | 'A'..='Z' | '0'..='9' | '-' | '_' => ch,
            _ => '_',
        })
        .collect()
}
#[cfg(test)]
mod tests {
    use super::*;

    // JSON payloads parse directly; derived runtime paths sanitize the
    // authority ("arti://local" -> "arti___local").
    #[test]
    fn parses_json_payload() {
        let payload = br#"{
"address":["100.64.0.2/32"],
"mtu":1400,
"arti":{"state_dir":"/tmp/state","cache_dir":"/tmp/cache"},
"tcp_stack":{"kind":"system","listen":"127.0.0.1:9150"}
}"#;
        let config = Config::from_payload(payload).unwrap();
        assert_eq!(config.address, vec!["100.64.0.2/32"]);
        assert_eq!(config.listen_addr().unwrap().to_string(), "127.0.0.1:9150");
        assert!(config.runtime_dirs(7).0.contains("arti___local"));
    }

    // TOML payloads are accepted as a fallback; tun_name doubles as the
    // identity when no explicit identity is set.
    #[test]
    fn parses_toml_payload() {
        let payload = r#"
address = ["100.64.0.3/32"]
mtu = 1280
tun_name = "burrow-tor"
[arti]
state_dir = "/tmp/state"
cache_dir = "/tmp/cache"
[tcp_stack]
kind = "system"
listen = "127.0.0.1:9140"
"#;
        let config = Config::from_payload(payload.as_bytes()).unwrap();
        assert_eq!(config.tun_name.as_deref(), Some("burrow-tor"));
        assert_eq!(config.listen_addr().unwrap().to_string(), "127.0.0.1:9140");
        assert_eq!(config.identity_name(11), "burrow-tor");
    }
}

View file

@ -1,177 +0,0 @@
use std::{
net::{IpAddr, SocketAddr},
sync::Arc,
};
use anyhow::{Context, Result};
use arti_client::TorClient;
use hickory_proto::{
op::{Message, MessageType, ResponseCode},
rr::{rdata::A, rdata::AAAA, RData, Record, RecordType},
};
use tokio::{net::UdpSocket, sync::watch, task::JoinError};
use tor_rtcompat::PreferredRuntime;
use tracing::{debug, warn};
/// TTL (seconds) applied to synthesized DNS answers.
const DNS_TTL_SECS: u32 = 60;

/// Handle to the background UDP DNS proxy task.
#[derive(Debug)]
pub struct TorDnsHandle {
    // Sending `true` asks the proxy task to exit.
    shutdown: watch::Sender<bool>,
    task: tokio::task::JoinHandle<()>,
}
impl TorDnsHandle {
    /// Signals the proxy task to stop and waits for it to finish.
    /// A cancelled task is treated as a clean shutdown.
    pub async fn shutdown(self) -> Result<()> {
        let _ = self.shutdown.send(true);
        let joined = self.task.await;
        match joined {
            Ok(()) => Ok(()),
            Err(err) => {
                if err.is_cancelled() {
                    Ok(())
                } else {
                    Err(join_error(err))
                }
            }
        }
    }
}
/// Binds a UDP socket on `bind_addr` and spawns a task that answers DNS
/// queries by resolving hostnames through the given tor client.
///
/// # Errors
/// Fails only if the socket cannot be bound; per-query failures are logged
/// and the offending query is dropped.
pub async fn spawn(
    bind_addr: SocketAddr,
    tor_client: Arc<TorClient<PreferredRuntime>>,
) -> Result<TorDnsHandle> {
    let socket = UdpSocket::bind(bind_addr)
        .await
        .with_context(|| format!("failed to bind tor dns proxy on {bind_addr}"))?;
    let (shutdown_tx, mut shutdown_rx) = watch::channel(false);
    let task = tokio::spawn(async move {
        // 4096 bytes comfortably holds a UDP DNS datagram.
        let mut buffer = [0u8; 4096];
        loop {
            tokio::select! {
                changed = shutdown_rx.changed() => {
                    match changed {
                        // Only a `true` value is a shutdown request.
                        Ok(()) if *shutdown_rx.borrow() => break,
                        Ok(()) => continue,
                        // Sender dropped: treat as shutdown.
                        Err(_) => break,
                    }
                }
                received = socket.recv_from(&mut buffer) => {
                    let (len, peer_addr) = match received {
                        Ok(value) => value,
                        Err(err) => {
                            warn!(?err, "tor dns proxy recv failed");
                            continue;
                        }
                    };
                    // Unanswerable queries are dropped; the client retries.
                    let response = match build_response(&buffer[..len], tor_client.as_ref()).await {
                        Ok(message) => message,
                        Err(err) => {
                            debug!(?err, "tor dns proxy failed to answer query");
                            continue;
                        }
                    };
                    if let Err(err) = socket.send_to(&response, peer_addr).await {
                        warn!(?err, "tor dns proxy send failed");
                    }
                }
            }
        }
    });
    Ok(TorDnsHandle { shutdown: shutdown_tx, task })
}
/// Builds a DNS response for `packet`, resolving A/AAAA queries over tor.
///
/// Non-address query types are answered with `NotImp`; a failed tor lookup
/// sets `ServFail` on the whole response.
pub(crate) async fn build_response(
    packet: &[u8],
    tor_client: &TorClient<PreferredRuntime>,
) -> Result<Vec<u8>> {
    let request = Message::from_vec(packet).context("failed to parse dns packet")?;
    let mut response = Message::new();
    response
        .set_id(request.id())
        .set_op_code(request.op_code())
        .set_message_type(MessageType::Response)
        .set_recursion_desired(request.recursion_desired())
        .set_recursion_available(true)
        .set_response_code(ResponseCode::NoError);
    for query in request.queries().iter().cloned() {
        // Echo the query section back, as resolvers expect.
        response.add_query(query.clone());
        match query.query_type() {
            RecordType::A | RecordType::AAAA => {
                // Tor resolves bare hostnames; strip the FQDN trailing dot.
                let hostname = query.name().to_utf8();
                let hostname = hostname.trim_end_matches('.');
                match tor_client.resolve(hostname).await {
                    Ok(addrs) => {
                        for addr in addrs {
                            // Keep only addresses matching the queried family.
                            if let Some(answer) =
                                record_for_address(query.name().clone(), query.query_type(), addr)
                            {
                                response.add_answer(answer);
                            }
                        }
                    }
                    Err(err) => {
                        debug!(hostname, ?err, "tor dns lookup failed");
                        response.set_response_code(ResponseCode::ServFail);
                    }
                }
            }
            _ => {
                response.set_response_code(ResponseCode::NotImp);
            }
        }
    }
    response.to_vec().context("failed to encode dns response")
}
/// Builds an answer record for `addr` when its family matches the queried
/// record type (A/IPv4 or AAAA/IPv6); returns `None` on a mismatch.
fn record_for_address(
    name: hickory_proto::rr::Name,
    record_type: RecordType,
    addr: IpAddr,
) -> Option<Record> {
    let rdata = match (record_type, addr) {
        (RecordType::A, IpAddr::V4(ip)) => RData::A(A::from(ip)),
        (RecordType::AAAA, IpAddr::V6(ip)) => RData::AAAA(AAAA::from(ip)),
        _ => return None,
    };
    Some(Record::from_rdata(name, DNS_TTL_SECS, rdata))
}
fn join_error(err: JoinError) -> anyhow::Error {
anyhow::anyhow!("tor dns task failed: {err}")
}
#[cfg(test)]
mod tests {
    use super::*;
    use hickory_proto::rr::Name;
    use std::net::{Ipv4Addr, Ipv6Addr};

    // An A query with an IPv4 answer yields an A record.
    #[test]
    fn builds_a_record_for_ipv4_answer() {
        let record = record_for_address(
            Name::from_ascii("example.com.").unwrap(),
            RecordType::A,
            IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)),
        )
        .unwrap();
        assert_eq!(record.record_type(), RecordType::A);
    }

    // Family/type mismatches are filtered out rather than answered.
    #[test]
    fn skips_mismatched_record_type() {
        let record = record_for_address(
            Name::from_ascii("example.com.").unwrap(),
            RecordType::A,
            IpAddr::V6(Ipv6Addr::LOCALHOST),
        );
        assert!(record.is_none());
    }
}

View file

@ -1,439 +0,0 @@
use std::{
ffi::{OsStr, OsString},
fs,
net::{IpAddr, Ipv4Addr, SocketAddr},
os::unix::process::ExitStatusExt,
path::PathBuf,
process::{Command, ExitStatus, Stdio},
sync::Arc,
time::Duration,
};
use anyhow::{bail, Context, Result};
use tokio::process::Command as TokioCommand;
use tor_rtcompat::PreferredRuntime;
use tracing::{debug, info};
use super::{
bootstrap_client,
dns::{spawn as spawn_dns, TorDnsHandle},
runtime::{spawn_with_client, TorHandle},
Config, SystemTcpStackConfig, TcpStackConfig,
};
// Prefix length of the point-to-point veth subnet (/30: two usable hosts).
const CHILD_PREFIX_LEN: u8 = 30;
// Port on the host-side veth address where the tor DNS proxy listens.
const CHILD_DNS_PORT: u16 = 53;
// How long (and how often) to poll for the transparent listener to come up.
const LISTENER_READY_TIMEOUT: Duration = Duration::from_secs(10);
const LISTENER_READY_POLL: Duration = Duration::from_millis(100);
/// Runs `command` inside a fresh network namespace whose TCP traffic is
/// DNAT-ed into an in-process tor transparent proxy and whose DNS is served
/// by the tor resolver. Returns the child's exit code.
pub async fn run_exec(mut config: Config, command: Vec<String>) -> Result<i32> {
    if command.is_empty() {
        bail!("tor-exec requires a command to run");
    }
    // Namespace/iptables setup needs root plus the ip/iptables/unshare tools.
    ensure_root()?;
    ensure_host_tool("ip")?;
    ensure_host_tool("iptables")?;
    ensure_host_tool("unshare")?;
    let requested_listener = config.listen_addr()?;
    if requested_listener.port() == 0 {
        // Port 0 would bind an ephemeral port the DNAT rule could not target.
        bail!("tor-exec requires a fixed listener port");
    }
    let plan = NamespacePlan::new(requested_listener.port());
    // Re-root arti state/cache under per-run directories keyed by our pid.
    let (state_dir, cache_dir) = config.runtime_dirs(std::process::id() as i32);
    config.arti.state_dir = state_dir;
    config.arti.cache_dir = cache_dir;
    // Force the listener onto the host side of the veth pair so the child's
    // DNAT-ed connections can reach it.
    config.tcp_stack = TcpStackConfig::System(SystemTcpStackConfig {
        listen: format!("{}:{}", plan.host_ip, plan.listener_port),
    });
    let namespace = NamespaceGuard::create(&plan)?;
    let tor_client = bootstrap_client(&config).await?;
    let tor_handle = spawn_with_client(config, tor_client.clone()).await?;
    wait_for_listener(SocketAddr::new(
        IpAddr::V4(plan.host_ip),
        plan.listener_port,
    ))
    .await?;
    let dns_handle = spawn_dns(
        SocketAddr::new(IpAddr::V4(plan.host_ip), CHILD_DNS_PORT),
        tor_client,
    )
    .await?;
    let status = namespace.run_child(&command).await;
    // Always shut both proxies down before propagating any child error.
    let dns_shutdown = dns_handle.shutdown().await;
    let tor_shutdown = tor_handle.shutdown().await;
    let status = status?;
    dns_shutdown?;
    tor_shutdown?;
    child_exit_code(status)
}
/// Fails unless the process runs with effective uid 0 (root).
fn ensure_root() -> Result<()> {
    let euid = unsafe { libc::geteuid() };
    if euid == 0 {
        Ok(())
    } else {
        bail!("tor-exec currently requires root on linux")
    }
}
/// Verifies `tool` resolves on PATH by probing with `command -v` in a shell.
fn ensure_host_tool(tool: &str) -> Result<()> {
    let probe = format!("command -v {tool} >/dev/null");
    let status = Command::new("sh")
        .arg("-lc")
        .arg(&probe)
        .status()
        .with_context(|| format!("failed to probe required tool '{tool}'"))?;
    if status.success() {
        Ok(())
    } else {
        bail!("required host tool '{tool}' is not available")
    }
}
/// Polls `addr` with TCP connects until it accepts, failing after
/// `LISTENER_READY_TIMEOUT` with the last connect error as context.
async fn wait_for_listener(addr: SocketAddr) -> Result<()> {
    let deadline = tokio::time::Instant::now() + LISTENER_READY_TIMEOUT;
    loop {
        match tokio::net::TcpStream::connect(addr).await {
            Ok(stream) => {
                // Probe connection only; close it immediately.
                drop(stream);
                return Ok(());
            }
            Err(err) if tokio::time::Instant::now() < deadline => {
                debug!(%addr, ?err, "waiting for tor transparent listener");
                tokio::time::sleep(LISTENER_READY_POLL).await;
            }
            Err(err) => return Err(err).with_context(|| format!("timed out waiting for {addr}")),
        }
    }
}
/// Converts a child's `ExitStatus` to a numeric exit code, mapping fatal
/// signals to the conventional `128 + signal` shell encoding.
fn child_exit_code(status: ExitStatus) -> Result<i32> {
    match (status.code(), status.signal()) {
        (Some(code), _) => Ok(code),
        (None, Some(signal)) => Ok(128 + signal),
        (None, None) => bail!("child process terminated without an exit code"),
    }
}
/// Naming/addressing plan for one tor-exec run: netns name, veth pair names,
/// the two ends of a /30 subnet, and the proxy listener port.
#[derive(Debug, Clone)]
struct NamespacePlan {
    netns_name: String,
    host_if: String,
    child_if: String,
    host_ip: Ipv4Addr,
    child_ip: Ipv4Addr,
    listener_port: u16,
}

impl NamespacePlan {
    /// Derives run-scoped names and addresses from the current pid.
    // NOTE(review): pid % 10_000 and pid % 200 can collide between two
    // concurrent runs — confirm single-instance use or add a uniqueness check.
    fn new(listener_port: u16) -> Self {
        let token = std::process::id() % 10_000;
        let segment = ((std::process::id() % 200) as u8) + 20;
        Self {
            netns_name: format!("burrow-tor-{token}"),
            // Interface names stay <= 15 chars (IFNAMSIZ - 1); see the tests.
            host_if: format!("bth{token}"),
            child_if: format!("btc{token}"),
            host_ip: Ipv4Addr::new(100, 90, segment, 1),
            child_ip: Ipv4Addr::new(100, 90, segment, 2),
            listener_port,
        }
    }

    /// Host-side address in CIDR form, e.g. "100.90.42.1/30".
    fn host_cidr(&self) -> String {
        format!("{}/{}", self.host_ip, CHILD_PREFIX_LEN)
    }

    /// Child-side address in CIDR form.
    fn child_cidr(&self) -> String {
        format!("{}/{}", self.child_ip, CHILD_PREFIX_LEN)
    }
}
/// Owns the network namespace, veth pair, and iptables rules for one run.
/// Cleanup is idempotent and also runs from `Drop`.
struct NamespaceGuard {
    plan: NamespacePlan,
    // Temp resolv.conf pointing the child at the host-side DNS proxy.
    resolv_conf: PathBuf,
    // Flags tracking which resources were actually created, so cleanup()
    // only removes what setup installed.
    nat_rule_installed: bool,
    forward_rule_installed: bool,
    netns_created: bool,
    host_link_created: bool,
}
impl NamespaceGuard {
    /// Creates the namespace, veth pair, addressing, and iptables rules from
    /// `plan`; on any failure, everything created so far is torn down.
    fn create(plan: &NamespacePlan) -> Result<Self> {
        let mut guard = Self {
            plan: plan.clone(),
            resolv_conf: write_resolv_conf(plan.host_ip)?,
            nat_rule_installed: false,
            forward_rule_installed: false,
            netns_created: false,
            host_link_created: false,
        };
        // The closure lets any step bail while still reaching cleanup() below.
        let setup = (|| -> Result<()> {
            // 1. Fresh network namespace.
            run_host_command(["ip", "netns", "add", &guard.plan.netns_name])?;
            guard.netns_created = true;
            // 2. veth pair; deleting the host end later removes the peer too.
            run_host_command([
                "ip",
                "link",
                "add",
                &guard.plan.host_if,
                "type",
                "veth",
                "peer",
                "name",
                &guard.plan.child_if,
            ])?;
            guard.host_link_created = true;
            // 3. Address and bring up the host end.
            run_host_command([
                "ip",
                "addr",
                "add",
                &guard.plan.host_cidr(),
                "dev",
                &guard.plan.host_if,
            ])?;
            run_host_command(["ip", "link", "set", &guard.plan.host_if, "up"])?;
            // 4. Move the child end into the namespace and configure it
            //    (loopback up, address, link up).
            run_host_command([
                "ip",
                "link",
                "set",
                &guard.plan.child_if,
                "netns",
                &guard.plan.netns_name,
            ])?;
            run_host_command([
                "ip",
                "netns",
                "exec",
                &guard.plan.netns_name,
                "ip",
                "link",
                "set",
                "lo",
                "up",
            ])?;
            run_host_command([
                "ip",
                "netns",
                "exec",
                &guard.plan.netns_name,
                "ip",
                "addr",
                "add",
                &guard.plan.child_cidr(),
                "dev",
                &guard.plan.child_if,
            ])?;
            run_host_command([
                "ip",
                "netns",
                "exec",
                &guard.plan.netns_name,
                "ip",
                "link",
                "set",
                &guard.plan.child_if,
                "up",
            ])?;
            // 5. Default route via the host end of the veth.
            run_host_command([
                "ip",
                "netns",
                "exec",
                &guard.plan.netns_name,
                "ip",
                "route",
                "add",
                "default",
                "via",
                &guard.plan.host_ip.to_string(),
                "dev",
                &guard.plan.child_if,
            ])?;
            // 6. DNAT all TCP arriving from the namespace to the tor listener.
            run_host_command([
                "iptables",
                "-t",
                "nat",
                "-A",
                "PREROUTING",
                "-i",
                &guard.plan.host_if,
                "-p",
                "tcp",
                "-j",
                "DNAT",
                "--to-destination",
                &format!("{}:{}", guard.plan.host_ip, guard.plan.listener_port),
            ])?;
            guard.nat_rule_installed = true;
            // 7. Reject anything that would be forwarded out of the host,
            //    so non-TCP traffic cannot bypass tor.
            run_host_command([
                "iptables",
                "-A",
                "FORWARD",
                "-i",
                &guard.plan.host_if,
                "-j",
                "REJECT",
            ])?;
            guard.forward_rule_installed = true;
            Ok(())
        })();
        if let Err(err) = setup {
            guard.cleanup();
            return Err(err);
        }
        Ok(guard)
    }

    /// Runs `command` inside the namespace under `unshare`
    /// (user/mount/pid namespaces) via CHILD_SCRIPT, inheriting stdio;
    /// returns the child's exit status.
    async fn run_child(&self, command: &[String]) -> Result<ExitStatus> {
        let mut args = vec![
            OsString::from("netns"),
            OsString::from("exec"),
            OsString::from(&self.plan.netns_name),
            OsString::from("unshare"),
            OsString::from("--user"),
            OsString::from("--map-root-user"),
            OsString::from("--mount"),
            OsString::from("--pid"),
            OsString::from("--fork"),
            OsString::from("--kill-child"),
            OsString::from("sh"),
            OsString::from("-ceu"),
            OsString::from(CHILD_SCRIPT),
            // $0 for the script, then $1 = resolv.conf path for the bind mount.
            OsString::from("sh"),
            self.resolv_conf.as_os_str().to_os_string(),
        ];
        args.extend(command.iter().map(OsString::from));
        let status = TokioCommand::new("ip")
            .args(args)
            .stdin(Stdio::inherit())
            .stdout(Stdio::inherit())
            .stderr(Stdio::inherit())
            .status()
            .await
            .context("failed to execute child in tor namespace")?;
        Ok(status)
    }

    /// Tears down rules/links/namespace in reverse creation order. Each step
    /// is best-effort and guarded by its "installed" flag, so the method is
    /// safe to call repeatedly (including from Drop).
    fn cleanup(&mut self) {
        if self.forward_rule_installed {
            let _ = run_host_command([
                "iptables",
                "-D",
                "FORWARD",
                "-i",
                &self.plan.host_if,
                "-j",
                "REJECT",
            ]);
            self.forward_rule_installed = false;
        }
        if self.nat_rule_installed {
            let _ = run_host_command([
                "iptables",
                "-t",
                "nat",
                "-D",
                "PREROUTING",
                "-i",
                &self.plan.host_if,
                "-p",
                "tcp",
                "-j",
                "DNAT",
                "--to-destination",
                &format!("{}:{}", self.plan.host_ip, self.plan.listener_port),
            ]);
            self.nat_rule_installed = false;
        }
        if self.host_link_created {
            // Deleting the host end also removes the peer in the namespace.
            let _ = run_host_command(["ip", "link", "delete", &self.plan.host_if]);
            self.host_link_created = false;
        }
        if self.netns_created {
            let _ = run_host_command(["ip", "netns", "delete", &self.plan.netns_name]);
            self.netns_created = false;
        }
        let _ = fs::remove_file(&self.resolv_conf);
    }
}
impl Drop for NamespaceGuard {
    // Best-effort teardown when the guard is dropped without explicit cleanup.
    fn drop(&mut self) {
        self.cleanup();
    }
}
/// Writes a minimal resolv.conf pointing at `nameserver` and returns its path.
// NOTE(review): the path under the shared temp dir is predictable (pid-based)
// and written as root — confirm this is acceptable or switch to a private,
// randomly named tempfile.
fn write_resolv_conf(nameserver: Ipv4Addr) -> Result<PathBuf> {
    let path = std::env::temp_dir().join(format!("burrow-tor-resolv-{}.conf", std::process::id()));
    fs::write(&path, format!("nameserver {nameserver}\noptions ndots:1\n"))
        .with_context(|| format!("failed to write {}", path.display()))?;
    Ok(path)
}
fn run_host_command<const N: usize>(args: [&str; N]) -> Result<()> {
let (program, rest) = args
.split_first()
.expect("run_host_command requires a program and arguments");
let status = Command::new(program)
.args(rest)
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::piped())
.status()
.with_context(|| format!("failed to start host command {}", shell_words(&args)))?;
if status.success() {
Ok(())
} else {
bail!("host command failed: {}", shell_words(&args));
}
}
/// Renders an argv as a single shell-quoted string for log/error messages.
fn shell_words(args: &[&str]) -> String {
    let quoted: Vec<String> = args.iter().map(|arg| shlex_escape(arg)).collect();
    quoted.join(" ")
}
/// Quotes `value` for display in shell-style diagnostics: strings made only
/// of safe characters stay bare; anything else is single-quoted with embedded
/// single quotes escaped as '\''.
fn shlex_escape(value: &str) -> String {
    let needs_quoting = !value
        .chars()
        .all(|ch| ch.is_ascii_alphanumeric() || "-_./:=+".contains(ch));
    if needs_quoting {
        format!("'{}'", value.replace('\'', "'\\''"))
    } else {
        value.to_owned()
    }
}
// Runs inside the child's mount+pid namespaces: remount /proc for the new
// pid namespace, bind the generated resolv.conf ($1) over /etc/resolv.conf,
// then exec the user command (remaining args).
const CHILD_SCRIPT: &str = r#"
mount -t proc proc /proc
mount --bind "$1" /etc/resolv.conf
shift
exec "$@"
"#;
#[cfg(test)]
mod tests {
    use super::*;

    // Linux limits interface names to IFNAMSIZ (16 bytes incl. NUL).
    #[test]
    fn namespace_plan_uses_short_interface_names() {
        let plan = NamespacePlan::new(9040);
        assert!(plan.host_if.len() <= 15);
        assert!(plan.child_if.len() <= 15);
    }

    // Signal deaths map to the 128+N shell convention.
    #[test]
    fn signal_exit_code_uses_shell_convention() {
        let status = ExitStatus::from_raw(libc::SIGTERM);
        assert_eq!(child_exit_code(status).unwrap(), 128 + libc::SIGTERM);
    }
}

View file

@ -1,9 +0,0 @@
//! Tor backend: payload config parsing, transparent TCP proxying, DNS
//! proxying, and the namespaced `tor-exec` runner.
mod config;
pub(crate) mod dns;
mod exec;
mod runtime;
mod system;

pub use config::{ArtiConfig, Config, SystemTcpStackConfig, TcpStackConfig};
pub use exec::run_exec;
pub use runtime::{bootstrap_client, spawn, spawn_with_client, TorHandle};

View file

@ -1,126 +0,0 @@
use std::{sync::Arc, time::Duration};
use anyhow::{Context, Result};
use arti_client::{config::TorClientConfigBuilder, TorClient};
use tokio::{
sync::watch,
task::{JoinError, JoinSet},
};
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tor_rtcompat::PreferredRuntime;
use tracing::{debug, error, info, warn};
use super::{system::SystemTcpStackRuntime, Config, TcpStackConfig};
/// Handle to the background tor transparent-proxy task.
#[derive(Debug)]
pub struct TorHandle {
    // Sending `true` asks the proxy task to exit.
    shutdown: watch::Sender<bool>,
    task: tokio::task::JoinHandle<()>,
}

impl TorHandle {
    /// Signals shutdown and waits for the proxy task to finish.
    /// A cancelled task counts as a clean shutdown.
    pub async fn shutdown(self) -> Result<()> {
        let _ = self.shutdown.send(true);
        match self.task.await {
            Ok(()) => Ok(()),
            Err(err) if err.is_cancelled() => Ok(()),
            Err(err) => Err(join_error(err)),
        }
    }
}
/// Bootstraps an arti tor client using the configured state/cache dirs and
/// returns it wrapped in an `Arc` for sharing between tasks.
pub async fn bootstrap_client(config: &Config) -> Result<Arc<TorClient<PreferredRuntime>>> {
    let tor_config =
        TorClientConfigBuilder::from_directories(&config.arti.state_dir, &config.arti.cache_dir)
            .build()
            .context("failed to build arti config")?;
    let client = TorClient::create_bootstrapped(tor_config)
        .await
        .context("failed to bootstrap arti client")?;
    Ok(Arc::new(client))
}
/// Bootstraps a fresh tor client and starts the transparent proxy with it.
pub async fn spawn(config: Config) -> Result<TorHandle> {
    let client = bootstrap_client(&config).await?;
    spawn_with_client(config, client).await
}
/// Spawns the transparent-proxy accept loop using an already-bootstrapped
/// tor client; returns a handle used to shut it down.
pub async fn spawn_with_client(
    config: Config,
    tor_client: Arc<TorClient<PreferredRuntime>>,
) -> Result<TorHandle> {
    let (shutdown_tx, mut shutdown_rx) = watch::channel(false);
    let task = match config.tcp_stack.clone() {
        TcpStackConfig::System(system_config) => tokio::spawn(async move {
            let stack = match SystemTcpStackRuntime::bind(&system_config).await {
                Ok(stack) => stack,
                Err(err) => {
                    // Bind failure ends the task; callers observe it via shutdown().
                    error!(?err, "failed to bind system tcp stack listener");
                    return;
                }
            };
            info!(
                listen = %stack.local_addr(),
                "system tcp stack listener bound for tor transparent proxy"
            );
            // One task per proxied connection, tracked for orderly shutdown.
            let mut connections = JoinSet::new();
            loop {
                tokio::select! {
                    changed = shutdown_rx.changed() => {
                        match changed {
                            // Only a `true` value is a shutdown request.
                            Ok(()) if *shutdown_rx.borrow() => break,
                            Ok(()) => continue,
                            // Sender dropped: treat as shutdown.
                            Err(_) => break,
                        }
                    }
                    // Reap finished connection tasks so the set doesn't grow.
                    Some(res) = connections.join_next(), if !connections.is_empty() => {
                        match res {
                            Ok(Ok(())) => {}
                            Ok(Err(err)) => warn!(?err, "transparent proxy task failed"),
                            Err(err) => warn!(?err, "transparent proxy task panicked"),
                        }
                    }
                    accepted = stack.accept() => {
                        let (mut inbound, original_dst) = match accepted {
                            Ok(pair) => pair,
                            Err(err) => {
                                warn!(?err, "failed to accept transparent tcp connection");
                                // Brief pause avoids a hot loop on repeated accept errors.
                                tokio::time::sleep(Duration::from_millis(50)).await;
                                continue;
                            }
                        };
                        let tor_client = tor_client.clone();
                        connections.spawn(async move {
                            debug!(%original_dst, "accepted transparent tcp connection");
                            // Dial the original (pre-DNAT) destination through
                            // tor, then splice the two streams both ways.
                            let tor_stream = tor_client
                                .connect((original_dst.ip().to_string(), original_dst.port()))
                                .await
                                .with_context(|| format!("failed to connect to {original_dst} over tor"))?;
                            let mut tor_stream = tor_stream.compat();
                            tokio::io::copy_bidirectional(&mut inbound, &mut tor_stream)
                                .await
                                .with_context(|| format!("failed to bridge tor stream for {original_dst}"))?;
                            Result::<()>::Ok(())
                        });
                    }
                }
            }
            // Shutdown: cancel in-flight bridges, then drain the join set.
            connections.abort_all();
            while let Some(res) = connections.join_next().await {
                match res {
                    Ok(Ok(())) => {}
                    Ok(Err(err)) => debug!(?err, "transparent proxy task failed during shutdown"),
                    Err(err) => debug!(?err, "transparent proxy task exited during shutdown"),
                }
            }
        }),
    };
    Ok(TorHandle { shutdown: shutdown_tx, task })
}
fn join_error(err: JoinError) -> anyhow::Error {
anyhow::anyhow!("tor runtime task failed: {err}")
}

View file

@ -1,143 +0,0 @@
use std::net::SocketAddr;
use anyhow::{Context, Result};
use tokio::net::{TcpListener, TcpStream};
use super::SystemTcpStackConfig;
/// Transparent-proxy TCP listener backed by the host network stack.
pub struct SystemTcpStackRuntime {
    listener: TcpListener,
}

impl SystemTcpStackRuntime {
    /// Binds the listener on the configured address.
    pub async fn bind(config: &SystemTcpStackConfig) -> Result<Self> {
        let listener = TcpListener::bind(&config.listen)
            .await
            .with_context(|| format!("failed to bind transparent listener on {}", config.listen))?;
        Ok(Self { listener })
    }

    /// Actual bound address (useful if the config requested port 0).
    pub fn local_addr(&self) -> SocketAddr {
        self.listener
            .local_addr()
            .expect("listener should always have a local address")
    }

    /// Accepts one redirected connection and returns it together with its
    /// original (pre-DNAT) destination address.
    pub async fn accept(&self) -> Result<(TcpStream, SocketAddr)> {
        let (stream, _) = self
            .listener
            .accept()
            .await
            .context("failed to accept transparent listener connection")?;
        let original_dst = original_destination(&stream)?;
        Ok((stream, original_dst))
    }
}
/// Looks up the pre-DNAT destination of a redirected connection via the
/// netfilter SO_ORIGINAL_DST socket option.
#[cfg(target_os = "linux")]
fn original_destination(stream: &TcpStream) -> Result<SocketAddr> {
    use std::{
        mem::{size_of, MaybeUninit},
        os::fd::AsRawFd,
    };

    // SO_ORIGINAL_DST / IP6T_SO_ORIGINAL_DST: netfilter option returning the
    // pre-DNAT destination of a redirected connection. Both are numerically
    // 80 (linux/netfilter_ipv4.h, linux/netfilter_ipv6/ip6_tables.h). libc
    // does not export them, so name the constant here instead of passing a
    // bare magic literal.
    const SO_ORIGINAL_DST: libc::c_int = 80;

    // Pick the matching protocol level for the socket's address family.
    let level = if stream.local_addr()?.is_ipv6() {
        libc::SOL_IPV6
    } else {
        libc::SOL_IP
    };
    let mut addr = MaybeUninit::<libc::sockaddr_storage>::zeroed();
    let mut len = size_of::<libc::sockaddr_storage>() as libc::socklen_t;
    let rc = unsafe {
        libc::getsockopt(
            stream.as_raw_fd(),
            level,
            SO_ORIGINAL_DST,
            addr.as_mut_ptr().cast(),
            &mut len,
        )
    };
    if rc != 0 {
        return Err(std::io::Error::last_os_error()).context("SO_ORIGINAL_DST lookup failed");
    }
    socket_addr_from_storage(unsafe { &addr.assume_init() }, len as usize)
}
/// Non-linux stub: SO_ORIGINAL_DST is a netfilter feature, so transparent
/// destination lookup is unavailable elsewhere.
#[cfg(not(target_os = "linux"))]
fn original_destination(_stream: &TcpStream) -> Result<SocketAddr> {
    Err(anyhow::anyhow!(
        "system tcp stack transparent destination lookup is only implemented on linux"
    ))
}
/// Converts a raw `sockaddr_storage` of byte length `len` into a
/// `SocketAddr`, supporting AF_INET and AF_INET6.
fn socket_addr_from_storage(addr: &libc::sockaddr_storage, len: usize) -> Result<SocketAddr> {
    use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6};
    // The buffer must at least contain the address-family field.
    if len < std::mem::size_of::<libc::sa_family_t>() {
        anyhow::bail!("socket address buffer was too short");
    }
    match addr.ss_family as i32 {
        libc::AF_INET => {
            // Safety: ss_family says this storage holds a sockaddr_in.
            let addr_in = unsafe { *(addr as *const _ as *const libc::sockaddr_in) };
            // sin_addr/sin_port are stored big-endian (network order).
            let ip = Ipv4Addr::from(u32::from_be(addr_in.sin_addr.s_addr));
            let port = u16::from_be(addr_in.sin_port);
            Ok(SocketAddr::V4(SocketAddrV4::new(ip, port)))
        }
        libc::AF_INET6 => {
            // Safety: ss_family says this storage holds a sockaddr_in6.
            let addr_in = unsafe { *(addr as *const _ as *const libc::sockaddr_in6) };
            let ip = Ipv6Addr::from(addr_in.sin6_addr.s6_addr);
            let port = u16::from_be(addr_in.sin6_port);
            Ok(SocketAddr::V6(SocketAddrV6::new(
                ip,
                port,
                addr_in.sin6_flowinfo,
                addr_in.sin6_scope_id,
            )))
        }
        family => anyhow::bail!("unsupported socket address family {family}"),
    }
}
#[cfg(all(test, target_os = "linux"))]
mod tests {
    use super::*;
    use std::{
        mem::size_of,
        net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6},
    };

    // Round-trips a hand-built big-endian sockaddr_in through the parser.
    #[test]
    fn parses_ipv4_socket_addr() {
        let mut storage = unsafe { std::mem::zeroed::<libc::sockaddr_storage>() };
        let addr_in = unsafe { &mut *(&mut storage as *mut _ as *mut libc::sockaddr_in) };
        addr_in.sin_family = libc::AF_INET as libc::sa_family_t;
        addr_in.sin_port = u16::to_be(9040);
        addr_in.sin_addr = libc::in_addr {
            s_addr: u32::to_be(u32::from(Ipv4Addr::new(127, 0, 0, 1))),
        };
        let parsed = socket_addr_from_storage(&storage, size_of::<libc::sockaddr_in>()).unwrap();
        assert_eq!(
            parsed,
            SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 9040))
        );
    }

    // Same round-trip for a sockaddr_in6.
    #[test]
    fn parses_ipv6_socket_addr() {
        let mut storage = unsafe { std::mem::zeroed::<libc::sockaddr_storage>() };
        let addr_in = unsafe { &mut *(&mut storage as *mut _ as *mut libc::sockaddr_in6) };
        addr_in.sin6_family = libc::AF_INET6 as libc::sa_family_t;
        addr_in.sin6_port = u16::to_be(9150);
        addr_in.sin6_addr = libc::in6_addr {
            s6_addr: Ipv6Addr::LOCALHOST.octets(),
        };
        let parsed = socket_addr_from_storage(&storage, size_of::<libc::sockaddr_in6>()).unwrap();
        assert_eq!(
            parsed,
            SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 9150, 0, 0))
        );
    }
}

View file

@ -3,7 +3,8 @@ use std::sync::Once;
use tracing::{error, info}; use tracing::{error, info};
use tracing_subscriber::{ use tracing_subscriber::{
layer::{Layer, SubscriberExt}, layer::{Layer, SubscriberExt},
EnvFilter, Registry, EnvFilter,
Registry,
}; };
static TRACING: Once = Once::new(); static TRACING: Once = Once::new();
@ -14,49 +15,36 @@ pub fn initialize() {
error!("Failed to initialize LogTracer: {}", e); error!("Failed to initialize LogTracer: {}", e);
} }
let make_stderr = || { #[cfg(target_os = "windows")]
let system_log = Some(tracing_subscriber::fmt::layer());
#[cfg(target_os = "linux")]
let system_log = match tracing_journald::layer() {
Ok(layer) => Some(layer),
Err(e) => {
if e.kind() != std::io::ErrorKind::NotFound {
error!("Failed to initialize journald: {}", e);
}
None
}
};
#[cfg(target_vendor = "apple")]
let system_log = Some(tracing_oslog::OsLogger::new(
"com.hackclub.burrow",
"tracing",
));
let stderr = (console::user_attended_stderr() || system_log.is_none()).then(|| {
tracing_subscriber::fmt::layer() tracing_subscriber::fmt::layer()
.with_level(true) .with_level(true)
.with_writer(std::io::stderr) .with_writer(std::io::stderr)
.with_line_number(true) .with_line_number(true)
.compact() .compact()
.with_filter(EnvFilter::from_default_env()) .with_filter(EnvFilter::from_default_env())
}; });
#[cfg(target_os = "windows")] let subscriber = Registry::default().with(stderr).with(system_log);
let subscriber = {
let system_log = Some(tracing_subscriber::fmt::layer());
let stderr = (console::user_attended_stderr() || system_log.is_none()).then(make_stderr);
Registry::default().with(stderr).with(system_log)
};
#[cfg(target_os = "linux")]
let subscriber = {
let system_log = match tracing_journald::layer() {
Ok(layer) => Some(layer),
Err(e) => {
if e.kind() != std::io::ErrorKind::NotFound {
error!("Failed to initialize journald: {}", e);
}
None
}
};
let stderr = (console::user_attended_stderr() || system_log.is_none()).then(make_stderr);
Registry::default().with(stderr).with(system_log)
};
#[cfg(target_os = "macos")]
let subscriber = {
let system_log = Some(tracing_oslog::OsLogger::new(
"com.hackclub.burrow",
"tracing",
));
let stderr = (console::user_attended_stderr() || system_log.is_none()).then(make_stderr);
Registry::default().with(stderr).with(system_log)
};
#[cfg(not(any(target_os = "windows", target_os = "linux", target_os = "macos")))]
let subscriber = Registry::default().with(Some(make_stderr()));
#[cfg(feature = "tokio-console")] #[cfg(feature = "tokio-console")]
let subscriber = subscriber.with( let subscriber = subscriber.with(

View file

@ -1,935 +0,0 @@
use std::{
collections::HashMap,
env,
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
os::fd::{AsRawFd, FromRawFd, RawFd},
os::unix::net::UnixStream as StdUnixStream,
os::unix::process::ExitStatusExt,
path::{Path, PathBuf},
process::{Command as StdCommand, ExitStatus},
str,
sync::Arc,
time::Duration,
};
use anyhow::{anyhow, bail, Context, Result};
use clap::ValueEnum;
use futures::{SinkExt, StreamExt};
use ipnetwork::IpNetwork;
use netstack_smoltcp::{
StackBuilder, TcpListener as StackTcpListener, TcpStream as StackTcpStream,
UdpSocket as StackUdpSocket,
};
use nix::{
cmsg_space,
fcntl::{fcntl, FcntlArg, FdFlag},
sys::socket::{recvmsg, sendmsg, ControlMessage, ControlMessageOwned, MsgFlags},
};
use serde::{Deserialize, Serialize};
use tokio::{
io::copy_bidirectional,
net::{TcpStream, UdpSocket},
process::{Child, Command},
sync::{mpsc, Mutex, RwLock},
task::JoinSet,
};
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tracing::{debug, warn};
use tun::{tokio::TunInterface as TokioTunInterface, TunOptions};
use crate::{
tor::{bootstrap_client, dns::build_response as build_tor_dns_response, Config as TorConfig},
wireguard::{Config as WireGuardConfig, Interface as WireGuardInterface},
};
// Environment variables used to hand state to the re-invoked inner
// (namespaced) process.
const INNER_ENV: &str = "BURROW_USERNET_INNER";
const INNER_CONTROL_FD_ENV: &str = "BURROW_USERNET_CONTROL_FD";
const INNER_TUN_CONFIG_ENV: &str = "BURROW_USERNET_TUN_CONFIG";
// Defaults used when the payload omits the values — presumably consumed by
// run_exec further down this file; confirm against that code.
const DEFAULT_MTU: u32 = 1500;
const DEFAULT_TUN_V4: &str = "100.64.0.2/24";
const DEFAULT_TUN_V6: &str = "fd00:64::2/64";
/// Idle timeout after which a forwarded UDP flow is considered dead.
const UDP_IDLE_TIMEOUT: Duration = Duration::from_secs(30);
/// Single-byte readiness acknowledgement exchanged on the control socket.
const READY_ACK: &[u8; 1] = b"1";
/// Backend selected on the command line for `burrow exec`.
#[derive(Clone, Debug, Eq, PartialEq, ValueEnum)]
pub enum ExecBackendKind {
    Direct,
    Tor,
    Wireguard,
}
impl ExecBackendKind {
    /// The `--backend` value to pass when re-invoking this binary for the
    /// namespaced child (see `spawn_namespaced_child`).
    fn cli_name(&self) -> &'static str {
        match self {
            Self::Direct => "direct",
            Self::Tor => "tor",
            Self::Wireguard => "wireguard",
        }
    }
}
/// Parsed `exec` command line: which backend to use, an optional payload file,
/// and the command to run inside the namespace.
#[derive(Clone, Debug)]
pub struct ExecInvocation {
    pub backend: ExecBackendKind,
    pub payload_path: Option<PathBuf>,
    pub command: Vec<String>,
}
/// Configuration for the direct (plain-socket) backend; all fields optional,
/// with defaults filled in by `socket_tun_config`.
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct DirectConfig {
    // Tunnel addresses in CIDR notation, e.g. "100.64.0.2/24".
    #[serde(default)]
    pub address: Vec<String>,
    // DNS servers (currently unused by the code visible in this module).
    #[serde(default)]
    pub dns: Vec<String>,
    #[serde(default)]
    pub mtu: Option<u32>,
    #[serde(default)]
    pub tun_name: Option<String>,
}
impl DirectConfig {
    /// Decode a direct-backend payload.
    ///
    /// An empty payload yields the default configuration. Otherwise JSON is
    /// tried first and TOML is used as the fallback; the payload must be
    /// valid UTF-8 for the TOML path.
    pub fn from_payload(payload: &[u8]) -> Result<Self> {
        if payload.is_empty() {
            return Ok(Self::default());
        }
        match serde_json::from_slice(payload) {
            Ok(config) => Ok(config),
            Err(_) => {
                let text =
                    str::from_utf8(payload).context("direct payload must be valid UTF-8")?;
                toml::from_str(text).context("failed to parse direct payload as JSON or TOML")
            }
        }
    }
}
/// Tunnel parameters handed to the namespaced child via
/// `INNER_TUN_CONFIG_ENV` (JSON-encoded).
#[derive(Debug, Clone, Serialize, Deserialize)]
struct TunNetworkConfig {
    tun_name: String,
    addresses: Vec<IpNetwork>,
    mtu: u32,
}
/// A backend that has finished its (possibly slow) setup — e.g. Tor bootstrap —
/// before the namespaced child is spawned.
enum PreparedBackend {
    Socket {
        backend: SocketBackend,
        tun_config: TunNetworkConfig,
    },
    Wireguard {
        config: WireGuardConfig,
        tun_config: TunNetworkConfig,
    },
}
impl PreparedBackend {
    /// Tunnel configuration shared by both variants.
    fn tun_config(&self) -> &TunNetworkConfig {
        match self {
            Self::Socket { tun_config, .. } => tun_config,
            Self::Wireguard { tun_config, .. } => tun_config,
        }
    }
}
/// Handle on the child process running inside the unshared namespace plus the
/// Unix socket used for fd-passing and the readiness handshake.
struct NamespaceChild {
    child: Child,
    control: StdUnixStream,
}
/// Outbound connection strategy for the userspace netstack.
#[derive(Clone)]
enum SocketBackend {
    Direct,
    Tor(Arc<arti_client::TorClient<tor_rtcompat::PreferredRuntime>>),
}
/// A UDP datagram to be written back into the userspace stack.
#[derive(Debug)]
struct UdpReply {
    payload: Vec<u8>,
    source: SocketAddr,
    destination: SocketAddr,
}
/// Identifies one (local, remote) UDP flow for session reuse.
#[derive(Debug, Clone, Eq, Hash, PartialEq)]
struct UdpFlowKey {
    local: SocketAddr,
    remote: SocketAddr,
}
/// Entry point for `burrow exec`.
///
/// The same binary is invoked twice: once as the supervisor on the host, and
/// once (re-executed under `unshare`) as the child inside the namespace. The
/// `INNER_ENV` marker distinguishes the two roles.
pub async fn run_exec(invocation: ExecInvocation) -> Result<i32> {
    if invocation.command.is_empty() {
        bail!("exec requires a command to run");
    }
    match env::var_os(INNER_ENV) {
        Some(_) => run_inner(invocation.command).await,
        None => run_supervisor(invocation).await,
    }
}
/// Host-side half of `exec`: prepare the backend, spawn the namespaced child,
/// receive its tun fd over the control socket, then drive packets until the
/// child exits. Returns the child's exit code.
async fn run_supervisor(invocation: ExecInvocation) -> Result<i32> {
    // Backend setup happens before spawning so the child never runs without a
    // working backend (e.g. Tor must bootstrap first).
    let prepared = prepare_backend(&invocation).await?;
    let mut child = spawn_namespaced_child(&invocation, prepared.tun_config())?;
    let tun = child.receive_tun().await?;
    match prepared {
        PreparedBackend::Socket { backend, .. } => run_socket_backend(backend, tun, child).await,
        PreparedBackend::Wireguard { config, .. } => {
            run_wireguard_backend(config, tun, child).await
        }
    }
}
/// Read and parse the backend payload, derive the tunnel configuration, and
/// perform any backend bootstrap (Tor client) before the namespace is created.
async fn prepare_backend(invocation: &ExecInvocation) -> Result<PreparedBackend> {
    match invocation.backend {
        ExecBackendKind::Direct => {
            // Payload is optional for direct mode; defaults are filled in.
            let payload = read_optional_payload(invocation.payload_path.as_deref()).await?;
            let config = DirectConfig::from_payload(&payload)?;
            let tun_config = socket_tun_config(
                &config.address,
                config.mtu,
                config.tun_name.as_deref(),
                "burrow-direct",
            )?;
            Ok(PreparedBackend::Socket {
                backend: SocketBackend::Direct,
                tun_config,
            })
        }
        ExecBackendKind::Tor => {
            let payload = read_required_payload(invocation.payload_path.as_deref(), "tor").await?;
            let mut config = TorConfig::from_payload(&payload)?;
            // Give this process its own arti state/cache dirs, keyed by pid,
            // before bootstrapping.
            let (state_dir, cache_dir) = config.runtime_dirs(std::process::id() as i32);
            config.arti.state_dir = state_dir;
            config.arti.cache_dir = cache_dir;
            let tun_config = socket_tun_config(
                &config.address,
                config.mtu,
                config.tun_name.as_deref(),
                "burrow-tor",
            )?;
            let tor_client = bootstrap_client(&config).await?;
            Ok(PreparedBackend::Socket {
                backend: SocketBackend::Tor(tor_client),
                tun_config,
            })
        }
        ExecBackendKind::Wireguard => {
            let payload =
                read_required_payload(invocation.payload_path.as_deref(), "wireguard").await?;
            let config = parse_wireguard_payload(&payload, invocation.payload_path.as_deref())?;
            let tun_config = wireguard_tun_config(&config)?;
            Ok(PreparedBackend::Wireguard { config, tun_config })
        }
    }
}
/// Re-execute this binary under `unshare` with fresh user/net/mount/pid
/// namespaces, wiring up a Unix socketpair for fd-passing and readiness.
///
/// Returns the child handle plus the parent end of the control socket.
fn spawn_namespaced_child(
    invocation: &ExecInvocation,
    tun_config: &TunNetworkConfig,
) -> Result<NamespaceChild> {
    ensure_tool("unshare")?;
    ensure_tool("ip")?;
    let (parent_control, child_control) =
        StdUnixStream::pair().context("failed to create namespace control socket")?;
    // The child end must survive exec, so clear FD_CLOEXEC on it.
    set_inheritable(child_control.as_raw_fd())?;
    let current_exe = env::current_exe().context("failed to locate current burrow binary")?;
    let mut cmd = Command::new("unshare");
    cmd.args([
        "--user",
        "--map-root-user",
        "--net",
        "--mount",
        "--pid",
        "--fork",
        "--kill-child",
        "--mount-proc",
    ]);
    // Mark the child as the inner process and tell it which fd to use and
    // which tunnel parameters to apply.
    cmd.env(INNER_ENV, "1");
    cmd.env(INNER_CONTROL_FD_ENV, child_control.as_raw_fd().to_string());
    cmd.env(
        INNER_TUN_CONFIG_ENV,
        serde_json::to_string(tun_config).context("failed to encode namespace tun config")?,
    );
    cmd.arg(current_exe);
    cmd.arg("exec");
    cmd.args(["--backend", invocation.backend.cli_name()]);
    if let Some(payload_path) = &invocation.payload_path {
        cmd.arg("--payload");
        cmd.arg(payload_path);
    }
    cmd.arg("--");
    cmd.args(&invocation.command);
    let child = cmd
        .spawn()
        .context("failed to enter unshared Linux namespace")?;
    // Close the parent's copy of the child end; only the child keeps it open.
    drop(child_control);
    Ok(NamespaceChild { child, control: parent_control })
}
/// Namespace-side half of `exec`: bring up loopback, create and configure the
/// tun device, hand its fd to the supervisor, wait for the ready ack, then run
/// the user command. Ordering here matters: the fd is sent before the local
/// handle is dropped, and the command only starts after the parent acks.
async fn run_inner(command: Vec<String>) -> Result<i32> {
    run_ip(["link", "set", "lo", "up"])?;
    let tun_config = read_inner_tun_config()?;
    let tun = open_tun_device(&tun_config)?;
    configure_tun_addresses(&tun, &tun_config.addresses, tun_config.mtu)?;
    let name = tun.name().context("failed to retrieve tun device name")?;
    run_ip(["link", "set", "dev", &name, "up"])?;
    install_default_routes(&name, &tun_config.addresses)?;
    let control_fd = env::var(INNER_CONTROL_FD_ENV)
        .context("missing namespace control fd")?
        .parse::<RawFd>()
        .context("invalid namespace control fd")?;
    // SCM_RIGHTS duplicates the fd into the parent, so the local handle can
    // be dropped afterwards without tearing down the device for the parent.
    send_tun_fd(control_fd, tun.as_raw_fd())?;
    await_parent_ready(control_fd).await?;
    drop(tun);
    let status = spawn_child(&command).await?;
    child_exit_code(status)
}
impl NamespaceChild {
    /// Receive the tun device fd sent by the child over the control socket
    /// and wrap it in a tokio interface. Blocking recvmsg runs on the
    /// blocking thread pool to avoid stalling the async runtime.
    async fn receive_tun(&mut self) -> Result<TokioTunInterface> {
        let control = self
            .control
            .try_clone()
            .context("failed to clone namespace control socket")?;
        let fd = tokio::task::spawn_blocking(move || recv_tun_fd(&control))
            .await
            .context("failed to join namespace tun receive task")??;
        tokio_tun_from_fd(fd)
    }
    /// Tell the child the backend is wired up so it may exec the user command.
    async fn signal_ready(&self) -> Result<()> {
        let mut control = self
            .control
            .try_clone()
            .context("failed to clone namespace control socket")?;
        tokio::task::spawn_blocking(move || -> Result<()> {
            std::io::Write::write_all(&mut control, READY_ACK)
                .context("failed to acknowledge namespace readiness")?;
            Ok(())
        })
        .await
        .context("failed to join namespace ready task")??;
        Ok(())
    }
    /// Wait for the namespaced child process to exit.
    async fn wait(mut self) -> Result<ExitStatus> {
        self.child
            .wait()
            .await
            .context("failed to wait for namespace child")
    }
}
/// Drive the direct/Tor backends: pump packets between the tun device and the
/// userspace smoltcp stack, and dispatch TCP streams / UDP datagrams that the
/// stack surfaces to the chosen backend. Runs until the child exits.
async fn run_socket_backend(
    backend: SocketBackend,
    tun: TokioTunInterface,
    child: NamespaceChild,
) -> Result<i32> {
    let tun = Arc::new(tun);
    let (stack, runner, udp_socket, tcp_listener) = StackBuilder::default()
        .stack_buffer_size(1024)
        .udp_buffer_size(1024)
        .tcp_buffer_size(1024)
        .enable_udp(true)
        .enable_tcp(true)
        .enable_icmp(true)
        .build()
        .context("failed to build userspace netstack")?;
    let (mut stack_sink, mut stack_stream) = stack.split();
    let mut tasks = JoinSet::new();
    if let Some(runner) = runner {
        tasks.spawn(async move { runner.await.map_err(anyhow::Error::from) });
    }
    // tun -> stack: read raw packets from the device and feed the stack.
    {
        let tun = tun.clone();
        tasks.spawn(async move {
            let mut buf = vec![0u8; 65_535];
            loop {
                let len = tun
                    .recv(&mut buf)
                    .await
                    .context("failed to read packet from tun")?;
                if len == 0 {
                    continue;
                }
                stack_sink
                    .send(buf[..len].to_vec())
                    .await
                    .context("failed to send tun packet into userspace stack")?;
            }
            // The loop above only exits via `?`; this keeps the closure's
            // return type inferable.
            #[allow(unreachable_code)]
            Result::<()>::Ok(())
        });
    }
    // stack -> tun: write packets emitted by the stack back to the device.
    {
        let tun = tun.clone();
        tasks.spawn(async move {
            while let Some(packet) = stack_stream.next().await {
                let packet = packet.context("failed to receive packet from userspace stack")?;
                tun.send(&packet)
                    .await
                    .context("failed to write userspace stack packet to tun")?;
            }
            Result::<()>::Ok(())
        });
    }
    if let Some(tcp_listener) = tcp_listener {
        let backend = backend.clone();
        tasks.spawn(async move { tcp_dispatch_loop(tcp_listener, backend).await });
    }
    if let Some(udp_socket) = udp_socket {
        tasks.spawn(async move { udp_dispatch_loop(udp_socket, backend).await });
    }
    // Only now is the child allowed to exec the user command.
    child.signal_ready().await?;
    let status = child.wait().await?;
    // Background tasks are best-effort at shutdown; errors are logged only.
    tasks.abort_all();
    while let Some(joined) = tasks.join_next().await {
        match joined {
            Ok(Ok(())) => {}
            Ok(Err(err)) => debug!(?err, "usernet background task exited with error"),
            Err(err) if err.is_cancelled() => {}
            Err(err) => debug!(?err, "usernet background task panicked"),
        }
    }
    child_exit_code(status)
}
/// Drive the WireGuard backend: attach the received tun to the WireGuard
/// interface, run it until the child exits, then detach and shut down.
async fn run_wireguard_backend(
    config: WireGuardConfig,
    tun: TokioTunInterface,
    child: NamespaceChild,
) -> Result<i32> {
    let interface: WireGuardInterface = config.try_into()?;
    interface.set_tun(tun).await;
    let interface = Arc::new(interface);
    let runner = {
        let interface = interface.clone();
        tokio::spawn(async move { interface.run().await })
    };
    child.signal_ready().await?;
    let status = child.wait().await?;
    // Detach the tun first so the runner can wind down cleanly.
    interface.remove_tun().await;
    match runner.await {
        Ok(Ok(())) => {}
        Ok(Err(err)) => debug!(?err, "wireguard exec runtime exited with error"),
        Err(err) if err.is_cancelled() => {}
        Err(err) => debug!(?err, "wireguard exec runtime panicked"),
    }
    child_exit_code(status)
}
/// Accept TCP streams surfaced by the userspace stack and bridge each one to
/// the backend on its own task, reaping finished bridges as they complete.
async fn tcp_dispatch_loop(mut listener: StackTcpListener, backend: SocketBackend) -> Result<()> {
    let mut tasks = JoinSet::new();
    loop {
        tokio::select! {
            // Reap completed bridge tasks; the guard avoids polling an empty set.
            Some(result) = tasks.join_next(), if !tasks.is_empty() => {
                match result {
                    Ok(Ok(())) => {}
                    Ok(Err(err)) => warn!(?err, "tcp bridge task failed"),
                    Err(err) if err.is_cancelled() => {}
                    Err(err) => warn!(?err, "tcp bridge task panicked"),
                }
            }
            next = listener.next() => match next {
                Some((stream, local_addr, remote_addr)) => {
                    debug!(%local_addr, %remote_addr, "accepted userspace tcp stream");
                    let backend = backend.clone();
                    tasks.spawn(async move {
                        bridge_tcp(backend, stream, local_addr, remote_addr).await
                    });
                }
                // Listener exhausted: stop accepting and shut down.
                None => break,
            }
        }
    }
    tasks.abort_all();
    while let Some(result) = tasks.join_next().await {
        match result {
            Ok(Ok(())) => {}
            Ok(Err(err)) => debug!(?err, "tcp bridge task exited during shutdown"),
            Err(err) if err.is_cancelled() => {}
            Err(err) => debug!(?err, "tcp bridge task panicked during shutdown"),
        }
    }
    Ok(())
}
/// Bridge one userspace TCP stream to its destination, either via a direct
/// host socket or through the Tor client, copying bytes in both directions
/// until either side closes.
async fn bridge_tcp(
    backend: SocketBackend,
    mut inbound: StackTcpStream,
    _local_addr: SocketAddr,
    remote_addr: SocketAddr,
) -> Result<()> {
    match backend {
        SocketBackend::Direct => {
            debug!(%remote_addr, "dialing direct outbound tcp");
            let mut outbound = TcpStream::connect(remote_addr)
                .await
                .with_context(|| format!("failed to connect to {remote_addr}"))?;
            copy_bidirectional(&mut inbound, &mut outbound)
                .await
                .with_context(|| format!("failed to bridge tcp stream for {remote_addr}"))?;
        }
        SocketBackend::Tor(tor_client) => {
            debug!(%remote_addr, "dialing tor outbound tcp");
            let tor_stream = tor_client
                .connect((remote_addr.ip().to_string(), remote_addr.port()))
                .await
                .with_context(|| format!("failed to connect to {remote_addr} over tor"))?;
            // Adapt the futures-io stream to tokio's AsyncRead/AsyncWrite.
            let mut tor_stream = tor_stream.compat();
            copy_bidirectional(&mut inbound, &mut tor_stream)
                .await
                .with_context(|| format!("failed to bridge tor stream for {remote_addr}"))?;
        }
    }
    Ok(())
}
/// Pump UDP datagrams between the userspace stack and the backend.
///
/// Direct mode keeps one session task per (local, remote) flow; Tor mode only
/// answers DNS (port 53) queries via the Tor resolver and drops other UDP.
/// Replies flow back through a single mpsc channel into the stack writer.
async fn udp_dispatch_loop(socket: StackUdpSocket, backend: SocketBackend) -> Result<()> {
    let (mut udp_reader, mut udp_writer) = socket.split();
    let (reply_tx, mut reply_rx) = mpsc::channel::<UdpReply>(128);
    let direct_sessions = Arc::new(Mutex::new(
        HashMap::<UdpFlowKey, mpsc::Sender<Vec<u8>>>::new(),
    ));
    let mut session_tasks = JoinSet::new();
    loop {
        tokio::select! {
            // Reap finished per-flow session tasks.
            Some(result) = session_tasks.join_next(), if !session_tasks.is_empty() => {
                match result {
                    Ok(Ok(())) => {}
                    Ok(Err(err)) => warn!(?err, "udp session task failed"),
                    Err(err) if err.is_cancelled() => {}
                    Err(err) => warn!(?err, "udp session task panicked"),
                }
            }
            // Inbound replies from session tasks go back into the stack.
            maybe_reply = reply_rx.recv() => match maybe_reply {
                Some(reply) => {
                    udp_writer
                        .send((reply.payload, reply.source, reply.destination))
                        .await
                        .context("failed to write udp reply into userspace stack")?;
                }
                None => break,
            },
            // Outbound datagrams surfaced by the stack.
            maybe_datagram = udp_reader.next() => match maybe_datagram {
                Some((payload, local_addr, remote_addr)) => {
                    match &backend {
                        SocketBackend::Direct => {
                            dispatch_direct_udp(
                                payload,
                                local_addr,
                                remote_addr,
                                reply_tx.clone(),
                                direct_sessions.clone(),
                                &mut session_tasks,
                            ).await?;
                        }
                        SocketBackend::Tor(tor_client) => {
                            if remote_addr.port() != 53 {
                                debug!(%remote_addr, "dropping non-DNS UDP datagram for tor backend");
                                continue;
                            }
                            let response = build_tor_dns_response(&payload, tor_client.as_ref()).await?;
                            reply_tx
                                .send(UdpReply {
                                    payload: response,
                                    source: remote_addr,
                                    destination: local_addr,
                                })
                                .await
                                .context("failed to enqueue tor dns response")?;
                        }
                    }
                }
                None => break,
            }
        }
    }
    session_tasks.abort_all();
    while let Some(result) = session_tasks.join_next().await {
        match result {
            Ok(Ok(())) => {}
            Ok(Err(err)) => debug!(?err, "udp session task exited during shutdown"),
            Err(err) if err.is_cancelled() => {}
            Err(err) => debug!(?err, "udp session task panicked during shutdown"),
        }
    }
    Ok(())
}
/// Route one outbound UDP datagram to its per-(local, remote) session,
/// creating the session task on first use.
///
/// If an existing session's channel is closed (its task exited), the stale
/// entry is removed and a fresh session is started with the same datagram.
/// The payload is recovered from the failed send rather than cloned up front,
/// which avoids copying every datagram on the common fast path.
async fn dispatch_direct_udp(
    payload: Vec<u8>,
    local_addr: SocketAddr,
    remote_addr: SocketAddr,
    reply_tx: mpsc::Sender<UdpReply>,
    sessions: Arc<Mutex<HashMap<UdpFlowKey, mpsc::Sender<Vec<u8>>>>>,
    session_tasks: &mut JoinSet<Result<()>>,
) -> Result<()> {
    let key = UdpFlowKey {
        local: local_addr,
        remote: remote_addr,
    };
    // Clone the sender out of the map so the lock is not held across `.await`.
    let existing = { sessions.lock().await.get(&key).cloned() };
    let payload = match existing {
        Some(sender) => match sender.send(payload).await {
            Ok(()) => return Ok(()),
            // SendError returns the datagram; drop the dead session and fall
            // through to create a replacement with the recovered payload.
            Err(send_error) => {
                sessions.lock().await.remove(&key);
                send_error.0
            }
        },
        None => payload,
    };
    let (tx, rx) = mpsc::channel::<Vec<u8>>(32);
    tx.send(payload)
        .await
        .context("failed to enqueue outbound udp payload")?;
    sessions.lock().await.insert(key.clone(), tx);
    session_tasks.spawn(async move { run_direct_udp_session(key, rx, reply_tx, sessions).await });
    Ok(())
}
/// One direct-mode UDP flow: a connected host socket that forwards queued
/// outbound datagrams and relays replies back to the stack. The session ends
/// when the outbound channel closes or no reply arrives within
/// `UDP_IDLE_TIMEOUT`; the sessions map entry is removed on exit.
async fn run_direct_udp_session(
    key: UdpFlowKey,
    mut outbound_rx: mpsc::Receiver<Vec<u8>>,
    reply_tx: mpsc::Sender<UdpReply>,
    sessions: Arc<Mutex<HashMap<UdpFlowKey, mpsc::Sender<Vec<u8>>>>>,
) -> Result<()> {
    // Bind on the matching address family of the remote peer.
    let bind_addr = match key.remote {
        SocketAddr::V4(_) => SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
        SocketAddr::V6(_) => SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0),
    };
    let socket = UdpSocket::bind(bind_addr)
        .await
        .with_context(|| format!("failed to bind udp socket for {}", key.remote))?;
    socket
        .connect(key.remote)
        .await
        .with_context(|| format!("failed to connect udp socket to {}", key.remote))?;
    let mut buf = vec![0u8; 65_535];
    loop {
        tokio::select! {
            maybe_payload = outbound_rx.recv() => match maybe_payload {
                Some(payload) => {
                    socket
                        .send(&payload)
                        .await
                        .with_context(|| format!("failed to send udp payload to {}", key.remote))?;
                }
                // Dispatcher dropped the sender: session is done.
                None => break,
            },
            recv = tokio::time::timeout(UDP_IDLE_TIMEOUT, socket.recv(&mut buf)) => match recv {
                Ok(Ok(len)) => {
                    reply_tx
                        .send(UdpReply {
                            payload: buf[..len].to_vec(),
                            source: key.remote,
                            destination: key.local,
                        })
                        .await
                        .context("failed to enqueue inbound udp reply")?;
                }
                Ok(Err(err)) => return Err(err).with_context(|| format!("failed to receive udp response from {}", key.remote)),
                // Idle timeout: retire the session.
                Err(_) => break,
            }
        }
    }
    sessions.lock().await.remove(&key);
    Ok(())
}
/// Derive the namespace tunnel config from a WireGuard interface section,
/// using a fixed device name.
fn wireguard_tun_config(config: &WireGuardConfig) -> Result<TunNetworkConfig> {
    parse_tun_config(
        &config.interface.address,
        config.interface.mtu,
        Some("burrow-wireguard"),
    )
}
/// Derive the namespace tunnel config for the socket backends, substituting
/// the dual-stack default addresses when none were supplied and falling back
/// to `default_name` when no device name was given.
fn socket_tun_config(
    addresses: &[String],
    mtu: Option<u32>,
    tun_name: Option<&str>,
    default_name: &str,
) -> Result<TunNetworkConfig> {
    let name = tun_name.unwrap_or(default_name);
    if addresses.is_empty() {
        let fallback = [DEFAULT_TUN_V4.to_string(), DEFAULT_TUN_V6.to_string()];
        parse_tun_config(&fallback, mtu, Some(name))
    } else {
        parse_tun_config(addresses, mtu, Some(name))
    }
}
/// Parse CIDR address strings into `IpNetwork`s and assemble the tunnel
/// config, applying `DEFAULT_MTU` and a generic device name as fallbacks.
fn parse_tun_config(
    addresses: &[String],
    mtu: Option<u32>,
    tun_name: Option<&str>,
) -> Result<TunNetworkConfig> {
    let mut parsed = Vec::with_capacity(addresses.len());
    for addr in addresses {
        let network = addr
            .parse::<IpNetwork>()
            .with_context(|| format!("invalid tunnel address '{addr}'"))?;
        parsed.push(network);
    }
    Ok(TunNetworkConfig {
        tun_name: tun_name.unwrap_or("burrow-exec").to_string(),
        addresses: parsed,
        mtu: mtu.unwrap_or(DEFAULT_MTU),
    })
}
/// Create the tun device inside the namespace with the configured name,
/// no packet-info header, and exclusive ownership.
fn open_tun_device(config: &TunNetworkConfig) -> Result<tun::TunInterface> {
    let tun = TunOptions::new()
        .name(&config.tun_name)
        .no_pi(true)
        .tun_excl(true)
        .open()
        .context("failed to create tun device")?;
    // Unwrap the synchronous interface from the wrapper returned by open().
    Ok(tun.inner.into_inner())
}
/// Wrap a raw tun fd (received over SCM_RIGHTS) in a tokio interface.
/// Safety: the caller must pass a valid, owned tun fd; ownership transfers
/// to the returned interface.
fn tokio_tun_from_fd(fd: RawFd) -> Result<TokioTunInterface> {
    let tun = unsafe { tun::TunInterface::from_raw_fd(fd) };
    TokioTunInterface::new(tun).context("failed to wrap tun fd in tokio interface")
}
/// Decode the JSON tunnel config the supervisor placed in the child's
/// environment (`INNER_TUN_CONFIG_ENV`).
fn read_inner_tun_config() -> Result<TunNetworkConfig> {
    env::var(INNER_TUN_CONFIG_ENV)
        .context("missing namespace tun config")
        .and_then(|raw| serde_json::from_str(&raw).context("invalid namespace tun config"))
}
/// Assign the configured IPv4/IPv6 networks to the tun device and set its MTU.
/// IPv4 networks also get a derived netmask and broadcast address.
fn configure_tun_addresses(
    iface: &tun::TunInterface,
    networks: &[IpNetwork],
    mtu: u32,
) -> Result<()> {
    for network in networks {
        match network {
            IpNetwork::V4(net) => {
                iface.set_ipv4_addr(net.ip())?;
                let netmask = prefix_to_netmask_v4(net.prefix());
                iface.set_netmask(netmask)?;
                iface.set_broadcast_addr(broadcast_v4(net.ip(), netmask))?;
            }
            IpNetwork::V6(net) => iface.add_ipv6_addr(net.ip(), net.prefix())?,
        }
    }
    iface.set_mtu(mtu as i32)?;
    Ok(())
}
/// Install default routes through the tun device for each address family
/// that appears in `networks`.
fn install_default_routes(name: &str, networks: &[IpNetwork]) -> Result<()> {
    let mut has_v4 = false;
    let mut has_v6 = false;
    for network in networks {
        match network {
            IpNetwork::V4(_) => has_v4 = true,
            IpNetwork::V6(_) => has_v6 = true,
        }
    }
    if has_v4 {
        run_ip(["route", "replace", "default", "dev", name])?;
    }
    if has_v6 {
        run_ip(["-6", "route", "replace", "default", "dev", name])?;
    }
    Ok(())
}
/// Run `ip <args>` synchronously and fail with the full command line if it
/// exits non-zero.
fn run_ip<const N: usize>(args: [&str; N]) -> Result<()> {
    let status = StdCommand::new("ip")
        .args(args)
        .status()
        .context("failed to execute ip command")?;
    if !status.success() {
        bail!("ip {} failed with status {}", args.join(" "), status);
    }
    Ok(())
}
/// Clear FD_CLOEXEC on `fd` so it survives the exec of the namespaced child.
fn set_inheritable(fd: RawFd) -> Result<()> {
    let flags = FdFlag::from_bits_truncate(
        fcntl(fd, FcntlArg::F_GETFD).context("failed to query descriptor flags")?,
    );
    let flags = flags & !FdFlag::FD_CLOEXEC;
    fcntl(fd, FcntlArg::F_SETFD(flags)).context("failed to clear close-on-exec")?;
    Ok(())
}
/// Block (on the blocking pool) until the supervisor writes the ready ack on
/// the control socket. Note: `from_raw_fd` takes ownership of `control_fd`,
/// so the fd is closed when this returns — this must be the last use of it.
async fn await_parent_ready(control_fd: RawFd) -> Result<()> {
    tokio::task::spawn_blocking(move || -> Result<()> {
        let mut control = unsafe { StdUnixStream::from_raw_fd(control_fd) };
        let mut ack = [0u8; 1];
        std::io::Read::read_exact(&mut control, &mut ack)
            .context("failed to read namespace ready ack")?;
        if ack != *READY_ACK {
            bail!("unexpected namespace ready ack");
        }
        Ok(())
    })
    .await
    .context("failed to join namespace ready wait task")??;
    Ok(())
}
/// Pass the tun fd to the supervisor over the control socket via SCM_RIGHTS.
/// A one-byte dummy payload is required so the message is non-empty.
fn send_tun_fd(control_fd: RawFd, tun_fd: RawFd) -> Result<()> {
    let buf = [0u8; 1];
    let iov = [std::io::IoSlice::new(&buf)];
    let fds = [tun_fd];
    sendmsg::<()>(
        control_fd,
        &iov,
        &[ControlMessage::ScmRights(&fds)],
        MsgFlags::empty(),
        None,
    )
    .context("failed to send tun fd to parent")?;
    Ok(())
}
/// Receive the tun fd sent by the namespaced child over SCM_RIGHTS.
/// Returns the first fd found in the control messages; errors if none arrive.
fn recv_tun_fd(control: &StdUnixStream) -> Result<RawFd> {
    let mut buf = [0u8; 1];
    let mut iov = [std::io::IoSliceMut::new(&mut buf)];
    // Space for exactly one passed descriptor.
    let mut cmsgspace = cmsg_space!([RawFd; 1]);
    let msg = recvmsg::<()>(
        control.as_raw_fd(),
        &mut iov,
        Some(&mut cmsgspace),
        MsgFlags::empty(),
    )
    .context("failed to receive tun fd from namespace child")?;
    for cmsg in msg.cmsgs() {
        if let ControlMessageOwned::ScmRights(fds) = cmsg {
            if let Some(fd) = fds.first() {
                return Ok(*fd);
            }
        }
    }
    bail!("namespace child did not send a tun fd")
}
/// Verify that a required host binary is resolvable before attempting to use
/// it, so failures surface with a clear message.
// NOTE(review): `sh -lc` runs a login shell, which sources profile scripts and
// can change PATH; presumably intentional to match the user's interactive
// PATH — confirm, otherwise `-c` would suffice.
fn ensure_tool(tool: &str) -> Result<()> {
    let status = StdCommand::new("sh")
        .args(["-lc", &format!("command -v {tool} >/dev/null")])
        .status()
        .with_context(|| format!("failed to probe required tool '{tool}'"))?;
    if !status.success() {
        bail!("required host tool '{tool}' is not available");
    }
    Ok(())
}
/// Read the payload file when a path was given; an absent path yields an
/// empty payload rather than an error.
async fn read_optional_payload(path: Option<&Path>) -> Result<Vec<u8>> {
    let Some(path) = path else {
        return Ok(Vec::new());
    };
    tokio::fs::read(path)
        .await
        .with_context(|| format!("failed to read payload from {}", path.display()))
}
/// Read the payload file for a backend that cannot run without one; errors
/// if `--payload` was not supplied.
async fn read_required_payload(path: Option<&Path>, backend: &str) -> Result<Vec<u8>> {
    let path = path.ok_or_else(|| anyhow!("{backend} exec requires --payload"))?;
    tokio::fs::read(path)
        .await
        .with_context(|| format!("failed to read payload from {}", path.display()))
}
/// Parse a WireGuard payload, using the file extension to pick the format
/// when available, otherwise trying TOML and falling back to INI.
fn parse_wireguard_payload(payload: &[u8], path: Option<&Path>) -> Result<WireGuardConfig> {
    let payload = str::from_utf8(payload).context("wireguard payload must be valid UTF-8")?;
    if let Some(path) = path {
        if let Some(ext) = path.extension().and_then(|ext| ext.to_str()) {
            return WireGuardConfig::from_content_fmt(payload, ext);
        }
    }
    WireGuardConfig::from_toml(payload).or_else(|_| WireGuardConfig::from_ini(payload))
}
async fn spawn_child(command: &[String]) -> Result<ExitStatus> {
let mut cmd = Command::new(&command[0]);
if command.len() > 1 {
cmd.args(&command[1..]);
}
cmd.stdin(std::process::Stdio::inherit());
cmd.stdout(std::process::Stdio::inherit());
cmd.stderr(std::process::Stdio::inherit());
cmd.kill_on_drop(true);
cmd.status()
.await
.with_context(|| format!("failed to spawn '{}'", command[0]))
}
/// Map an exit status to a process exit code: the real code when present,
/// the conventional 128 + signal number when killed by a signal, and an
/// error otherwise.
fn child_exit_code(status: ExitStatus) -> Result<i32> {
    match (status.code(), status.signal()) {
        (Some(code), _) => Ok(code),
        (None, Some(signal)) => Ok(128 + signal),
        (None, None) => bail!("child process terminated without an exit code"),
    }
}
/// Convert a CIDR prefix length (0..=32) to a dotted-quad IPv4 netmask.
/// Prefix 0 is special-cased because a 32-bit shift by 32 would overflow.
fn prefix_to_netmask_v4(prefix: u8) -> Ipv4Addr {
    match prefix {
        0 => Ipv4Addr::UNSPECIFIED,
        _ => Ipv4Addr::from(u32::MAX << (32 - prefix)),
    }
}
/// Compute the IPv4 broadcast address by setting every host bit of `ip`
/// according to `netmask`.
fn broadcast_v4(ip: Ipv4Addr, netmask: Ipv4Addr) -> Ipv4Addr {
    let host_bits = !u32::from(netmask);
    Ipv4Addr::from(u32::from(ip) | host_bits)
}
#[cfg(test)]
mod tests {
    use super::*;
    // Direct payloads given as JSON should populate every optional field.
    #[test]
    fn parses_direct_json_payload() {
        let payload = br#"{"address":["10.0.0.2/24"],"mtu":1400,"tun_name":"burrow0"}"#;
        let config = DirectConfig::from_payload(payload).unwrap();
        assert_eq!(config.address, vec!["10.0.0.2/24"]);
        assert_eq!(config.mtu, Some(1400));
        assert_eq!(config.tun_name.as_deref(), Some("burrow0"));
    }
    // With no addresses supplied, the defaults must cover both v4 and v6.
    #[test]
    fn socket_tun_config_uses_dual_stack_defaults() {
        let config = socket_tun_config(&[], None, None, "burrow-test").unwrap();
        assert_eq!(config.tun_name, "burrow-test");
        assert!(config
            .addresses
            .iter()
            .any(|network| matches!(network, IpNetwork::V4(_))));
        assert!(config
            .addresses
            .iter()
            .any(|network| matches!(network, IpNetwork::V6(_))));
    }
}

View file

@ -148,7 +148,7 @@ impl Interface {
debug!("Routing packet to {}", dst_addr); debug!("Routing packet to {}", dst_addr);
let Some(idx) = pcbs.find(dst_addr) else { let Some(idx) = pcbs.find(dst_addr) else {
continue; continue
}; };
debug!("Found peer:{}", idx); debug!("Found peer:{}", idx);

View file

@ -9,15 +9,20 @@ use std::{
use aead::{Aead, Payload}; use aead::{Aead, Payload};
use blake2::{ use blake2::{
digest::{FixedOutput, KeyInit}, digest::{FixedOutput, KeyInit},
Blake2s256, Blake2sMac, Digest, Blake2s256,
Blake2sMac,
Digest,
}; };
use chacha20poly1305::XChaCha20Poly1305; use chacha20poly1305::XChaCha20Poly1305;
use rand_core::OsRng; use rand_core::OsRng;
use ring::aead::{Aad, LessSafeKey, Nonce, UnboundKey, CHACHA20_POLY1305}; use ring::aead::{Aad, LessSafeKey, Nonce, UnboundKey, CHACHA20_POLY1305};
use subtle::ConstantTimeEq;
use super::{ use super::{
errors::WireGuardError, session::Session, x25519, HandshakeInit, HandshakeResponse, errors::WireGuardError,
session::Session,
x25519,
HandshakeInit,
HandshakeResponse,
PacketCookieReply, PacketCookieReply,
}; };
@ -204,7 +209,7 @@ impl Tai64N {
/// Parse a timestamp from a 12 byte u8 slice /// Parse a timestamp from a 12 byte u8 slice
fn parse(buf: &[u8; 12]) -> Result<Tai64N, WireGuardError> { fn parse(buf: &[u8; 12]) -> Result<Tai64N, WireGuardError> {
if buf.len() < 12 { if buf.len() < 12 {
return Err(WireGuardError::InvalidTai64nTimestamp); return Err(WireGuardError::InvalidTai64nTimestamp)
} }
let (sec_bytes, nano_bytes) = buf.split_at(std::mem::size_of::<u64>()); let (sec_bytes, nano_bytes) = buf.split_at(std::mem::size_of::<u64>());
@ -529,14 +534,11 @@ impl Handshake {
&hash, &hash,
)?; )?;
if !bool::from( ring::constant_time::verify_slices_are_equal(
self.params self.params.peer_static_public.as_bytes(),
.peer_static_public &peer_static_public_decrypted,
.as_bytes() )
.ct_eq(&peer_static_public_decrypted), .map_err(|_| WireGuardError::WrongKey)?;
) {
return Err(WireGuardError::WrongKey);
}
// initiator.hash = HASH(initiator.hash || msg.encrypted_static) // initiator.hash = HASH(initiator.hash || msg.encrypted_static)
hash = b2s_hash(&hash, packet.encrypted_static); hash = b2s_hash(&hash, packet.encrypted_static);
@ -554,22 +556,19 @@ impl Handshake {
let timestamp = Tai64N::parse(&timestamp)?; let timestamp = Tai64N::parse(&timestamp)?;
if !timestamp.after(&self.last_handshake_timestamp) { if !timestamp.after(&self.last_handshake_timestamp) {
// Possibly a replay // Possibly a replay
return Err(WireGuardError::WrongTai64nTimestamp); return Err(WireGuardError::WrongTai64nTimestamp)
} }
self.last_handshake_timestamp = timestamp; self.last_handshake_timestamp = timestamp;
// initiator.hash = HASH(initiator.hash || msg.encrypted_timestamp) // initiator.hash = HASH(initiator.hash || msg.encrypted_timestamp)
hash = b2s_hash(&hash, packet.encrypted_timestamp); hash = b2s_hash(&hash, packet.encrypted_timestamp);
self.previous = std::mem::replace( self.previous = std::mem::replace(&mut self.state, HandshakeState::InitReceived {
&mut self.state, chaining_key,
HandshakeState::InitReceived { hash,
chaining_key, peer_ephemeral_public,
hash, peer_index,
peer_ephemeral_public, });
peer_index,
},
);
self.format_handshake_response(dst) self.format_handshake_response(dst)
} }
@ -670,7 +669,7 @@ impl Handshake {
let local_index = self.cookies.index; let local_index = self.cookies.index;
if packet.receiver_idx != local_index { if packet.receiver_idx != local_index {
return Err(WireGuardError::WrongIndex); return Err(WireGuardError::WrongIndex)
} }
// msg.encrypted_cookie = XAEAD(HASH(LABEL_COOKIE || responder.static_public), // msg.encrypted_cookie = XAEAD(HASH(LABEL_COOKIE || responder.static_public),
// msg.nonce, cookie, last_received_msg.mac1) // msg.nonce, cookie, last_received_msg.mac1)
@ -726,7 +725,7 @@ impl Handshake {
dst: &'a mut [u8], dst: &'a mut [u8],
) -> Result<&'a mut [u8], WireGuardError> { ) -> Result<&'a mut [u8], WireGuardError> {
if dst.len() < super::HANDSHAKE_INIT_SZ { if dst.len() < super::HANDSHAKE_INIT_SZ {
return Err(WireGuardError::DestinationBufferTooSmall); return Err(WireGuardError::DestinationBufferTooSmall)
} }
let (message_type, rest) = dst.split_at_mut(4); let (message_type, rest) = dst.split_at_mut(4);
@ -809,7 +808,7 @@ impl Handshake {
dst: &'a mut [u8], dst: &'a mut [u8],
) -> Result<(&'a mut [u8], Session), WireGuardError> { ) -> Result<(&'a mut [u8], Session), WireGuardError> {
if dst.len() < super::HANDSHAKE_RESP_SZ { if dst.len() < super::HANDSHAKE_RESP_SZ {
return Err(WireGuardError::DestinationBufferTooSmall); return Err(WireGuardError::DestinationBufferTooSmall)
} }
let state = std::mem::replace(&mut self.state, HandshakeState::None); let state = std::mem::replace(&mut self.state, HandshakeState::None);

View file

@ -133,9 +133,9 @@ pub enum Packet<'a> {
impl Tunnel { impl Tunnel {
#[inline(always)] #[inline(always)]
pub fn parse_incoming_packet(src: &[u8]) -> Result<Packet<'_>, WireGuardError> { pub fn parse_incoming_packet(src: &[u8]) -> Result<Packet, WireGuardError> {
if src.len() < 4 { if src.len() < 4 {
return Err(WireGuardError::InvalidPacket); return Err(WireGuardError::InvalidPacket)
} }
// Checks the type, as well as the reserved zero fields // Checks the type, as well as the reserved zero fields
@ -177,7 +177,7 @@ impl Tunnel {
pub fn dst_address(packet: &[u8]) -> Option<IpAddr> { pub fn dst_address(packet: &[u8]) -> Option<IpAddr> {
if packet.is_empty() { if packet.is_empty() {
return None; return None
} }
match packet[0] >> 4 { match packet[0] >> 4 {
@ -201,7 +201,7 @@ impl Tunnel {
pub fn src_address(packet: &[u8]) -> Option<IpAddr> { pub fn src_address(packet: &[u8]) -> Option<IpAddr> {
if packet.is_empty() { if packet.is_empty() {
return None; return None
} }
match packet[0] >> 4 { match packet[0] >> 4 {
@ -296,7 +296,7 @@ impl Tunnel {
self.timer_tick(TimerName::TimeLastDataPacketSent); self.timer_tick(TimerName::TimeLastDataPacketSent);
} }
self.tx_bytes += src.len(); self.tx_bytes += src.len();
return TunnResult::WriteToNetwork(packet); return TunnResult::WriteToNetwork(packet)
} }
// If there is no session, queue the packet for future retry // If there is no session, queue the packet for future retry
@ -320,7 +320,7 @@ impl Tunnel {
) -> TunnResult<'a> { ) -> TunnResult<'a> {
if datagram.is_empty() { if datagram.is_empty() {
// Indicates a repeated call // Indicates a repeated call
return self.send_queued_packet(dst); return self.send_queued_packet(dst)
} }
let mut cookie = [0u8; COOKIE_REPLY_SZ]; let mut cookie = [0u8; COOKIE_REPLY_SZ];
@ -331,7 +331,7 @@ impl Tunnel {
Ok(packet) => packet, Ok(packet) => packet,
Err(TunnResult::WriteToNetwork(cookie)) => { Err(TunnResult::WriteToNetwork(cookie)) => {
dst[..cookie.len()].copy_from_slice(cookie); dst[..cookie.len()].copy_from_slice(cookie);
return TunnResult::WriteToNetwork(&mut dst[..cookie.len()]); return TunnResult::WriteToNetwork(&mut dst[..cookie.len()])
} }
Err(TunnResult::Err(e)) => return TunnResult::Err(e), Err(TunnResult::Err(e)) => return TunnResult::Err(e),
_ => unreachable!(), _ => unreachable!(),
@ -435,7 +435,7 @@ impl Tunnel {
let cur_idx = self.current; let cur_idx = self.current;
if cur_idx == new_idx { if cur_idx == new_idx {
// There is nothing to do, already using this session, this is the common case // There is nothing to do, already using this session, this is the common case
return; return
} }
if self.sessions[cur_idx % N_SESSIONS].is_none() if self.sessions[cur_idx % N_SESSIONS].is_none()
|| self.timers.session_timers[new_idx % N_SESSIONS] || self.timers.session_timers[new_idx % N_SESSIONS]
@ -481,7 +481,7 @@ impl Tunnel {
force_resend: bool, force_resend: bool,
) -> TunnResult<'a> { ) -> TunnResult<'a> {
if self.handshake.is_in_progress() && !force_resend { if self.handshake.is_in_progress() && !force_resend {
return TunnResult::Done; return TunnResult::Done
} }
if self.handshake.is_expired() { if self.handshake.is_expired() {
@ -540,7 +540,7 @@ impl Tunnel {
}; };
if computed_len > packet.len() { if computed_len > packet.len() {
return TunnResult::Err(WireGuardError::InvalidPacket); return TunnResult::Err(WireGuardError::InvalidPacket)
} }
self.timer_tick(TimerName::TimeLastDataPacketReceived); self.timer_tick(TimerName::TimeLastDataPacketReceived);

View file

@ -8,13 +8,23 @@ use aead::{generic_array::GenericArray, AeadInPlace, KeyInit};
use chacha20poly1305::{Key, XChaCha20Poly1305}; use chacha20poly1305::{Key, XChaCha20Poly1305};
use parking_lot::Mutex; use parking_lot::Mutex;
use rand_core::{OsRng, RngCore}; use rand_core::{OsRng, RngCore};
use subtle::ConstantTimeEq; use ring::constant_time::verify_slices_are_equal;
use super::{ use super::{
handshake::{ handshake::{
b2s_hash, b2s_keyed_mac_16, b2s_keyed_mac_16_2, b2s_mac_24, LABEL_COOKIE, LABEL_MAC1, b2s_hash,
b2s_keyed_mac_16,
b2s_keyed_mac_16_2,
b2s_mac_24,
LABEL_COOKIE,
LABEL_MAC1,
}, },
HandshakeInit, HandshakeResponse, Packet, TunnResult, Tunnel, WireGuardError, HandshakeInit,
HandshakeResponse,
Packet,
TunnResult,
Tunnel,
WireGuardError,
}; };
const COOKIE_REFRESH: u64 = 128; // Use 128 and not 120 so the compiler can optimize out the division const COOKIE_REFRESH: u64 = 128; // Use 128 and not 120 so the compiler can optimize out the division
@ -126,7 +136,7 @@ impl RateLimiter {
dst: &'a mut [u8], dst: &'a mut [u8],
) -> Result<&'a mut [u8], WireGuardError> { ) -> Result<&'a mut [u8], WireGuardError> {
if dst.len() < super::COOKIE_REPLY_SZ { if dst.len() < super::COOKIE_REPLY_SZ {
return Err(WireGuardError::DestinationBufferTooSmall); return Err(WireGuardError::DestinationBufferTooSmall)
} }
let (message_type, rest) = dst.split_at_mut(4); let (message_type, rest) = dst.split_at_mut(4);
@ -175,9 +185,8 @@ impl RateLimiter {
let (mac1, mac2) = macs.split_at(16); let (mac1, mac2) = macs.split_at(16);
let computed_mac1 = b2s_keyed_mac_16(&self.mac1_key, msg); let computed_mac1 = b2s_keyed_mac_16(&self.mac1_key, msg);
if !bool::from(computed_mac1[..16].ct_eq(mac1)) { verify_slices_are_equal(&computed_mac1[..16], mac1)
return Err(TunnResult::Err(WireGuardError::InvalidMac)); .map_err(|_| TunnResult::Err(WireGuardError::InvalidMac))?;
}
if self.is_under_load() { if self.is_under_load() {
let addr = match src_addr { let addr = match src_addr {
@ -189,11 +198,11 @@ impl RateLimiter {
let cookie = self.current_cookie(addr); let cookie = self.current_cookie(addr);
let computed_mac2 = b2s_keyed_mac_16_2(&cookie, msg, mac1); let computed_mac2 = b2s_keyed_mac_16_2(&cookie, msg, mac1);
if !bool::from(computed_mac2[..16].ct_eq(mac2)) { if verify_slices_are_equal(&computed_mac2[..16], mac2).is_err() {
let cookie_packet = self let cookie_packet = self
.format_cookie_reply(sender_idx, cookie, mac1, dst) .format_cookie_reply(sender_idx, cookie, mac1, dst)
.map_err(TunnResult::Err)?; .map_err(TunnResult::Err)?;
return Err(TunnResult::WriteToNetwork(cookie_packet)); return Err(TunnResult::WriteToNetwork(cookie_packet))
} }
} }
} }

View file

@ -88,11 +88,11 @@ impl ReceivingKeyCounterValidator {
fn will_accept(&self, counter: u64) -> Result<(), WireGuardError> { fn will_accept(&self, counter: u64) -> Result<(), WireGuardError> {
if counter >= self.next { if counter >= self.next {
// As long as the counter is growing no replay took place for sure // As long as the counter is growing no replay took place for sure
return Ok(()); return Ok(())
} }
if counter + N_BITS < self.next { if counter + N_BITS < self.next {
// Drop if too far back // Drop if too far back
return Err(WireGuardError::InvalidCounter); return Err(WireGuardError::InvalidCounter)
} }
if !self.check_bit(counter) { if !self.check_bit(counter) {
Ok(()) Ok(())
@ -107,22 +107,22 @@ impl ReceivingKeyCounterValidator {
fn mark_did_receive(&mut self, counter: u64) -> Result<(), WireGuardError> { fn mark_did_receive(&mut self, counter: u64) -> Result<(), WireGuardError> {
if counter + N_BITS < self.next { if counter + N_BITS < self.next {
// Drop if too far back // Drop if too far back
return Err(WireGuardError::InvalidCounter); return Err(WireGuardError::InvalidCounter)
} }
if counter == self.next { if counter == self.next {
// Usually the packets arrive in order, in that case we simply mark the bit and // Usually the packets arrive in order, in that case we simply mark the bit and
// increment the counter // increment the counter
self.set_bit(counter); self.set_bit(counter);
self.next += 1; self.next += 1;
return Ok(()); return Ok(())
} }
if counter < self.next { if counter < self.next {
// A packet arrived out of order, check if it is valid, and mark // A packet arrived out of order, check if it is valid, and mark
if self.check_bit(counter) { if self.check_bit(counter) {
return Err(WireGuardError::InvalidCounter); return Err(WireGuardError::InvalidCounter)
} }
self.set_bit(counter); self.set_bit(counter);
return Ok(()); return Ok(())
} }
// Packets where dropped, or maybe reordered, skip them and mark unused // Packets where dropped, or maybe reordered, skip them and mark unused
if counter - self.next >= N_BITS { if counter - self.next >= N_BITS {
@ -247,7 +247,7 @@ impl Session {
panic!("The destination buffer is too small"); panic!("The destination buffer is too small");
} }
if packet.receiver_idx != self.receiving_index { if packet.receiver_idx != self.receiving_index {
return Err(WireGuardError::WrongIndex); return Err(WireGuardError::WrongIndex)
} }
// Don't reuse counters, in case this is a replay attack we want to quickly // Don't reuse counters, in case this is a replay attack we want to quickly
// check the counter without running expensive decryption // check the counter without running expensive decryption

View file

@ -190,7 +190,7 @@ impl Tunnel {
{ {
if self.handshake.is_expired() { if self.handshake.is_expired() {
return TunnResult::Err(WireGuardError::ConnectionExpired); return TunnResult::Err(WireGuardError::ConnectionExpired)
} }
// Clear cookie after COOKIE_EXPIRATION_TIME // Clear cookie after COOKIE_EXPIRATION_TIME
@ -206,7 +206,7 @@ impl Tunnel {
tracing::error!("CONNECTION_EXPIRED(REJECT_AFTER_TIME * 3)"); tracing::error!("CONNECTION_EXPIRED(REJECT_AFTER_TIME * 3)");
self.handshake.set_expired(); self.handshake.set_expired();
self.clear_all(); self.clear_all();
return TunnResult::Err(WireGuardError::ConnectionExpired); return TunnResult::Err(WireGuardError::ConnectionExpired)
} }
if let Some(time_init_sent) = self.handshake.timer() { if let Some(time_init_sent) = self.handshake.timer() {
@ -219,7 +219,7 @@ impl Tunnel {
tracing::error!("CONNECTION_EXPIRED(REKEY_ATTEMPT_TIME)"); tracing::error!("CONNECTION_EXPIRED(REKEY_ATTEMPT_TIME)");
self.handshake.set_expired(); self.handshake.set_expired();
self.clear_all(); self.clear_all();
return TunnResult::Err(WireGuardError::ConnectionExpired); return TunnResult::Err(WireGuardError::ConnectionExpired)
} }
if time_init_sent.elapsed() >= REKEY_TIMEOUT { if time_init_sent.elapsed() >= REKEY_TIMEOUT {
@ -299,11 +299,11 @@ impl Tunnel {
} }
if handshake_initiation_required { if handshake_initiation_required {
return self.format_handshake_initiation(dst, true); return self.format_handshake_initiation(dst, true)
} }
if keepalive_required { if keepalive_required {
return self.encapsulate(&[], dst); return self.encapsulate(&[], dst)
} }
TunnResult::Done TunnResult::Done

View file

@ -64,7 +64,7 @@ impl PeerPcb {
let guard = self.socket.read().await; let guard = self.socket.read().await;
let Some(socket) = guard.as_ref() else { let Some(socket) = guard.as_ref() else {
self.open_if_closed().await?; self.open_if_closed().await?;
continue; continue
}; };
let mut res_buf = [0; 1500]; let mut res_buf = [0; 1500];
// tracing::debug!("{} : waiting for readability on {:?}", rid, socket); // tracing::debug!("{} : waiting for readability on {:?}", rid, socket);
@ -72,7 +72,7 @@ impl PeerPcb {
Ok(l) => l, Ok(l) => l,
Err(e) => { Err(e) => {
log::error!("{}: error reading from socket: {:?}", rid, e); log::error!("{}: error reading from socket: {:?}", rid, e);
continue; continue
} }
}; };
let mut res_dat = &res_buf[..len]; let mut res_dat = &res_buf[..len];
@ -88,7 +88,7 @@ impl PeerPcb {
TunnResult::Done => break, TunnResult::Done => break,
TunnResult::Err(e) => { TunnResult::Err(e) => {
tracing::error!(message = "Decapsulate error", error = ?e); tracing::error!(message = "Decapsulate error", error = ?e);
break; break
} }
TunnResult::WriteToNetwork(packet) => { TunnResult::WriteToNetwork(packet) => {
tracing::debug!("WriteToNetwork: {:?}", packet); tracing::debug!("WriteToNetwork: {:?}", packet);
@ -102,29 +102,17 @@ impl PeerPcb {
.await?; .await?;
tracing::debug!("WriteToNetwork done"); tracing::debug!("WriteToNetwork done");
res_dat = &[]; res_dat = &[];
continue; continue
} }
TunnResult::WriteToTunnelV4(packet, addr) => { TunnResult::WriteToTunnelV4(packet, addr) => {
tracing::debug!("WriteToTunnelV4: {:?}, {:?}", packet, addr); tracing::debug!("WriteToTunnelV4: {:?}, {:?}", packet, addr);
tun_interface tun_interface.read().await.as_ref().ok_or(anyhow::anyhow!("tun interface does not exist"))?.send(packet).await?;
.read() break
.await
.as_ref()
.ok_or(anyhow::anyhow!("tun interface does not exist"))?
.send(packet)
.await?;
break;
} }
TunnResult::WriteToTunnelV6(packet, addr) => { TunnResult::WriteToTunnelV6(packet, addr) => {
tracing::debug!("WriteToTunnelV6: {:?}, {:?}", packet, addr); tracing::debug!("WriteToTunnelV6: {:?}, {:?}", packet, addr);
tun_interface tun_interface.read().await.as_ref().ok_or(anyhow::anyhow!("tun interface does not exist"))?.send(packet).await?;
.read() break
.await
.as_ref()
.ok_or(anyhow::anyhow!("tun interface does not exist"))?
.send(packet)
.await?;
break;
} }
} }
} }
@ -146,7 +134,7 @@ impl PeerPcb {
let handle = self.socket.read().await; let handle = self.socket.read().await;
let Some(socket) = handle.as_ref() else { let Some(socket) = handle.as_ref() else {
tracing::error!("No socket for peer"); tracing::error!("No socket for peer");
return Ok(()); return Ok(())
}; };
tracing::debug!("Our Encapsulated packet: {:?}", packet); tracing::debug!("Our Encapsulated packet: {:?}", packet);
socket.send(packet).await?; socket.send(packet).await?;
@ -169,7 +157,7 @@ impl PeerPcb {
let handle = self.socket.read().await; let handle = self.socket.read().await;
let Some(socket) = handle.as_ref() else { let Some(socket) = handle.as_ref() else {
tracing::error!("No socket for peer"); tracing::error!("No socket for peer");
return Ok(()); return Ok(())
}; };
socket.send(packet).await?; socket.send(packet).await?;
tracing::debug!("Sent Packet for timer update"); tracing::debug!("Sent Packet for timer update");

View file

@ -1,101 +0,0 @@
# Forward Email Backups
Burrow's mail direction is hosted mail on [Forward Email](https://forwardemail.net/), with domain-owned backup retention in our own S3-compatible object storage.
This is the first mail path to operationalize for `burrow.net` and `burrow.rs`. It keeps SMTP/IMAP hosting off the first forge host while still giving Burrow control over backup retention and object ownership.
## What Forward Email Requires
Forward Email exposes custom backup storage per domain. The documented API shape is:
- `PUT /v1/domains/{domain}` with:
- `has_custom_s3=true`
- `s3_endpoint`
- `s3_access_key_id`
- `s3_secret_access_key`
- `s3_region`
- `s3_bucket`
- `POST /v1/domains/{domain}/test-s3-connection`
Forward Email also documents these operational constraints:
- the bucket must remain private
- credentials are validated with `HeadBucket`
- failed or public-bucket configurations fall back to Forward Email's default storage and notify domain administrators
- custom S3 keeps every backup version, so lifecycle expiration is our responsibility
## Burrow Secret Layout
Present in `intake/` today:
- `intake/forwardemail_api_token.txt`
- `intake/hetzner-s3-user.txt`
- `intake/hetzner-s3-secret.txt`
- Hetzner public S3 endpoint for Forward Email: `https://hel1.your-objectstorage.com`
- Hetzner object storage region: `hel1`
- Hetzner bucket used for Forward Email backups: `burrow`
## Verified Storage State
As of March 15, 2026, Burrow's Forward Email custom S3 configuration is live:
- endpoint: `https://hel1.your-objectstorage.com`
- region: `hel1`
- bucket: `burrow`
- `burrow.net` has `has_custom_s3=true`
- `burrow.rs` has `has_custom_s3=true`
- Forward Email's `/test-s3-connection` succeeded for both domains
- the `burrow` bucket enforces lifecycle expiration after `90` days
Forward Email performs bucket validation with bucket-style addressing. For Hetzner Object Storage, this means the working endpoint is the regional S3 endpoint (`https://hel1.your-objectstorage.com`), not the account alias (`https://burrow.hel1.your-objectstorage.com`). Using the account alias causes TLS hostname mismatches when the vendor prepends the bucket name.
## Helper
Use [`Tools/forwardemail-custom-s3.sh`](../Tools/forwardemail-custom-s3.sh) to configure or retest the domain setting without putting secrets on the process list.
Use [`Tools/forwardemail-hetzner-storage.py`](../Tools/forwardemail-hetzner-storage.py) to ensure the Hetzner backup bucket exists and to apply lifecycle expiry before enabling custom S3 on the Forward Email side.
Bucket bootstrap example:
```sh
Tools/forwardemail-hetzner-storage.py \
--endpoint https://hel1.your-objectstorage.com \
--bucket burrow \
--expire-days 90
```
Example:
```sh
Tools/forwardemail-custom-s3.sh \
--domain burrow.net \
--api-token-file intake/forwardemail_api_token.txt \
--s3-endpoint https://hel1.your-objectstorage.com \
--s3-region hel1 \
--s3-bucket burrow \
--s3-access-key-file intake/hetzner-s3-user.txt \
--s3-secret-key-file intake/hetzner-s3-secret.txt
```
Retest an existing domain configuration without rewriting it:
```sh
Tools/forwardemail-custom-s3.sh \
--domain burrow.net \
--api-token-file intake/forwardemail_api_token.txt \
--test-only
```
## Retention
Forward Email preserves every backup object when custom S3 is enabled. Configure lifecycle expiration on the bucket itself. A 30-day or 90-day expiry window is the baseline recommendation from the vendor docs; Burrow should choose explicitly per domain instead of letting the bucket grow without bound. The current Burrow bootstrap helper defaults to `90` days.
## Identity Direction
Hosted mail and SaaS identity are separate concerns:
- mail hosting/backups: Forward Email + Burrow-owned S3-compatible storage
- interactive identity: Authentik as the long-term IdP
- future SaaS SSO target: Linear via SAML once the workspace and plan are ready
This means the forge host does not need to become the first mail server just to give Burrow mailboxes or retention control.

View file

@ -98,14 +98,10 @@ code burrow
You can run burrow on the command line with cargo: You can run burrow on the command line with cargo:
``` ```
sudo -E cargo run -- start cargo run
``` ```
Creating the tunnel requires elevated privileges. Regular checks and tests can run without `sudo`: Cargo will ask for your password because burrow needs permission in order to create a tunnel.
```
cargo test --workspace --all-features
```
</details> </details>

View file

@ -1,31 +0,0 @@
# Protocol Roadmap
Burrow currently has two tunnel paths in-tree:
- a WireGuard data plane
- a Tor-backed userspace TCP path
What it does not have yet is a transport-neutral control plane that can honestly claim full MASQUE `CONNECT-IP` or full Tailscale-style negotiation parity. This repository now contains the beginnings of that layer:
- control-plane data structures in `burrow/src/control/mod.rs`
- local auth bootstrap and persistent node/session storage in `burrow/src/auth/server/`
- governance documents under `evolution/` for the bigger protocol work
## `CONNECT-IP`
Full RFC 9484 support requires more than packet forwarding. It needs HTTP/3 session management, Capsule handling, HTTP Datagram context identifiers, address assignment, route advertisement, and request-scope enforcement. Burrow does not implement those end to end yet.
## Tailscale-Style Negotiation
Burrow now has register/map request and response types plus persistent node records, but it does not yet implement the full Tailscale capability surface, peer delta protocol, DERP coordination, or Noise-based control transport.
## Current Direction
The intended sequence is:
1. Stabilize the control-plane data model and bootstrap auth.
2. Introduce transport-neutral route and address abstractions.
3. Add MASQUE framing and HTTP/3 transport support.
4. Expand policy, relay, and interoperability testing.
This keeps Burrow honest about what is running today while creating a clean path for the rest.

View file

@ -1,30 +0,0 @@
# WireGuard Rust Lineage
Burrow's in-tree WireGuard engine is not a greenfield implementation. It was lifted from the Rust WireGuard lineage around Cloudflare's BoringTun, then cut down and reshaped to fit Burrow's own daemon and tunnel abstractions.
## What Was Lifted
- The repository history includes `1b39eca` (`boringtun wip`) and `28af9003` (`merge boringtun into burrow`).
- The current `burrow/src/wireguard/noise/*` files still carry the original Cloudflare copyright and SPDX headers.
- Core protocol machinery such as the Noise handshake, session state, rate limiter, and timer logic came from that imported body of work.
## What Changed in Burrow
Burrow does not embed BoringTun unchanged.
- The original device layer was replaced with Burrow-specific interface and peer control blocks in `burrow/src/wireguard/iface.rs` and `burrow/src/wireguard/pcb.rs`.
- Configuration handling was rewritten around Burrow's own INI parser and config model in `burrow/src/wireguard/config.rs`.
- The daemon now resolves the active runtime from the database-backed network list rather than from a single static WireGuard payload.
- Burrow added its own runtime switching path so WireGuard can share one daemon lifecycle with the rest of the managed runtime system.
## What Was Improved
The lifted code has been tightened further in-repo.
- Deprecated constant-time comparisons were replaced with `subtle`.
- Network ordering and runtime selection are now deterministic and test-covered.
- The Burrow runtime can swap between WireGuard configurations without restarting the daemon process itself.
## Why This Matters
This project should be explicit about lineage. Burrow benefits from proven Rust WireGuard work, but it owns the integration surface, runtime behavior, and future maintenance burden. That is why the code should be documented as lifted, modified, and improved rather than described as wholly original.

View file

@ -1,60 +0,0 @@
# Burrow Evolution
Burrow Evolution Proposals (BEPs) are the repository's durable design record for protocol work, control-plane changes, forge infrastructure, and operational policy.
## Goals
1. Capture intent before implementation outruns the architecture.
2. Give contributors and agents enough context to work safely without re-discovering prior decisions.
3. Tie ambitious work to concrete validation, rollout, and rollback criteria.
## When a BEP is required
Open a BEP for:
- new transports or protocol families
- control-plane and identity changes
- deployment, forge, runner, or secrets changes
- data model migrations
- user-visible behavior that changes security or routing semantics
Small bug fixes and isolated refactors do not need a BEP unless they materially change one of the areas above.
## Lifecycle
1. Pitch
Capture the problem and why it matters now.
2. Draft
Copy `evolution/proposals/0000-template.md` to `evolution/proposals/BEP-XXXX-short-slug.md`.
3. Review
Collect feedback, tighten the design, and document unresolved concerns.
4. Decision
Mark the proposal `Accepted`, `Rejected`, or `Returned for Revision`.
5. Implementation
Link code changes, tests, and rollout evidence.
6. Supersession
Keep historical proposals in-tree and point forward to the replacing BEP.
## Status Values
- `Pitch`
- `Draft`
- `In Review`
- `Accepted`
- `Implemented`
- `Rejected`
- `Returned for Revision`
- `Superseded`
- `Archived`
## Layout
```text
evolution/
README.md
proposals/
0000-template.md
BEP-0001-...
```
Use ASCII Markdown. Keep metadata at the top of each proposal so tooling and future agents can parse it quickly.

View file

@ -1,57 +0,0 @@
# `BEP-XXXX` - Title Case Summary
```text
Status: Draft | In Review | Accepted | Implemented | Rejected | Returned for Revision | Superseded | Archived
Proposal: BEP-XXXX
Authors: <name(s) or agent ids>
Coordinator: <name>
Reviewers: <people, bots, contributors>
Constitution Sections: <II, III, IV, etc.>
Implementation PRs: <link(s)> (optional while drafting)
Decision Date: <YYYY-MM-DD or Pending>
```
## Summary
One or two paragraphs that state the desired outcome and why it matters.
## Motivation
- What problem exists today?
- Why should Burrow solve it now?
- Which issues, incidents, or constraints support the change?
## Detailed Design
- Architecture and boundaries
- Data model and migration plan
- Protocol or API changes
- Observability, testing, and failure handling
## Security and Operational Considerations
- Access and secret handling
- Abuse, downgrade, or supply-chain risks
- Rollback and kill-switch plans
## Contributor Playbook
Give the concrete steps, commands, checks, and evidence a contributor should produce while implementing or rolling out the change.
## Alternatives Considered
List alternatives and why they were rejected.
## Impact on Other Work
- follow-up tasks
- dependencies
- compatibility constraints
## Decision
Record the final call, who made it, and any conditions.
## References
Link relevant issues, specs, transcripts, and external research.

View file

@ -1,61 +0,0 @@
# `BEP-0001` - Sovereign Forge and Governance Bootstrap
```text
Status: Draft
Proposal: BEP-0001
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: II, III, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should own its forge, deployment logic, and operational context under `burrow.net`. This proposal establishes the repository-local governance and forge bootstrap required to move build, release, and infrastructure control out of GitHub-centric assumptions and into a self-hosted operating model.
## Motivation
- The repository currently keeps CI definitions under `.github/workflows/` but has no first-class self-hosted forge layout.
- Infrastructure changes and protocol work are already entangled; without a design record, the project risks landing irreversible operations without enough context.
- A self-hosted forge is a prerequisite for durable autonomy over source, runners, and release pipelines.
## Detailed Design
- Add a project constitution and BEP process under `evolution/`.
- Introduce a Nix flake and NixOS host/module layout for `burrow-forge`.
- Add Forgejo-native workflows under `.forgejo/workflows/` for repository-local CI.
- Bootstrap the initial forge identity around `contact@burrow.net` and an agent-owned SSH workflow.
## Security and Operational Considerations
- Initial bootstrap may read credentials from local intake, but production must converge on encrypted secret handling.
- The first forge host replacement must preserve rollback information before deleting any existing VM.
- DNS for `burrow.net` is currently pending activation; the forge rollout must not assume public reachability until nameserver cutover completes.
## Contributor Playbook
- Keep destructive host operations behind explicit verification of the current Hetzner state.
- Build and test repository-local workflows before using them for deployment.
- Record the active server id, image, IPs, and SSH path before replacement.
## Alternatives Considered
- Continue relying on GitHub Actions while separately hosting services. Rejected because it leaves source authority and CI policy split across systems.
- Stand up Forgejo without a repository-local operating model. Rejected because the repo would still be missing deployment truth.
## Impact on Other Work
- Blocks long-term migration of workflows away from GitHub.
- Provides the governance anchor for protocol and control-plane proposals.
## Decision
Pending.
## References
- `CONSTITUTION.md`
- `.github/workflows/`
- `.forgejo/workflows/`

View file

@ -1,60 +0,0 @@
# `BEP-0002` - Control-Plane Bootstrap and Local Auth
```text
Status: Draft
Proposal: BEP-0002
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: I, II, III, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow needs a repository-owned control-plane model instead of ad hoc network payload storage plus third-party-only auth. This proposal introduces a local username/password bootstrap for `contact@burrow.net`, plus a register/map data model shaped to support a Tailscale-style control server without claiming full parity yet.
## Motivation
- Current auth support is limited and does not provide a plain local bootstrap path for the project's own operator identity.
- The existing database stores network payloads, but not a durable model for users, nodes, sessions, or control-plane negotiation state.
- Future work on route policy, device coordination, and richer negotiation needs a real data model now.
## Detailed Design
- Add control-plane types for users, nodes, register requests, and map responses.
- Extend the auth server schema with local credentials, sessions, provider logins, and control nodes.
- Expose JSON endpoints for local login, node registration, and map retrieval.
- Seed the initial operator account from intake-backed bootstrap credentials.
## Security and Operational Considerations
- Passwords are stored with Argon2id hashes only.
- Session tokens are bearer credentials and must be treated as sensitive.
- The bootstrap credential path is a short-term path; follow-up work should move it into encrypted secret management before public deployment.
## Contributor Playbook
- Verify bootstrap account creation in an isolated test database.
- Exercise login, register, and map end to end with integration tests.
- Do not advertise protocol parity beyond the implemented request/response contract.
## Alternatives Considered
- Wait for full external identity-provider integration first. Rejected because the forge needs an operator account now.
- Keep control-plane state implicit in daemon-local configuration. Rejected because it cannot express multi-device coordination.
## Impact on Other Work
- Unblocks forge bootstrap and future device control-plane work.
- Creates the storage model needed for richer policy and transport proposals.
## Decision
Pending.
## References
- `burrow/src/auth/server/`
- `burrow/src/control/`

View file

@ -1,61 +0,0 @@
# `BEP-0003` - CONNECT-IP and Negotiation Roadmap
```text
Status: Draft
Proposal: BEP-0003
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: I, II, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should grow from a WireGuard-first tunnel runner into a transport stack that can support HTTP/3 MASQUE `CONNECT-IP` and a richer node negotiation model. This proposal stages that work so Burrow can adopt the right abstractions instead of stapling QUIC-era semantics onto a WireGuard-only daemon.
## Motivation
- `CONNECT-IP` introduces HTTP/3 sessions, context identifiers, address assignment, and route advertisements that do not fit the current daemon model.
- A Tailscale-style control plane requires explicit node, endpoint, and session state rather than raw network blobs.
- The project needs a roadmap that distinguishes data-model work, control-plane work, and actual transport implementation.
## Detailed Design
- Stage 1: land control-plane types and persistent auth/session/node storage.
- Stage 2: add transport-agnostic route, address-assignment, and policy abstractions in Burrow.
- Stage 3: implement MASQUE `CONNECT-IP` framing and HTTP Datagram handling.
- Stage 4: connect the transport layer to real relay, policy, and observability paths.
## Security and Operational Considerations
- `CONNECT-IP` changes the trust boundary from WireGuard peers to HTTP/3 peers and relays; authentication, replay handling, and scope restriction must be explicit.
- Route advertisements and delegated prefixes must be validated before touching the data plane.
- Control-plane capability claims must not imply support that the transport layer does not yet implement.
## Contributor Playbook
- Keep protocol codecs independently testable before integrating them into live transports.
- Add interoperability tests for every new capsule or datagram type.
- Separate request parsing, policy validation, and packet forwarding so regressions stay localized.
## Alternatives Considered
- Implement MASQUE directly in the daemon without control-plane refactoring. Rejected because the current daemon has no transport-neutral contract for routes or prefixes.
- Treat Tailscale negotiation as a one-off compatibility shim. Rejected because Burrow needs first-class control-plane concepts either way.
## Impact on Other Work
- Depends on BEP-0002.
- Informs future relay, policy, and node coordination work.
## Decision
Pending.
## References
- RFC 9484
- `burrow/src/daemon/`
- `burrow/src/control/`

View file

@ -1,68 +0,0 @@
# `BEP-0004` - Hosted Mail Backups and SaaS Identity
```text
Status: Draft
Proposal: BEP-0004
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: II, III, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should start with hosted mail on Forward Email instead of self-hosting SMTP and IMAP on the first forge machine. Backup retention should still be controlled by Burrow through custom S3-compatible storage backed by Burrow-owned object storage. In parallel, Burrow should treat SaaS identity as a separate track and converge on Authentik as the long-term IdP, with Linear SAML SSO as a planned downstream integration rather than an immediate bootstrap dependency.
## Motivation
- The first forge host already carries source control, CI, and deployment bootstrap risk. Adding a self-hosted mail stack increases operational scope before the forge is stable.
- Forward Email already exposes SMTP and IMAP while allowing per-domain custom S3 backup storage, which preserves Burrow's data ownership over mailbox backups.
- The repository needs a durable decision record that separates hosted mail operations from future SaaS SSO work.
## Detailed Design
- Use Forward Email as the operational mail provider for `burrow.net` and `burrow.rs`.
- Configure custom S3-compatible storage per domain using Burrow-controlled object storage credentials.
- Keep one backup bucket per domain and enforce lifecycle expiration at the bucket layer.
- Add repository-owned tooling and documentation for applying and testing the Forward Email custom S3 configuration.
- Treat Authentik as the future identity authority for SaaS applications, but keep Linear SAML as a later rollout once the workspace and vendor prerequisites are available. Linear's current docs place SAML and SCIM behind higher-tier workspace security settings, so Burrow should treat plan availability as an explicit precondition.
## Security and Operational Considerations
- Forward Email API tokens and S3 credentials must stay in secret files and must not be passed directly on the shell command line.
- Buckets must remain private. Public bucket detection by the vendor should be treated as a hard failure, not a warning.
- Backup growth is unbounded without lifecycle rules. Retention policy is part of the rollout, not optional cleanup.
- Hosted mail reduces MTA attack surface on the forge host, but it adds third-party dependency risk; keeping backups in Burrow-owned storage limits that blast radius.
## Contributor Playbook
- Put the Forward Email API token in `intake/forwardemail_api_token.txt`.
- Use `Tools/forwardemail-custom-s3.sh` to configure `burrow.net` and `burrow.rs`.
- Run the helper again with `--test-only` after any credential rotation.
- Record the chosen endpoint, region, bucket names, and lifecycle policy alongside rollout evidence.
- Do not claim Linear SAML is live until the Authentik app, Linear workspace settings, workspace plan prerequisites, and end-to-end login flow are verified.
## Alternatives Considered
- Self-host Stalwart on the forge host immediately. Rejected for the first rollout because it expands host scope before source control and CI are stable.
- Rely on Forward Email default backup storage only. Rejected because it gives Burrow less control over retention and data location.
- Delay all SaaS identity planning until after forge cutover. Rejected because Linear and other SaaS integrations will otherwise accrete without an agreed authority.
## Impact on Other Work
- Narrows the first forge host scope.
- Creates a clean mail path for `contact@burrow.net` without requiring self-hosted SMTP and IMAP.
- Leaves Authentik and Linear SAML as explicit follow-up work instead of hidden assumptions.
## Decision
Pending.
## References
- `docs/FORWARDEMAIL.md`
- `Tools/forwardemail-custom-s3.sh`
- Forward Email FAQ: custom S3-compatible storage for backups
- Linear docs: SAML SSO

168
flake.lock generated
View file

@ -1,168 +0,0 @@
{
"nodes": {
"agenix": {
"inputs": {
"darwin": "darwin",
"home-manager": "home-manager",
"nixpkgs": [
"nixpkgs"
],
"systems": "systems"
},
"locked": {
"lastModified": 1770165109,
"narHash": "sha256-9VnK6Oqai65puVJ4WYtCTvlJeXxMzAp/69HhQuTdl/I=",
"owner": "ryantm",
"repo": "agenix",
"rev": "b027ee29d959fda4b60b57566d64c98a202e0feb",
"type": "github"
},
"original": {
"owner": "ryantm",
"repo": "agenix",
"type": "github"
}
},
"darwin": {
"inputs": {
"nixpkgs": [
"agenix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1744478979,
"narHash": "sha256-dyN+teG9G82G+m+PX/aSAagkC+vUv0SgUw3XkPhQodQ=",
"owner": "lnl7",
"repo": "nix-darwin",
"rev": "43975d782b418ebf4969e9ccba82466728c2851b",
"type": "github"
},
"original": {
"owner": "lnl7",
"ref": "master",
"repo": "nix-darwin",
"type": "github"
}
},
"disko": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1773889306,
"narHash": "sha256-PAqwnsBSI9SVC2QugvQ3xeYCB0otOwCacB1ueQj2tgw=",
"type": "tarball",
"url": "https://codeload.github.com/nix-community/disko/tar.gz/master"
},
"original": {
"type": "tarball",
"url": "https://codeload.github.com/nix-community/disko/tar.gz/master"
}
},
"flake-utils": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"type": "tarball",
"url": "https://codeload.github.com/numtide/flake-utils/tar.gz/main"
},
"original": {
"type": "tarball",
"url": "https://codeload.github.com/numtide/flake-utils/tar.gz/main"
}
},
"hcloud-upload-image-src": {
"flake": false,
"locked": {
"lastModified": 1766413232,
"narHash": "sha256-1u9tpzciYjB/EgBI81pg9w0kez7hHZON7+AHvfKW7k0=",
"type": "tarball",
"url": "https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0"
},
"original": {
"type": "tarball",
"url": "https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0"
}
},
"home-manager": {
"inputs": {
"nixpkgs": [
"agenix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1745494811,
"narHash": "sha256-YZCh2o9Ua1n9uCvrvi5pRxtuVNml8X2a03qIFfRKpFs=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "abfad3d2958c9e6300a883bd443512c55dfeb1be",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "home-manager",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1773389992,
"narHash": "sha256-wvfdLLWJ2I9oEpDd9PfMA8osfIZicoQ5MT1jIwNs9Tk=",
"type": "tarball",
"url": "https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable"
},
"original": {
"type": "tarball",
"url": "https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable"
}
},
"root": {
"inputs": {
"agenix": "agenix",
"disko": "disko",
"flake-utils": "flake-utils",
"hcloud-upload-image-src": "hcloud-upload-image-src",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

198
flake.nix
View file

@ -1,198 +0,0 @@
# Flake entrypoint for Burrow: defines the default and CI dev shells, packaged
# helper tools, and the burrow-forge NixOS host plus its raw disk image output.
{
  description = "Burrow development shell and forge host configuration";
  inputs = {
    # nixpkgs / flake-utils / disko are pinned as codeload tarballs; the exact
    # revisions live in flake.lock.
    nixpkgs.url = "tarball+https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable";
    flake-utils.url = "tarball+https://codeload.github.com/numtide/flake-utils/tar.gz/main";
    agenix = {
      url = "github:ryantm/agenix";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    disko = {
      url = "tarball+https://codeload.github.com/nix-community/disko/tar.gz/master";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    # Source-only input (flake = false); compiled below with buildGoModule.
    hcloud-upload-image-src = {
      url = "tarball+https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0";
      flake = false;
    };
  };
  outputs = { self, nixpkgs, flake-utils, agenix, disko, hcloud-upload-image-src }:
    let
      supportedSystems = [
        "x86_64-linux"
        "aarch64-linux"
        "x86_64-darwin"
        "aarch64-darwin"
      ];
    in
    (flake-utils.lib.eachSystem supportedSystems (system:
      let
        pkgs = import nixpkgs {
          inherit system;
        };
        lib = pkgs.lib;
        # Toolchain shared by both dev shells (Rust, protobuf, Node, misc CLI).
        commonPackages = with pkgs; [
          cargo
          rustc
          rustfmt
          clippy
          protobuf
          pkg-config
          sqlite
          git
          openssh
          curl
          jq
          nodejs_20
          python3
          rsync
        ];
        # The Namespace CLI (nsc) ships only as prebuilt release tarballs, so
        # we fetch the per-OS / per-arch binary with a pinned hash and install
        # it verbatim. Evaluates to null on hosts with no prebuilt binary.
        nscPkg =
          if pkgs.stdenv.isLinux || pkgs.stdenv.isDarwin then
            let
              version = "0.0.452";
              osName =
                if pkgs.stdenv.isLinux then
                  "linux"
                else if pkgs.stdenv.isDarwin then
                  "darwin"
                else
                  throw "nsc: unsupported host OS ${pkgs.stdenv.hostPlatform.system}";
              # Release-asset arch name plus its fixed-output hash, selected by
              # host platform; both legs carry separate Linux/Darwin hashes.
              archInfo =
                if pkgs.stdenv.hostPlatform.isx86_64 then
                  {
                    arch = "amd64";
                    hash =
                      if pkgs.stdenv.isLinux then
                        "sha256-FBqOJ0UQWTv2r4HWMHrR/aqFzDa0ej/mS8dSoaCe6fY="
                      else
                        "sha256-3fRKWO0SCCa5PEym5yCB7dtyEx3xSxXSHfJYz8B+/4M=";
                  }
                else if pkgs.stdenv.hostPlatform.isAarch64 then
                  {
                    arch = "arm64";
                    hash =
                      if pkgs.stdenv.isLinux then
                        "sha256-A6twO8Ievbu7Gi5Hqon4ug5rCGOm/uHhlCya3px6+io="
                      else
                        "sha256-n363xLaGhy+a6lw2F+WicQYGXnGYnqRW8aTQCSppwcw=";
                  }
                else
                  throw "nsc: unsupported host platform ${pkgs.stdenv.hostPlatform.system}";
              src = pkgs.fetchurl {
                url = "https://github.com/namespacelabs/foundation/releases/download/v${version}/nsc_${version}_${osName}_${archInfo.arch}.tar.gz";
                sha256 = archInfo.hash;
              };
            in
            # stdenvNoCC: nothing to compile, just unpack and install binaries.
            pkgs.stdenvNoCC.mkDerivation {
              pname = "nsc";
              inherit version src;
              dontConfigure = true;
              dontBuild = true;
              unpackPhase = ''
                tar -xzf "$src"
              '';
              installPhase = ''
                install -d "$out/bin"
                install -m 0555 nsc "$out/bin/nsc"
                install -m 0555 docker-credential-nsc "$out/bin/docker-credential-nsc"
                install -m 0555 bazel-credential-nsc "$out/bin/bazel-credential-nsc"
              '';
            }
          else
            null;
        # Hetzner raw-image upload helper, built from the pinned source input.
        hcloudUploadImagePkg = pkgs.buildGoModule {
          pname = "hcloud-upload-image";
          version = "1.3.0";
          src = hcloud-upload-image-src;
          vendorHash = "sha256-IdOAUBPg0CEuHd2rdc7jOlw0XtnAhr3PVPJbnFs2+x4=";
          subPackages = [ "." ];
          env.GOWORK = "off";
          ldflags = [
            "-s"
            "-w"
          ];
        };
        # In-repo Go sources for the Forgejo/Namespace services, with VCS,
        # vendor, and node_modules directories (and `result` symlinks)
        # filtered out so the source hash stays stable across local builds.
        forgejoNscSrc = lib.cleanSourceWith {
          src = ./services/forgejo-nsc;
          filter = path: type:
            let
              p = toString path;
              name = builtins.baseNameOf path;
              hasDir = dir: lib.hasInfix "/${dir}/" p || lib.hasSuffix "/${dir}" p;
            in
            !(hasDir ".git" || hasDir "vendor" || hasDir "node_modules" || name == "result");
        };
        forgejoNscDispatcher = pkgs.buildGoModule {
          pname = "forgejo-nsc-dispatcher";
          version = "0.1.0";
          src = forgejoNscSrc;
          subPackages = [ "./cmd/forgejo-nsc-dispatcher" ];
          vendorHash = "sha256-Kpr+5Q7Dy4JiLuJVZbFeJAzLR7PLPYxhtJqfxMEytcs=";
        };
        forgejoNscAutoscaler = pkgs.buildGoModule {
          pname = "forgejo-nsc-autoscaler";
          version = "0.1.0";
          src = forgejoNscSrc;
          subPackages = [ "./cmd/forgejo-nsc-autoscaler" ];
          vendorHash = "sha256-Kpr+5Q7Dy4JiLuJVZbFeJAzLR7PLPYxhtJqfxMEytcs=";
        };
      in
      {
        # Interactive dev shell: full toolchain plus all packaged helpers.
        devShells.default = pkgs.mkShell {
          packages =
            commonPackages
            ++ [
              hcloudUploadImagePkg
              forgejoNscDispatcher
              forgejoNscAutoscaler
            ]
            ++ lib.optionals (nscPkg != null) [ nscPkg ];
        };
        # CI shell: same toolchain without the dispatcher/autoscaler builds.
        devShells.ci = pkgs.mkShell {
          packages =
            commonPackages
            ++ [
              hcloudUploadImagePkg
            ]
            ++ lib.optionals (nscPkg != null) [ nscPkg ];
        };
        formatter = pkgs.nixpkgs-fmt;
        packages =
          {
            agenix = agenix.packages.${system}.agenix;
            hcloud-upload-image = hcloudUploadImagePkg;
            forgejo-nsc-dispatcher = forgejoNscDispatcher;
            forgejo-nsc-autoscaler = forgejoNscAutoscaler;
          }
          # nsc is only exposed where a prebuilt binary exists for the host.
          // lib.optionalAttrs (nscPkg != null) { nsc = nscPkg; };
      }))
    // {
      # System-independent outputs: reusable NixOS modules, the forge host
      # configuration, and its disko-built raw disk image.
      nixosModules.burrow-forge = import ./nixos/modules/burrow-forge.nix;
      nixosModules.burrow-forge-runner = import ./nixos/modules/burrow-forge-runner.nix;
      nixosModules.burrow-forgejo-nsc = import ./nixos/modules/burrow-forgejo-nsc.nix;
      nixosModules.burrow-authentik = import ./nixos/modules/burrow-authentik.nix;
      nixosModules.burrow-headscale = import ./nixos/modules/burrow-headscale.nix;
      nixosConfigurations.burrow-forge = nixpkgs.lib.nixosSystem {
        system = "x86_64-linux";
        specialArgs = {
          inherit self;
        };
        modules = [
          agenix.nixosModules.default
          disko.nixosModules.disko
          ./nixos/hosts/burrow-forge/default.nix
        ];
      };
      images = {
        burrow-forge-raw = self.nixosConfigurations.burrow-forge.config.system.build.diskoImages;
      };
    };
}

View file

@ -1,58 +0,0 @@
# Burrow Forge Runbook
This directory contains the Burrow forge host definition and the Hetzner bootstrap shape for `burrow-forge`.
Mail hosting is intentionally not part of this NixOS host in the current plan. Burrow's first mail path is Forward Email with Burrow-owned custom S3 backups; see [`docs/FORWARDEMAIL.md`](../docs/FORWARDEMAIL.md).
## Files
- `hosts/burrow-forge/default.nix`: host entrypoint
- `modules/burrow-forge.nix`: Forgejo, Caddy, PostgreSQL, and admin bootstrap module
- `modules/burrow-forge-runner.nix`: Forgejo Actions runner and agent identity bootstrap
- `modules/burrow-forgejo-nsc.nix`: Namespace-backed ephemeral Forgejo runner services
- `modules/burrow-authentik.nix`: minimal Authentik IdP for Burrow control planes
- `modules/burrow-headscale.nix`: Headscale control plane rooted in Authentik OIDC
- `../secrets.nix`: agenix recipient map for tracked Burrow forge secrets
- `hetzner-cloud-config.yaml`: desired Hetzner host shape
- `keys/contact_at_burrow_net.pub`: initial operator SSH public key
- `keys/agent_at_burrow_net.pub`: automation SSH public key
- `../Scripts/hetzner-forge.sh`: Hetzner inventory and replace workflow
- `../Scripts/nsc-build-and-upload-image.sh`: temporary Namespace builder -> raw image -> Hetzner snapshot
- `../Scripts/bootstrap-forge-intake.sh`: copy the Forgejo bootstrap password and agent SSH key into `/var/lib/burrow/intake/`
- `../Scripts/check-forge-host.sh`: verify Forgejo, Caddy, the local runner, optional NSC services, and optional Tailnet services after boot
- `../Scripts/cloudflare-upsert-a-record.sh`: upsert DNS-only Cloudflare `A` records for Burrow host cutovers
- `../Scripts/forge-deploy.sh`: remote `nixos-rebuild` entrypoint for the forge host
- `../Scripts/provision-forgejo-nsc.sh`: render Burrow Namespace dispatcher/autoscaler runtime inputs and ensure the default Forgejo scope exists
- `../Scripts/sync-forgejo-nsc-config.sh`: copy intake-backed dispatcher/autoscaler inputs to the host
## Intended Flow
1. Build and upload the raw NixOS image with `Scripts/hetzner-forge.sh build-image` or `Scripts/nsc-build-and-upload-image.sh`.
2. Recreate `burrow-forge` from the latest labeled snapshot with `Scripts/hetzner-forge.sh recreate-from-image --yes`.
3. Run `Scripts/bootstrap-forge-intake.sh` to place the Forgejo bootstrap password file and automation SSH key under `/var/lib/burrow/intake/`.
4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account.
5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent <agent@burrow.net>`.
6. Run `Scripts/provision-forgejo-nsc.sh` locally, then `Scripts/sync-forgejo-nsc-config.sh` to place the Namespace dispatcher/autoscaler runtime inputs under `/var/lib/burrow/intake/`.
7. Ensure `/var/lib/agenix/agenix.key` exists on the host, encrypt `secrets/infra/authentik.env.age`, `secrets/infra/authentik-google-client-id.age`, `secrets/infra/authentik-google-client-secret.age`, and `secrets/infra/headscale-oidc-client-secret.age`, and let agenix materialize them under `/run/agenix/`.
8. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, `auth.burrow.net`, `ts.burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME.
9. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace.
10. Configure Forward Email custom S3 backups for `burrow.net` and `burrow.rs` out-of-band with `Tools/forwardemail-custom-s3.sh`.
## Current Constraints
- `burrow-forge` is live on NixOS in `hel1` at `89.167.47.21`, and `Scripts/check-forge-host.sh --expect-nsc` passes locally against that host.
- Authentik and Headscale secrets now live in tracked agenix blobs under `secrets/infra/` and decrypt to `/run/agenix/` on the forge host.
- Public Burrow forge cutover completed on March 15, 2026:
- `burrow.net`, `git.burrow.net`, and `nsc-autoscaler.burrow.net` now publish public `A` records to `89.167.47.21`
- HTTP redirects to HTTPS on all three names
- `https://burrow.net` returns the root forge landing response
- `https://git.burrow.net` returns the live Forgejo front door
- `https://nsc-autoscaler.burrow.net` terminates TLS on Caddy and returns the expected application-level `404` for `/`
- The Cloudflare token currently in `intake/cloudflare-token.txt` is an account-scoped token: `POST /accounts/<account>/tokens/verify` succeeds, while `POST /user/tokens/verify` returns `Invalid API Token`.
- `burrow.rs` still resolves publicly to a Vercel `DEPLOYMENT_NOT_FOUND` response.
- Both domains publish Forward Email MX/TXT records.
- Forward Email custom S3 is live on both domains against the Hetzner `burrow` bucket and the public regional endpoint `https://hel1.your-objectstorage.com`.
- The current Hetzner account contains both:
- the older Ubuntu bootstrap server in `hil`
- the live `burrow-forge` NixOS server in `hel1`
- The remaining forge work is follow-on product/integration work, not host bring-up, mail backup wiring, or public DNS cutover.

View file

@ -1,10 +0,0 @@
# Desired Hetzner Cloud shape for the burrow-forge server.
# NOTE(review): consumed by Scripts/hetzner-forge.sh when (re)creating the
# host — confirm against that script's expected schema.
name: burrow-forge
server_type: ccx23
location: hel1
# Base image for initial bootstrap only; the runbook's recreate-from-image
# flow replaces it with the latest labeled NixOS snapshot.
image: ubuntu-24.04
ssh_keys:
  - contact@burrow.net
  - agent@burrow.net
labels:
  project: burrow
  role: forge

View file

@ -1,92 +0,0 @@
# burrow-forge host entrypoint: imports hardware/disk layout plus the Burrow
# NixOS modules, and points every service at its intake- or agenix-provided
# secret material.
{ config, self, ... }:
{
  imports = [
    ./hardware-configuration.nix
    ./disko-config.nix
    self.nixosModules.burrow-forge
    self.nixosModules.burrow-forge-runner
    self.nixosModules.burrow-forgejo-nsc
    self.nixosModules.burrow-authentik
    self.nixosModules.burrow-headscale
  ];
  system.stateVersion = "24.11";
  time.timeZone = "America/Los_Angeles";
  nix.settings.experimental-features = [
    "nix-command"
    "flakes"
  ];
  # agenix decrypts the tracked secrets below with this host key; plaintext
  # is materialized under /run/agenix/ and referenced via config.age.secrets.
  age.identityPaths = [ "/var/lib/agenix/agenix.key" ];
  age.secrets.burrowAuthentikEnv = {
    file = ../../../secrets/infra/authentik.env.age;
    owner = "root";
    group = "root";
    mode = "0400";
  };
  age.secrets.burrowHeadscaleOidcClientSecret = {
    file = ../../../secrets/infra/headscale-oidc-client-secret.age;
    owner = "root";
    group = "root";
    mode = "0400";
  };
  age.secrets.burrowAuthentikGoogleClientId = {
    file = ../../../secrets/infra/authentik-google-client-id.age;
    owner = "root";
    group = "root";
    mode = "0400";
  };
  age.secrets.burrowAuthentikGoogleClientSecret = {
    file = ../../../secrets/infra/authentik-google-client-secret.age;
    owner = "root";
    group = "root";
    mode = "0400";
  };
  # Loopback aliases for the public Burrow names so on-host services can
  # reach them regardless of external DNS state.
  networking.extraHosts = ''
    127.0.0.1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net
    ::1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net
  '';
  # Forgejo + Caddy + PostgreSQL; admin bootstrap password comes from the
  # host-local intake directory (see Scripts/bootstrap-forge-intake.sh).
  services.burrow.forge = {
    enable = true;
    adminPasswordFile = "/var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt";
    authorizedKeys = [
      (builtins.readFile ../../keys/contact_at_burrow_net.pub)
      (builtins.readFile ../../keys/agent_at_burrow_net.pub)
    ];
  };
  services.burrow.forgeRunner = {
    enable = true;
    sshPrivateKeyFile = "/var/lib/burrow/intake/agent_at_burrow_net_ed25519";
  };
  # Namespace-backed ephemeral runners; all runtime inputs are intake files.
  services.burrow.forgejoNsc = {
    enable = true;
    nscTokenFile = "/var/lib/burrow/intake/forgejo_nsc_token.txt";
    dispatcher = {
      configFile = "/var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml";
    };
    autoscaler = {
      enable = true;
      configFile = "/var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml";
    };
  };
  # Authentik IdP; Headscale and Google OAuth secrets come from agenix.
  services.burrow.authentik = {
    enable = true;
    envFile = config.age.secrets.burrowAuthentikEnv.path;
    headscaleClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path;
    googleClientIDFile = config.age.secrets.burrowAuthentikGoogleClientId.path;
    googleClientSecretFile = config.age.secrets.burrowAuthentikGoogleClientSecret.path;
  };
  # Headscale shares the same OIDC client secret as the Authentik provider.
  services.burrow.headscale = {
    enable = true;
    oidcClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path;
  };
}

View file

@ -1,36 +0,0 @@
# Disko layout for burrow-forge: GPT disk with a FAT32 ESP mounted at /boot
# and an ext4 root filling the rest. imageName/imageSize also drive the raw
# disk image build exposed as images.burrow-forge-raw in the flake.
{ lib, ... }:
{
  disko.devices = {
    disk.main = {
      type = "disk";
      # mkDefault so a host with a different device path can override this.
      device = lib.mkDefault "/dev/sda";
      imageName = "burrow-forge";
      imageSize = "80G";
      content = {
        type = "gpt";
        partitions = {
          ESP = {
            size = "512M";
            type = "EF00";
            content = {
              type = "filesystem";
              format = "vfat";
              mountpoint = "/boot";
              # Keep the ESP unreadable to non-root users.
              mountOptions = [ "umask=0077" ];
            };
          };
          root = {
            size = "100%";
            content = {
              type = "filesystem";
              format = "ext4";
              mountpoint = "/";
            };
          };
        };
      };
    };
  };
}

View file

@ -1,11 +0,0 @@
# Minimal hardware configuration for the Hetzner Cloud VM: just the initrd
# storage/virtio modules needed to find the boot disk.
{ ... }:
{
  # Derived from Hetzner Cloud rescue-mode hardware inspection.
  boot.initrd.availableKernelModules = [
    "ahci"
    "sd_mod"
    "virtio_pci"
    "virtio_scsi"
  ];
}

View file

@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net

View file

@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa

View file

@ -1,348 +0,0 @@
# Burrow Authentik identity provider: Authentik server/worker plus a dedicated
# PostgreSQL, all run as podman containers on the host network and fronted by
# Caddy. A declarative blueprint provisions the Headscale OIDC provider and
# application; an optional oneshot reconciles a Google OAuth login source.
{ config, lib, pkgs, ... }:
let
  cfg = config.services.burrow.authentik;
  # Rendered runtime material (merged env file + blueprint copy) lives under
  # /run so it does not persist across reboots.
  runtimeDir = "/run/burrow-authentik";
  envFile = "${runtimeDir}/authentik.env";
  blueprintDir = "${runtimeDir}/blueprints";
  blueprintFile = "${blueprintDir}/burrow-authentik.yaml";
  # Named podman volumes for database and Authentik data.
  postgresVolume = "burrow-authentik-postgresql:/var/lib/postgresql/data";
  dataVolume = "burrow-authentik-data:/data";
  googleSourceSyncScript = ../../Scripts/authentik-sync-google-source.sh;
  # Authentik blueprint: an email scope mapping, the Headscale OIDC provider
  # (client secret injected at import time via the container env variable
  # AUTHENTIK_BURROW_TS_CLIENT_SECRET), and the matching application entry.
  authentikBlueprint = pkgs.writeText "burrow-authentik-blueprint.yaml" ''
    version: 1
    metadata:
      name: Burrow Authentik
      labels:
        blueprints.goauthentik.io/description: Minimal Burrow Authentik applications
    entries:
      - model: authentik_providers_oauth2.scopemapping
        id: burrow-oidc-email
        identifiers:
          name: Burrow OIDC Email
        attrs:
          name: Burrow OIDC Email
          scope_name: email
          description: Verified email mapping for Burrow
          expression: |
            return {
              "email": request.user.email,
              "email_verified": True,
            }
      - model: authentik_providers_oauth2.oauth2provider
        id: burrow-oidc-provider-ts
        identifiers:
          name: Burrow Tailnet
        attrs:
          authorization_flow: !Find [authentik_flows.flow, [slug, default-provider-authorization-implicit-consent]]
          invalidation_flow: !Find [authentik_flows.flow, [slug, default-provider-invalidation-flow]]
          issuer_mode: per_provider
          slug: ${cfg.headscaleProviderSlug}
          client_type: confidential
          client_id: ${cfg.headscaleDomain}
          client_secret: !Env [AUTHENTIK_BURROW_TS_CLIENT_SECRET, ""]
          include_claims_in_id_token: true
          redirect_uris:
            - matching_mode: strict
              url: https://${cfg.headscaleDomain}/oidc/callback
          property_mappings:
            - !Find [authentik_providers_oauth2.scopemapping, [managed, goauthentik.io/providers/oauth2/scope-openid]]
            - !KeyOf burrow-oidc-email
            - !Find [authentik_providers_oauth2.scopemapping, [managed, goauthentik.io/providers/oauth2/scope-profile]]
          signing_key: !Find [authentik_crypto.certificatekeypair, [name, authentik Self-signed Certificate]]
      - model: authentik_core.application
        identifiers:
          slug: ${cfg.headscaleProviderSlug}
        attrs:
          name: Burrow Tailnet
          slug: ${cfg.headscaleProviderSlug}
          provider: !KeyOf burrow-oidc-provider-ts
          meta_launch_url: https://${cfg.headscaleDomain}/
  '';
in
{
  options.services.burrow.authentik = {
    enable = lib.mkEnableOption "the Burrow Authentik identity provider";
    domain = lib.mkOption {
      type = lib.types.str;
      default = "auth.burrow.net";
      description = "Public Authentik domain.";
    };
    port = lib.mkOption {
      type = lib.types.port;
      default = 9002;
      description = "Local Authentik HTTP listen port.";
    };
    image = lib.mkOption {
      type = lib.types.str;
      default = "ghcr.io/goauthentik/server:2026.2.1";
      description = "Authentik container image reference.";
    };
    envFile = lib.mkOption {
      type = lib.types.str;
      default = "/var/lib/burrow/intake/authentik.env";
      description = "Host-local Authentik bootstrap environment file.";
    };
    headscaleDomain = lib.mkOption {
      type = lib.types.str;
      default = "ts.burrow.net";
      description = "Headscale public domain used for the bundled OIDC client.";
    };
    headscaleProviderSlug = lib.mkOption {
      type = lib.types.str;
      default = "ts";
      description = "Authentik provider slug for Headscale.";
    };
    headscaleClientSecretFile = lib.mkOption {
      type = lib.types.str;
      default = "/var/lib/burrow/intake/authentik_headscale_client_secret.txt";
      description = "Host-local file containing the Authentik Headscale OIDC client secret.";
    };
    # The Google source sync only runs when BOTH client ID and secret files
    # are configured (see the mkIf on burrow-authentik-google-source below).
    googleClientIDFile = lib.mkOption {
      type = lib.types.nullOr lib.types.str;
      default = null;
      description = "Host-local file containing the Google OAuth client ID for the Authentik source.";
    };
    googleClientSecretFile = lib.mkOption {
      type = lib.types.nullOr lib.types.str;
      default = null;
      description = "Host-local file containing the Google OAuth client secret for the Authentik source.";
    };
    googleSourceSlug = lib.mkOption {
      type = lib.types.str;
      default = "google";
      description = "Authentik OAuth source slug used for Google login.";
    };
    googleLoginMode = lib.mkOption {
      type = lib.types.enum [
        "promoted"
        "redirect"
      ];
      default = "redirect";
      description = "Identification-stage behavior for the Google Authentik source.";
    };
  };
  config = lib.mkIf cfg.enable {
    virtualisation.podman.enable = true;
    systemd.tmpfiles.rules = [
      "d ${runtimeDir} 0750 root root -"
      "d ${blueprintDir} 0750 root root -"
    ];
    # Oneshot that renders the merged env file consumed by all three
    # containers. Fails fast when intake secrets are missing; ordered after
    # the Headscale client-secret unit when that module is enabled.
    systemd.services.burrow-authentik-runtime = {
      description = "Render the Burrow Authentik runtime environment";
      before = [
        "podman-burrow-authentik-postgresql.service"
        "podman-burrow-authentik-server.service"
        "podman-burrow-authentik-worker.service"
      ];
      wantedBy = [
        "podman-burrow-authentik-postgresql.service"
        "podman-burrow-authentik-server.service"
        "podman-burrow-authentik-worker.service"
      ];
      after = lib.optionals config.services.burrow.headscale.enable [
        "burrow-headscale-client-secret.service"
      ];
      wants = lib.optionals config.services.burrow.headscale.enable [
        "burrow-headscale-client-secret.service"
      ];
      path = [ pkgs.coreutils ];
      serviceConfig = {
        Type = "oneshot";
        User = "root";
        Group = "root";
        RemainAfterExit = true;
      };
      script = ''
        set -euo pipefail
        if [ ! -s ${lib.escapeShellArg cfg.envFile} ]; then
          echo "Authentik env file missing: ${cfg.envFile}" >&2
          exit 1
        fi
        if [ ! -s ${lib.escapeShellArg cfg.headscaleClientSecretFile} ]; then
          echo "Headscale client secret missing: ${cfg.headscaleClientSecretFile}" >&2
          exit 1
        fi
        install -d -m 0750 -o root -g root ${runtimeDir} ${blueprintDir}
        install -m 0644 -o root -g root ${authentikBlueprint} ${blueprintFile}
        source ${lib.escapeShellArg cfg.envFile}
        read_secret() {
          tr -d '\r\n' < "$1"
        }
        cat > ${envFile} <<EOF
        PG_DB=authentik
        PG_USER=authentik
        PG_PASS=$PG_PASS
        POSTGRES_DB=authentik
        POSTGRES_USER=authentik
        POSTGRES_PASSWORD=$PG_PASS
        AUTHENTIK_POSTGRESQL__HOST=127.0.0.1
        AUTHENTIK_POSTGRESQL__PORT=5433
        AUTHENTIK_POSTGRESQL__NAME=authentik
        AUTHENTIK_POSTGRESQL__USER=authentik
        AUTHENTIK_POSTGRESQL__PASSWORD=$PG_PASS
        AUTHENTIK_LISTEN__HTTP=0.0.0.0:${toString cfg.port}
        AUTHENTIK_SECRET_KEY=$AUTHENTIK_SECRET_KEY
        AUTHENTIK_BOOTSTRAP_PASSWORD=$AUTHENTIK_BOOTSTRAP_PASSWORD
        AUTHENTIK_BOOTSTRAP_TOKEN=$AUTHENTIK_BOOTSTRAP_TOKEN
        AUTHENTIK_BURROW_TS_CLIENT_SECRET=$(read_secret ${lib.escapeShellArg cfg.headscaleClientSecretFile})
        EOF
        chown root:root ${envFile}
        chmod 0600 ${envFile}
      '';
    };
    # Dedicated PostgreSQL on a non-default port (5433), loopback-only, so it
    # does not collide with any host PostgreSQL on the shared host network.
    virtualisation.oci-containers.containers."burrow-authentik-postgresql" = {
      image = "docker.io/library/postgres:16-alpine";
      autoStart = true;
      environmentFiles = [ envFile ];
      cmd = [
        "-c"
        "port=5433"
        "-c"
        "listen_addresses=127.0.0.1"
      ];
      volumes = [ postgresVolume ];
      extraOptions = [
        "--network=host"
        "--pull=always"
      ];
    };
    # Authentik server and worker share the image, env file, data volume, and
    # the read-only blueprint mount under /blueprints/.
    virtualisation.oci-containers.containers."burrow-authentik-server" = {
      image = cfg.image;
      autoStart = true;
      cmd = [ "server" ];
      environmentFiles = [ envFile ];
      volumes = [
        dataVolume
        "${blueprintFile}:/blueprints/burrow-authentik.yaml:ro"
      ];
      extraOptions = [
        "--network=host"
        "--pull=always"
      ];
    };
    virtualisation.oci-containers.containers."burrow-authentik-worker" = {
      image = cfg.image;
      autoStart = true;
      cmd = [ "worker" ];
      environmentFiles = [ envFile ];
      volumes = [
        dataVolume
        "${blueprintFile}:/blueprints/burrow-authentik.yaml:ro"
      ];
      extraOptions = [
        "--network=host"
        "--pull=always"
        "--user=root"
      ];
    };
    # Readiness gate: polls the local health endpoint (up to 90 tries, 2s
    # apart) so dependents can order after a live Authentik instance.
    systemd.services.burrow-authentik-ready = {
      description = "Wait for Burrow Authentik to become ready";
      after = [ "podman-burrow-authentik-server.service" ];
      wants = [ "podman-burrow-authentik-server.service" ];
      wantedBy = [ "multi-user.target" ];
      path = [
        pkgs.coreutils
        pkgs.curl
      ];
      serviceConfig = {
        Type = "oneshot";
        User = "root";
        Group = "root";
      };
      script = ''
        set -euo pipefail
        for _ in $(seq 1 90); do
          if ${pkgs.curl}/bin/curl -fsS http://127.0.0.1:${toString cfg.port}/-/health/ready/ >/dev/null; then
            exit 0
          fi
          sleep 2
        done
        echo "Authentik did not become ready on ${cfg.domain}" >&2
        exit 1
      '';
    };
    # Optional reconciliation of the Google OAuth source via the external
    # sync script; only defined when both credential files are configured.
    systemd.services.burrow-authentik-google-source = lib.mkIf (
      cfg.googleClientIDFile != null && cfg.googleClientSecretFile != null
    ) {
      description = "Reconcile the Burrow Authentik Google OAuth source";
      after = [
        "burrow-authentik-ready.service"
        "network-online.target"
      ];
      wants = [
        "burrow-authentik-ready.service"
        "network-online.target"
      ];
      wantedBy = [ "multi-user.target" ];
      # Re-run whenever the script or any credential input changes.
      restartTriggers = [
        googleSourceSyncScript
        cfg.envFile
        cfg.googleClientIDFile
        cfg.googleClientSecretFile
      ];
      path = [
        pkgs.bash
        pkgs.coreutils
        pkgs.curl
        pkgs.jq
      ];
      serviceConfig = {
        Type = "oneshot";
        User = "root";
        Group = "root";
        Restart = "on-failure";
        RestartSec = 5;
      };
      script = ''
        set -euo pipefail
        set -a
        source ${lib.escapeShellArg cfg.envFile}
        set +a
        export AUTHENTIK_URL=https://${cfg.domain}
        export AUTHENTIK_GOOGLE_SOURCE_SLUG=${lib.escapeShellArg cfg.googleSourceSlug}
        export AUTHENTIK_GOOGLE_LOGIN_MODE=${lib.escapeShellArg cfg.googleLoginMode}
        export AUTHENTIK_GOOGLE_USER_MATCHING_MODE=email_link
        export AUTHENTIK_GOOGLE_CLIENT_ID="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.googleClientIDFile})"
        export AUTHENTIK_GOOGLE_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.googleClientSecretFile})"
        ${pkgs.bash}/bin/bash ${googleSourceSyncScript}
      '';
    };
    # Caddy terminates TLS on the public domain and proxies to the local port.
    services.caddy.virtualHosts."${cfg.domain}".extraConfig = ''
      encode gzip zstd
      reverse_proxy 127.0.0.1:${toString cfg.port}
    '';
  };
}

Some files were not shown because too many files have changed in this diff Show more