Compare commits

..

No commits in common. "main" and "ck/ipv6" have entirely different histories.

101 changed files with 1204 additions and 17592 deletions

View file

@ -19,21 +19,10 @@ jobs:
runs-on: [self-hosted, linux, x86_64, burrow-forge]
steps:
- name: Checkout
shell: bash
run: |
set -euo pipefail
repo_url="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git"
if [ ! -d .git ]; then
git init .
fi
if git remote get-url origin >/dev/null 2>&1; then
git remote set-url origin "${repo_url}"
else
git remote add origin "${repo_url}"
fi
git fetch --force --tags origin "${GITHUB_SHA}"
git checkout --force --detach FETCH_HEAD
git clean -ffdqx
uses: https://code.forgejo.org/actions/checkout@v4
with:
token: ${{ github.token }}
fetch-depth: 0
- name: Test
shell: bash

View file

@ -19,24 +19,13 @@ jobs:
runs-on: [self-hosted, linux, x86_64, burrow-forge]
steps:
- name: Checkout
shell: bash
run: |
set -euo pipefail
repo_url="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git"
if [ ! -d .git ]; then
git init .
fi
if git remote get-url origin >/dev/null 2>&1; then
git remote set-url origin "${repo_url}"
else
git remote add origin "${repo_url}"
fi
git fetch --force --tags origin "${GITHUB_SHA}"
git checkout --force --detach FETCH_HEAD
git clean -ffdqx
uses: https://code.forgejo.org/actions/checkout@v4
with:
token: ${{ github.token }}
fetch-depth: 0
- name: Build
shell: bash
run: |
set -euo pipefail
nix develop .#ci -c bash -lc 'cd site && npm ci --no-audit --no-fund && npm run build'
nix develop .#ci -c bash -lc 'cd site && npm install && npm run build'

View file

@ -1,38 +0,0 @@
name: Lint Governance
on:
push:
branches:
- main
pull_request:
branches:
- "**"
workflow_dispatch:
jobs:
governance:
name: BEP Metadata
runs-on: [self-hosted, linux, x86_64, burrow-forge]
steps:
- name: Checkout
shell: bash
run: |
set -euo pipefail
repo_url="${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git"
if [ ! -d .git ]; then
git init .
fi
if git remote get-url origin >/dev/null 2>&1; then
git remote set-url origin "${repo_url}"
else
git remote add origin "${repo_url}"
fi
git fetch --force --tags origin "${GITHUB_SHA}"
git checkout --force --detach FETCH_HEAD
git clean -ffdqx
- name: Validate BEP metadata
shell: bash
run: |
set -euo pipefail
python3 Scripts/check-bep-metadata.py

View file

@ -1,60 +0,0 @@
name: Release
on:
push:
tags:
- "v*"
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: false
jobs:
release:
name: Release Build
runs-on: namespace-profile-linux-medium
steps:
- name: Checkout
uses: https://code.forgejo.org/actions/checkout@v4
with:
token: ${{ github.token }}
fetch-depth: 0
- name: Bootstrap Nix
shell: bash
run: |
set -euo pipefail
chmod +x Scripts/ci/ensure-nix.sh
Scripts/ci/ensure-nix.sh
- name: Build release artifacts
shell: bash
env:
RELEASE_REF: ${{ github.ref_name }}
run: |
set -euo pipefail
ref="${RELEASE_REF:-manual-${GITHUB_SHA::7}}"
export RELEASE_REF="${ref}"
chmod +x Scripts/ci/build-release-artifacts.sh
nix develop .#ci -c Scripts/ci/build-release-artifacts.sh
- name: Upload release artifacts
uses: https://code.forgejo.org/actions/upload-artifact@v4
with:
name: burrow-release-${{ github.ref_name }}
path: dist/*
if-no-files-found: error
- name: Publish Forgejo release
if: startsWith(github.ref, 'refs/tags/')
shell: bash
env:
RELEASE_TAG: ${{ github.ref_name }}
API_URL: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
TOKEN: ${{ github.token }}
run: |
set -euo pipefail
chmod +x Scripts/ci/publish-forgejo-release.sh
nix develop .#ci -c Scripts/ci/publish-forgejo-release.sh

View file

@ -1,23 +0,0 @@
name: Governance Lint
on:
pull_request:
branches:
- "*"
jobs:
governance:
name: BEP Metadata
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- name: Validate BEP metadata
shell: bash
run: |
set -euo pipefail
python3 Scripts/check-bep-metadata.py

View file

@ -1,14 +0,0 @@
# instructions for agents
1. Spell the project name as `Burrow` in user-facing copy and `burrow` in code, package, and protocol identifiers unless an existing integration requires a different literal.
2. Read [CONSTITUTION.md](CONSTITUTION.md) before changing Apple clients, the daemon, the control plane, forge infrastructure, identity, or security-sensitive code.
3. Anchor non-trivial changes in a Burrow Evolution Proposal (BEP) under [evolution/](evolution/README.md) so future contributors can inherit the rationale, safeguards, and rollout shape.
4. Before touching the Apple app, daemon IPC, or Tailnet flows, review:
- [evolution/proposals/BEP-0002-control-plane-bootstrap-and-local-auth.md](evolution/proposals/BEP-0002-control-plane-bootstrap-and-local-auth.md)
- [evolution/proposals/BEP-0003-connect-ip-and-negotiation-roadmap.md](evolution/proposals/BEP-0003-connect-ip-and-negotiation-roadmap.md)
- [evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md](evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md)
- [evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md](evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md)
5. Apple clients must talk only to the daemon over gRPC. Do not add direct HTTP, control-plane, or helper-process calls from Swift UI code.
6. Treat Tailnet as one protocol family. Tailscale-managed and self-hosted Headscale-style deployments differ by authority, policy, and auth details, not by a separate user-facing protocol surface.
7. Maintain canonical identity and operator metadata in [contributors.nix](contributors.nix). If Burrow forge, Authentik, Headscale, or admin/group mappings need to change, edit that registry first and derive runtime configuration from it.
8. When process or architecture is unclear, stop and draft or update a BEP instead of improvising durable behavior in code.

View file

@ -55,7 +55,7 @@ class AppDelegate: NSObject, NSApplicationDelegate {
let statusBar = NSStatusBar.system
let statusItem = statusBar.statusItem(withLength: NSStatusItem.squareLength)
if let button = statusItem.button {
button.image = NSImage(systemSymbolName: "pipe.and.drop.fill", accessibilityDescription: nil)
button.image = NSImage(systemSymbolName: "network.badge.shield.half.filled", accessibilityDescription: nil)
}
return statusItem
}()

View file

@ -1,439 +0,0 @@
import XCTest
import UIKit
@MainActor
final class BurrowTailnetLoginUITests: XCTestCase {
private enum TailnetLoginMode: String, Decodable {
case tailscale
case discovered
}
private struct TestConfig: Decodable {
let email: String
let username: String
let password: String
let mode: TailnetLoginMode?
}
override func setUpWithError() throws {
continueAfterFailure = false
}
func testTailnetLoginThroughAuthentikWebSession() throws {
let config = try loadTestConfig()
let email = config.email
let username = config.username
let password = config.password
let mode = config.mode ?? .tailscale
let browserIdentity = mode == .tailscale ? email : username
let app = XCUIApplication()
app.launch()
let tailnetButton = app.buttons["quick-add-tailnet"]
XCTAssertTrue(tailnetButton.waitForExistence(timeout: 15), "Tailnet add button did not appear")
tailnetButton.tap()
configureTailnetIfNeeded(in: app, mode: mode)
let discoveryField = app.textFields["tailnet-discovery-email"]
XCTAssertTrue(discoveryField.waitForExistence(timeout: 10), "Tailnet discovery email field did not appear")
replaceText(in: discoveryField, with: email)
let serverCard = app.descendants(matching: .any)
.matching(identifier: "tailnet-server-card")
.firstMatch
XCTAssertTrue(serverCard.waitForExistence(timeout: 5), "Tailnet server card did not appear")
let signInButton = app.buttons["tailnet-start-sign-in"]
XCTAssertTrue(signInButton.waitForExistence(timeout: 10), "Tailnet sign-in button did not appear")
signInButton.tap()
acceptAuthenticationPromptIfNeeded(in: app, timeout: 20)
let webSession = webAuthenticationSession()
XCTAssertTrue(webSession.waitForExistence(timeout: 20), "Safari authentication session did not appear")
signIntoAuthentik(in: webSession, username: browserIdentity, password: password)
app.activate()
XCTAssertTrue(
waitForTailnetSignedIn(in: app, timeout: 60),
"Tailnet sign-in never reached the running state"
)
}
private func configureTailnetIfNeeded(in app: XCUIApplication, mode: TailnetLoginMode) {
guard mode == .discovered else { return }
openTailnetMenu(in: app)
tapMenuButton(named: "Edit Custom Server", in: app)
openTailnetMenu(in: app)
tapMenuButton(named: "Show Advanced Settings", in: app)
let authorityField = app.textFields["tailnet-authority"]
XCTAssertTrue(authorityField.waitForExistence(timeout: 10), "Tailnet authority field did not appear")
replaceText(in: authorityField, with: "")
}
private func openTailnetMenu(in app: XCUIApplication) {
let moreButton = app.buttons["More"]
XCTAssertTrue(moreButton.waitForExistence(timeout: 5), "Tailnet menu button did not appear")
moreButton.tap()
}
private func tapMenuButton(named title: String, in app: XCUIApplication) {
let menuButton = firstExistingElement(
from: [
app.buttons[title],
app.descendants(matching: .button)[title],
],
timeout: 5
)
XCTAssertTrue(menuButton.exists, "Menu action \(title) did not appear")
menuButton.tap()
}
private func acceptAuthenticationPromptIfNeeded(
in app: XCUIApplication,
timeout: TimeInterval
) {
let springboard = XCUIApplication(bundleIdentifier: "com.apple.springboard")
let deadline = Date().addingTimeInterval(timeout)
repeat {
let promptCandidates = [
springboard.buttons["Continue"],
springboard.buttons["Allow"],
app.buttons["Continue"],
app.buttons["Allow"],
]
for button in promptCandidates where button.exists && button.isHittable {
button.tap()
return
}
RunLoop.current.run(until: Date().addingTimeInterval(0.25))
} while Date() < deadline
let promptCandidates = [
springboard.buttons["Continue"],
springboard.buttons["Allow"],
app.buttons["Continue"],
app.buttons["Allow"],
]
for button in promptCandidates where button.exists {
button.tap()
return
}
}
private func webAuthenticationSession() -> XCUIApplication {
let safariViewService = XCUIApplication(bundleIdentifier: "com.apple.SafariViewService")
if safariViewService.waitForExistence(timeout: 5) {
return safariViewService
}
let safari = XCUIApplication(bundleIdentifier: "com.apple.mobilesafari")
_ = safari.waitForExistence(timeout: 5)
return safari
}
private func signIntoAuthentik(in webSession: XCUIApplication, username: String, password: String) {
followTailnetRedirectIfNeeded(in: webSession)
if !webSession.exists {
return
}
let immediatePasswordField = firstExistingSecureField(in: webSession, timeout: 2)
if immediatePasswordField.exists {
replaceSecureText(in: immediatePasswordField, within: webSession, with: password)
submitAuthenticationForm(in: webSession, focusedField: immediatePasswordField)
return
}
let usernameField = firstExistingElement(
in: webSession,
queries: [
{ $0.textFields["Username"] },
{ $0.textFields["Email or Username"] },
{ $0.textFields["Email address"] },
{ $0.textFields["Email"] },
{ $0.webViews.textFields["Username"] },
{ $0.webViews.textFields["Email or Username"] },
{ $0.descendants(matching: .textField).firstMatch },
],
timeout: 12
)
if !usernameField.exists {
return
}
replaceText(in: usernameField, with: username)
tapFirstExistingButton(
in: webSession,
titles: ["Continue", "Next", "Sign In", "Log in", "Login"],
timeout: 5
)
let passwordField = firstExistingSecureField(in: webSession, timeout: 20)
XCTAssertTrue(passwordField.exists, "Authentik password field did not appear")
replaceSecureText(in: passwordField, within: webSession, with: password)
submitAuthenticationForm(in: webSession, focusedField: passwordField)
}
private func followTailnetRedirectIfNeeded(in webSession: XCUIApplication) {
let redirectCandidates = [
webSession.links["Found"],
webSession.webViews.links["Found"],
webSession.buttons["Found"],
webSession.webViews.buttons["Found"],
]
let redirectLink = firstExistingElement(from: redirectCandidates, timeout: 8)
if redirectLink.exists {
redirectLink.tap()
}
}
private func firstExistingSecureField(in app: XCUIApplication, timeout: TimeInterval) -> XCUIElement {
let candidates = [
app.descendants(matching: .secureTextField).firstMatch,
app.secureTextFields["Password"],
app.secureTextFields["Password or Token"],
app.webViews.secureTextFields["Password"],
app.webViews.secureTextFields["Password or Token"],
]
return firstExistingElement(from: candidates, timeout: timeout)
}
private func tapFirstExistingButton(
in app: XCUIApplication,
titles: [String],
timeout: TimeInterval
) {
let candidates = titles.flatMap { title in
[
app.buttons[title],
app.webViews.buttons[title],
]
} + [app.descendants(matching: .button).firstMatch]
let button = firstExistingElement(from: candidates, timeout: timeout)
XCTAssertTrue(button.exists, "Expected one of \(titles.joined(separator: ", ")) to appear")
button.tap()
}
private func submitAuthenticationForm(in app: XCUIApplication, focusedField: XCUIElement) {
focus(focusedField)
focusedField.typeText("\n")
if waitForAny(
[
{ !focusedField.exists },
{ !app.staticTexts["Burrow Tailnet Authentication"].exists },
],
timeout: 1.5
) {
return
}
let keyboard = app.keyboards.firstMatch
if keyboard.waitForExistence(timeout: 2) {
let keyboardCandidates = [
"Return",
"return",
"Go",
"go",
"Continue",
"continue",
"Done",
"done",
"Join",
"join",
"Sign In",
"Log In",
"Login",
]
for title in keyboardCandidates {
let key = keyboard.buttons[title]
if key.exists && key.isHittable {
key.tap()
return
}
}
if let lastKey = keyboard.buttons.allElementsBoundByIndex.last,
lastKey.exists,
lastKey.isHittable
{
lastKey.tap()
return
}
}
tapFirstExistingButton(
in: app,
titles: ["Continue", "Sign In", "Log in", "Login"],
timeout: 5
)
}
private func loadTestConfig() throws -> TestConfig {
let environment = ProcessInfo.processInfo.environment
if let email = nonEmptyEnvironment("BURROW_UI_TEST_EMAIL"),
let password = nonEmptyEnvironment("BURROW_UI_TEST_PASSWORD")
{
return TestConfig(
email: email,
username: nonEmptyEnvironment("BURROW_UI_TEST_USERNAME") ?? email,
password: password,
mode: nonEmptyEnvironment("BURROW_UI_TEST_TAILNET_MODE")
.flatMap(TailnetLoginMode.init(rawValue:))
)
}
let configPath = environment["BURROW_UI_TEST_CONFIG_PATH"] ?? "/tmp/burrow-ui-test-config.json"
let configURL = URL(fileURLWithPath: configPath)
guard FileManager.default.fileExists(atPath: configURL.path) else {
throw XCTSkip(
"Missing UI test configuration. Expected env vars or config file at \(configURL.path)"
)
}
let data = try Data(contentsOf: configURL)
return try JSONDecoder().decode(TestConfig.self, from: data)
}
private func nonEmptyEnvironment(_ key: String) -> String? {
guard let value = ProcessInfo.processInfo.environment[key]?
.trimmingCharacters(in: .whitespacesAndNewlines),
!value.isEmpty
else {
return nil
}
return value
}
private func waitForFieldValue(
_ field: XCUIElement,
containing substring: String,
timeout: TimeInterval
) -> Bool {
let predicate = NSPredicate(format: "value CONTAINS %@", substring)
let expectation = XCTNSPredicateExpectation(predicate: predicate, object: field)
return XCTWaiter.wait(for: [expectation], timeout: timeout) == .completed
}
private func waitForButtonLabel(
_ button: XCUIElement,
equals expected: String,
timeout: TimeInterval
) -> Bool {
let predicate = NSPredicate(format: "label == %@", expected)
let expectation = XCTNSPredicateExpectation(predicate: predicate, object: button)
return XCTWaiter.wait(for: [expectation], timeout: timeout) == .completed
}
private func waitForTailnetSignedIn(in app: XCUIApplication, timeout: TimeInterval) -> Bool {
let button = app.buttons["tailnet-start-sign-in"]
let deadline = Date().addingTimeInterval(timeout)
repeat {
acceptAuthenticationPromptIfNeeded(in: app, timeout: 1)
if button.exists, button.label == "Signed In" {
return true
}
RunLoop.current.run(until: Date().addingTimeInterval(0.3))
} while Date() < deadline
return button.exists && button.label == "Signed In"
}
private func waitForAny(_ conditions: [() -> Bool], timeout: TimeInterval) -> Bool {
let deadline = Date().addingTimeInterval(timeout)
repeat {
if conditions.contains(where: { $0() }) {
return true
}
RunLoop.current.run(until: Date().addingTimeInterval(0.2))
} while Date() < deadline
return conditions.contains(where: { $0() })
}
private func firstExistingElement(
in app: XCUIApplication,
queries: [(XCUIApplication) -> XCUIElement],
timeout: TimeInterval
) -> XCUIElement {
firstExistingElement(from: queries.map { $0(app) }, timeout: timeout)
}
private func firstExistingElement(from candidates: [XCUIElement], timeout: TimeInterval) -> XCUIElement {
let deadline = Date().addingTimeInterval(timeout)
repeat {
for candidate in candidates where candidate.exists {
return candidate
}
RunLoop.current.run(until: Date().addingTimeInterval(0.2))
} while Date() < deadline
return candidates[0]
}
private func replaceText(in element: XCUIElement, with value: String) {
focus(element)
clearText(in: element)
element.typeText(value)
}
private func replaceSecureText(in element: XCUIElement, within app: XCUIApplication, with value: String) {
UIPasteboard.general.string = value
focus(element)
for revealMenu in [
{ element.doubleTap() },
{ element.press(forDuration: 1.2) },
] {
revealMenu()
let pasteButton = firstExistingElement(from: pasteCandidates(in: app), timeout: 3)
if pasteButton.exists {
pasteButton.tap()
return
}
}
focus(element)
element.typeText(value)
}
private func clearText(in element: XCUIElement) {
guard let currentValue = element.value as? String, !currentValue.isEmpty else {
return
}
let deleteSequence = String(repeating: XCUIKeyboardKey.delete.rawValue, count: currentValue.count)
element.typeText(deleteSequence)
}
private func focus(_ element: XCUIElement) {
element.coordinate(withNormalizedOffset: CGVector(dx: 0.5, dy: 0.5)).tap()
RunLoop.current.run(until: Date().addingTimeInterval(0.3))
}
private func pasteCandidates(in app: XCUIApplication) -> [XCUIElement] {
let pasteLabels = ["Paste", "Incolla", "Paste from Clipboard"]
return pasteLabels.flatMap { label in
[
app.menuItems[label],
app.buttons[label],
app.webViews.buttons[label],
app.descendants(matching: .button).matching(NSPredicate(format: "label == %@", label)).firstMatch,
app.descendants(matching: .menuItem).matching(NSPredicate(format: "label == %@", label)).firstMatch,
]
}
}
}

View file

@ -8,7 +8,6 @@
/* Begin PBXBuildFile section */
D00AA8972A4669BC005C8102 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = D00AA8962A4669BC005C8102 /* AppDelegate.swift */; };
D11000012F70000100112233 /* BurrowUITests.swift in Sources */ = {isa = PBXBuildFile; fileRef = D11000042F70000100112233 /* BurrowUITests.swift */; };
D020F65829E4A697002790F6 /* PacketTunnelProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = D020F65729E4A697002790F6 /* PacketTunnelProvider.swift */; };
D020F65D29E4A697002790F6 /* BurrowNetworkExtension.appex in Embed Foundation Extensions */ = {isa = PBXBuildFile; fileRef = D020F65329E4A697002790F6 /* BurrowNetworkExtension.appex */; settings = {ATTRIBUTES = (RemoveHeadersOnCopy, ); }; };
D03383AD2C8E67E300F7C44E /* SwiftProtobuf in Frameworks */ = {isa = PBXBuildFile; productRef = D078F7E22C8DA375008A8CEC /* SwiftProtobuf */; };
@ -43,20 +42,13 @@
D0D4E5A62C8D9E65007F820A /* BurrowCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D0D4E5312C8D996F007F820A /* BurrowCore.framework */; };
D0F4FAD32C8DC79C0068730A /* BurrowCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D0D4E5312C8D996F007F820A /* BurrowCore.framework */; };
D0F7594E2C8DAB6B00126CF3 /* GRPC in Frameworks */ = {isa = PBXBuildFile; productRef = D078F7E02C8DA375008A8CEC /* GRPC */; };
D0FA10012D10200100112233 /* burrow.pb.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0FA10032D10200100112233 /* burrow.pb.swift */; };
D0FA10022D10200100112233 /* burrow.grpc.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0FA10042D10200100112233 /* burrow.grpc.swift */; };
D0F759612C8DB24B00126CF3 /* grpc-swift-config.json in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4962C8D921A007F820A /* grpc-swift-config.json */; };
D0F759622C8DB24B00126CF3 /* swift-protobuf-config.json in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4972C8D921A007F820A /* swift-protobuf-config.json */; };
D0F7597E2C8DB30500126CF3 /* CGRPCZlib in Frameworks */ = {isa = PBXBuildFile; productRef = D0F7597D2C8DB30500126CF3 /* CGRPCZlib */; };
D0F7598D2C8DB3DA00126CF3 /* Client.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4E4992C8D921A007F820A /* Client.swift */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
D11000022F70000100112233 /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = D05B9F6A29E39EEC008CB1F9 /* Project object */;
proxyType = 1;
remoteGlobalIDString = D05B9F7129E39EEC008CB1F9;
remoteInfo = App;
};
D020F65B29E4A697002790F6 /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = D05B9F6A29E39EEC008CB1F9 /* Project object */;
@ -138,9 +130,6 @@
/* Begin PBXFileReference section */
D00117422B30348D00D87C25 /* Configuration.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = Configuration.xcconfig; sourceTree = "<group>"; };
D00AA8962A4669BC005C8102 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = "<group>"; };
D11000032F70000100112233 /* BurrowUITests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = BurrowUITests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };
D11000042F70000100112233 /* BurrowUITests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = BurrowUITests.swift; sourceTree = "<group>"; };
D11000052F70000100112233 /* UITests.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = UITests.xcconfig; sourceTree = "<group>"; };
D020F63D29E4A1FF002790F6 /* Identity.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Identity.xcconfig; sourceTree = "<group>"; };
D020F64029E4A1FF002790F6 /* Compiler.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Compiler.xcconfig; sourceTree = "<group>"; };
D020F64229E4A1FF002790F6 /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
@ -165,6 +154,8 @@
D0BCC6032A09535900AD070D /* libburrow.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libburrow.a; sourceTree = BUILT_PRODUCTS_DIR; };
D0BF09582C8E6789000D8DEC /* UI.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = UI.xcconfig; sourceTree = "<group>"; };
D0D4E4952C8D921A007F820A /* burrow.proto */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.protobuf; path = burrow.proto; sourceTree = "<group>"; };
D0D4E4962C8D921A007F820A /* grpc-swift-config.json */ = {isa = PBXFileReference; lastKnownFileType = text.json; path = "grpc-swift-config.json"; sourceTree = "<group>"; };
D0D4E4972C8D921A007F820A /* swift-protobuf-config.json */ = {isa = PBXFileReference; lastKnownFileType = text.json; path = "swift-protobuf-config.json"; sourceTree = "<group>"; };
D0D4E4992C8D921A007F820A /* Client.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Client.swift; sourceTree = "<group>"; };
D0D4E49A2C8D921A007F820A /* Logging.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Logging.swift; sourceTree = "<group>"; };
D0D4E49E2C8D921A007F820A /* Network.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Network.swift; sourceTree = "<group>"; };
@ -188,18 +179,9 @@
D0D4E58E2C8D9D0A007F820A /* Constants.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = Constants.h; sourceTree = "<group>"; };
D0D4E58F2C8D9D0A007F820A /* Constants.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Constants.swift; sourceTree = "<group>"; };
D0D4E5902C8D9D0A007F820A /* module.modulemap */ = {isa = PBXFileReference; lastKnownFileType = "sourcecode.module-map"; path = module.modulemap; sourceTree = "<group>"; };
D0FA10032D10200100112233 /* burrow.pb.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Generated/burrow.pb.swift; sourceTree = "<group>"; };
D0FA10042D10200100112233 /* burrow.grpc.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Generated/burrow.grpc.swift; sourceTree = "<group>"; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
D11000062F70000100112233 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
D020F65029E4A697002790F6 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
@ -261,7 +243,6 @@
D0D4E4F72C8D941D007F820A /* Framework.xcconfig */,
D020F64029E4A1FF002790F6 /* Compiler.xcconfig */,
D0D4E4F62C8D932D007F820A /* Debug.xcconfig */,
D11000052F70000100112233 /* UITests.xcconfig */,
D04A3E1D2BAF465F0043EC85 /* Version.xcconfig */,
D020F64229E4A1FF002790F6 /* Info.plist */,
D0D4E5912C8D9D0A007F820A /* Constants */,
@ -287,7 +268,6 @@
isa = PBXGroup;
children = (
D05B9F7429E39EEC008CB1F9 /* App */,
D11000072F70000100112233 /* AppUITests */,
D020F65629E4A697002790F6 /* NetworkExtension */,
D0D4E49C2C8D921A007F820A /* Core */,
D0D4E4AD2C8D921A007F820A /* UI */,
@ -301,7 +281,6 @@
isa = PBXGroup;
children = (
D05B9F7229E39EEC008CB1F9 /* Burrow.app */,
D11000032F70000100112233 /* BurrowUITests.xctest */,
D020F65329E4A697002790F6 /* BurrowNetworkExtension.appex */,
D0BCC6032A09535900AD070D /* libburrow.a */,
D0D4E5312C8D996F007F820A /* BurrowCore.framework */,
@ -324,14 +303,6 @@
path = App;
sourceTree = "<group>";
};
D11000072F70000100112233 /* AppUITests */ = {
isa = PBXGroup;
children = (
D11000042F70000100112233 /* BurrowUITests.swift */,
);
path = AppUITests;
sourceTree = "<group>";
};
D0B98FD729FDDB57004E7149 /* libburrow */ = {
isa = PBXGroup;
children = (
@ -346,8 +317,8 @@
isa = PBXGroup;
children = (
D0D4E4952C8D921A007F820A /* burrow.proto */,
D0FA10032D10200100112233 /* burrow.pb.swift */,
D0FA10042D10200100112233 /* burrow.grpc.swift */,
D0D4E4962C8D921A007F820A /* grpc-swift-config.json */,
D0D4E4972C8D921A007F820A /* swift-protobuf-config.json */,
);
path = Client;
sourceTree = "<group>";
@ -404,24 +375,6 @@
/* End PBXGroup section */
/* Begin PBXNativeTarget section */
D11000082F70000100112233 /* BurrowUITests */ = {
isa = PBXNativeTarget;
buildConfigurationList = D110000E2F70000100112233 /* Build configuration list for PBXNativeTarget "BurrowUITests" */;
buildPhases = (
D110000A2F70000100112233 /* Sources */,
D11000062F70000100112233 /* Frameworks */,
D11000092F70000100112233 /* Resources */,
);
buildRules = (
);
dependencies = (
D110000B2F70000100112233 /* PBXTargetDependency */,
);
name = BurrowUITests;
productName = BurrowUITests;
productReference = D11000032F70000100112233 /* BurrowUITests.xctest */;
productType = "com.apple.product-type.bundle.ui-testing";
};
D020F65229E4A697002790F6 /* NetworkExtension */ = {
isa = PBXNativeTarget;
buildConfigurationList = D020F65E29E4A697002790F6 /* Build configuration list for PBXNativeTarget "NetworkExtension" */;
@ -475,6 +428,8 @@
);
dependencies = (
D0F7598A2C8DB34200126CF3 /* PBXTargetDependency */,
D0F7595E2C8DB24400126CF3 /* PBXTargetDependency */,
D0F759602C8DB24400126CF3 /* PBXTargetDependency */,
);
name = Core;
packageProductDependencies = (
@ -537,10 +492,6 @@
LastSwiftUpdateCheck = 1600;
LastUpgradeCheck = 1520;
TargetAttributes = {
D11000082F70000100112233 = {
CreatedOnToolsVersion = 16.0;
TestTargetID = D05B9F7129E39EEC008CB1F9;
};
D020F65229E4A697002790F6 = {
CreatedOnToolsVersion = 14.3;
};
@ -573,7 +524,6 @@
projectRoot = "";
targets = (
D05B9F7129E39EEC008CB1F9 /* App */,
D11000082F70000100112233 /* BurrowUITests */,
D020F65229E4A697002790F6 /* NetworkExtension */,
D0D4E5502C8D9BF2007F820A /* UI */,
D0D4E5302C8D996F007F820A /* Core */,
@ -583,13 +533,6 @@
/* End PBXProject section */
/* Begin PBXResourcesBuildPhase section */
D11000092F70000100112233 /* Resources */ = {
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
D05B9F7029E39EEC008CB1F9 /* Resources */ = {
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
@ -653,14 +596,6 @@
/* End PBXShellScriptBuildPhase section */
/* Begin PBXSourcesBuildPhase section */
D110000A2F70000100112233 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
D11000012F70000100112233 /* BurrowUITests.swift in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
D020F64F29E4A697002790F6 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
@ -682,8 +617,8 @@
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
D0FA10012D10200100112233 /* burrow.pb.swift in Sources */,
D0FA10022D10200100112233 /* burrow.grpc.swift in Sources */,
D0F759612C8DB24B00126CF3 /* grpc-swift-config.json in Sources */,
D0F759622C8DB24B00126CF3 /* swift-protobuf-config.json in Sources */,
D0F7598D2C8DB3DA00126CF3 /* Client.swift in Sources */,
D0D4E56B2C8D9C2F007F820A /* Logging.swift in Sources */,
);
@ -719,11 +654,6 @@
/* End PBXSourcesBuildPhase section */
/* Begin PBXTargetDependency section */
D110000B2F70000100112233 /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
target = D05B9F7129E39EEC008CB1F9 /* App */;
targetProxy = D11000022F70000100112233 /* PBXContainerItemProxy */;
};
D020F65C29E4A697002790F6 /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
target = D020F65229E4A697002790F6 /* NetworkExtension */;
@ -759,6 +689,14 @@
target = D0D4E5302C8D996F007F820A /* Core */;
targetProxy = D0F4FAD12C8DC7960068730A /* PBXContainerItemProxy */;
};
D0F7595E2C8DB24400126CF3 /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
productRef = D0F7595D2C8DB24400126CF3 /* GRPCSwiftPlugin */;
};
D0F759602C8DB24400126CF3 /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
productRef = D0F7595F2C8DB24400126CF3 /* SwiftProtobufPlugin */;
};
D0F7598A2C8DB34200126CF3 /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
productRef = D0F759892C8DB34200126CF3 /* GRPC */;
@ -766,20 +704,6 @@
/* End PBXTargetDependency section */
/* Begin XCBuildConfiguration section */
D110000C2F70000100112233 /* Debug */ = {
isa = XCBuildConfiguration;
baseConfigurationReference = D11000052F70000100112233 /* UITests.xcconfig */;
buildSettings = {
};
name = Debug;
};
D110000D2F70000100112233 /* Release */ = {
isa = XCBuildConfiguration;
baseConfigurationReference = D11000052F70000100112233 /* UITests.xcconfig */;
buildSettings = {
};
name = Release;
};
D020F65F29E4A697002790F6 /* Debug */ = {
isa = XCBuildConfiguration;
baseConfigurationReference = D020F66229E4A6E5002790F6 /* NetworkExtension.xcconfig */;
@ -867,15 +791,6 @@
/* End XCBuildConfiguration section */
/* Begin XCConfigurationList section */
D110000E2F70000100112233 /* Build configuration list for PBXNativeTarget "BurrowUITests" */ = {
isa = XCConfigurationList;
buildConfigurations = (
D110000C2F70000100112233 /* Debug */,
D110000D2F70000100112233 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
D020F65E29E4A697002790F6 /* Build configuration list for PBXNativeTarget "NetworkExtension" */ = {
isa = XCConfigurationList;
buildConfigurations = (
@ -1006,6 +921,16 @@
package = D0B1D10E2C436152004B7823 /* XCRemoteSwiftPackageReference "swift-async-algorithms" */;
productName = AsyncAlgorithms;
};
D0F7595D2C8DB24400126CF3 /* GRPCSwiftPlugin */ = {
isa = XCSwiftPackageProductDependency;
package = D0D4E4822C8D8EF6007F820A /* XCRemoteSwiftPackageReference "grpc-swift" */;
productName = "plugin:GRPCSwiftPlugin";
};
D0F7595F2C8DB24400126CF3 /* SwiftProtobufPlugin */ = {
isa = XCSwiftPackageProductDependency;
package = D0D4E4852C8D8F29007F820A /* XCRemoteSwiftPackageReference "swift-protobuf" */;
productName = "plugin:SwiftProtobufPlugin";
};
D0F7597D2C8DB30500126CF3 /* CGRPCZlib */ = {
isa = XCSwiftPackageProductDependency;
package = D0D4E4822C8D8EF6007F820A /* XCRemoteSwiftPackageReference "grpc-swift" */;

View file

@ -28,20 +28,7 @@
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
shouldUseLaunchSchemeArgsEnv = "YES"
shouldAutocreateTestPlan = "NO">
<Testables>
<TestableReference
skipped = "NO"
parallelizable = "YES">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "D11000082F70000100112233"
BuildableName = "BurrowUITests.xctest"
BlueprintName = "BurrowUITests"
ReferencedContainer = "container:Burrow.xcodeproj">
</BuildableReference>
</TestableReference>
</Testables>
shouldAutocreateTestPlan = "YES">
</TestAction>
<LaunchAction
buildConfiguration = "Debug"

View file

@ -36,9 +36,13 @@ public enum Constants {
private static func fallbackContainerURL() -> Result<URL, any Swift.Error> {
#if targetEnvironment(simulator)
Result {
// The simulator app's Application Support path lives inside its sandbox container,
// so the host daemon cannot reach it. Use a shared host temp location instead.
let url = URL(filePath: "/tmp", directoryHint: .isDirectory)
let baseURL = try FileManager.default.url(
for: .applicationSupportDirectory,
in: .userDomainMask,
appropriateFor: nil,
create: true
)
let url = baseURL
.appending(component: bundleIdentifier, directoryHint: .isDirectory)
.appending(component: "SimulatorFallback", directoryHint: .isDirectory)
try FileManager.default.createDirectory(at: url, withIntermediateDirectories: true)

View file

@ -1,14 +0,0 @@
#include "Compiler.xcconfig"
SUPPORTED_PLATFORMS = iphonesimulator iphoneos
TARGETED_DEVICE_FAMILY[sdk=iphone*] = 1,2
PRODUCT_NAME = $(TARGET_NAME)
PRODUCT_BUNDLE_IDENTIFIER = $(APP_BUNDLE_IDENTIFIER).uitests
STRING_CATALOG_GENERATE_SYMBOLS = NO
SWIFT_EMIT_LOC_STRINGS = NO
ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES
LD_RUNPATH_SEARCH_PATHS = $(inherited) @executable_path/Frameworks @loader_path/Frameworks
TEST_TARGET_NAME = App

View file

@ -1,7 +1,5 @@
import Foundation
import GRPC
import NIOTransportServices
import SwiftProtobuf
public typealias TunnelClient = Burrow_TunnelAsyncClient
public typealias NetworksClient = Burrow_NetworksAsyncClient
@ -32,477 +30,3 @@ extension NetworksClient: Client {
self.init(channel: channel, defaultCallOptions: .init(), interceptors: .none)
}
}
public struct Burrow_TailnetDiscoverRequest: Sendable {
public var email: String = ""
public var unknownFields = SwiftProtobuf.UnknownStorage()
public init() {}
}
public struct Burrow_TailnetDiscoverResponse: Sendable {
public var domain: String = ""
public var authority: String = ""
public var oidcIssuer: String = ""
public var managed: Bool = false
public var unknownFields = SwiftProtobuf.UnknownStorage()
public init() {}
}
public struct Burrow_TailnetProbeRequest: Sendable {
public var authority: String = ""
public var unknownFields = SwiftProtobuf.UnknownStorage()
public init() {}
}
public struct Burrow_TailnetProbeResponse: Sendable {
public var authority: String = ""
public var statusCode: Int32 = 0
public var summary: String = ""
public var detail: String = ""
public var reachable: Bool = false
public var unknownFields = SwiftProtobuf.UnknownStorage()
public init() {}
}
public struct Burrow_TailnetLoginStartRequest: Sendable {
public var accountName: String = ""
public var identityName: String = ""
public var hostname: String = ""
public var authority: String = ""
public var unknownFields = SwiftProtobuf.UnknownStorage()
public init() {}
}
public struct Burrow_TailnetLoginStatusRequest: Sendable {
public var sessionID: String = ""
public var unknownFields = SwiftProtobuf.UnknownStorage()
public init() {}
}
public struct Burrow_TailnetLoginCancelRequest: Sendable {
public var sessionID: String = ""
public var unknownFields = SwiftProtobuf.UnknownStorage()
public init() {}
}
public struct Burrow_TailnetLoginStatusResponse: Sendable {
public var sessionID: String = ""
public var backendState: String = ""
public var authURL: String = ""
public var running: Bool = false
public var needsLogin: Bool = false
public var tailnetName: String = ""
public var magicDNSSuffix: String = ""
public var selfDNSName: String = ""
public var tailnetIPs: [String] = []
public var health: [String] = []
public var unknownFields = SwiftProtobuf.UnknownStorage()
public init() {}
}
public struct Burrow_TunnelPacket: Sendable {
public var payload = Data()
public var unknownFields = SwiftProtobuf.UnknownStorage()
public init() {}
}
extension Burrow_TailnetDiscoverRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
public static let protoMessageName: String = "burrow.TailnetDiscoverRequest"
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .same(proto: "email")
]
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
switch fieldNumber {
case 1: try decoder.decodeSingularStringField(value: &self.email)
default: break
}
}
}
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
if !self.email.isEmpty {
try visitor.visitSingularStringField(value: self.email, fieldNumber: 1)
}
try unknownFields.traverse(visitor: &visitor)
}
}
extension Burrow_TailnetDiscoverResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
public static let protoMessageName: String = "burrow.TailnetDiscoverResponse"
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .same(proto: "domain"),
2: .same(proto: "authority"),
3: .same(proto: "oidc_issuer"),
4: .same(proto: "managed"),
]
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
switch fieldNumber {
case 1: try decoder.decodeSingularStringField(value: &self.domain)
case 2: try decoder.decodeSingularStringField(value: &self.authority)
case 3: try decoder.decodeSingularStringField(value: &self.oidcIssuer)
case 4: try decoder.decodeSingularBoolField(value: &self.managed)
default: break
}
}
}
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
if !self.domain.isEmpty {
try visitor.visitSingularStringField(value: self.domain, fieldNumber: 1)
}
if !self.authority.isEmpty {
try visitor.visitSingularStringField(value: self.authority, fieldNumber: 2)
}
if !self.oidcIssuer.isEmpty {
try visitor.visitSingularStringField(value: self.oidcIssuer, fieldNumber: 3)
}
if self.managed {
try visitor.visitSingularBoolField(value: self.managed, fieldNumber: 4)
}
try unknownFields.traverse(visitor: &visitor)
}
}
extension Burrow_TailnetProbeRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
public static let protoMessageName: String = "burrow.TailnetProbeRequest"
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .same(proto: "authority")
]
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
switch fieldNumber {
case 1: try decoder.decodeSingularStringField(value: &self.authority)
default: break
}
}
}
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
if !self.authority.isEmpty {
try visitor.visitSingularStringField(value: self.authority, fieldNumber: 1)
}
try unknownFields.traverse(visitor: &visitor)
}
}
extension Burrow_TailnetProbeResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
public static let protoMessageName: String = "burrow.TailnetProbeResponse"
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .same(proto: "authority"),
2: .same(proto: "status_code"),
3: .same(proto: "summary"),
4: .same(proto: "detail"),
5: .same(proto: "reachable"),
]
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
switch fieldNumber {
case 1: try decoder.decodeSingularStringField(value: &self.authority)
case 2: try decoder.decodeSingularInt32Field(value: &self.statusCode)
case 3: try decoder.decodeSingularStringField(value: &self.summary)
case 4: try decoder.decodeSingularStringField(value: &self.detail)
case 5: try decoder.decodeSingularBoolField(value: &self.reachable)
default: break
}
}
}
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
if !self.authority.isEmpty {
try visitor.visitSingularStringField(value: self.authority, fieldNumber: 1)
}
if self.statusCode != 0 {
try visitor.visitSingularInt32Field(value: self.statusCode, fieldNumber: 2)
}
if !self.summary.isEmpty {
try visitor.visitSingularStringField(value: self.summary, fieldNumber: 3)
}
if !self.detail.isEmpty {
try visitor.visitSingularStringField(value: self.detail, fieldNumber: 4)
}
if self.reachable {
try visitor.visitSingularBoolField(value: self.reachable, fieldNumber: 5)
}
try unknownFields.traverse(visitor: &visitor)
}
}
extension Burrow_TailnetLoginStartRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
public static let protoMessageName: String = "burrow.TailnetLoginStartRequest"
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .standard(proto: "account_name"),
2: .standard(proto: "identity_name"),
3: .same(proto: "hostname"),
4: .same(proto: "authority"),
]
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
switch fieldNumber {
case 1: try decoder.decodeSingularStringField(value: &self.accountName)
case 2: try decoder.decodeSingularStringField(value: &self.identityName)
case 3: try decoder.decodeSingularStringField(value: &self.hostname)
case 4: try decoder.decodeSingularStringField(value: &self.authority)
default: break
}
}
}
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
if !self.accountName.isEmpty {
try visitor.visitSingularStringField(value: self.accountName, fieldNumber: 1)
}
if !self.identityName.isEmpty {
try visitor.visitSingularStringField(value: self.identityName, fieldNumber: 2)
}
if !self.hostname.isEmpty {
try visitor.visitSingularStringField(value: self.hostname, fieldNumber: 3)
}
if !self.authority.isEmpty {
try visitor.visitSingularStringField(value: self.authority, fieldNumber: 4)
}
try unknownFields.traverse(visitor: &visitor)
}
}
extension Burrow_TailnetLoginStatusRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
public static let protoMessageName: String = "burrow.TailnetLoginStatusRequest"
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .standard(proto: "session_id")
]
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
switch fieldNumber {
case 1: try decoder.decodeSingularStringField(value: &self.sessionID)
default: break
}
}
}
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
if !self.sessionID.isEmpty {
try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1)
}
try unknownFields.traverse(visitor: &visitor)
}
}
extension Burrow_TailnetLoginCancelRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
public static let protoMessageName: String = "burrow.TailnetLoginCancelRequest"
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .standard(proto: "session_id")
]
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
switch fieldNumber {
case 1: try decoder.decodeSingularStringField(value: &self.sessionID)
default: break
}
}
}
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
if !self.sessionID.isEmpty {
try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1)
}
try unknownFields.traverse(visitor: &visitor)
}
}
extension Burrow_TailnetLoginStatusResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
public static let protoMessageName: String = "burrow.TailnetLoginStatusResponse"
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .standard(proto: "session_id"),
2: .standard(proto: "backend_state"),
3: .standard(proto: "auth_url"),
4: .same(proto: "running"),
5: .standard(proto: "needs_login"),
6: .standard(proto: "tailnet_name"),
7: .standard(proto: "magic_dns_suffix"),
8: .standard(proto: "self_dns_name"),
9: .standard(proto: "tailnet_ips"),
10: .same(proto: "health"),
]
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
switch fieldNumber {
case 1: try decoder.decodeSingularStringField(value: &self.sessionID)
case 2: try decoder.decodeSingularStringField(value: &self.backendState)
case 3: try decoder.decodeSingularStringField(value: &self.authURL)
case 4: try decoder.decodeSingularBoolField(value: &self.running)
case 5: try decoder.decodeSingularBoolField(value: &self.needsLogin)
case 6: try decoder.decodeSingularStringField(value: &self.tailnetName)
case 7: try decoder.decodeSingularStringField(value: &self.magicDNSSuffix)
case 8: try decoder.decodeSingularStringField(value: &self.selfDNSName)
case 9: try decoder.decodeRepeatedStringField(value: &self.tailnetIPs)
case 10: try decoder.decodeRepeatedStringField(value: &self.health)
default: break
}
}
}
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
if !self.sessionID.isEmpty {
try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1)
}
if !self.backendState.isEmpty {
try visitor.visitSingularStringField(value: self.backendState, fieldNumber: 2)
}
if !self.authURL.isEmpty {
try visitor.visitSingularStringField(value: self.authURL, fieldNumber: 3)
}
if self.running {
try visitor.visitSingularBoolField(value: self.running, fieldNumber: 4)
}
if self.needsLogin {
try visitor.visitSingularBoolField(value: self.needsLogin, fieldNumber: 5)
}
if !self.tailnetName.isEmpty {
try visitor.visitSingularStringField(value: self.tailnetName, fieldNumber: 6)
}
if !self.magicDNSSuffix.isEmpty {
try visitor.visitSingularStringField(value: self.magicDNSSuffix, fieldNumber: 7)
}
if !self.selfDNSName.isEmpty {
try visitor.visitSingularStringField(value: self.selfDNSName, fieldNumber: 8)
}
if !self.tailnetIPs.isEmpty {
try visitor.visitRepeatedStringField(value: self.tailnetIPs, fieldNumber: 9)
}
if !self.health.isEmpty {
try visitor.visitRepeatedStringField(value: self.health, fieldNumber: 10)
}
try unknownFields.traverse(visitor: &visitor)
}
}
extension Burrow_TunnelPacket: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
public static let protoMessageName: String = "burrow.TunnelPacket"
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .same(proto: "payload")
]
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
switch fieldNumber {
case 1: try decoder.decodeSingularBytesField(value: &self.payload)
default: break
}
}
}
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
if !self.payload.isEmpty {
try visitor.visitSingularBytesField(value: self.payload, fieldNumber: 1)
}
try unknownFields.traverse(visitor: &visitor)
}
}
public struct TailnetClient: Client, GRPCClient {
public let channel: GRPCChannel
public var defaultCallOptions: CallOptions
public init(channel: any GRPCChannel) {
self.channel = channel
self.defaultCallOptions = .init()
}
public func discover(
_ request: Burrow_TailnetDiscoverRequest,
callOptions: CallOptions? = nil
) async throws -> Burrow_TailnetDiscoverResponse {
try await self.performAsyncUnaryCall(
path: "/burrow.TailnetControl/Discover",
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: []
)
}
public func probe(
_ request: Burrow_TailnetProbeRequest,
callOptions: CallOptions? = nil
) async throws -> Burrow_TailnetProbeResponse {
try await self.performAsyncUnaryCall(
path: "/burrow.TailnetControl/Probe",
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: []
)
}
public func loginStart(
_ request: Burrow_TailnetLoginStartRequest,
callOptions: CallOptions? = nil
) async throws -> Burrow_TailnetLoginStatusResponse {
try await self.performAsyncUnaryCall(
path: "/burrow.TailnetControl/LoginStart",
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: []
)
}
public func loginStatus(
_ request: Burrow_TailnetLoginStatusRequest,
callOptions: CallOptions? = nil
) async throws -> Burrow_TailnetLoginStatusResponse {
try await self.performAsyncUnaryCall(
path: "/burrow.TailnetControl/LoginStatus",
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: []
)
}
public func loginCancel(
_ request: Burrow_TailnetLoginCancelRequest,
callOptions: CallOptions? = nil
) async throws -> Burrow_Empty {
try await self.performAsyncUnaryCall(
path: "/burrow.TailnetControl/LoginCancel",
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: []
)
}
}
public struct TunnelPacketClient: Client, GRPCClient {
public let channel: GRPCChannel
public var defaultCallOptions: CallOptions
public init(channel: any GRPCChannel) {
self.channel = channel
self.defaultCallOptions = .init()
}
public func makeTunnelPacketsCall(
callOptions: CallOptions? = nil
) -> GRPCAsyncBidirectionalStreamingCall<Burrow_TunnelPacket, Burrow_TunnelPacket> {
self.makeAsyncBidirectionalStreamingCall(
path: "/burrow.Tunnel/TunnelPackets",
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: []
)
}
}

View file

@ -1,761 +0,0 @@
//
// DO NOT EDIT.
// swift-format-ignore-file
//
// Generated by the protocol buffer compiler.
// Source: burrow.proto
//
import GRPC
import NIO
import NIOConcurrencyHelpers
import SwiftProtobuf
/// Usage: instantiate `Burrow_TunnelClient`, then call methods of this protocol to make API calls.
public protocol Burrow_TunnelClientProtocol: GRPCClient {
var serviceName: String { get }
var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? { get }
func tunnelConfiguration(
_ request: Burrow_Empty,
callOptions: CallOptions?,
handler: @escaping (Burrow_TunnelConfigurationResponse) -> Void
) -> ServerStreamingCall<Burrow_Empty, Burrow_TunnelConfigurationResponse>
func tunnelStart(
_ request: Burrow_Empty,
callOptions: CallOptions?
) -> UnaryCall<Burrow_Empty, Burrow_Empty>
func tunnelStop(
_ request: Burrow_Empty,
callOptions: CallOptions?
) -> UnaryCall<Burrow_Empty, Burrow_Empty>
func tunnelStatus(
_ request: Burrow_Empty,
callOptions: CallOptions?,
handler: @escaping (Burrow_TunnelStatusResponse) -> Void
) -> ServerStreamingCall<Burrow_Empty, Burrow_TunnelStatusResponse>
}
extension Burrow_TunnelClientProtocol {
public var serviceName: String {
return "burrow.Tunnel"
}
/// Server streaming call to TunnelConfiguration
///
/// - Parameters:
/// - request: Request to send to TunnelConfiguration.
/// - callOptions: Call options.
/// - handler: A closure called when each response is received from the server.
/// - Returns: A `ServerStreamingCall` with futures for the metadata and status.
public func tunnelConfiguration(
_ request: Burrow_Empty,
callOptions: CallOptions? = nil,
handler: @escaping (Burrow_TunnelConfigurationResponse) -> Void
) -> ServerStreamingCall<Burrow_Empty, Burrow_TunnelConfigurationResponse> {
return self.makeServerStreamingCall(
path: Burrow_TunnelClientMetadata.Methods.tunnelConfiguration.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTunnelConfigurationInterceptors() ?? [],
handler: handler
)
}
/// Unary call to TunnelStart
///
/// - Parameters:
/// - request: Request to send to TunnelStart.
/// - callOptions: Call options.
/// - Returns: A `UnaryCall` with futures for the metadata, status and response.
public func tunnelStart(
_ request: Burrow_Empty,
callOptions: CallOptions? = nil
) -> UnaryCall<Burrow_Empty, Burrow_Empty> {
return self.makeUnaryCall(
path: Burrow_TunnelClientMetadata.Methods.tunnelStart.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTunnelStartInterceptors() ?? []
)
}
/// Unary call to TunnelStop
///
/// - Parameters:
/// - request: Request to send to TunnelStop.
/// - callOptions: Call options.
/// - Returns: A `UnaryCall` with futures for the metadata, status and response.
public func tunnelStop(
_ request: Burrow_Empty,
callOptions: CallOptions? = nil
) -> UnaryCall<Burrow_Empty, Burrow_Empty> {
return self.makeUnaryCall(
path: Burrow_TunnelClientMetadata.Methods.tunnelStop.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTunnelStopInterceptors() ?? []
)
}
/// Server streaming call to TunnelStatus
///
/// - Parameters:
/// - request: Request to send to TunnelStatus.
/// - callOptions: Call options.
/// - handler: A closure called when each response is received from the server.
/// - Returns: A `ServerStreamingCall` with futures for the metadata and status.
public func tunnelStatus(
_ request: Burrow_Empty,
callOptions: CallOptions? = nil,
handler: @escaping (Burrow_TunnelStatusResponse) -> Void
) -> ServerStreamingCall<Burrow_Empty, Burrow_TunnelStatusResponse> {
return self.makeServerStreamingCall(
path: Burrow_TunnelClientMetadata.Methods.tunnelStatus.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTunnelStatusInterceptors() ?? [],
handler: handler
)
}
}
@available(*, deprecated)
extension Burrow_TunnelClient: @unchecked Sendable {}
@available(*, deprecated, renamed: "Burrow_TunnelNIOClient")
public final class Burrow_TunnelClient: Burrow_TunnelClientProtocol {
private let lock = Lock()
private var _defaultCallOptions: CallOptions
private var _interceptors: Burrow_TunnelClientInterceptorFactoryProtocol?
public let channel: GRPCChannel
public var defaultCallOptions: CallOptions {
get { self.lock.withLock { return self._defaultCallOptions } }
set { self.lock.withLockVoid { self._defaultCallOptions = newValue } }
}
public var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? {
get { self.lock.withLock { return self._interceptors } }
set { self.lock.withLockVoid { self._interceptors = newValue } }
}
/// Creates a client for the burrow.Tunnel service.
///
/// - Parameters:
/// - channel: `GRPCChannel` to the service host.
/// - defaultCallOptions: Options to use for each service call if the user doesn't provide them.
/// - interceptors: A factory providing interceptors for each RPC.
public init(
channel: GRPCChannel,
defaultCallOptions: CallOptions = CallOptions(),
interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? = nil
) {
self.channel = channel
self._defaultCallOptions = defaultCallOptions
self._interceptors = interceptors
}
}
public struct Burrow_TunnelNIOClient: Burrow_TunnelClientProtocol {
public var channel: GRPCChannel
public var defaultCallOptions: CallOptions
public var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol?
/// Creates a client for the burrow.Tunnel service.
///
/// - Parameters:
/// - channel: `GRPCChannel` to the service host.
/// - defaultCallOptions: Options to use for each service call if the user doesn't provide them.
/// - interceptors: A factory providing interceptors for each RPC.
public init(
channel: GRPCChannel,
defaultCallOptions: CallOptions = CallOptions(),
interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? = nil
) {
self.channel = channel
self.defaultCallOptions = defaultCallOptions
self.interceptors = interceptors
}
}
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
public protocol Burrow_TunnelAsyncClientProtocol: GRPCClient {
static var serviceDescriptor: GRPCServiceDescriptor { get }
var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? { get }
func makeTunnelConfigurationCall(
_ request: Burrow_Empty,
callOptions: CallOptions?
) -> GRPCAsyncServerStreamingCall<Burrow_Empty, Burrow_TunnelConfigurationResponse>
func makeTunnelStartCall(
_ request: Burrow_Empty,
callOptions: CallOptions?
) -> GRPCAsyncUnaryCall<Burrow_Empty, Burrow_Empty>
func makeTunnelStopCall(
_ request: Burrow_Empty,
callOptions: CallOptions?
) -> GRPCAsyncUnaryCall<Burrow_Empty, Burrow_Empty>
func makeTunnelStatusCall(
_ request: Burrow_Empty,
callOptions: CallOptions?
) -> GRPCAsyncServerStreamingCall<Burrow_Empty, Burrow_TunnelStatusResponse>
}
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
extension Burrow_TunnelAsyncClientProtocol {
public static var serviceDescriptor: GRPCServiceDescriptor {
return Burrow_TunnelClientMetadata.serviceDescriptor
}
public var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? {
return nil
}
public func makeTunnelConfigurationCall(
_ request: Burrow_Empty,
callOptions: CallOptions? = nil
) -> GRPCAsyncServerStreamingCall<Burrow_Empty, Burrow_TunnelConfigurationResponse> {
return self.makeAsyncServerStreamingCall(
path: Burrow_TunnelClientMetadata.Methods.tunnelConfiguration.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTunnelConfigurationInterceptors() ?? []
)
}
public func makeTunnelStartCall(
_ request: Burrow_Empty,
callOptions: CallOptions? = nil
) -> GRPCAsyncUnaryCall<Burrow_Empty, Burrow_Empty> {
return self.makeAsyncUnaryCall(
path: Burrow_TunnelClientMetadata.Methods.tunnelStart.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTunnelStartInterceptors() ?? []
)
}
public func makeTunnelStopCall(
_ request: Burrow_Empty,
callOptions: CallOptions? = nil
) -> GRPCAsyncUnaryCall<Burrow_Empty, Burrow_Empty> {
return self.makeAsyncUnaryCall(
path: Burrow_TunnelClientMetadata.Methods.tunnelStop.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTunnelStopInterceptors() ?? []
)
}
public func makeTunnelStatusCall(
_ request: Burrow_Empty,
callOptions: CallOptions? = nil
) -> GRPCAsyncServerStreamingCall<Burrow_Empty, Burrow_TunnelStatusResponse> {
return self.makeAsyncServerStreamingCall(
path: Burrow_TunnelClientMetadata.Methods.tunnelStatus.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTunnelStatusInterceptors() ?? []
)
}
}
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
extension Burrow_TunnelAsyncClientProtocol {
public func tunnelConfiguration(
_ request: Burrow_Empty,
callOptions: CallOptions? = nil
) -> GRPCAsyncResponseStream<Burrow_TunnelConfigurationResponse> {
return self.performAsyncServerStreamingCall(
path: Burrow_TunnelClientMetadata.Methods.tunnelConfiguration.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTunnelConfigurationInterceptors() ?? []
)
}
public func tunnelStart(
_ request: Burrow_Empty,
callOptions: CallOptions? = nil
) async throws -> Burrow_Empty {
return try await self.performAsyncUnaryCall(
path: Burrow_TunnelClientMetadata.Methods.tunnelStart.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTunnelStartInterceptors() ?? []
)
}
public func tunnelStop(
_ request: Burrow_Empty,
callOptions: CallOptions? = nil
) async throws -> Burrow_Empty {
return try await self.performAsyncUnaryCall(
path: Burrow_TunnelClientMetadata.Methods.tunnelStop.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTunnelStopInterceptors() ?? []
)
}
public func tunnelStatus(
_ request: Burrow_Empty,
callOptions: CallOptions? = nil
) -> GRPCAsyncResponseStream<Burrow_TunnelStatusResponse> {
return self.performAsyncServerStreamingCall(
path: Burrow_TunnelClientMetadata.Methods.tunnelStatus.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTunnelStatusInterceptors() ?? []
)
}
}
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
public struct Burrow_TunnelAsyncClient: Burrow_TunnelAsyncClientProtocol {
public var channel: GRPCChannel
public var defaultCallOptions: CallOptions
public var interceptors: Burrow_TunnelClientInterceptorFactoryProtocol?
public init(
channel: GRPCChannel,
defaultCallOptions: CallOptions = CallOptions(),
interceptors: Burrow_TunnelClientInterceptorFactoryProtocol? = nil
) {
self.channel = channel
self.defaultCallOptions = defaultCallOptions
self.interceptors = interceptors
}
}
public protocol Burrow_TunnelClientInterceptorFactoryProtocol: Sendable {
/// - Returns: Interceptors to use when invoking 'tunnelConfiguration'.
func makeTunnelConfigurationInterceptors() -> [ClientInterceptor<Burrow_Empty, Burrow_TunnelConfigurationResponse>]
/// - Returns: Interceptors to use when invoking 'tunnelStart'.
func makeTunnelStartInterceptors() -> [ClientInterceptor<Burrow_Empty, Burrow_Empty>]
/// - Returns: Interceptors to use when invoking 'tunnelStop'.
func makeTunnelStopInterceptors() -> [ClientInterceptor<Burrow_Empty, Burrow_Empty>]
/// - Returns: Interceptors to use when invoking 'tunnelStatus'.
func makeTunnelStatusInterceptors() -> [ClientInterceptor<Burrow_Empty, Burrow_TunnelStatusResponse>]
}
public enum Burrow_TunnelClientMetadata {
public static let serviceDescriptor = GRPCServiceDescriptor(
name: "Tunnel",
fullName: "burrow.Tunnel",
methods: [
Burrow_TunnelClientMetadata.Methods.tunnelConfiguration,
Burrow_TunnelClientMetadata.Methods.tunnelStart,
Burrow_TunnelClientMetadata.Methods.tunnelStop,
Burrow_TunnelClientMetadata.Methods.tunnelStatus,
]
)
public enum Methods {
public static let tunnelConfiguration = GRPCMethodDescriptor(
name: "TunnelConfiguration",
path: "/burrow.Tunnel/TunnelConfiguration",
type: GRPCCallType.serverStreaming
)
public static let tunnelStart = GRPCMethodDescriptor(
name: "TunnelStart",
path: "/burrow.Tunnel/TunnelStart",
type: GRPCCallType.unary
)
public static let tunnelStop = GRPCMethodDescriptor(
name: "TunnelStop",
path: "/burrow.Tunnel/TunnelStop",
type: GRPCCallType.unary
)
public static let tunnelStatus = GRPCMethodDescriptor(
name: "TunnelStatus",
path: "/burrow.Tunnel/TunnelStatus",
type: GRPCCallType.serverStreaming
)
}
}
/// Usage: instantiate `Burrow_NetworksClient`, then call methods of this protocol to make API calls.
public protocol Burrow_NetworksClientProtocol: GRPCClient {
var serviceName: String { get }
var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? { get }
func networkAdd(
_ request: Burrow_Network,
callOptions: CallOptions?
) -> UnaryCall<Burrow_Network, Burrow_Empty>
func networkList(
_ request: Burrow_Empty,
callOptions: CallOptions?,
handler: @escaping (Burrow_NetworkListResponse) -> Void
) -> ServerStreamingCall<Burrow_Empty, Burrow_NetworkListResponse>
func networkReorder(
_ request: Burrow_NetworkReorderRequest,
callOptions: CallOptions?
) -> UnaryCall<Burrow_NetworkReorderRequest, Burrow_Empty>
func networkDelete(
_ request: Burrow_NetworkDeleteRequest,
callOptions: CallOptions?
) -> UnaryCall<Burrow_NetworkDeleteRequest, Burrow_Empty>
}
/// Default implementations: each RPC forwards to the underlying `GRPCClient`
/// call factory, filling in `defaultCallOptions` and interceptors when the
/// caller does not provide them.
extension Burrow_NetworksClientProtocol {
  public var serviceName: String {
    return "burrow.Networks"
  }

  /// Unary call to NetworkAdd
  ///
  /// - Parameters:
  ///   - request: Request to send to NetworkAdd.
  ///   - callOptions: Call options.
  /// - Returns: A `UnaryCall` with futures for the metadata, status and response.
  public func networkAdd(
    _ request: Burrow_Network,
    callOptions: CallOptions? = nil
  ) -> UnaryCall<Burrow_Network, Burrow_Empty> {
    return self.makeUnaryCall(
      path: Burrow_NetworksClientMetadata.Methods.networkAdd.path,
      request: request,
      callOptions: callOptions ?? self.defaultCallOptions,
      interceptors: self.interceptors?.makeNetworkAddInterceptors() ?? []
    )
  }

  /// Server streaming call to NetworkList
  ///
  /// - Parameters:
  ///   - request: Request to send to NetworkList.
  ///   - callOptions: Call options.
  ///   - handler: A closure called when each response is received from the server.
  /// - Returns: A `ServerStreamingCall` with futures for the metadata and status.
  public func networkList(
    _ request: Burrow_Empty,
    callOptions: CallOptions? = nil,
    handler: @escaping (Burrow_NetworkListResponse) -> Void
  ) -> ServerStreamingCall<Burrow_Empty, Burrow_NetworkListResponse> {
    return self.makeServerStreamingCall(
      path: Burrow_NetworksClientMetadata.Methods.networkList.path,
      request: request,
      callOptions: callOptions ?? self.defaultCallOptions,
      interceptors: self.interceptors?.makeNetworkListInterceptors() ?? [],
      handler: handler
    )
  }

  /// Unary call to NetworkReorder
  ///
  /// - Parameters:
  ///   - request: Request to send to NetworkReorder.
  ///   - callOptions: Call options.
  /// - Returns: A `UnaryCall` with futures for the metadata, status and response.
  public func networkReorder(
    _ request: Burrow_NetworkReorderRequest,
    callOptions: CallOptions? = nil
  ) -> UnaryCall<Burrow_NetworkReorderRequest, Burrow_Empty> {
    return self.makeUnaryCall(
      path: Burrow_NetworksClientMetadata.Methods.networkReorder.path,
      request: request,
      callOptions: callOptions ?? self.defaultCallOptions,
      interceptors: self.interceptors?.makeNetworkReorderInterceptors() ?? []
    )
  }

  /// Unary call to NetworkDelete
  ///
  /// - Parameters:
  ///   - request: Request to send to NetworkDelete.
  ///   - callOptions: Call options.
  /// - Returns: A `UnaryCall` with futures for the metadata, status and response.
  public func networkDelete(
    _ request: Burrow_NetworkDeleteRequest,
    callOptions: CallOptions? = nil
  ) -> UnaryCall<Burrow_NetworkDeleteRequest, Burrow_Empty> {
    return self.makeUnaryCall(
      path: Burrow_NetworksClientMetadata.Methods.networkDelete.path,
      request: request,
      callOptions: callOptions ?? self.defaultCallOptions,
      interceptors: self.interceptors?.makeNetworkDeleteInterceptors() ?? []
    )
  }
}
// Deprecated class-based client is declared `@unchecked Sendable`: its mutable
// state is guarded by the internal `Lock` (see the accessors in the class).
@available(*, deprecated)
extension Burrow_NetworksClient: @unchecked Sendable {}
/// Deprecated reference-type client for `burrow.Networks`; prefer
/// `Burrow_NetworksNIOClient`. Mutable configuration is lock-protected so the
/// instance can be shared across threads.
@available(*, deprecated, renamed: "Burrow_NetworksNIOClient")
public final class Burrow_NetworksClient: Burrow_NetworksClientProtocol {
  // Serializes access to `_defaultCallOptions` and `_interceptors`.
  private let lock = Lock()
  private var _defaultCallOptions: CallOptions
  private var _interceptors: Burrow_NetworksClientInterceptorFactoryProtocol?
  public let channel: GRPCChannel

  // Lock-guarded: options applied when a call supplies none.
  public var defaultCallOptions: CallOptions {
    get { self.lock.withLock { return self._defaultCallOptions } }
    set { self.lock.withLockVoid { self._defaultCallOptions = newValue } }
  }

  // Lock-guarded: interceptor factory used by each RPC.
  public var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? {
    get { self.lock.withLock { return self._interceptors } }
    set { self.lock.withLockVoid { self._interceptors = newValue } }
  }

  /// Creates a client for the burrow.Networks service.
  ///
  /// - Parameters:
  ///   - channel: `GRPCChannel` to the service host.
  ///   - defaultCallOptions: Options to use for each service call if the user doesn't provide them.
  ///   - interceptors: A factory providing interceptors for each RPC.
  public init(
    channel: GRPCChannel,
    defaultCallOptions: CallOptions = CallOptions(),
    interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? = nil
  ) {
    self.channel = channel
    self._defaultCallOptions = defaultCallOptions
    self._interceptors = interceptors
  }
}
/// Value-type (struct) client for `burrow.Networks`; the preferred
/// replacement for the deprecated `Burrow_NetworksClient` class.
public struct Burrow_NetworksNIOClient: Burrow_NetworksClientProtocol {
  public var channel: GRPCChannel
  public var defaultCallOptions: CallOptions
  public var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol?

  /// Creates a client for the burrow.Networks service.
  ///
  /// - Parameters:
  ///   - channel: `GRPCChannel` to the service host.
  ///   - defaultCallOptions: Options to use for each service call if the user doesn't provide them.
  ///   - interceptors: A factory providing interceptors for each RPC.
  public init(
    channel: GRPCChannel,
    defaultCallOptions: CallOptions = CallOptions(),
    interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? = nil
  ) {
    self.channel = channel
    self.defaultCallOptions = defaultCallOptions
    self.interceptors = interceptors
  }
}
/// async/await client surface for `burrow.Networks`: one `make…Call` factory
/// per RPC, returning an async call object rather than futures.
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
public protocol Burrow_NetworksAsyncClientProtocol: GRPCClient {
  /// Descriptor for the whole service (see `Burrow_NetworksClientMetadata`).
  static var serviceDescriptor: GRPCServiceDescriptor { get }
  /// Optional factory supplying interceptors for each RPC; `nil` means none.
  var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? { get }

  func makeNetworkAddCall(
    _ request: Burrow_Network,
    callOptions: CallOptions?
  ) -> GRPCAsyncUnaryCall<Burrow_Network, Burrow_Empty>

  func makeNetworkListCall(
    _ request: Burrow_Empty,
    callOptions: CallOptions?
  ) -> GRPCAsyncServerStreamingCall<Burrow_Empty, Burrow_NetworkListResponse>

  func makeNetworkReorderCall(
    _ request: Burrow_NetworkReorderRequest,
    callOptions: CallOptions?
  ) -> GRPCAsyncUnaryCall<Burrow_NetworkReorderRequest, Burrow_Empty>

  func makeNetworkDeleteCall(
    _ request: Burrow_NetworkDeleteRequest,
    callOptions: CallOptions?
  ) -> GRPCAsyncUnaryCall<Burrow_NetworkDeleteRequest, Burrow_Empty>
}
/// Default implementations of the async call factories: each forwards to the
/// underlying async call machinery with `defaultCallOptions` and interceptors.
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
extension Burrow_NetworksAsyncClientProtocol {
  public static var serviceDescriptor: GRPCServiceDescriptor {
    return Burrow_NetworksClientMetadata.serviceDescriptor
  }

  // No interceptors by default.
  public var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? {
    return nil
  }

  public func makeNetworkAddCall(
    _ request: Burrow_Network,
    callOptions: CallOptions? = nil
  ) -> GRPCAsyncUnaryCall<Burrow_Network, Burrow_Empty> {
    return self.makeAsyncUnaryCall(
      path: Burrow_NetworksClientMetadata.Methods.networkAdd.path,
      request: request,
      callOptions: callOptions ?? self.defaultCallOptions,
      interceptors: self.interceptors?.makeNetworkAddInterceptors() ?? []
    )
  }

  public func makeNetworkListCall(
    _ request: Burrow_Empty,
    callOptions: CallOptions? = nil
  ) -> GRPCAsyncServerStreamingCall<Burrow_Empty, Burrow_NetworkListResponse> {
    return self.makeAsyncServerStreamingCall(
      path: Burrow_NetworksClientMetadata.Methods.networkList.path,
      request: request,
      callOptions: callOptions ?? self.defaultCallOptions,
      interceptors: self.interceptors?.makeNetworkListInterceptors() ?? []
    )
  }

  public func makeNetworkReorderCall(
    _ request: Burrow_NetworkReorderRequest,
    callOptions: CallOptions? = nil
  ) -> GRPCAsyncUnaryCall<Burrow_NetworkReorderRequest, Burrow_Empty> {
    return self.makeAsyncUnaryCall(
      path: Burrow_NetworksClientMetadata.Methods.networkReorder.path,
      request: request,
      callOptions: callOptions ?? self.defaultCallOptions,
      interceptors: self.interceptors?.makeNetworkReorderInterceptors() ?? []
    )
  }

  public func makeNetworkDeleteCall(
    _ request: Burrow_NetworkDeleteRequest,
    callOptions: CallOptions? = nil
  ) -> GRPCAsyncUnaryCall<Burrow_NetworkDeleteRequest, Burrow_Empty> {
    return self.makeAsyncUnaryCall(
      path: Burrow_NetworksClientMetadata.Methods.networkDelete.path,
      request: request,
      callOptions: callOptions ?? self.defaultCallOptions,
      interceptors: self.interceptors?.makeNetworkDeleteInterceptors() ?? []
    )
  }
}
/// Convenience async wrappers: unary RPCs return the response directly
/// (throwing on failure); the server-streaming RPC returns a response stream.
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
extension Burrow_NetworksAsyncClientProtocol {
  public func networkAdd(
    _ request: Burrow_Network,
    callOptions: CallOptions? = nil
  ) async throws -> Burrow_Empty {
    return try await self.performAsyncUnaryCall(
      path: Burrow_NetworksClientMetadata.Methods.networkAdd.path,
      request: request,
      callOptions: callOptions ?? self.defaultCallOptions,
      interceptors: self.interceptors?.makeNetworkAddInterceptors() ?? []
    )
  }

  // Returns an async sequence of responses; iteration drives the call.
  public func networkList(
    _ request: Burrow_Empty,
    callOptions: CallOptions? = nil
  ) -> GRPCAsyncResponseStream<Burrow_NetworkListResponse> {
    return self.performAsyncServerStreamingCall(
      path: Burrow_NetworksClientMetadata.Methods.networkList.path,
      request: request,
      callOptions: callOptions ?? self.defaultCallOptions,
      interceptors: self.interceptors?.makeNetworkListInterceptors() ?? []
    )
  }

  public func networkReorder(
    _ request: Burrow_NetworkReorderRequest,
    callOptions: CallOptions? = nil
  ) async throws -> Burrow_Empty {
    return try await self.performAsyncUnaryCall(
      path: Burrow_NetworksClientMetadata.Methods.networkReorder.path,
      request: request,
      callOptions: callOptions ?? self.defaultCallOptions,
      interceptors: self.interceptors?.makeNetworkReorderInterceptors() ?? []
    )
  }

  public func networkDelete(
    _ request: Burrow_NetworkDeleteRequest,
    callOptions: CallOptions? = nil
  ) async throws -> Burrow_Empty {
    return try await self.performAsyncUnaryCall(
      path: Burrow_NetworksClientMetadata.Methods.networkDelete.path,
      request: request,
      callOptions: callOptions ?? self.defaultCallOptions,
      interceptors: self.interceptors?.makeNetworkDeleteInterceptors() ?? []
    )
  }
}
/// Concrete value-type async client for `burrow.Networks`.
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
public struct Burrow_NetworksAsyncClient: Burrow_NetworksAsyncClientProtocol {
  public var channel: GRPCChannel
  public var defaultCallOptions: CallOptions
  public var interceptors: Burrow_NetworksClientInterceptorFactoryProtocol?

  /// - Parameters:
  ///   - channel: `GRPCChannel` to the service host.
  ///   - defaultCallOptions: Options applied when a call provides none.
  ///   - interceptors: Optional factory providing interceptors for each RPC.
  public init(
    channel: GRPCChannel,
    defaultCallOptions: CallOptions = CallOptions(),
    interceptors: Burrow_NetworksClientInterceptorFactoryProtocol? = nil
  ) {
    self.channel = channel
    self.defaultCallOptions = defaultCallOptions
    self.interceptors = interceptors
  }
}
/// Factory producing the client interceptor chain for each `burrow.Networks` RPC.
public protocol Burrow_NetworksClientInterceptorFactoryProtocol: Sendable {
  /// - Returns: Interceptors to use when invoking 'networkAdd'.
  func makeNetworkAddInterceptors() -> [ClientInterceptor<Burrow_Network, Burrow_Empty>]

  /// - Returns: Interceptors to use when invoking 'networkList'.
  func makeNetworkListInterceptors() -> [ClientInterceptor<Burrow_Empty, Burrow_NetworkListResponse>]

  /// - Returns: Interceptors to use when invoking 'networkReorder'.
  func makeNetworkReorderInterceptors() -> [ClientInterceptor<Burrow_NetworkReorderRequest, Burrow_Empty>]

  /// - Returns: Interceptors to use when invoking 'networkDelete'.
  func makeNetworkDeleteInterceptors() -> [ClientInterceptor<Burrow_NetworkDeleteRequest, Burrow_Empty>]
}
/// Generated metadata for the `burrow.Networks` service: the service
/// descriptor plus one `GRPCMethodDescriptor` per RPC.
public enum Burrow_NetworksClientMetadata {
  public static let serviceDescriptor = GRPCServiceDescriptor(
    name: "Networks",
    fullName: "burrow.Networks",
    methods: [
      Burrow_NetworksClientMetadata.Methods.networkAdd,
      Burrow_NetworksClientMetadata.Methods.networkList,
      Burrow_NetworksClientMetadata.Methods.networkReorder,
      Burrow_NetworksClientMetadata.Methods.networkDelete,
    ]
  )

  /// Per-method descriptors: proto method name, full URL path, and call type.
  public enum Methods {
    public static let networkAdd = GRPCMethodDescriptor(
      name: "NetworkAdd",
      path: "/burrow.Networks/NetworkAdd",
      type: GRPCCallType.unary
    )

    // Server-streaming: the server may emit multiple list responses.
    public static let networkList = GRPCMethodDescriptor(
      name: "NetworkList",
      path: "/burrow.Networks/NetworkList",
      type: GRPCCallType.serverStreaming
    )

    public static let networkReorder = GRPCMethodDescriptor(
      name: "NetworkReorder",
      path: "/burrow.Networks/NetworkReorder",
      type: GRPCCallType.unary
    )

    public static let networkDelete = GRPCMethodDescriptor(
      name: "NetworkDelete",
      path: "/burrow.Networks/NetworkDelete",
      type: GRPCCallType.unary
    )
  }
}

View file

@ -1,598 +0,0 @@
// DO NOT EDIT.
// swift-format-ignore-file
// swiftlint:disable all
//
// Generated by the Swift generator plugin for the protocol buffer compiler.
// Source: burrow.proto
//
// For information on using the generated types, please see the documentation:
// https://github.com/apple/swift-protobuf/
import Foundation
import SwiftProtobuf
// If the compiler emits an error on this type, it is because this file
// was generated by a version of the `protoc` Swift plug-in that is
// incompatible with the version of SwiftProtobuf to which you are linking.
// Please ensure that you are building against the same version of the API
// that was used to generate this file.
// Compile-time guard: fails to build if the linked SwiftProtobuf runtime does
// not support protobuf API version 2, which this file was generated against.
fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck {
  struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {}
  typealias Version = _2
}
/// Proto enum `burrow.NetworkType` (cases: `WireGuard` = 0, `Tailnet` = 1).
/// Unknown wire values are preserved in the `UNRECOGNIZED` case.
public enum Burrow_NetworkType: SwiftProtobuf.Enum, Swift.CaseIterable {
  public typealias RawValue = Int
  case wireGuard // = 0
  case tailnet // = 1
  case UNRECOGNIZED(Int)

  /// Defaults to the zero-valued case, `wireGuard`.
  public init() {
    self = .wireGuard
  }

  /// Maps a wire value to a case; never fails — unrecognized values are kept.
  public init?(rawValue: Int) {
    if rawValue == 0 {
      self = .wireGuard
    } else if rawValue == 1 {
      self = .tailnet
    } else {
      self = .UNRECOGNIZED(rawValue)
    }
  }

  /// The protobuf wire value for this case.
  public var rawValue: Int {
    switch self {
    case .UNRECOGNIZED(let wireValue): return wireValue
    case .wireGuard: return 0
    case .tailnet: return 1
    }
  }

  // The compiler won't synthesize support with the UNRECOGNIZED case.
  public static let allCases: [Burrow_NetworkType] = [.wireGuard, .tailnet]
}
/// Proto enum `burrow.State` (cases: `Stopped` = 0, `Running` = 1).
/// Unknown wire values are preserved in the `UNRECOGNIZED` case.
public enum Burrow_State: SwiftProtobuf.Enum, Swift.CaseIterable {
  public typealias RawValue = Int
  case stopped // = 0
  case running // = 1
  case UNRECOGNIZED(Int)

  /// Defaults to the zero-valued case, `stopped`.
  public init() {
    self = .stopped
  }

  /// Maps a wire value to a case; never fails — unrecognized values are kept.
  public init?(rawValue: Int) {
    if rawValue == 0 {
      self = .stopped
    } else if rawValue == 1 {
      self = .running
    } else {
      self = .UNRECOGNIZED(rawValue)
    }
  }

  /// The protobuf wire value for this case.
  public var rawValue: Int {
    switch self {
    case .UNRECOGNIZED(let wireValue): return wireValue
    case .stopped: return 0
    case .running: return 1
    }
  }

  // The compiler won't synthesize support with the UNRECOGNIZED case.
  public static let allCases: [Burrow_State] = [.stopped, .running]
}
/// Request message for the `NetworkReorder` RPC: a network `id` and a target `index`.
public struct Burrow_NetworkReorderRequest: Sendable {
  // SwiftProtobuf.Message conformance is added in an extension below. See the
  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
  // methods supported on all messages.

  public var id: Int32 = 0

  public var index: Int32 = 0

  public var unknownFields = SwiftProtobuf.UnknownStorage()

  public init() {}
}
/// Proto message `burrow.WireGuardPeer`: a peer `endpoint` and its `subnet` list.
public struct Burrow_WireGuardPeer: Sendable {
  // SwiftProtobuf.Message conformance is added in an extension below. See the
  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
  // methods supported on all messages.

  public var endpoint: String = String()

  public var subnet: [String] = []

  public var unknownFields = SwiftProtobuf.UnknownStorage()

  public init() {}
}
/// Proto message `burrow.WireGuardNetwork`: interface `address`, `dns`, and peers.
public struct Burrow_WireGuardNetwork: Sendable {
  // SwiftProtobuf.Message conformance is added in an extension below. See the
  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
  // methods supported on all messages.

  public var address: String = String()

  public var dns: String = String()

  public var peer: [Burrow_WireGuardPeer] = []

  public var unknownFields = SwiftProtobuf.UnknownStorage()

  public init() {}
}
/// Request message for the `NetworkDelete` RPC: the `id` of the network.
public struct Burrow_NetworkDeleteRequest: Sendable {
  // SwiftProtobuf.Message conformance is added in an extension below. See the
  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
  // methods supported on all messages.

  public var id: Int32 = 0

  public var unknownFields = SwiftProtobuf.UnknownStorage()

  public init() {}
}
/// Proto message `burrow.Network`: an `id`, a `type` discriminator, and an
/// opaque `payload` blob. NOTE(review): payload presumably holds the serialized
/// type-specific config (e.g. `Burrow_WireGuardNetwork` for `.wireGuard`) —
/// confirm against the producer.
public struct Burrow_Network: @unchecked Sendable {
  // SwiftProtobuf.Message conformance is added in an extension below. See the
  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
  // methods supported on all messages.

  public var id: Int32 = 0

  public var type: Burrow_NetworkType = .wireGuard

  public var payload: Data = Data()

  public var unknownFields = SwiftProtobuf.UnknownStorage()

  public init() {}
}
/// Response message for the `NetworkList` RPC: a repeated list of networks.
public struct Burrow_NetworkListResponse: Sendable {
  // SwiftProtobuf.Message conformance is added in an extension below. See the
  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
  // methods supported on all messages.

  public var network: [Burrow_Network] = []

  public var unknownFields = SwiftProtobuf.UnknownStorage()

  public init() {}
}
/// Proto message `burrow.Empty`: no fields; used for RPCs with no meaningful
/// request or response payload.
public struct Burrow_Empty: Sendable {
  // SwiftProtobuf.Message conformance is added in an extension below. See the
  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
  // methods supported on all messages.

  public var unknownFields = SwiftProtobuf.UnknownStorage()

  public init() {}
}
/// Response message for the `TunnelStatus` RPC: the tunnel `state` and an
/// optional `start` timestamp (presence-tracked via `_start`).
public struct Burrow_TunnelStatusResponse: Sendable {
  // SwiftProtobuf.Message conformance is added in an extension below. See the
  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
  // methods supported on all messages.

  public var state: Burrow_State = .stopped

  // Computed facade over the optional backing field `_start`; reads yield a
  // default timestamp when the field is unset.
  public var start: SwiftProtobuf.Google_Protobuf_Timestamp {
    get {return _start ?? SwiftProtobuf.Google_Protobuf_Timestamp()}
    set {_start = newValue}
  }
  /// Returns true if `start` has been explicitly set.
  public var hasStart: Bool {return self._start != nil}
  /// Clears the value of `start`. Subsequent reads from it will return its default value.
  public mutating func clearStart() {self._start = nil}

  public var unknownFields = SwiftProtobuf.UnknownStorage()

  public init() {}

  fileprivate var _start: SwiftProtobuf.Google_Protobuf_Timestamp? = nil
}
/// Response message for the `TunnelConfiguration` RPC: interface addresses,
/// MTU, routes, DNS servers, search domains, and a default-route flag.
public struct Burrow_TunnelConfigurationResponse: Sendable {
  // SwiftProtobuf.Message conformance is added in an extension below. See the
  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
  // methods supported on all messages.

  public var addresses: [String] = []

  public var mtu: Int32 = 0

  public var routes: [String] = []

  public var dnsServers: [String] = []

  public var searchDomains: [String] = []

  public var includeDefaultRoute: Bool = false

  public var unknownFields = SwiftProtobuf.UnknownStorage()

  public init() {}
}
// MARK: - Code below here is support for the SwiftProtobuf runtime.

// Proto package prefix used to build each message's fully-qualified name below.
fileprivate let _protobuf_package = "burrow"
/// Maps wire values to proto case names for text-format and JSON support.
extension Burrow_NetworkType: SwiftProtobuf._ProtoNameProviding {
  public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
    0: .same(proto: "WireGuard"),
    1: .same(proto: "Tailnet"),
  ]
}
/// Maps wire values to proto case names for text-format and JSON support.
extension Burrow_State: SwiftProtobuf._ProtoNameProviding {
  public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
    0: .same(proto: "Stopped"),
    1: .same(proto: "Running"),
  ]
}
/// `SwiftProtobuf.Message` conformance for `burrow.NetworkReorderRequest`:
/// field-number-driven decode, serialization traversal, and equality.
extension Burrow_NetworkReorderRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
  public static let protoMessageName: String = _protobuf_package + ".NetworkReorderRequest"
  public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
    1: .same(proto: "id"),
    2: .same(proto: "index"),
  ]

  public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
    while let fieldNumber = try decoder.nextFieldNumber() {
      // The use of inline closures is to circumvent an issue where the compiler
      // allocates stack space for every case branch when no optimizations are
      // enabled. https://github.com/apple/swift-protobuf/issues/1034
      switch fieldNumber {
      case 1: try { try decoder.decodeSingularInt32Field(value: &self.id) }()
      case 2: try { try decoder.decodeSingularInt32Field(value: &self.index) }()
      default: break
      }
    }
  }

  // Fields are visited in field-number order; zero values are skipped (proto3).
  public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
    if self.id != 0 {
      try visitor.visitSingularInt32Field(value: self.id, fieldNumber: 1)
    }
    if self.index != 0 {
      try visitor.visitSingularInt32Field(value: self.index, fieldNumber: 2)
    }
    try unknownFields.traverse(visitor: &visitor)
  }

  public static func ==(lhs: Burrow_NetworkReorderRequest, rhs: Burrow_NetworkReorderRequest) -> Bool {
    if lhs.id != rhs.id {return false}
    if lhs.index != rhs.index {return false}
    if lhs.unknownFields != rhs.unknownFields {return false}
    return true
  }
}
/// `SwiftProtobuf.Message` conformance for `burrow.WireGuardPeer`:
/// field-number-driven decode, serialization traversal, and equality.
extension Burrow_WireGuardPeer: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
  public static let protoMessageName: String = _protobuf_package + ".WireGuardPeer"
  public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
    1: .same(proto: "endpoint"),
    2: .same(proto: "subnet"),
  ]

  public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
    while let fieldNumber = try decoder.nextFieldNumber() {
      // The use of inline closures is to circumvent an issue where the compiler
      // allocates stack space for every case branch when no optimizations are
      // enabled. https://github.com/apple/swift-protobuf/issues/1034
      switch fieldNumber {
      case 1: try { try decoder.decodeSingularStringField(value: &self.endpoint) }()
      case 2: try { try decoder.decodeRepeatedStringField(value: &self.subnet) }()
      default: break
      }
    }
  }

  // Fields are visited in field-number order; empty values are skipped (proto3).
  public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
    if !self.endpoint.isEmpty {
      try visitor.visitSingularStringField(value: self.endpoint, fieldNumber: 1)
    }
    if !self.subnet.isEmpty {
      try visitor.visitRepeatedStringField(value: self.subnet, fieldNumber: 2)
    }
    try unknownFields.traverse(visitor: &visitor)
  }

  public static func ==(lhs: Burrow_WireGuardPeer, rhs: Burrow_WireGuardPeer) -> Bool {
    if lhs.endpoint != rhs.endpoint {return false}
    if lhs.subnet != rhs.subnet {return false}
    if lhs.unknownFields != rhs.unknownFields {return false}
    return true
  }
}
/// `SwiftProtobuf.Message` conformance for `burrow.WireGuardNetwork`:
/// field-number-driven decode, serialization traversal, and equality.
extension Burrow_WireGuardNetwork: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
  public static let protoMessageName: String = _protobuf_package + ".WireGuardNetwork"
  public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
    1: .same(proto: "address"),
    2: .same(proto: "dns"),
    3: .same(proto: "peer"),
  ]

  public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
    while let fieldNumber = try decoder.nextFieldNumber() {
      // The use of inline closures is to circumvent an issue where the compiler
      // allocates stack space for every case branch when no optimizations are
      // enabled. https://github.com/apple/swift-protobuf/issues/1034
      switch fieldNumber {
      case 1: try { try decoder.decodeSingularStringField(value: &self.address) }()
      case 2: try { try decoder.decodeSingularStringField(value: &self.dns) }()
      case 3: try { try decoder.decodeRepeatedMessageField(value: &self.peer) }()
      default: break
      }
    }
  }

  // Fields are visited in field-number order; empty values are skipped (proto3).
  public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
    if !self.address.isEmpty {
      try visitor.visitSingularStringField(value: self.address, fieldNumber: 1)
    }
    if !self.dns.isEmpty {
      try visitor.visitSingularStringField(value: self.dns, fieldNumber: 2)
    }
    if !self.peer.isEmpty {
      try visitor.visitRepeatedMessageField(value: self.peer, fieldNumber: 3)
    }
    try unknownFields.traverse(visitor: &visitor)
  }

  public static func ==(lhs: Burrow_WireGuardNetwork, rhs: Burrow_WireGuardNetwork) -> Bool {
    if lhs.address != rhs.address {return false}
    if lhs.dns != rhs.dns {return false}
    if lhs.peer != rhs.peer {return false}
    if lhs.unknownFields != rhs.unknownFields {return false}
    return true
  }
}
/// `SwiftProtobuf.Message` conformance for `burrow.NetworkDeleteRequest`:
/// field-number-driven decode, serialization traversal, and equality.
extension Burrow_NetworkDeleteRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
  public static let protoMessageName: String = _protobuf_package + ".NetworkDeleteRequest"
  public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
    1: .same(proto: "id"),
  ]

  public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
    while let fieldNumber = try decoder.nextFieldNumber() {
      // The use of inline closures is to circumvent an issue where the compiler
      // allocates stack space for every case branch when no optimizations are
      // enabled. https://github.com/apple/swift-protobuf/issues/1034
      switch fieldNumber {
      case 1: try { try decoder.decodeSingularInt32Field(value: &self.id) }()
      default: break
      }
    }
  }

  // Zero values are skipped on the wire (proto3 default elision).
  public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
    if self.id != 0 {
      try visitor.visitSingularInt32Field(value: self.id, fieldNumber: 1)
    }
    try unknownFields.traverse(visitor: &visitor)
  }

  public static func ==(lhs: Burrow_NetworkDeleteRequest, rhs: Burrow_NetworkDeleteRequest) -> Bool {
    if lhs.id != rhs.id {return false}
    if lhs.unknownFields != rhs.unknownFields {return false}
    return true
  }
}
/// `SwiftProtobuf.Message` conformance for `burrow.Network`:
/// field-number-driven decode, serialization traversal, and equality.
extension Burrow_Network: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
  public static let protoMessageName: String = _protobuf_package + ".Network"
  public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
    1: .same(proto: "id"),
    2: .same(proto: "type"),
    3: .same(proto: "payload"),
  ]

  public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
    while let fieldNumber = try decoder.nextFieldNumber() {
      // The use of inline closures is to circumvent an issue where the compiler
      // allocates stack space for every case branch when no optimizations are
      // enabled. https://github.com/apple/swift-protobuf/issues/1034
      switch fieldNumber {
      case 1: try { try decoder.decodeSingularInt32Field(value: &self.id) }()
      case 2: try { try decoder.decodeSingularEnumField(value: &self.type) }()
      case 3: try { try decoder.decodeSingularBytesField(value: &self.payload) }()
      default: break
      }
    }
  }

  // Default values (0, .wireGuard, empty Data) are skipped on the wire (proto3).
  public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
    if self.id != 0 {
      try visitor.visitSingularInt32Field(value: self.id, fieldNumber: 1)
    }
    if self.type != .wireGuard {
      try visitor.visitSingularEnumField(value: self.type, fieldNumber: 2)
    }
    if !self.payload.isEmpty {
      try visitor.visitSingularBytesField(value: self.payload, fieldNumber: 3)
    }
    try unknownFields.traverse(visitor: &visitor)
  }

  public static func ==(lhs: Burrow_Network, rhs: Burrow_Network) -> Bool {
    if lhs.id != rhs.id {return false}
    if lhs.type != rhs.type {return false}
    if lhs.payload != rhs.payload {return false}
    if lhs.unknownFields != rhs.unknownFields {return false}
    return true
  }
}
/// `SwiftProtobuf.Message` conformance for `burrow.NetworkListResponse`:
/// field-number-driven decode, serialization traversal, and equality.
extension Burrow_NetworkListResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
  public static let protoMessageName: String = _protobuf_package + ".NetworkListResponse"
  public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
    1: .same(proto: "network"),
  ]

  public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
    while let fieldNumber = try decoder.nextFieldNumber() {
      // The use of inline closures is to circumvent an issue where the compiler
      // allocates stack space for every case branch when no optimizations are
      // enabled. https://github.com/apple/swift-protobuf/issues/1034
      switch fieldNumber {
      case 1: try { try decoder.decodeRepeatedMessageField(value: &self.network) }()
      default: break
      }
    }
  }

  // Empty repeated fields are skipped on the wire.
  public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
    if !self.network.isEmpty {
      try visitor.visitRepeatedMessageField(value: self.network, fieldNumber: 1)
    }
    try unknownFields.traverse(visitor: &visitor)
  }

  public static func ==(lhs: Burrow_NetworkListResponse, rhs: Burrow_NetworkListResponse) -> Bool {
    if lhs.network != rhs.network {return false}
    if lhs.unknownFields != rhs.unknownFields {return false}
    return true
  }
}
/// `SwiftProtobuf.Message` conformance for `burrow.Empty`: no declared fields,
/// so decode routes everything into `unknownFields`.
extension Burrow_Empty: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
  public static let protoMessageName: String = _protobuf_package + ".Empty"
  public static let _protobuf_nameMap = SwiftProtobuf._NameMap()

  public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
    // Load everything into unknown fields
    while try decoder.nextFieldNumber() != nil {}
  }

  public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
    try unknownFields.traverse(visitor: &visitor)
  }

  public static func ==(lhs: Burrow_Empty, rhs: Burrow_Empty) -> Bool {
    if lhs.unknownFields != rhs.unknownFields {return false}
    return true
  }
}
/// `SwiftProtobuf.Message` conformance for `burrow.TunnelStatusResponse`:
/// field-number-driven decode, serialization traversal, and equality.
/// The `start` timestamp is presence-tracked via the optional `_start`.
extension Burrow_TunnelStatusResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
  public static let protoMessageName: String = _protobuf_package + ".TunnelStatusResponse"
  public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
    1: .same(proto: "state"),
    2: .same(proto: "start"),
  ]

  public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
    while let fieldNumber = try decoder.nextFieldNumber() {
      // The use of inline closures is to circumvent an issue where the compiler
      // allocates stack space for every case branch when no optimizations are
      // enabled. https://github.com/apple/swift-protobuf/issues/1034
      switch fieldNumber {
      case 1: try { try decoder.decodeSingularEnumField(value: &self.state) }()
      case 2: try { try decoder.decodeSingularMessageField(value: &self._start) }()
      default: break
      }
    }
  }

  public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
    // The use of inline closures is to circumvent an issue where the compiler
    // allocates stack space for every if/case branch local when no optimizations
    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
    // https://github.com/apple/swift-protobuf/issues/1182
    if self.state != .stopped {
      try visitor.visitSingularEnumField(value: self.state, fieldNumber: 1)
    }
    // Only serialize `start` when it was explicitly set.
    try { if let v = self._start {
      try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
    } }()
    try unknownFields.traverse(visitor: &visitor)
  }

  public static func ==(lhs: Burrow_TunnelStatusResponse, rhs: Burrow_TunnelStatusResponse) -> Bool {
    if lhs.state != rhs.state {return false}
    if lhs._start != rhs._start {return false}
    if lhs.unknownFields != rhs.unknownFields {return false}
    return true
  }
}
/// `SwiftProtobuf.Message` conformance for `burrow.TunnelConfigurationResponse`:
/// field-number-driven decode, serialization traversal, and equality.
extension Burrow_TunnelConfigurationResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
  public static let protoMessageName: String = _protobuf_package + ".TunnelConfigurationResponse"
  public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
    1: .same(proto: "addresses"),
    2: .same(proto: "mtu"),
    3: .same(proto: "routes"),
    4: .standard(proto: "dns_servers"),
    5: .standard(proto: "search_domains"),
    6: .standard(proto: "include_default_route"),
  ]

  public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
    while let fieldNumber = try decoder.nextFieldNumber() {
      // The use of inline closures is to circumvent an issue where the compiler
      // allocates stack space for every case branch when no optimizations are
      // enabled. https://github.com/apple/swift-protobuf/issues/1034
      switch fieldNumber {
      case 1: try { try decoder.decodeRepeatedStringField(value: &self.addresses) }()
      case 2: try { try decoder.decodeSingularInt32Field(value: &self.mtu) }()
      case 3: try { try decoder.decodeRepeatedStringField(value: &self.routes) }()
      case 4: try { try decoder.decodeRepeatedStringField(value: &self.dnsServers) }()
      case 5: try { try decoder.decodeRepeatedStringField(value: &self.searchDomains) }()
      case 6: try { try decoder.decodeSingularBoolField(value: &self.includeDefaultRoute) }()
      default: break
      }
    }
  }

  // Fields are visited in field-number order; default values are skipped (proto3).
  public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
    if !self.addresses.isEmpty {
      try visitor.visitRepeatedStringField(value: self.addresses, fieldNumber: 1)
    }
    if self.mtu != 0 {
      try visitor.visitSingularInt32Field(value: self.mtu, fieldNumber: 2)
    }
    if !self.routes.isEmpty {
      try visitor.visitRepeatedStringField(value: self.routes, fieldNumber: 3)
    }
    if !self.dnsServers.isEmpty {
      try visitor.visitRepeatedStringField(value: self.dnsServers, fieldNumber: 4)
    }
    if !self.searchDomains.isEmpty {
      try visitor.visitRepeatedStringField(value: self.searchDomains, fieldNumber: 5)
    }
    if self.includeDefaultRoute {
      try visitor.visitSingularBoolField(value: self.includeDefaultRoute, fieldNumber: 6)
    }
    try unknownFields.traverse(visitor: &visitor)
  }

  public static func ==(lhs: Burrow_TunnelConfigurationResponse, rhs: Burrow_TunnelConfigurationResponse) -> Bool {
    if lhs.addresses != rhs.addresses {return false}
    if lhs.mtu != rhs.mtu {return false}
    if lhs.routes != rhs.routes {return false}
    if lhs.dnsServers != rhs.dnsServers {return false}
    if lhs.searchDomains != rhs.searchDomains {return false}
    if lhs.includeDefaultRoute != rhs.includeDefaultRoute {return false}
    if lhs.unknownFields != rhs.unknownFields {return false}
    return true
  }
}

View file

@ -0,0 +1,11 @@
{
"invocations": [
{
"protoFiles": [
"burrow.proto",
],
"server": false,
"visibility": "public"
}
]
}

View file

@ -0,0 +1,10 @@
{
"invocations": [
{
"protoFiles": [
"burrow.proto",
],
"visibility": "public"
}
]
}

View file

@ -1,7 +1,6 @@
import AsyncAlgorithms
import BurrowConfiguration
import BurrowCore
import GRPC
import libburrow
import NetworkExtension
import os
@ -20,9 +19,6 @@ final class PacketTunnelProvider: NEPacketTunnelProvider, @unchecked Sendable {
}
private let logger = Logger.logger(for: PacketTunnelProvider.self)
private var packetCall: GRPCAsyncBidirectionalStreamingCall<Burrow_TunnelPacket, Burrow_TunnelPacket>?
private var inboundPacketTask: Task<Void, Never>?
private var outboundPacketTask: Task<Void, Never>?
private var client: TunnelClient {
get throws { try _client.get() }
@ -49,18 +45,16 @@ final class PacketTunnelProvider: NEPacketTunnelProvider, @unchecked Sendable {
let completion = SendableCallbackBox(completionHandler)
Task {
do {
_ = try await client.tunnelStart(.init())
let configuration = try await Array(client.tunnelConfiguration(.init()).prefix(1)).first
guard let settings = configuration?.settings else {
throw Error.missingTunnelConfiguration
}
try await setTunnelNetworkSettings(settings)
try startPacketBridge()
_ = try await client.tunnelStart(.init())
logger.log("Started tunnel with network settings: \(settings)")
completion.callback(nil)
} catch {
logger.error("Failed to start tunnel: \(error)")
stopPacketBridge()
completion.callback(error)
}
}
@ -72,7 +66,6 @@ final class PacketTunnelProvider: NEPacketTunnelProvider, @unchecked Sendable {
) {
let completion = SendableCallbackBox(completionHandler)
Task {
stopPacketBridge()
do {
_ = try await client.tunnelStop(.init())
logger.log("Stopped client")
@ -84,243 +77,20 @@ final class PacketTunnelProvider: NEPacketTunnelProvider, @unchecked Sendable {
}
}
extension PacketTunnelProvider {
private func startPacketBridge() throws {
stopPacketBridge()
let packetClient = TunnelPacketClient.unix(socketURL: try Constants.socketURL)
let call = packetClient.makeTunnelPacketsCall()
self.packetCall = call
inboundPacketTask = Task { [weak self] in
guard let self else { return }
do {
for try await packet in call.responseStream {
let payload = packet.payload
self.packetFlow.writePackets(
[payload],
withProtocols: [Self.protocolNumber(for: payload)]
)
}
} catch {
guard !Task.isCancelled else { return }
self.logger.error("Tunnel packet receive loop failed: \(error)")
}
}
outboundPacketTask = Task { [weak self] in
guard let self else { return }
defer { call.requestStream.finish() }
do {
while !Task.isCancelled {
let packets = await self.readPacketsBatch()
for (payload, _) in packets {
var packet = Burrow_TunnelPacket()
packet.payload = payload
try await call.requestStream.send(packet)
}
}
} catch {
guard !Task.isCancelled else { return }
self.logger.error("Tunnel packet send loop failed: \(error)")
}
}
}
private func stopPacketBridge() {
inboundPacketTask?.cancel()
inboundPacketTask = nil
outboundPacketTask?.cancel()
outboundPacketTask = nil
packetCall?.cancel()
packetCall = nil
}
private func readPacketsBatch() async -> [(Data, NSNumber)] {
await withCheckedContinuation { continuation in
packetFlow.readPackets { packets, protocols in
continuation.resume(returning: Array(zip(packets, protocols)))
}
}
}
private static func protocolNumber(for payload: Data) -> NSNumber {
guard let version = payload.first.map({ $0 >> 4 }) else {
return NSNumber(value: AF_INET)
}
switch version {
case 6:
return NSNumber(value: AF_INET6)
default:
return NSNumber(value: AF_INET)
}
}
}
extension Burrow_TunnelConfigurationResponse {
fileprivate var settings: NEPacketTunnelNetworkSettings {
let parsedAddresses = addresses.compactMap(ParsedTunnelAddress.init(rawValue:))
let ipv4Addresses = parsedAddresses.compactMap(\.ipv4Address)
let ipv6Addresses = parsedAddresses.compactMap(\.ipv6Address)
let parsedRoutes = routes.compactMap(ParsedTunnelRoute.init(rawValue:))
var ipv4Routes = parsedRoutes.compactMap(\.ipv4Route)
var ipv6Routes = parsedRoutes.compactMap(\.ipv6Route)
if includeDefaultRoute {
ipv4Routes.append(.default())
ipv6Routes.append(.default())
}
let ipv6Addresses = addresses.filter { IPv6Address($0) != nil }
let settings = NEPacketTunnelNetworkSettings(tunnelRemoteAddress: "1.1.1.1")
settings.mtu = NSNumber(value: mtu)
if !ipv4Addresses.isEmpty {
let ipv4Settings = NEIPv4Settings(
addresses: ipv4Addresses.map(\.address),
subnetMasks: ipv4Addresses.map(\.subnetMask)
settings.ipv4Settings = NEIPv4Settings(
addresses: addresses.filter { IPv4Address($0) != nil },
subnetMasks: ["255.255.255.0"]
)
if !ipv4Routes.isEmpty {
ipv4Settings.includedRoutes = ipv4Routes
}
settings.ipv4Settings = ipv4Settings
}
if !ipv6Addresses.isEmpty {
let ipv6Settings = NEIPv6Settings(
addresses: ipv6Addresses.map(\.address),
networkPrefixLengths: ipv6Addresses.map(\.prefixLength)
settings.ipv6Settings = NEIPv6Settings(
addresses: ipv6Addresses,
networkPrefixLengths: ipv6Addresses.map { _ in 64 }
)
if !ipv6Routes.isEmpty {
ipv6Settings.includedRoutes = ipv6Routes
}
settings.ipv6Settings = ipv6Settings
}
if !dnsServers.isEmpty {
let dnsSettings = NEDNSSettings(servers: dnsServers)
if !searchDomains.isEmpty {
dnsSettings.matchDomains = searchDomains
}
settings.dnsSettings = dnsSettings
}
return settings
}
}
private struct ParsedTunnelAddress {
struct IPv4AddressSetting {
let address: String
let subnetMask: String
}
struct IPv6AddressSetting {
let address: String
let prefixLength: NSNumber
}
let ipv4Address: IPv4AddressSetting?
let ipv6Address: IPv6AddressSetting?
init?(rawValue: String) {
let components = rawValue.split(separator: "/", maxSplits: 1).map(String.init)
let address = components.first?.trimmingCharacters(in: .whitespacesAndNewlines) ?? ""
guard !address.isEmpty else {
return nil
}
let prefix = components.count == 2 ? Int(components[1]) : nil
if IPv4Address(address) != nil {
let prefixLength = prefix ?? 32
guard (0 ... 32).contains(prefixLength) else {
return nil
}
ipv4Address = IPv4AddressSetting(
address: address,
subnetMask: Self.ipv4SubnetMask(prefixLength: prefixLength)
)
ipv6Address = nil
return
}
if IPv6Address(address) != nil {
let prefixLength = prefix ?? 128
guard (0 ... 128).contains(prefixLength) else {
return nil
}
ipv4Address = nil
ipv6Address = IPv6AddressSetting(
address: address,
prefixLength: NSNumber(value: prefixLength)
)
return
}
return nil
}
private static func ipv4SubnetMask(prefixLength: Int) -> String {
guard prefixLength > 0 else {
return "0.0.0.0"
}
let mask = UInt32.max << (32 - prefixLength)
let octets = [
(mask >> 24) & 0xff,
(mask >> 16) & 0xff,
(mask >> 8) & 0xff,
mask & 0xff,
]
return octets.map(String.init).joined(separator: ".")
}
}
private struct ParsedTunnelRoute {
let ipv4Route: NEIPv4Route?
let ipv6Route: NEIPv6Route?
init?(rawValue: String) {
let components = rawValue.split(separator: "/", maxSplits: 1).map(String.init)
let address = components.first?.trimmingCharacters(in: .whitespacesAndNewlines) ?? ""
guard !address.isEmpty else {
return nil
}
let prefix = components.count == 2 ? Int(components[1]) : nil
if IPv4Address(address) != nil {
let prefixLength = prefix ?? 32
guard (0 ... 32).contains(prefixLength) else {
return nil
}
ipv4Route = NEIPv4Route(
destinationAddress: address,
subnetMask: Self.ipv4SubnetMask(prefixLength: prefixLength)
)
ipv6Route = nil
return
}
if IPv6Address(address) != nil {
let prefixLength = prefix ?? 128
guard (0 ... 128).contains(prefixLength) else {
return nil
}
ipv4Route = nil
ipv6Route = NEIPv6Route(
destinationAddress: address,
networkPrefixLength: NSNumber(value: prefixLength)
)
return
}
return nil
}
private static func ipv4SubnetMask(prefixLength: Int) -> String {
var mask = UInt32.max << (32 - prefixLength)
if prefixLength == 0 {
mask = 0
}
let octets = [
String((mask >> 24) & 0xff),
String((mask >> 16) & 0xff),
String((mask >> 8) & 0xff),
String(mask & 0xff),
]
return octets.joined(separator: ".")
}
}

File diff suppressed because it is too large Load diff

View file

@ -26,11 +26,28 @@ struct TailnetNetworkPayload: Codable, Sendable {
}
}
struct TailnetDiscoveryResponse: Codable, Sendable {
var domain: String
var provider: TailnetProvider
var authority: String
var oidcIssuer: String?
struct TailnetLoginStartRequest: Codable, Sendable {
var accountName: String
var identityName: String
var hostname: String?
var controlURL: String?
}
struct TailnetLoginStatus: Codable, Sendable {
var backendState: String
var authURL: String?
var running: Bool
var needsLogin: Bool
var tailnetName: String?
var magicDNSSuffix: String?
var selfDNSName: String?
var tailscaleIPs: [String]
var health: [String]
}
struct TailnetLoginStartResponse: Codable, Sendable {
var sessionID: String
var status: TailnetLoginStatus
}
struct TailnetAuthorityProbeStatus: Sendable {
@ -40,102 +57,121 @@ struct TailnetAuthorityProbeStatus: Sendable {
var detail: String?
}
struct TailnetLoginStatus: Sendable {
var sessionID: String
var backendState: String
var authURL: URL?
var running: Bool
var needsLogin: Bool
var tailnetName: String?
var magicDNSSuffix: String?
var selfDNSName: String?
var tailnetIPs: [String]
var health: [String]
}
enum TailnetBridgeClient {
private static let baseURL = URL(string: "http://127.0.0.1:8080")!
enum TailnetDiscoveryClient {
static func discover(email: String, socketURL: URL) async throws -> TailnetDiscoveryResponse {
var request = Burrow_TailnetDiscoverRequest()
request.email = email
let response = try await TailnetClient.unix(socketURL: socketURL).discover(request)
return TailnetDiscoveryResponse(
domain: response.domain,
provider: response.managed ? .tailscale : .headscale,
authority: response.authority,
oidcIssuer: response.oidcIssuer.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
? nil
: response.oidcIssuer
static func startLogin(_ request: TailnetLoginStartRequest) async throws -> TailnetLoginStartResponse {
var urlRequest = URLRequest(
url: baseURL.appendingPathComponent("v1/tailscale/login/start")
)
urlRequest.httpMethod = "POST"
urlRequest.setValue("application/json", forHTTPHeaderField: "Content-Type")
let encoder = JSONEncoder()
encoder.keyEncodingStrategy = .convertToSnakeCase
urlRequest.httpBody = try encoder.encode(request)
let (data, response) = try await URLSession.shared.data(for: urlRequest)
try validate(response: response, data: data)
let decoder = JSONDecoder()
decoder.keyDecodingStrategy = .convertFromSnakeCase
return try decoder.decode(TailnetLoginStartResponse.self, from: data)
}
static func status(sessionID: String) async throws -> TailnetLoginStatus {
let url = baseURL
.appendingPathComponent("v1/tailscale/login")
.appendingPathComponent(sessionID)
let (data, response) = try await URLSession.shared.data(from: url)
try validate(response: response, data: data)
let decoder = JSONDecoder()
decoder.keyDecodingStrategy = .convertFromSnakeCase
return try decoder.decode(TailnetLoginStatus.self, from: data)
}
private static func validate(response: URLResponse, data: Data) throws {
guard let http = response as? HTTPURLResponse else {
throw URLError(.badServerResponse)
}
guard (200..<300).contains(http.statusCode) else {
let message = String(data: data, encoding: .utf8)?.trimmingCharacters(
in: .whitespacesAndNewlines
)
throw TailnetBridgeError.server(message?.ifEmpty("HTTP \(http.statusCode)") ?? "HTTP \(http.statusCode)")
}
}
}
enum TailnetAuthorityProbeClient {
static func probe(authority: String, socketURL: URL) async throws -> TailnetAuthorityProbeStatus {
var request = Burrow_TailnetProbeRequest()
request.authority = authority
static func probe(provider: TailnetProvider, authority: String) async throws -> TailnetAuthorityProbeStatus {
let normalizedAuthority = normalizeAuthority(authority)
let baseURL = try validatedBaseURL(normalizedAuthority)
let probeURL = probeURL(for: provider, baseURL: baseURL)
let response = try await TailnetClient.unix(socketURL: socketURL).probe(request)
return TailnetAuthorityProbeStatus(
authority: response.authority,
statusCode: Int(response.statusCode),
summary: response.summary,
detail: response.detail.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
? nil
: response.detail
var request = URLRequest(url: probeURL)
request.timeoutInterval = 10
request.setValue("application/json", forHTTPHeaderField: "Accept")
let (data, response) = try await URLSession.shared.data(for: request)
guard let http = response as? HTTPURLResponse else {
throw URLError(.badServerResponse)
}
guard (200..<300).contains(http.statusCode) else {
let message = String(data: data, encoding: .utf8)?.trimmingCharacters(
in: .whitespacesAndNewlines
)
throw TailnetBridgeError.server(message?.ifEmpty("HTTP \(http.statusCode)") ?? "HTTP \(http.statusCode)")
}
let body = String(data: data, encoding: .utf8)?
.trimmingCharacters(in: .whitespacesAndNewlines)
let detail = body.flatMap { $0.isEmpty ? nil : $0 }
return TailnetAuthorityProbeStatus(
authority: normalizedAuthority,
statusCode: http.statusCode,
summary: "\(provider.title) reachable",
detail: detail
)
}
private static func normalizeAuthority(_ authority: String) -> String {
let trimmed = authority.trimmingCharacters(in: .whitespacesAndNewlines)
if trimmed.contains("://") {
return trimmed
}
return "https://\(trimmed)"
}
private static func validatedBaseURL(_ authority: String) throws -> URL {
guard let url = URL(string: authority), url.host != nil else {
throw TailnetBridgeError.server("Invalid server URL")
}
return url
}
private static func probeURL(for provider: TailnetProvider, baseURL: URL) -> URL {
switch provider {
case .headscale:
baseURL.appendingPathComponent("health")
case .burrow:
baseURL.appendingPathComponent("healthz")
case .tailscale:
baseURL
}
}
}
enum TailnetLoginClient {
static func start(
accountName: String,
identityName: String,
hostname: String?,
authority: String,
socketURL: URL
) async throws -> TailnetLoginStatus {
var request = Burrow_TailnetLoginStartRequest()
request.accountName = accountName
request.identityName = identityName
request.hostname = hostname ?? ""
request.authority = authority
let response = try await TailnetClient.unix(socketURL: socketURL).loginStart(request)
return decode(response)
}
enum TailnetBridgeError: LocalizedError {
case server(String)
static func status(sessionID: String, socketURL: URL) async throws -> TailnetLoginStatus {
var request = Burrow_TailnetLoginStatusRequest()
request.sessionID = sessionID
let response = try await TailnetClient.unix(socketURL: socketURL).loginStatus(request)
return decode(response)
var errorDescription: String? {
switch self {
case .server(let message):
message
}
static func cancel(sessionID: String, socketURL: URL) async throws {
var request = Burrow_TailnetLoginCancelRequest()
request.sessionID = sessionID
_ = try await TailnetClient.unix(socketURL: socketURL).loginCancel(request)
}
private static func decode(_ response: Burrow_TailnetLoginStatusResponse) -> TailnetLoginStatus {
TailnetLoginStatus(
sessionID: response.sessionID,
backendState: response.backendState,
authURL: URL(string: response.authURL.trimmingCharacters(in: .whitespacesAndNewlines)),
running: response.running,
needsLogin: response.needsLogin,
tailnetName: response.tailnetName.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
? nil
: response.tailnetName,
magicDNSSuffix: response.magicDNSSuffix.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
? nil
: response.magicDNSSuffix,
selfDNSName: response.selfDNSName.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
? nil
: response.selfDNSName,
tailnetIPs: response.tailnetIPs,
health: response.health
)
}
}
@ -146,7 +182,7 @@ final class NetworkViewModel: Sendable {
private(set) var connectionError: String?
private let socketURLResult: Result<URL, Error>
@ObservationIgnored private var task: Task<Void, Never>?
nonisolated(unsafe) private var task: Task<Void, Never>?
init(socketURLResult: Result<URL, Error>) {
self.socketURLResult = socketURLResult
@ -173,42 +209,6 @@ final class NetworkViewModel: Sendable {
try await addNetwork(type: .tailnet, payload: payload.encoded())
}
func discoverTailnet(email: String) async throws -> TailnetDiscoveryResponse {
let socketURL = try socketURLResult.get()
return try await TailnetDiscoveryClient.discover(email: email, socketURL: socketURL)
}
func probeTailnetAuthority(_ authority: String) async throws -> TailnetAuthorityProbeStatus {
let socketURL = try socketURLResult.get()
return try await TailnetAuthorityProbeClient.probe(authority: authority, socketURL: socketURL)
}
func startTailnetLogin(
accountName: String,
identityName: String,
hostname: String?,
authority: String
) async throws -> TailnetLoginStatus {
let socketURL = try socketURLResult.get()
return try await TailnetLoginClient.start(
accountName: accountName,
identityName: identityName,
hostname: hostname,
authority: authority,
socketURL: socketURL
)
}
func tailnetLoginStatus(sessionID: String) async throws -> TailnetLoginStatus {
let socketURL = try socketURLResult.get()
return try await TailnetLoginClient.status(sessionID: sessionID, socketURL: socketURL)
}
func cancelTailnetLogin(sessionID: String) async throws {
let socketURL = try socketURLResult.get()
try await TailnetLoginClient.cancel(sessionID: sessionID, socketURL: socketURL)
}
private func addNetwork(type: Burrow_NetworkType, payload: Data) async throws -> Int32 {
let socketURL = try socketURLResult.get()
let networkID = nextNetworkID
@ -303,11 +303,19 @@ enum TailnetProvider: String, CaseIterable, Codable, Identifiable, Sendable {
var title: String {
switch self {
case .tailscale: "Tailscale"
case .headscale: "Custom Tailnet"
case .headscale: "Headscale"
case .burrow: "Burrow"
}
}
var usesWebLogin: Bool {
self == .tailscale
}
var requiresControlURL: Bool {
self != .tailscale
}
var defaultAuthority: String? {
switch self {
case .tailscale:
@ -322,44 +330,19 @@ enum TailnetProvider: String, CaseIterable, Codable, Identifiable, Sendable {
var subtitle: String {
switch self {
case .tailscale:
"Managed Tailnet authority."
"Use Tailscale's real browser login flow."
case .headscale:
"Custom Tailnet control server."
"Store a Headscale control-plane endpoint and credentials."
case .burrow:
"Burrow-native Tailnet authority."
"Store Burrow control-plane credentials."
}
}
static func inferred(authority: String?, explicit: TailnetProvider?) -> TailnetProvider {
if explicit == .burrow {
return .burrow
}
if isManagedTailscaleAuthority(authority) {
return .tailscale
}
return .headscale
}
static func isManagedTailscaleAuthority(_ authority: String?) -> Bool {
guard let normalized = authority?
.trimmingCharacters(in: .whitespacesAndNewlines)
.lowercased()
.trimmingCharacters(in: CharacterSet(charactersIn: "/")),
!normalized.isEmpty
else {
return false
}
return normalized == "https://controlplane.tailscale.com"
|| normalized == "http://controlplane.tailscale.com"
|| normalized == "controlplane.tailscale.com"
}
}
enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable {
case wireGuard
case tor
case tailnet
case headscale
var id: String { rawValue }
@ -367,7 +350,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable {
switch self {
case .wireGuard: "WireGuard"
case .tor: "Tor"
case .tailnet: "Tailnet"
case .headscale: "Tailnet"
}
}
@ -375,7 +358,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable {
switch self {
case .wireGuard: "Import a tunnel and optional account metadata."
case .tor: "Store Arti account and identity preferences."
case .tailnet: "Save Tailnet authority, identity defaults, and login material."
case .headscale: "Save Tailscale, Headscale, or Burrow control-plane identities."
}
}
@ -383,7 +366,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable {
switch self {
case .wireGuard: .init("WireGuard")
case .tor: .orange
case .tailnet: .mint
case .headscale: .mint
}
}
@ -391,7 +374,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable {
switch self {
case .wireGuard: "Add Network"
case .tor: "Save Account"
case .tailnet: "Save Account"
case .headscale: "Save Account"
}
}
@ -401,15 +384,15 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable {
nil
case .tor:
"Tor account preferences are stored on Apple now. The managed Tor runtime is not wired on Apple in this branch yet."
case .tailnet:
"Tailnet accounts can sign in from Apple now. The managed Apple runtime is still pending, but Tailnet networks can already be stored in the daemon."
case .headscale:
"Tailnet accounts can sign in from Apple now. The managed Apple runtime is still pending, but Tailnet networks can be stored in the daemon."
}
}
}
enum AccountAuthMode: String, CaseIterable, Codable, Identifiable, Sendable {
case web
case none
case web
case password
case preauthKey
@ -417,8 +400,8 @@ enum AccountAuthMode: String, CaseIterable, Codable, Identifiable, Sendable {
var title: String {
switch self {
case .web: "Browser Sign-In"
case .none: "None"
case .web: "Web Login"
case .password: "Password"
case .preauthKey: "Preauth Key"
}
@ -444,15 +427,17 @@ struct NetworkAccountRecord: Codable, Identifiable, Hashable, Sendable {
struct TailnetCard {
var id: Int32
var provider: String
var title: String
var detail: String
init(network: Burrow_Network) {
let payload = (try? JSONDecoder().decode(TailnetNetworkPayload.self, from: network.payload))
id = network.id
provider = payload?.provider.title ?? "Tailnet"
title = payload?.tailnet ?? payload?.hostname ?? "Tailnet"
detail = [
payload?.authority.flatMap { URL(string: $0)?.host } ?? payload?.authority,
payload?.provider.title,
payload?.authority,
payload.map { "Account: \($0.account)" },
]
@ -469,7 +454,7 @@ struct TailnetCard {
VStack(alignment: .leading, spacing: 12) {
HStack {
VStack(alignment: .leading, spacing: 4) {
Text("Tailnet")
Text(provider)
.font(.headline)
.foregroundStyle(.white.opacity(0.85))
Text(title)

View file

@ -10,12 +10,6 @@ check:
build:
@cargo build
bep-check:
@python3 Scripts/check-bep-metadata.py
bep-list:
@Scripts/bep list
daemon-console:
@$(sudo_cargo_console) daemon

View file

@ -10,7 +10,6 @@ Routine verification now runs unprivileged with `cargo test --workspace --all-fe
The repository now carries its own design and deployment record:
- [Constitution](./CONSTITUTION.md)
- [Agent Instructions](./AGENTS.md)
- [Burrow Evolution](./evolution/README.md)
- [WireGuard Rust Lineage](./docs/WIREGUARD_LINEAGE.md)
- [Protocol Roadmap](./docs/PROTOCOL_ROADMAP.md)
@ -20,8 +19,6 @@ The repository now carries its own design and deployment record:
Burrow is fully open source, you can fork the repo and start contributing easily. For more information and in-depth discussions, visit the `#burrow` channel on the [Hack Club Slack](https://hackclub.com/slack/), here you can ask for help and talk with other people interested in burrow. Checkout [GETTING_STARTED.md](./docs/GETTING_STARTED.md) for build instructions and [GTK_APP.md](./docs/GTK_APP.md) for the Linux app. Forge and deployment scaffolding live in [`flake.nix`](./flake.nix), [`nixos/`](./nixos), and [`.forgejo/workflows/`](./.forgejo/workflows/). Hosted mail backup operations live in [`docs/FORWARDEMAIL.md`](./docs/FORWARDEMAIL.md) and [`Tools/forwardemail-custom-s3.sh`](./Tools/forwardemail-custom-s3.sh).
Agent and governance-sensitive work should start with [AGENTS.md](./AGENTS.md), [CONSTITUTION.md](./CONSTITUTION.md), and the relevant BEPs under [`evolution/proposals/`](./evolution/proposals/). Identity and bootstrap metadata now live in [`contributors.nix`](./contributors.nix).
The project structure is divided in the following folders:
```

View file

@ -1,243 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}"
bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}"
application_slug="${AUTHENTIK_ONEPASSWORD_APPLICATION_SLUG:-onepassword}"
application_name="${AUTHENTIK_ONEPASSWORD_APPLICATION_NAME:-1Password}"
provider_name="${AUTHENTIK_ONEPASSWORD_PROVIDER_NAME:-1Password}"
template_slug="${AUTHENTIK_ONEPASSWORD_TEMPLATE_SLUG:-ts}"
client_id="${AUTHENTIK_ONEPASSWORD_CLIENT_ID:-1password.burrow.net}"
launch_url="${AUTHENTIK_ONEPASSWORD_LAUNCH_URL:-https://burrow-team.1password.com/}"
redirect_uris_json="${AUTHENTIK_ONEPASSWORD_REDIRECT_URIS_JSON:-[
\"https://burrow-team.1password.com/sso/oidc/redirect/\",
\"onepassword://sso/oidc/redirect\"
]}"
usage() {
cat <<'EOF'
Usage: Scripts/authentik-sync-1password-oidc.sh
Required environment:
AUTHENTIK_BOOTSTRAP_TOKEN
Optional environment:
AUTHENTIK_URL
AUTHENTIK_ONEPASSWORD_APPLICATION_SLUG
AUTHENTIK_ONEPASSWORD_APPLICATION_NAME
AUTHENTIK_ONEPASSWORD_PROVIDER_NAME
AUTHENTIK_ONEPASSWORD_TEMPLATE_SLUG
AUTHENTIK_ONEPASSWORD_CLIENT_ID
AUTHENTIK_ONEPASSWORD_LAUNCH_URL
AUTHENTIK_ONEPASSWORD_REDIRECT_URIS_JSON
EOF
}
if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
usage
exit 0
fi
if [[ -z "$bootstrap_token" ]]; then
echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2
exit 1
fi
if ! printf '%s' "$redirect_uris_json" | jq -e 'type == "array" and length > 0' >/dev/null; then
echo "error: AUTHENTIK_ONEPASSWORD_REDIRECT_URIS_JSON must be a non-empty JSON array" >&2
exit 1
fi
api() {
local method="$1"
local path="$2"
local data="${3:-}"
if [[ -n "$data" ]]; then
curl -fsS \
-X "$method" \
-H "Authorization: Bearer ${bootstrap_token}" \
-H "Content-Type: application/json" \
-d "$data" \
"${authentik_url}${path}"
else
curl -fsS \
-X "$method" \
-H "Authorization: Bearer ${bootstrap_token}" \
"${authentik_url}${path}"
fi
}
api_with_status() {
local method="$1"
local path="$2"
local data="${3:-}"
local response_file status
response_file="$(mktemp)"
trap 'rm -f "$response_file"' RETURN
if [[ -n "$data" ]]; then
status="$(
curl -sS \
-o "$response_file" \
-w '%{http_code}' \
-X "$method" \
-H "Authorization: Bearer ${bootstrap_token}" \
-H "Content-Type: application/json" \
-d "$data" \
"${authentik_url}${path}"
)"
else
status="$(
curl -sS \
-o "$response_file" \
-w '%{http_code}' \
-X "$method" \
-H "Authorization: Bearer ${bootstrap_token}" \
"${authentik_url}${path}"
)"
fi
printf '%s\n' "$status"
cat "$response_file"
}
wait_for_authentik() {
for _ in $(seq 1 90); do
if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then
return 0
fi
sleep 2
done
echo "error: Authentik did not become ready at ${authentik_url}" >&2
exit 1
}
wait_for_authentik
template_provider="$(
api GET "/api/v3/providers/oauth2/?page_size=200" \
| jq -c --arg template_slug "$template_slug" '.results[]? | select(.assigned_application_slug == $template_slug)' \
| head -n1
)"
if [[ -z "$template_provider" ]]; then
echo "error: could not resolve the Authentik OAuth provider template ${template_slug}" >&2
exit 1
fi
authorization_flow="$(printf '%s\n' "$template_provider" | jq -r '.authorization_flow')"
invalidation_flow="$(printf '%s\n' "$template_provider" | jq -r '.invalidation_flow')"
property_mappings="$(printf '%s\n' "$template_provider" | jq -c '.property_mappings')"
signing_key="$(printf '%s\n' "$template_provider" | jq -r '.signing_key')"
provider_payload="$(
jq -n \
--arg name "$provider_name" \
--arg authorization_flow "$authorization_flow" \
--arg invalidation_flow "$invalidation_flow" \
--arg client_id "$client_id" \
--arg signing_key "$signing_key" \
--argjson property_mappings "$property_mappings" \
--argjson redirect_uris "$redirect_uris_json" \
'{
name: $name,
authorization_flow: $authorization_flow,
invalidation_flow: $invalidation_flow,
client_type: "public",
client_id: $client_id,
include_claims_in_id_token: true,
redirect_uris: ($redirect_uris | map({matching_mode: "strict", url: .})),
property_mappings: $property_mappings,
signing_key: $signing_key,
issuer_mode: "per_provider",
sub_mode: "hashed_user_id"
}'
)"
existing_provider="$(
api GET "/api/v3/providers/oauth2/?page_size=200" \
| jq -c \
--arg application_slug "$application_slug" \
--arg provider_name "$provider_name" \
'.results[]? | select(.assigned_application_slug == $application_slug or .name == $provider_name)' \
| head -n1
)"
if [[ -n "$existing_provider" ]]; then
provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')"
api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$provider_payload" >/dev/null
else
provider_pk="$(
api POST "/api/v3/providers/oauth2/" "$provider_payload" \
| jq -r '.pk // empty'
)"
fi
if [[ -z "${provider_pk:-}" ]]; then
echo "error: 1Password OIDC provider did not return a primary key" >&2
exit 1
fi
application_payload="$(
jq -n \
--arg name "$application_name" \
--arg slug "$application_slug" \
--arg provider "$provider_pk" \
--arg launch_url "$launch_url" \
'{
name: $name,
slug: $slug,
provider: ($provider | tonumber),
meta_launch_url: $launch_url,
open_in_new_tab: true,
policy_engine_mode: "any"
}'
)"
existing_application="$(
api GET "/api/v3/core/applications/?page_size=200" \
| jq -c --arg slug "$application_slug" '.results[]? | select(.slug == $slug)' \
| head -n1
)"
if [[ -n "$existing_application" ]]; then
application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')"
else
create_application_result="$(
api_with_status POST "/api/v3/core/applications/" "$application_payload"
)"
create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')"
create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')"
if [[ "$create_application_status" =~ ^20[01]$ ]]; then
application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')"
elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e '
(.slug // [] | index("Application with this slug already exists.")) != null
or (.provider // [] | index("Application with this provider already exists.")) != null
' >/dev/null; then
application_pk="existing-duplicate"
else
printf '%s\n' "$create_application_body" >&2
echo "error: could not reconcile Authentik application ${application_slug}" >&2
exit 1
fi
fi
if [[ -z "${application_pk:-}" ]]; then
echo "error: 1Password OIDC application did not return a primary key" >&2
exit 1
fi
for _ in $(seq 1 30); do
if curl -fsS "${authentik_url}/application/o/${application_slug}/.well-known/openid-configuration" >/dev/null 2>&1; then
echo "Synced Authentik 1Password OIDC application ${application_slug} (${application_name})."
exit 0
fi
sleep 2
done
echo "warning: 1Password OIDC issuer document for ${application_slug} was not immediately readable; keeping reconciled config." >&2
echo "Synced Authentik 1Password OIDC application ${application_slug} (${application_name})."

View file

@ -1,263 +0,0 @@
#!/usr/bin/env bash
# Reconcile the Burrow user/group directory into Authentik: ensure the
# baseline groups exist, create/update every user from a JSON directory
# document, and (optionally) bind the Forgejo application to the users group.
set -euo pipefail
# Connection and naming defaults, all overridable via environment.
authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}"
bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}"
# JSON array of user specs; each entry is read by ensure_user below
# (.username, .name, .email, .isAdmin, .groups, .passwordFile).
directory_json="${AUTHENTIK_BURROW_DIRECTORY_JSON:-[]}"
users_group="${AUTHENTIK_BURROW_USERS_GROUP:-burrow-users}"
admins_group="${AUTHENTIK_BURROW_ADMINS_GROUP:-burrow-admins}"
forgejo_application_slug="${AUTHENTIK_FORGEJO_APPLICATION_SLUG:-}"
# Print CLI usage to stdout.
usage() {
cat <<'EOF'
Usage: Scripts/authentik-sync-burrow-directory.sh
Required environment:
  AUTHENTIK_BOOTSTRAP_TOKEN
  AUTHENTIK_BURROW_DIRECTORY_JSON
Optional environment:
  AUTHENTIK_URL
  AUTHENTIK_BURROW_USERS_GROUP
  AUTHENTIK_BURROW_ADMINS_GROUP
  AUTHENTIK_FORGEJO_APPLICATION_SLUG
EOF
}
if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
usage
exit 0
fi
# Fail fast on missing/invalid required configuration.
if [[ -z "$bootstrap_token" ]]; then
echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2
exit 1
fi
if ! printf '%s' "$directory_json" | jq -e 'type == "array"' >/dev/null; then
echo "error: AUTHENTIK_BURROW_DIRECTORY_JSON must be a JSON array" >&2
exit 1
fi
# Authenticated Authentik API call.
# $1: HTTP method, $2: API path, $3 (optional): JSON request body.
# Prints the response body; `curl -f` makes HTTP error statuses fatal.
api() {
  local method="$1" path="$2" body="${3:-}"
  local -a opts=(-fsS -X "$method" -H "Authorization: Bearer ${bootstrap_token}")
  if [[ -n "$body" ]]; then
    opts+=(-H "Content-Type: application/json" -d "$body")
  fi
  curl "${opts[@]}" "${authentik_url}${path}"
}
# Poll the Authentik readiness endpoint, 90 tries 2s apart (~3 minutes);
# exit the script with an error on timeout.
wait_for_authentik() {
  local attempt
  for attempt in {1..90}; do
    if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then
      return 0
    fi
    sleep 2
  done
  echo "error: Authentik did not become ready at ${authentik_url}" >&2
  exit 1
}
# Print the pk of the Authentik group with the exact given name, or nothing.
# Fix: URL-encode the search term — the original interpolated the raw name
# into the query string, which breaks for names containing spaces, '&', '+'
# or other reserved characters.
lookup_group_pk() {
  local group_name="$1"
  local encoded_name
  encoded_name="$(jq -rn --arg name "$group_name" '$name | @uri')"
  api GET "/api/v3/core/groups/?page_size=200&search=${encoded_name}" \
    | jq -r --arg name "$group_name" '.results[]? | select(.name == $name) | .pk // empty' \
    | head -n1
}
# Create the named Authentik group if missing, otherwise PATCH it in place,
# then print its pk. Exits the script when no pk can be resolved.
ensure_group() {
  local group_name="$1"
  local payload group_pk
  payload="$(jq -cn --arg name "$group_name" '{name: $name}')"
  group_pk="$(lookup_group_pk "$group_name")"
  if [[ -z "$group_pk" ]]; then
    group_pk="$(api POST "/api/v3/core/groups/" "$payload" | jq -r '.pk // empty')"
  else
    api PATCH "/api/v3/core/groups/${group_pk}/" "$payload" >/dev/null
  fi
  if [[ -z "$group_pk" ]]; then
    echo "error: could not create Authentik group ${group_name}" >&2
    exit 1
  fi
  printf '%s\n' "$group_pk"
}
# Print the pk of the Authentik user with the exact given username, or nothing.
# Fix: URL-encode the search term — the original interpolated the raw
# username into the query string, which breaks for usernames containing
# '+', '&', spaces, or other reserved characters.
lookup_user_pk() {
  local username="$1"
  local encoded_username
  encoded_username="$(jq -rn --arg username "$username" '$username | @uri')"
  api GET "/api/v3/core/users/?page_size=200&search=${encoded_username}" \
    | jq -r --arg username "$username" '.results[]? | select(.username == $username) | .pk // empty' \
    | head -n1
}
# Create or update one Authentik user from a JSON user spec.
#
# Spec fields read: .username (required), .name, .email (required),
# .isAdmin (default false), .groups (array of group names, default []),
# .passwordFile (optional path whose contents become the password).
# Effective membership is the spec's groups plus $users_group, plus
# $admins_group when isAdmin is true; missing groups are created on demand.
#
# Fix: `group_pk` is now declared local — the original assigned it inside
# the while loop without a `local`, leaking it into the global scope.
ensure_user() {
  local user_spec="$1"
  local username name email is_admin groups_json password_file effective_groups_json group_name
  local group_pk group_pks_json payload user_pk
  username="$(printf '%s\n' "$user_spec" | jq -r '.username')"
  name="$(printf '%s\n' "$user_spec" | jq -r '.name')"
  email="$(printf '%s\n' "$user_spec" | jq -r '.email')"
  is_admin="$(printf '%s\n' "$user_spec" | jq -r '.isAdmin // false')"
  groups_json="$(printf '%s\n' "$user_spec" | jq -c '.groups // []')"
  password_file="$(printf '%s\n' "$user_spec" | jq -r '.passwordFile // empty')"
  if [[ -z "$username" || "$username" == "null" || -z "$email" || "$email" == "null" ]]; then
    echo "error: each Burrow Authentik user requires username and email" >&2
    exit 1
  fi
  # Union of explicit groups and the baseline/admin groups, deduplicated.
  effective_groups_json="$(
    printf '%s\n' "$groups_json" \
      | jq -c --arg users_group "$users_group" --arg admins_group "$admins_group" --argjson is_admin "$is_admin" '
        . + [$users_group] + (if $is_admin then [$admins_group] else [] end) | unique
      '
  )"
  # Resolve every group name to a pk, creating missing groups as we go.
  group_pks_json='[]'
  while IFS= read -r group_name; do
    group_pk="$(ensure_group "$group_name")"
    group_pks_json="$(
      jq -cn \
        --argjson current "$group_pks_json" \
        --arg next "$group_pk" \
        '$current + [$next]'
    )"
  done < <(printf '%s\n' "$effective_groups_json" | jq -r '.[]')
  payload="$(
    jq -cn \
      --arg username "$username" \
      --arg name "$name" \
      --arg email "$email" \
      --argjson groups "$group_pks_json" \
      '{
        username: $username,
        name: $name,
        email: $email,
        is_active: true,
        path: "users",
        groups: $groups
      }'
  )"
  # PATCH when the user already exists; POST otherwise.
  user_pk="$(lookup_user_pk "$username")"
  if [[ -n "$user_pk" ]]; then
    api PATCH "/api/v3/core/users/${user_pk}/" "$payload" >/dev/null
  else
    user_pk="$(
      api POST "/api/v3/core/users/" "$payload" \
        | jq -r '.pk // empty'
    )"
  fi
  if [[ -z "$user_pk" ]]; then
    echo "error: could not create Authentik user ${username}" >&2
    exit 1
  fi
  # Optionally reset the password from a secret file (CR/LF stripped).
  if [[ -n "$password_file" ]]; then
    if [[ ! -s "$password_file" ]]; then
      echo "error: password file for Authentik user ${username} is missing: ${password_file}" >&2
      exit 1
    fi
    api POST "/api/v3/core/users/${user_pk}/set_password/" "$(
      jq -cn \
        --arg password "$(tr -d '\r\n' < "$password_file")" \
        '{password: $password}'
    )" >/dev/null
  fi
}
# Print the pk of the application with the given slug, or nothing.
lookup_application_pk() {
  local slug="$1"
  api GET "/api/v3/core/applications/?page_size=200" \
    | jq -r --arg slug "$slug" 'first(.results[]? | select(.slug == $slug) | .pk // empty)'
}
# Ensure a policy binding exists granting the named group access to the
# named application. A missing application is only a warning (the app may be
# reconciled by a separate sync script); a missing group is fatal.
ensure_application_group_binding() {
local application_slug="$1"
local group_name="$2"
local application_pk group_pk existing payload binding_pk
application_pk="$(lookup_application_pk "$application_slug")"
if [[ -z "$application_pk" ]]; then
echo "warning: could not resolve Authentik application ${application_slug}; skipping application group binding" >&2
return 0
fi
group_pk="$(lookup_group_pk "$group_name")"
if [[ -z "$group_pk" ]]; then
echo "error: could not resolve Authentik group ${group_name}" >&2
exit 1
fi
# Find an existing binding of this group on this application, if any.
existing="$(
api GET "/api/v3/policies/bindings/?page_size=200&target=${application_pk}" \
| jq -c --arg group_pk "$group_pk" '.results[]? | select(.group == $group_pk)' \
| head -n1
)"
payload="$(
jq -cn \
--arg target "$application_pk" \
--arg group "$group_pk" \
'{
group: $group,
target: $target,
negate: false,
enabled: true,
order: 100,
timeout: 30,
failure_result: false
}'
)"
# PATCH the existing binding to the desired state, or create a new one.
if [[ -n "$existing" ]]; then
binding_pk="$(printf '%s\n' "$existing" | jq -r '.pk')"
api PATCH "/api/v3/policies/bindings/${binding_pk}/" "$payload" >/dev/null
else
api POST "/api/v3/policies/bindings/" "$payload" >/dev/null
fi
}
# --- main ---
wait_for_authentik
# Ensure the baseline groups exist even if no user spec references them.
ensure_group "$users_group" >/dev/null
ensure_group "$admins_group" >/dev/null
# Reconcile every entry of the directory document.
while IFS= read -r entry; do
  ensure_user "$entry"
done < <(printf '%s\n' "$directory_json" | jq -c '.[]')
# Optionally gate the Forgejo application behind the users group.
if [[ -n "$forgejo_application_slug" ]]; then
  ensure_application_group_binding "$forgejo_application_slug" "$users_group"
fi
echo "Synced Burrow Authentik directory."

View file

@ -1,250 +0,0 @@
#!/usr/bin/env bash
# Reconcile the Forgejo OIDC provider + application in Authentik, cloning
# flow/mapping/signing defaults from the Burrow Tailnet ("ts") provider.
set -euo pipefail
# Connection and naming defaults, all overridable via environment.
authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}"
bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}"
application_slug="${AUTHENTIK_FORGEJO_APPLICATION_SLUG:-git}"
application_name="${AUTHENTIK_FORGEJO_APPLICATION_NAME:-burrow.net}"
provider_name="${AUTHENTIK_FORGEJO_PROVIDER_NAME:-burrow.net}"
client_id="${AUTHENTIK_FORGEJO_CLIENT_ID:-git.burrow.net}"
client_secret="${AUTHENTIK_FORGEJO_CLIENT_SECRET:-}"
launch_url="${AUTHENTIK_FORGEJO_LAUNCH_URL:-https://git.burrow.net/}"
# Default redirect URIs as a JSON array string; callers may override with
# AUTHENTIK_FORGEJO_REDIRECT_URIS_JSON.
redirect_uris_json="${AUTHENTIK_FORGEJO_REDIRECT_URIS_JSON:-[
\"https://git.burrow.net/user/oauth2/burrow.net/callback\",
\"https://git.burrow.net/user/oauth2/authentik/callback\",
\"https://git.burrow.net/user/oauth2/GitHub/callback\"
]}"
# Print CLI usage to stdout.
usage() {
cat <<'EOF'
Usage: Scripts/authentik-sync-forgejo-oidc.sh
Required environment:
  AUTHENTIK_BOOTSTRAP_TOKEN
  AUTHENTIK_FORGEJO_CLIENT_SECRET
Optional environment:
  AUTHENTIK_URL
  AUTHENTIK_FORGEJO_APPLICATION_SLUG
  AUTHENTIK_FORGEJO_APPLICATION_NAME
  AUTHENTIK_FORGEJO_PROVIDER_NAME
  AUTHENTIK_FORGEJO_CLIENT_ID
  AUTHENTIK_FORGEJO_LAUNCH_URL
  AUTHENTIK_FORGEJO_REDIRECT_URIS_JSON
EOF
}
if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
usage
exit 0
fi
if [[ -z "$bootstrap_token" ]]; then
echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2
exit 1
fi
# A missing or placeholder ("PENDING…") secret is a clean no-op, not an
# error, so the sync can run before the secret is provisioned.
if [[ -z "$client_secret" || "$client_secret" == PENDING* ]]; then
echo "Forgejo OIDC client secret is not configured; skipping Authentik Forgejo sync." >&2
exit 0
fi
if ! printf '%s' "$redirect_uris_json" | jq -e 'type == "array" and length > 0' >/dev/null; then
echo "error: AUTHENTIK_FORGEJO_REDIRECT_URIS_JSON must be a non-empty JSON array" >&2
exit 1
fi
# Authenticated Authentik API call.
# $1: HTTP method, $2: API path, $3 (optional): JSON request body.
# Prints the response body; `curl -f` makes HTTP error statuses fatal.
api() {
  local method="$1" path="$2" body="${3:-}"
  local -a opts=(-fsS -X "$method" -H "Authorization: Bearer ${bootstrap_token}")
  if [[ -n "$body" ]]; then
    opts+=(-H "Content-Type: application/json" -d "$body")
  fi
  curl "${opts[@]}" "${authentik_url}${path}"
}
# Like api(), but tolerant of HTTP errors: prints the status code on the
# first output line, followed by the raw response body.
api_with_status() {
  local method="$1" path="$2" body="${3:-}"
  local response_file status
  response_file="$(mktemp)"
  # Remove the temp file whenever this function returns.
  trap 'rm -f "$response_file"' RETURN
  local -a opts=(
    -sS -o "$response_file" -w '%{http_code}'
    -X "$method" -H "Authorization: Bearer ${bootstrap_token}"
  )
  if [[ -n "$body" ]]; then
    opts+=(-H "Content-Type: application/json" -d "$body")
  fi
  status="$(curl "${opts[@]}" "${authentik_url}${path}")"
  printf '%s\n' "$status"
  cat "$response_file"
}
# Poll the Authentik readiness endpoint, 90 tries 2s apart (~3 minutes);
# exit the script with an error on timeout.
wait_for_authentik() {
  local attempt
  for attempt in {1..90}; do
    if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then
      return 0
    fi
    sleep 2
  done
  echo "error: Authentik did not become ready at ${authentik_url}" >&2
  exit 1
}
wait_for_authentik
# Use the Burrow Tailnet ("ts") OAuth2 provider as a template for the flows,
# property mappings and signing key of the Forgejo provider.
template_provider="$(
api GET "/api/v3/providers/oauth2/?page_size=200" \
| jq -c '.results[]? | select(.assigned_application_slug == "ts")' \
| head -n1
)"
if [[ -z "$template_provider" ]]; then
echo "error: could not resolve the Burrow Tailnet OAuth provider template" >&2
exit 1
fi
authorization_flow="$(printf '%s\n' "$template_provider" | jq -r '.authorization_flow')"
invalidation_flow="$(printf '%s\n' "$template_provider" | jq -r '.invalidation_flow')"
property_mappings="$(printf '%s\n' "$template_provider" | jq -c '.property_mappings')"
signing_key="$(printf '%s\n' "$template_provider" | jq -r '.signing_key')"
# Desired provider state; redirect URIs all use strict (exact) matching.
provider_payload="$(
jq -n \
--arg name "$provider_name" \
--arg authorization_flow "$authorization_flow" \
--arg invalidation_flow "$invalidation_flow" \
--arg client_id "$client_id" \
--arg client_secret "$client_secret" \
--arg signing_key "$signing_key" \
--argjson property_mappings "$property_mappings" \
--argjson redirect_uris "$redirect_uris_json" \
'{
name: $name,
authorization_flow: $authorization_flow,
invalidation_flow: $invalidation_flow,
client_type: "confidential",
client_id: $client_id,
client_secret: $client_secret,
include_claims_in_id_token: true,
redirect_uris: ($redirect_uris | map({matching_mode: "strict", url: .})),
property_mappings: $property_mappings,
signing_key: $signing_key,
issuer_mode: "per_provider",
sub_mode: "hashed_user_id"
}'
)"
# Match an existing provider either by its assigned application or its name.
existing_provider="$(
api GET "/api/v3/providers/oauth2/?page_size=200" \
| jq -c \
--arg application_slug "$application_slug" \
--arg provider_name "$provider_name" \
'.results[]? | select(.assigned_application_slug == $application_slug or .name == $provider_name)' \
| head -n1
)"
# PATCH the existing provider in place, or POST a new one.
if [[ -n "$existing_provider" ]]; then
provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')"
api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$provider_payload" >/dev/null
else
provider_pk="$(
api POST "/api/v3/providers/oauth2/" "$provider_payload" \
| jq -r '.pk // empty'
)"
fi
if [[ -z "${provider_pk:-}" ]]; then
echo "error: Forgejo OIDC provider did not return a primary key" >&2
exit 1
fi
# Desired application state, pointing at the provider reconciled above.
application_payload="$(
jq -n \
--arg name "$application_name" \
--arg slug "$application_slug" \
--arg provider "$provider_pk" \
--arg launch_url "$launch_url" \
'{
name: $name,
slug: $slug,
provider: ($provider | tonumber),
meta_launch_url: $launch_url,
open_in_new_tab: false,
policy_engine_mode: "any"
}'
)"
existing_application="$(
api GET "/api/v3/core/applications/?page_size=200" \
| jq -c --arg slug "$application_slug" '.results[]? | select(.slug == $slug)' \
| head -n1
)"
if [[ -n "$existing_application" ]]; then
application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')"
else
# Create the application, but treat a 400 caused by a duplicate slug or
# duplicate provider binding as success (another sync run raced us); the
# "existing-duplicate" sentinel just satisfies the non-empty check below.
create_application_result="$(
api_with_status POST "/api/v3/core/applications/" "$application_payload"
)"
create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')"
create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')"
if [[ "$create_application_status" =~ ^20[01]$ ]]; then
application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')"
elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e '
(.slug // [] | index("Application with this slug already exists.")) != null
or (.provider // [] | index("Application with this provider already exists.")) != null
' >/dev/null; then
application_pk="existing-duplicate"
else
printf '%s\n' "$create_application_body" >&2
echo "error: could not reconcile Authentik application ${application_slug}" >&2
exit 1
fi
fi
if [[ -z "${application_pk:-}" ]]; then
echo "error: Forgejo OIDC application did not return a primary key" >&2
exit 1
fi
# Best-effort wait (up to ~60s) for the OIDC discovery document to become
# readable; a timeout is only a warning since the config is already applied.
for _ in $(seq 1 30); do
if curl -fsS "${authentik_url}/application/o/${application_slug}/.well-known/openid-configuration" >/dev/null 2>&1; then
echo "Synced Authentik Forgejo OIDC application ${application_slug} (${application_name})."
exit 0
fi
sleep 2
done
echo "warning: Forgejo OIDC issuer document for ${application_slug} was not immediately readable; keeping reconciled config." >&2
echo "Synced Authentik Forgejo OIDC application ${application_slug} (${application_name})."

View file

@ -1,344 +0,0 @@
#!/usr/bin/env bash
# Reconcile the Linear SAML provider + application in Authentik, reusing
# flow/signing defaults from the Burrow Tailnet ("ts") OAuth2 provider.
set -euo pipefail
# Connection and naming defaults, all overridable via environment.
authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}"
bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}"
application_slug="${AUTHENTIK_LINEAR_APPLICATION_SLUG:-linear}"
application_name="${AUTHENTIK_LINEAR_APPLICATION_NAME:-Linear}"
provider_name="${AUTHENTIK_LINEAR_PROVIDER_NAME:-Linear}"
launch_url="${AUTHENTIK_LINEAR_LAUNCH_URL:-https://linear.app/burrownet}"
acs_url="${AUTHENTIK_LINEAR_ACS_URL:-}"
audience="${AUTHENTIK_LINEAR_AUDIENCE:-}"
# Issuer defaults to this deployment's SAML metadata URL for the app slug.
issuer="${AUTHENTIK_LINEAR_ISSUER:-${authentik_url}/application/saml/${application_slug}/metadata/}"
default_relay_state="${AUTHENTIK_LINEAR_DEFAULT_RELAY_STATE:-}"
# Print CLI usage to stdout.
usage() {
cat <<'EOF'
Usage: Scripts/authentik-sync-linear-saml.sh
Required environment:
  AUTHENTIK_BOOTSTRAP_TOKEN
  AUTHENTIK_LINEAR_ACS_URL
  AUTHENTIK_LINEAR_AUDIENCE
Optional environment:
  AUTHENTIK_URL
  AUTHENTIK_LINEAR_APPLICATION_SLUG
  AUTHENTIK_LINEAR_APPLICATION_NAME
  AUTHENTIK_LINEAR_PROVIDER_NAME
  AUTHENTIK_LINEAR_LAUNCH_URL
  AUTHENTIK_LINEAR_ISSUER
  AUTHENTIK_LINEAR_DEFAULT_RELAY_STATE
EOF
}
if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
usage
exit 0
fi
# Fail fast on missing required configuration.
if [[ -z "$bootstrap_token" ]]; then
echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2
exit 1
fi
if [[ -z "$acs_url" ]]; then
echo "error: AUTHENTIK_LINEAR_ACS_URL is required" >&2
exit 1
fi
if [[ -z "$audience" ]]; then
echo "error: AUTHENTIK_LINEAR_AUDIENCE is required" >&2
exit 1
fi
# Authenticated Authentik API call.
# $1: HTTP method, $2: API path, $3 (optional): JSON request body.
# Prints the response body; `curl -f` makes HTTP error statuses fatal.
api() {
  local method="$1" path="$2" body="${3:-}"
  local -a opts=(-fsS -X "$method" -H "Authorization: Bearer ${bootstrap_token}")
  if [[ -n "$body" ]]; then
    opts+=(-H "Content-Type: application/json" -d "$body")
  fi
  curl "${opts[@]}" "${authentik_url}${path}"
}
# Like api(), but tolerant of HTTP errors: prints the status code on the
# first output line, followed by the raw response body.
api_with_status() {
  local method="$1" path="$2" body="${3:-}"
  local response_file status
  response_file="$(mktemp)"
  # Remove the temp file whenever this function returns.
  trap 'rm -f "$response_file"' RETURN
  local -a opts=(
    -sS -o "$response_file" -w '%{http_code}'
    -X "$method" -H "Authorization: Bearer ${bootstrap_token}"
  )
  if [[ -n "$body" ]]; then
    opts+=(-H "Content-Type: application/json" -d "$body")
  fi
  status="$(curl "${opts[@]}" "${authentik_url}${path}")"
  printf '%s\n' "$status"
  cat "$response_file"
}
# Poll the Authentik readiness endpoint, 90 tries 2s apart (~3 minutes);
# exit the script with an error on timeout.
wait_for_authentik() {
  local attempt
  for attempt in {1..90}; do
    if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then
      return 0
    fi
    sleep 2
  done
  echo "error: Authentik did not become ready at ${authentik_url}" >&2
  exit 1
}
# Read one field from the Burrow Tailnet ("ts") OAuth2 provider, which acts
# as the template for flow and signing-key defaults.
lookup_oauth_template_field() {
  local field="$1"
  api GET "/api/v3/providers/oauth2/?page_size=200" \
    | jq -r --arg field "$field" 'first(.results[]? | select(.assigned_application_slug == "ts") | .[$field])'
}
# Create or update a SAML property mapping by name, then print its pk.
# $1: mapping name, $2: SAML attribute name, $3: friendly name,
# $4: Python expression evaluated by Authentik.
reconcile_property_mapping() {
  local name="$1" saml_name="$2" friendly_name="$3" expression="$4"
  local payload existing_pk
  payload="$(
    jq -n \
      --arg name "$name" \
      --arg saml_name "$saml_name" \
      --arg friendly_name "$friendly_name" \
      --arg expression "$expression" \
      '{name: $name, saml_name: $saml_name, friendly_name: $friendly_name, expression: $expression}'
  )"
  existing_pk="$(
    api GET "/api/v3/propertymappings/provider/saml/?page_size=200" \
      | jq -r --arg name "$name" 'first(.results[]? | select(.name == $name) | .pk)'
  )"
  if [[ -z "$existing_pk" ]]; then
    api POST "/api/v3/propertymappings/provider/saml/" "$payload" | jq -r '.pk // empty'
  else
    api PATCH "/api/v3/propertymappings/provider/saml/${existing_pk}/" "$payload" >/dev/null
    printf '%s\n' "$existing_pk"
  fi
}
wait_for_authentik
# Clone flow and signing defaults from the Tailnet template provider.
authorization_flow="$(lookup_oauth_template_field authorization_flow)"
invalidation_flow="$(lookup_oauth_template_field invalidation_flow)"
signing_kp="$(lookup_oauth_template_field signing_key)"
if [[ -z "$authorization_flow" || -z "$invalidation_flow" || -z "$signing_kp" ]]; then
echo "error: could not resolve Authentik provider defaults from Burrow Tailnet template" >&2
exit 1
fi
# Reconcile the four SAML attribute mappings Linear consumes. The $'…'
# arguments are Authentik Python expressions and must be passed verbatim.
email_mapping_pk="$(
reconcile_property_mapping \
"Burrow Linear SAML Email" \
"http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress" \
"email" \
'return request.user.email'
)"
name_mapping_pk="$(
reconcile_property_mapping \
"Burrow Linear SAML Name" \
"name" \
"name" \
'return request.user.name or request.user.username'
)"
# First name: text before the first space, falling back to the username.
first_name_mapping_pk="$(
reconcile_property_mapping \
"Burrow Linear SAML First Name" \
"firstName" \
"firstName" \
$'parts = (request.user.name or "").split(" ", 1)\nif len(parts) > 0 and parts[0]:\n return parts[0]\nreturn request.user.username'
)"
# Last name: text after the last space, falling back to the username.
last_name_mapping_pk="$(
reconcile_property_mapping \
"Burrow Linear SAML Last Name" \
"lastName" \
"lastName" \
$'parts = (request.user.name or "").rsplit(" ", 1)\nif len(parts) == 2 and parts[1]:\n return parts[1]\nreturn request.user.username'
)"
if [[ -z "$email_mapping_pk" || -z "$name_mapping_pk" || -z "$first_name_mapping_pk" || -z "$last_name_mapping_pk" ]]; then
echo "error: failed to reconcile Linear SAML property mappings" >&2
exit 1
fi
# Desired SAML provider state. The email mapping doubles as the NameID
# mapping; default_relay_state is only included when non-empty.
provider_payload="$(
jq -n \
--arg name "$provider_name" \
--arg authorization_flow "$authorization_flow" \
--arg invalidation_flow "$invalidation_flow" \
--arg acs_url "$acs_url" \
--arg audience "$audience" \
--arg issuer "$issuer" \
--arg signing_kp "$signing_kp" \
--arg default_relay_state "$default_relay_state" \
--arg name_id_mapping "$email_mapping_pk" \
--arg email_mapping "$email_mapping_pk" \
--arg name_mapping "$name_mapping_pk" \
--arg first_name_mapping "$first_name_mapping_pk" \
--arg last_name_mapping "$last_name_mapping_pk" \
'{
name: $name,
authorization_flow: $authorization_flow,
invalidation_flow: $invalidation_flow,
acs_url: $acs_url,
audience: $audience,
issuer: $issuer,
signing_kp: $signing_kp,
sign_assertion: true,
sign_response: true,
sp_binding: "post",
name_id_mapping: $name_id_mapping,
property_mappings: [
$email_mapping,
$name_mapping,
$first_name_mapping,
$last_name_mapping
]
}
+ (if $default_relay_state == "" then {} else {default_relay_state: $default_relay_state} end)'
)"
# Match an existing provider either by its assigned application or its name.
existing_provider="$(
api GET "/api/v3/providers/saml/?page_size=200" \
| jq -c \
--arg application_slug "$application_slug" \
--arg provider_name "$provider_name" \
'.results[]? | select(.assigned_application_slug == $application_slug or .name == $provider_name)' \
| head -n1
)"
# PATCH the existing provider in place, or POST a new one.
if [[ -n "$existing_provider" ]]; then
provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')"
api PATCH "/api/v3/providers/saml/${provider_pk}/" "$provider_payload" >/dev/null
else
provider_pk="$(
api POST "/api/v3/providers/saml/" "$provider_payload" \
| jq -r '.pk // empty'
)"
fi
if [[ -z "${provider_pk:-}" ]]; then
echo "error: Linear SAML provider did not return a primary key" >&2
exit 1
fi
# Desired application state, pointing at the provider reconciled above.
application_payload="$(
jq -n \
--arg name "$application_name" \
--arg slug "$application_slug" \
--arg provider "$provider_pk" \
--arg launch_url "$launch_url" \
'{
name: $name,
slug: $slug,
provider: ($provider | tonumber),
meta_launch_url: $launch_url,
open_in_new_tab: true,
policy_engine_mode: "any"
}'
)"
existing_application="$(
api GET "/api/v3/core/applications/?page_size=200" \
| jq -c --arg slug "$application_slug" '.results[]? | select(.slug == $slug)' \
| head -n1
)"
if [[ -n "$existing_application" ]]; then
# Unlike the sibling OIDC scripts, this one PATCHes the existing app
# (addressed by slug) to keep it aligned; "existing" is just a sentinel
# for the non-empty check below.
application_pk="existing"
api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null
else
# Create the application, treating a 400 caused by a duplicate slug or
# duplicate provider binding as success (another sync run raced us).
create_application_result="$(
api_with_status POST "/api/v3/core/applications/" "$application_payload"
)"
create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')"
create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')"
if [[ "$create_application_status" =~ ^20[01]$ ]]; then
application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')"
elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e '
(.slug // [] | index("Application with this slug already exists.")) != null
or (.provider // [] | index("Application with this provider already exists.")) != null
' >/dev/null; then
application_pk="existing-duplicate"
else
printf '%s\n' "$create_application_body" >&2
echo "error: could not reconcile Authentik application ${application_slug}" >&2
exit 1
fi
fi
if [[ -z "${application_pk:-}" ]]; then
echo "error: Linear SAML application did not return a primary key" >&2
exit 1
fi
# Best-effort wait (up to ~60s) for the SAML metadata endpoint to answer.
# Redirect statuses also count as "reachable" (metadata may sit behind an
# auth redirect); `|| true` keeps connection failures from killing the
# script under `set -e`. A timeout is only a warning since the config is
# already applied.
for _ in $(seq 1 30); do
metadata_status="$(
curl -sS \
-o /dev/null \
-w '%{http_code}' \
--max-redirs 0 \
"${authentik_url}/application/saml/${application_slug}/metadata/" \
|| true
)"
case "$metadata_status" in
200|301|302|307|308)
echo "Synced Authentik Linear SAML application ${application_slug} (${application_name})."
exit 0
;;
esac
sleep 2
done
echo "warning: Linear SAML metadata for ${application_slug} was not immediately readable; keeping reconciled config." >&2
echo "Synced Authentik Linear SAML application ${application_slug} (${application_name})."

View file

@ -1,311 +0,0 @@
#!/usr/bin/env bash
# Reconcile the Linear SCIM provisioning provider in Authentik: property
# mappings, the owner/admin/guest groups, the provider itself, its
# backchannel binding on the Linear application, and an initial sync.
set -euo pipefail
# Connection and naming defaults, all overridable via environment.
authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}"
bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}"
application_slug="${AUTHENTIK_LINEAR_APPLICATION_SLUG:-linear}"
provider_name="${AUTHENTIK_LINEAR_SCIM_PROVIDER_NAME:-Linear SCIM}"
scim_url="${AUTHENTIK_LINEAR_SCIM_URL:-}"
scim_token_file="${AUTHENTIK_LINEAR_SCIM_TOKEN_FILE:-}"
# Which field to send as SCIM userName: "email" or "username" (see case below).
user_identifier="${AUTHENTIK_LINEAR_SCIM_USER_IDENTIFIER:-email}"
owner_group="${AUTHENTIK_LINEAR_OWNER_GROUP:-linear-owners}"
admin_group="${AUTHENTIK_LINEAR_ADMIN_GROUP:-linear-admins}"
guest_group="${AUTHENTIK_LINEAR_GUEST_GROUP:-linear-guests}"
# Print CLI usage to stdout.
usage() {
cat <<'EOF'
Usage: Scripts/authentik-sync-linear-scim.sh
Required environment:
  AUTHENTIK_BOOTSTRAP_TOKEN
  AUTHENTIK_LINEAR_SCIM_URL
  AUTHENTIK_LINEAR_SCIM_TOKEN_FILE
Optional environment:
  AUTHENTIK_URL
  AUTHENTIK_LINEAR_APPLICATION_SLUG
  AUTHENTIK_LINEAR_SCIM_PROVIDER_NAME
  AUTHENTIK_LINEAR_SCIM_USER_IDENTIFIER
  AUTHENTIK_LINEAR_OWNER_GROUP
  AUTHENTIK_LINEAR_ADMIN_GROUP
  AUTHENTIK_LINEAR_GUEST_GROUP
EOF
}
if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
usage
exit 0
fi
# Fail fast on missing required configuration.
if [[ -z "$bootstrap_token" ]]; then
echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2
exit 1
fi
if [[ -z "$scim_url" ]]; then
echo "error: AUTHENTIK_LINEAR_SCIM_URL is required" >&2
exit 1
fi
if [[ -z "$scim_token_file" || ! -s "$scim_token_file" ]]; then
echo "error: AUTHENTIK_LINEAR_SCIM_TOKEN_FILE is required and must be readable" >&2
exit 1
fi
# Authenticated Authentik API call.
# $1: HTTP method, $2: API path, $3 (optional): JSON request body.
# Prints the response body; `curl -f` makes HTTP error statuses fatal.
api() {
  local method="$1" path="$2" body="${3:-}"
  local -a opts=(-fsS -X "$method" -H "Authorization: Bearer ${bootstrap_token}")
  if [[ -n "$body" ]]; then
    opts+=(-H "Content-Type: application/json" -d "$body")
  fi
  curl "${opts[@]}" "${authentik_url}${path}"
}
# Poll the Authentik readiness endpoint, 90 tries 2s apart (~3 minutes);
# exit the script with an error on timeout.
wait_for_authentik() {
  local attempt
  for attempt in {1..90}; do
    if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then
      return 0
    fi
    sleep 2
  done
  echo "error: Authentik did not become ready at ${authentik_url}" >&2
  exit 1
}
# Print the pk of the Authentik group with the exact given name, or nothing.
# Fix: URL-encode the search term — the original interpolated the raw name
# into the query string, which breaks for names containing spaces, '&', '+'
# or other reserved characters.
lookup_group_pk() {
  local group_name="$1"
  local encoded_name
  encoded_name="$(jq -rn --arg name "$group_name" '$name | @uri')"
  api GET "/api/v3/core/groups/?page_size=200&search=${encoded_name}" \
    | jq -r --arg name "$group_name" '.results[]? | select(.name == $name) | .pk // empty' \
    | head -n1
}
# Create the named Authentik group if missing, otherwise PATCH it in place,
# then print its pk. Exits the script when no pk can be resolved.
ensure_group() {
  local group_name="$1"
  local payload group_pk
  payload="$(jq -cn --arg name "$group_name" '{name: $name}')"
  group_pk="$(lookup_group_pk "$group_name")"
  if [[ -z "$group_pk" ]]; then
    group_pk="$(api POST "/api/v3/core/groups/" "$payload" | jq -r '.pk // empty')"
  else
    api PATCH "/api/v3/core/groups/${group_pk}/" "$payload" >/dev/null
  fi
  if [[ -z "$group_pk" ]]; then
    echo "error: could not reconcile Authentik group ${group_name}" >&2
    exit 1
  fi
  printf '%s\n' "$group_pk"
}
# Print the JSON of the application whose slug is $application_slug, if any.
lookup_application() {
  api GET "/api/v3/core/applications/?page_size=200" \
    | jq -c --arg slug "$application_slug" 'first(.results[]? | select(.slug == $slug))'
}
# Print the JSON of the SCIM provider bound (as a backchannel provider) to
# our application, or matching our provider name, if any.
lookup_scim_provider() {
  api GET "/api/v3/providers/scim/?page_size=200" \
    | jq -c \
      --arg application_slug "$application_slug" \
      --arg provider_name "$provider_name" \
      'first(.results[]? | select(.assigned_backchannel_application_slug == $application_slug or .name == $provider_name))'
}
# Print the pk of the managed SCIM property mapping with the given managed id.
lookup_scim_mapping_pk() {
  local managed_name="$1"
  api GET "/api/v3/propertymappings/provider/scim/?page_size=200" \
    | jq -r --arg managed "$managed_name" 'first(.results[]? | select(.managed == $managed) | .pk // empty)'
}
# Create or update a SCIM property mapping by name, then print its pk.
# $1: mapping name, $2: Python expression evaluated by Authentik.
reconcile_property_mapping() {
  local name="$1" expression="$2"
  local payload existing_pk
  payload="$(
    jq -n \
      --arg name "$name" \
      --arg expression "$expression" \
      '{name: $name, expression: $expression}'
  )"
  existing_pk="$(
    api GET "/api/v3/propertymappings/provider/scim/?page_size=200" \
      | jq -r --arg name "$name" 'first(.results[]? | select(.name == $name) | .pk // empty)'
  )"
  if [[ -z "$existing_pk" ]]; then
    api POST "/api/v3/propertymappings/provider/scim/" "$payload" \
      | jq -r '.pk // empty'
  else
    api PATCH "/api/v3/propertymappings/provider/scim/${existing_pk}/" "$payload" >/dev/null
    printf '%s\n' "$existing_pk"
  fi
}
# Trigger an immediate SCIM sync of a single object; failures are only a
# warning since the provider's periodic sync will catch up eventually.
# $1: provider pk, $2: model path (e.g. authentik.core.models.User), $3: pk.
sync_object() {
  local provider_pk="$1" model="$2" object_id="$3"
  local request
  request="$(
    jq -cn --arg model "$model" --arg object_id "$object_id" \
      '{sync_object_model: $model, sync_object_id: $object_id, override_dry_run: false}'
  )"
  if ! api POST "/api/v3/providers/scim/${provider_pk}/sync/object/" "$request" >/dev/null; then
    echo "warning: could not trigger immediate Linear SCIM sync for ${model} ${object_id}; provider will continue with its normal sync cycle." >&2
  fi
}
wait_for_authentik
# Reuse Authentik's managed group mapping; only the user mapping is custom.
group_mapping_pk="$(lookup_scim_mapping_pk "goauthentik.io/providers/scim/group")"
# Pick the user-mapping expression based on the configured identifier. The
# $'…' literals are Authentik Python expressions and must stay verbatim;
# the "email" variant falls back to the username when email is empty.
case "$user_identifier" in
email)
user_mapping_expression=$'# Some implementations require givenName and familyName to be set\ngivenName, familyName = request.user.name, " "\nformatted = request.user.name + " "\nif " " in request.user.name:\n givenName, _, familyName = request.user.name.partition(" ")\n formatted = request.user.name\n\navatar = request.user.avatar\nphotos = None\nif "://" in avatar:\n photos = [{"value": avatar, "type": "photo"}]\n\nlocale = request.user.locale()\nif locale == "":\n locale = None\n\nemails = []\nif request.user.email != "":\n emails = [{\n "value": request.user.email,\n "type": "other",\n "primary": True,\n }]\n\nidentifier = request.user.email\nif identifier == "":\n identifier = request.user.username\n\nreturn {\n "userName": identifier,\n "name": {\n "formatted": formatted,\n "givenName": givenName,\n "familyName": familyName,\n },\n "displayName": request.user.name,\n "photos": photos,\n "locale": locale,\n "active": request.user.is_active,\n "emails": emails,\n}'
;;
username)
user_mapping_expression=$'# Some implementations require givenName and familyName to be set\ngivenName, familyName = request.user.name, " "\nformatted = request.user.name + " "\nif " " in request.user.name:\n givenName, _, familyName = request.user.name.partition(" ")\n formatted = request.user.name\n\navatar = request.user.avatar\nphotos = None\nif "://" in avatar:\n photos = [{"value": avatar, "type": "photo"}]\n\nlocale = request.user.locale()\nif locale == "":\n locale = None\n\nemails = []\nif request.user.email != "":\n emails = [{\n "value": request.user.email,\n "type": "other",\n "primary": True,\n }]\nreturn {\n "userName": request.user.username,\n "name": {\n "formatted": formatted,\n "givenName": givenName,\n "familyName": familyName,\n },\n "displayName": request.user.name,\n "photos": photos,\n "locale": locale,\n "active": request.user.is_active,\n "emails": emails,\n}'
;;
*)
echo "error: unsupported AUTHENTIK_LINEAR_SCIM_USER_IDENTIFIER value: ${user_identifier}" >&2
exit 1
;;
esac
user_mapping_pk="$(reconcile_property_mapping "Burrow Linear SCIM User" "$user_mapping_expression")"
if [[ -z "$user_mapping_pk" || -z "$group_mapping_pk" ]]; then
echo "error: could not resolve managed Authentik SCIM property mappings" >&2
exit 1
fi
provider_payload="$(
jq -n \
--arg name "$provider_name" \
--arg url "$scim_url" \
--arg token "$(tr -d '\r\n' < "$scim_token_file")" \
--arg user_mapping_pk "$user_mapping_pk" \
--arg group_mapping_pk "$group_mapping_pk" \
--arg owner_group_pk "$owner_group_pk" \
--arg admin_group_pk "$admin_group_pk" \
--arg guest_group_pk "$guest_group_pk" \
'{
name: $name,
url: $url,
token: $token,
auth_mode: "token",
verify_certificates: true,
compatibility_mode: "default",
property_mappings: [$user_mapping_pk],
property_mappings_group: [$group_mapping_pk],
group_filters: [
$owner_group_pk,
$admin_group_pk,
$guest_group_pk
],
dry_run: false
}'
)"
existing_provider="$(lookup_scim_provider)"
if [[ -n "$existing_provider" ]]; then
provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')"
api PATCH "/api/v3/providers/scim/${provider_pk}/" "$provider_payload" >/dev/null
else
provider_pk="$(
api POST "/api/v3/providers/scim/" "$provider_payload" \
| jq -r '.pk // empty'
)"
fi
if [[ -z "${provider_pk:-}" ]]; then
echo "error: Linear SCIM provider did not return a primary key" >&2
exit 1
fi
# The Linear application must already exist (created by the SAML sync);
# attach this SCIM provider to it as a backchannel provider.
application="$(lookup_application)"
if [[ -z "$application" ]]; then
echo "error: could not resolve Authentik application ${application_slug}" >&2
exit 1
fi
# Rebuild the application payload from its current state, appending our
# provider pk to backchannel_providers (deduplicated).
application_payload="$(
printf '%s\n' "$application" \
| jq \
--arg provider_pk "$provider_pk" \
'{
name: .name,
slug: .slug,
provider: .provider,
backchannel_providers: ((.backchannel_providers // []) + [($provider_pk | tonumber)] | unique),
open_in_new_tab: .open_in_new_tab,
meta_launch_url: .meta_launch_url,
policy_engine_mode: .policy_engine_mode
}'
)"
api PATCH "/api/v3/core/applications/${application_slug}/" "$application_payload" >/dev/null
# Select every user who belongs to at least one of the three role groups;
# group pks are compared as strings on both sides.
group_pks_json="$(jq -cn --arg owner "$owner_group_pk" --arg admin "$admin_group_pk" --arg guest "$guest_group_pk" '[$owner, $admin, $guest]')"
user_pks_json="$(
api GET "/api/v3/core/users/?page_size=200" \
| jq -c \
--argjson group_pks "$group_pks_json" \
'[.results[]?
| select(
([((.groups // [])[] | tostring)] as $user_groups
| ($group_pks | map(. as $wanted | ($user_groups | index($wanted)) != null) | any))
)
| .pk]'
)"
# Kick off an immediate sync for each relevant group and user.
while IFS= read -r group_pk; do
[[ -z "$group_pk" ]] && continue
sync_object "$provider_pk" "authentik.core.models.Group" "$group_pk"
done < <(printf '%s\n' "$group_pks_json" | jq -r '.[]')
while IFS= read -r user_pk; do
[[ -z "$user_pk" ]] && continue
sync_object "$provider_pk" "authentik.core.models.User" "$user_pk"
done < <(printf '%s\n' "$user_pks_json" | jq -r '.[]')
# Best-effort status probe; an unreadable status is only a warning.
status_json="$(api GET "/api/v3/providers/scim/${provider_pk}/sync/status/" || true)"
if ! printf '%s\n' "$status_json" | jq -e 'has("last_sync_status")' >/dev/null 2>&1; then
echo "warning: could not read Linear SCIM sync status for provider ${provider_pk}; keeping reconciled configuration." >&2
fi
echo "Synced Authentik Linear SCIM provider ${provider_name} (${provider_pk}) with groups ${owner_group}, ${admin_group}, ${guest_group}."

View file

@ -1,309 +0,0 @@
#!/usr/bin/env bash
# Reconcile the Authentik authentication flow used for Tailnet logins:
# identification + password + user-login stages bound to one flow, then
# attached to every matching OAuth2 provider.
set -euo pipefail

# Connection and naming defaults; every value can be overridden via env.
authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}"
bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}"
provider_slug="${AUTHENTIK_TAILNET_PROVIDER_SLUG:-ts}"
provider_slugs_json="${AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON:-}"
authentication_flow_name="${AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME:-Burrow Tailnet Authentication}"
authentication_flow_slug="${AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG:-burrow-tailnet-authentication}"
identification_stage_name="${AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME:-burrow-tailnet-identification-stage}"
password_stage_name="${AUTHENTIK_TAILNET_PASSWORD_STAGE_NAME:-burrow-tailnet-password-stage}"
user_login_stage_name="${AUTHENTIK_TAILNET_USER_LOGIN_STAGE_NAME:-burrow-tailnet-user-login-stage}"
google_source_slug="${AUTHENTIK_TAILNET_GOOGLE_SOURCE_SLUG:-google}"

# Print usage help (heredoc content is emitted verbatim).
usage() {
  cat <<'EOF'
Usage: Scripts/authentik-sync-tailnet-auth-flow.sh
Required environment:
AUTHENTIK_BOOTSTRAP_TOKEN
Optional environment:
AUTHENTIK_URL
AUTHENTIK_TAILNET_PROVIDER_SLUG
AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON
AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME
AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG
AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME
AUTHENTIK_TAILNET_PASSWORD_STAGE_NAME
AUTHENTIK_TAILNET_USER_LOGIN_STAGE_NAME
AUTHENTIK_TAILNET_GOOGLE_SOURCE_SLUG
EOF
}

if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
  usage
  exit 0
fi
if [[ -z "$bootstrap_token" ]]; then
  echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2
  exit 1
fi
# Either accept an explicit JSON array of provider slugs, or wrap the single
# default slug into a one-element array.
if [[ -n "$provider_slugs_json" ]]; then
  if ! printf '%s' "$provider_slugs_json" | jq -e 'type == "array" and length > 0 and all(.[]; type == "string" and length > 0)' >/dev/null; then
    echo "error: AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON must be a non-empty JSON array of strings" >&2
    exit 1
  fi
else
  provider_slugs_json="$(jq -cn --arg slug "$provider_slug" '[$slug]')"
fi
# Minimal authenticated JSON wrapper around curl for the Authentik API.
#   $1 = HTTP method, $2 = API path, $3 = optional JSON request body.
# Fails (non-zero) on HTTP errors thanks to curl -f.
api() {
  local http_method="$1"
  local api_path="$2"
  local body="${3:-}"
  local curl_args=(
    -fsS
    -X "$http_method"
    -H "Authorization: Bearer ${bootstrap_token}"
  )
  if [[ -n "$body" ]]; then
    curl_args+=(-H "Content-Type: application/json" -d "$body")
  fi
  curl "${curl_args[@]}" "${authentik_url}${api_path}"
}
# Poll the readiness endpoint (90 tries, 2s apart) before touching the API;
# exits the script with an error if Authentik never becomes ready.
wait_for_authentik() {
  local attempt
  for attempt in $(seq 1 90); do
    curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1 && return 0
    sleep 2
  done
  echo "error: Authentik did not become ready at ${authentik_url}" >&2
  exit 1
}
# Print the first stage object (compact JSON) from the given stages list
# endpoint whose .name equals the requested name; empty output when absent.
lookup_stage_by_name() {
  local path="$1"
  local name="$2"
  api GET "${path}?page_size=200" \
    | jq -c --arg name "$name" '.results[]? | select(.name == $name)' \
    | head -n1
}
# Resolve a flow slug to its pk; prints nothing when the flow does not exist.
# Bug fix: the previous filter `select(.slug != null)` accepted ANY returned
# flow, so if the server-side ?slug= filter ever returned extra or unfiltered
# results the wrong flow pk was used. Match the requested slug explicitly,
# mirroring lookup_source_pk below.
lookup_flow_pk() {
  local slug="$1"
  api GET "/api/v3/flows/instances/?slug=${slug}" \
    | jq -r --arg slug "$slug" '.results[]? | select(.slug == $slug) | .pk // empty' \
    | head -n1
}
# Resolve an OAuth source slug to its pk; empty output when absent.
lookup_source_pk() {
  local slug="$1"
  api GET "/api/v3/sources/oauth/?page_size=200&slug=${slug}" \
    | jq -r --arg slug "$slug" '.results[]? | select(.slug == $slug) | .pk // empty' \
    | head -n1
}
# Create or update the password stage (matched by name) and print its pk.
ensure_password_stage() {
  local existing payload stage_pk
  existing="$(lookup_stage_by_name "/api/v3/stages/password/" "$password_stage_name")"
  payload="$(
    jq -cn \
      --arg name "$password_stage_name" \
      '{
        name: $name,
        backends: [
          "authentik.core.auth.InbuiltBackend",
          "authentik.core.auth.TokenBackend"
        ],
        allow_show_password: false,
        failed_attempts_before_cancel: 5
      }'
  )"
  if [[ -n "$existing" ]]; then
    stage_pk="$(printf '%s\n' "$existing" | jq -r '.pk')"
    api PATCH "/api/v3/stages/password/${stage_pk}/" "$payload" >/dev/null
  else
    stage_pk="$(
      api POST "/api/v3/stages/password/" "$payload" \
        | jq -r '.pk // empty'
    )"
  fi
  printf '%s\n' "$stage_pk"
}
# Create or update the identification stage; wires in the password stage and
# (optionally) the Google source. Prints the stage pk.
#   $1 = password stage pk, $2 = Google source pk ("" to omit the source)
ensure_identification_stage() {
  local password_stage_pk="$1"
  local google_source_pk="$2"
  local existing payload stage_pk sources_json
  existing="$(lookup_stage_by_name "/api/v3/stages/identification/" "$identification_stage_name")"
  if [[ -n "$google_source_pk" ]]; then
    sources_json="$(jq -cn --arg source "$google_source_pk" '[$source]')"
  else
    sources_json='[]'
  fi
  payload="$(
    jq -cn \
      --arg name "$identification_stage_name" \
      --arg password_stage "$password_stage_pk" \
      --argjson sources "$sources_json" \
      '{
        name: $name,
        user_fields: ["username", "email"],
        password_stage: $password_stage,
        case_insensitive_matching: true,
        show_matched_user: true,
        sources: $sources,
        show_source_labels: true,
        pretend_user_exists: false,
        enable_remember_me: false
      }'
  )"
  if [[ -n "$existing" ]]; then
    stage_pk="$(printf '%s\n' "$existing" | jq -r '.pk')"
    api PATCH "/api/v3/stages/identification/${stage_pk}/" "$payload" >/dev/null
  else
    stage_pk="$(
      api POST "/api/v3/stages/identification/" "$payload" \
        | jq -r '.pk // empty'
    )"
  fi
  printf '%s\n' "$stage_pk"
}
# Create or update the user-login stage (12h sessions, no session binding)
# and print its pk.
ensure_user_login_stage() {
  local existing payload stage_pk
  existing="$(lookup_stage_by_name "/api/v3/stages/user_login/" "$user_login_stage_name")"
  payload="$(
    jq -cn \
      --arg name "$user_login_stage_name" \
      '{
        name: $name,
        session_duration: "hours=12",
        terminate_other_sessions: false,
        remember_me_offset: "seconds=0",
        network_binding: "no_binding",
        geoip_binding: "no_binding"
      }'
  )"
  if [[ -n "$existing" ]]; then
    stage_pk="$(printf '%s\n' "$existing" | jq -r '.pk')"
    api PATCH "/api/v3/stages/user_login/${stage_pk}/" "$payload" >/dev/null
  else
    stage_pk="$(
      api POST "/api/v3/stages/user_login/" "$payload" \
        | jq -r '.pk // empty'
    )"
  fi
  printf '%s\n' "$stage_pk"
}
# Create or update the authentication flow and print its pk.
ensure_authentication_flow() {
  local existing_pk payload
  existing_pk="$(lookup_flow_pk "$authentication_flow_slug")"
  payload="$(
    jq -cn \
      --arg name "$authentication_flow_name" \
      --arg slug "$authentication_flow_slug" \
      '{
        name: $name,
        title: $name,
        slug: $slug,
        designation: "authentication",
        policy_engine_mode: "any",
        layout: "stacked"
      }'
  )"
  if [[ -n "$existing_pk" ]]; then
    # Flow updates are addressed by slug, not pk.
    api PATCH "/api/v3/flows/instances/${authentication_flow_slug}/" "$payload" >/dev/null
    printf '%s\n' "$existing_pk"
  else
    api POST "/api/v3/flows/instances/" "$payload" \
      | jq -r '.pk // empty'
  fi
}
# Idempotently bind a stage to a flow at the given order.
#   $1 = flow pk, $2 = stage pk, $3 = numeric order
ensure_flow_binding() {
  local flow_pk="$1" stage_pk="$2" order="$3"
  local current payload pk
  current="$(
    api GET "/api/v3/flows/bindings/?target=${flow_pk}&stage=${stage_pk}&page_size=200" \
      | jq -c '.results[]?' \
      | head -n1
  )"
  payload="$(
    jq -cn \
      --arg target "$flow_pk" \
      --arg stage "$stage_pk" \
      --argjson order "$order" \
      '{
        target: $target,
        stage: $stage,
        order: $order,
        policy_engine_mode: "any"
      }'
  )"
  if [[ -z "$current" ]]; then
    api POST "/api/v3/flows/bindings/" "$payload" >/dev/null
  else
    pk="$(printf '%s\n' "$current" | jq -r '.pk')"
    api PATCH "/api/v3/flows/bindings/${pk}/" "$payload" >/dev/null
  fi
}
wait_for_authentik
# Collect pks of every OAuth2 provider whose assigned application slug (or
# own slug) appears in the configured list.
mapfile -t provider_pks < <(
  api GET "/api/v3/providers/oauth2/?page_size=200" \
    | jq -r --argjson provider_slugs "$provider_slugs_json" '
      .results[]?
      | select(
        ((.assigned_application_slug // empty) as $assigned | ($provider_slugs | index($assigned)) != null)
        or ((.slug // empty) as $slug | ($provider_slugs | index($slug)) != null)
      )
      | .pk // empty
    '
)
if [[ "${#provider_pks[@]}" -eq 0 ]]; then
  echo "error: could not resolve any Authentik Tailnet OAuth providers from ${provider_slugs_json}" >&2
  exit 1
fi
# The Google source is optional: an empty pk simply leaves it off the stage.
google_source_pk="$(lookup_source_pk "$google_source_slug" || true)"
password_stage_pk="$(ensure_password_stage)"
identification_stage_pk="$(ensure_identification_stage "$password_stage_pk" "$google_source_pk")"
user_login_stage_pk="$(ensure_user_login_stage)"
authentication_flow_pk="$(ensure_authentication_flow)"
# Orders 10 and 30 leave a gap — presumably for inserting extra stages (e.g.
# MFA) at order 20 later; TODO confirm.
ensure_flow_binding "$authentication_flow_pk" "$identification_stage_pk" 10
ensure_flow_binding "$authentication_flow_pk" "$user_login_stage_pk" 30
# Point every matched provider at the reconciled flow.
for provider_pk in "${provider_pks[@]}"; do
  api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$(
    jq -cn --arg flow "$authentication_flow_pk" '{authentication_flow: $flow}'
  )" >/dev/null
done
echo "Synced Burrow Tailnet authentication flow for providers ${provider_slugs_json}."

View file

@ -1,369 +0,0 @@
#!/usr/bin/env bash
# Reconcile the Authentik OIDC provider + application used by Tailscale SSO.
set -euo pipefail

# Connection and naming defaults; all overridable via the environment.
authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}"
bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}"
application_slug="${AUTHENTIK_TAILSCALE_APPLICATION_SLUG:-tailscale}"
application_name="${AUTHENTIK_TAILSCALE_APPLICATION_NAME:-Tailscale}"
provider_name="${AUTHENTIK_TAILSCALE_PROVIDER_NAME:-Tailscale}"
template_slug="${AUTHENTIK_TAILSCALE_TEMPLATE_SLUG:-ts}"
client_id="${AUTHENTIK_TAILSCALE_CLIENT_ID:-tailscale.burrow.net}"
client_secret="${AUTHENTIK_TAILSCALE_CLIENT_SECRET:-}"
launch_url="${AUTHENTIK_TAILSCALE_LAUNCH_URL:-https://login.tailscale.com/start/oidc}"
access_group="${AUTHENTIK_TAILSCALE_ACCESS_GROUP:-}"
default_external_application_slug="${AUTHENTIK_DEFAULT_EXTERNAL_APPLICATION_SLUG:-}"
# Default redirect URI list; the fallback is itself a JSON array literal.
redirect_uris_json="${AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON:-[
\"https://login.tailscale.com/a/oauth_response\"
]}"

usage() {
  cat <<'EOF'
Usage: Scripts/authentik-sync-tailscale-oidc.sh
Required environment:
AUTHENTIK_BOOTSTRAP_TOKEN
AUTHENTIK_TAILSCALE_CLIENT_SECRET
Optional environment:
AUTHENTIK_URL
AUTHENTIK_TAILSCALE_APPLICATION_SLUG
AUTHENTIK_TAILSCALE_APPLICATION_NAME
AUTHENTIK_TAILSCALE_PROVIDER_NAME
AUTHENTIK_TAILSCALE_TEMPLATE_SLUG
AUTHENTIK_TAILSCALE_CLIENT_ID
AUTHENTIK_TAILSCALE_LAUNCH_URL
AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON
AUTHENTIK_TAILSCALE_ACCESS_GROUP
AUTHENTIK_DEFAULT_EXTERNAL_APPLICATION_SLUG
EOF
}

if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
  usage
  exit 0
fi
if [[ -z "$bootstrap_token" ]]; then
  echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2
  exit 1
fi
# An empty or "PENDING…" secret means Tailscale is not configured yet: exit
# successfully so callers can run this script unconditionally.
if [[ -z "$client_secret" || "$client_secret" == PENDING* ]]; then
  echo "Tailscale OIDC client secret is not configured; skipping Authentik Tailscale sync." >&2
  exit 0
fi
if ! printf '%s' "$redirect_uris_json" | jq -e 'type == "array" and length > 0' >/dev/null; then
  echo "error: AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON must be a non-empty JSON array" >&2
  exit 1
fi
# Authenticated curl wrapper: api METHOD PATH [JSON_BODY].
# Fails (non-zero) on HTTP error statuses via curl -f.
api() {
  local method="$1"
  local path="$2"
  local data="${3:-}"
  if [[ -n "$data" ]]; then
    curl -fsS \
      -X "$method" \
      -H "Authorization: Bearer ${bootstrap_token}" \
      -H "Content-Type: application/json" \
      -d "$data" \
      "${authentik_url}${path}"
  else
    curl -fsS \
      -X "$method" \
      -H "Authorization: Bearer ${bootstrap_token}" \
      "${authentik_url}${path}"
  fi
}
# Like api(), but does not fail on HTTP error statuses: prints the HTTP
# status code on the first line of output and the response body after it.
api_with_status() {
  local method="$1"
  local path="$2"
  local data="${3:-}"
  local response_file status
  response_file="$(mktemp)"
  # Remove the temp file when this function returns (RETURN trap).
  trap 'rm -f "$response_file"' RETURN
  if [[ -n "$data" ]]; then
    status="$(
      curl -sS \
        -o "$response_file" \
        -w '%{http_code}' \
        -X "$method" \
        -H "Authorization: Bearer ${bootstrap_token}" \
        -H "Content-Type: application/json" \
        -d "$data" \
        "${authentik_url}${path}"
    )"
  else
    status="$(
      curl -sS \
        -o "$response_file" \
        -w '%{http_code}' \
        -X "$method" \
        -H "Authorization: Bearer ${bootstrap_token}" \
        "${authentik_url}${path}"
    )"
  fi
  printf '%s\n' "$status"
  cat "$response_file"
}
# Poll the readiness endpoint (90 tries, 2s apart); exit on timeout.
wait_for_authentik() {
  for _ in $(seq 1 90); do
    if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then
      return 0
    fi
    sleep 2
  done
  echo "error: Authentik did not become ready at ${authentik_url}" >&2
  exit 1
}
wait_for_authentik
# Map a group name to its pk via the paginated group list; empty when absent.
lookup_group_pk() {
  local wanted_name="$1"
  local listing
  listing="$(api GET "/api/v3/core/groups/?page_size=200")"
  printf '%s\n' "$listing" \
    | jq -r --arg group_name "$wanted_name" '.results[]? | select(.name == $group_name) | .pk // empty' \
    | head -n1
}
# Resolve an application slug to its pk. Tries the paginated list first and
# falls back to fetching the application directly by slug (covers the case
# where the listing page misses it).
lookup_application_pk() {
  local slug="$1"
  local application_pk lookup_result lookup_status
  application_pk="$(
    api GET "/api/v3/core/applications/?page_size=200" \
      | jq -r --arg slug "$slug" '.results[]? | select(.slug == $slug) | .pk // empty' \
      | head -n1
  )"
  if [[ -n "$application_pk" ]]; then
    printf '%s\n' "$application_pk"
    return 0
  fi
  # Direct lookup; api_with_status puts the HTTP code on line 1, body after.
  lookup_result="$(api_with_status GET "/api/v3/core/applications/${slug}/")"
  lookup_status="$(printf '%s\n' "$lookup_result" | sed -n '1p')"
  if [[ "$lookup_status" =~ ^20[01]$ ]]; then
    printf '%s\n' "$lookup_result" | sed '1d' | jq -r '.pk // empty'
  fi
}
# Idempotently bind an access-policy group to an application.
#   $1 = application slug, $2 = group name
ensure_application_group_binding() {
  local application_slug="$1"
  local group_name="$2"
  local application_pk group_pk existing payload binding_pk
  application_pk="$(lookup_application_pk "$application_slug")"
  if [[ -z "$application_pk" ]]; then
    # Best-effort: a missing application only warns, it does not fail.
    echo "warning: could not resolve Authentik application ${application_slug}; skipping application group binding" >&2
    return 0
  fi
  group_pk="$(lookup_group_pk "$group_name")"
  if [[ -z "$group_pk" ]]; then
    echo "error: could not resolve Authentik group ${group_name}" >&2
    exit 1
  fi
  existing="$(
    api GET "/api/v3/policies/bindings/?page_size=200&target=${application_pk}" \
      | jq -c --arg group_pk "$group_pk" '.results[]? | select(.group == $group_pk)' \
      | head -n1
  )"
  payload="$(
    jq -cn \
      --arg target "$application_pk" \
      --arg group "$group_pk" \
      '{
        group: $group,
        target: $target,
        negate: false,
        enabled: true,
        order: 100,
        timeout: 30,
        failure_result: false
      }'
  )"
  if [[ -n "$existing" ]]; then
    binding_pk="$(printf '%s\n' "$existing" | jq -r '.pk')"
    api PATCH "/api/v3/policies/bindings/${binding_pk}/" "$payload" >/dev/null
  else
    api POST "/api/v3/policies/bindings/" "$payload" >/dev/null
  fi
}
# Point the default Authentik brand's default application at the given app.
ensure_default_external_application() {
  local application_slug="$1"
  local application_pk default_brand brand_payload
  application_pk="$(lookup_application_pk "$application_slug")"
  if [[ -z "$application_pk" ]]; then
    echo "error: could not resolve Authentik application ${application_slug} for brand default application" >&2
    exit 1
  fi
  default_brand="$(
    api GET "/api/v3/core/brands/?page_size=200" \
      | jq -c '.results[]? | select(.default == true)' \
      | head -n1
  )"
  if [[ -z "$default_brand" ]]; then
    echo "warning: could not resolve the default Authentik brand; skipping external default application" >&2
    return 0
  fi
  brand_payload="$(
    printf '%s\n' "$default_brand" \
      | jq --arg application_pk "$application_pk" '.default_application = $application_pk'
  )"
  # PUT the whole (modified) brand object back, addressed by its uuid.
  api PUT "/api/v3/core/brands/$(printf '%s\n' "$default_brand" | jq -r '.brand_uuid')/" "$brand_payload" >/dev/null
}
# Copy flow, property-mapping and signing defaults from the provider whose
# assigned application slug is $template_slug (default "ts").
template_provider="$(
  api GET "/api/v3/providers/oauth2/?page_size=200" \
    | jq -c --arg template_slug "$template_slug" '.results[]? | select(.assigned_application_slug == $template_slug)' \
    | head -n1
)"
if [[ -z "$template_provider" ]]; then
  echo "error: could not resolve the Authentik OAuth provider template ${template_slug}" >&2
  exit 1
fi
authorization_flow="$(printf '%s\n' "$template_provider" | jq -r '.authorization_flow')"
invalidation_flow="$(printf '%s\n' "$template_provider" | jq -r '.invalidation_flow')"
property_mappings="$(printf '%s\n' "$template_provider" | jq -c '.property_mappings')"
signing_key="$(printf '%s\n' "$template_provider" | jq -r '.signing_key')"
provider_payload="$(
  jq -n \
    --arg name "$provider_name" \
    --arg authorization_flow "$authorization_flow" \
    --arg invalidation_flow "$invalidation_flow" \
    --arg client_id "$client_id" \
    --arg client_secret "$client_secret" \
    --arg signing_key "$signing_key" \
    --argjson property_mappings "$property_mappings" \
    --argjson redirect_uris "$redirect_uris_json" \
    '{
      name: $name,
      authorization_flow: $authorization_flow,
      invalidation_flow: $invalidation_flow,
      client_type: "confidential",
      client_id: $client_id,
      client_secret: $client_secret,
      include_claims_in_id_token: true,
      redirect_uris: ($redirect_uris | map({matching_mode: "strict", url: .})),
      property_mappings: $property_mappings,
      signing_key: $signing_key,
      issuer_mode: "per_provider",
      sub_mode: "hashed_user_id"
    }'
)"
# Match an existing provider by its assigned application slug or its name.
existing_provider="$(
  api GET "/api/v3/providers/oauth2/?page_size=200" \
    | jq -c \
      --arg application_slug "$application_slug" \
      --arg provider_name "$provider_name" \
      '.results[]? | select(.assigned_application_slug == $application_slug or .name == $provider_name)' \
    | head -n1
)"
if [[ -n "$existing_provider" ]]; then
  provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')"
  api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$provider_payload" >/dev/null
else
  provider_pk="$(
    api POST "/api/v3/providers/oauth2/" "$provider_payload" \
      | jq -r '.pk // empty'
  )"
fi
if [[ -z "${provider_pk:-}" ]]; then
  echo "error: Tailscale OIDC provider did not return a primary key" >&2
  exit 1
fi
# Reconcile the Tailscale application object that fronts the provider.
application_payload="$(
  jq -n \
    --arg name "$application_name" \
    --arg slug "$application_slug" \
    --arg provider "$provider_pk" \
    --arg launch_url "$launch_url" \
    '{
      name: $name,
      slug: $slug,
      provider: ($provider | tonumber),
      meta_launch_url: $launch_url,
      open_in_new_tab: true,
      policy_engine_mode: "any"
    }'
)"
existing_application="$(
  api GET "/api/v3/core/applications/?page_size=200" \
    | jq -c --arg slug "$application_slug" '.results[]? | select(.slug == $slug)' \
    | head -n1
)"
if [[ -n "$existing_application" ]]; then
  application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')"
  api PATCH "/api/v3/core/applications/${application_pk}/" "$application_payload" >/dev/null
else
  create_application_result="$(
    api_with_status POST "/api/v3/core/applications/" "$application_payload"
  )"
  create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')"
  create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')"
  if [[ "$create_application_status" =~ ^20[01]$ ]]; then
    application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')"
  elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e '
    (.slug // [] | index("Application with this slug already exists.")) != null
    or (.provider // [] | index("Application with this provider already exists.")) != null
  ' >/dev/null; then
    # A concurrent or earlier run already created it; treat as success.
    application_pk="existing-duplicate"
  else
    printf '%s\n' "$create_application_body" >&2
    echo "error: could not reconcile Authentik application ${application_slug}" >&2
    exit 1
  fi
fi
if [[ -z "${application_pk:-}" ]]; then
  echo "error: Tailscale OIDC application did not return a primary key" >&2
  exit 1
fi
if [[ -n "$access_group" ]]; then
  ensure_application_group_binding "$application_slug" "$access_group"
fi
if [[ -n "$default_external_application_slug" ]]; then
  ensure_default_external_application "$default_external_application_slug"
fi
# Smoke-check the OIDC issuer document; slow appearance is only a warning.
for _ in $(seq 1 30); do
  if curl -fsS "${authentik_url}/application/o/${application_slug}/.well-known/openid-configuration" >/dev/null 2>&1; then
    echo "Synced Authentik Tailscale OIDC application ${application_slug} (${application_name})."
    exit 0
  fi
  sleep 2
done
echo "warning: Tailscale OIDC issuer document for ${application_slug} was not immediately readable; keeping reconciled config." >&2
echo "Synced Authentik Tailscale OIDC application ${application_slug} (${application_name})."

View file

@ -1,412 +0,0 @@
#!/usr/bin/env bash
# Reconcile the Authentik SAML provider + application used by Zulip.
set -euo pipefail

# Connection and naming defaults; all overridable via the environment.
authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}"
bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}"
application_slug="${AUTHENTIK_ZULIP_APPLICATION_SLUG:-zulip}"
application_name="${AUTHENTIK_ZULIP_APPLICATION_NAME:-Zulip}"
provider_name="${AUTHENTIK_ZULIP_PROVIDER_NAME:-Zulip}"
acs_url="${AUTHENTIK_ZULIP_ACS_URL:-https://chat.burrow.net/complete/saml/}"
audience="${AUTHENTIK_ZULIP_AUDIENCE:-https://chat.burrow.net}"
launch_url="${AUTHENTIK_ZULIP_LAUNCH_URL:-https://chat.burrow.net/}"
access_group="${AUTHENTIK_ZULIP_ACCESS_GROUP:-}"
admin_group="${AUTHENTIK_ZULIP_ADMIN_GROUP:-}"
issuer="${AUTHENTIK_ZULIP_ISSUER:-$authentik_url}"

usage() {
  cat <<'EOF'
Usage: Scripts/authentik-sync-zulip-saml.sh
Required environment:
AUTHENTIK_BOOTSTRAP_TOKEN
Optional environment:
AUTHENTIK_URL
AUTHENTIK_ZULIP_APPLICATION_SLUG
AUTHENTIK_ZULIP_APPLICATION_NAME
AUTHENTIK_ZULIP_PROVIDER_NAME
AUTHENTIK_ZULIP_ACS_URL
AUTHENTIK_ZULIP_AUDIENCE
AUTHENTIK_ZULIP_LAUNCH_URL
AUTHENTIK_ZULIP_ACCESS_GROUP
AUTHENTIK_ZULIP_ADMIN_GROUP
AUTHENTIK_ZULIP_ISSUER
EOF
}

if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
  usage
  exit 0
fi
if [[ -z "$bootstrap_token" ]]; then
  echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2
  exit 1
fi
# Authenticated curl wrapper: api METHOD PATH [JSON_BODY].
api() {
  local method="$1"
  local path="$2"
  local data="${3:-}"
  if [[ -n "$data" ]]; then
    curl -fsS \
      -X "$method" \
      -H "Authorization: Bearer ${bootstrap_token}" \
      -H "Content-Type: application/json" \
      -d "$data" \
      "${authentik_url}${path}"
  else
    curl -fsS \
      -X "$method" \
      -H "Authorization: Bearer ${bootstrap_token}" \
      "${authentik_url}${path}"
  fi
}
# Like api(), but tolerant of HTTP errors: prints the status code on the
# first output line, then the response body.
api_with_status() {
  local method="$1"
  local path="$2"
  local data="${3:-}"
  local response_file status
  response_file="$(mktemp)"
  # RETURN trap removes the temp file when the function exits.
  trap 'rm -f "$response_file"' RETURN
  if [[ -n "$data" ]]; then
    status="$(
      curl -sS \
        -o "$response_file" \
        -w '%{http_code}' \
        -X "$method" \
        -H "Authorization: Bearer ${bootstrap_token}" \
        -H "Content-Type: application/json" \
        -d "$data" \
        "${authentik_url}${path}"
    )"
  else
    status="$(
      curl -sS \
        -o "$response_file" \
        -w '%{http_code}' \
        -X "$method" \
        -H "Authorization: Bearer ${bootstrap_token}" \
        "${authentik_url}${path}"
    )"
  fi
  printf '%s\n' "$status"
  cat "$response_file"
}
# Poll the readiness endpoint (90 tries, 2s apart); exit on timeout.
wait_for_authentik() {
  for _ in $(seq 1 90); do
    if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then
      return 0
    fi
    sleep 2
  done
  echo "error: Authentik did not become ready at ${authentik_url}" >&2
  exit 1
}
# Read one field from the OAuth2 provider template.
# Generalized: the template application slug may be passed as an optional
# second argument; it defaults to "ts" (the previously hard-coded value), so
# all existing callers behave exactly as before.
lookup_oauth_template_field() {
  local field="$1"
  local template_slug="${2:-ts}"
  api GET "/api/v3/providers/oauth2/?page_size=200" \
    | jq -r --arg field "$field" --arg slug "$template_slug" \
      '.results[]? | select(.assigned_application_slug == $slug) | .[$field]' \
    | head -n1
}
# Map a group name to its pk; empty output when absent.
lookup_group_pk() {
  local group_name="$1"
  api GET "/api/v3/core/groups/?page_size=200" \
    | jq -r --arg group_name "$group_name" '.results[]? | select(.name == $group_name) | .pk // empty' \
    | head -n1
}
# Map an application slug to its pk via the paginated list endpoint;
# prints nothing when no application carries the slug.
lookup_application_pk() {
  local wanted_slug="$1"
  local listing
  listing="$(api GET "/api/v3/core/applications/?page_size=200")"
  printf '%s\n' "$listing" \
    | jq -r --arg slug "$wanted_slug" '.results[]? | select(.slug == $slug) | .pk // empty' \
    | head -n1
}
# Idempotently bind an access-policy group to an application.
#   $1 = application slug, $2 = group name
ensure_application_group_binding() {
  local application_slug="$1"
  local group_name="$2"
  local application_pk group_pk existing payload binding_pk
  application_pk="$(lookup_application_pk "$application_slug")"
  if [[ -z "$application_pk" ]]; then
    # Best-effort: a missing application only warns, it does not fail.
    echo "warning: could not resolve Authentik application ${application_slug}; skipping application group binding" >&2
    return 0
  fi
  group_pk="$(lookup_group_pk "$group_name")"
  if [[ -z "$group_pk" ]]; then
    echo "error: could not resolve Authentik group ${group_name}" >&2
    exit 1
  fi
  existing="$(
    api GET "/api/v3/policies/bindings/?page_size=200&target=${application_pk}" \
      | jq -c --arg group_pk "$group_pk" '.results[]? | select(.group == $group_pk)' \
      | head -n1
  )"
  payload="$(
    jq -cn \
      --arg target "$application_pk" \
      --arg group "$group_pk" \
      '{
        group: $group,
        target: $target,
        negate: false,
        enabled: true,
        order: 100,
        timeout: 30,
        failure_result: false
      }'
  )"
  if [[ -n "$existing" ]]; then
    binding_pk="$(printf '%s\n' "$existing" | jq -r '.pk')"
    api PATCH "/api/v3/policies/bindings/${binding_pk}/" "$payload" >/dev/null
  else
    api POST "/api/v3/policies/bindings/" "$payload" >/dev/null
  fi
}
# Create or update a SAML property mapping (matched by name) and print its pk.
#   $1 = mapping name, $2 = SAML attribute name, $3 = friendly name,
#   $4 = Authentik expression (Python snippet evaluated server-side)
reconcile_property_mapping() {
  local name="$1"
  local saml_name="$2"
  local friendly_name="$3"
  local expression="$4"
  local payload existing_pk
  payload="$(
    jq -n \
      --arg name "$name" \
      --arg saml_name "$saml_name" \
      --arg friendly_name "$friendly_name" \
      --arg expression "$expression" \
      '{
        name: $name,
        saml_name: $saml_name,
        friendly_name: $friendly_name,
        expression: $expression
      }'
  )"
  existing_pk="$(
    api GET "/api/v3/propertymappings/provider/saml/?page_size=200" \
      | jq -r --arg name "$name" '.results[]? | select(.name == $name) | .pk' \
      | head -n1
  )"
  if [[ -n "$existing_pk" ]]; then
    api PATCH "/api/v3/propertymappings/provider/saml/${existing_pk}/" "$payload" >/dev/null
    printf '%s\n' "$existing_pk"
  else
    api POST "/api/v3/propertymappings/provider/saml/" "$payload" | jq -r '.pk // empty'
  fi
}
wait_for_authentik
# Borrow flow and signing defaults from the Tailnet ("ts") OAuth template.
authorization_flow="$(lookup_oauth_template_field authorization_flow)"
invalidation_flow="$(lookup_oauth_template_field invalidation_flow)"
signing_kp="$(lookup_oauth_template_field signing_key)"
if [[ -z "$authorization_flow" || -z "$invalidation_flow" || -z "$signing_kp" ]]; then
  echo "error: could not resolve Authentik provider defaults from Burrow Tailnet template" >&2
  exit 1
fi
# SAML attribute mappings; the expressions are Python snippets that Authentik
# evaluates per assertion.
email_mapping_pk="$(
  reconcile_property_mapping \
    "Burrow Zulip SAML Email" \
    "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress" \
    "email" \
    'return request.user.email'
)"
name_mapping_pk="$(
  reconcile_property_mapping \
    "Burrow Zulip SAML Name" \
    "name" \
    "name" \
    'return request.user.name or request.user.username'
)"
first_name_mapping_pk="$(
  reconcile_property_mapping \
    "Burrow Zulip SAML First Name" \
    "firstName" \
    "firstName" \
    $'parts = (request.user.name or "").split(" ", 1)\nif len(parts) > 0 and parts[0]:\n return parts[0]\nreturn request.user.username'
)"
last_name_mapping_pk="$(
  reconcile_property_mapping \
    "Burrow Zulip SAML Last Name" \
    "lastName" \
    "lastName" \
    $'parts = (request.user.name or "").rsplit(" ", 1)\nif len(parts) == 2 and parts[1]:\n return parts[1]\nreturn request.user.username'
)"
role_mapping_pk=""
if [[ -n "$admin_group" ]]; then
  # NOTE(review): $admin_group is spliced directly into the Python expression;
  # a group name containing a double quote or backslash would break it —
  # confirm group names are constrained upstream.
  role_mapping_pk="$(
    reconcile_property_mapping \
      "Burrow Zulip SAML Role" \
      "zulip_role" \
      "zulip_role" \
      $'admin_group = "'$admin_group$'"\nif any(group.name == admin_group for group in request.user.ak_groups.all()):\n return "owner"\nreturn None'
  )"
fi
if [[ -z "$email_mapping_pk" || -z "$name_mapping_pk" || -z "$first_name_mapping_pk" || -z "$last_name_mapping_pk" ]]; then
  echo "error: failed to reconcile Zulip SAML property mappings" >&2
  exit 1
fi
# Build the SAML provider payload. The email mapping doubles as the NameID
# mapping; the role mapping is appended only when it was reconciled above.
provider_payload="$(
  jq -n \
    --arg name "$provider_name" \
    --arg authorization_flow "$authorization_flow" \
    --arg invalidation_flow "$invalidation_flow" \
    --arg acs_url "$acs_url" \
    --arg audience "$audience" \
    --arg issuer "$issuer" \
    --arg signing_kp "$signing_kp" \
    --arg name_id_mapping "$email_mapping_pk" \
    --arg email_mapping "$email_mapping_pk" \
    --arg name_mapping "$name_mapping_pk" \
    --arg first_name_mapping "$first_name_mapping_pk" \
    --arg last_name_mapping "$last_name_mapping_pk" \
    --arg role_mapping "$role_mapping_pk" \
    '{
      name: $name,
      authorization_flow: $authorization_flow,
      invalidation_flow: $invalidation_flow,
      acs_url: $acs_url,
      audience: $audience,
      issuer: $issuer,
      signing_kp: $signing_kp,
      sign_assertion: true,
      sign_response: true,
      sp_binding: "post",
      name_id_mapping: $name_id_mapping,
      property_mappings: [
        $email_mapping,
        $name_mapping,
        $first_name_mapping,
        $last_name_mapping
      ] + (if $role_mapping != "" then [$role_mapping] else [] end)
    }'
)"
# Match an existing provider by assigned application slug or by name.
existing_provider="$(
  api GET "/api/v3/providers/saml/?page_size=200" \
    | jq -c \
      --arg application_slug "$application_slug" \
      --arg provider_name "$provider_name" \
      '.results[]? | select(.assigned_application_slug == $application_slug or .name == $provider_name)' \
    | head -n1
)"
if [[ -n "$existing_provider" ]]; then
  provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')"
  api PATCH "/api/v3/providers/saml/${provider_pk}/" "$provider_payload" >/dev/null
else
  provider_pk="$(
    api POST "/api/v3/providers/saml/" "$provider_payload" \
      | jq -r '.pk // empty'
  )"
fi
if [[ -z "${provider_pk:-}" ]]; then
  echo "error: Zulip SAML provider did not return a primary key" >&2
  exit 1
fi
# Reconcile the Zulip application object fronting the SAML provider.
application_payload="$(
  jq -n \
    --arg name "$application_name" \
    --arg slug "$application_slug" \
    --arg provider "$provider_pk" \
    --arg launch_url "$launch_url" \
    '{
      name: $name,
      slug: $slug,
      provider: ($provider | tonumber),
      meta_launch_url: $launch_url,
      open_in_new_tab: true,
      policy_engine_mode: "any"
    }'
)"
existing_application="$(
  api GET "/api/v3/core/applications/?page_size=200" \
    | jq -c --arg slug "$application_slug" '.results[]? | select(.slug == $slug)' \
    | head -n1
)"
if [[ -n "$existing_application" ]]; then
  application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')"
  api PATCH "/api/v3/core/applications/${application_pk}/" "$application_payload" >/dev/null
else
  create_application_result="$(
    api_with_status POST "/api/v3/core/applications/" "$application_payload"
  )"
  create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')"
  create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')"
  if [[ "$create_application_status" =~ ^20[01]$ ]]; then
    application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')"
  elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e '
    (.slug // [] | index("Application with this slug already exists.")) != null
    or (.provider // [] | index("Application with this provider already exists.")) != null
  ' >/dev/null; then
    # A concurrent or earlier run already created it; treat as success.
    application_pk="existing-duplicate"
  else
    printf '%s\n' "$create_application_body" >&2
    echo "error: could not reconcile Authentik application ${application_slug}" >&2
    exit 1
  fi
fi
if [[ -z "${application_pk:-}" ]]; then
  echo "error: Zulip SAML application did not return a primary key" >&2
  exit 1
fi
if [[ -n "$access_group" ]]; then
  ensure_application_group_binding "$application_slug" "$access_group"
fi
# Smoke-check the SAML metadata; redirects also count as readable.
for _ in $(seq 1 30); do
  metadata_status="$(
    curl -sS \
      -o /dev/null \
      -w '%{http_code}' \
      --max-redirs 0 \
      "${authentik_url}/application/saml/${application_slug}/metadata/" \
      || true
  )"
  case "$metadata_status" in
    200|301|302|307|308)
      echo "Synced Authentik Zulip SAML application ${application_slug} (${application_name})."
      exit 0
      ;;
  esac
  sleep 2
done
echo "warning: Zulip SAML metadata for ${application_slug} was not immediately readable; keeping reconciled config." >&2
echo "Synced Authentik Zulip SAML application ${application_slug} (${application_name})."

View file

@ -1,133 +0,0 @@
#!/usr/bin/env bash
# `bep`: helper for listing, opening and browsing Burrow Evolution Proposals.
set -euo pipefail
repo_root=$(git rev-parse --show-toplevel)
proposals_dir="$repo_root/evolution/proposals"
# Browse evolution/ with the `wisu` TUI when it is installed; otherwise fall
# back to a plain directory listing. Replaces the current process via exec.
auto_browse() {
  if ! command -v wisu >/dev/null 2>&1; then
    exec ls -la "$repo_root/evolution"
  fi
  exec wisu -i -g --icons "$repo_root/evolution"
}
# Print CLI help (heredoc content is emitted verbatim).
usage() {
  cat <<'USAGE'
Usage: bep [command]
Commands:
list [--status <Status>] List BEPs, optionally filtered by status.
open <BEP-XXXX|XXXX|X> Open a BEP in $EDITOR.
help Show this help.
If no command is provided, bep launches a simple browser for evolution/.
USAGE
}
# Canonicalise a user-supplied BEP id to the "BEP-XXXX" form.
# Accepts "BEP-12", a bare "12", or zero-padded "0012"; returns non-zero
# (printing nothing) for anything else.
normalize_id() {
  local raw="$1"
  if [[ "$raw" =~ ^BEP-[0-9]+$ ]]; then
    printf '%s' "$raw"
    return 0
  fi
  if [[ "$raw" =~ ^[0-9]+$ ]]; then
    # Force base-10 with 10#: printf %d treats a leading zero as octal, so
    # "bep open 0010" previously resolved to BEP-0008 instead of BEP-0010.
    printf 'BEP-%04d' "$((10#$raw))"
    return 0
  fi
  return 1
}
# Print the value of the first "Status:" line in a proposal file.
read_status() {
  local file="$1"
  awk 'BEGIN { FS = ": " } /^Status:/ { print $2; exit }' "$file"
}
# Print the proposal title: the first line of the file with the leading
# "# `BEP-XXXX` " marker (or any other leading punctuation) stripped.
read_title() {
  local file="$1"
  local first_line
  first_line=$(head -n 1 "$file" || true)
  printf '%s' "$first_line" \
    | sed -E 's/^# `[^`]+`[[:space:]]+//; s/^[^A-Za-z0-9]+//'
}
# Print a table of BEPs (id, status, title), sorted by id, optionally
# filtered by a case-insensitive status value in $1.
list_bep() {
  local status_filter="${1:-}"
  local wanted=""
  if [[ -n "$status_filter" ]]; then
    wanted=$(printf '%s' "$status_filter" | tr '[:upper:]' '[:lower:]')
  fi
  printf '%-10s %-18s %s\n' "BEP" "Status" "Title"
  local path
  local rows=()
  for path in "$proposals_dir"/BEP-*.md; do
    [[ -e "$path" ]] || continue
    local bep_id status status_lc title
    bep_id=$(basename "$path" | cut -d- -f1-2)
    status=$(read_status "$path")
    status_lc=$(printf '%s' "$status" | tr '[:upper:]' '[:lower:]')
    if [[ -n "$wanted" && "$status_lc" != "$wanted" ]]; then
      continue
    fi
    title=$(read_title "$path")
    rows+=("$(printf '%-10s %-18s %s' "$bep_id" "$status" "$title")")
  done
  if [[ ${#rows[@]} -gt 0 ]]; then
    printf '%s\n' "${rows[@]}" | sort
  fi
}
# Open the proposal matching the given id in $EDITOR (default vi).
# Exits with an error for unknown ids, no match, or an ambiguous match.
open_bep() {
  local raw="$1"
  local id
  if ! id=$(normalize_id "$raw"); then
    echo "Unknown BEP id: $raw" >&2
    exit 1
  fi
  local matches
  # With no match the glob stays literal, hence the -e existence check below.
  matches=("$proposals_dir"/"$id"-*.md)
  if [[ ${#matches[@]} -eq 0 || ! -e "${matches[0]}" ]]; then
    echo "No proposal found for $id" >&2
    exit 1
  fi
  if [[ ${#matches[@]} -gt 1 ]]; then
    echo "Multiple proposals match $id:" >&2
    printf ' %s\n' "${matches[@]}" >&2
    exit 1
  fi
  local editor="${EDITOR:-vi}"
  exec "$editor" "${matches[0]}"
}
# Dispatch on the first CLI argument; no argument opens the browser view.
command=${1:-}
case "$command" in
  "")
    auto_browse
    ;;
  list)
    if [[ ${2:-} == "--status" && -n ${3:-} ]]; then
      list_bep "$3"
    else
      list_bep
    fi
    ;;
  open)
    if [[ -z ${2:-} ]]; then
      echo "bep open requires an id" >&2
      exit 1
    fi
    open_bep "$2"
    ;;
  help|-h|--help)
    usage
    ;;
  *)
    echo "Unknown command: $command" >&2
    usage
    exit 1
    ;;
esac

View file

@ -1,94 +0,0 @@
#!/usr/bin/env python3
"""Lint the metadata blocks of BEP proposal files under evolution/proposals."""
from __future__ import annotations

import pathlib
import re
import sys

# The script lives one directory below the repo root.
REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent
PROPOSALS_DIR = REPO_ROOT / "evolution" / "proposals"

# Valid values for the "Status" metadata field.
ALLOWED_STATUSES = {
    "Pitch",
    "Draft",
    "In Review",
    "Accepted",
    "Implemented",
    "Rejected",
    "Returned for Revision",
    "Superseded",
    "Archived",
}

# Metadata fields, in the exact order they must appear in the ```text block.
REQUIRED_FIELDS = [
    "Status",
    "Proposal",
    "Authors",
    "Coordinator",
    "Reviewers",
    "Constitution Sections",
    "Implementation PRs",
    "Decision Date",
]
def text_block_lines(path: pathlib.Path) -> list[str]:
    """Return the non-empty, right-stripped lines of the leading ```text block.

    Raises ValueError when the file has no ```text fenced block.
    """
    raw = path.read_text(encoding="utf-8")
    block = re.search(r"```text\n(.*?)\n```", raw, re.DOTALL)
    if block is None:
        raise ValueError("missing leading ```text metadata block")
    stripped = (entry.rstrip() for entry in block.group(1).splitlines())
    return [entry for entry in stripped if entry]
def validate(path: pathlib.Path) -> list[str]:
    """Validate one proposal file's metadata block; return error strings.

    Checks, in order: the ```text block parses, its fields appear exactly in
    REQUIRED_FIELDS order, Status is allowed, Proposal matches the filename
    id, and a decided status carries a concrete Decision Date.
    """
    errors: list[str] = []
    # "BEP-0001-title.md" -> "BEP-0001"; removesuffix handles the
    # title-less "BEP-0001.md" form.
    proposal_id = path.name.split("-", 2)[:2]
    expected_id = "-".join(proposal_id).removesuffix(".md")
    try:
        lines = text_block_lines(path)
    except ValueError as exc:
        return [f"{path}: {exc}"]
    field_names = [line.split(":", 1)[0] for line in lines]
    if field_names != REQUIRED_FIELDS:
        # Layout is wrong; the checks below assume the exact field set,
        # so report just this error and stop.
        errors.append(
            f"{path}: metadata fields must appear in order {', '.join(REQUIRED_FIELDS)}"
        )
        return errors
    fields = dict(line.split(":", 1) for line in lines)
    fields = {key.strip(): value.strip() for key, value in fields.items()}
    if fields["Status"] not in ALLOWED_STATUSES:
        errors.append(f"{path}: invalid Status {fields['Status']!r}")
    if fields["Proposal"] != expected_id:
        errors.append(
            f"{path}: Proposal field {fields['Proposal']!r} does not match filename id {expected_id!r}"
        )
    # A decided/terminal status must carry a real date, not the placeholder.
    if fields["Status"] in {"Accepted", "Implemented", "Superseded", "Rejected", "Archived"} and fields["Decision Date"] == "Pending":
        errors.append(
            f"{path}: Decision Date must not be Pending once status is {fields['Status']}"
        )
    return errors
def main() -> int:
    """Validate every BEP proposal file; print errors and return 1 on failure.

    Fix: the original globbed PROPOSALS_DIR twice (once to validate, once to
    count), so the reported count could disagree with the validated set and
    the directory was scanned redundantly. Glob exactly once.
    """
    paths = sorted(PROPOSALS_DIR.glob("BEP-*.md"))
    errors: list[str] = []
    for path in paths:
        errors.extend(validate(path))
    if errors:
        for error in errors:
            print(error, file=sys.stderr)
        return 1
    print(f"checked {len(paths)} BEPs")
    return 0
if __name__ == "__main__":
raise SystemExit(main())

View file

@ -164,14 +164,6 @@ if [[ "${EXPECT_TAILNET}" == "1" ]]; then
test -s /run/agenix/burrowHeadscaleOidcClientSecret
fi
if [[ "${EXPECT_NSC}" == "1" ]]; then
echo "== agenix-nsc =="
ls -l /run/agenix || true
test -s /run/agenix/burrowForgejoNscToken
test -s /run/agenix/burrowForgejoNscDispatcherConfig
test -s /run/agenix/burrowForgejoNscAutoscalerConfig
fi
if command -v curl >/dev/null 2>&1; then
echo "== http-local =="
curl -fsS -o /dev/null -w 'forgejo_login %{http_code}\n' http://127.0.0.1:3000/user/login

View file

@ -1,20 +0,0 @@
#!/usr/bin/env bash
# Build the release burrow binary and stage a tarball + checksum under dist/.
set -euo pipefail
repo_root="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "${repo_root}"
# Release ref comes from CI; fall back to a manual tag based on the SHA.
release_ref="${RELEASE_REF:-manual-${GITHUB_SHA:-unknown}}"
target="x86_64-unknown-linux-gnu"
out_dir="${repo_root}/dist"
staging="${out_dir}/burrow-${release_ref}-${target}"
mkdir -p "${staging}"
cargo build --locked --release -p burrow --bin burrow
install -m 0755 target/release/burrow "${staging}/burrow"
cp README.md "${staging}/README.md"
tarball="${out_dir}/burrow-${release_ref}-${target}.tar.gz"
tar -C "${out_dir}" -czf "${tarball}" "$(basename "${staging}")"
# Fix: generate the checksum from inside dist/ so the .sha256 file records
# only the tarball's basename. The previous `shasum -a 256 "${tarball}"`
# embedded the absolute builder path, which made `shasum -c` fail for anyone
# verifying the downloaded artifact next to its checksum file.
(
  cd "${out_dir}"
  shasum -a 256 "$(basename "${tarball}")" > "$(basename "${tarball}").sha256"
)

View file

@ -1,157 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
source_nix_profile() {
  # Source the first Nix profile script that exists so `nix` lands on PATH.
  # Returns 1 when neither the daemon nor the single-user profile is present.
  local profile_script
  for profile_script in \
    "/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" \
    "${HOME}/.nix-profile/etc/profile.d/nix.sh"
  do
    [[ -f "${profile_script}" ]] || continue
    # shellcheck disable=SC1090
    . "${profile_script}"
    return 0
  done
  return 1
}
linux_cp_supports_preserve() {
  # True when `cp` advertises --preserve in its help text (GNU coreutils);
  # BusyBox cp does not, and the Nix installer needs the GNU behavior.
  cp --help 2>&1 | grep -q -- '--preserve'
}
ensure_root_owned_home() {
  # Only root needs this adjustment.
  [[ "$(id -u)" -eq 0 ]] || return 0
  # Point HOME at /root unless the current HOME is a directory we own,
  # then make sure it exists.
  if [[ ! -d "${HOME}" || ! -O "${HOME}" ]]; then
    export HOME="/root"
  fi
  mkdir -p "${HOME}"
}
ensure_linux_nixbld_accounts() {
  # Create the nixbld group and nixbld1..10 build users the Nix installer
  # expects. No-op when not root or when the group already exists.
  if [[ "$(id -u)" -ne 0 ]]; then
    return 0
  fi
  if command -v getent >/dev/null 2>&1 && getent group nixbld >/dev/null 2>&1; then
    return 0
  fi
  # BusyBox-style systems: addgroup/adduser (only when groupadd is absent).
  if command -v addgroup >/dev/null 2>&1 && ! command -v groupadd >/dev/null 2>&1; then
    addgroup -S nixbld >/dev/null 2>&1 || true
    for i in $(seq 1 10); do
      adduser -S -D -H -h /var/empty -s /sbin/nologin -G nixbld "nixbld${i}" >/dev/null 2>&1 || true
    done
    return 0
  fi
  # shadow-utils systems: groupadd/useradd.
  if command -v groupadd >/dev/null 2>&1; then
    groupadd -r nixbld >/dev/null 2>&1 || true
    for i in $(seq 1 10); do
      useradd \
        --system \
        --no-create-home \
        --home-dir /var/empty \
        --shell /usr/sbin/nologin \
        --gid nixbld \
        "nixbld${i}" >/dev/null 2>&1 || true
    done
    return 0
  fi
  echo "linux nix bootstrap requires nixbld group creation support" >&2
  exit 1
}
ensure_linux_nix_bootstrap_prereqs() {
  # Ensure GNU cp (coreutils) and xz are present, then prepare HOME and the
  # nixbld accounts the single-user Nix installer requires.
  if linux_cp_supports_preserve; then
    ensure_root_owned_home
    ensure_linux_nixbld_accounts
    return 0
  fi
  # Install coreutils/xz with whichever package manager is available.
  if command -v apk >/dev/null 2>&1; then
    apk add --no-cache coreutils xz >/dev/null
  elif command -v apt-get >/dev/null 2>&1; then
    export DEBIAN_FRONTEND=noninteractive
    apt-get update -y >/dev/null
    apt-get install -y coreutils xz-utils >/dev/null
  elif command -v dnf >/dev/null 2>&1; then
    dnf install -y coreutils xz >/dev/null
  elif command -v yum >/dev/null 2>&1; then
    yum install -y coreutils xz >/dev/null
  else
    echo "linux nix bootstrap requires GNU cp but no supported package manager was found" >&2
    exit 1
  fi
  # Re-verify after installation before continuing.
  linux_cp_supports_preserve || {
    echo "linux nix bootstrap still lacks GNU cp after installing prerequisites" >&2
    exit 1
  }
  ensure_root_owned_home
  ensure_linux_nixbld_accounts
}
# Bootstrap Nix when it is not already on PATH.
if ! command -v nix >/dev/null 2>&1; then
  if ! command -v curl >/dev/null 2>&1; then
    echo "curl is required to install nix" >&2
    exit 1
  fi
  case "$(uname -s)" in
    Linux)
      # Single-user install; the upstream installer needs GNU cp and the
      # nixbld accounts prepared first.
      ensure_linux_nix_bootstrap_prereqs
      curl -fsSL https://nixos.org/nix/install | sh -s -- --no-daemon
      ;;
    Darwin)
      # Determinate Systems installer; use sudo when available, preferring
      # non-interactive sudo when cached credentials allow it.
      installer="$(mktemp -t burrow-nix.XXXXXX)"
      trap 'rm -f "${installer}"' EXIT
      curl -fsSL -o "${installer}" https://install.determinate.systems/nix
      chmod +x "${installer}"
      if command -v sudo >/dev/null 2>&1; then
        if sudo -n true 2>/dev/null; then
          sudo -n sh "${installer}" install --no-confirm
        else
          sudo sh "${installer}" install --no-confirm
        fi
      else
        sh "${installer}" install --no-confirm
      fi
      ;;
    *)
      echo "unsupported platform for nix bootstrap: $(uname -s)" >&2
      exit 1
      ;;
  esac
fi
# Load the freshly-installed profile; also prepend the standard profile bin
# directories in case sourcing fails.
source_nix_profile || true
export PATH="${HOME}/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}"
# Write a known-good nix.conf; fall back to a temp XDG_CONFIG_HOME when the
# existing config file is present but not writable.
config_root="${XDG_CONFIG_HOME:-$HOME/.config}"
config_file="${config_root}/nix/nix.conf"
if [[ -e "${config_file}" && ! -w "${config_file}" ]]; then
  config_root="$(mktemp -d -t burrow-nix-config.XXXXXX)"
  export XDG_CONFIG_HOME="${config_root}"
  config_file="${XDG_CONFIG_HOME}/nix/nix.conf"
fi
mkdir -p "$(dirname -- "${config_file}")"
cat > "${config_file}" <<'EOF'
experimental-features = nix-command flakes
sandbox = true
fallback = true
substituters = https://cache.nixos.org
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
EOF
# Final sanity check: nix must now be resolvable.
command -v nix >/dev/null 2>&1 || {
  echo "nix is still unavailable after bootstrap" >&2
  exit 1
}

View file

@ -1,65 +0,0 @@
#!/usr/bin/env bash
# Create (if needed) a prerelease for RELEASE_TAG on Forgejo and upload every
# dist/ artifact, replacing same-named assets.
set -euo pipefail
# Required inputs for the Forgejo releases API.
: "${API_URL:?API_URL is required}"
: "${REPOSITORY:?REPOSITORY is required}"
: "${RELEASE_TAG:?RELEASE_TAG is required}"
: "${TOKEN:?TOKEN is required}"
release_api="${API_URL}/repos/${REPOSITORY}/releases"
tag_api="${release_api}/tags/${RELEASE_TAG}"
release_json="$(mktemp)"
create_json="$(mktemp)"
trap 'rm -f "${release_json}" "${create_json}"' EXIT
# Look up an existing release for the tag; capture body and HTTP code.
status="$(
  curl -sS -o "${release_json}" -w '%{http_code}' \
    -H "Authorization: token ${TOKEN}" \
    "${tag_api}"
)"
if [[ "${status}" == "404" ]]; then
  # No release yet: create a prerelease pointing at the tag.
  jq -n \
    --arg tag "${RELEASE_TAG}" \
    --arg name "Burrow ${RELEASE_TAG}" \
    '{
      tag_name: $tag,
      target_commitish: $tag,
      name: $name,
      body: "Automated prerelease built on Forgejo Namespace runners.",
      draft: false,
      prerelease: true
    }' > "${create_json}"
  curl -fsS \
    -H "Authorization: token ${TOKEN}" \
    -H "Content-Type: application/json" \
    -d @"${create_json}" \
    "${release_api}" > "${release_json}"
elif [[ "${status}" != "200" ]]; then
  echo "failed to query Forgejo release for ${RELEASE_TAG} (HTTP ${status})" >&2
  cat "${release_json}" >&2
  exit 1
fi
release_id="$(jq -r '.id' "${release_json}")"
if [[ -z "${release_id}" || "${release_id}" == "null" ]]; then
  echo "Forgejo release payload is missing an id" >&2
  cat "${release_json}" >&2
  exit 1
fi
# Upload each artifact; delete an existing asset with the same name first so
# re-runs are idempotent.
for file in dist/*; do
  name="$(basename "${file}")"
  asset_id="$(jq -r --arg name "${name}" '.assets[]? | select(.name == $name) | .id' "${release_json}" | head -n1)"
  if [[ -n "${asset_id}" ]]; then
    curl -fsS -X DELETE \
      -H "Authorization: token ${TOKEN}" \
      "${release_api}/${release_id}/assets/${asset_id}" >/dev/null
  fi
  curl -fsS \
    -H "Authorization: token ${TOKEN}" \
    -F "attachment=@${file}" \
    "${release_api}/${release_id}/assets?name=${name}" >/dev/null
done

View file

@ -1,163 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# All knobs are overridable via BURROW_UI_TEST_* environment variables.
bundle_id="${BURROW_UI_TEST_APP_BUNDLE_ID:-com.hackclub.burrow}"
simulator_name="${BURROW_UI_TEST_SIMULATOR_NAME:-iPhone 17 Pro}"
simulator_os="${BURROW_UI_TEST_SIMULATOR_OS:-26.4}"
simulator_id="${BURROW_UI_TEST_SIMULATOR_ID:-}"
derived_data_path="${BURROW_UI_TEST_DERIVED_DATA_PATH:-/tmp/burrow-ui-tests-deriveddata}"
source_packages_path="${BURROW_UI_TEST_SOURCE_PACKAGES_PATH:-/tmp/burrow-ui-tests-sourcepackages}"
# The simulator app reaches the daemon through this fallback unix socket.
fallback_dir="/tmp/${bundle_id}/SimulatorFallback"
socket_path="${fallback_dir}/burrow.sock"
tailnet_state_root="/tmp/${bundle_id}/SimulatorTailnetState"
daemon_log="${BURROW_UI_TEST_DAEMON_LOG:-/tmp/burrow-ui-test-daemon.log}"
ui_test_config_path="${BURROW_UI_TEST_CONFIG_PATH:-/tmp/burrow-ui-test-config.json}"
ui_test_runner_bundle_id="${bundle_id}.uitests.xctrunner"
ui_test_email="${BURROW_UI_TEST_EMAIL:-ui-test@burrow.net}"
ui_test_username="${BURROW_UI_TEST_USERNAME:-ui-test}"
ui_test_tailnet_mode="${BURROW_UI_TEST_TAILNET_MODE:-tailscale}"
password_secret="${repo_root}/secrets/infra/authentik-ui-test-password.age"
age_identity="${BURROW_UI_TEST_AGE_IDENTITY:-${HOME}/.ssh/id_ed25519}"
ui_test_password="${BURROW_UI_TEST_PASSWORD:-}"
# Resolve the test password: the env var wins; otherwise decrypt the agenix
# secret with the local age identity, stripping any trailing newline.
if [[ -z "$ui_test_password" ]]; then
  if [[ -f "$password_secret" && -f "$age_identity" ]]; then
    ui_test_password="$(age -d -i "$age_identity" "$password_secret" | tr -d '\r\n')"
  else
    echo "error: BURROW_UI_TEST_PASSWORD is unset and ${password_secret} could not be decrypted" >&2
    exit 1
  fi
fi
# Start from clean state directories and no stale socket.
rm -rf "$fallback_dir" "$tailnet_state_root"
mkdir -p "$fallback_dir" "$tailnet_state_root" "$derived_data_path" "$source_packages_path"
rm -f "$socket_path"
resolve_simulator_id() {
  # Pick a healthy simulator UDID matching the configured name and OS.
  # Prefers an exact name match, then a substring match; exits nonzero when
  # no available device with an existing data directory qualifies.
  xcrun simctl list devices available -j | python3 -c '
import json
import os
import sys
target_name = sys.argv[1]
target_os = sys.argv[2]
target_runtime = "com.apple.CoreSimulator.SimRuntime.iOS-" + target_os.replace(".", "-")
devices = json.load(sys.stdin).get("devices", {})
healthy = []
for runtime, entries in devices.items():
    if runtime != target_runtime:
        continue
    for entry in entries:
        if not entry.get("isAvailable", False):
            continue
        if not os.path.isdir(entry.get("dataPath", "")):
            continue
        healthy.append(entry)
for entry in healthy:
    if entry.get("name") == target_name:
        print(entry["udid"])
        raise SystemExit(0)
for entry in healthy:
    if target_name in entry.get("name", ""):
        print(entry["udid"])
        raise SystemExit(0)
raise SystemExit(1)
' "$simulator_name" "$simulator_os"
}
# Resolve a simulator when one was not pinned via env. When a UDID is known,
# boot it and remove stale app/test-runner installs so the run starts clean.
if [[ -z "$simulator_id" ]]; then
  simulator_id="$(resolve_simulator_id || true)"
fi
if [[ -n "$simulator_id" ]]; then
  xcrun simctl boot "$simulator_id" >/dev/null 2>&1 || true
  xcrun simctl bootstatus "$simulator_id" -b
  xcrun simctl terminate "$simulator_id" "$bundle_id" >/dev/null 2>&1 || true
  xcrun simctl terminate "$simulator_id" "$ui_test_runner_bundle_id" >/dev/null 2>&1 || true
  xcrun simctl uninstall "$simulator_id" "$bundle_id" >/dev/null 2>&1 || true
  xcrun simctl uninstall "$simulator_id" "$ui_test_runner_bundle_id" >/dev/null 2>&1 || true
  destination="id=${simulator_id}"
else
  # Fall back to name/OS addressing and let xcodebuild pick the device.
  destination="platform=iOS Simulator,name=${simulator_name},OS=${simulator_os}"
fi
cleanup() {
  # Remove the throwaway credential config and, when we started a daemon,
  # stop it and reap the background job.
  rm -f "$ui_test_config_path"
  [[ -n "${daemon_pid:-}" ]] || return 0
  kill "$daemon_pid" >/dev/null 2>&1 || true
  wait "$daemon_pid" >/dev/null 2>&1 || true
}
trap cleanup EXIT
# The rendered config holds the password; keep it private.
umask 077
# Render the UI-test credential config consumed by the test runner.
python3 - <<'PY' "$ui_test_config_path" "$ui_test_email" "$ui_test_username" "$ui_test_password" "$ui_test_tailnet_mode"
import json
import pathlib
import sys
config_path = pathlib.Path(sys.argv[1])
config_path.write_text(
    json.dumps(
        {
            "email": sys.argv[2],
            "username": sys.argv[3],
            "password": sys.argv[4],
            "mode": sys.argv[5],
        }
    ),
    encoding="utf-8",
)
PY
# Build and launch the Burrow daemon the simulator app connects to.
cargo build -p burrow --bin burrow
(
  cd "$fallback_dir"
  RUST_LOG="${BURROW_UI_TEST_RUST_LOG:-info,burrow=debug}" \
  BURROW_SOCKET_PATH="burrow.sock" \
  BURROW_TAILSCALE_STATE_ROOT="$tailnet_state_root" \
  "${repo_root}/target/debug/burrow" daemon >"$daemon_log" 2>&1
) &
daemon_pid=$!
# Wait up to ~10s (50 x 0.2s) for the daemon socket to appear.
for _ in $(seq 1 50); do
  [[ -S "$socket_path" ]] && break
  sleep 0.2
done
if [[ ! -S "$socket_path" ]]; then
  echo "error: Burrow daemon did not create ${socket_path}" >&2
  [[ -f "$daemon_log" ]] && cat "$daemon_log" >&2
  exit 1
fi
# Serialized, signing-free test invocation shared by both xcodebuild calls.
common_xcodebuild_args=(
  -quiet
  -skipPackagePluginValidation
  -project "${repo_root}/Apple/Burrow.xcodeproj"
  -scheme App
  -configuration Debug
  -destination "$destination"
  -derivedDataPath "$derived_data_path"
  -clonedSourcePackagesDirPath "$source_packages_path"
  -only-testing:BurrowUITests
  -parallel-testing-enabled NO
  -maximum-concurrent-test-simulator-destinations 1
  -maximum-parallel-testing-workers 1
  CODE_SIGNING_ALLOWED=NO
)
# Build once, then run the UI tests with credentials in the environment.
xcodebuild \
  "${common_xcodebuild_args[@]}" \
  build-for-testing
BURROW_UI_TEST_EMAIL="$ui_test_email" \
BURROW_UI_TEST_USERNAME="$ui_test_username" \
BURROW_UI_TEST_PASSWORD="$ui_test_password" \
BURROW_UI_TEST_CONFIG_PATH="$ui_test_config_path" \
BURROW_UI_TEST_EPHEMERAL_AUTH=1 \
xcodebuild \
  "${common_xcodebuild_args[@]}" \
  test-without-building

View file

@ -1,186 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# Paths and knobs, overridable via BURROW_TAILNET_SMOKE_* / related env vars.
bundle_id="${BURROW_UI_TEST_APP_BUNDLE_ID:-com.hackclub.burrow}"
smoke_root="${BURROW_TAILNET_SMOKE_ROOT:-/tmp/burrow-tailnet-connectivity}"
socket_path="${smoke_root}/burrow.sock"
db_path="${smoke_root}/burrow.db"
daemon_log="${BURROW_TAILNET_SMOKE_DAEMON_LOG:-${smoke_root}/daemon.log}"
payload_path="${smoke_root}/tailnet.json"
authority="${BURROW_TAILNET_SMOKE_AUTHORITY:-https://ts.burrow.net}"
account_name="${BURROW_TAILNET_SMOKE_ACCOUNT:-ui-test}"
identity_name="${BURROW_TAILNET_SMOKE_IDENTITY:-apple}"
hostname="${BURROW_TAILNET_SMOKE_HOSTNAME:-burrow-apple}"
message="${BURROW_TAILNET_SMOKE_MESSAGE:-burrow-tailnet-smoke}"
timeout_ms="${BURROW_TAILNET_SMOKE_TIMEOUT_MS:-8000}"
remote_ip="${BURROW_TAILNET_SMOKE_REMOTE_IP:-}"
remote_port="${BURROW_TAILNET_SMOKE_REMOTE_PORT:-18081}"
remote_hostname="${BURROW_TAILNET_SMOKE_REMOTE_HOSTNAME:-burrow-echo}"
remote_authkey="${BURROW_TAILNET_SMOKE_REMOTE_AUTHKEY:-}"
helper_bin="${BURROW_TAILNET_SMOKE_HELPER_BIN:-${smoke_root}/tailscale-login-bridge}"
remote_state_root="${BURROW_TAILNET_SMOKE_REMOTE_STATE_ROOT:-${smoke_root}/remote-state}"
remote_stdout="${smoke_root}/remote-helper.stdout"
remote_stderr="${BURROW_TAILNET_SMOKE_REMOTE_LOG:-${smoke_root}/remote-helper.log}"
# With TS_AUTHKEY we can log in headlessly into a private state root;
# otherwise reuse the simulator UI-test login state.
if [[ -n "${TS_AUTHKEY:-}" ]]; then
  default_tailnet_state_root="${smoke_root}/local-state"
else
  default_tailnet_state_root="/tmp/${bundle_id}/SimulatorTailnetState"
fi
tailnet_state_root="${BURROW_TAILNET_STATE_ROOT:-${default_tailnet_state_root}}"
# Without an auth key, an empty state root means nobody is logged in yet:
# run the interactive iOS login bootstrap first.
need_login=0
if [[ -z "${TS_AUTHKEY:-}" ]] && { [[ ! -d "$tailnet_state_root" ]] || [[ -z "$(find "$tailnet_state_root" -mindepth 1 -maxdepth 2 -print -quit 2>/dev/null)" ]]; }; then
  need_login=1
fi
if [[ "$need_login" -eq 1 ]]; then
  echo "Tailnet state root is empty; running iOS login bootstrap first..."
  "${repo_root}/Scripts/run-ios-tailnet-ui-tests.sh"
fi
# Fresh scratch directory for this smoke run.
rm -rf "$smoke_root"
mkdir -p "$smoke_root"
cleanup() {
  # Drop the payload file, then stop and reap whichever background
  # processes (daemon first, then remote helper) this run started.
  rm -f "$payload_path"
  local background_pid
  for background_pid in "${daemon_pid:-}" "${remote_pid:-}"; do
    [[ -n "$background_pid" ]] || continue
    kill "$background_pid" >/dev/null 2>&1 || true
    wait "$background_pid" >/dev/null 2>&1 || true
  done
}
trap cleanup EXIT
wait_for_helper_listen() {
  # Wait up to 20s for the helper to write its JSON hello line to the file
  # given as $1, then print the advertised listen address.
  python3 - <<'PY' "$1"
import json
import pathlib
import sys
import time
path = pathlib.Path(sys.argv[1])
deadline = time.time() + 20
while time.time() < deadline:
    if path.exists():
        with path.open("r", encoding="utf-8") as handle:
            line = handle.readline().strip()
            if line:
                hello = json.loads(line)
                print(hello["listen_addr"])
                raise SystemExit(0)
    time.sleep(0.1)
raise SystemExit("timed out waiting for helper startup line")
PY
}
wait_for_helper_ip() {
  # Poll the helper /status endpoint (URL in $1) until it reports running
  # with at least one tailnet IP, printing the first IP.
  #
  # Fix: the original called urllib.request.urlopen with no exception
  # handling, so a transient connection refusal (helper socket not yet
  # accepting) aborted the whole wait instead of retrying. Treat request
  # and decode failures as "not ready yet" until the 30s deadline.
  python3 - <<'PY' "$1"
import json
import sys
import time
import urllib.error
import urllib.request
url = sys.argv[1]
deadline = time.time() + 30
while time.time() < deadline:
    status = None
    try:
        with urllib.request.urlopen(url, timeout=5) as response:
            status = json.load(response)
    except (urllib.error.URLError, OSError, ValueError):
        # Connection refused/reset, timeout, or partial JSON: retry.
        status = None
    if status and status.get("running") and status.get("tailscale_ips"):
        print(status["tailscale_ips"][0])
        raise SystemExit(0)
    time.sleep(0.25)
raise SystemExit("timed out waiting for helper to become ready")
PY
}
# Write the tailnet account payload consumed by `burrow network-add`.
python3 - <<'PY' "$payload_path" "$authority" "$account_name" "$identity_name" "$hostname"
import json
import pathlib
import sys
path = pathlib.Path(sys.argv[1])
payload = {
    "authority": sys.argv[2],
    "account": sys.argv[3],
    "identity": sys.argv[4],
    "hostname": sys.argv[5],
}
path.write_text(json.dumps(payload, indent=2) + "\n", encoding="utf-8")
PY
# Build the daemon and the Go login-bridge helper.
cargo build -p burrow --bin burrow
(
  cd "${repo_root}/Tools/tailscale-login-bridge"
  GOWORK=off go build -o "$helper_bin" .
)
# When no remote echo peer was supplied, spin one up locally using either a
# fresh auth key or an existing logged-in helper state directory.
if [[ -z "$remote_ip" ]]; then
  if [[ -z "$remote_authkey" ]] && { [[ ! -d "$remote_state_root" ]] || [[ -z "$(find "$remote_state_root" -mindepth 1 -maxdepth 1 -print -quit 2>/dev/null)" ]]; }; then
    echo "error: set BURROW_TAILNET_SMOKE_REMOTE_IP, BURROW_TAILNET_SMOKE_REMOTE_AUTHKEY, or BURROW_TAILNET_SMOKE_REMOTE_STATE_ROOT to an existing logged-in helper state" >&2
    exit 1
  fi
  if [[ -n "$remote_authkey" ]]; then
    # A fresh auth key implies a fresh login; discard stale state.
    rm -rf "$remote_state_root"
    mkdir -p "$remote_state_root"
  fi
  (
    cd "$repo_root"
    if [[ -n "$remote_authkey" ]]; then
      export TS_AUTHKEY="$remote_authkey"
    fi
    "$helper_bin" \
      --listen 127.0.0.1:0 \
      --state-dir "$remote_state_root" \
      --hostname "$remote_hostname" \
      --control-url "$authority" \
      --udp-echo-port "$remote_port" \
      >"$remote_stdout" 2>"$remote_stderr"
  ) &
  remote_pid=$!
  # Learn the helper's HTTP address from its hello line, then its tailnet IP.
  remote_listen_addr="$(wait_for_helper_listen "$remote_stdout")"
  remote_ip="$(wait_for_helper_ip "http://${remote_listen_addr}/status")"
fi
# Launch the local Burrow daemon and wait up to ~10s for its socket.
(
  cd "$smoke_root"
  RUST_LOG="${BURROW_TAILNET_SMOKE_RUST_LOG:-info,burrow=debug}" \
  BURROW_SOCKET_PATH="$socket_path" \
  BURROW_TAILSCALE_STATE_ROOT="$tailnet_state_root" \
  "${repo_root}/target/debug/burrow" daemon >"$daemon_log" 2>&1
) &
daemon_pid=$!
for _ in $(seq 1 50); do
  [[ -S "$socket_path" ]] && break
  sleep 0.2
done
if [[ ! -S "$socket_path" ]]; then
  echo "error: Burrow daemon did not create ${socket_path}" >&2
  [[ -f "$daemon_log" ]] && cat "$daemon_log" >&2
  exit 1
fi
run_burrow() {
  # Invoke the freshly-built burrow CLI with this smoke run's socket and
  # tailnet state root in its environment.
  env \
    BURROW_SOCKET_PATH="$socket_path" \
    BURROW_TAILSCALE_STATE_ROOT="$tailnet_state_root" \
    "${repo_root}/target/debug/burrow" "$@"
}
# Exercise the tailnet path end to end: register the network, start the
# tunnel, dump its config, and round-trip a UDP echo via the remote helper.
run_burrow network-add 1 1 "$payload_path"
run_burrow start
run_burrow tunnel-config
run_burrow tailnet-udp-echo "${remote_ip}:${remote_port}" --message "$message" --timeout-ms "$timeout_ms"
echo
echo "Tailnet connectivity smoke passed."
echo "State root: $tailnet_state_root"
echo "Remote: ${remote_ip}:${remote_port}"

View file

@ -1,112 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
usage() {
  # Print CLI help text.
  cat <<'EOF'
Usage: Scripts/seal-forgejo-nsc-secrets.sh [options]
Encrypt Burrow forgejo-nsc runtime inputs from intake/ into the agenix secrets
consumed by burrow-forge.
Options:
  --provision         Re-render the local intake files before sealing.
  --host <user@host>  SSH target forwarded to provision-forgejo-nsc.sh.
  --ssh-key <path>    SSH private key forwarded to provision-forgejo-nsc.sh.
  --nsc-bin <path>    Override the nsc binary for provisioning.
  -h, --help          Show this help text.
EOF
}
# Defaults, overridable via environment.
PROVISION=0
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
NSC_BIN="${NSC_BIN:-}"
# Parse CLI flags; unknown options exit with EX_USAGE (64).
while [[ $# -gt 0 ]]; do
  case "$1" in
    --provision)
      PROVISION=1
      shift
      ;;
    --host)
      HOST="${2:?missing value for --host}"
      shift 2
      ;;
    --ssh-key)
      SSH_KEY="${2:?missing value for --ssh-key}"
      shift 2
      ;;
    --nsc-bin)
      NSC_BIN="${2:?missing value for --nsc-bin}"
      shift 2
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "unknown option: $1" >&2
      usage >&2
      exit 64
      ;;
  esac
done
require_cmd() {
  # Abort with a clear message when a required tool is absent from PATH.
  command -v "$1" >/dev/null 2>&1 && return 0
  echo "missing required command: $1" >&2
  exit 1
}
# Tooling needed for sealing.
require_cmd age
require_cmd nix
require_cmd python3
# Optionally re-render the intake files via the provisioning script first.
if [[ "${PROVISION}" -eq 1 ]]; then
  provision_args=(--host "${HOST}" --ssh-key "${SSH_KEY}")
  if [[ -n "${NSC_BIN}" ]]; then
    provision_args+=(--nsc-bin "${NSC_BIN}")
  fi
  "${SCRIPT_DIR}/provision-forgejo-nsc.sh" "${provision_args[@]}"
fi
# Scratch space for per-secret recipient lists; removed on exit.
tmpdir="$(mktemp -d)"
cleanup() {
  rm -rf "${tmpdir}"
}
trap cleanup EXIT
seal_secret() {
  # Encrypt one runtime input into an agenix secret file.
  #   $1 target      repo-relative path of the .age secret to write
  #   $2 source_path plaintext runtime input to encrypt
  # Exits nonzero when the input is missing/empty.
  local target="$1"
  local source_path="$2"
  # Fix: recipients_file was assigned without `local`, leaking a global that
  # persists across calls; scope it to this function.
  local recipients_file
  recipients_file="${tmpdir}/$(basename "${target}").recipients"
  if [[ ! -s "${source_path}" ]]; then
    echo "required runtime input missing or empty: ${source_path}" >&2
    exit 1
  fi
  # Resolve the secret's recipient public keys from secrets.nix, one per line.
  nix eval --impure --json --expr "let s = import ${REPO_ROOT}/secrets.nix; in s.\"${target}\".publicKeys" \
    | python3 -c 'import json, sys; [print(item) for item in json.load(sys.stdin)]' \
    > "${recipients_file}"
  age -R "${recipients_file}" -o "${REPO_ROOT}/${target}" "${source_path}"
}
# Seal each runtime input into its agenix target.
seal_secret "secrets/infra/forgejo-nsc-token.age" "${REPO_ROOT}/intake/forgejo_nsc_token.txt"
seal_secret "secrets/infra/forgejo-nsc-dispatcher-config.age" "${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml"
seal_secret "secrets/infra/forgejo-nsc-autoscaler-config.age" "${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml"
# Tighten permissions on the sealed artifacts.
chmod 600 \
  "${REPO_ROOT}/secrets/infra/forgejo-nsc-token.age" \
  "${REPO_ROOT}/secrets/infra/forgejo-nsc-dispatcher-config.age" \
  "${REPO_ROOT}/secrets/infra/forgejo-nsc-autoscaler-config.age"
echo "Sealed forgejo-nsc runtime inputs into:"
printf ' %s\n' \
  "${REPO_ROOT}/secrets/infra/forgejo-nsc-token.age" \
  "${REPO_ROOT}/secrets/infra/forgejo-nsc-dispatcher-config.age" \
  "${REPO_ROOT}/secrets/infra/forgejo-nsc-autoscaler-config.age"
echo "Deploy burrow-forge to apply the new CI credentials."

View file

@ -1,7 +1,132 @@
#!/usr/bin/env bash
set -euo pipefail
# Deprecation stub: this script intentionally refuses to run and points the
# caller at the agenix-based replacement workflow.
echo "Scripts/sync-forgejo-nsc-config.sh is obsolete." >&2
echo "Burrow forgejo-nsc now consumes agenix-backed secrets instead of host-local intake files." >&2
echo "Use Scripts/seal-forgejo-nsc-secrets.sh and deploy burrow-forge." >&2
exit 1
usage() {
cat <<'EOF'
Usage: Scripts/sync-forgejo-nsc-config.sh [options]
Copy Burrow forgejo-nsc runtime inputs from intake/ onto the forge host and
restart the dispatcher/autoscaler units.
Options:
--host <user@host> SSH target (default: root@git.burrow.net)
--ssh-key <path> SSH private key (default: intake/agent_at_burrow_net_ed25519)
--rotate-pat Re-render the intake files before syncing.
--no-restart Copy files only.
-h, --help Show this help text.
EOF
}
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
HOST="${BURROW_FORGE_HOST:-root@git.burrow.net}"
SSH_KEY="${BURROW_FORGE_SSH_KEY:-${REPO_ROOT}/intake/agent_at_burrow_net_ed25519}"
KNOWN_HOSTS_FILE="${BURROW_FORGE_KNOWN_HOSTS_FILE:-${HOME}/.cache/burrow/forge-known_hosts}"
ROTATE_PAT=0
NO_RESTART=0
while [[ $# -gt 0 ]]; do
case "$1" in
--host)
HOST="${2:?missing value for --host}"
shift 2
;;
--ssh-key)
SSH_KEY="${2:?missing value for --ssh-key}"
shift 2
;;
--rotate-pat)
ROTATE_PAT=1
shift
;;
--no-restart)
NO_RESTART=1
shift
;;
-h|--help)
usage
exit 0
;;
*)
echo "unknown option: $1" >&2
usage >&2
exit 64
;;
esac
done
mkdir -p "$(dirname "${KNOWN_HOSTS_FILE}")"
burrow_require_cmd() {
  # Abort with a clear message when a required tool is absent from PATH.
  command -v "$1" >/dev/null 2>&1 && return 0
  echo "missing required command: $1" >&2
  exit 1
}
burrow_require_cmd ssh
burrow_require_cmd scp
if [[ ! -f "${SSH_KEY}" ]]; then
echo "forge SSH key not found: ${SSH_KEY}" >&2
exit 1
fi
if [[ "${ROTATE_PAT}" -eq 1 ]]; then
"${SCRIPT_DIR}/provision-forgejo-nsc.sh" --host "${HOST}" --ssh-key "${SSH_KEY}"
fi
token_file="${REPO_ROOT}/intake/forgejo_nsc_token.txt"
dispatcher_file="${REPO_ROOT}/intake/forgejo_nsc_dispatcher.yaml"
autoscaler_file="${REPO_ROOT}/intake/forgejo_nsc_autoscaler.yaml"
for path in "${token_file}" "${dispatcher_file}" "${autoscaler_file}"; do
if [[ ! -s "${path}" ]]; then
echo "required runtime input missing or empty: ${path}" >&2
exit 1
fi
done
ssh_opts=(
-i "${SSH_KEY}"
-o IdentitiesOnly=yes
-o UserKnownHostsFile="${KNOWN_HOSTS_FILE}"
-o StrictHostKeyChecking=accept-new
)
remote_tmp="$(ssh "${ssh_opts[@]}" "${HOST}" "mktemp -d")"
cleanup() {
if [[ -n "${remote_tmp:-}" ]]; then
ssh "${ssh_opts[@]}" "${HOST}" "rm -rf '${remote_tmp}'" >/dev/null 2>&1 || true
fi
}
trap cleanup EXIT
scp "${ssh_opts[@]}" \
"${token_file}" \
"${dispatcher_file}" \
"${autoscaler_file}" \
"${HOST}:${remote_tmp}/"
ssh "${ssh_opts[@]}" "${HOST}" "
set -euo pipefail
install -d -m 0755 /var/lib/burrow/intake
install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${token_file}")' /var/lib/burrow/intake/forgejo_nsc_token.txt
install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${dispatcher_file}")' /var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml
install -m 0400 -o forgejo-nsc -g forgejo-nsc '${remote_tmp}/$(basename "${autoscaler_file}")' /var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml
"
if [[ "${NO_RESTART}" -eq 0 ]]; then
ssh "${ssh_opts[@]}" "${HOST}" "
set -euo pipefail
systemctl restart forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service
systemctl is-active forgejo-nsc-dispatcher.service forgejo-nsc-autoscaler.service
ls -l \
/var/lib/burrow/intake/forgejo_nsc_token.txt \
/var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml \
/var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml
"
fi
echo "forgejo-nsc runtime sync complete (host=${HOST}, restarted=$((1 - NO_RESTART)))."

View file

@ -2,26 +2,17 @@ package main
import (
"context"
"encoding/binary"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"log"
"net"
"net/netip"
"net/http"
"os"
"strconv"
"sync"
"time"
"github.com/tailscale/wireguard-go/tun"
"tailscale.com/client/local"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
"tailscale.com/tailcfg"
"tailscale.com/tsnet"
)
@ -35,123 +26,13 @@ type statusResponse struct {
SelfDNSName string `json:"self_dns_name,omitempty"`
TailscaleIPs []string `json:"tailscale_ips,omitempty"`
Health []string `json:"health,omitempty"`
Peers []peerSummary `json:"peers,omitempty"`
}
type peerSummary struct {
Name string `json:"name,omitempty"`
DNSName string `json:"dns_name,omitempty"`
TailscaleIPs []string `json:"tailscale_ips,omitempty"`
Online bool `json:"online"`
Active bool `json:"active"`
Relay string `json:"relay,omitempty"`
CurAddr string `json:"cur_addr,omitempty"`
LastSeenUnix int64 `json:"last_seen_unix,omitempty"`
}
type pingResponse struct {
Result *ipnstate.PingResult `json:"result,omitempty"`
}
type helperHello struct {
ListenAddr string `json:"listen_addr"`
PacketSocket string `json:"packet_socket,omitempty"`
}
// helperState holds mutable helper state shared across HTTP handlers.
type helperState struct {
	mu      sync.RWMutex
	authURL string // last recorded interactive-auth URL; "" when none
}

// authURLSnapshot returns the stored auth URL under a read lock.
func (s *helperState) authURLSnapshot() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.authURL
}

// setAuthURL records url under the write lock.
func (s *helperState) setAuthURL(url string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.authURL = url
}

// clearAuthURL resets the stored auth URL to empty.
func (s *helperState) clearAuthURL() {
	s.setAuthURL("")
}
// chanTUN is a tun.Device backed by channels so another process can feed and
// consume raw IP packets while tsnet handles the Tailnet control/data plane.
type chanTUN struct {
	Inbound  chan []byte   // packets written by tsnet, consumed by the peer process
	Outbound chan []byte   // packets supplied by the peer process, read by tsnet
	closed   chan struct{} // closed exactly once to signal shutdown
	events   chan tun.Event
}

// newChanTUN builds a chanTUN with buffered packet channels and immediately
// queues tun.EventUp so tsnet treats the device as up.
func newChanTUN() *chanTUN {
	t := &chanTUN{
		Inbound:  make(chan []byte, 1024),
		Outbound: make(chan []byte, 1024),
		closed:   make(chan struct{}),
		events:   make(chan tun.Event, 1),
	}
	t.events <- tun.EventUp
	return t
}
// File returns nil: no OS file descriptor backs this device.
func (t *chanTUN) File() *os.File { return nil }

// Close is idempotent: the select guards against double-closing channels.
func (t *chanTUN) Close() error {
	select {
	case <-t.closed:
	default:
		close(t.closed)
		close(t.Inbound)
	}
	return nil
}

// Read blocks until the peer process supplies a packet (via Outbound) or the
// device closes, then copies it into bufs[0] at offset.
func (t *chanTUN) Read(bufs [][]byte, sizes []int, offset int) (int, error) {
	select {
	case <-t.closed:
		return 0, io.EOF
	case pkt, ok := <-t.Outbound:
		if !ok {
			return 0, io.EOF
		}
		sizes[0] = copy(bufs[0][offset:], pkt)
		return 1, nil
	}
}

// Write copies packets from tsnet onto Inbound for the peer process.
// NOTE(review): the empty default case silently drops a packet when Inbound
// is full — looks like deliberate backpressure-free behavior, but confirm.
func (t *chanTUN) Write(bufs [][]byte, offset int) (int, error) {
	for _, buf := range bufs {
		pkt := buf[offset:]
		if len(pkt) == 0 {
			continue
		}
		select {
		case <-t.closed:
			return 0, errors.New("closed")
		case t.Inbound <- append([]byte(nil), pkt...):
		default:
		}
	}
	return len(bufs), nil
}

// MTU reports a fixed 1280-byte MTU.
func (t *chanTUN) MTU() (int, error) { return 1280, nil }

// Name reports the synthetic device name.
func (t *chanTUN) Name() (string, error) { return "burrow-tailnet", nil }

// Events exposes the (already EventUp-primed) event channel.
func (t *chanTUN) Events() <-chan tun.Event { return t.events }

// BatchSize is 1: Read/Write handle one packet per call.
func (t *chanTUN) BatchSize() int { return 1 }
func main() {
listen := flag.String("listen", "127.0.0.1:0", "local listen address")
stateDir := flag.String("state-dir", "", "persistent state directory")
hostname := flag.String("hostname", "burrow-apple", "tailnet hostname")
controlURL := flag.String("control-url", "", "optional control URL")
packetSocket := flag.String("packet-socket", "", "optional unix socket path for raw packet bridging")
udpEchoPort := flag.Int("udp-echo-port", 0, "optional tailnet UDP echo port")
flag.Parse()
if *stateDir == "" {
@ -167,24 +48,6 @@ func main() {
Hostname: *hostname,
UserLogf: log.Printf,
}
var tunDevice *chanTUN
var packetListener net.Listener
if *packetSocket != "" {
_ = os.Remove(*packetSocket)
ln, err := net.Listen("unix", *packetSocket)
if err != nil {
log.Fatalf("packet listen: %v", err)
}
packetListener = ln
defer func() {
packetListener.Close()
_ = os.Remove(*packetSocket)
}()
tunDevice = newChanTUN()
server.Tun = tunDevice
}
if *controlURL != "" {
server.ControlURL = *controlURL
}
@ -198,7 +61,6 @@ func main() {
if err != nil {
log.Fatalf("local client: %v", err)
}
state := &helperState{}
ln, err := net.Listen("tcp", *listen)
if err != nil {
@ -206,27 +68,12 @@ func main() {
}
defer ln.Close()
if packetListener != nil {
go servePacketBridge(packetListener, tunDevice)
}
if *udpEchoPort > 0 {
go serveUDPEcho(context.Background(), server, localClient, *udpEchoPort)
}
hello := helperHello{
ListenAddr: ln.Addr().String(),
}
if *packetSocket != "" {
hello.PacketSocket = *packetSocket
}
if err := json.NewEncoder(os.Stdout).Encode(hello); err != nil {
log.Fatalf("write hello: %v", err)
}
fmt.Printf("{\"listen_addr\":%q}\n", ln.Addr().String())
_ = os.Stdout.Sync()
mux := http.NewServeMux()
mux.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) {
status, err := snapshot(r.Context(), localClient, state)
status, err := snapshot(r.Context(), localClient)
if err != nil {
http.Error(w, err.Error(), http.StatusBadGateway)
return
@ -234,40 +81,6 @@ func main() {
w.Header().Set("content-type", "application/json")
_ = json.NewEncoder(w).Encode(status)
})
mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
ip := r.URL.Query().Get("ip")
if ip == "" {
http.Error(w, "missing ip", http.StatusBadRequest)
return
}
target, err := netip.ParseAddr(ip)
if err != nil {
http.Error(w, fmt.Sprintf("invalid ip: %v", err), http.StatusBadRequest)
return
}
pingType := tailcfg.PingTSMP
switch r.URL.Query().Get("type") {
case "", "tsmp", "TSMP":
pingType = tailcfg.PingTSMP
case "icmp", "ICMP":
pingType = tailcfg.PingICMP
case "peerapi":
pingType = tailcfg.PingPeerAPI
default:
http.Error(w, "unsupported ping type", http.StatusBadRequest)
return
}
result, err := localClient.Ping(r.Context(), target, pingType)
if err != nil {
http.Error(w, err.Error(), http.StatusBadGateway)
return
}
w.Header().Set("content-type", "application/json")
_ = json.NewEncoder(w).Encode(&pingResponse{Result: result})
})
mux.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNoContent)
go func() {
@ -283,110 +96,16 @@ func main() {
log.Fatal(httpServer.Serve(ln))
}
// servePacketBridge accepts connections on listener and, one at a time,
// bridges length-prefixed packet frames between the connection and the
// in-memory TUN device. It returns when the listener is closed; any
// other accept error is logged and the loop continues.
func servePacketBridge(listener net.Listener, device *chanTUN) {
	for {
		conn, err := listener.Accept()
		if err != nil {
			if errors.Is(err, net.ErrClosed) {
				return
			}
			log.Printf("packet accept: %v", err)
			continue
		}
		log.Printf("packet bridge connected")
		// Connections are served sequentially: a second client waits in
		// Accept until the current bridge ends. io.EOF is a clean close.
		if err := bridgePacketConn(conn, device); err != nil && !errors.Is(err, io.EOF) {
			log.Printf("packet bridge error: %v", err)
		}
		_ = conn.Close()
		log.Printf("packet bridge disconnected")
	}
}
// bridgePacketConn shuttles packets in both directions between conn
// (length-prefixed frames, see readFrame/writeFrame) and the in-memory
// TUN device until either side fails or the device is closed. It
// returns the first error observed (io.EOF when the device closes or
// its Inbound channel is drained).
//
// NOTE(review): after one direction errors, the other goroutine may
// remain blocked on its channel or conn until the caller closes conn —
// confirm every caller closes conn afterwards (servePacketBridge does).
func bridgePacketConn(conn net.Conn, device *chanTUN) error {
	// Buffered to 2 so both goroutines can report without blocking.
	errCh := make(chan error, 2)
	// conn -> device: decode frames off the wire into the outbound queue.
	go func() {
		for {
			pkt, err := readFrame(conn)
			if err != nil {
				errCh <- err
				return
			}
			select {
			case <-device.closed:
				errCh <- io.EOF
				return
			case device.Outbound <- pkt:
			}
		}
	}()
	// device -> conn: encode inbound TUN packets back onto the wire.
	go func() {
		for {
			select {
			case <-device.closed:
				errCh <- io.EOF
				return
			case pkt, ok := <-device.Inbound:
				if !ok {
					errCh <- io.EOF
					return
				}
				if err := writeFrame(conn, pkt); err != nil {
					errCh <- err
					return
				}
			}
		}
	}()
	return <-errCh
}
func readFrame(r io.Reader) ([]byte, error) {
var size [4]byte
if _, err := io.ReadFull(r, size[:]); err != nil {
return nil, err
}
length := binary.BigEndian.Uint32(size[:])
if length == 0 {
return []byte{}, nil
}
packet := make([]byte, length)
if _, err := io.ReadFull(r, packet); err != nil {
return nil, err
}
return packet, nil
}
func writeFrame(w io.Writer, packet []byte) error {
var size [4]byte
binary.BigEndian.PutUint32(size[:], uint32(len(packet)))
if _, err := w.Write(size[:]); err != nil {
return err
}
if len(packet) == 0 {
return nil
}
_, err := w.Write(packet)
return err
}
func snapshot(ctx context.Context, localClient *local.Client, state *helperState) (*statusResponse, error) {
status, err := localClient.Status(ctx)
func snapshot(ctx context.Context, localClient *local.Client) (*statusResponse, error) {
status, err := localClient.StatusWithoutPeers(ctx)
if err != nil {
return nil, err
}
authURL := status.AuthURL
if authURL == "" {
authURL = state.authURLSnapshot()
if (status.BackendState == ipn.NeedsLogin.String() || status.BackendState == ipn.NoState.String()) && status.AuthURL == "" {
if err := localClient.StartLoginInteractive(ctx); err != nil {
return nil, err
}
if status.BackendState == ipn.Running.String() {
state.clearAuthURL()
authURL = ""
} else if (status.BackendState == ipn.NeedsLogin.String() || status.BackendState == ipn.NoState.String()) && authURL == "" {
authURL, err = awaitAuthURL(ctx, localClient, state)
status, err = localClient.StatusWithoutPeers(ctx)
if err != nil {
return nil, err
}
@ -394,7 +113,7 @@ func snapshot(ctx context.Context, localClient *local.Client, state *helperState
response := &statusResponse{
BackendState: status.BackendState,
AuthURL: authURL,
AuthURL: status.AuthURL,
Running: status.BackendState == ipn.Running.String(),
NeedsLogin: status.BackendState == ipn.NeedsLogin.String(),
Health: append([]string(nil), status.Health...),
@ -410,114 +129,5 @@ func snapshot(ctx context.Context, localClient *local.Client, state *helperState
for _, ip := range status.TailscaleIPs {
response.TailscaleIPs = append(response.TailscaleIPs, ip.String())
}
for _, key := range status.Peers() {
peer := status.Peer[key]
if peer == nil {
continue
}
summary := peerSummary{
Name: peer.HostName,
DNSName: peer.DNSName,
Online: peer.Online,
Active: peer.Active,
Relay: peer.Relay,
CurAddr: peer.CurAddr,
LastSeenUnix: peer.LastSeen.Unix(),
}
for _, ip := range peer.TailscaleIPs {
summary.TailscaleIPs = append(summary.TailscaleIPs, ip.String())
}
response.Peers = append(response.Peers, summary)
}
return response, nil
}
// serveUDPEcho waits for the node to obtain a Tailscale IP, binds a UDP
// listener on that address at the given port via the tsnet server, and
// echoes every datagram back to its sender. It runs until the listener
// fails or is closed; failures are logged, not returned.
func serveUDPEcho(ctx context.Context, server *tsnet.Server, localClient *local.Client, port int) {
	ip, err := awaitTailscaleIP(ctx, localClient)
	if err != nil {
		log.Printf("udp echo setup failed: %v", err)
		return
	}
	listenAddr := net.JoinHostPort(ip.String(), strconv.Itoa(port))
	pc, err := server.ListenPacket("udp", listenAddr)
	if err != nil {
		log.Printf("udp echo listen failed on %s: %v", listenAddr, err)
		return
	}
	defer pc.Close()
	log.Printf("udp echo listening on %s", pc.LocalAddr())
	// 64 KiB buffer covers the maximum UDP payload.
	buf := make([]byte, 64<<10)
	for {
		n, addr, err := pc.ReadFrom(buf)
		if err != nil {
			if errors.Is(err, net.ErrClosed) || errors.Is(err, io.EOF) {
				return
			}
			log.Printf("udp echo read failed: %v", err)
			return
		}
		if _, err := pc.WriteTo(buf[:n], addr); err != nil {
			log.Printf("udp echo write failed: %v", err)
			return
		}
	}
}
// awaitTailscaleIP polls the local client until the node reports a
// Tailscale address, preferring IPv4 over IPv6. It gives up after 60
// attempts spaced 250ms apart (~15s total) or when ctx is cancelled.
func awaitTailscaleIP(ctx context.Context, localClient *local.Client) (netip.Addr, error) {
	const attempts = 60
	for i := 0; i < attempts; i++ {
		if status, err := localClient.StatusWithoutPeers(ctx); err == nil {
			var fallback netip.Addr
			haveFallback := false
			for _, addr := range status.TailscaleIPs {
				if addr.Is4() {
					return addr, nil
				}
				if !haveFallback && addr.Is6() {
					fallback, haveFallback = addr, true
				}
			}
			if haveFallback {
				return fallback, nil
			}
		}
		select {
		case <-ctx.Done():
			return netip.Addr{}, ctx.Err()
		case <-time.After(250 * time.Millisecond):
		}
	}
	return netip.Addr{}, errors.New("timed out waiting for tailscale IP")
}
// awaitAuthURL attaches a watcher to the IPN bus, starts an interactive
// login, and waits (watch bounded to 8s) for the backend to publish a
// browse-to URL. It returns "" with no error when the backend reaches
// Running without needing a URL; if the watch times out or is
// cancelled, it falls back to the last URL cached in state.
func awaitAuthURL(ctx context.Context, localClient *local.Client, state *helperState) (string, error) {
	watchCtx, cancel := context.WithTimeout(ctx, 8*time.Second)
	defer cancel()
	watcher, err := localClient.WatchIPNBus(watchCtx, ipn.NotifyInitialState)
	if err != nil {
		return "", err
	}
	defer watcher.Close()
	// The watcher is attached before the login is kicked off, so the
	// BrowseToURL notification is not raced past.
	if err := localClient.StartLoginInteractive(ctx); err != nil {
		return "", err
	}
	for {
		notify, err := watcher.Next()
		if err != nil {
			if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
				return state.authURLSnapshot(), nil
			}
			return "", err
		}
		if notify.BrowseToURL != nil && *notify.BrowseToURL != "" {
			// Cache the URL so later snapshots can report it.
			state.setAuthURL(*notify.BrowseToURL)
			return *notify.BrowseToURL, nil
		}
		if notify.State != nil && *notify.State == ipn.Running {
			state.clearAuthURL()
			return "", nil
		}
	}
}

View file

@ -11,8 +11,6 @@ relm4 = { version = "0.6", features = ["libadwaita", "gnome_44"]}
burrow = { version = "*", path = "../burrow/" }
tokio = { version = "1.35.0", features = ["time", "sync"] }
gettext-rs = { version = "0.7.0", features = ["gettext-system"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
[build-dependencies]
anyhow = "1.0"

View file

@ -1,139 +0,0 @@
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use std::{
path::PathBuf,
time::{SystemTime, UNIX_EPOCH},
};
/// One saved connection profile as persisted to the JSON account store.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AccountRecord {
    /// Stable identifier; `new_record` generates `<kind>-<unix seconds>`.
    pub id: String,
    /// Which network technology this record configures.
    pub kind: AccountKind,
    /// Human-readable display name (also the secondary sort key in `upsert`).
    pub title: String,
    /// Control-plane authority URL, when applicable.
    pub authority: Option<String>,
    pub account: String,
    pub identity: String,
    pub hostname: Option<String>,
    pub tailnet: Option<String>,
    pub note: Option<String>,
    /// Unix seconds; stamped on first insert by `upsert` when still 0.
    pub created_at: u64,
    /// Unix seconds; refreshed on every `upsert`.
    pub updated_at: u64,
}
/// Network technology backing an `AccountRecord`. Serialized lowercase.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AccountKind {
    WireGuard,
    Tor,
    Tailnet,
}

impl AccountKind {
    /// Display label for this kind.
    pub fn title(self) -> &'static str {
        match self {
            Self::WireGuard => "WireGuard",
            Self::Tor => "Tor",
            Self::Tailnet => "Tailnet",
        }
    }

    /// Primary ordering used when sorting the store in `upsert`:
    /// Tailnet first, then Tor, then WireGuard.
    fn sort_rank(self) -> u8 {
        match self {
            Self::Tailnet => 0,
            Self::Tor => 1,
            Self::WireGuard => 2,
        }
    }
}
pub fn load() -> Result<Vec<AccountRecord>> {
let path = storage_path()?;
if !path.exists() {
return Ok(Vec::new());
}
let data =
std::fs::read(&path).with_context(|| format!("failed to read {}", path.display()))?;
serde_json::from_slice(&data).with_context(|| format!("failed to parse {}", path.display()))
}
/// Insert `record` (or replace the existing record with the same id),
/// stamp its timestamps, re-sort the store (kind rank, then
/// case-insensitive title) and persist it. Returns the full updated
/// account list.
pub fn upsert(mut record: AccountRecord) -> Result<Vec<AccountRecord>> {
    let mut accounts = load()?;
    let now = timestamp();
    record.updated_at = now;
    // A zero created_at marks a brand-new record.
    if record.created_at == 0 {
        record.created_at = now;
    }
    match accounts.iter().position(|existing| existing.id == record.id) {
        Some(slot) => accounts[slot] = record,
        None => accounts.push(record),
    }
    accounts.sort_by(|a, b| {
        a.kind
            .sort_rank()
            .cmp(&b.kind.sort_rank())
            .then_with(|| a.title.to_lowercase().cmp(&b.title.to_lowercase()))
    });
    persist(&accounts)?;
    Ok(accounts)
}
/// Build a fresh `AccountRecord` with both timestamps set to now and an
/// id of the form `<kind>-<unix seconds>`.
pub fn new_record(
    kind: AccountKind,
    title: String,
    authority: Option<String>,
    account: String,
    identity: String,
    hostname: Option<String>,
    tailnet: Option<String>,
    note: Option<String>,
) -> AccountRecord {
    let created = timestamp();
    let id = format!("{}-{created}", kind.title().to_ascii_lowercase());
    AccountRecord {
        id,
        kind,
        title,
        authority,
        account,
        identity,
        hostname,
        tailnet,
        note,
        created_at: created,
        updated_at: created,
    }
}
/// Serialize `accounts` as pretty-printed JSON to the store path,
/// creating the parent directory first when needed.
fn persist(accounts: &[AccountRecord]) -> Result<()> {
    let path = storage_path()?;
    if let Some(parent) = path.parent() {
        std::fs::create_dir_all(parent)
            .with_context(|| format!("failed to create {}", parent.display()))?;
    }
    let encoded =
        serde_json::to_vec_pretty(accounts).context("failed to encode account store")?;
    std::fs::write(&path, encoded)
        .with_context(|| format!("failed to write {}", path.display()))
}
/// Resolve the on-disk location of `accounts.json`: `$XDG_DATA_HOME`
/// first, then `$HOME/.local/share`, finally the system temp directory.
fn storage_path() -> Result<PathBuf> {
    let base = if let Some(data_home) = std::env::var_os("XDG_DATA_HOME") {
        PathBuf::from(data_home).join("burrow")
    } else if let Some(home) = std::env::var_os("HOME") {
        PathBuf::from(home)
            .join(".local")
            .join("share")
            .join("burrow")
    } else {
        // No usable home: fall back to a flat file in the temp dir.
        return Ok(std::env::temp_dir().join("burrow-accounts.json"));
    };
    Ok(base.join("accounts.json"))
}
/// Current wall-clock time as Unix seconds; 0 if the system clock reads
/// before the epoch.
fn timestamp() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_or(0, |elapsed| elapsed.as_secs())
}

View file

@ -1,19 +1,24 @@
use super::*;
use anyhow::Context;
use std::time::Duration;
const RECONNECT_POLL_TIME: Duration = Duration::from_secs(5);
pub struct App {
_home_screen: AsyncController<home_screen::HomeScreen>,
daemon_client: Arc<Mutex<Option<DaemonClient>>>,
settings_screen: Controller<settings_screen::SettingsScreen>,
switch_screen: AsyncController<switch_screen::SwitchScreen>,
}
#[derive(Debug)]
pub enum AppMsg {
None,
PostInit,
}
impl App {
pub fn run() {
let app = RelmApp::new(config::ID);
relm4::set_global_css(APP_CSS);
Self::setup_gresources().unwrap();
Self::setup_i18n().unwrap();
@ -44,7 +49,7 @@ impl AsyncComponent for App {
view! {
adw::Window {
set_title: Some("Burrow"),
set_default_size: (900, 760),
set_default_size: (640, 480),
}
}
@ -53,84 +58,100 @@ impl AsyncComponent for App {
root: Self::Root,
sender: AsyncComponentSender<Self>,
) -> AsyncComponentParts<Self> {
let home_screen = home_screen::HomeScreen::builder()
.launch(())
let daemon_client = Arc::new(Mutex::new(DaemonClient::new().await.ok()));
let switch_screen = switch_screen::SwitchScreen::builder()
.launch(switch_screen::SwitchScreenInit {
daemon_client: Arc::clone(&daemon_client),
})
.forward(sender.input_sender(), |_| AppMsg::None);
let settings_screen = settings_screen::SettingsScreen::builder()
.launch(settings_screen::SettingsScreenInit {
daemon_client: Arc::clone(&daemon_client),
})
.forward(sender.input_sender(), |_| AppMsg::None);
let widgets = view_output!();
let view_stack = adw::ViewStack::new();
view_stack.add_titled(switch_screen.widget(), None, "Switch");
view_stack.add_titled(settings_screen.widget(), None, "Settings");
let view_switcher_bar = adw::ViewSwitcherBar::builder().stack(&view_stack).build();
view_switcher_bar.set_reveal(true);
// When libadwaita 1.4 support becomes more avaliable, this approach is more appropriate
//
// let toolbar = adw::ToolbarView::new();
// toolbar.add_top_bar(
// &adw::HeaderBar::builder()
// .title_widget(&gtk::Label::new(Some("Burrow")))
// .build(),
// );
// toolbar.add_bottom_bar(&view_switcher_bar);
// toolbar.set_content(Some(&view_stack));
// root.set_content(Some(&toolbar));
let content = gtk::Box::new(gtk::Orientation::Vertical, 0);
content.append(
&adw::HeaderBar::builder()
.title_widget(&gtk::Label::new(Some("Burrow")))
.build(),
);
content.append(home_screen.widget());
content.append(&view_stack);
content.append(&view_switcher_bar);
root.set_content(Some(&content));
let model = App { _home_screen: home_screen };
sender.input(AppMsg::PostInit);
let model = App {
daemon_client,
switch_screen,
settings_screen,
};
AsyncComponentParts { model, widgets }
}
async fn update(
&mut self,
msg: Self::Input,
_msg: Self::Input,
_sender: AsyncComponentSender<Self>,
_root: &Self::Root,
) {
match msg {
AppMsg::None => {}
loop {
tokio::time::sleep(RECONNECT_POLL_TIME).await;
{
let mut daemon_client = self.daemon_client.lock().await;
let mut disconnected_daemon_client = false;
if let Some(daemon_client) = daemon_client.as_mut() {
if let Err(_e) = daemon_client.send_command(DaemonCommand::ServerInfo).await {
disconnected_daemon_client = true;
self.switch_screen
.emit(switch_screen::SwitchScreenMsg::DaemonDisconnect);
self.settings_screen
.emit(settings_screen::SettingsScreenMsg::DaemonStateChange)
}
}
if disconnected_daemon_client || daemon_client.is_none() {
match DaemonClient::new().await {
Ok(new_daemon_client) => {
*daemon_client = Some(new_daemon_client);
self.switch_screen
.emit(switch_screen::SwitchScreenMsg::DaemonReconnect);
self.settings_screen
.emit(settings_screen::SettingsScreenMsg::DaemonStateChange)
}
Err(_e) => {
// TODO: Handle Error
}
}
}
}
}
}
}
const APP_CSS: &str = r#"
.empty-state {
border-radius: 18px;
padding: 22px;
background: alpha(@card_bg_color, 0.72);
}
.summary-card {
border-radius: 18px;
padding: 14px;
background: alpha(@card_bg_color, 0.72);
}
.network-card {
border-radius: 10px;
padding: 16px;
box-shadow: 0 2px 6px alpha(black, 0.14);
}
.wireguard-card {
background: linear-gradient(135deg, #3277d8, #174ea6);
}
.tailnet-card {
background: linear-gradient(135deg, #31b891, #147d69);
}
.network-card-kind,
.network-card-title,
.network-card-detail {
color: white;
}
.network-card-kind {
opacity: 0.86;
font-weight: 700;
}
.network-card-title {
font-size: 1.22em;
font-weight: 700;
}
.network-card-detail {
opacity: 0.92;
font-family: monospace;
}
"#;

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,6 @@
use super::*;
use crate::daemon_api;
use adw::prelude::*;
use burrow::{DaemonClient, DaemonCommand, DaemonResponseData};
use gtk::Align;
use relm4::{
component::{
@ -9,9 +9,13 @@ use relm4::{
},
prelude::*,
};
use std::sync::Arc;
use tokio::sync::Mutex;
mod app;
mod home_screen;
mod settings;
mod settings_screen;
mod switch_screen;
pub use app::*;
pub use home_screen::{HomeScreen, HomeScreenMsg};
pub use settings::{DaemonGroupMsg, DiagGroupMsg};

View file

@ -1,420 +0,0 @@
use anyhow::{anyhow, Context, Result};
use burrow::{
control::{TailnetConfig, TailnetProvider},
grpc_defs::{
Empty, Network, NetworkType, State, TailnetDiscoverRequest, TailnetLoginCancelRequest,
TailnetLoginStartRequest, TailnetLoginStatusRequest, TailnetProbeRequest,
},
BurrowClient,
};
use std::{path::PathBuf, sync::OnceLock};
use tokio::time::{timeout, Duration};
const RPC_TIMEOUT: Duration = Duration::from_secs(3);
const MANAGED_TAILSCALE_AUTHORITY: &str = "https://controlplane.tailscale.com";
static EMBEDDED_DAEMON_STARTED: OnceLock<()> = OnceLock::new();
/// Coarse tunnel state reported by the daemon's status stream.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TunnelState {
    Running,
    Stopped,
}

/// One row of the network list: daemon-assigned id plus display text.
#[derive(Debug, Clone)]
pub struct NetworkSummary {
    pub id: i32,
    pub title: String,
    pub detail: String,
}

/// Result of asking the daemon to discover the tailnet authority for an
/// email address.
#[derive(Debug, Clone)]
pub struct TailnetDiscovery {
    pub authority: String,
    pub managed: bool,
    pub oidc_issuer: Option<String>,
}

/// Result of probing a tailnet authority URL.
#[derive(Debug, Clone)]
pub struct TailnetProbe {
    pub summary: String,
    pub detail: Option<String>,
    // Status code from the probe RPC — presumably HTTP; confirm against
    // the daemon's TailnetProbeRequest handler.
    pub status_code: i32,
}

/// Snapshot of an in-progress (or completed) tailnet login session.
#[derive(Debug, Clone)]
pub struct TailnetLoginStatus {
    pub session_id: String,
    pub backend_state: String,
    /// Browser URL the user must visit while login is pending, if any.
    pub auth_url: Option<String>,
    pub running: bool,
    pub needs_login: bool,
    pub tailnet_name: Option<String>,
    pub self_dns_name: Option<String>,
    pub tailnet_ips: Vec<String>,
    pub health: Vec<String>,
}
/// Authority URL used when the user picks the managed Tailscale service.
pub fn default_tailnet_authority() -> &'static str {
    MANAGED_TAILSCALE_AUTHORITY
}
/// Point the Burrow client at the default daemon socket unless the
/// caller already set BURROW_SOCKET_PATH.
///
/// NOTE(review): `std::env::set_var` is unsound once other threads are
/// running — confirm this is only called during single-threaded startup.
pub fn configure_client_paths() -> Result<()> {
    if std::env::var_os("BURROW_SOCKET_PATH").is_none() {
        std::env::set_var("BURROW_SOCKET_PATH", default_socket_path()?);
    }
    Ok(())
}
/// Make sure a Burrow daemon is reachable: reuse a live one when
/// possible, otherwise spawn the embedded in-process daemon (at most
/// once per process, guarded by EMBEDDED_DAEMON_STARTED) and verify it
/// answers a tunnel-status RPC.
pub async fn ensure_daemon() -> Result<()> {
    configure_client_paths()?;
    if daemon_available().await {
        return Ok(());
    }
    let socket_path = socket_path()?;
    let db_path = database_path()?;
    ensure_parent(&socket_path)?;
    ensure_parent(&db_path)?;
    if EMBEDDED_DAEMON_STARTED.get().is_none() {
        // Run the startup off the async runtime via spawn_blocking.
        tokio::task::spawn_blocking(move || {
            burrow::spawn_in_process_with_paths(Some(socket_path), Some(db_path));
        })
        .await
        .context("failed to join embedded daemon startup")?;
        let _ = EMBEDDED_DAEMON_STARTED.set(());
    }
    tunnel_state()
        .await
        .map(|_| ())
        .context("Burrow daemon started but did not accept tunnel status RPCs")
}
/// Map an authority string onto a provider: the managed Tailscale
/// control plane (bare host, http://, or https://; case-insensitive,
/// trailing slashes ignored) is Tailscale, everything else Headscale.
pub fn infer_tailnet_provider(authority: &str) -> TailnetProvider {
    let canonical = authority.trim().trim_end_matches('/').to_ascii_lowercase();
    let managed = matches!(
        canonical.as_str(),
        "controlplane.tailscale.com" | "http://controlplane.tailscale.com"
    ) || canonical == MANAGED_TAILSCALE_AUTHORITY;
    if managed {
        TailnetProvider::Tailscale
    } else {
        TailnetProvider::Headscale
    }
}
/// True when the daemon answers a tunnel-status RPC.
pub async fn daemon_available() -> bool {
    matches!(tunnel_state().await, Ok(_))
}
/// Daemon socket path: BURROW_SOCKET_PATH override, else the default.
fn socket_path() -> Result<PathBuf> {
    match std::env::var_os("BURROW_SOCKET_PATH") {
        Some(explicit) => Ok(PathBuf::from(explicit)),
        None => default_socket_path(),
    }
}
/// Default daemon socket location: `$XDG_RUNTIME_DIR/burrow.sock`, else
/// a per-user path under /tmp.
///
/// NOTE(review): `UID` is a shell variable that is usually not exported
/// to the environment, so this likely falls back to "1000" — confirm
/// whether a real uid lookup is wanted here.
fn default_socket_path() -> Result<PathBuf> {
    if let Some(runtime_dir) = std::env::var_os("XDG_RUNTIME_DIR") {
        return Ok(PathBuf::from(runtime_dir).join("burrow.sock"));
    }
    let uid = std::env::var("UID").unwrap_or_else(|_| "1000".to_owned());
    Ok(PathBuf::from(format!("/tmp/burrow-{uid}.sock")))
}
/// Resolve the daemon database path: BURROW_DB_PATH override, then the
/// XDG data dir, then `~/.local/share`, finally the temp directory.
fn database_path() -> Result<PathBuf> {
    if let Some(explicit) = std::env::var_os("BURROW_DB_PATH") {
        return Ok(PathBuf::from(explicit));
    }
    let base = if let Some(data_home) = std::env::var_os("XDG_DATA_HOME") {
        Some(PathBuf::from(data_home))
    } else {
        std::env::var_os("HOME")
            .map(|home| PathBuf::from(home).join(".local").join("share"))
    };
    match base {
        Some(dir) => Ok(dir.join("burrow").join("burrow.db")),
        None => Ok(std::env::temp_dir().join("burrow.db")),
    }
}
/// Create the directory containing `path` (no-op for bare filenames).
fn ensure_parent(path: &PathBuf) -> Result<()> {
    match path.parent() {
        Some(parent) => std::fs::create_dir_all(parent)
            .with_context(|| format!("failed to create {}", parent.display())),
        None => Ok(()),
    }
}
/// Open a UDS client and read one message from the daemon's tunnel
/// status stream, mapping it to Running/Stopped. Each RPC step is
/// bounded by RPC_TIMEOUT.
pub async fn tunnel_state() -> Result<TunnelState> {
    let mut client = BurrowClient::from_uds().await?;
    let mut stream = timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_status(Empty {}))
        .await
        .context("timed out connecting to Burrow daemon")??
        .into_inner();
    let status = timeout(RPC_TIMEOUT, stream.message())
        .await
        .context("timed out reading Burrow tunnel status")??
        .context("Burrow daemon ended the status stream without a state")?;
    Ok(match status.state() {
        State::Running => TunnelState::Running,
        State::Stopped => TunnelState::Stopped,
    })
}
/// Ask the daemon to bring the tunnel up (bounded by RPC_TIMEOUT).
pub async fn start_tunnel() -> Result<()> {
    let mut client = BurrowClient::from_uds().await?;
    timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_start(Empty {}))
        .await
        .context("timed out starting Burrow tunnel")??;
    Ok(())
}
/// Ask the daemon to take the tunnel down (bounded by RPC_TIMEOUT).
pub async fn stop_tunnel() -> Result<()> {
    let mut client = BurrowClient::from_uds().await?;
    timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_stop(Empty {}))
        .await
        .context("timed out stopping Burrow tunnel")??;
    Ok(())
}
/// Fetch one snapshot from the daemon's network-list stream and
/// summarize each entry for display.
pub async fn list_networks() -> Result<Vec<NetworkSummary>> {
    let mut client = BurrowClient::from_uds().await?;
    let mut stream = timeout(RPC_TIMEOUT, client.networks_client.network_list(Empty {}))
        .await
        .context("timed out connecting to Burrow network list")??
        .into_inner();
    let response = timeout(RPC_TIMEOUT, stream.message())
        .await
        .context("timed out reading Burrow network list")??
        .context("Burrow daemon ended the network stream without a snapshot")?;
    Ok(response.network.iter().map(summarize_network).collect())
}
/// Store a raw WireGuard configuration as a new network; returns its id.
pub async fn add_wireguard(config: String) -> Result<i32> {
    let payload = config.into_bytes();
    add_network(NetworkType::WireGuard, payload).await
}
/// Persist a new Tailnet network: infer the provider from the authority
/// URL, encode the whole TailnetConfig as pretty-printed JSON, and
/// store it. Returns the new network id.
pub async fn add_tailnet(
    authority: String,
    account: String,
    identity: String,
    hostname: Option<String>,
    tailnet: Option<String>,
) -> Result<i32> {
    let provider = infer_tailnet_provider(&authority);
    let config = TailnetConfig {
        provider,
        authority: Some(authority),
        account: Some(account),
        identity: Some(identity),
        hostname,
        tailnet,
    };
    let payload = serde_json::to_vec_pretty(&config)?;
    add_network(NetworkType::Tailnet, payload).await
}
/// Ask the daemon to discover the tailnet authority for `email`; empty
/// optional strings in the response are mapped to None.
pub async fn discover_tailnet(email: String) -> Result<TailnetDiscovery> {
    let mut client = BurrowClient::from_uds().await?;
    let response = timeout(
        RPC_TIMEOUT,
        client
            .tailnet_client
            .discover(TailnetDiscoverRequest { email }),
    )
    .await
    .context("timed out discovering Tailnet authority")??
    .into_inner();
    Ok(TailnetDiscovery {
        authority: response.authority,
        managed: response.managed,
        oidc_issuer: optional(response.oidc_issuer),
    })
}
/// Ask the daemon to probe `authority`; the empty detail string is
/// mapped to None.
pub async fn probe_tailnet(authority: String) -> Result<TailnetProbe> {
    let mut client = BurrowClient::from_uds().await?;
    let response = timeout(
        RPC_TIMEOUT,
        client
            .tailnet_client
            .probe(TailnetProbeRequest { authority }),
    )
    .await
    .context("timed out probing Tailnet authority")??
    .into_inner();
    Ok(TailnetProbe {
        summary: response.summary,
        detail: optional(response.detail),
        status_code: response.status_code,
    })
}
/// Begin an interactive tailnet sign-in via the daemon. A missing
/// hostname is sent as the empty string.
pub async fn start_tailnet_login(
    authority: String,
    account_name: String,
    identity_name: String,
    hostname: Option<String>,
) -> Result<TailnetLoginStatus> {
    let mut client = BurrowClient::from_uds().await?;
    let response = timeout(
        RPC_TIMEOUT,
        client.tailnet_client.login_start(TailnetLoginStartRequest {
            account_name,
            identity_name,
            hostname: hostname.unwrap_or_default(),
            authority,
        }),
    )
    .await
    .context("timed out starting Tailnet sign-in")??
    .into_inner();
    Ok(decode_tailnet_status(response))
}
/// Poll the daemon for the state of an in-flight tailnet sign-in.
pub async fn tailnet_login_status(session_id: String) -> Result<TailnetLoginStatus> {
    let mut client = BurrowClient::from_uds().await?;
    let response = timeout(
        RPC_TIMEOUT,
        client
            .tailnet_client
            .login_status(TailnetLoginStatusRequest { session_id }),
    )
    .await
    .context("timed out reading Tailnet sign-in status")??
    .into_inner();
    Ok(decode_tailnet_status(response))
}
/// Ask the daemon to abandon an in-flight tailnet sign-in session.
pub async fn cancel_tailnet_login(session_id: String) -> Result<()> {
    let mut client = BurrowClient::from_uds().await?;
    timeout(
        RPC_TIMEOUT,
        client
            .tailnet_client
            .login_cancel(TailnetLoginCancelRequest { session_id }),
    )
    .await
    .context("timed out cancelling Tailnet sign-in")??;
    Ok(())
}
/// Allocate the next network id and store `payload` under it.
///
/// NOTE(review): id allocation and insertion are two separate RPCs, so
/// concurrent callers could race to the same id — confirm the daemon
/// rejects or resolves duplicate ids.
async fn add_network(network_type: NetworkType, payload: Vec<u8>) -> Result<i32> {
    let id = next_network_id().await?;
    let mut client = BurrowClient::from_uds().await?;
    timeout(
        RPC_TIMEOUT,
        client.networks_client.network_add(Network {
            id,
            r#type: network_type.into(),
            payload,
        }),
    )
    .await
    .context("timed out saving network to Burrow daemon")??;
    Ok(id)
}
/// Next free network id: one past the current maximum (1 when empty).
async fn next_network_id() -> Result<i32> {
    let highest = list_networks()
        .await?
        .into_iter()
        .map(|network| network.id)
        .max()
        .unwrap_or(0);
    Ok(highest + 1)
}
/// Dispatch to the per-type summarizer for a stored network.
fn summarize_network(network: &Network) -> NetworkSummary {
    match network.r#type() {
        NetworkType::Tailnet => summarize_tailnet(network),
        NetworkType::WireGuard => summarize_wireguard(network),
    }
}
/// Summarize a WireGuard network: title from its id, detail from the
/// first non-empty payload line that is not a `[section]` header.
fn summarize_wireguard(network: &Network) -> NetworkSummary {
    let text = String::from_utf8_lossy(&network.payload);
    let detail = text
        .lines()
        .filter_map(|raw| {
            let line = raw.trim();
            (!line.is_empty() && !line.starts_with('[')).then(|| line.to_owned())
        })
        .next()
        .unwrap_or_else(|| "Stored WireGuard configuration".to_owned());
    NetworkSummary {
        id: network.id,
        title: format!("WireGuard {}", network.id),
        detail,
    }
}
/// Summarize a Tailnet network from its stored JSON config: title is
/// the tailnet name, else the hostname, else "Tailnet"; detail combines
/// authority and account. An unparseable payload produces an
/// explanatory detail rather than an error.
fn summarize_tailnet(network: &Network) -> NetworkSummary {
    match TailnetConfig::from_slice(&network.payload) {
        Ok(config) => {
            let title = config
                .tailnet
                .clone()
                .or(config.hostname.clone())
                .unwrap_or_else(|| "Tailnet".to_owned());
            let authority = config
                .authority
                .unwrap_or_else(|| "default authority".to_owned());
            let account = config.account.unwrap_or_else(|| "default".to_owned());
            NetworkSummary {
                id: network.id,
                title,
                detail: format!("{authority} - account {account}"),
            }
        }
        Err(error) => NetworkSummary {
            id: network.id,
            title: "Tailnet".to_owned(),
            detail: format!("Unable to read Tailnet payload: {error}"),
        },
    }
}
/// Convert the raw gRPC login-status response into the GUI-facing
/// struct, mapping empty optional strings to None.
fn decode_tailnet_status(
    response: burrow::grpc_defs::TailnetLoginStatusResponse,
) -> TailnetLoginStatus {
    let auth_url = optional(response.auth_url);
    let tailnet_name = optional(response.tailnet_name);
    let self_dns_name = optional(response.self_dns_name);
    TailnetLoginStatus {
        session_id: response.session_id,
        backend_state: response.backend_state,
        auth_url,
        running: response.running,
        needs_login: response.needs_login,
        tailnet_name,
        self_dns_name,
        tailnet_ips: response.tailnet_ips,
        health: response.health,
    }
}
/// Trim `value`; None when nothing remains, otherwise the trimmed text.
fn optional(value: String) -> Option<String> {
    match value.trim() {
        "" => None,
        trimmed => Some(trimmed.to_owned()),
    }
}
/// Trim `value`, substituting `fallback` when the result is empty.
pub fn normalized(value: &str, fallback: &str) -> String {
    match value.trim() {
        "" => fallback.to_owned(),
        trimmed => trimmed.to_owned(),
    }
}
/// Trim `value`; None when nothing remains, otherwise the trimmed text.
pub fn normalized_optional(value: &str) -> Option<String> {
    match value.trim() {
        "" => None,
        trimmed => Some(trimmed.to_owned()),
    }
}
/// Trimmed `value`, or an error naming `label` when it is empty/blank.
pub fn require_value(value: &str, label: &str) -> Result<String> {
    normalized_optional(value).ok_or_else(|| anyhow!("{label} is required"))
}

View file

@ -1,15 +1,11 @@
use anyhow::Result;
pub mod components;
mod account_store;
mod daemon_api;
mod diag;
// Generated using meson
mod config;
/// GUI entry point: best-effort configuration of the Burrow daemon
/// socket path (failure is only logged to stderr), then the GTK app.
fn main() {
    if let Err(error) = daemon_api::configure_client_paths() {
        eprintln!("failed to configure Burrow daemon paths: {error}");
    }
    components::App::run();
}

View file

@ -5,18 +5,17 @@ use std::{env, path::Path};
use anyhow::{Context, Result};
use axum::{
extract::{Json, Path as AxumPath, Query, State},
extract::{Json, Path as AxumPath, State},
http::{header::AUTHORIZATION, HeaderMap, StatusCode},
response::IntoResponse,
routing::{get, post},
Router,
};
use serde::Deserialize;
use tokio::signal;
use crate::control::{
discovery, LocalAuthRequest, LocalAuthResponse, MapRequest, MapResponse, RegisterRequest,
RegisterResponse, TailnetDiscovery, BURROW_TAILNET_DOMAIN,
LocalAuthRequest, LocalAuthResponse, MapRequest, MapResponse, RegisterRequest,
RegisterResponse, BURROW_TAILNET_DOMAIN,
};
#[derive(Clone, Debug)]
@ -106,11 +105,6 @@ struct AppState {
tailscale: tailscale::TailscaleBridgeManager,
}
#[derive(Debug, Deserialize)]
struct TailnetDiscoveryQuery {
email: String,
}
type AppResult<T> = Result<T, (StatusCode, String)>;
pub async fn serve() -> Result<()> {
@ -145,7 +139,6 @@ pub fn build_router(config: AuthServerConfig) -> Router {
.route("/v1/auth/login", post(login_local))
.route("/v1/control/register", post(control_register))
.route("/v1/control/map", post(control_map))
.route("/v1/tailnet/discover", get(tailnet_discover))
.route("/v1/tailscale/login/start", post(tailscale_login_start))
.route("/v1/tailscale/login/:session_id", get(tailscale_login_status))
.with_state(AppState {
@ -212,19 +205,6 @@ async fn control_map(
Ok(Json(response))
}
async fn tailnet_discover(
Query(query): Query<TailnetDiscoveryQuery>,
) -> AppResult<Json<TailnetDiscovery>> {
if query.email.trim().is_empty() {
return Err((StatusCode::BAD_REQUEST, "email is required".to_owned()));
}
let discovery = discovery::discover_tailnet(&query.email)
.await
.map_err(|err| (StatusCode::BAD_GATEWAY, err.to_string()))?;
Ok(Json(discovery))
}
async fn tailscale_login_start(
State(state): State<AppState>,
Json(request): Json<tailscale::TailscaleLoginStartRequest>,
@ -414,17 +394,4 @@ mod tests {
assert!(map.dns.expect("dns").magic_dns);
Ok(())
}
#[tokio::test]
async fn tailnet_discover_requires_email() -> Result<()> {
let app = build_router(AuthServerConfig::default());
let response = app
.oneshot(
Request::get("/v1/tailnet/discover?email=")
.body(Body::empty())?,
)
.await?;
assert_eq!(response.status(), StatusCode::BAD_REQUEST);
Ok(())
}
}

View file

@ -26,8 +26,6 @@ pub struct TailscaleLoginStartRequest {
pub hostname: Option<String>,
#[serde(default)]
pub control_url: Option<String>,
#[serde(default)]
pub packet_socket: Option<String>,
}
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
@ -57,35 +55,23 @@ pub struct TailscaleLoginStartResponse {
pub status: TailscaleLoginStatus,
}
pub struct TailscaleLoginSession {
pub session_id: String,
pub helper: Arc<TailscaleHelperProcess>,
pub status: TailscaleLoginStatus,
}
#[derive(Clone, Default)]
pub struct TailscaleBridgeManager {
client: Client,
sessions: Arc<Mutex<HashMap<String, Arc<ManagedSession>>>>,
}
pub struct TailscaleHelperProcess {
struct ManagedSession {
session_id: String,
listen_url: String,
packet_socket: Option<PathBuf>,
control_url: Option<String>,
state_dir: PathBuf,
child: Arc<Mutex<Child>>,
_stderr_task: JoinHandle<()>,
}
type ManagedSession = TailscaleHelperProcess;
#[derive(Debug, Deserialize)]
struct HelperHello {
listen_addr: String,
#[serde(default)]
packet_socket: Option<String>,
}
impl TailscaleBridgeManager {
@ -93,233 +79,22 @@ impl TailscaleBridgeManager {
&self,
request: TailscaleLoginStartRequest,
) -> Result<TailscaleLoginStartResponse> {
let session = self.ensure_session(request).await?;
Ok(TailscaleLoginStartResponse {
session_id: session.session_id,
status: session.status,
})
}
pub async fn ensure_session(
&self,
request: TailscaleLoginStartRequest,
) -> Result<TailscaleLoginSession> {
let key = session_key_for_request(&request);
let requested_packet_socket = request
.packet_socket
.as_deref()
.map(str::trim)
.filter(|value| !value.is_empty());
let requested_control_url = request
.control_url
.as_deref()
.map(str::trim)
.filter(|value| !value.is_empty());
let key = session_key(&request.account_name, &request.identity_name);
if let Some(existing) = self.sessions.lock().await.get(&key).cloned() {
let needs_restart_for_socket = match (requested_packet_socket, existing.packet_socket())
{
(Some(requested), Some(current)) => current != Path::new(requested),
(Some(_), None) => true,
_ => false,
};
let needs_restart_for_control_url =
requested_control_url != existing.control_url().map(|value| value.trim());
if !needs_restart_for_socket && !needs_restart_for_control_url {
match self.fetch_status(existing.as_ref()).await {
Ok(status) => {
return Ok(TailscaleLoginSession {
let status = self.fetch_status(existing.as_ref()).await?;
return Ok(TailscaleLoginStartResponse {
session_id: existing.session_id.clone(),
helper: existing,
status,
});
}
Err(err) => {
log::warn!(
"tailscale login session {} is stale, restarting: {err}",
existing.session_id
);
}
}
} else {
log::info!(
"tailscale login session {} no longer matches requested transport, restarting",
existing.session_id
);
}
self.sessions.lock().await.remove(&key);
let _ = self.shutdown_session(existing.as_ref()).await;
}
let session = Arc::new(spawn_tailscale_helper(&request).await?);
let status = self.wait_for_status(session.as_ref()).await?;
let response = TailscaleLoginSession {
session_id: session.session_id.clone(),
helper: session.clone(),
status,
};
self.sessions.lock().await.insert(key, session);
Ok(response)
}
pub async fn status(&self, session_id: &str) -> Result<Option<TailscaleLoginStatus>> {
let session = {
let sessions = self.sessions.lock().await;
sessions
.values()
.find(|session| session.session_id == session_id)
.cloned()
};
match session {
Some(session) => match self.fetch_status(session.as_ref()).await {
Ok(status) => Ok(Some(status)),
Err(err) => {
self.remove_session_by_id(session_id).await;
Err(err)
}
},
None => Ok(None),
}
}
pub async fn cancel(&self, session_id: &str) -> Result<bool> {
let session = self.remove_session_by_id(session_id).await;
match session {
Some(session) => {
self.shutdown_session(session.as_ref()).await?;
Ok(true)
}
None => Ok(false),
}
}
    /// Poll the helper's status until it reports running or an auth URL,
    /// retrying every 250ms for up to 40 attempts (~10s). Falls back to
    /// the last status seen, then the last error.
    async fn wait_for_status(&self, session: &ManagedSession) -> Result<TailscaleLoginStatus> {
        let mut last_error = None;
        let mut last_status = None;
        for _ in 0..40 {
            match session.status_with_client(&self.client).await {
                // Ready as soon as login finished or the user has a URL.
                Ok(status) if status.running || status.auth_url.is_some() => return Ok(status),
                Ok(status) => last_status = Some(status),
                Err(err) => last_error = Some(err),
            }
            tokio::time::sleep(Duration::from_millis(250)).await;
        }
        if let Some(status) = last_status {
            return Ok(status);
        }
        Err(last_error.unwrap_or_else(|| anyhow!("tailscale helper did not become ready")))
    }
    /// Query the helper's current status using the manager's shared client.
    async fn fetch_status(&self, session: &ManagedSession) -> Result<TailscaleLoginStatus> {
        session.status_with_client(&self.client).await
    }
    /// Remove and return the session with the given id, if tracked.
    /// (The map is stored under a different key, so this scans values.)
    async fn remove_session_by_id(&self, session_id: &str) -> Option<Arc<ManagedSession>> {
        let mut sessions = self.sessions.lock().await;
        let key = sessions
            .iter()
            .find_map(|(key, session)| (session.session_id == session_id).then(|| key.clone()))?;
        sessions.remove(&key)
    }
/// Ask the helper process behind `session` to shut down, reusing the
/// manager's shared HTTP client.
async fn shutdown_session(&self, session: &ManagedSession) -> Result<()> {
    session.shutdown_with_client(&self.client).await
}
}
impl TailscaleHelperProcess {
    /// Opaque identifier for this helper session.
    pub fn session_id(&self) -> &str {
        &self.session_id
    }
    /// Path of the packet socket, when one was configured for this helper.
    pub fn packet_socket(&self) -> Option<&Path> {
        self.packet_socket.as_deref()
    }
    /// Custom control-plane URL, or `None` when none was requested.
    pub fn control_url(&self) -> Option<&str> {
        self.control_url.as_deref()
    }
    /// Directory holding this session's helper state.
    pub fn state_dir(&self) -> &Path {
        &self.state_dir
    }
    /// Fetch the helper's status using a one-off HTTP client.
    pub async fn status(&self) -> Result<TailscaleLoginStatus> {
        self.status_with_client(&Client::new()).await
    }
    /// Shut the helper down using a one-off HTTP client.
    pub async fn shutdown(&self) -> Result<()> {
        self.shutdown_with_client(&Client::new()).await
    }
    /// Query `GET {listen_url}/status` on the helper and decode the JSON
    /// response, logging the key fields at info level.
    ///
    /// Fails early — without issuing the request — when the helper process
    /// has already exited.
    async fn status_with_client(&self, client: &Client) -> Result<TailscaleLoginStatus> {
        let mut child = self.child.lock().await;
        if let Some(status) = child.try_wait()? {
            return Err(anyhow!(
                "tailscale helper exited with status {status} for {}",
                self.state_dir.display()
            ));
        }
        // Release the child lock before awaiting network I/O.
        drop(child);
        let response = client
            .get(format!("{}/status", self.listen_url))
            .send()
            .await
            .context("failed to query tailscale helper status")?
            .error_for_status()
            .context("tailscale helper status request failed")?;
        let status = response
            .json::<TailscaleLoginStatus>()
            .await
            .context("invalid tailscale helper status response")?;
        log::info!(
            "tailscale helper status session={} backend_state={} running={} needs_login={} auth_url={:?}",
            self.session_id,
            status.backend_state,
            status.running,
            status.needs_login,
            status.auth_url
        );
        Ok(status)
    }
    /// Request a graceful shutdown via `POST {listen_url}/shutdown`, wait
    /// up to ~1s (10 x 100ms) for the process to exit, then fall back to
    /// killing it and reaping the exit status.
    async fn shutdown_with_client(&self, client: &Client) -> Result<()> {
        // Best-effort graceful shutdown; transport errors are ignored.
        let _ = client.post(format!("{}/shutdown", self.listen_url)).send().await;
        for _ in 0..10 {
            let mut child = self.child.lock().await;
            if child.try_wait()?.is_some() {
                return Ok(());
            }
            // Drop the lock while sleeping so other callers can proceed.
            drop(child);
            tokio::time::sleep(Duration::from_millis(100)).await;
        }
        let mut child = self.child.lock().await;
        child
            .start_kill()
            .context("failed to kill tailscale helper")?;
        let _ = child.wait().await;
        Ok(())
    }
}
pub async fn spawn_tailscale_helper(
request: &TailscaleLoginStartRequest,
) -> Result<TailscaleHelperProcess> {
let state_dir = state_root().join(session_dir_name(request));
let state_dir = state_root().join(session_dir_name(&request));
tokio::fs::create_dir_all(&state_dir)
.await
.with_context(|| format!("failed to create {}", state_dir.display()))?;
let mut child = helper_command(request, &state_dir)?
let mut child = helper_command(&request, &state_dir)?
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
@ -352,15 +127,80 @@ pub async fn spawn_tailscale_helper(
}
});
Ok(TailscaleHelperProcess {
let session = Arc::new(ManagedSession {
session_id: random_session_id(),
listen_url: format!("http://{}", hello.listen_addr),
packet_socket: hello.packet_socket.map(PathBuf::from),
control_url: request.control_url.clone(),
state_dir,
child: Arc::new(Mutex::new(child)),
_stderr_task: stderr_task,
})
});
let status = self.wait_for_status(session.as_ref()).await?;
let response = TailscaleLoginStartResponse {
session_id: session.session_id.clone(),
status,
};
self.sessions.lock().await.insert(key, session);
Ok(response)
}
/// Return the current helper status for the session with the given id,
/// or `None` when the id is not tracked.
pub async fn status(&self, session_id: &str) -> Result<Option<TailscaleLoginStatus>> {
    // Clone the matching session out of the map inside a short scope so
    // the lock is not held across the status request.
    let session = {
        let sessions = self.sessions.lock().await;
        sessions
            .values()
            .find(|session| session.session_id == session_id)
            .cloned()
    };
    match session {
        Some(session) => self.fetch_status(session.as_ref()).await.map(Some),
        None => Ok(None),
    }
}
/// Poll the helper until it is ready — running, or an auth URL is
/// available — up to 40 attempts, 250ms apart (~10s total). Falls back
/// to the last status seen, then the last error.
async fn wait_for_status(&self, session: &ManagedSession) -> Result<TailscaleLoginStatus> {
    let mut last_error = None;
    let mut last_status = None;
    for _ in 0..40 {
        match self.fetch_status(session).await {
            Ok(status) if status.running || status.auth_url.is_some() => return Ok(status),
            Ok(status) => last_status = Some(status),
            Err(err) => last_error = Some(err),
        }
        tokio::time::sleep(Duration::from_millis(250)).await;
    }
    if let Some(status) = last_status {
        return Ok(status);
    }
    Err(last_error.unwrap_or_else(|| anyhow!("tailscale helper did not become ready")))
}
/// Fetch the helper's login status over HTTP, failing fast when the
/// helper process has already exited.
async fn fetch_status(&self, session: &ManagedSession) -> Result<TailscaleLoginStatus> {
    let mut child = session.child.lock().await;
    if let Some(status) = child.try_wait()? {
        return Err(anyhow!(
            "tailscale helper exited with status {status} for {}",
            session.state_dir.display()
        ));
    }
    // Release the child lock before awaiting network I/O.
    drop(child);
    let response = self
        .client
        .get(format!("{}/status", session.listen_url))
        .send()
        .await
        .context("failed to query tailscale helper status")?
        .error_for_status()
        .context("tailscale helper status request failed")?;
    response
        .json::<TailscaleLoginStatus>()
        .await
        .context("invalid tailscale helper status response")
}
}
fn helper_command(request: &TailscaleLoginStartRequest, state_dir: &Path) -> Result<Command> {
@ -391,21 +231,10 @@ fn helper_command(request: &TailscaleLoginStartRequest, state_dir: &Path) -> Res
}
}
if let Some(packet_socket) = request.packet_socket.as_deref() {
let trimmed = packet_socket.trim();
if !trimmed.is_empty() {
command.arg("--packet-socket").arg(trimmed);
}
}
Ok(command)
}
pub(crate) fn packet_socket_path(request: &TailscaleLoginStartRequest) -> PathBuf {
state_root().join(session_dir_name(request)).join("packet.sock")
}
pub(crate) fn state_root() -> PathBuf {
fn state_root() -> PathBuf {
if let Ok(path) = env::var("BURROW_TAILSCALE_STATE_ROOT") {
return PathBuf::from(path);
}
@ -420,40 +249,22 @@ pub(crate) fn state_root() -> PathBuf {
.join("Burrow")
.join("tailscale");
}
home.join(".local")
.join("share")
.join("burrow")
.join("tailscale")
home.join(".local").join("share").join("burrow").join("tailscale")
}
pub(crate) fn session_dir_name(request: &TailscaleLoginStartRequest) -> String {
fn session_dir_name(request: &TailscaleLoginStartRequest) -> String {
format!(
"{}-{}-{}",
"{}-{}",
slug(&request.account_name),
slug(&request.identity_name),
slug(control_scope(request))
slug(&request.identity_name)
)
}
fn session_key_for_request(request: &TailscaleLoginStartRequest) -> String {
format!(
"{}:{}:{}",
request.account_name,
request.identity_name,
control_scope(request)
)
fn session_key(account_name: &str, identity_name: &str) -> String {
format!("{account_name}:{identity_name}")
}
fn control_scope(request: &TailscaleLoginStartRequest) -> &str {
request
.control_url
.as_deref()
.map(str::trim)
.filter(|value| !value.is_empty())
.unwrap_or("tailscale-managed")
}
pub(crate) fn default_hostname(request: &TailscaleLoginStartRequest) -> String {
fn default_hostname(request: &TailscaleLoginStartRequest) -> String {
request
.hostname
.as_deref()
@ -496,24 +307,14 @@ mod tests {
}
#[test]
fn state_dir_is_scoped_by_account_identity_and_control_plane() {
fn state_dir_is_stable_by_account_and_identity() {
let request = TailscaleLoginStartRequest {
account_name: "default".to_owned(),
identity_name: "apple".to_owned(),
hostname: None,
control_url: None,
packet_socket: None,
};
assert_eq!(session_dir_name(&request), "default-apple-tailscale-managed");
assert_eq!(session_dir_name(&request), "default-apple");
assert_eq!(default_hostname(&request), "burrow-apple");
let custom_request = TailscaleLoginStartRequest {
control_url: Some("https://ts.burrow.net".to_owned()),
..request
};
assert_eq!(
session_dir_name(&custom_request),
"default-apple-httpstsburrownet"
);
}
}

View file

@ -1,359 +0,0 @@
use anyhow::{anyhow, Context, Result};
use reqwest::{Client, StatusCode, Url};
use serde::{Deserialize, Serialize};
use tracing::{debug, info};
use super::TailnetProvider;
/// Link relation used in webfinger documents to point at a tailnet
/// control server.
pub const TAILNET_DISCOVERY_REL: &str = "https://burrow.net/rel/tailnet-control-server";
/// Well-known path serving a JSON `TailnetDiscovery` document.
const TAILNET_DISCOVERY_PATH: &str = "/.well-known/burrow-tailnet";
/// Standard webfinger endpoint path.
const WEBFINGER_PATH: &str = "/.well-known/webfinger";
/// Host of Tailscale's default (managed) control plane.
const MANAGED_TAILSCALE_AUTHORITY: &str = "controlplane.tailscale.com";
/// Discovery result describing which control server serves a mail domain.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct TailnetDiscovery {
    /// Mail domain the discovery was performed for.
    pub domain: String,
    /// Provider flavour backing the tailnet.
    pub provider: TailnetProvider,
    /// Base URL (or host) of the control server.
    pub authority: String,
    /// Optional OIDC issuer advertised by the well-known document.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub oidc_issuer: Option<String>,
}
/// Result of probing a control-server authority for reachability.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct TailnetAuthorityProbe {
    pub authority: String,
    pub status_code: i32,
    pub summary: String,
    pub detail: String,
    pub reachable: bool,
}
/// Minimal webfinger (JRD) document shape: only `links` is consumed.
#[derive(Clone, Debug, Default, Deserialize)]
struct WebFingerDocument {
    #[serde(default)]
    links: Vec<WebFingerLink>,
}
/// Single webfinger link entry; `rel` selects, `href` carries the target.
#[derive(Clone, Debug, Default, Deserialize)]
struct WebFingerLink {
    #[serde(default)]
    rel: String,
    #[serde(default)]
    href: Option<String>,
}
/// Resolve tailnet control-server metadata for the domain of `email`.
///
/// Builds an `https://{domain}` base URL from the mail domain plus a
/// short-timeout HTTP client, then delegates to `discover_tailnet_at`.
pub async fn discover_tailnet(email: &str) -> Result<TailnetDiscovery> {
    let domain = email_domain(email)?;
    info!(%email, %domain, "tailnet discovery requested");
    let base_url = Url::parse(&format!("https://{domain}"))
        .with_context(|| format!("invalid discovery domain {domain}"))?;
    let client = Client::builder()
        .user_agent("burrow-tailnet-discovery")
        .timeout(std::time::Duration::from_secs(10))
        .build()
        .context("failed to build tailnet discovery client")?;
    discover_tailnet_at(&client, email, &base_url).await
}
/// Normalize a user-supplied authority: trim surrounding whitespace and,
/// when no scheme separator is present, prepend `https://`.
pub fn normalize_authority(authority: &str) -> String {
    let bare = authority.trim();
    match bare.contains("://") {
        true => bare.to_owned(),
        false => format!("https://{bare}"),
    }
}
/// True when `authority` points at Tailscale's managed control plane,
/// accepting either scheme, any letter case, and a trailing slash.
pub fn is_managed_tailscale_authority(authority: &str) -> bool {
    let canonical = normalize_authority(authority)
        .trim_end_matches('/')
        .to_ascii_lowercase();
    [
        format!("https://{MANAGED_TAILSCALE_AUTHORITY}"),
        format!("http://{MANAGED_TAILSCALE_AUTHORITY}"),
    ]
    .iter()
    .any(|candidate| canonical == *candidate)
}
/// Probe a tailnet control-server authority for basic reachability.
///
/// The managed Tailscale control plane is reported reachable without any
/// network traffic. Otherwise `/health` is tried first, then the bare
/// authority URL; the first successful (2xx) response wins, and the
/// final error only distinguishes "no candidate answered".
pub async fn probe_tailnet_authority(authority: &str) -> Result<TailnetAuthorityProbe> {
    let authority = normalize_authority(authority);
    if is_managed_tailscale_authority(&authority) {
        // No probe needed: Tailscale's SaaS control plane is assumed up.
        return Ok(TailnetAuthorityProbe {
            authority,
            status_code: 200,
            summary: "Tailscale-managed control plane".to_owned(),
            detail: "Using Tailscale's default login server.".to_owned(),
            reachable: true,
        });
    }
    let base_url =
        Url::parse(&authority).with_context(|| format!("invalid tailnet authority {authority}"))?;
    let client = Client::builder()
        .user_agent("burrow-tailnet-probe")
        .timeout(std::time::Duration::from_secs(10))
        .build()
        .context("failed to build tailnet authority probe client")?;
    if let Some(status) =
        probe_url(&client, base_url.join("/health")?, &authority, "Tailnet server reachable").await?
    {
        return Ok(status);
    }
    if let Some(status) = probe_url(
        &client,
        base_url.clone(),
        &authority,
        "Tailnet server reachable",
    )
    .await?
    {
        return Ok(status);
    }
    Err(anyhow!("could not connect to the server"))
}
/// Resolve tailnet discovery metadata for `email` against an explicit
/// `base_url` (testable entry point used by `discover_tailnet`).
///
/// Tries the well-known document first; when absent (404), falls back to
/// a webfinger lookup for just the control-server authority. Errors when
/// neither source yields metadata.
pub async fn discover_tailnet_at(
    client: &Client,
    email: &str,
    base_url: &Url,
) -> Result<TailnetDiscovery> {
    let domain = email_domain(email)?;
    debug!(%email, %domain, base_url = %base_url, "starting tailnet domain discovery");
    if let Some(discovery) = discover_well_known(client, base_url).await? {
        info!(
            %email,
            %domain,
            authority = %discovery.authority,
            provider = ?discovery.provider,
            "resolved tailnet discovery from well-known document"
        );
        // Keep the served metadata but pin `domain` to the email's domain.
        return Ok(TailnetDiscovery { domain, ..discovery });
    }
    if let Some(authority) = discover_webfinger(client, email, base_url).await? {
        info!(%email, %domain, %authority, "resolved tailnet discovery from webfinger");
        // Webfinger only supplies the authority; infer the provider.
        return Ok(TailnetDiscovery {
            domain,
            provider: inferred_provider(Some(&authority), None),
            authority,
            oidc_issuer: None,
        });
    }
    Err(anyhow!("no tailnet discovery metadata found for {domain}"))
}
/// Extract the lower-cased domain part of an email address.
///
/// Errors when there is no `@`, or when the domain is empty after
/// trimming whitespace and surrounding dots.
pub fn email_domain(email: &str) -> Result<String> {
    let parts = email
        .trim()
        .rsplit_once('@')
        .ok_or_else(|| anyhow!("email address must include a domain"))?;
    let normalized = parts.1.trim().trim_matches('.').to_ascii_lowercase();
    if normalized.is_empty() {
        Err(anyhow!("email address must include a domain"))
    } else {
        Ok(normalized)
    }
}
pub fn inferred_provider(
authority: Option<&str>,
explicit: Option<&TailnetProvider>,
) -> TailnetProvider {
if matches!(explicit, Some(TailnetProvider::Burrow)) {
return TailnetProvider::Burrow;
}
if authority.is_some_and(is_managed_tailscale_authority) {
return TailnetProvider::Tailscale;
}
TailnetProvider::Headscale
}
/// Fetch the `/.well-known/burrow-tailnet` document from `base_url`.
///
/// Returns `Ok(None)` on HTTP 404 so the caller can fall back to
/// webfinger; any other non-200 status is an error.
async fn discover_well_known(client: &Client, base_url: &Url) -> Result<Option<TailnetDiscovery>> {
    let url = base_url
        .join(TAILNET_DISCOVERY_PATH)
        .context("failed to build tailnet discovery URL")?;
    debug!(%url, "requesting tailnet well-known document");
    let response = client
        .get(url)
        .header("accept", "application/json")
        .send()
        .await
        .context("tailnet well-known request failed")?;
    match response.status() {
        StatusCode::OK => response
            .json::<TailnetDiscovery>()
            .await
            .context("invalid tailnet discovery document")
            .map(Some),
        StatusCode::NOT_FOUND => Ok(None),
        status => Err(anyhow!("tailnet well-known lookup failed with HTTP {status}")),
    }
}
/// Query the webfinger endpoint for `acct:{email}` filtered to the
/// tailnet link relation, returning the first matching non-empty `href`.
///
/// Returns `Ok(None)` on HTTP 404 or when no matching link is present;
/// any other non-200 status is an error.
async fn discover_webfinger(client: &Client, email: &str, base_url: &Url) -> Result<Option<String>> {
    let mut url = base_url
        .join(WEBFINGER_PATH)
        .context("failed to build webfinger URL")?;
    url.query_pairs_mut()
        .append_pair("resource", &format!("acct:{email}"))
        .append_pair("rel", TAILNET_DISCOVERY_REL);
    debug!(%email, url = %url, "requesting tailnet webfinger document");
    let response = client
        .get(url)
        .header("accept", "application/jrd+json, application/json")
        .send()
        .await
        .context("tailnet webfinger request failed")?;
    match response.status() {
        StatusCode::OK => {
            let document = response
                .json::<WebFingerDocument>()
                .await
                .context("invalid webfinger document")?;
            // Servers may ignore the `rel` filter, so re-filter locally.
            Ok(document
                .links
                .into_iter()
                .find(|link| link.rel == TAILNET_DISCOVERY_REL)
                .and_then(|link| link.href)
                .filter(|href| !href.trim().is_empty()))
        }
        StatusCode::NOT_FOUND => Ok(None),
        status => Err(anyhow!("tailnet webfinger lookup failed with HTTP {status}")),
    }
}
/// Issue a GET against `url` and, when it answers 2xx, build a probe
/// result from the response. Transport errors and non-success statuses
/// yield `Ok(None)` so the caller can try the next candidate URL.
async fn probe_url(
    client: &Client,
    url: Url,
    authority: &str,
    summary: &str,
) -> Result<Option<TailnetAuthorityProbe>> {
    let request = client.get(url).header("accept", "application/json");
    let Ok(response) = request.send().await else {
        return Ok(None);
    };
    let http_status = response.status();
    if !http_status.is_success() {
        return Ok(None);
    }
    // Body text is informational only; a read failure leaves it empty.
    let body = response.text().await.unwrap_or_default();
    Ok(Some(TailnetAuthorityProbe {
        authority: authority.to_owned(),
        status_code: i32::from(http_status.as_u16()),
        summary: summary.to_owned(),
        detail: body.trim().to_owned(),
        reachable: true,
    }))
}
// Unit and integration tests; the HTTP-backed cases spin up a local axum
// server on an ephemeral port and point discovery at it.
#[cfg(test)]
mod tests {
    use axum::{routing::get, Router};
    use serde_json::json;
    use tokio::net::TcpListener;
    use super::*;
    // Domain extraction lower-cases and requires an '@'.
    #[test]
    fn extracts_domain_from_email() {
        assert_eq!(email_domain("Contact@Burrow.net").unwrap(), "burrow.net");
        assert!(email_domain("contact").is_err());
    }
    // Managed detection tolerates a missing scheme and a trailing slash.
    #[test]
    fn detects_managed_tailscale_authority() {
        assert!(is_managed_tailscale_authority("controlplane.tailscale.com"));
        assert!(is_managed_tailscale_authority("https://controlplane.tailscale.com/"));
        assert!(!is_managed_tailscale_authority("https://ts.burrow.net"));
    }
    // The well-known document fully describes the tailnet when present.
    #[tokio::test]
    async fn discovers_from_well_known_document() -> Result<()> {
        let router = Router::new().route(
            TAILNET_DISCOVERY_PATH,
            get(|| async {
                axum::Json(json!({
                    "domain": "burrow.net",
                    "provider": "headscale",
                    "authority": "https://ts.burrow.net",
                    "oidc_issuer": "https://auth.burrow.net/application/o/ts/"
                }))
            }),
        );
        let listener = TcpListener::bind("127.0.0.1:0").await?;
        let base_url = Url::parse(&format!("http://{}", listener.local_addr()?))?;
        let server = tokio::spawn(async move { axum::serve(listener, router).await });
        let client = Client::builder().build()?;
        let discovery = discover_tailnet_at(&client, "contact@burrow.net", &base_url).await?;
        assert_eq!(discovery.provider, TailnetProvider::Headscale);
        assert_eq!(discovery.authority, "https://ts.burrow.net");
        assert_eq!(discovery.domain, "burrow.net");
        server.abort();
        Ok(())
    }
    // A 404 on the well-known path falls through to webfinger.
    #[tokio::test]
    async fn falls_back_to_webfinger_authority() -> Result<()> {
        let router = Router::new()
            .route(
                TAILNET_DISCOVERY_PATH,
                get(|| async { (StatusCode::NOT_FOUND, "") }),
            )
            .route(
                WEBFINGER_PATH,
                get(|| async {
                    axum::Json(json!({
                        "subject": "acct:contact@burrow.net",
                        "links": [
                            {
                                "rel": TAILNET_DISCOVERY_REL,
                                "href": "https://ts.burrow.net"
                            }
                        ]
                    }))
                }),
            );
        let listener = TcpListener::bind("127.0.0.1:0").await?;
        let base_url = Url::parse(&format!("http://{}", listener.local_addr()?))?;
        let server = tokio::spawn(async move { axum::serve(listener, router).await });
        let client = Client::builder().build()?;
        let discovery = discover_tailnet_at(&client, "contact@burrow.net", &base_url).await?;
        assert_eq!(discovery.provider, TailnetProvider::Headscale);
        assert_eq!(discovery.authority, "https://ts.burrow.net");
        server.abort();
        Ok(())
    }
    // Probing a custom authority succeeds via its /health endpoint.
    #[tokio::test]
    async fn probes_custom_authority() -> Result<()> {
        let router = Router::new().route("/health", get(|| async { "ok" }));
        let listener = TcpListener::bind("127.0.0.1:0").await?;
        let authority = format!("http://{}", listener.local_addr()?);
        let server = tokio::spawn(async move { axum::serve(listener, router).await });
        let status = probe_tailnet_authority(&authority).await?;
        assert_eq!(status.authority, authority);
        assert_eq!(status.status_code, 200);
        assert!(status.reachable);
        server.abort();
        Ok(())
    }
}

View file

@ -1,5 +1,4 @@
pub mod config;
pub mod discovery;
use std::collections::BTreeMap;
@ -7,7 +6,6 @@ use serde::{Deserialize, Serialize};
use serde_json::Value;
pub use config::{TailnetConfig, TailnetProvider};
pub use discovery::{TailnetDiscovery, TAILNET_DISCOVERY_REL};
pub const BURROW_CAPABILITY_VERSION: i32 = 1;
pub const BURROW_TAILNET_DOMAIN: &str = "burrow.net";

View file

@ -1,11 +1,11 @@
use std::{
ffi::{c_char, CStr},
path::PathBuf,
sync::{Arc, Mutex},
sync::Arc,
thread,
};
use once_cell::sync::{Lazy, OnceCell};
use once_cell::sync::OnceCell;
use tokio::{
runtime::{Builder, Handle},
sync::Notify,
@ -14,12 +14,15 @@ use tracing::error;
use crate::daemon::daemon_main;
static BURROW_NOTIFY: OnceCell<Arc<Notify>> = OnceCell::new();
static BURROW_HANDLE: OnceCell<Handle> = OnceCell::new();
static BURROW_READY: OnceCell<()> = OnceCell::new();
static BURROW_SPAWN_LOCK: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));
#[no_mangle]
pub unsafe extern "C" fn spawn_in_process(path: *const c_char, db_path: *const c_char) {
crate::tracing::initialize();
let notify = BURROW_NOTIFY.get_or_init(|| Arc::new(Notify::new()));
let handle = BURROW_HANDLE.get_or_init(|| {
let path_buf = if path.is_null() {
None
} else {
@ -30,19 +33,6 @@ pub unsafe extern "C" fn spawn_in_process(path: *const c_char, db_path: *const c
} else {
Some(PathBuf::from(CStr::from_ptr(db_path).to_str().unwrap()))
};
spawn_in_process_with_paths(path_buf, db_path_buf);
}
pub fn spawn_in_process_with_paths(path_buf: Option<PathBuf>, db_path_buf: Option<PathBuf>) {
crate::tracing::initialize();
let _guard = BURROW_SPAWN_LOCK.lock().unwrap();
if BURROW_READY.get().is_some() {
return;
}
let notify = Arc::new(Notify::new());
let handle = BURROW_HANDLE.get_or_init(|| {
let sender = notify.clone();
let (handle_tx, handle_rx) = tokio::sync::oneshot::channel();
@ -72,5 +62,4 @@ pub fn spawn_in_process_with_paths(path_buf: Option<PathBuf>, db_path_buf: Optio
let receiver = notify.clone();
handle.block_on(async move { receiver.notified().await });
let _ = BURROW_READY.set(());
}

View file

@ -8,25 +8,18 @@ use rusqlite::Connection;
use tokio::sync::{mpsc, watch, RwLock};
use tokio_stream::wrappers::ReceiverStream;
use tonic::{Request, Response, Status as RspStatus};
use tracing::{debug, info, warn};
use tracing::warn;
use tun::tokio::TunInterface;
use super::{
rpc::grpc_defs::{
networks_server::Networks, tailnet_control_server::TailnetControl, tunnel_server::Tunnel,
Empty, Network, NetworkDeleteRequest, NetworkListResponse, NetworkReorderRequest,
State as RPCTunnelState, TailnetDiscoverRequest, TailnetDiscoverResponse,
TailnetProbeRequest, TailnetProbeResponse, TunnelConfigurationResponse, TunnelPacket,
TunnelStatusResponse,
networks_server::Networks, tunnel_server::Tunnel, Empty, Network, NetworkDeleteRequest,
NetworkListResponse, NetworkReorderRequest, State as RPCTunnelState,
TunnelConfigurationResponse, TunnelStatusResponse,
},
runtime::{tailnet_helper_request, ActiveTunnel, ResolvedTunnel},
runtime::{ActiveTunnel, ResolvedTunnel},
};
use crate::{
auth::server::tailscale::{
packet_socket_path, TailscaleBridgeManager,
TailscaleLoginStartRequest as BridgeLoginStartRequest, TailscaleLoginStatus,
},
control::discovery,
daemon::rpc::ServerConfig,
database::{add_network, delete_network, get_connection, list_networks, reorder_network},
};
@ -53,7 +46,6 @@ pub struct DaemonRPCServer {
wg_state_chan: (watch::Sender<RunState>, watch::Receiver<RunState>),
network_update_chan: (watch::Sender<()>, watch::Receiver<()>),
active_tunnel: Arc<RwLock<Option<ActiveTunnel>>>,
tailnet_login: TailscaleBridgeManager,
}
impl DaemonRPCServer {
@ -64,7 +56,6 @@ impl DaemonRPCServer {
wg_state_chan: watch::channel(RunState::Idle),
network_update_chan: watch::channel(()),
active_tunnel: Arc::new(RwLock::new(None)),
tailnet_login: TailscaleBridgeManager::default(),
})
}
@ -87,20 +78,11 @@ impl DaemonRPCServer {
}
async fn current_tunnel_configuration(&self) -> Result<TunnelConfigurationResponse, RspStatus> {
let config = {
let active = self.active_tunnel.read().await;
active
.as_ref()
.map(|tunnel| tunnel.server_config().clone())
};
let config = match config {
Some(config) => config,
None => self
let config = self
.resolve_tunnel()
.await?
.server_config()
.map_err(proc_err)?,
};
.map_err(proc_err)?;
Ok(configuration_rsp(config))
}
@ -120,18 +102,8 @@ impl DaemonRPCServer {
async fn replace_active_tunnel(&self, desired: ResolvedTunnel) -> Result<(), RspStatus> {
let _ = self.stop_active_tunnel().await?;
let tailnet_helper = match &desired {
ResolvedTunnel::Tailnet { identity, config } => Some(
self.tailnet_login
.ensure_session(tailnet_helper_request(identity, config))
.await
.map_err(proc_err)?
.helper,
),
_ => None,
};
let active = desired
.start(self.tun_interface.clone(), tailnet_helper)
.start(self.tun_interface.clone())
.await
.map_err(proc_err)?;
self.active_tunnel.write().await.replace(active);
@ -155,34 +127,11 @@ impl DaemonRPCServer {
Ok(())
}
fn tailnet_bridge_request(
account_name: String,
identity_name: String,
hostname: String,
authority: String,
) -> BridgeLoginStartRequest {
let mut request = BridgeLoginStartRequest {
account_name,
identity_name,
hostname: (!hostname.trim().is_empty()).then_some(hostname),
control_url: Self::tailnet_control_url(&authority),
packet_socket: None,
};
request.packet_socket = Some(packet_socket_path(&request).display().to_string());
request
}
fn tailnet_control_url(authority: &str) -> Option<String> {
let authority = discovery::normalize_authority(authority);
(!discovery::is_managed_tailscale_authority(&authority)).then_some(authority)
}
}
#[tonic::async_trait]
impl Tunnel for DaemonRPCServer {
type TunnelConfigurationStream = ReceiverStream<Result<TunnelConfigurationResponse, RspStatus>>;
type TunnelPacketsStream = ReceiverStream<Result<TunnelPacket, RspStatus>>;
type TunnelStatusStream = ReceiverStream<Result<TunnelStatusResponse, RspStatus>>;
async fn tunnel_configuration(
@ -208,62 +157,6 @@ impl Tunnel for DaemonRPCServer {
Ok(Response::new(ReceiverStream::new(rx)))
}
async fn tunnel_packets(
&self,
request: Request<tonic::Streaming<TunnelPacket>>,
) -> Result<Response<Self::TunnelPacketsStream>, RspStatus> {
let (packet_tx, mut packet_rx) = {
let guard = self.active_tunnel.read().await;
let Some(active) = guard.as_ref() else {
return Err(RspStatus::failed_precondition("no active tunnel"));
};
active.packet_stream().ok_or_else(|| {
RspStatus::failed_precondition(
"active tunnel does not support packet streaming",
)
})?
};
let (tx, rx) = mpsc::channel(128);
tokio::spawn(async move {
loop {
match packet_rx.recv().await {
Ok(payload) => {
if tx.send(Ok(TunnelPacket { payload })).await.is_err() {
break;
}
}
Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => continue,
Err(tokio::sync::broadcast::error::RecvError::Closed) => break,
}
}
});
let mut inbound = request.into_inner();
tokio::spawn(async move {
loop {
match inbound.message().await {
Ok(Some(packet)) => {
debug!(
"daemon tunnel packet stream received {} bytes from client",
packet.payload.len()
);
if packet_tx.send(packet.payload).await.is_err() {
break;
}
}
Ok(None) => break,
Err(error) => {
warn!("tailnet packet stream receive error: {error}");
break;
}
}
}
});
Ok(Response::new(ReceiverStream::new(rx)))
}
async fn tunnel_start(&self, _request: Request<Empty>) -> Result<Response<Empty>, RspStatus> {
let desired = self.resolve_tunnel().await?;
let already_running = {
@ -373,144 +266,14 @@ impl Networks for DaemonRPCServer {
}
}
#[tonic::async_trait]
impl TailnetControl for DaemonRPCServer {
async fn discover(
&self,
request: Request<TailnetDiscoverRequest>,
) -> Result<Response<TailnetDiscoverResponse>, RspStatus> {
let request = request.into_inner();
info!(email = %request.email, "daemon tailnet discover RPC received");
let discovery = discovery::discover_tailnet(&request.email)
.await
.map_err(proc_err)?;
info!(
email = %request.email,
authority = %discovery.authority,
provider = ?discovery.provider,
"daemon tailnet discover RPC resolved"
);
Ok(Response::new(TailnetDiscoverResponse {
domain: discovery.domain,
authority: discovery.authority.clone(),
oidc_issuer: discovery.oidc_issuer.unwrap_or_default(),
managed: matches!(
discovery::inferred_provider(Some(&discovery.authority), Some(&discovery.provider)),
crate::control::TailnetProvider::Tailscale
),
}))
}
async fn probe(
&self,
request: Request<TailnetProbeRequest>,
) -> Result<Response<TailnetProbeResponse>, RspStatus> {
let request = request.into_inner();
let status = discovery::probe_tailnet_authority(&request.authority)
.await
.map_err(proc_err)?;
Ok(Response::new(TailnetProbeResponse {
authority: status.authority,
status_code: status.status_code,
summary: status.summary,
detail: status.detail,
reachable: status.reachable,
}))
}
async fn login_start(
&self,
request: Request<super::rpc::grpc_defs::TailnetLoginStartRequest>,
) -> Result<Response<super::rpc::grpc_defs::TailnetLoginStatusResponse>, RspStatus> {
let request = request.into_inner();
info!(
account = %request.account_name,
identity = %request.identity_name,
authority = %request.authority,
"daemon tailnet login start RPC received"
);
let response = self
.tailnet_login
.start_login(Self::tailnet_bridge_request(
request.account_name,
request.identity_name,
request.hostname,
request.authority,
))
.await
.map_err(proc_err)?;
info!(
session_id = %response.session_id,
backend_state = %response.status.backend_state,
running = response.status.running,
needs_login = response.status.needs_login,
auth_url = ?response.status.auth_url,
"daemon tailnet login start RPC resolved"
);
Ok(Response::new(tailnet_login_rsp(
response.session_id,
response.status,
)))
}
async fn login_status(
&self,
request: Request<super::rpc::grpc_defs::TailnetLoginStatusRequest>,
) -> Result<Response<super::rpc::grpc_defs::TailnetLoginStatusResponse>, RspStatus> {
let request = request.into_inner();
info!(session_id = %request.session_id, "daemon tailnet login status RPC received");
let status = self
.tailnet_login
.status(&request.session_id)
.await
.map_err(proc_err)?;
let Some(status) = status else {
return Err(RspStatus::not_found("tailnet login session not found"));
};
info!(
session_id = %request.session_id,
backend_state = %status.backend_state,
running = status.running,
needs_login = status.needs_login,
auth_url = ?status.auth_url,
"daemon tailnet login status RPC resolved"
);
Ok(Response::new(tailnet_login_rsp(request.session_id, status)))
}
async fn login_cancel(
&self,
request: Request<super::rpc::grpc_defs::TailnetLoginCancelRequest>,
) -> Result<Response<Empty>, RspStatus> {
let request = request.into_inner();
let canceled = self
.tailnet_login
.cancel(&request.session_id)
.await
.map_err(proc_err)?;
if !canceled {
return Err(RspStatus::not_found("tailnet login session not found"));
}
Ok(Response::new(Empty {}))
}
}
fn proc_err(err: impl ToString) -> RspStatus {
RspStatus::internal(err.to_string())
}
fn configuration_rsp(config: ServerConfig) -> TunnelConfigurationResponse {
TunnelConfigurationResponse {
addresses: config.address,
mtu: config.mtu.unwrap_or(1000),
routes: config.routes,
dns_servers: config.dns_servers,
search_domains: config.search_domains,
include_default_route: config.include_default_route,
addresses: config.address,
}
}
@ -520,21 +283,3 @@ fn status_rsp(state: RunState) -> TunnelStatusResponse {
start: None, // TODO: Add timestamp
}
}
fn tailnet_login_rsp(
session_id: String,
status: TailscaleLoginStatus,
) -> super::rpc::grpc_defs::TailnetLoginStatusResponse {
super::rpc::grpc_defs::TailnetLoginStatusResponse {
session_id,
backend_state: status.backend_state,
auth_url: status.auth_url.unwrap_or_default(),
running: status.running,
needs_login: status.needs_login,
tailnet_name: status.tailnet_name.unwrap_or_default(),
magic_dns_suffix: status.magic_dns_suffix.unwrap_or_default(),
self_dns_name: status.self_dns_name.unwrap_or_default(),
tailnet_ips: status.tailscale_ips,
health: status.health,
}
}

View file

@ -16,10 +16,7 @@ use tonic::transport::Server;
use tracing::info;
use crate::{
daemon::rpc::grpc_defs::{
networks_server::NetworksServer, tailnet_control_server::TailnetControlServer,
tunnel_server::TunnelServer,
},
daemon::rpc::grpc_defs::{networks_server::NetworksServer, tunnel_server::TunnelServer},
database::get_connection,
};
@ -39,11 +36,9 @@ pub async fn daemon_main(
let uds = UnixListener::bind(sock_path)?;
let serve_job = tokio::spawn(async move {
let uds_stream = UnixListenerStream::new(uds);
let tailnet_server = burrow_server.clone();
let _srv = Server::builder()
.add_service(TunnelServer::new(burrow_server.clone()))
.add_service(NetworksServer::new(burrow_server))
.add_service(TailnetControlServer::new(tailnet_server))
.serve_with_incoming(uds_stream)
.await?;
Ok::<(), AhError>(())

View file

@ -5,15 +5,11 @@ use tokio::net::UnixStream;
use tonic::transport::{Endpoint, Uri};
use tower::service_fn;
use super::grpc_defs::{
networks_client::NetworksClient, tailnet_control_client::TailnetControlClient,
tunnel_client::TunnelClient,
};
use super::grpc_defs::{networks_client::NetworksClient, tunnel_client::TunnelClient};
use crate::daemon::get_socket_path;
pub struct BurrowClient<T> {
pub networks_client: NetworksClient<T>,
pub tailnet_client: TailnetControlClient<T>,
pub tunnel_client: TunnelClient<T>,
}
@ -35,11 +31,9 @@ impl BurrowClient<tonic::transport::Channel> {
}))
.await?;
let nw_client = NetworksClient::new(channel.clone());
let tailnet_client = TailnetControlClient::new(channel.clone());
let tun_client = TunnelClient::new(channel.clone());
Ok(BurrowClient {
networks_client: nw_client,
tailnet_client,
tunnel_client: tun_client,
})
}

View file

@ -68,14 +68,6 @@ impl TryFrom<&TunInterface> for ServerInfo {
#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)]
pub struct ServerConfig {
pub address: Vec<String>,
#[serde(default)]
pub routes: Vec<String>,
#[serde(default)]
pub dns_servers: Vec<String>,
#[serde(default)]
pub search_domains: Vec<String>,
#[serde(default)]
pub include_default_route: bool,
pub name: Option<String>,
pub mtu: Option<i32>,
}
@ -86,14 +78,6 @@ impl TryFrom<&Config> for ServerConfig {
fn try_from(config: &Config) -> anyhow::Result<Self> {
Ok(ServerConfig {
address: config.interface.address.clone(),
routes: config
.peers
.iter()
.flat_map(|peer| peer.allowed_ips.iter().cloned())
.collect(),
dns_servers: config.interface.dns.clone(),
search_domains: Vec::new(),
include_default_route: false,
name: None,
mtu: config.interface.mtu.map(|mtu| mtu as i32),
})
@ -104,10 +88,6 @@ impl Default for ServerConfig {
fn default() -> Self {
Self {
address: vec!["10.13.13.2".to_string()], // Dummy remote address
routes: Vec::new(),
dns_servers: Vec::new(),
search_domains: Vec::new(),
include_default_route: false,
name: None,
mtu: None,
}

View file

@ -2,4 +2,4 @@
source: burrow/src/daemon/rpc/response.rs
expression: "serde_json::to_string(&DaemonResponse::new(Ok::<DaemonResponseData,\n String>(DaemonResponseData::ServerConfig(ServerConfig::default()))))?"
---
{"result":{"Ok":{"type":"ServerConfig","address":["10.13.13.2"],"routes":[],"dns_servers":[],"search_domains":[],"include_default_route":false,"name":null,"mtu":null}},"id":0}
{"result":{"Ok":{"type":"ServerConfig","address":["10.13.13.2"],"name":null,"mtu":null}},"id":0}

View file

@ -1,13 +1,7 @@
use std::{path::PathBuf, sync::Arc};
use std::sync::Arc;
use anyhow::{bail, Context, Result};
use tokio::{
io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt},
net::UnixStream,
sync::{broadcast, mpsc, RwLock},
task::JoinHandle,
time::{sleep, Duration},
};
use anyhow::{Context, Result};
use tokio::{sync::RwLock, task::JoinHandle};
use tun::{tokio::TunInterface, TunOptions};
use super::rpc::{
@ -15,11 +9,7 @@ use super::rpc::{
ServerConfig,
};
use crate::{
auth::server::tailscale::{
default_hostname, packet_socket_path, spawn_tailscale_helper, TailscaleHelperProcess,
TailscaleLoginStartRequest, TailscaleLoginStatus,
},
control::{discovery, TailnetConfig},
control::TailnetConfig,
wireguard::{Config, Interface as WireGuardInterface},
};
@ -88,19 +78,11 @@ impl ResolvedTunnel {
match self {
Self::Passthrough { .. } => Ok(ServerConfig {
address: Vec::new(),
routes: Vec::new(),
dns_servers: Vec::new(),
search_domains: Vec::new(),
include_default_route: false,
name: None,
mtu: Some(1500),
}),
Self::Tailnet { .. } => Ok(ServerConfig {
address: Vec::new(),
routes: tailnet_routes(),
dns_servers: tailnet_dns_servers(),
search_domains: Vec::new(),
include_default_route: false,
name: None,
mtu: Some(1280),
}),
@ -111,71 +93,21 @@ impl ResolvedTunnel {
pub async fn start(
self,
tun_interface: Arc<RwLock<Option<TunInterface>>>,
tailnet_helper: Option<Arc<TailscaleHelperProcess>>,
) -> Result<ActiveTunnel> {
match self {
Self::Passthrough { identity } => Ok(ActiveTunnel::Passthrough {
identity,
server_config: ServerConfig {
address: Vec::new(),
routes: Vec::new(),
dns_servers: Vec::new(),
search_domains: Vec::new(),
include_default_route: false,
name: None,
mtu: Some(1500),
},
}),
Self::Tailnet { identity, config } => {
let (helper, shutdown_helper_on_stop) = match tailnet_helper {
Some(helper) => (helper, false),
None => {
let helper_request = tailnet_helper_request(&identity, &config);
let helper = Arc::new(spawn_tailscale_helper(&helper_request).await?);
(helper, true)
}
};
let status = wait_for_tailnet_ready(helper.as_ref()).await?;
let server_config = tailnet_server_config(&status);
let packet_socket = helper
.packet_socket()
.map(PathBuf::from)
.ok_or_else(|| anyhow::anyhow!("tailnet helper did not report a packet socket"))?;
let packet_bridge = connect_tailnet_packet_bridge(packet_socket).await?;
#[cfg(target_vendor = "apple")]
let tun_task = None;
#[cfg(not(target_vendor = "apple"))]
let tun_task = {
let tun = TunOptions::new().open()?;
tun_interface.write().await.replace(tun);
Some(tokio::spawn(run_tailnet_tun_bridge(
tun_interface.clone(),
packet_bridge.outbound_sender(),
packet_bridge.subscribe(),
)))
};
Ok(ActiveTunnel::Tailnet {
identity,
server_config,
helper,
shutdown_helper_on_stop,
packet_bridge,
tun_task,
})
}
Self::Passthrough { identity } => Ok(ActiveTunnel::Passthrough { identity }),
Self::Tailnet { config, .. } => Err(anyhow::anyhow!(
"tailnet runtime is not wired in this checkout yet ({:?})",
config.provider
)),
Self::WireGuard { identity, config } => {
let server_config = ServerConfig::try_from(&config)?;
let tun = TunOptions::new().open()?;
tun_interface.write().await.replace(tun);
match start_wireguard_runtime(config, tun_interface.clone()).await {
Ok((interface, task)) => Ok(ActiveTunnel::WireGuard {
identity,
server_config,
interface,
task,
}),
Ok((interface, task)) => {
Ok(ActiveTunnel::WireGuard { identity, interface, task })
}
Err(err) => {
tun_interface.write().await.take();
Err(err)
@ -189,19 +121,9 @@ impl ResolvedTunnel {
pub enum ActiveTunnel {
Passthrough {
identity: RuntimeIdentity,
server_config: ServerConfig,
},
Tailnet {
identity: RuntimeIdentity,
server_config: ServerConfig,
helper: Arc<TailscaleHelperProcess>,
shutdown_helper_on_stop: bool,
packet_bridge: TailnetPacketBridge,
tun_task: Option<JoinHandle<Result<()>>>,
},
WireGuard {
identity: RuntimeIdentity,
server_config: ServerConfig,
interface: Arc<RwLock<WireGuardInterface>>,
task: JoinHandle<Result<()>>,
},
@ -210,69 +132,15 @@ pub enum ActiveTunnel {
impl ActiveTunnel {
pub fn identity(&self) -> &RuntimeIdentity {
match self {
Self::Passthrough { identity, .. }
| Self::Tailnet { identity, .. }
Self::Passthrough { identity }
| Self::WireGuard { identity, .. } => identity,
}
}
pub fn server_config(&self) -> &ServerConfig {
match self {
Self::Passthrough { server_config, .. }
| Self::Tailnet { server_config, .. }
| Self::WireGuard { server_config, .. } => server_config,
}
}
pub fn packet_stream(
&self,
) -> Option<(mpsc::Sender<Vec<u8>>, broadcast::Receiver<Vec<u8>>)> {
match self {
Self::Tailnet { packet_bridge, .. } => Some((
packet_bridge.outbound_sender(),
packet_bridge.subscribe(),
)),
_ => None,
}
}
pub async fn shutdown(self, tun_interface: &Arc<RwLock<Option<TunInterface>>>) -> Result<()> {
match self {
Self::Passthrough { .. } => Ok(()),
Self::Tailnet {
helper,
shutdown_helper_on_stop,
packet_bridge,
tun_task,
..
} => {
if let Some(tun_task) = tun_task {
tun_task.abort();
match tun_task.await {
Ok(Ok(())) => {}
Ok(Err(err)) => return Err(err),
Err(err) if err.is_cancelled() => {}
Err(err) => return Err(err.into()),
}
}
packet_bridge.task.abort();
match packet_bridge.task.await {
Ok(Ok(())) => {}
Ok(Err(err)) => return Err(err),
Err(err) if err.is_cancelled() => {}
Err(err) => return Err(err.into()),
}
tun_interface.write().await.take();
if shutdown_helper_on_stop {
helper.shutdown().await?;
}
Ok(())
}
Self::WireGuard {
interface,
task,
..
} => {
Self::WireGuard { interface, task, .. } => {
interface.read().await.remove_tun().await;
let task_result = task.await;
tun_interface.write().await.take();
@ -283,22 +151,6 @@ impl ActiveTunnel {
}
}
/// Channels plus the background task that shuttle raw packets between the
/// daemon and the tailnet helper's Unix packet socket.
pub struct TailnetPacketBridge {
// Packets queued here are written to the helper socket (daemon -> helper).
outbound: mpsc::Sender<Vec<u8>>,
// Packets read from the helper socket are broadcast here (helper -> daemon).
inbound: broadcast::Sender<Vec<u8>>,
// Join handle for the socket read/write bridge task.
task: JoinHandle<Result<()>>,
}
impl TailnetPacketBridge {
// Clone a sender handle for enqueueing outbound packets.
fn outbound_sender(&self) -> mpsc::Sender<Vec<u8>> {
self.outbound.clone()
}
// Open a new subscription to the inbound packet broadcast.
fn subscribe(&self) -> broadcast::Receiver<Vec<u8>> {
self.inbound.subscribe()
}
}
async fn start_wireguard_runtime(
config: Config,
tun_interface: Arc<RwLock<Option<TunInterface>>>,
@ -314,279 +166,6 @@ async fn start_wireguard_runtime(
Ok((interface, task))
}
/// Build the login-start request used to spawn or address the tailscale
/// helper process for the given runtime identity and tailnet config.
///
/// Blank or whitespace-only config fields are treated as absent; the packet
/// socket path and a default hostname are filled in from the request itself.
pub(crate) fn tailnet_helper_request(
identity: &RuntimeIdentity,
config: &TailnetConfig,
) -> TailscaleLoginStartRequest {
// Empty/whitespace account names fall back to "default".
let account_name = config
.account
.as_deref()
.filter(|value| !value.trim().is_empty())
.unwrap_or("default")
.to_owned();
// Explicit identity wins; otherwise derive one per network, or "apple"
// for the passthrough runtime.
let identity_name = config
.identity
.as_deref()
.filter(|value| !value.trim().is_empty())
.map(ToOwned::to_owned)
.unwrap_or_else(|| match identity {
RuntimeIdentity::Network { id, .. } => format!("network-{id}"),
RuntimeIdentity::Passthrough => "apple".to_owned(),
});
// Only forward a control URL for self-hosted authorities; the managed
// Tailscale authority is the helper's built-in default.
let control_url = config.authority.as_deref().and_then(|authority| {
let authority = discovery::normalize_authority(authority);
(!discovery::is_managed_tailscale_authority(&authority)).then_some(authority)
});
let mut request = TailscaleLoginStartRequest {
account_name,
identity_name,
hostname: config.hostname.clone(),
control_url,
packet_socket: None,
};
// The socket path is derived from the request's account/identity fields,
// so it can only be computed after the request is assembled.
request.packet_socket = Some(packet_socket_path(&request).display().to_string());
// Substitute a default hostname when none (or only whitespace) was given.
if request
.hostname
.as_deref()
.map(|value| value.trim().is_empty())
.unwrap_or(true)
{
request.hostname = Some(default_hostname(&request));
}
request
}
/// Poll the tailscale helper until it reports a running backend with at least
/// one tailnet IP, or fail.
///
/// Polls up to 120 times at 250 ms intervals (~30 s total). Bails out early
/// if the helper signals that an interactive login is still required.
async fn wait_for_tailnet_ready(helper: &TailscaleHelperProcess) -> Result<TailscaleLoginStatus> {
let mut last_status = None;
for _ in 0..120 {
let status = helper.status().await?;
// Ready: backend running and at least one tailnet address assigned.
if status.running && !status.tailscale_ips.is_empty() {
return Ok(status);
}
// A pending login cannot be completed here; surface it to the caller.
if status.needs_login || status.auth_url.is_some() {
bail!("tailnet runtime requires a completed login before the tunnel can start");
}
last_status = Some(status);
sleep(Duration::from_millis(250)).await;
}
// Timed out: include the last observed backend state for diagnostics.
if let Some(status) = last_status {
bail!(
"tailnet helper never became ready (backend_state={})",
status.backend_state
);
}
bail!("tailnet helper never produced a status update")
}
/// Translate a helper login status into the tunnel's `ServerConfig`:
/// host-prefix CIDR addresses, the fixed tailnet routes/DNS, the MagicDNS
/// suffix as a search domain, and a 1280-byte MTU.
fn tailnet_server_config(status: &TailscaleLoginStatus) -> ServerConfig {
let mut search_domains = Vec::new();
// Use the MagicDNS suffix (trimmed, without its trailing dot) as the
// sole search domain when present.
if let Some(suffix) = status.magic_dns_suffix.as_deref() {
let suffix = suffix.trim().trim_end_matches('.');
if !suffix.is_empty() {
search_domains.push(suffix.to_owned());
}
}
ServerConfig {
// Normalize bare IPs into /32 (IPv4) or /128 (IPv6) CIDRs.
address: status
.tailscale_ips
.iter()
.map(|ip| tailnet_cidr(ip))
.collect(),
routes: tailnet_routes(),
dns_servers: tailnet_dns_servers(),
search_domains,
include_default_route: false,
name: status.self_dns_name.clone(),
// Conservative MTU used for the tailnet data plane.
mtu: Some(1280),
}
}
/// Routes installed for every tailnet tunnel: the Tailscale CGNAT IPv4 range
/// and the tailnet ULA IPv6 range.
fn tailnet_routes() -> Vec<String> {
    ["100.64.0.0/10", "fd7a:115c:a1e0::/48"]
        .into_iter()
        .map(str::to_owned)
        .collect()
}
/// DNS servers installed for every tailnet tunnel: the MagicDNS resolver.
fn tailnet_dns_servers() -> Vec<String> {
    let resolver = String::from("100.100.100.100");
    vec![resolver]
}
/// Normalize an IP string to CIDR form: a value that already contains a '/'
/// is returned unchanged, otherwise a host prefix is appended (/128 for
/// IPv6, detected by the presence of ':', /32 for IPv4).
fn tailnet_cidr(ip: &str) -> String {
    if ip.contains('/') {
        return ip.to_owned();
    }
    let prefix = if ip.contains(':') { 128 } else { 32 };
    format!("{ip}/{prefix}")
}
async fn connect_tailnet_packet_bridge(packet_socket: PathBuf) -> Result<TailnetPacketBridge> {
let mut last_error = None;
let mut stream = None;
for _ in 0..50 {
match UnixStream::connect(&packet_socket).await {
Ok(connected) => {
stream = Some(connected);
break;
}
Err(err) => {
last_error = Some(err);
sleep(Duration::from_millis(100)).await;
}
}
}
let stream = if let Some(stream) = stream {
stream
} else {
return Err(last_error
.context("failed to connect to tailnet helper packet socket")?
.into());
};
let (outbound_tx, outbound_rx) = mpsc::channel(128);
let (inbound_tx, _) = broadcast::channel(128);
let task = tokio::spawn(run_tailnet_socket_bridge(
stream,
outbound_rx,
inbound_tx.clone(),
));
Ok(TailnetPacketBridge {
outbound: outbound_tx,
inbound: inbound_tx,
task,
})
}
/// Drive the packet bridge over the helper's Unix socket: one task reads
/// length-prefixed frames from the socket and broadcasts them; another drains
/// the outbound channel and writes frames to the socket.
///
/// Returns when either task fails, the outbound channel closes, or a task
/// join fails; read errors in the inbound loop are the only way it exits.
async fn run_tailnet_socket_bridge(
stream: UnixStream,
mut outbound_rx: mpsc::Receiver<Vec<u8>>,
inbound_tx: broadcast::Sender<Vec<u8>>,
) -> Result<()> {
let (mut reader, mut writer) = stream.into_split();
let inbound = tokio::spawn(async move {
// Loops forever; exits only via a read error propagated by `?`.
loop {
let packet = read_packet_frame(&mut reader).await?;
tracing::debug!(
"tailnet packet bridge received {} bytes from helper socket",
packet.len()
);
// A send error only means there are no subscribers right now;
// deliberately ignored so the bridge keeps draining the socket.
let _ = inbound_tx.send(packet);
}
#[allow(unreachable_code)]
Result::<()>::Ok(())
});
let outbound = tokio::spawn(async move {
// Ends cleanly when all outbound senders are dropped.
while let Some(packet) = outbound_rx.recv().await {
tracing::debug!(
"tailnet packet bridge writing {} bytes to helper socket",
packet.len()
);
write_packet_frame(&mut writer, &packet).await?;
}
Result::<()>::Ok(())
});
// try_join! surfaces join (panic/cancel) errors; the inner results are
// then unwrapped separately to surface task-level I/O errors.
let (inbound_result, outbound_result) = tokio::try_join!(inbound, outbound)?;
inbound_result?;
outbound_result?;
Ok(())
}
#[cfg(not(target_vendor = "apple"))]
/// Bridge packets between the shared tun interface and the tailnet helper:
/// one task writes broadcast packets into the tun, the other reads from the
/// tun and forwards into the outbound channel.
///
/// Either task bails if the tun slot is empty when it needs the interface.
async fn run_tailnet_tun_bridge(
tun_interface: Arc<RwLock<Option<TunInterface>>>,
outbound_tx: mpsc::Sender<Vec<u8>>,
mut inbound_rx: broadcast::Receiver<Vec<u8>>,
) -> Result<()> {
let inbound_tun = tun_interface.clone();
let inbound = tokio::spawn(async move {
loop {
let packet = match inbound_rx.recv().await {
Ok(packet) => packet,
// Dropped packets under broadcast lag are tolerated; skip ahead.
Err(broadcast::error::RecvError::Lagged(_)) => continue,
// Sender side gone: bridge is shutting down.
Err(broadcast::error::RecvError::Closed) => break,
};
let guard = inbound_tun.read().await;
let Some(tun) = guard.as_ref() else {
bail!("tailnet tun interface unavailable");
};
tun.send(&packet)
.await
.context("failed to write tailnet packet to tun")?;
}
Result::<()>::Ok(())
});
let outbound_tun = tun_interface.clone();
let outbound = tokio::spawn(async move {
// 65_535 bytes: maximum possible IP packet size.
let mut buf = vec![0u8; 65_535];
loop {
let len = {
let guard = outbound_tun.read().await;
let Some(tun) = guard.as_ref() else {
bail!("tailnet tun interface unavailable");
};
// NOTE(review): the read lock is held across this await, which
// blocks writers (tun replacement/removal) until a packet
// arrives — confirm this is intended.
tun.recv(&mut buf)
.await
.context("failed to read packet from tailnet tun")?
};
outbound_tx
.send(buf[..len].to_vec())
.await
.context("failed to forward packet to tailnet helper")?;
}
#[allow(unreachable_code)]
Result::<()>::Ok(())
});
// Join failures (panic/cancel) first, then each task's own Result.
let (inbound_result, outbound_result) = tokio::try_join!(inbound, outbound)?;
inbound_result?;
outbound_result?;
Ok(())
}
async fn read_packet_frame<R>(reader: &mut R) -> Result<Vec<u8>>
where
R: AsyncRead + Unpin,
{
let mut len_buf = [0u8; 4];
reader
.read_exact(&mut len_buf)
.await
.context("failed to read tailnet packet frame length")?;
let len = u32::from_be_bytes(len_buf) as usize;
let mut packet = vec![0u8; len];
reader
.read_exact(&mut packet)
.await
.context("failed to read tailnet packet frame payload")?;
Ok(packet)
}
/// Write one length-prefixed packet frame to the helper socket: a 4-byte
/// big-endian payload length, then the payload, then a flush.
async fn write_packet_frame<W>(writer: &mut W, packet: &[u8]) -> Result<()>
where
    W: AsyncWrite + Unpin,
{
    // Encode the length prefix up front so the two writes read clearly.
    let len_prefix = (packet.len() as u32).to_be_bytes();
    writer
        .write_all(&len_prefix)
        .await
        .context("failed to write tailnet packet frame length")?;
    writer
        .write_all(packet)
        .await
        .context("failed to write tailnet packet frame payload")?;
    writer
        .flush()
        .await
        .context("failed to flush tailnet packet frame")
}
#[cfg(test)]
mod tests {
use super::*;
@ -600,19 +179,4 @@ mod tests {
Vec::<String>::new()
);
}
#[test]
fn tailnet_server_config_uses_host_prefixes() {
// Bare IPs reported by the helper must be normalized to host-prefix CIDRs.
let status = TailscaleLoginStatus {
running: true,
tailscale_ips: vec!["100.101.102.103".to_owned(), "fd7a:115c:a1e0::123".to_owned()],
..Default::default()
};
let config = tailnet_server_config(&status);
// IPv4 gets /32, IPv6 gets /128.
assert_eq!(
config.address,
vec!["100.101.102.103/32", "fd7a:115c:a1e0::123/128"]
);
// Tailnet tunnels use the fixed 1280-byte MTU.
assert_eq!(config.mtu, Some(1280));
}
}

View file

@ -16,10 +16,10 @@ pub(crate) mod tracing;
#[cfg(target_os = "linux")]
pub mod usernet;
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub use daemon::apple::{spawn_in_process, spawn_in_process_with_paths};
#[cfg(target_vendor = "apple")]
pub use daemon::apple::spawn_in_process;
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub use daemon::{
rpc::grpc_defs, rpc::BurrowClient, rpc::DaemonResponse, rpc::ServerInfo, DaemonClient,
DaemonCommand, DaemonResponseData, DaemonStartOptions,
rpc::DaemonResponse, rpc::ServerInfo, DaemonClient, DaemonCommand, DaemonResponseData,
DaemonStartOptions,
};

View file

@ -72,14 +72,6 @@ enum Commands {
NetworkReorder(NetworkReorderArgs),
/// Delete Network
NetworkDelete(NetworkDeleteArgs),
/// Discover a Tailnet authority through the daemon
TailnetDiscover(TailnetDiscoverArgs),
/// Probe a Tailnet authority through the daemon
TailnetProbe(TailnetProbeArgs),
/// Send an ICMP echo probe through the active Tailnet tunnel over daemon packet streaming
TailnetPing(TailnetPingArgs),
/// Send a UDP echo probe through the active Tailnet tunnel over daemon packet streaming
TailnetUdpEcho(TailnetUdpEchoArgs),
#[cfg(target_os = "linux")]
/// Run a command in an unshared Linux namespace using a Burrow backend
Exec(ExecArgs),
@ -118,36 +110,6 @@ struct NetworkDeleteArgs {
id: i32,
}
/// CLI arguments for `tailnet-discover`.
#[derive(Args)]
struct TailnetDiscoverArgs {
// Email address handed to the daemon's tailnet discovery RPC.
email: String,
}
/// CLI arguments for `tailnet-probe`.
#[derive(Args)]
struct TailnetProbeArgs {
// Authority string handed to the daemon's tailnet probe RPC.
authority: String,
}
/// CLI arguments for `tailnet-ping` (ICMP echo over daemon packet streaming).
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
#[derive(Args)]
struct TailnetPingArgs {
// Remote IP address to probe.
remote: String,
// Payload carried in the ICMP echo request.
#[arg(long, default_value = "burrow-tailnet-smoke")]
payload: String,
// How long to wait for the echo reply, in milliseconds.
#[arg(long, default_value_t = 5000)]
timeout_ms: u64,
}
/// CLI arguments for `tailnet-udp-echo` (UDP echo over daemon packet streaming).
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
#[derive(Args)]
struct TailnetUdpEchoArgs {
// Remote socket address (ip:port) to probe.
remote: String,
// Message sent and expected back from the echo server.
#[arg(long, default_value = "burrow-tailnet-smoke")]
message: String,
// How long to wait for the UDP reply, in milliseconds.
#[arg(long, default_value_t = 5000)]
timeout_ms: u64,
}
#[cfg(target_os = "linux")]
#[derive(Args)]
struct TorExecArgs {
@ -278,387 +240,6 @@ async fn try_network_delete(id: i32) -> Result<()> {
Ok(())
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// Ask the daemon to discover the tailnet authority for an email address and
/// print the raw gRPC response.
async fn try_tailnet_discover(email: &str) -> Result<()> {
    let mut client = BurrowClient::from_uds().await?;
    let request = crate::daemon::rpc::grpc_defs::TailnetDiscoverRequest {
        email: email.to_owned(),
    };
    let response = client.tailnet_client.discover(request).await?.into_inner();
    println!("Tailnet Discover Response: {:?}", response);
    Ok(())
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// Ask the daemon to probe a tailnet authority and print the raw gRPC
/// response.
async fn try_tailnet_probe(authority: &str) -> Result<()> {
    let mut client = BurrowClient::from_uds().await?;
    let request = crate::daemon::rpc::grpc_defs::TailnetProbeRequest {
        authority: authority.to_owned(),
    };
    let response = client.tailnet_client.probe(request).await?.into_inner();
    println!("Tailnet Probe Response: {:?}", response);
    Ok(())
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// Send one ICMP echo request through the active tunnel via the daemon's
/// packet-streaming RPC and wait for the matching reply.
///
/// Starts the tunnel, picks a local tailnet IP of the same family as
/// `remote`, builds a raw IPv4+ICMP packet, pushes it into the daemon packet
/// stream, and scans returned packets until one matches the probe's
/// identifier/sequence or `timeout_ms` elapses.
async fn try_tailnet_ping(remote: &str, payload: &str, timeout_ms: u64) -> Result<()> {
use std::net::IpAddr;
use anyhow::Context;
use rand::Rng;
use tokio::{
sync::mpsc,
time::{timeout, Duration},
};
use tokio_stream::wrappers::ReceiverStream;
use crate::daemon::rpc::grpc_defs::{Empty, TunnelPacket};
let remote_ip: IpAddr = remote
.parse()
.with_context(|| format!("invalid remote IP address {remote}"))?;
let message = payload.as_bytes().to_vec();
let mut client = BurrowClient::from_uds().await?;
client.tunnel_client.tunnel_start(Empty {}).await?;
// The first message on the configuration stream carries the tunnel's
// local addresses.
let mut config_stream = client
.tunnel_client
.tunnel_configuration(Empty {})
.await?
.into_inner();
let config = config_stream
.message()
.await?
.context("tunnel configuration stream ended before yielding a config")?;
let local_ip = select_tailnet_local_ip(&config.addresses, remote_ip)?;
// Random identifier so this probe's reply can be distinguished.
let identifier = rand::thread_rng().gen::<u16>();
let sequence = 1_u16;
let packet = build_icmp_echo_request(local_ip, remote_ip, identifier, sequence, &message)?;
let (outbound_tx, outbound_rx) = mpsc::channel::<TunnelPacket>(128);
let mut tunnel_packets = client
.tunnel_client
.tunnel_packets(ReceiverStream::new(outbound_rx))
.await?
.into_inner();
outbound_tx
.send(TunnelPacket { payload: packet })
.await
.context("failed to send ICMP echo probe into daemon packet stream")?;
log::debug!(
"tailnet ping probe queued from {local_ip} to {remote_ip} identifier={identifier} sequence={sequence}"
);
// Dropping the sender closes the outbound half; only the reply is awaited.
drop(outbound_tx);
// Scan inbound packets until one matches our probe, bounded by timeout_ms.
let reply = timeout(Duration::from_millis(timeout_ms), async {
loop {
let packet = tunnel_packets
.message()
.await
.context("failed to read packet from daemon packet stream")?
.context("daemon packet stream ended before returning a reply")?;
log::debug!(
"tailnet ping received {} bytes from daemon packet stream",
packet.payload.len()
);
if let Some(reply) =
parse_icmp_echo_reply(&packet.payload, local_ip, remote_ip, identifier, sequence)?
{
break Ok::<_, anyhow::Error>(reply);
}
}
})
.await
.with_context(|| format!("timed out waiting for ICMP echo reply from {remote_ip}"))??;
println!("Tailnet Ping Source: {}", reply.source);
println!("Tailnet Ping Destination: {}", reply.destination);
println!(
"Tailnet Ping Payload: {}",
String::from_utf8_lossy(&reply.payload)
);
Ok(())
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// Send one UDP datagram through the active tunnel and verify the echo.
///
/// Builds a userspace smoltcp network stack, bridges its packets to/from the
/// daemon's packet-streaming RPC, sends `message` to `remote`, and checks
/// that the reply comes from the expected peer, targets our local socket,
/// and carries the identical payload.
async fn try_tailnet_udp_echo(remote: &str, message: &str, timeout_ms: u64) -> Result<()> {
use std::net::SocketAddr;
use anyhow::{bail, Context};
use futures::{SinkExt, StreamExt};
use netstack_smoltcp::StackBuilder;
use tokio::{
sync::mpsc,
time::{timeout, Duration},
};
use tokio_stream::wrappers::ReceiverStream;
use crate::daemon::rpc::grpc_defs::{Empty, TunnelPacket};
let remote_addr: SocketAddr = remote
.parse()
.with_context(|| format!("invalid remote socket address {remote}"))?;
let mut client = BurrowClient::from_uds().await?;
client.tunnel_client.tunnel_start(Empty {}).await?;
// The first configuration message carries the tunnel's local addresses.
let mut config_stream = client
.tunnel_client
.tunnel_configuration(Empty {})
.await?
.into_inner();
let config = config_stream
.message()
.await?
.context("tunnel configuration stream ended before yielding a config")?;
let local_addr = select_tailnet_local_socket(&config.addresses, remote_addr.ip())?;
// Userspace TCP/UDP stack; only its UDP socket is used here.
let (stack, runner, udp_socket, _) = StackBuilder::default()
.enable_udp(true)
.enable_tcp(true)
.build()
.context("failed to build userspace UDP stack")?;
let runner = runner.context("userspace UDP stack runner unavailable")?;
let udp_socket = udp_socket.context("userspace UDP stack socket unavailable")?;
let (mut stack_sink, mut stack_stream) = stack.split();
let (mut udp_reader, mut udp_writer) = udp_socket.split();
let (outbound_tx, outbound_rx) = mpsc::channel::<TunnelPacket>(128);
let mut tunnel_packets = client
.tunnel_client
.tunnel_packets(ReceiverStream::new(outbound_rx))
.await?
.into_inner();
// Daemon -> userspace stack: feed inbound tunnel packets into the stack.
let ingress_task = tokio::spawn(async move {
loop {
match tunnel_packets.message().await? {
Some(packet) => {
log::debug!(
"tailnet udp echo received {} bytes from daemon packet stream",
packet.payload.len()
);
stack_sink
.send(packet.payload)
.await
.context("failed to feed inbound tailnet packet into userspace stack")?;
}
None => break,
}
}
Result::<()>::Ok(())
});
// Userspace stack -> daemon: forward packets the stack emits.
let egress_task = tokio::spawn(async move {
while let Some(packet) = stack_stream.next().await {
let payload = packet.context("failed to read outbound packet from userspace stack")?;
log::debug!(
"tailnet udp echo sending {} bytes into daemon packet stream",
payload.len()
);
outbound_tx
.send(TunnelPacket { payload })
.await
.context("failed to forward outbound tailnet packet to daemon")?;
}
Result::<()>::Ok(())
});
// Drives the smoltcp stack's internal event loop.
let runner_task = tokio::spawn(async move { runner.await.map_err(anyhow::Error::from) });
udp_writer
.send((message.as_bytes().to_vec(), local_addr, remote_addr))
.await
.context("failed to send UDP echo probe into userspace stack")?;
log::debug!("tailnet udp echo probe queued from {local_addr} to {remote_addr}");
let response = timeout(Duration::from_millis(timeout_ms), udp_reader.next())
.await
.with_context(|| format!("timed out waiting for UDP echo from {remote_addr}"))?
.context("userspace UDP stack ended before returning a reply")?;
let (payload, reply_source, reply_destination) = response;
let response_text = String::from_utf8_lossy(&payload);
// Tear down the bridge tasks before validating the reply.
ingress_task.abort();
egress_task.abort();
runner_task.abort();
if reply_source != remote_addr {
bail!("received UDP reply from unexpected source {reply_source}");
}
if reply_destination != local_addr {
bail!("received UDP reply for unexpected local socket {reply_destination}");
}
if payload != message.as_bytes() {
bail!("UDP echo payload mismatch");
}
println!("Tailnet UDP Echo Source: {reply_source}");
println!("Tailnet UDP Echo Destination: {reply_destination}");
println!("Tailnet UDP Echo Payload: {response_text}");
Ok(())
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// Pick the first local tailnet address whose IP family matches the remote
/// peer's, reducing each configured CIDR string to its bare IP first.
fn select_tailnet_local_ip(
    addresses: &[String],
    remote_ip: std::net::IpAddr,
) -> Result<std::net::IpAddr> {
    use anyhow::Context;
    let want_v4 = remote_ip.is_ipv4();
    // Strip any "/prefix" suffix, parse, and keep the first family match.
    let matching = addresses.iter().find_map(|cidr| {
        let bare = cidr.split('/').next()?;
        let ip = bare.parse::<std::net::IpAddr>().ok()?;
        (ip.is_ipv4() == want_v4).then_some(ip)
    });
    matching.with_context(|| {
        format!(
            "no local {} tailnet address found in daemon config {:?}",
            if want_v4 { "IPv4" } else { "IPv6" },
            addresses
        )
    })
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// Build a local UDP socket address: a family-matched local tailnet IP
/// paired with a random port in [40000, 50000).
fn select_tailnet_local_socket(
    addresses: &[String],
    remote_ip: std::net::IpAddr,
) -> Result<std::net::SocketAddr> {
    use rand::Rng;
    let ip = select_tailnet_local_ip(addresses, remote_ip)?;
    let port: u16 = rand::thread_rng().gen_range(40000..50000);
    Ok(std::net::SocketAddr::new(ip, port))
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// Fields extracted from a matched ICMP echo reply.
struct IcmpEchoReply {
// IP source of the reply (the probed remote host).
source: std::net::IpAddr,
// IP destination of the reply (our local tailnet address).
destination: std::net::IpAddr,
// Echoed payload bytes following the 8-byte ICMP header.
payload: Vec<u8>,
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// Build a raw IPv4 packet carrying an ICMP echo request (type 8, code 0)
/// with the given identifier, sequence number, and payload.
///
/// Returns the full IPv4 header (20 bytes, no options) plus the ICMP
/// message. Fails for non-IPv4 addresses.
fn build_icmp_echo_request(
source: std::net::IpAddr,
destination: std::net::IpAddr,
identifier: u16,
sequence: u16,
payload: &[u8],
) -> Result<Vec<u8>> {
use anyhow::bail;
let (source, destination) = match (source, destination) {
(std::net::IpAddr::V4(source), std::net::IpAddr::V4(destination)) => (source, destination),
_ => bail!("tailnet ping currently supports IPv4 only"),
};
// ICMP message: type 8 (echo request), code 0, checksum placeholder,
// identifier, sequence, payload.
let mut icmp = Vec::with_capacity(8 + payload.len());
icmp.push(8);
icmp.push(0);
icmp.extend_from_slice(&[0, 0]);
icmp.extend_from_slice(&identifier.to_be_bytes());
icmp.extend_from_slice(&sequence.to_be_bytes());
icmp.extend_from_slice(payload);
// Backfill the ICMP checksum at bytes 2..4.
let icmp_checksum = internet_checksum(&icmp);
icmp[2..4].copy_from_slice(&icmp_checksum.to_be_bytes());
// NOTE(review): total_len is cast to u16 below; payloads longer than
// 65507 bytes would silently truncate the length field — confirm callers
// never pass such payloads.
let total_len = 20 + icmp.len();
let mut packet = Vec::with_capacity(total_len);
// IPv4 header: version 4, IHL 5 (0x45), DSCP/ECN 0.
packet.push(0x45);
packet.push(0);
packet.extend_from_slice(&(total_len as u16).to_be_bytes());
// Identification 0, flags/fragment offset 0.
packet.extend_from_slice(&0u16.to_be_bytes());
packet.extend_from_slice(&0u16.to_be_bytes());
// TTL 64, protocol 1 (ICMP), checksum placeholder.
packet.push(64);
packet.push(1);
packet.extend_from_slice(&[0, 0]);
packet.extend_from_slice(&source.octets());
packet.extend_from_slice(&destination.octets());
// Backfill the IPv4 header checksum at bytes 10..12, then append the ICMP
// message (not included in the header checksum).
let header_checksum = internet_checksum(&packet);
packet[10..12].copy_from_slice(&header_checksum.to_be_bytes());
packet.extend_from_slice(&icmp);
Ok(packet)
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// Check whether `packet` is the ICMP echo reply matching our probe.
///
/// Returns `Ok(Some(..))` only for an IPv4 packet with protocol ICMP, echo
/// reply type/code (0/0), the expected source/destination pair, and the
/// probe's identifier and sequence; any non-matching packet yields
/// `Ok(None)` so the caller can keep scanning. Fails only for non-IPv4
/// probe addresses. The ICMP checksum is not verified here.
fn parse_icmp_echo_reply(
packet: &[u8],
local_ip: std::net::IpAddr,
remote_ip: std::net::IpAddr,
identifier: u16,
sequence: u16,
) -> Result<Option<IcmpEchoReply>> {
use anyhow::bail;
let (local_ip, remote_ip) = match (local_ip, remote_ip) {
(std::net::IpAddr::V4(local_ip), std::net::IpAddr::V4(remote_ip)) => (local_ip, remote_ip),
_ => bail!("tailnet ping currently supports IPv4 only"),
};
// Too short for a minimal IPv4 header.
if packet.len() < 20 {
return Ok(None);
}
let version = packet[0] >> 4;
if version != 4 {
return Ok(None);
}
// Header length in bytes; the packet must also hold an 8-byte ICMP header.
let ihl = (packet[0] & 0x0f) as usize * 4;
if packet.len() < ihl + 8 {
return Ok(None);
}
// Protocol must be ICMP (1).
if packet[9] != 1 {
return Ok(None);
}
let source = std::net::Ipv4Addr::new(packet[12], packet[13], packet[14], packet[15]);
let destination = std::net::Ipv4Addr::new(packet[16], packet[17], packet[18], packet[19]);
// The reply must come from the probed remote and target our local IP.
if source != remote_ip || destination != local_ip {
return Ok(None);
}
let icmp = &packet[ihl..];
// ICMP echo reply is type 0, code 0.
if icmp[0] != 0 || icmp[1] != 0 {
return Ok(None);
}
let reply_identifier = u16::from_be_bytes([icmp[4], icmp[5]]);
let reply_sequence = u16::from_be_bytes([icmp[6], icmp[7]]);
if reply_identifier != identifier || reply_sequence != sequence {
return Ok(None);
}
Ok(Some(IcmpEchoReply {
source: std::net::IpAddr::V4(source),
destination: std::net::IpAddr::V4(destination),
payload: icmp[8..].to_vec(),
}))
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// RFC 1071 Internet checksum: one's-complement sum of 16-bit big-endian
/// words (an odd trailing byte is padded with a zero low byte), with the
/// carries folded back in and the result inverted.
fn internet_checksum(bytes: &[u8]) -> u16 {
    let mut sum: u32 = 0;
    let mut idx = 0;
    // Sum full 16-bit words.
    while idx + 1 < bytes.len() {
        sum += u32::from(u16::from_be_bytes([bytes[idx], bytes[idx + 1]]));
        idx += 2;
    }
    // Odd trailing byte occupies the high half of its word.
    if idx < bytes.len() {
        sum += u32::from(bytes[idx]) << 8;
    }
    // Fold carries until the sum fits in 16 bits.
    while sum > 0xffff {
        sum = (sum & 0xffff) + (sum >> 16);
    }
    !(sum as u16)
}
#[cfg(target_os = "linux")]
async fn try_tor_exec(payload_path: &str, command: Vec<String>) -> Result<()> {
let exit_code = usernet::run_exec(usernet::ExecInvocation {
@ -767,14 +348,6 @@ async fn main() -> Result<()> {
Commands::NetworkList => try_network_list().await?,
Commands::NetworkReorder(args) => try_network_reorder(args.id, args.index).await?,
Commands::NetworkDelete(args) => try_network_delete(args.id).await?,
Commands::TailnetDiscover(args) => try_tailnet_discover(&args.email).await?,
Commands::TailnetProbe(args) => try_tailnet_probe(&args.authority).await?,
Commands::TailnetPing(args) => {
try_tailnet_ping(&args.remote, &args.payload, args.timeout_ms).await?
}
Commands::TailnetUdpEcho(args) => {
try_tailnet_udp_echo(&args.remote, &args.message, args.timeout_ms).await?
}
#[cfg(target_os = "linux")]
Commands::Exec(args) => {
try_exec(

View file

@ -47,16 +47,10 @@ pub fn initialize() {
#[cfg(target_os = "macos")]
let subscriber = {
// `tracing_oslog` is crashing under Tokio/h2 span churn in the host daemon on
// current macOS. Keep logging on stderr by default and allow opt-in OSLog
// only when explicitly requested for local debugging.
let enable_oslog = matches!(
std::env::var("BURROW_ENABLE_OSLOG").as_deref(),
Ok("1" | "true" | "TRUE" | "yes" | "YES")
);
let system_log = enable_oslog.then(|| {
tracing_oslog::OsLogger::new("com.hackclub.burrow", "tracing")
});
let system_log = Some(tracing_oslog::OsLogger::new(
"com.hackclub.burrow",
"tracing",
));
let stderr = (console::user_attended_stderr() || system_log.is_none()).then(make_stderr);
Registry::default().with(stderr).with(system_log)
};

View file

@ -1,91 +0,0 @@
{
groups = {
users = "burrow-users";
admins = "burrow-admins";
linear = {
owners = "linear-owners";
admins = "linear-admins";
guests = "linear-guests";
};
};
identities = {
contact = {
displayName = "Burrow";
canonicalEmail = "contact@burrow.net";
isAdmin = true;
forgeAuthorized = true;
bootstrapAuthentik = true;
sshPublicKeyPath = ./nixos/keys/contact_at_burrow_net.pub;
roles = [
"operator"
"forge-admin"
];
};
conrad = {
displayName = "Conrad Kramer";
canonicalEmail = "conrad@burrow.net";
isAdmin = true;
forgeAuthorized = false;
bootstrapAuthentik = true;
roles = [
"operator"
"founder"
];
};
jett = {
displayName = "Jett";
canonicalEmail = "jett@burrow.net";
isAdmin = true;
forgeAuthorized = false;
forgeUnixUser = true;
bootstrapAuthentik = true;
sshPublicKeyPath = ./nixos/keys/jett_at_burrow_net.pub;
roles = [
"member"
"operator"
"forge-admin"
];
};
davnotdev = {
displayName = "David";
canonicalEmail = "davnotdev@burrow.net";
isAdmin = true;
forgeAuthorized = false;
bootstrapAuthentik = true;
roles = [
"member"
"operator"
"forge-admin"
];
};
agent = {
displayName = "Burrow Agent";
canonicalEmail = "agent@burrow.net";
isAdmin = false;
forgeAuthorized = true;
bootstrapAuthentik = false;
sshPublicKeyPath = ./nixos/keys/agent_at_burrow_net.pub;
roles = [
"automation"
];
};
ui-test = {
displayName = "Burrow UI Test";
canonicalEmail = "ui-test@burrow.net";
isAdmin = false;
forgeAuthorized = false;
bootstrapAuthentik = true;
authentikPasswordSecret = "burrowAuthentikUiTestPassword";
roles = [
"testing"
"apple-ui"
];
};
};
}

View file

@ -15,7 +15,7 @@ Note that the flatpak version can compile but will not run properly!
1. Install build dependencies
```
sudo apt install -y clang meson cmake pkg-config libssl-dev libgtk-4-dev libadwaita-1-dev gettext desktop-file-utils
sudo apt install -y clang meson cmake pkg-config libgtk-4-dev libadwaita-1-dev gettext desktop-file-utils
```
2. Install flatpak builder (Optional)
@ -38,7 +38,7 @@ Note that the flatpak version can compile but will not run properly!
1. Install build dependencies
```
sudo dnf install -y clang ninja-build cmake meson openssl-devel gtk4-devel glib2-devel libadwaita-devel desktop-file-utils libappstream-glib
sudo dnf install -y clang ninja-build cmake meson gtk4-devel glib2-devel libadwaita-devel desktop-file-utils libappstream-glib
```
2. Install flatpak builder (Optional)
@ -61,7 +61,7 @@ Note that the flatpak version can compile but will not run properly!
1. Install build dependencies
```
sudo xbps-install -Sy gcc clang meson cmake pkg-config openssl-devel gtk4-devel gettext desktop-file-utils gtk4-update-icon-cache appstream-glib
sudo xbps-install -Sy gcc clang meson cmake pkg-config gtk4-devel gettext desktop-file-utils gtk4-update-icon-cache appstream-glib
```
2. Install flatpak builder (Optional)
@ -88,12 +88,6 @@ flatpak install --user \
## Building
With Nix, enter the focused GTK shell before running the Meson build:
```bash
nix develop .#gtk
```
<details>
<summary>General</summary>
@ -145,16 +139,6 @@ nix develop .#gtk
## Running
The GTK app mirrors the Apple home surface: a Burrow header, Networks carousel,
Accounts section, Tunnel action, and the same add flows for WireGuard, Tor, and
Tailnet. It talks to the daemon over the same gRPC API used by Apple clients for
network storage, tunnel state, Tailnet discovery, authority probing, browser
sign-in, and Tailnet payloads.
On Linux the GTK app first looks for a daemon on the configured gRPC socket. If
none is reachable, it starts an embedded user-scoped daemon with a socket under
`XDG_RUNTIME_DIR` and a database under `XDG_DATA_HOME` before refreshing the UI.
<details>
<summary>General</summary>

View file

@ -58,17 +58,3 @@ evolution/
```
Use ASCII Markdown. Keep metadata at the top of each proposal so tooling and future agents can parse it quickly.
## BEP Helper
Use the `bep` helper under `Scripts/` to browse or list proposals:
- `Scripts/bep` opens a quick browser for `evolution/`.
- `Scripts/bep list --status Draft` lists proposals by status.
- `Scripts/bep open BEP-0005` opens a proposal in `$EDITOR`.
Validate proposal metadata with:
```bash
python3 Scripts/check-bep-metadata.py
```

View file

@ -1,81 +0,0 @@
# `BEP-0005` - Daemon IPC and Apple Boundary
```text
Status: Draft
Proposal: BEP-0005
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: II, III, IV, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should formalize one Apple/runtime boundary: Apple clients speak only to the daemon over gRPC on the app-group Unix socket, and the daemon owns all external control-plane, helper-process, and runtime coordination work. This prevents UI code from accreting side HTTP paths or ad hoc control-plane integrations that bypass the system Burrow is supposed to own.
## Motivation
- The current Tailnet work already showed the failure mode: Swift UI code started reaching around the daemon boundary to talk to helper HTTP endpoints directly.
- Apple-specific process ownership is easy to blur between the app, the network extension, and helper daemons unless the contract is explicit.
- If Burrow wants a durable multi-runtime architecture, the daemon must remain the only orchestration boundary between clients and control/data-plane behavior.
## Detailed Design
- Apple UI and Apple support libraries may call only daemon gRPC methods over the declared Burrow Unix socket.
- Direct Swift calls to external control-plane HTTP APIs, localhost helper HTTP servers, or runtime-specific subprocesses are forbidden.
- The daemon is responsible for:
- discovery of Tailnet authorities and related metadata
- control-plane session setup and tracking
- login/session lifecycle brokering
- runtime start/stop/reconcile
- translating helper or bridge processes into stable daemon RPCs
- `burrow/src/control/` owns transport-neutral control-plane semantics such as discovery, authority normalization, and request/response shaping.
- Apple UI owns presentation only:
- forms
- local state
- presenting returned auth URLs or statuses
- surfacing daemon availability and errors
- Any new Apple-facing runtime capability requires a daemon RPC first.
## Security and Operational Considerations
- Keeping control-plane I/O out of Swift UI reduces accidental secret, token, and callback sprawl across app code.
- The daemon boundary makes testing and kill-switch behavior tractable because runtime integration is localized.
- Apple daemon lifecycle ownership must be explicit: either the app ensures the daemon is running before RPC or the extension owns it and the UI surfaces daemon-unavailable state clearly.
- Non-Apple presentation clients should follow the same daemon-first lifecycle pattern: connect to a managed daemon when present, or start a user-scoped embedded daemon before issuing RPCs, without adding platform-local control-plane paths.
## Contributor Playbook
- Before adding a new Apple-side workflow, identify the daemon RPC that should own it.
- If the RPC does not exist, add the protocol shape in `proto/burrow.proto`, implement it in the daemon, and only then wire Swift UI.
- Verify that no Swift UI or support code calls external control-plane HTTP endpoints directly.
- For Tailnet and similar flows, test:
- daemon unavailable behavior
- successful RPC path
- error propagation through the UI
- Keep Linux GTK and Apple clients visually and functionally aligned around the same daemon-backed home surface: Networks, Accounts, Tunnel, and add flows should remain corresponding views over the daemon API.
## Alternatives Considered
- Let Apple UI call control-plane endpoints directly for convenience. Rejected because it creates parallel orchestration paths and breaks the daemon contract.
- Allow one-off exceptions for login helpers. Rejected because those exceptions become the architecture.
## Impact on Other Work
- Governs the Tailnet refactor and future Apple runtime work.
- Governs Linux GTK daemon startup parity where the same daemon API is reused from a user-scoped presentation process.
- Interacts with BEP-0002 control-plane bootstrap and BEP-0003 transport refactoring.
## Decision
Pending.
## References
- `Apple/UI/`
- `Apple/Core/`
- `Apple/NetworkExtension/`
- `burrow/src/daemon/`
- `burrow/src/control/`

View file

@ -1,74 +0,0 @@
# `BEP-0006` - Tailnet Authority-First Control Plane
```text
Status: Draft
Proposal: BEP-0006
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: I, II, IV, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should treat Tailnet as one protocol family. Tailscale-managed and self-hosted Headscale-style deployments differ by authority, policy, and auth details, not by a distinct user-facing protocol. Burrow's config and UI should therefore be authority-first rather than provider-first.
## Motivation
- Splitting Tailscale and Headscale into separate user-facing providers causes fake architectural divergence.
- Discovery already naturally returns an authority and optional issuer; that is the stable contract users actually need.
- Future managed or enterprise deployments should fit the same model without requiring another protocol picker.
## Detailed Design
- Tailnet configuration is centered on:
- account
- identity
- authority/login server URL
- optional tailnet name
- optional hostname
- auth method/material
- User-facing surfaces should not force a protocol choice between Tailscale and Headscale.
- Provider inference may remain internal metadata for compatibility and diagnostics:
- default managed Tailscale authority
- custom self-hosted authority
- Burrow-owned authority when explicitly applicable
- Discovery returns authority and related metadata; editing the authority is the mechanism that moves a configuration from managed default to custom control server.
- The daemon and control layer own provider inference; the UI should primarily present “Tailnet” plus the selected authority.
- Platform clients consume the same daemon gRPC surface for Tailnet discovery, authority probing, browser sign-in, and saved network payloads. macOS/iOS SwiftUI and Linux GTK may differ in presentation and local credential stores, but neither should introduce a second control-plane path.
## Security and Operational Considerations
- Authority-first config reduces UI complexity and makes misconfiguration easier to reason about.
- Provider-specific assumptions must not leak into packet or control-plane semantics unless the authority actually requires them.
- Auth material must remain authority-scoped and identity-scoped in daemon storage.
## Contributor Playbook
- Remove provider pickers from Tailnet UI unless a concrete protocol difference requires one.
- Store the authority explicitly in payloads and infer provider internally only when needed.
- Keep Linux GTK and Apple clients at functional parity by routing Tailnet add/discover/probe/login through `TailnetControl` and `Networks` RPCs instead of platform-local HTTP or legacy JSON daemon commands.
- Prefer tests that validate authority normalization and discovery behavior over UI-provider branching.
## Alternatives Considered
- Keep separate user-facing providers for Tailscale and Headscale. Rejected because it models deployment shape as protocol shape.
- Collapse all control planes into one opaque Burrow provider. Rejected because the authority still matters operationally and diagnostically.
## Impact on Other Work
- Refines BEP-0002's Tailscale-shaped control-plane work.
- Constrains the Tailnet Apple and Linux GTK refactors plus future daemon control-plane storage.
## Decision
Pending.
## References
- `burrow/src/control/`
- `Apple/UI/Networks/`
- `burrow-gtk/src/`
- `proto/burrow.proto`

View file

@ -1,73 +0,0 @@
# `BEP-0007` - Identity Registry and Operator Bootstrap
```text
Status: Draft
Proposal: BEP-0007
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: II, III, IV, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should maintain one canonical registry for project identities, aliases, bootstrap users, SSH keys, and admin-group mappings. Forgejo, Authentik, and related bootstrap configuration should derive from that registry instead of hardcoding overlapping identity facts in multiple modules.
## Motivation
- Burrow currently hardcodes operator and admin/bootstrap user facts directly in host configuration.
- Multi-account and self-hosted identity are becoming core architecture, not incidental infra details.
- A single registry reduces drift across Forgejo, Authentik, Headscale, SSH authorization, and future control-plane bootstrap.
## Detailed Design
- Add a root-level identity registry (`contributors.nix`) as the canonical source of truth for:
- usernames
- display names
- canonical emails
- external source emails or aliases
- admin scope
- bootstrap eligibility
- forge authorized SSH keys
- named roles
- Consume that registry from host configuration for:
- Forgejo authorized keys
- Forgejo bootstrap admin defaults
- Authentik bootstrap users
- Burrow user/admin group names
- Future work may derive contributor docs, OIDC bootstrap, and additional runtime configuration from the same registry.
## Security and Operational Considerations
- Identity drift is a security bug when it affects admin groups, bootstrap accounts, or SSH authorization.
- The registry stores metadata only; secrets remain in agenix or other declared secret paths.
- Changes to the registry should receive explicit review because they affect access and governance.
## Contributor Playbook
- Edit `contributors.nix` first when changing operator, admin, alias, or bootstrap identity state.
- Derive runtime configuration from the registry instead of duplicating the same facts elsewhere.
- Keep secret references separate from identity metadata.
## Alternatives Considered
- Continue hardcoding users in module options. Rejected because drift is inevitable once Forgejo, Authentik, and Headscale all depend on the same identities.
- Create separate per-service user lists. Rejected because it duplicates governance facts and weakens review.
## Impact on Other Work
- Supports forge auth, Authentik group sync, and future multi-account Burrow control-plane work.
- Creates the basis for stronger contributor and operator provenance later.
## Decision
Pending.
## References
- `contributors.nix`
- `nixos/hosts/burrow-forge/default.nix`
- `nixos/modules/burrow-authentik.nix`
- `nixos/modules/burrow-forge.nix`

View file

@ -1,169 +0,0 @@
# `BEP-0008` - Authentik-Backed Team Chat and Workspace Identity
```text
Status: Draft
Proposal: BEP-0008
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: II, III, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should add a self-hosted team chat surface at `chat.burrow.net` and
continue the project-wide move toward Authentik as the identity authority for
external work systems. The immediate targets are a self-hosted Zulip
deployment rooted in Authentik SAML, a Linear SAML configuration when the
workspace plan supports it, and a 1Password Unlock-with-SSO deployment rooted
in the same Authentik-backed OIDC authority.
This keeps Burrow's day-to-day coordination surfaces aligned with the same
admin groups, canonical users, and secret-handling model already used for
Forgejo, Headscale, and Tailscale. It also avoids fragmenting login state
across vendor-native Google auth flows when Burrow already operates an IdP.
## Motivation
- Forge, Tailnet, operator identity, and Tailscale custom OIDC are already
rooted in Authentik. Team chat, work tracking, and password-manager access
should not become separate authority islands.
- Zulip provides a self-hosted chat system under Burrow's control, which fits
the constitution better than adding another hosted chat dependency.
- Linear remains a SaaS dependency, but its workspace access should still be
derived from Burrow-managed identities and domains when the vendor plan
exposes SAML configuration.
- 1Password Business is another external work surface where Burrow-controlled
identities are preferable to vendor-native Google-only auth. Its current
vendor flow is OIDC-based Unlock with SSO rather than SAML, so the proposal
needs to preserve protocol accuracy instead of flattening everything into
one SAML bucket.
- Burrow already has a canonical public identity registry and a secret-backed
external-email alias map. Reusing that structure is lower-risk than
inventing per-app user bootstrap logic.
## Detailed Design
- Add a Burrow-managed Zulip workload on the forge host at `chat.burrow.net`.
The deployment should be repo-owned and rebuildable from Nix, even if the
runtime uses vendor-supported container images internally.
- Prefer host-managed NixOS services for Zulip's stateful dependencies
(PostgreSQL, Redis, RabbitMQ, memcached, backups) so Burrow owns the
operational surface directly rather than composing a container-side service
mesh.
- Zulip should authenticate through Authentik SAML rather than local passwords
as the primary path. Initial bootstrap may still keep an operational escape
hatch while the deployment is being validated.
- Add Authentik-managed SAML applications for:
- Zulip at `chat.burrow.net`
- Linear using Burrow's claimed domains and Authentik metadata
- Add an Authentik-managed SCIM backchannel for Linear so Burrow can push
role groups declaratively instead of hand-maintaining workspace roles.
- Add an Authentik-managed OIDC application for 1Password Business under the
Burrow team sign-in address.
- Treat Zulip and Linear as downstream applications of the same identity
authority, and treat 1Password as part of that same authority even though
its vendor protocol is OIDC rather than SAML. The source of truth remains:
- public identities and admin intent in `contributors.nix`
- private alias mappings and external accounts in agenix-encrypted secrets
- Keep app-specific configuration in dedicated reconciliation code or module
options instead of hand-edited UI state.
- Prefer service-specific reconciliation over ad hoc manual setup so rebuilds
and host replacement converge automatically.
- When Burrow wants an external-user launcher surface in Authentik, configure
the brand's `default_application` explicitly instead of relying on
`/if/user/`, which otherwise remains internal-user-only.
- Derive Linear SCIM role groups from Burrow's canonical identity metadata.
If Burrow-wide admin intent says a user is an operator/admin, the repo-owned
configuration should map that intent onto the Linear push group without a
second manual roster.
- Model 1Password according to the vendor's actual integration contract:
- OIDC Authorization Code Flow with PKCE
- public client rather than a confidential client
- no Burrow-side dependence on a stored client secret unless the vendor flow
changes
## Security and Operational Considerations
- Do not store external personal email mappings in public registry files.
Public tree data may include Burrow usernames and canonical `@burrow.net`
addresses, but external aliases must stay in encrypted secrets.
- Zulip internal service credentials, Django secret material, and any mail
credentials must have explicit storage and rotation paths.
- Linear SAML must not become Burrow's only admin recovery path. At least one
owner login path outside the enforced SAML flow should remain available until
rollout is proven.
- Linear SCIM group push should be role-scoped and explicit. Burrow should
avoid blanket ownership mapping unless that intent is recorded in the repo.
- 1Password Owners cannot be forced onto Unlock with SSO during initial setup.
Burrow should preserve the owner recovery path and treat OIDC rollout as a
scoped migration for non-owner users first.
- If Zulip is deployed without production-grade outbound email at first, that
limitation must be documented and treated as an operational constraint, not a
hidden assumption.
- Rollback should be straightforward:
- disable or stop the Zulip module
- remove the Authentik SAML apps
- remove the Authentik OIDC app used for 1Password if necessary
- leave the underlying Burrow identities unchanged
## Contributor Playbook
- Define the app and identity intent in the repository before modifying the
forge host.
- Add or update Nix modules so `burrow-forge` can rebuild Zulip and the
corresponding Authentik SAML configuration from the tree.
- Verify:
- `chat.burrow.net` serves a working Zulip login surface
- Authentik exposes working metadata for Zulip and Linear
- Authentik exposes a working OIDC issuer for 1Password
- users in Burrow admin groups receive the expected access on first login
- external Burrow users landing on `auth.burrow.net` reach the intended
app launcher target instead of the internal-only Authentik user interface
- Record concrete evidence for:
- host deployment generation
- Authentik reconciliation success
- Zulip login success
- Linear SAML configuration state
- 1Password Unlock with SSO configuration state
## Alternatives Considered
- Use Zulip Cloud instead of self-hosting. Rejected because the ask is to host
chat under `chat.burrow.net`, and Burrow already operates a forge host with a
self-managed identity plane.
- Keep Linear on Google-native login. Rejected because it leaves Burrow work
access outside the project's operator and group model.
- Treat 1Password as a SAML app for consistency. Rejected because the live
vendor flow is OIDC and Burrow should not pretend otherwise in repo-owned
infrastructure.
- Add per-app manual Authentik configuration without repository automation.
Rejected because it violates Burrow's infrastructure-in-repo commitment.
## Impact on Other Work
- Extends Burrow's Authentik role from control-plane identity into team-work
surfaces.
- Introduces a persistent chat workload on the forge host, with resource and
monitoring implications.
- Creates a likely follow-up for SCIM or richer group synchronization if Linear
or Zulip role mapping needs to become fully declarative later.
- Adds a second OIDC relying party beyond Forgejo, Headscale, and Tailscale,
which raises the importance of keeping Burrow's Authentik scope mappings and
redirect handling consistent across applications.
## Decision
Pending.
## References
- `CONSTITUTION.md`
- `contributors.nix`
- `evolution/proposals/BEP-0004-hosted-mail-and-saas-identity.md`
- Authentik docs: SAML provider and metadata endpoints
- Zulip docs: SAML authentication and docker deployment
- Linear docs: SAML and access control
- 1Password docs: Unlock with SSO using OpenID Connect

26
flake.lock generated
View file

@ -123,37 +123,13 @@
"url": "https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable"
}
},
"nsc-autoscaler": {
"inputs": {
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1775221037,
"narHash": "sha256-tv6Y3cqn76PEyZpSMMItVW96KKIboovBWTOv5Lt7PXg=",
"ref": "refs/heads/main",
"rev": "2c485752fde28ec3be2f228b571d1906f4bcf917",
"revCount": 10,
"type": "git",
"url": "https://compatible.systems/conrad/nsc-autoscaler.git"
},
"original": {
"type": "git",
"url": "https://compatible.systems/conrad/nsc-autoscaler.git"
}
},
"root": {
"inputs": {
"agenix": "agenix",
"disko": "disko",
"flake-utils": "flake-utils",
"hcloud-upload-image-src": "hcloud-upload-image-src",
"nixpkgs": "nixpkgs",
"nsc-autoscaler": "nsc-autoscaler"
"nixpkgs": "nixpkgs"
}
},
"systems": {

View file

@ -12,18 +12,13 @@
url = "tarball+https://codeload.github.com/nix-community/disko/tar.gz/master";
inputs.nixpkgs.follows = "nixpkgs";
};
nsc-autoscaler = {
url = "git+https://compatible.systems/conrad/nsc-autoscaler.git";
inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-utils.follows = "flake-utils";
};
hcloud-upload-image-src = {
url = "tarball+https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0";
flake = false;
};
};
outputs = { self, nixpkgs, flake-utils, agenix, disko, nsc-autoscaler, hcloud-upload-image-src }:
outputs = { self, nixpkgs, flake-utils, agenix, disko, hcloud-upload-image-src }:
let
supportedSystems = [
"x86_64-linux"
@ -94,7 +89,6 @@
pkgs.stdenvNoCC.mkDerivation {
pname = "nsc";
inherit version src;
meta.mainProgram = "nsc";
dontConfigure = true;
dontBuild = true;
unpackPhase = ''
@ -145,35 +139,6 @@
subPackages = [ "./cmd/forgejo-nsc-autoscaler" ];
vendorHash = "sha256-Kpr+5Q7Dy4JiLuJVZbFeJAzLR7PLPYxhtJqfxMEytcs=";
};
burrowSrc = lib.cleanSourceWith {
src = ./.;
filter = path: type:
let
p = toString path;
name = builtins.baseNameOf path;
hasDir = dir: lib.hasInfix "/${dir}/" p || lib.hasSuffix "/${dir}" p;
in
!(hasDir ".git" || hasDir "target" || hasDir "node_modules" || name == "result");
};
burrowPkg = pkgs.rustPlatform.buildRustPackage {
pname = "burrow";
version = "0.1.0";
src = burrowSrc;
cargoLock = {
lockFile = ./Cargo.lock;
outputHashes = {
"tracing-oslog-0.1.2" = "sha256-DjJDiPCTn43zJmmOfuRnyti8iQf9qoXICMKIx4bAG3I=";
};
};
cargoBuildFlags = [
"-p"
"burrow"
"--bin"
"burrow"
];
nativeBuildInputs = [ pkgs.protobuf ];
meta.mainProgram = "burrow";
};
in
{
devShells.default = pkgs.mkShell {
@ -201,7 +166,6 @@
packages =
{
agenix = agenix.packages.${system}.agenix;
burrow = burrowPkg;
hcloud-upload-image = hcloudUploadImagePkg;
forgejo-nsc-dispatcher = forgejoNscDispatcher;
forgejo-nsc-autoscaler = forgejoNscAutoscaler;
@ -211,10 +175,10 @@
// {
nixosModules.burrow-forge = import ./nixos/modules/burrow-forge.nix;
nixosModules.burrow-forge-runner = import ./nixos/modules/burrow-forge-runner.nix;
nixosModules.burrow-forgejo-nsc = nsc-autoscaler.nixosModules.default;
nixosModules.burrow-forgejo-nsc = import ./nixos/modules/burrow-forgejo-nsc.nix;
nixosModules.burrow-authentik = import ./nixos/modules/burrow-authentik.nix;
nixosModules.burrow-headscale = import ./nixos/modules/burrow-headscale.nix;
nixosModules.burrow-zulip = import ./nixos/modules/burrow-zulip.nix;
nixosConfigurations.burrow-forge = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
specialArgs = {

View file

@ -9,7 +9,7 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B
- `hosts/burrow-forge/default.nix`: host entrypoint
- `modules/burrow-forge.nix`: Forgejo, Caddy, PostgreSQL, and admin bootstrap module
- `modules/burrow-forge-runner.nix`: Forgejo Actions runner and agent identity bootstrap
- upstream `compatible.systems/conrad/nsc-autoscaler`: Namespace-backed ephemeral Forgejo runner module consumed via the Burrow flake input
- `modules/burrow-forgejo-nsc.nix`: Namespace-backed ephemeral Forgejo runner services
- `modules/burrow-authentik.nix`: minimal Authentik IdP for Burrow control planes
- `modules/burrow-headscale.nix`: Headscale control plane rooted in Authentik OIDC
- `../secrets.nix`: agenix recipient map for tracked Burrow forge secrets
@ -23,7 +23,7 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B
- `../Scripts/cloudflare-upsert-a-record.sh`: upsert DNS-only Cloudflare `A` records for Burrow host cutovers
- `../Scripts/forge-deploy.sh`: remote `nixos-rebuild` entrypoint for the forge host
- `../Scripts/provision-forgejo-nsc.sh`: render Burrow Namespace dispatcher/autoscaler runtime inputs and ensure the default Forgejo scope exists
- `../Scripts/seal-forgejo-nsc-secrets.sh`: encrypt forgejo-nsc runtime inputs into the agenix secrets consumed by `burrow-forge`
- `../Scripts/sync-forgejo-nsc-config.sh`: copy intake-backed dispatcher/autoscaler inputs to the host
## Intended Flow
@ -32,17 +32,15 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B
3. Run `Scripts/bootstrap-forge-intake.sh` to place the Forgejo bootstrap password file and automation SSH key under `/var/lib/burrow/intake/`.
4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account.
5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent <agent@burrow.net>`.
6. Run `Scripts/provision-forgejo-nsc.sh` locally to refresh `intake/forgejo_nsc_token.txt`, `intake/forgejo_nsc_dispatcher.yaml`, and `intake/forgejo_nsc_autoscaler.yaml`.
7. Run `Scripts/seal-forgejo-nsc-secrets.sh` to encrypt those runtime inputs into the agenix secrets used by `burrow-forge`.
8. Ensure `/var/lib/agenix/agenix.key` exists on the host, encrypt `secrets/infra/authentik.env.age`, `secrets/infra/authentik-google-client-id.age`, `secrets/infra/authentik-google-client-secret.age`, `secrets/infra/forgejo-oidc-client-secret.age`, `secrets/infra/headscale-oidc-client-secret.age`, `secrets/infra/forgejo-nsc-token.age`, `secrets/infra/forgejo-nsc-dispatcher-config.age`, and `secrets/infra/forgejo-nsc-autoscaler-config.age`, and let agenix materialize them under `/run/agenix/`.
9. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, `auth.burrow.net`, `ts.burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME.
10. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace.
11. Configure Forward Email custom S3 backups for `burrow.net` and `burrow.rs` out-of-band with `Tools/forwardemail-custom-s3.sh`.
6. Run `Scripts/provision-forgejo-nsc.sh` locally, then `Scripts/sync-forgejo-nsc-config.sh` to place the Namespace dispatcher/autoscaler runtime inputs under `/var/lib/burrow/intake/`.
7. Ensure `/var/lib/agenix/agenix.key` exists on the host, encrypt `secrets/infra/authentik.env.age`, `secrets/infra/authentik-google-client-id.age`, `secrets/infra/authentik-google-client-secret.age`, and `secrets/infra/headscale-oidc-client-secret.age`, and let agenix materialize them under `/run/agenix/`.
8. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, `auth.burrow.net`, `ts.burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME.
9. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace.
10. Configure Forward Email custom S3 backups for `burrow.net` and `burrow.rs` out-of-band with `Tools/forwardemail-custom-s3.sh`.
## Current Constraints
- `burrow-forge` is live on NixOS in `hel1` at `89.167.47.21`.
- `services.forgejo-nsc` now expects agenix-backed runtime inputs at `/run/agenix/burrowForgejoNscToken`, `/run/agenix/burrowForgejoNscDispatcherConfig`, and `/run/agenix/burrowForgejoNscAutoscalerConfig`.
- `burrow-forge` is live on NixOS in `hel1` at `89.167.47.21`, and `Scripts/check-forge-host.sh --expect-nsc` passes locally against that host.
- Authentik and Headscale secrets now live in tracked agenix blobs under `secrets/infra/` and decrypt to `/run/agenix/` on the forge host.
- Public Burrow forge cutover completed on March 15, 2026:
- `burrow.net`, `git.burrow.net`, and `nsc-autoscaler.burrow.net` now publish public `A` records to `89.167.47.21`

View file

@ -1,56 +1,4 @@
{ config, lib, pkgs, self, ... }:
let
contributors = import ../../../contributors.nix;
identities = contributors.identities;
linearGroups = contributors.groups.linear;
stripNewline = value: lib.replaceStrings [ "\n" ] [ "" ] value;
authentikPasswordSecretPath = identity:
if identity ? authentikPasswordSecret
then config.age.secrets.${identity.authentikPasswordSecret}.path
else null;
bootstrapUsers = lib.mapAttrsToList
(
username: identity: {
inherit username;
name = identity.displayName;
email = identity.canonicalEmail;
isAdmin = identity.isAdmin or false;
groups = lib.optionals (identity.isAdmin or false) [ linearGroups.owners ];
passwordFile = authentikPasswordSecretPath identity;
}
)
(lib.filterAttrs (_: identity: identity.bootstrapAuthentik or false) identities);
headscaleBootstrapUsers = lib.mapAttrsToList
(
username: identity: {
name = username;
displayName = identity.displayName;
email = identity.canonicalEmail;
}
)
(lib.filterAttrs (_: identity: identity.bootstrapAuthentik or false) identities);
forgeUnixUsernames =
builtins.attrNames (lib.filterAttrs (_: identity: identity.forgeUnixUser or false) identities);
forgeUnixUsers = lib.genAttrs forgeUnixUsernames (username:
let
identity = identities.${username};
sshKeys = lib.optional (identity ? sshPublicKeyPath) (stripNewline (builtins.readFile identity.sshPublicKeyPath));
in
{
isNormalUser = true;
createHome = true;
home = "/home/${username}";
shell = pkgs.bashInteractive;
extraGroups = lib.optional (identity.isAdmin or false) "wheel";
openssh.authorizedKeys.keys = sshKeys;
});
forgeUnixAdminUsernames =
builtins.attrNames (lib.filterAttrs (_: identity: (identity.forgeUnixUser or false) && (identity.isAdmin or false)) identities);
forgeAuthorizedKeys = map
(username: builtins.readFile identities.${username}.sshPublicKeyPath)
(builtins.attrNames (lib.filterAttrs (_: identity: identity.forgeAuthorized or false) identities));
in
{ config, self, ... }:
{
imports = [
@ -61,7 +9,6 @@ in
self.nixosModules.burrow-forgejo-nsc
self.nixosModules.burrow-authentik
self.nixosModules.burrow-headscale
self.nixosModules.burrow-zulip
];
system.stateVersion = "24.11";
@ -73,22 +20,6 @@ in
"flakes"
];
users.users = forgeUnixUsers;
security.sudo.extraRules = lib.map (username: {
users = [ username ];
commands = [
{
command = "ALL";
options = [ "NOPASSWD" ];
}
];
}) forgeUnixAdminUsernames;
environment.systemPackages = lib.optionals config.services.forgejo-nsc.enable [
self.packages.${pkgs.stdenv.hostPlatform.system}.nsc
];
age.identityPaths = [ "/var/lib/agenix/agenix.key" ];
age.secrets.burrowAuthentikEnv = {
file = ../../../secrets/infra/authentik.env.age;
@ -102,24 +33,6 @@ in
group = "root";
mode = "0400";
};
age.secrets.burrowForgejoOidcClientSecret = {
file = ../../../secrets/infra/forgejo-oidc-client-secret.age;
owner = "forgejo";
group = "forgejo";
mode = "0440";
};
age.secrets.burrowTailscaleOidcClientSecret = {
file = ../../../secrets/infra/tailscale-oidc-client-secret.age;
owner = "root";
group = "root";
mode = "0400";
};
age.secrets.burrowLinearScimToken = {
file = ../../../secrets/infra/linear-scim-token.age;
owner = "root";
group = "root";
mode = "0400";
};
age.secrets.burrowAuthentikGoogleClientId = {
file = ../../../secrets/infra/authentik-google-client-id.age;
owner = "root";
@ -132,144 +45,48 @@ in
group = "root";
mode = "0400";
};
age.secrets.burrowAuthentikGoogleAccountMap = {
file = ../../../secrets/infra/authentik-google-account-map.json.age;
owner = "root";
group = "root";
mode = "0400";
};
age.secrets.burrowAuthentikUiTestPassword = {
file = ../../../secrets/infra/authentik-ui-test-password.age;
owner = "root";
group = "root";
mode = "0400";
};
age.secrets.burrowForgejoNscToken = {
file = ../../../secrets/infra/forgejo-nsc-token.age;
owner = "forgejo-nsc";
group = "forgejo-nsc";
mode = "0400";
};
age.secrets.burrowForgejoNscDispatcherConfig = {
file = ../../../secrets/infra/forgejo-nsc-dispatcher-config.age;
owner = "forgejo-nsc";
group = "forgejo-nsc";
mode = "0400";
};
age.secrets.burrowForgejoNscAutoscalerConfig = {
file = ../../../secrets/infra/forgejo-nsc-autoscaler-config.age;
owner = "forgejo-nsc";
group = "forgejo-nsc";
mode = "0400";
};
age.secrets.burrowZulipPostgresPassword = {
file = ../../../secrets/infra/zulip-postgres-password.age;
owner = "root";
group = "root";
mode = "0400";
};
age.secrets.burrowZulipRabbitmqPassword = {
file = ../../../secrets/infra/zulip-rabbitmq-password.age;
owner = "root";
group = "root";
mode = "0400";
};
age.secrets.burrowZulipRedisPassword = {
file = ../../../secrets/infra/zulip-redis-password.age;
owner = "root";
group = "root";
mode = "0400";
};
age.secrets.burrowZulipSecretKey = {
file = ../../../secrets/infra/zulip-secret-key.age;
owner = "root";
group = "root";
mode = "0400";
};
networking.extraHosts = ''
127.0.0.1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net chat.burrow.net nsc-autoscaler.burrow.net
::1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net chat.burrow.net nsc-autoscaler.burrow.net
127.0.0.1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net
::1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net
'';
services.burrow.forge = {
enable = true;
contactEmail = identities.contact.canonicalEmail;
adminUsername = "contact";
adminEmail = identities.contact.canonicalEmail;
adminPasswordFile = "/var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt";
oidcAdminGroup = contributors.groups.admins;
oidcRestrictedGroup = contributors.groups.users;
oidcClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path;
authorizedKeys = forgeAuthorizedKeys;
authorizedKeys = [
(builtins.readFile ../../keys/contact_at_burrow_net.pub)
(builtins.readFile ../../keys/agent_at_burrow_net.pub)
];
};
services.burrow.forgeRunner = {
enable = true;
sshPrivateKeyFile = "/var/lib/burrow/intake/agent_at_burrow_net_ed25519";
labels = [
"self-hosted"
"linux"
"x86_64"
"burrow-forge"
];
};
services.forgejo-nsc = {
services.burrow.forgejoNsc = {
enable = true;
nscTokenFile = config.age.secrets.burrowForgejoNscToken.path;
nscTokenFile = "/var/lib/burrow/intake/forgejo_nsc_token.txt";
dispatcher = {
configFile = config.age.secrets.burrowForgejoNscDispatcherConfig.path;
configFile = "/var/lib/burrow/intake/forgejo_nsc_dispatcher.yaml";
};
autoscaler = {
enable = true;
configFile = config.age.secrets.burrowForgejoNscAutoscalerConfig.path;
configFile = "/var/lib/burrow/intake/forgejo_nsc_autoscaler.yaml";
};
};
services.burrow.authentik = {
enable = true;
envFile = config.age.secrets.burrowAuthentikEnv.path;
forgejoClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path;
headscaleClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path;
tailscaleClientSecretFile = config.age.secrets.burrowTailscaleOidcClientSecret.path;
defaultExternalApplicationSlug = "tailscale";
googleClientIDFile = config.age.secrets.burrowAuthentikGoogleClientId.path;
googleClientSecretFile = config.age.secrets.burrowAuthentikGoogleClientSecret.path;
googleAccountMapFile = config.age.secrets.burrowAuthentikGoogleAccountMap.path;
googleLoginMode = "redirect";
userGroupName = contributors.groups.users;
adminGroupName = contributors.groups.admins;
tailscaleAccessGroupName = contributors.groups.users;
bootstrapUsers = bootstrapUsers;
linearAcsUrl = "https://api.linear.app/auth/sso/d0ca13dc-ac41-4824-8aab-e0ca352fc3de/acs";
linearAudience = "https://auth.linear.app/sso/d0ca13dc-ac41-4824-8aab-e0ca352fc3de";
linearDefaultRelayState = "https://linear.app/auth/sso/d0ca13dc-ac41-4824-8aab-e0ca352fc3de";
linearScimUrl = "https://api.linear.app/auth/scim/d0ca13dc-ac41-4824-8aab-e0ca352fc3de";
linearScimTokenFile = config.age.secrets.burrowLinearScimToken.path;
linearScimUserIdentifier = "email";
linearOwnerGroupName = linearGroups.owners;
linearAdminGroupName = linearGroups.admins;
linearGuestGroupName = linearGroups.guests;
zulipAccessGroupName = contributors.groups.users;
};
services.burrow.headscale = {
enable = true;
oidcClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path;
bootstrapUsers = headscaleBootstrapUsers;
};
services.burrow.zulip = {
enable = true;
administratorEmail = identities.contact.canonicalEmail;
postgresPasswordFile = config.age.secrets.burrowZulipPostgresPassword.path;
rabbitmqPasswordFile = config.age.secrets.burrowZulipRabbitmqPassword.path;
redisPasswordFile = config.age.secrets.burrowZulipRedisPassword.path;
secretKeyFile = config.age.secrets.burrowZulipSecretKey.path;
};
}

View file

@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMe960j6TC869F6RvElpICxlBauIT3E0uLyy0m7n70ZC

View file

@ -8,15 +8,7 @@ let
blueprintFile = "${blueprintDir}/burrow-authentik.yaml";
postgresVolume = "burrow-authentik-postgresql:/var/lib/postgresql/data";
dataVolume = "burrow-authentik-data:/data";
directorySyncScript = ../../Scripts/authentik-sync-burrow-directory.sh;
forgejoOidcSyncScript = ../../Scripts/authentik-sync-forgejo-oidc.sh;
tailscaleOidcSyncScript = ../../Scripts/authentik-sync-tailscale-oidc.sh;
onePasswordOidcSyncScript = ../../Scripts/authentik-sync-1password-oidc.sh;
zulipSamlSyncScript = ../../Scripts/authentik-sync-zulip-saml.sh;
linearSamlSyncScript = ../../Scripts/authentik-sync-linear-saml.sh;
linearScimSyncScript = ../../Scripts/authentik-sync-linear-scim.sh;
googleSourceSyncScript = ../../Scripts/authentik-sync-google-source.sh;
tailnetAuthFlowSyncScript = ../../Scripts/authentik-sync-tailnet-auth-flow.sh;
authentikBlueprint = pkgs.writeText "burrow-authentik-blueprint.yaml" ''
version: 1
metadata:
@ -38,19 +30,6 @@ let
"email_verified": True,
}
- model: authentik_providers_oauth2.scopemapping
id: burrow-oidc-groups
identifiers:
name: Burrow OIDC Groups
attrs:
name: Burrow OIDC Groups
scope_name: groups
description: Group membership mapping for Burrow
expression: |
return {
"groups": [group.name for group in request.user.ak_groups.all()],
}
- model: authentik_providers_oauth2.oauth2provider
id: burrow-oidc-provider-ts
identifiers:
@ -70,7 +49,6 @@ let
property_mappings:
- !Find [authentik_providers_oauth2.scopemapping, [managed, goauthentik.io/providers/oauth2/scope-openid]]
- !KeyOf burrow-oidc-email
- !KeyOf burrow-oidc-groups
- !Find [authentik_providers_oauth2.scopemapping, [managed, goauthentik.io/providers/oauth2/scope-profile]]
signing_key: !Find [authentik_crypto.certificatekeypair, [name, authentik Self-signed Certificate]]
@ -124,189 +102,6 @@ in
description = "Authentik provider slug for Headscale.";
};
forgejoDomain = lib.mkOption {
type = lib.types.str;
default = "git.burrow.net";
description = "Forgejo public domain used for the bundled OIDC client.";
};
forgejoProviderSlug = lib.mkOption {
type = lib.types.str;
default = "git";
description = "Authentik application slug for Forgejo.";
};
tailscaleProviderSlug = lib.mkOption {
type = lib.types.str;
default = "tailscale";
description = "Authentik application slug for Tailscale custom OIDC sign-in.";
};
tailscaleClientId = lib.mkOption {
type = lib.types.str;
default = "tailscale.burrow.net";
description = "Client ID Authentik should present to Tailscale.";
};
tailscaleClientSecretFile = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Host-local file containing the Authentik Tailscale OIDC client secret.";
};
tailscaleAccessGroupName = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Authentik group that should be allowed to launch the Tailscale application.";
};
defaultExternalApplicationSlug = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Authentik application slug that external users should land on instead of /if/user/.";
};
onePasswordDomain = lib.mkOption {
type = lib.types.str;
default = "burrow-team.1password.com";
description = "1Password team sign-in domain used for Burrow Unlock with SSO.";
};
onePasswordProviderSlug = lib.mkOption {
type = lib.types.str;
default = "onepassword";
description = "Authentik application slug for 1Password Unlock with SSO.";
};
onePasswordClientId = lib.mkOption {
type = lib.types.str;
default = "1password.burrow.net";
description = "Public OIDC client ID Authentik should present to 1Password.";
};
onePasswordRedirectUris = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [
"https://burrow-team.1password.com/sso/oidc/redirect/"
"onepassword://sso/oidc/redirect"
];
description = "Allowed 1Password OIDC redirect URIs.";
};
linearProviderSlug = lib.mkOption {
type = lib.types.str;
default = "linear";
description = "Authentik application slug for Linear SAML.";
};
zulipDomain = lib.mkOption {
type = lib.types.str;
default = "chat.burrow.net";
description = "Public Zulip domain exposed through Authentik SAML.";
};
zulipProviderSlug = lib.mkOption {
type = lib.types.str;
default = "zulip";
description = "Authentik application slug for Zulip SAML.";
};
zulipAcsUrl = lib.mkOption {
type = lib.types.str;
default = "https://${config.services.burrow.authentik.zulipDomain}/complete/saml/";
description = "Zulip SAML ACS URL.";
};
zulipAudience = lib.mkOption {
type = lib.types.str;
default = "https://${config.services.burrow.authentik.zulipDomain}";
description = "Zulip SAML audience/entity identifier.";
};
zulipLaunchUrl = lib.mkOption {
type = lib.types.str;
default = "https://${config.services.burrow.authentik.zulipDomain}/";
description = "Zulip URL exposed in Authentik.";
};
zulipAccessGroupName = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Authentik group allowed to launch Zulip from Burrow SSO surfaces.";
};
linearAcsUrl = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Linear SAML ACS URL.";
};
linearAudience = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Linear SAML audience/entity identifier.";
};
linearLaunchUrl = lib.mkOption {
type = lib.types.str;
default = "https://linear.app/burrownet";
description = "Linear workspace URL exposed in Authentik.";
};
linearDefaultRelayState = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Optional Linear relay state or login URL for IdP-initiated launches.";
};
linearScimUrl = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Linear SCIM base connector URL.";
};
linearScimTokenFile = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Host-local file containing the Linear SCIM bearer token.";
};
linearScimUserIdentifier = lib.mkOption {
type = lib.types.str;
default = "email";
description = "Linear SCIM unique identifier field for users.";
};
linearOwnerGroupName = lib.mkOption {
type = lib.types.str;
default = "linear-owners";
description = "Authentik group name that should map to Linear owners.";
};
linearAdminGroupName = lib.mkOption {
type = lib.types.str;
default = "linear-admins";
description = "Authentik group name that should map to Linear admins.";
};
linearGuestGroupName = lib.mkOption {
type = lib.types.str;
default = "linear-guests";
description = "Authentik group name that should map to Linear guests.";
};
forgejoClientId = lib.mkOption {
type = lib.types.str;
default = "git.burrow.net";
description = "Client ID Authentik should present to Forgejo.";
};
forgejoClientSecretFile = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Host-local file containing the Authentik Forgejo OIDC client secret.";
};
headscaleClientSecretFile = lib.mkOption {
type = lib.types.str;
default = "/var/lib/burrow/intake/authentik_headscale_client_secret.txt";
@ -325,12 +120,6 @@ in
description = "Host-local file containing the Google OAuth client secret for the Authentik source.";
};
googleAccountMapFile = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Optional host-local JSON file mapping external Google accounts onto Burrow Authentik users.";
};
googleSourceSlug = lib.mkOption {
type = lib.types.str;
default = "google";
@ -345,89 +134,6 @@ in
default = "redirect";
description = "Identification-stage behavior for the Google Authentik source.";
};
headscaleAuthenticationFlowSlug = lib.mkOption {
type = lib.types.str;
default = "burrow-tailnet-authentication";
description = "Authentik authentication flow slug used for Burrow Tailnet sign-in.";
};
headscaleAuthenticationFlowName = lib.mkOption {
type = lib.types.str;
default = "Burrow Tailnet Authentication";
description = "Authentik authentication flow name used for Burrow Tailnet sign-in.";
};
headscaleIdentificationStageName = lib.mkOption {
type = lib.types.str;
default = "burrow-tailnet-identification-stage";
description = "Authentik identification stage used for Burrow Tailnet sign-in.";
};
headscalePasswordStageName = lib.mkOption {
type = lib.types.str;
default = "burrow-tailnet-password-stage";
description = "Authentik password stage used for Burrow Tailnet sign-in.";
};
headscaleUserLoginStageName = lib.mkOption {
type = lib.types.str;
default = "burrow-tailnet-user-login-stage";
description = "Authentik user-login stage used for Burrow Tailnet sign-in.";
};
userGroupName = lib.mkOption {
type = lib.types.str;
default = "burrow-users";
description = "Authentik group granted baseline Burrow access.";
};
adminGroupName = lib.mkOption {
type = lib.types.str;
default = "burrow-admins";
description = "Authentik group granted Burrow administrator access.";
};
bootstrapUsers = lib.mkOption {
type = with lib.types; listOf (submodule {
options = {
username = lib.mkOption {
type = str;
description = "Authentik username.";
};
name = lib.mkOption {
type = str;
description = "Display name for the user.";
};
email = lib.mkOption {
type = str;
description = "Canonical email stored in Authentik.";
};
sourceEmail = lib.mkOption {
type = nullOr str;
default = null;
description = "External Google account email that should map onto this Authentik user.";
};
groups = lib.mkOption {
type = listOf str;
default = [ ];
description = "Additional Authentik groups for this user.";
};
isAdmin = lib.mkOption {
type = bool;
default = false;
description = "Whether this user should be in the Burrow admin group.";
};
passwordFile = lib.mkOption {
type = nullOr str;
default = null;
description = "Optional host-local file containing a bootstrap password for this user.";
};
};
});
default = [ ];
description = "Declarative Burrow users to create in Authentik.";
};
};
config = lib.mkIf cfg.enable {
@ -476,20 +182,6 @@ in
exit 1
fi
${lib.optionalString (cfg.forgejoClientSecretFile != null) ''
if [ ! -s ${lib.escapeShellArg cfg.forgejoClientSecretFile} ]; then
echo "Forgejo client secret missing: ${cfg.forgejoClientSecretFile}" >&2
exit 1
fi
''}
${lib.optionalString (cfg.tailscaleClientSecretFile != null) ''
if [ ! -s ${lib.escapeShellArg cfg.tailscaleClientSecretFile} ]; then
echo "Tailscale client secret missing: ${cfg.tailscaleClientSecretFile}" >&2
exit 1
fi
''}
install -d -m 0750 -o root -g root ${runtimeDir} ${blueprintDir}
install -m 0644 -o root -g root ${authentikBlueprint} ${blueprintFile}
@ -516,7 +208,6 @@ AUTHENTIK_SECRET_KEY=$AUTHENTIK_SECRET_KEY
AUTHENTIK_BOOTSTRAP_PASSWORD=$AUTHENTIK_BOOTSTRAP_PASSWORD
AUTHENTIK_BOOTSTRAP_TOKEN=$AUTHENTIK_BOOTSTRAP_TOKEN
AUTHENTIK_BURROW_TS_CLIENT_SECRET=$(read_secret ${lib.escapeShellArg cfg.headscaleClientSecretFile})
${lib.optionalString (cfg.forgejoClientSecretFile != null) "AUTHENTIK_BURROW_FORGEJO_CLIENT_SECRET=$(read_secret ${lib.escapeShellArg cfg.forgejoClientSecretFile})"}
EOF
chown root:root ${envFile}
chmod 0600 ${envFile}
@ -571,16 +262,6 @@ EOF
];
};
systemd.services.podman-burrow-authentik-server.restartTriggers = [
blueprintFile
envFile
];
systemd.services.podman-burrow-authentik-worker.restartTriggers = [
blueprintFile
envFile
];
systemd.services.burrow-authentik-ready = {
description = "Wait for Burrow Authentik to become ready";
after = [ "podman-burrow-authentik-server.service" ];
@ -628,7 +309,7 @@ EOF
cfg.envFile
cfg.googleClientIDFile
cfg.googleClientSecretFile
] ++ lib.optional (cfg.googleAccountMapFile != null) cfg.googleAccountMapFile;
];
path = [
pkgs.bash
pkgs.coreutils
@ -639,6 +320,8 @@ EOF
Type = "oneshot";
User = "root";
Group = "root";
Restart = "on-failure";
RestartSec = 5;
};
script = ''
set -euo pipefail
@ -652,418 +335,11 @@ EOF
export AUTHENTIK_GOOGLE_USER_MATCHING_MODE=email_link
export AUTHENTIK_GOOGLE_CLIENT_ID="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.googleClientIDFile})"
export AUTHENTIK_GOOGLE_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.googleClientSecretFile})"
if [ -n ${lib.escapeShellArg (cfg.googleAccountMapFile or "")} ]; then
export AUTHENTIK_GOOGLE_ACCOUNT_MAP_JSON="$(tr -d '\n' < ${lib.escapeShellArg (cfg.googleAccountMapFile or "/dev/null")})"
else
export AUTHENTIK_GOOGLE_ACCOUNT_MAP_JSON='${builtins.toJSON (map (user: {
source_email = user.sourceEmail;
username = user.username;
email = user.email;
name = user.name;
}) (lib.filter (user: user.sourceEmail != null) cfg.bootstrapUsers))}'
fi
${pkgs.bash}/bin/bash ${googleSourceSyncScript}
'';
};
systemd.services.burrow-authentik-directory = lib.mkIf (cfg.bootstrapUsers != [ ]) {
description = "Reconcile Burrow Authentik users and groups";
after =
[
"burrow-authentik-ready.service"
"network-online.target"
]
++ lib.optionals (cfg.forgejoClientSecretFile != null) [ "burrow-authentik-forgejo-oidc.service" ];
wants =
[
"burrow-authentik-ready.service"
"network-online.target"
]
++ lib.optionals (cfg.forgejoClientSecretFile != null) [ "burrow-authentik-forgejo-oidc.service" ];
wantedBy = [ "multi-user.target" ];
restartTriggers = [
directorySyncScript
cfg.envFile
] ++ lib.concatMap (user: lib.optional (user.passwordFile != null) user.passwordFile) cfg.bootstrapUsers;
path = [
pkgs.bash
pkgs.coreutils
pkgs.curl
pkgs.jq
];
serviceConfig = {
Type = "oneshot";
User = "root";
Group = "root";
};
script = ''
set -euo pipefail
set -a
source ${lib.escapeShellArg cfg.envFile}
set +a
export AUTHENTIK_URL=https://${cfg.domain}
export AUTHENTIK_BURROW_USERS_GROUP=${lib.escapeShellArg cfg.userGroupName}
export AUTHENTIK_BURROW_ADMINS_GROUP=${lib.escapeShellArg cfg.adminGroupName}
export AUTHENTIK_FORGEJO_APPLICATION_SLUG=${lib.escapeShellArg cfg.forgejoProviderSlug}
export AUTHENTIK_BURROW_DIRECTORY_JSON='${builtins.toJSON (map (user: {
inherit (user) username name email isAdmin passwordFile;
groups = user.groups;
}) cfg.bootstrapUsers)}'
${pkgs.bash}/bin/bash ${directorySyncScript}
'';
};
systemd.services.burrow-authentik-tailnet-auth-flow = {
description = "Reconcile the Burrow Tailnet authentication flow";
after =
[
"burrow-authentik-ready.service"
"network-online.target"
]
++ lib.optionals (
cfg.googleClientIDFile != null && cfg.googleClientSecretFile != null
) [ "burrow-authentik-google-source.service" ];
wants =
[
"burrow-authentik-ready.service"
"network-online.target"
]
++ lib.optionals (
cfg.googleClientIDFile != null && cfg.googleClientSecretFile != null
) [ "burrow-authentik-google-source.service" ];
wantedBy = [ "multi-user.target" ];
restartTriggers = [
tailnetAuthFlowSyncScript
cfg.envFile
];
path = [
pkgs.bash
pkgs.coreutils
pkgs.curl
pkgs.jq
];
serviceConfig = {
Type = "oneshot";
User = "root";
Group = "root";
};
script = ''
set -euo pipefail
set -a
source ${lib.escapeShellArg cfg.envFile}
set +a
export AUTHENTIK_URL=https://${cfg.domain}
export AUTHENTIK_TAILNET_PROVIDER_SLUG=${lib.escapeShellArg cfg.headscaleProviderSlug}
export AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON='["${cfg.headscaleProviderSlug}","${cfg.tailscaleProviderSlug}"]'
export AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME=${lib.escapeShellArg cfg.headscaleAuthenticationFlowName}
export AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG=${lib.escapeShellArg cfg.headscaleAuthenticationFlowSlug}
export AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME=${lib.escapeShellArg cfg.headscaleIdentificationStageName}
export AUTHENTIK_TAILNET_PASSWORD_STAGE_NAME=${lib.escapeShellArg cfg.headscalePasswordStageName}
export AUTHENTIK_TAILNET_USER_LOGIN_STAGE_NAME=${lib.escapeShellArg cfg.headscaleUserLoginStageName}
export AUTHENTIK_TAILNET_GOOGLE_SOURCE_SLUG=${lib.escapeShellArg cfg.googleSourceSlug}
${pkgs.bash}/bin/bash ${tailnetAuthFlowSyncScript}
'';
};
systemd.services.burrow-authentik-forgejo-oidc = lib.mkIf (cfg.forgejoClientSecretFile != null) {
description = "Reconcile the Burrow Authentik Forgejo OIDC application";
after = [
"burrow-authentik-ready.service"
"network-online.target"
];
wants = [
"burrow-authentik-ready.service"
"network-online.target"
];
wantedBy = [ "multi-user.target" ];
restartTriggers = [
forgejoOidcSyncScript
cfg.envFile
cfg.forgejoClientSecretFile
];
path = [
pkgs.bash
pkgs.coreutils
pkgs.curl
pkgs.jq
];
serviceConfig = {
Type = "oneshot";
User = "root";
Group = "root";
};
script = ''
set -euo pipefail
set -a
source ${lib.escapeShellArg cfg.envFile}
set +a
export AUTHENTIK_URL=https://${cfg.domain}
export AUTHENTIK_FORGEJO_APPLICATION_SLUG=${lib.escapeShellArg cfg.forgejoProviderSlug}
export AUTHENTIK_FORGEJO_APPLICATION_NAME=burrow.net
export AUTHENTIK_FORGEJO_PROVIDER_NAME=burrow.net
export AUTHENTIK_FORGEJO_CLIENT_ID=${lib.escapeShellArg cfg.forgejoClientId}
export AUTHENTIK_FORGEJO_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.forgejoClientSecretFile})"
export AUTHENTIK_FORGEJO_LAUNCH_URL=https://${cfg.forgejoDomain}/
export AUTHENTIK_FORGEJO_REDIRECT_URIS_JSON='["https://${cfg.forgejoDomain}/user/oauth2/burrow.net/callback","https://${cfg.forgejoDomain}/user/oauth2/authentik/callback","https://${cfg.forgejoDomain}/user/oauth2/GitHub/callback"]'
${pkgs.bash}/bin/bash ${forgejoOidcSyncScript}
'';
};
systemd.services.burrow-authentik-tailscale-oidc = lib.mkIf (cfg.tailscaleClientSecretFile != null) {
description = "Reconcile the Burrow Authentik Tailscale OIDC application";
after = [
"burrow-authentik-ready.service"
"network-online.target"
];
wants = [
"burrow-authentik-ready.service"
"network-online.target"
];
wantedBy = [ "multi-user.target" ];
restartTriggers = [
tailscaleOidcSyncScript
cfg.envFile
cfg.tailscaleClientSecretFile
];
path = [
pkgs.bash
pkgs.coreutils
pkgs.curl
pkgs.jq
];
serviceConfig = {
Type = "oneshot";
User = "root";
Group = "root";
};
script = ''
set -euo pipefail
set -a
source ${lib.escapeShellArg cfg.envFile}
set +a
export AUTHENTIK_URL=https://${cfg.domain}
export AUTHENTIK_TAILSCALE_APPLICATION_SLUG=${lib.escapeShellArg cfg.tailscaleProviderSlug}
export AUTHENTIK_TAILSCALE_APPLICATION_NAME=Tailscale
export AUTHENTIK_TAILSCALE_PROVIDER_NAME=Tailscale
export AUTHENTIK_TAILSCALE_TEMPLATE_SLUG=${lib.escapeShellArg cfg.headscaleProviderSlug}
export AUTHENTIK_TAILSCALE_CLIENT_ID=${lib.escapeShellArg cfg.tailscaleClientId}
export AUTHENTIK_TAILSCALE_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.tailscaleClientSecretFile})"
export AUTHENTIK_TAILSCALE_LAUNCH_URL=https://login.tailscale.com/start/oidc
export AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON='["https://login.tailscale.com/a/oauth_response"]'
${lib.optionalString (cfg.tailscaleAccessGroupName != null) ''
export AUTHENTIK_TAILSCALE_ACCESS_GROUP=${lib.escapeShellArg cfg.tailscaleAccessGroupName}
''}
${lib.optionalString (cfg.defaultExternalApplicationSlug != null) ''
export AUTHENTIK_DEFAULT_EXTERNAL_APPLICATION_SLUG=${lib.escapeShellArg cfg.defaultExternalApplicationSlug}
''}
${pkgs.bash}/bin/bash ${tailscaleOidcSyncScript}
'';
};
systemd.services.burrow-authentik-1password-oidc = {
description = "Reconcile the Burrow Authentik 1Password OIDC application";
after = [
"burrow-authentik-ready.service"
"network-online.target"
];
wants = [
"burrow-authentik-ready.service"
"network-online.target"
];
wantedBy = [ "multi-user.target" ];
restartTriggers = [
onePasswordOidcSyncScript
cfg.envFile
];
path = [
pkgs.bash
pkgs.coreutils
pkgs.curl
pkgs.jq
];
serviceConfig = {
Type = "oneshot";
User = "root";
Group = "root";
};
script = ''
set -euo pipefail
set -a
source ${lib.escapeShellArg cfg.envFile}
set +a
export AUTHENTIK_URL=https://${cfg.domain}
export AUTHENTIK_ONEPASSWORD_APPLICATION_SLUG=${lib.escapeShellArg cfg.onePasswordProviderSlug}
export AUTHENTIK_ONEPASSWORD_APPLICATION_NAME=1Password
export AUTHENTIK_ONEPASSWORD_PROVIDER_NAME=1Password
export AUTHENTIK_ONEPASSWORD_TEMPLATE_SLUG=${lib.escapeShellArg cfg.headscaleProviderSlug}
export AUTHENTIK_ONEPASSWORD_CLIENT_ID=${lib.escapeShellArg cfg.onePasswordClientId}
export AUTHENTIK_ONEPASSWORD_LAUNCH_URL=https://${cfg.onePasswordDomain}/
export AUTHENTIK_ONEPASSWORD_REDIRECT_URIS_JSON='${builtins.toJSON cfg.onePasswordRedirectUris}'
${pkgs.bash}/bin/bash ${onePasswordOidcSyncScript}
'';
};
systemd.services.burrow-authentik-zulip-saml = {
description = "Reconcile the Burrow Authentik Zulip SAML application";
after = [
"burrow-authentik-ready.service"
"network-online.target"
];
wants = [
"burrow-authentik-ready.service"
"network-online.target"
];
wantedBy = [ "multi-user.target" ];
restartTriggers = [
zulipSamlSyncScript
cfg.envFile
];
path = [
pkgs.bash
pkgs.coreutils
pkgs.curl
pkgs.jq
];
serviceConfig = {
Type = "oneshot";
User = "root";
Group = "root";
};
script = ''
set -euo pipefail
set -a
source ${lib.escapeShellArg cfg.envFile}
set +a
export AUTHENTIK_URL=https://${cfg.domain}
export AUTHENTIK_ZULIP_APPLICATION_SLUG=${lib.escapeShellArg cfg.zulipProviderSlug}
export AUTHENTIK_ZULIP_APPLICATION_NAME=Zulip
export AUTHENTIK_ZULIP_PROVIDER_NAME=Zulip
export AUTHENTIK_ZULIP_ACS_URL=${lib.escapeShellArg cfg.zulipAcsUrl}
export AUTHENTIK_ZULIP_AUDIENCE=${lib.escapeShellArg cfg.zulipAudience}
export AUTHENTIK_ZULIP_LAUNCH_URL=${lib.escapeShellArg cfg.zulipLaunchUrl}
${lib.optionalString (cfg.zulipAccessGroupName != null) ''
export AUTHENTIK_ZULIP_ACCESS_GROUP=${lib.escapeShellArg cfg.zulipAccessGroupName}
''}
export AUTHENTIK_ZULIP_ADMIN_GROUP=${lib.escapeShellArg cfg.adminGroupName}
${pkgs.bash}/bin/bash ${zulipSamlSyncScript}
'';
};
systemd.services.burrow-authentik-linear-saml = lib.mkIf (
cfg.linearAcsUrl != null && cfg.linearAudience != null
) {
description = "Reconcile the Burrow Authentik Linear SAML application";
after = [
"burrow-authentik-ready.service"
"network-online.target"
];
wants = [
"burrow-authentik-ready.service"
"network-online.target"
];
wantedBy = [ "multi-user.target" ];
restartTriggers = [
linearSamlSyncScript
cfg.envFile
];
path = [
pkgs.bash
pkgs.coreutils
pkgs.curl
pkgs.jq
];
serviceConfig = {
Type = "oneshot";
User = "root";
Group = "root";
};
script = ''
set -euo pipefail
set -a
source ${lib.escapeShellArg cfg.envFile}
set +a
export AUTHENTIK_URL=https://${cfg.domain}
export AUTHENTIK_LINEAR_APPLICATION_SLUG=${lib.escapeShellArg cfg.linearProviderSlug}
export AUTHENTIK_LINEAR_APPLICATION_NAME=Linear
export AUTHENTIK_LINEAR_PROVIDER_NAME=Linear
export AUTHENTIK_LINEAR_ACS_URL=${lib.escapeShellArg cfg.linearAcsUrl}
export AUTHENTIK_LINEAR_AUDIENCE=${lib.escapeShellArg cfg.linearAudience}
export AUTHENTIK_LINEAR_LAUNCH_URL=${lib.escapeShellArg cfg.linearLaunchUrl}
${lib.optionalString (cfg.linearDefaultRelayState != null) ''
export AUTHENTIK_LINEAR_DEFAULT_RELAY_STATE=${lib.escapeShellArg cfg.linearDefaultRelayState}
''}
${pkgs.bash}/bin/bash ${linearSamlSyncScript}
'';
};
systemd.services.burrow-authentik-linear-scim = lib.mkIf (
cfg.linearScimUrl != null && cfg.linearScimTokenFile != null
) {
description = "Reconcile the Burrow Authentik Linear SCIM provider";
after = [
"burrow-authentik-ready.service"
"burrow-authentik-directory.service"
"burrow-authentik-linear-saml.service"
"network-online.target"
];
wants = [
"burrow-authentik-ready.service"
"burrow-authentik-directory.service"
"burrow-authentik-linear-saml.service"
"network-online.target"
];
wantedBy = [ "multi-user.target" ];
restartTriggers = [
linearScimSyncScript
cfg.envFile
cfg.linearScimTokenFile
];
path = [
pkgs.bash
pkgs.coreutils
pkgs.curl
pkgs.jq
];
serviceConfig = {
Type = "oneshot";
User = "root";
Group = "root";
};
script = ''
set -euo pipefail
set -a
source ${lib.escapeShellArg cfg.envFile}
set +a
export AUTHENTIK_URL=https://${cfg.domain}
export AUTHENTIK_LINEAR_APPLICATION_SLUG=${lib.escapeShellArg cfg.linearProviderSlug}
export AUTHENTIK_LINEAR_SCIM_PROVIDER_NAME="Linear SCIM"
export AUTHENTIK_LINEAR_SCIM_URL=${lib.escapeShellArg cfg.linearScimUrl}
export AUTHENTIK_LINEAR_SCIM_TOKEN_FILE=${lib.escapeShellArg cfg.linearScimTokenFile}
export AUTHENTIK_LINEAR_SCIM_USER_IDENTIFIER=${lib.escapeShellArg cfg.linearScimUserIdentifier}
export AUTHENTIK_LINEAR_OWNER_GROUP=${lib.escapeShellArg cfg.linearOwnerGroupName}
export AUTHENTIK_LINEAR_ADMIN_GROUP=${lib.escapeShellArg cfg.linearAdminGroupName}
export AUTHENTIK_LINEAR_GUEST_GROUP=${lib.escapeShellArg cfg.linearGuestGroupName}
${pkgs.bash}/bin/bash ${linearScimSyncScript}
'';
};
services.caddy.virtualHosts."${cfg.domain}".extraConfig = ''
encode gzip zstd
reverse_proxy 127.0.0.1:${toString cfg.port}

View file

@ -5,10 +5,8 @@ let
runnerPkg = pkgs.forgejo-runner;
stateDir = cfg.stateDir;
runnerFile = "${stateDir}/.runner";
registrationFingerprintFile = "${stateDir}/.runner-registration-fingerprint";
configFile = "${stateDir}/runner.yaml";
labelsCsv = lib.concatStringsSep "," (map (label: "${label}:host") cfg.labels);
registrationFingerprint = builtins.hashString "sha256" "${cfg.instanceUrl}\n${cfg.name}\n${labelsCsv}";
sshPrivateKeyFile = cfg.sshPrivateKeyFile or "";
in
{
@ -143,17 +141,6 @@ EOF
chown ${cfg.user}:${cfg.group} ${configFile}
chmod 0640 ${configFile}
expected_fingerprint=${lib.escapeShellArg registrationFingerprint}
if [ -s ${runnerFile} ]; then
current_fingerprint=""
if [ -s ${registrationFingerprintFile} ]; then
current_fingerprint="$(tr -d '\r\n' < ${registrationFingerprintFile})"
fi
if [ "${"$"}current_fingerprint" != "${"$"}expected_fingerprint" ]; then
rm -f ${runnerFile} ${registrationFingerprintFile}
fi
fi
install -d -m 0700 -o ${cfg.user} -g ${cfg.group} ${stateDir}/.ssh
${pkgs.util-linux}/bin/runuser -u ${cfg.user} -- \
${pkgs.git}/bin/git config --global user.name ${lib.escapeShellArg cfg.gitUserName}
@ -190,10 +177,6 @@ EOF
--name ${lib.escapeShellArg cfg.name} \
--labels ${lib.escapeShellArg labelsCsv} \
--config ${configFile}
printf '%s\n' "${"$"}expected_fingerprint" > ${registrationFingerprintFile}
chown ${cfg.user}:${cfg.group} ${registrationFingerprintFile}
chmod 0640 ${registrationFingerprintFile}
fi
'';
};
@ -208,7 +191,6 @@ EOF
User = cfg.user;
Group = cfg.group;
WorkingDirectory = stateDir;
Environment = [ "BURROW_RUNNER_REGISTRATION_FINGERPRINT=${registrationFingerprint}" ];
Restart = "on-failure";
RestartSec = 2;
ExecStart = pkgs.writeShellScript "burrow-forgejo-runner" ''

View file

@ -68,77 +68,6 @@ in
description = "Host-local path to the plaintext bootstrap password file for the initial Forgejo admin.";
};
oidcDisplayName = lib.mkOption {
type = lib.types.str;
default = "burrow.net";
description = "Login button label for the Forgejo OIDC provider.";
};
oidcClientId = lib.mkOption {
type = lib.types.str;
default = "git.burrow.net";
description = "OIDC client ID that Forgejo should use against Authentik.";
};
oidcClientSecretFile = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Host-local path to the Forgejo OIDC client secret.";
};
oidcDiscoveryUrl = lib.mkOption {
type = lib.types.str;
default = "https://auth.burrow.net/application/o/git/.well-known/openid-configuration";
description = "OpenID Connect discovery URL for the Forgejo login source.";
};
oidcScopes = lib.mkOption {
type = with lib.types; listOf str;
default = [
"openid"
"profile"
"email"
"groups"
];
description = "OIDC scopes requested from Authentik.";
};
oidcGroupClaimName = lib.mkOption {
type = lib.types.str;
default = "groups";
description = "OIDC claim name that carries group membership.";
};
oidcAdminGroup = lib.mkOption {
type = lib.types.str;
default = "burrow-admins";
description = "OIDC group that should grant Forgejo admin access.";
};
oidcRestrictedGroup = lib.mkOption {
type = lib.types.str;
default = "burrow-users";
description = "OIDC group that is required to log into Forgejo.";
};
oidcAutoRegistration = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Whether Forgejo should automatically create users for new OIDC sign-ins.";
};
oidcAccountLinking = lib.mkOption {
type = lib.types.enum [ "disabled" "login" "auto" ];
default = "auto";
description = "How Forgejo should link existing local accounts for OIDC sign-ins.";
};
oidcUsernameSource = lib.mkOption {
type = lib.types.enum [ "userid" "nickname" "email" ];
default = "email";
description = "Which OIDC claim Forgejo should use to derive usernames for auto-registration.";
};
authorizedKeys = lib.mkOption {
type = with lib.types; listOf str;
default = [ ];
@ -203,9 +132,6 @@ in
service = {
DISABLE_REGISTRATION = true;
ENABLE_INTERNAL_SIGNIN = false;
ENABLE_BASIC_AUTHENTICATION = false;
SHOW_REGISTRATION_BUTTON = false;
REQUIRE_SIGNIN_VIEW = false;
DEFAULT_ALLOW_CREATE_ORGANIZATION = false;
ENABLE_NOTIFY_MAIL = false;
@ -222,13 +148,6 @@ in
ENABLE_OPENID_SIGNUP = false;
};
oauth2_client = {
OPENID_CONNECT_SCOPES = lib.concatStringsSep " " (lib.subtractLists [ "openid" ] cfg.oidcScopes);
ENABLE_AUTO_REGISTRATION = cfg.oidcAutoRegistration;
ACCOUNT_LINKING = cfg.oidcAccountLinking;
USERNAME = cfg.oidcUsernameSource;
};
actions = {
ENABLED = true;
};
@ -256,22 +175,13 @@ in
reverse_proxy 127.0.0.1:${toString config.services.forgejo.settings.server.HTTP_PORT}
'';
"${cfg.siteDomain}".extraConfig = ''
encode gzip zstd
@oidcConfig path /.well-known/openid-configuration
redir @oidcConfig https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.tailscaleProviderSlug}/.well-known/openid-configuration 308
@tailnetConfig path /.well-known/burrow-tailnet
header @tailnetConfig Content-Type application/json
respond @tailnetConfig "{\"domain\":\"${cfg.siteDomain}\",\"provider\":\"headscale\",\"authority\":\"https://${config.services.burrow.headscale.domain}\",\"oidc_issuer\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.headscaleProviderSlug}/\"}" 200
@webfinger path /.well-known/webfinger
header @webfinger Content-Type application/jrd+json
respond @webfinger "{\"subject\":\"{query.resource}\",\"links\":[{\"rel\":\"http://openid.net/specs/connect/1.0/issuer\",\"href\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.tailscaleProviderSlug}/\"},{\"rel\":\"https://burrow.net/rel/tailnet-control-server\",\"href\":\"https://${config.services.burrow.headscale.domain}\"}]}" 200
@root path /
redir @root ${homeRepoUrl} 308
respond 404
'';
}
// lib.optionalAttrs (
config.services.forgejo-nsc.enable && config.services.forgejo-nsc.autoscaler.enable
config.services.burrow.forgejoNsc.enable && config.services.burrow.forgejoNsc.autoscaler.enable
) {
"${cfg.nscAutoscalerDomain}".extraConfig = ''
encode gzip zstd
@ -333,117 +243,5 @@ in
fi
'';
};
systemd.services.burrow-forgejo-oidc-bootstrap = lib.mkIf (cfg.oidcClientSecretFile != null) {
description = "Seed the Burrow Forgejo OIDC login source";
after = [
"forgejo.service"
"postgresql.service"
] ++ lib.optionals config.services.burrow.authentik.enable [
"burrow-authentik-ready.service"
];
wants = lib.optionals config.services.burrow.authentik.enable [
"burrow-authentik-ready.service"
];
requires = [
"forgejo.service"
"postgresql.service"
];
wantedBy = [ "multi-user.target" ];
restartTriggers = [
cfg.oidcClientSecretFile
];
path = [
pkgs.coreutils
pkgs.gnugrep
pkgs.jq
pkgs.postgresql
];
serviceConfig = {
Type = "oneshot";
User = forgejoCfg.user;
Group = forgejoCfg.group;
WorkingDirectory = forgejoCfg.stateDir;
};
script = ''
set -euo pipefail
if [ ! -s ${lib.escapeShellArg cfg.oidcClientSecretFile} ]; then
echo "Forgejo OIDC client secret missing: ${cfg.oidcClientSecretFile}" >&2
exit 1
fi
ready=0
for attempt in $(seq 1 60); do
if ${pkgs.postgresql}/bin/psql -h /run/postgresql -U forgejo forgejo -tAc \
"SELECT 1 FROM pg_tables WHERE schemaname='public' AND tablename='login_source';" \
| grep -q 1; then
ready=1
break
fi
sleep 1
done
if [ "$ready" -ne 1 ]; then
echo "Forgejo login_source table did not become ready" >&2
exit 1
fi
oidc_secret="$(${pkgs.coreutils}/bin/tr -d '\r\n' < ${lib.escapeShellArg cfg.oidcClientSecretFile})"
if [ -z "$oidc_secret" ]; then
echo "Forgejo OIDC client secret is empty" >&2
exit 1
fi
cfg_json="$(${pkgs.jq}/bin/jq -nc \
--arg client_id ${lib.escapeShellArg cfg.oidcClientId} \
--arg client_secret "$oidc_secret" \
--arg discovery_url ${lib.escapeShellArg cfg.oidcDiscoveryUrl} \
--argjson scopes '${builtins.toJSON cfg.oidcScopes}' \
--arg group_claim_name ${lib.escapeShellArg cfg.oidcGroupClaimName} \
--arg admin_group ${lib.escapeShellArg cfg.oidcAdminGroup} \
--arg restricted_group ${lib.escapeShellArg cfg.oidcRestrictedGroup} \
'{
Provider: "openidConnect",
ClientID: $client_id,
ClientSecret: $client_secret,
OpenIDConnectAutoDiscoveryURL: $discovery_url,
CustomURLMapping: null,
IconURL: "",
Scopes: $scopes,
AttributeSSHPublicKey: "",
RequiredClaimName: "",
RequiredClaimValue: "",
GroupClaimName: $group_claim_name,
AdminGroup: $admin_group,
GroupTeamMap: "",
GroupTeamMapRemoval: false,
RestrictedGroup: $restricted_group
}')"
${pkgs.postgresql}/bin/psql -v ON_ERROR_STOP=1 \
-h /run/postgresql -U forgejo forgejo \
-v oidc_name=${lib.escapeShellArg cfg.oidcDisplayName} \
-v cfg_json="$cfg_json" <<'SQL'
INSERT INTO login_source (
type, name, is_active, is_sync_enabled, cfg, created_unix, updated_unix
) VALUES (
6,
:'oidc_name',
TRUE,
FALSE,
:'cfg_json',
EXTRACT(EPOCH FROM NOW())::BIGINT,
EXTRACT(EPOCH FROM NOW())::BIGINT
)
ON CONFLICT (name) DO UPDATE SET
type = EXCLUDED.type,
is_active = TRUE,
is_sync_enabled = FALSE,
cfg = EXCLUDED.cfg,
updated_unix = EXCLUDED.updated_unix;
SQL
'';
};
};
}

View file

@ -0,0 +1,234 @@
{ config, lib, pkgs, self, ... }:
let
inherit (lib)
mkEnableOption
mkIf
mkOption
types
mkAfter
mkDefault
optional
optionalAttrs
optionalString
;
cfg = config.services.burrow.forgejoNsc;
dispatcherRuntimeConfig = "${cfg.stateDir}/dispatcher.yaml";
autoscalerRuntimeConfig = "${cfg.stateDir}/autoscaler.yaml";
pendingCheck = configPath: pkgs.writeShellScript "forgejo-nsc-check-pending" ''
set -euo pipefail
if ${pkgs.gnugrep}/bin/grep -q 'PENDING-' '${configPath}'; then
echo "forgejo-nsc config still contains placeholder values (PENDING-); update ${configPath} before starting." >&2
exit 1
fi
'';
nscTokenPath = "${cfg.stateDir}/nsc.token";
tokenSync = optionalString (cfg.nscTokenFile != null) ''
install -m 600 ${lib.escapeShellArg cfg.nscTokenFile} ${lib.escapeShellArg nscTokenPath}
chown ${cfg.user}:${cfg.group} ${nscTokenPath}
chmod 600 ${nscTokenPath}
'';
dispatcherConfigSync = optionalString (cfg.dispatcher.configFile != null) ''
install -m 400 ${lib.escapeShellArg cfg.dispatcher.configFile} ${lib.escapeShellArg dispatcherRuntimeConfig}
chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg dispatcherRuntimeConfig}
chmod 400 ${lib.escapeShellArg dispatcherRuntimeConfig}
'';
autoscalerConfigSync = optionalString (cfg.autoscaler.configFile != null) ''
install -m 400 ${lib.escapeShellArg cfg.autoscaler.configFile} ${lib.escapeShellArg autoscalerRuntimeConfig}
chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg autoscalerRuntimeConfig}
chmod 400 ${lib.escapeShellArg autoscalerRuntimeConfig}
'';
dispatcherEnv =
cfg.extraEnv
// optionalAttrs (cfg.nscTokenFile != null) { NSC_TOKEN_FILE = nscTokenPath; }
// optionalAttrs (cfg.nscTokenSpecFile != null) { NSC_TOKEN_SPEC_FILE = cfg.nscTokenSpecFile; }
// optionalAttrs (cfg.nscEndpoint != null) { NSC_ENDPOINT = cfg.nscEndpoint; };
in {
options.services.burrow.forgejoNsc = {
enable = mkEnableOption "Forgejo Namespace Cloud runner dispatcher";
user = mkOption {
type = types.str;
default = "forgejo-nsc";
description = "System user that runs the forgejo-nsc services.";
};
group = mkOption {
type = types.str;
default = "forgejo-nsc";
description = "System group for the forgejo-nsc services.";
};
stateDir = mkOption {
type = types.str;
default = "/var/lib/forgejo-nsc";
description = "State directory for the dispatcher/autoscaler.";
};
nscTokenFile = mkOption {
type = types.nullOr types.str;
default = null;
description = "Optional NSC token file (exported as NSC_TOKEN_FILE).";
};
nscTokenSpecFile = mkOption {
type = types.nullOr types.str;
default = null;
description = "Optional NSC token spec file (exported as NSC_TOKEN_SPEC_FILE).";
};
nscEndpoint = mkOption {
type = types.nullOr types.str;
default = null;
description = "Optional NSC endpoint override (exported as NSC_ENDPOINT).";
};
extraEnv = mkOption {
type = types.attrsOf types.str;
default = { };
description = "Extra environment variables injected into the services.";
};
nscPackage = mkOption {
type = types.nullOr types.package;
default = self.packages.${pkgs.stdenv.hostPlatform.system}.nsc or null;
description = "Optional nsc CLI package added to the service PATH.";
};
dispatcher = {
enable = mkOption {
type = types.bool;
default = true;
description = "Enable the forgejo-nsc dispatcher service.";
};
package = mkOption {
type = types.package;
default = self.packages.${pkgs.stdenv.hostPlatform.system}.forgejo-nsc-dispatcher;
description = "Package providing the forgejo-nsc dispatcher binary.";
};
configFile = mkOption {
type = types.nullOr types.str;
default = null;
description = "Host-local YAML config file for the dispatcher.";
};
allowPending = mkOption {
type = types.bool;
default = false;
description = "Allow placeholder values (PENDING-) in the dispatcher config.";
};
};
autoscaler = {
enable = mkOption {
type = types.bool;
default = false;
description = "Enable the forgejo-nsc autoscaler service.";
};
package = mkOption {
type = types.package;
default = self.packages.${pkgs.stdenv.hostPlatform.system}.forgejo-nsc-autoscaler;
description = "Package providing the forgejo-nsc autoscaler binary.";
};
configFile = mkOption {
type = types.nullOr types.str;
default = null;
description = "Host-local YAML config file for the autoscaler.";
};
allowPending = mkOption {
type = types.bool;
default = false;
description = "Allow placeholder values (PENDING-) in the autoscaler config.";
};
};
};
config = mkIf cfg.enable {
assertions = [
{
assertion = (!cfg.dispatcher.enable) || cfg.dispatcher.configFile != null;
message = "services.burrow.forgejoNsc.dispatcher.configFile must be set when the dispatcher is enabled.";
}
{
assertion = (!cfg.autoscaler.enable) || cfg.autoscaler.configFile != null;
message = "services.burrow.forgejoNsc.autoscaler.configFile must be set when the autoscaler is enabled.";
}
];
users.groups.${cfg.group} = { };
users.users.${cfg.user} = {
uid = mkDefault 2011;
isSystemUser = true;
group = cfg.group;
description = "Forgejo Namespace Cloud runner services";
home = cfg.stateDir;
createHome = true;
shell = pkgs.bashInteractive;
};
systemd.tmpfiles.rules = mkAfter [
"d ${cfg.stateDir} 0750 ${cfg.user} ${cfg.group} - -"
];
systemd.services.forgejo-nsc-dispatcher = mkIf cfg.dispatcher.enable {
description = "Forgejo Namespace Cloud dispatcher";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ];
wants = [ "network-online.target" ];
unitConfig.ConditionPathExists =
optional (cfg.dispatcher.configFile != null) cfg.dispatcher.configFile
++ optional (cfg.nscTokenFile != null) cfg.nscTokenFile;
serviceConfig = {
Type = "simple";
User = cfg.user;
Group = cfg.group;
WorkingDirectory = cfg.stateDir;
ExecStart = "${cfg.dispatcher.package}/bin/forgejo-nsc-dispatcher --config ${dispatcherRuntimeConfig}";
Restart = "on-failure";
RestartSec = 5;
};
path = lib.optional (cfg.nscPackage != null) cfg.nscPackage;
environment = dispatcherEnv;
preStart = lib.concatStringsSep "\n" (lib.filter (s: s != "") [
(optionalString (!cfg.dispatcher.allowPending) (pendingCheck cfg.dispatcher.configFile))
dispatcherConfigSync
tokenSync
]);
};
systemd.services.forgejo-nsc-autoscaler = mkIf cfg.autoscaler.enable {
description = "Forgejo Namespace Cloud autoscaler";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" "forgejo-nsc-dispatcher.service" ];
wants = [ "network-online.target" ];
unitConfig.ConditionPathExists =
optional (cfg.autoscaler.configFile != null) cfg.autoscaler.configFile
++ optional (cfg.nscTokenFile != null) cfg.nscTokenFile;
serviceConfig = {
Type = "simple";
User = cfg.user;
Group = cfg.group;
WorkingDirectory = cfg.stateDir;
ExecStart = "${cfg.autoscaler.package}/bin/forgejo-nsc-autoscaler --config ${autoscalerRuntimeConfig}";
Restart = "on-failure";
RestartSec = 5;
};
path = lib.optional (cfg.nscPackage != null) cfg.nscPackage;
environment = dispatcherEnv;
preStart = lib.concatStringsSep "\n" (lib.filter (s: s != "") [
(optionalString (!cfg.autoscaler.allowPending) (pendingCheck cfg.autoscaler.configFile))
autoscalerConfigSync
tokenSync
]);
};
};
}

View file

@ -1,587 +0,0 @@
{ config, lib, pkgs, ... }:
let
cfg = config.services.burrow.zulip;
realmSignupDomain =
let
parts = lib.splitString "@" cfg.administratorEmail;
in
if builtins.length parts == 2 then builtins.elemAt parts 1 else cfg.domain;
yamlFormat = pkgs.formats.yaml { };
composeFile = yamlFormat.generate "burrow-zulip-compose.yaml" {
services = {
zulip = {
image = "ghcr.io/zulip/zulip-server:11.6-1";
restart = "unless-stopped";
network_mode = "host";
secrets = [
"zulip__postgres_password"
"zulip__rabbitmq_password"
"zulip__redis_password"
"zulip__secret_key"
"zulip__email_password"
];
environment = {
SETTING_REMOTE_POSTGRES_HOST = "127.0.0.1";
SETTING_MEMCACHED_LOCATION = "127.0.0.1:11211";
SETTING_RABBITMQ_HOST = "127.0.0.1";
SETTING_REDIS_HOST = "127.0.0.1";
};
volumes = [ "${cfg.dataDir}/data:/data:rw" ];
ulimits.nofile = {
soft = 1000000;
hard = 1048576;
};
};
};
};
in
{
options.services.burrow.zulip = {
enable = lib.mkEnableOption "the Burrow Zulip deployment";
domain = lib.mkOption {
type = lib.types.str;
default = "chat.burrow.net";
description = "Public Zulip domain.";
};
port = lib.mkOption {
type = lib.types.port;
default = 18090;
description = "Local loopback port Caddy should proxy to.";
};
dataDir = lib.mkOption {
type = lib.types.str;
default = "/var/lib/burrow/zulip";
description = "Host directory storing Zulip compose state and generated runtime files.";
};
administratorEmail = lib.mkOption {
type = lib.types.str;
default = "contact@burrow.net";
description = "Operational Zulip administrator email.";
};
realmName = lib.mkOption {
type = lib.types.str;
default = "Burrow";
description = "Initial Zulip organization name for single-tenant bootstrap.";
};
realmOwnerName = lib.mkOption {
type = lib.types.str;
default = "Burrow";
description = "Display name used for the initial Zulip organization owner.";
};
authentikDomain = lib.mkOption {
type = lib.types.str;
default = config.services.burrow.authentik.domain;
description = "Authentik domain Zulip should trust as its SAML IdP.";
};
authentikProviderSlug = lib.mkOption {
type = lib.types.str;
default = config.services.burrow.authentik.zulipProviderSlug;
description = "Authentik SAML application slug used for Zulip.";
};
postgresPasswordFile = lib.mkOption {
type = lib.types.str;
description = "File containing the Zulip PostgreSQL password.";
};
rabbitmqPasswordFile = lib.mkOption {
type = lib.types.str;
description = "File containing the Zulip RabbitMQ password.";
};
redisPasswordFile = lib.mkOption {
type = lib.types.str;
description = "File containing the Zulip Redis password.";
};
secretKeyFile = lib.mkOption {
type = lib.types.str;
description = "File containing the Zulip Django secret key.";
};
};
config = lib.mkIf cfg.enable {
environment.systemPackages = [
pkgs.podman
pkgs.podman-compose
];
services.postgresql = {
ensureDatabases = [ "zulip" ];
ensureUsers = [
{
name = "zulip";
ensureDBOwnership = true;
}
];
settings = {
listen_addresses = lib.mkDefault "127.0.0.1";
password_encryption = lib.mkDefault "scram-sha-256";
};
authentication = lib.mkAfter ''
host zulip zulip 127.0.0.1/32 scram-sha-256
'';
};
services.postgresqlBackup = {
enable = true;
backupAll = false;
databases = [ "zulip" ];
};
services.memcached = {
enable = true;
listen = "127.0.0.1";
port = 11211;
extraOptions = [ "-U 0" ];
};
services.redis.servers.zulip = {
enable = true;
bind = "127.0.0.1";
port = 6379;
requirePassFile = cfg.redisPasswordFile;
};
services.rabbitmq = {
enable = true;
listenAddress = "127.0.0.1";
port = 5672;
};
services.caddy.virtualHosts."${cfg.domain}".extraConfig = ''
encode gzip zstd
reverse_proxy 127.0.0.1:${toString cfg.port}
'';
systemd.tmpfiles.rules = [
"d ${cfg.dataDir} 0755 root root - -"
"d ${cfg.dataDir}/data 0755 root root - -"
"d ${cfg.dataDir}/data/logs 0755 root root - -"
"d ${cfg.dataDir}/data/logs/emails 0755 root root - -"
"d ${cfg.dataDir}/data/secrets 0700 root root - -"
"d ${cfg.dataDir}/secrets 0700 root root - -"
"d ${cfg.dataDir}/logs 0755 root root - -"
];
systemd.services.burrow-zulip-postgres-bootstrap = {
description = "Bootstrap PostgreSQL role for Burrow Zulip";
after = [ "postgresql.service" ];
wants = [ "postgresql.service" ];
requiredBy = [ "burrow-zulip.service" ];
before = [ "burrow-zulip.service" ];
path = [
config.services.postgresql.package
pkgs.bash
pkgs.coreutils
pkgs.python3
pkgs.util-linux
];
serviceConfig = {
Type = "oneshot";
User = "root";
Group = "root";
};
script = ''
set -euo pipefail
db_password="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.postgresPasswordFile})"
db_password_sql="$(printf '%s' "$db_password" | python3 -c "import sys; print(sys.stdin.read().replace(chr(39), chr(39) * 2), end=\"\")")"
setup_sql="$(mktemp)"
trap 'rm -f "$setup_sql"' EXIT
cat > "$setup_sql" <<SQL
DO \$\$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'zulip') THEN
CREATE ROLE zulip LOGIN;
END IF;
END
\$\$;
ALTER ROLE zulip WITH LOGIN PASSWORD '$db_password_sql';
SQL
chmod 0644 "$setup_sql"
${pkgs.util-linux}/bin/runuser -u postgres -- psql -v ON_ERROR_STOP=1 -f "$setup_sql"
'';
};
systemd.services.burrow-zulip-rabbitmq-bootstrap = {
description = "Bootstrap RabbitMQ user for Burrow Zulip";
after = [ "rabbitmq.service" ];
wants = [ "rabbitmq.service" ];
requiredBy = [ "burrow-zulip.service" ];
before = [ "burrow-zulip.service" ];
path = [
config.services.rabbitmq.package
pkgs.bash
pkgs.coreutils
pkgs.gawk
pkgs.gnugrep
];
serviceConfig = {
Type = "oneshot";
User = "root";
Group = "root";
};
script = ''
set -euo pipefail
rabbit_password="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.rabbitmqPasswordFile})"
export HOME=${config.services.rabbitmq.dataDir}
rabbitmqctl await_startup
if rabbitmqctl list_users -q | awk '{ print $1 }' | grep -qx zulip; then
rabbitmqctl change_password zulip "$rabbit_password"
else
rabbitmqctl add_user zulip "$rabbit_password"
fi
rabbitmqctl set_permissions -p / zulip '.*' '.*' '.*'
if rabbitmqctl list_users -q | awk '{ print $1 }' | grep -qx guest; then
rabbitmqctl delete_user guest
fi
'';
};
systemd.services.burrow-zulip-runtime = {
description = "Prepare Burrow Zulip compose and SAML runtime files";
after = [
"postgresql.service"
"redis-zulip.service"
"memcached.service"
"rabbitmq.service"
"burrow-zulip-postgres-bootstrap.service"
"burrow-zulip-rabbitmq-bootstrap.service"
"burrow-authentik-ready.service"
"burrow-authentik-zulip-saml.service"
"network-online.target"
];
wants = [
"postgresql.service"
"redis-zulip.service"
"memcached.service"
"rabbitmq.service"
"burrow-zulip-postgres-bootstrap.service"
"burrow-zulip-rabbitmq-bootstrap.service"
"burrow-authentik-ready.service"
"burrow-authentik-zulip-saml.service"
"network-online.target"
];
requiredBy = [ "burrow-zulip.service" ];
before = [ "burrow-zulip.service" ];
path = [
pkgs.bash
pkgs.coreutils
pkgs.curl
pkgs.python3
];
restartTriggers = [
composeFile
cfg.postgresPasswordFile
cfg.rabbitmqPasswordFile
cfg.redisPasswordFile
cfg.secretKeyFile
];
serviceConfig = {
Type = "oneshot";
User = "root";
Group = "root";
};
script = ''
set -euo pipefail
install -d -m 0755 ${lib.escapeShellArg cfg.dataDir}
install -d -m 0755 ${lib.escapeShellArg "${cfg.dataDir}/data"}
install -d -m 0755 ${lib.escapeShellArg "${cfg.dataDir}/data/logs"}
install -d -m 0755 ${lib.escapeShellArg "${cfg.dataDir}/data/logs/emails"}
install -d -m 0700 ${lib.escapeShellArg "${cfg.dataDir}/data/secrets"}
install -d -m 0700 ${lib.escapeShellArg "${cfg.dataDir}/secrets"}
install -d -m 0755 ${lib.escapeShellArg "${cfg.dataDir}/logs"}
install -m 0644 ${composeFile} ${lib.escapeShellArg "${cfg.dataDir}/compose.yaml"}
: > ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"}
chmod 0600 ${lib.escapeShellArg "${cfg.dataDir}/secrets/email-password"}
metadata_xml="$(${pkgs.curl}/bin/curl -fsSL https://${cfg.authentikDomain}/application/saml/${cfg.authentikProviderSlug}/metadata/)"
saml_cert="$(printf '%s' "$metadata_xml" | ${pkgs.python3}/bin/python3 -c '
import xml.etree.ElementTree as ET, sys
xml = sys.stdin.read()
root = ET.fromstring(xml)
ns = {"ds": "http://www.w3.org/2000/09/xmldsig#"}
node = root.find(".//ds:X509Certificate", ns)
if node is None or not (node.text or "").strip():
raise SystemExit("missing X509 certificate in Authentik metadata")
print((node.text or "").strip())
')"
cat > ${lib.escapeShellArg "${cfg.dataDir}/compose.override.yaml"} <<EOF
secrets:
zulip__postgres_password:
file: ${cfg.postgresPasswordFile}
zulip__rabbitmq_password:
file: ${cfg.rabbitmqPasswordFile}
zulip__redis_password:
file: ${cfg.redisPasswordFile}
zulip__secret_key:
file: ${cfg.secretKeyFile}
zulip__email_password:
file: ${cfg.dataDir}/secrets/email-password
services:
zulip:
environment:
SETTING_EXTERNAL_HOST: "${cfg.domain}"
SETTING_ZULIP_ADMINISTRATOR: "${cfg.administratorEmail}"
TRUST_GATEWAY_IP: "True"
SETTING_SEND_LOGIN_EMAILS: "False"
ZULIP_AUTH_BACKENDS: "SAMLAuthBackend"
CONFIG_application_server__http_only: true
CONFIG_application_server__nginx_listen_port: ${toString cfg.port}
CONFIG_application_server__queue_workers_multiprocess: false
ZULIP_CUSTOM_SETTINGS: |
EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
EMAIL_FILE_PATH = "/data/logs/emails"
EXTERNAL_URI_SCHEME = "https://"
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
USE_X_FORWARDED_HOST = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
CSRF_TRUSTED_ORIGINS = ["https://${cfg.domain}"]
SOCIAL_AUTH_REDIRECT_IS_HTTPS = True
SOCIAL_AUTH_SAML_REDIRECT_IS_HTTPS = True
SOCIAL_AUTH_SAML_SP_ENTITY_ID = "https://${cfg.domain}"
SOCIAL_AUTH_SAML_SP_EXTRA = {
"assertionConsumerService": {
"url": "https://${cfg.domain}/complete/saml/",
"binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST",
},
}
SOCIAL_AUTH_SAML_ORG_INFO = {
"en-US": {
"displayname": "Burrow Zulip",
"name": "zulip",
"url": "https://${cfg.domain}",
},
}
SOCIAL_AUTH_SAML_ENABLED_IDPS = {
"authentik": {
"entity_id": "https://${cfg.authentikDomain}",
"url": "https://${cfg.authentikDomain}/application/saml/${cfg.authentikProviderSlug}/sso/binding/redirect/",
"display_name": "burrow.net",
"auto_signup": True,
"extra_attrs": ["zulip_role"],
"x509cert": """$saml_cert""",
"attr_user_permanent_id": "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress",
"attr_username": "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress",
"attr_email": "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress",
"attr_first_name": "firstName",
"attr_last_name": "lastName",
},
}
SOCIAL_AUTH_SYNC_ATTRS_DICT = {
"": {
"saml": {
"role": "zulip_role",
},
},
}
EOF
'';
};
systemd.services.burrow-zulip = {
description = "Run Burrow Zulip with host-managed dependencies";
after = [
"burrow-zulip-runtime.service"
"network-online.target"
];
wants = [
"burrow-zulip-runtime.service"
"network-online.target"
];
wantedBy = [ "multi-user.target" ];
path = [
pkgs.bash
pkgs.coreutils
pkgs.gawk
pkgs.gnugrep
pkgs.openssl
pkgs.podman
pkgs.podman-compose
];
restartTriggers = [
composeFile
cfg.postgresPasswordFile
cfg.rabbitmqPasswordFile
cfg.redisPasswordFile
cfg.secretKeyFile
];
serviceConfig = {
Type = "oneshot";
User = "root";
Group = "root";
WorkingDirectory = cfg.dataDir;
RemainAfterExit = true;
TimeoutStopSec = "20s";
ExecStop = "${pkgs.bash}/bin/bash -lc 'set -euo pipefail; if ${pkgs.podman}/bin/podman container exists burrow-zulip_zulip_1; then ${pkgs.podman}/bin/podman stop --ignore --time 10 burrow-zulip_zulip_1 >/dev/null || true; ${pkgs.podman}/bin/podman rm -f --ignore burrow-zulip_zulip_1 >/dev/null || true; fi'";
};
script = ''
set -euo pipefail
cd ${lib.escapeShellArg cfg.dataDir}
compose() {
${pkgs.podman-compose}/bin/podman-compose -p burrow-zulip "$@"
}
ensure_zulip_data_layout() {
local zulip_data_dir=${lib.escapeShellArg "${cfg.dataDir}/data"}
install -d -m 0755 "$zulip_data_dir/logs"
install -d -m 0755 "$zulip_data_dir/logs/emails"
install -d -m 0700 "$zulip_data_dir/secrets"
chown 1000:1000 "$zulip_data_dir/logs" "$zulip_data_dir/logs/emails" "$zulip_data_dir/secrets"
if [ ! -s "$zulip_data_dir/secrets/bootstrap-owner-password" ]; then
umask 077
openssl rand -base64 24 > "$zulip_data_dir/secrets/bootstrap-owner-password"
fi
chown 1000:1000 "$zulip_data_dir/secrets/bootstrap-owner-password"
chmod 0600 "$zulip_data_dir/secrets/bootstrap-owner-password"
}
wait_for_zulip_supervisor() {
local attempts=0
while ! podman exec burrow-zulip_zulip_1 supervisorctl status >/dev/null 2>&1; do
attempts=$((attempts + 1))
if [ "$attempts" -ge 90 ]; then
echo "error: Zulip supervisor did not become ready" >&2
exit 1
fi
sleep 2
done
}
patch_uwsgi_scheme_handling() {
wait_for_zulip_supervisor
podman exec burrow-zulip_zulip_1 bash -lc "cat > /etc/nginx/zulip-include/trusted-proto <<'EOF'
map \$remote_addr \$trusted_x_forwarded_proto {
default \$scheme;
127.0.0.1 \$http_x_forwarded_proto;
::1 \$http_x_forwarded_proto;
172.31.1.1 \$http_x_forwarded_proto;
}
map \$remote_addr \$trusted_x_forwarded_for {
default \"\";
127.0.0.1 \$http_x_forwarded_for;
::1 \$http_x_forwarded_for;
172.31.1.1 \$http_x_forwarded_for;
}
map \$remote_addr \$x_proxy_misconfiguration {
default \"\";
}
EOF
cat > /etc/nginx/uwsgi_params <<'EOF'
uwsgi_param QUERY_STRING \$query_string;
uwsgi_param REQUEST_METHOD \$request_method;
uwsgi_param CONTENT_TYPE \$content_type;
uwsgi_param CONTENT_LENGTH \$content_length;
uwsgi_param REQUEST_URI \$request_uri;
uwsgi_param PATH_INFO \$document_uri;
uwsgi_param DOCUMENT_ROOT \$document_root;
uwsgi_param SERVER_PROTOCOL \$server_protocol;
uwsgi_param REQUEST_SCHEME \$trusted_x_forwarded_proto;
uwsgi_param HTTPS on;
uwsgi_param REMOTE_ADDR \$remote_addr;
uwsgi_param REMOTE_PORT \$remote_port;
uwsgi_param SERVER_ADDR \$server_addr;
uwsgi_param SERVER_PORT \$server_port;
uwsgi_param SERVER_NAME \$server_name;
uwsgi_param HTTP_X_REAL_IP \$remote_addr;
uwsgi_param HTTP_X_FORWARDED_PROTO \$trusted_x_forwarded_proto;
uwsgi_param HTTP_X_FORWARDED_SSL \"\";
uwsgi_param HTTP_X_PROXY_MISCONFIGURATION \$x_proxy_misconfiguration;
# This value is the default, and is provided for explicitness; it must
# be longer than the configured 55s harakiri timeout in uwsgi
uwsgi_read_timeout 60s;
uwsgi_pass django;
EOF
supervisorctl restart nginx zulip-django >/dev/null"
}
bootstrap_realm_if_needed() {
wait_for_zulip_supervisor
local realm_exists
realm_exists="$(
podman exec burrow-zulip_zulip_1 bash -lc \
"su zulip -c '/home/zulip/deployments/current/manage.py list_realms'" \
| awk '$NF == "https://${cfg.domain}" { print "yes" }'
)"
if [ -n "$realm_exists" ]; then
return 0
fi
local realm_name=${lib.escapeShellArg cfg.realmName}
local admin_email=${lib.escapeShellArg cfg.administratorEmail}
local owner_name=${lib.escapeShellArg cfg.realmOwnerName}
local create_realm_cmd
printf -v create_realm_cmd '%q ' \
/home/zulip/deployments/current/manage.py \
create_realm \
--string-id= \
--password-file /data/secrets/bootstrap-owner-password \
--automated \
"$realm_name" \
"$admin_email" \
"$owner_name"
podman exec burrow-zulip_zulip_1 su zulip -c "$create_realm_cmd"
}
reconcile_realm_policy() {
wait_for_zulip_supervisor
local realm_id
realm_id="$(
podman exec burrow-zulip_zulip_1 bash -lc \
"su zulip -c '/home/zulip/deployments/current/manage.py list_realms'" \
| awk '$NF == "https://${cfg.domain}" { print $1 }'
)"
podman exec burrow-zulip_zulip_1 su zulip -c \
"/home/zulip/deployments/current/manage.py realm_domain --op add -r $realm_id ${realmSignupDomain} --allow-subdomains --automated" \
>/dev/null 2>&1 || true
podman exec burrow-zulip_zulip_1 su zulip -c \
"/home/zulip/deployments/current/manage.py shell -c 'from zerver.models import Realm; realm = Realm.objects.get(id=$realm_id); realm.invite_required = False; realm.save(update_fields=[\"invite_required\"])'"
}
if [ ! -e .initialized ]; then
compose pull
compose run --rm -T zulip app:init
touch .initialized
fi
ensure_zulip_data_layout
compose up -d zulip
bootstrap_realm_if_needed
reconcile_realm_policy
patch_uwsgi_scheme_handling
'';
};
};
}

View file

@ -5,7 +5,6 @@ import "google/protobuf/timestamp.proto";
service Tunnel {
rpc TunnelConfiguration (Empty) returns (stream TunnelConfigurationResponse);
rpc TunnelPackets (stream TunnelPacket) returns (stream TunnelPacket);
rpc TunnelStart (Empty) returns (Empty);
rpc TunnelStop (Empty) returns (Empty);
rpc TunnelStatus (Empty) returns (stream TunnelStatusResponse);
@ -18,14 +17,6 @@ service Networks {
rpc NetworkDelete (NetworkDeleteRequest) returns (Empty);
}
service TailnetControl {
rpc Discover (TailnetDiscoverRequest) returns (TailnetDiscoverResponse);
rpc Probe (TailnetProbeRequest) returns (TailnetProbeResponse);
rpc LoginStart (TailnetLoginStartRequest) returns (TailnetLoginStatusResponse);
rpc LoginStatus (TailnetLoginStatusRequest) returns (TailnetLoginStatusResponse);
rpc LoginCancel (TailnetLoginCancelRequest) returns (Empty);
}
message NetworkReorderRequest {
int32 id = 1;
int32 index = 2;
@ -65,57 +56,6 @@ message Empty {
}
message TailnetDiscoverRequest {
string email = 1;
}
message TailnetDiscoverResponse {
string domain = 1;
string authority = 2;
string oidc_issuer = 3;
bool managed = 4;
}
message TailnetProbeRequest {
string authority = 1;
}
message TailnetProbeResponse {
string authority = 1;
int32 status_code = 2;
string summary = 3;
string detail = 4;
bool reachable = 5;
}
message TailnetLoginStartRequest {
string account_name = 1;
string identity_name = 2;
string hostname = 3;
string authority = 4;
}
message TailnetLoginStatusRequest {
string session_id = 1;
}
message TailnetLoginCancelRequest {
string session_id = 1;
}
message TailnetLoginStatusResponse {
string session_id = 1;
string backend_state = 2;
string auth_url = 3;
bool running = 4;
bool needs_login = 5;
string tailnet_name = 6;
string magic_dns_suffix = 7;
string self_dns_name = 8;
repeated string tailnet_ips = 9;
repeated string health = 10;
}
enum State {
Stopped = 0;
Running = 1;
@ -129,12 +69,4 @@ message TunnelStatusResponse {
message TunnelConfigurationResponse {
repeated string addresses = 1;
int32 mtu = 2;
repeated string routes = 3;
repeated string dns_servers = 4;
repeated string search_domains = 5;
bool include_default_route = 6;
}
message TunnelPacket {
bytes payload = 1;
}

View file

@ -1,33 +1,16 @@
let
conradev = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBueQxNbP2246pxr/m7au4zNVm+ShC96xuOcfEcpIjWZ";
contact = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa";
agent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net";
jett = builtins.replaceStrings [ "\n" ] [ "" ] (builtins.readFile ./nixos/keys/jett_at_burrow_net.pub);
burrowForgeHost = "age1quxf27gnun0xghlnxf3jrmqr3h3a3fzd8qxpallsaztd2u74pdfq9e7w9l";
burrowForgeRecipients = [
contact
agent
jett
burrowForgeHost
];
uiTestRecipients = burrowForgeRecipients ++ [ conradev ];
in
{
"secrets/infra/authentik.env.age".publicKeys = burrowForgeRecipients;
"secrets/infra/authentik-google-client-id.age".publicKeys = burrowForgeRecipients;
"secrets/infra/authentik-google-client-secret.age".publicKeys = burrowForgeRecipients;
"secrets/infra/authentik-google-account-map.json.age".publicKeys = burrowForgeRecipients;
"secrets/infra/authentik-ui-test-password.age".publicKeys = uiTestRecipients;
"secrets/infra/forgejo-oidc-client-secret.age".publicKeys = burrowForgeRecipients;
"secrets/infra/forgejo-nsc-autoscaler-config.age".publicKeys = burrowForgeRecipients;
"secrets/infra/forgejo-nsc-dispatcher-config.age".publicKeys = burrowForgeRecipients;
"secrets/infra/forgejo-nsc-token.age".publicKeys = burrowForgeRecipients;
"secrets/infra/headscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients;
"secrets/infra/linear-scim-token.age".publicKeys = burrowForgeRecipients;
"secrets/infra/tailscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients;
"secrets/infra/zulip-postgres-password.age".publicKeys = burrowForgeRecipients;
"secrets/infra/zulip-memcached-password.age".publicKeys = burrowForgeRecipients;
"secrets/infra/zulip-rabbitmq-password.age".publicKeys = burrowForgeRecipients;
"secrets/infra/zulip-redis-password.age".publicKeys = burrowForgeRecipients;
"secrets/infra/zulip-secret-key.age".publicKeys = burrowForgeRecipients;
}

View file

@ -1,11 +1,9 @@
age-encryption.org/v1
-> ssh-ed25519 ux4N8Q Q3rYrGroJXarMLdatYCHVERefWDyGwM0Ii/kOp5m3Fs
W3tgHNXLSVfGU5p8MhBj0mX72SNgMl8nf8sQX29yvBw
-> ssh-ed25519 IrZmAg fyFQQkd51GthNZ4R+W5Al266LnlKbr4ZoMERlCM1OTQ
rNjnHTGCfF8LkqU8mzTrHlL5G4az1k62gvH4gW8zmjc
-> ssh-ed25519 0kWPgQ OWokv9XAphqbkDi1cznb9V09VcM6Li1eIh0JpcIlVTY
TnPVlqKB78y7NPYp02UJmuRXdBMKJKCngpvo8TjpFZ8
-> X25519 HWaWhyejjo4IjDrNsBYxU1JaGU0899FqiBYgstInuiU
enbBGnhH+uJKY3NBD6mmy09Uos+in6ytRQ5BakvTUvI
--- gOBrh88hnvlUSmnRiowJiUIwgIz5zzVKH8YCRb8Ckdw
Úx¥¢õokPà²íáÐàn8¬ý­vòµ„™HRÊ<>oMºÒðªÃ¼ê¢9&TÁb]ĉ¬Àƒ'|ý<ÒèPbe†
-> ssh-ed25519 ux4N8Q 4uq5z93mRUUgcMOxP4+Yfe2Jq4tGYErwtzvtMHUvgi0
J9DkDeSPkQbOjFM3QoV+1Kz3ZVLfR4PUxCT8Zxz+Wvk
-> ssh-ed25519 IrZmAg uLEVmJ+e9ZiLas5YooR4GfgyspWTsFdMB2WPvluU/VI
7vqqQ/BIDQaOp6VDVLa5ugoRxVZZsMj116cTHY6+8KM
-> X25519 9spF9eLz63UOaBfuG9vTIr6bCKwzFsWMjnaIj1PIR3Y
iGFELg2RQUT9rEal7pblQhfxtwYhxsZdXYxEhvjtHpw
--- 3TDrUnIN826N/n5gc+YY8ilMMc/6K8zGTh6FxzKC/JM
ÊÉXÍHºéÓ#IJG§uíÃâeÒüÖ¹f&1€a2ƒB„JÔŽõg¯ºÁ=Ì¿Šä”.ÅÕ÷ë*7™F<E284A2>·<EFBFBD>´

Binary file not shown.

Binary file not shown.

View file

@ -1,11 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 ux4N8Q Tb3hxc6ZscCQpr7s8raup25FA8YAmq30jHZfOQp28Xs
L9YhaX9IVinud0IOs5K55ldGx82wjXHxnVBHZnRjiTA
-> ssh-ed25519 IrZmAg etIe6hWDP9YkqDFCWybnvsOh7h8YO+z3tKc95pG64lU
BT3rH5a+LJZWv2xtWPbMJGS2oM9v4mOI9WPmnHebiew
-> ssh-ed25519 0kWPgQ YpCf5m16VaKp7d+C3oF9MJQB/0xzCNtD7ODsTiV8t1o
xG8G/kSM+7VrWHm299A7fG/kBFnoiWZPiDZuldvimLw
-> X25519 ETltnMPR7lWbBWJvJKmNZhS7wqX0WCa4aNu8UKzxMVE
Ys57VNuclgvN1nJIrLjNrwekbosa7KK9lFt0PTpr/MQ
--- ZeUmSOf8+NycQAFRGCJHYcQvTJqSBIGKEOEdCnNfJbE
±<»qìª1.ïO_š×ÇÕ¤7AÛ·_@%/Â5Ël½7J÷Œõɵ<19>Ä<EFBFBD>üA xÆØûÐèüb "B

View file

@ -1,11 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 ux4N8Q x0r1UHgSibFIvKU34kP0+mnvQa5xXnac3P5fyqb7qFc
MfKnr5N0DV2NIoo4MFVFV0ULMayy0zzZqIq4FDzgDGc
-> ssh-ed25519 IrZmAg rzoR8knGrsTGuh9Hqg/NB0NQKI1vx1WI0ZRyrLIPwVY
7gV/d1slrIT+W0+iX5YK/uUWjHGJfee6vA+f9a35nEY
-> ssh-ed25519 0kWPgQ SyuEAfqmBAqLcuuQUHM5OzAv2hoquMMYtVdbKpBVhjI
7QqXens2363ln0euoormMh9a3Csh+nS2eBkHuQJmOWc
-> X25519 qDjNNkYBUhWTYyBhrw9tYl8a7G6TCkVZbR4aPcP+J0c
QF33V6hFUuYRj0B8Eo4jqyyvCpBbpD2ViVWoS8A8f3E
--- 1/Jb0nvWlcszMmxI0yVr6kfexDN0sSk1p+wsTUL4WvU
¨¤ãö9a¿ª5ÌIµÛÙƒçŽèV[fÀÁàç,Dàb \æv&§L½­Z7õ!åû?4=JxFíÁeVÔ

View file

@ -1,11 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 ux4N8Q s1hLIWvkXmlIv/VeHXpDSCe+dh09mE+iZd7xJiQccy0
8WosTJQLGRPhTR06SIDjgtXNebcf+H/pFzY/lBCjXcs
-> ssh-ed25519 IrZmAg zBNlK+o/RCTCyp8BRkoAYqsDn//kIKtYk3SICkMu3BA
EhBQy8QdSnCZKkdGzQho7zEMmAbJVoU5jZOMPN6tHG0
-> ssh-ed25519 0kWPgQ hv06idPXqAATkLeUC5vILdEO2NXNWPczlWnwMFvOdkA
3EeajviunGlcfcF1QlRJrVA9bwPT+fJZFX0uneYVs0c
-> X25519 vm9rPYnQB16VSidi7+nr70lFaH0W/jIGY8zwUObZUV8
jFgPy/w4j0/p1USKGjQY+coo1OUFXiIjJ5apIZCrZVI
--- Cf2c6WzLYOi8xE/sIn7ZtUqBy5AToASDUNpAxyjrI9M
˜:Š,+!°¬›ÏÛöϨϬB4DÿŸmHè÷®|Ä(çŸø9ñl9†LßPZ^ïzed=im¡óëz¢æ?øŸ

View file

@ -1,11 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 ux4N8Q DqDE3ZZlPUWUyyLA185xsOmfGi146SNk+hENMQXaiFY
D6FhZgynbdccPJQiFRJ18EYvCyDLz3cak0YuQa4f5p4
-> ssh-ed25519 IrZmAg lXgVeADmgjeHeVOOIS5oHqrhkN59ZWDemMOBJo3ubH8
AQ24P+DnxNoHEguNnLaROIW4/Sq96w/UxzzQwEOyGRc
-> ssh-ed25519 0kWPgQ 8x0pMohdACYueLY6jbNwg7MYVaZcjwBU4axthvDoFx4
SgUVnd6MK1MccWVYOu9R3PtoMCBBNGKQ7jt5MSA+KkI
-> X25519 UaO5huJPx8d8eMUnGhbI77tZjsFlIPWEffT4fgoO22w
DVz016ibRxJoa4TDmb2m0Qu9Dn8jpjWEBVtdm2TZx0c
--- 5+MHuvC26SjEBFSmRm0kXjiI27QnJGxvPl2w13EkMrw
Óí¯FžoQ˜˜ƒ]ÈŸ­‹õ‘ˆþš‰ûëeU³/óíÑ/ó®ÂnÀoø.XþGù£J •Э¹|‡<>õ+ïž­

View file

@ -1,11 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 ux4N8Q ml+kmLmuRb2nMXJyhKigby2+lPddxM/U7tjhGGQ/JGk
B3UCv/3+4GHeKR964o/m0CoicHwDgWQGEarPW94tb3I
-> ssh-ed25519 IrZmAg AO0ELOuGGj+WanDZFRkHKUEJyZqJYFdhWbqmUfwbpiM
5RZMxVBvW5+TzCBFnn66ry3o5V5cJykweyoYMVBgczY
-> ssh-ed25519 0kWPgQ gqQ/S33Re2OYLz1D9LoSAoqOKxuL4aUes8r6+NyAoXw
NHo2xFsxxJO1ZjnG9r3oxMuvjOUsCyyPvcar2ejZp9w
-> X25519 vUAjBCE197YsckVNM4SYVIPBEESTWnBPCWnUlEwYs1I
L3l85DXFoAVm2ssHfjBeqRpWGlo1UGbmcNkEgoUB9fM
--- X/2O8ufjbTGrt2zCm4gSRqqoxT5v6a+13XjH4dpRsHs
÷¼âM¬kÜf"¹¬ž(qëxöÅèF2Bd…ÖMRîYji ¯õˆÖ°Ü´Å<¿òøÒ‘¾™Ñí¯b_.!r+¸<ÀÞUsž²sëô<C3AB>Êæ<C38A>uí?g«ÉDçã\Öðú<C3B0>V aÊÓÄÓ¨mÇã(ȈÞ&.ñ& Àc/|½w˜²ƒ(Wð¡ÁýïªHޝœ4rÑ ¾¸á°æ+ j"üñÙB §

View file

@ -1,5 +1,20 @@
import { Space_Mono, Poppins } from "next/font/google";
import localFont from "next/font/local";
const space_mono = Space_Mono({
weight: ["400", "700"],
subsets: ["latin"],
display: "swap",
variable: "--font-space-mono",
});
const poppins = Poppins({
weight: ["400", "500", "600", "700", "800", "900"],
subsets: ["latin"],
display: "swap",
variable: "--font-poppins",
});
const phantomSans = localFont({
src: [
{
@ -21,18 +36,10 @@ const phantomSans = localFont({
variable: "--font-phantom-sans",
});
const fallbackFontVariables = {
"--font-space-mono":
'"SFMono-Regular", "SF Mono", ui-monospace, Menlo, Monaco, "Cascadia Mono", "Segoe UI Mono", "Roboto Mono", monospace',
"--font-poppins":
'var(--font-phantom-sans), -apple-system, BlinkMacSystemFont, "Segoe UI", sans-serif',
} as React.CSSProperties;
export default function Layout({ children }: { children: React.ReactNode }) {
return (
<div
className={phantomSans.variable}
style={fallbackFontVariables}
className={`${space_mono.variable} ${poppins.variable} ${phantomSans.variable}`}
>
{children}
</div>

3907
site/package-lock.json generated

File diff suppressed because it is too large Load diff

Some files were not shown because too many files have changed in this diff Show more