Compare commits

...

11 commits

Author SHA1 Message Date
Conrad Kramer
3ebb0a8e61 Fix tailnet auth flow provider lookup
Some checks are pending
Build Rust / Cargo Test (push) Waiting to run
Build Site / Next.js Build (push) Waiting to run
Lint Governance / BEP Metadata (push) Waiting to run
2026-04-05 01:36:52 -07:00
Conrad Kramer
8de798469b Bind tailnet auth flow to tailscale 2026-04-05 01:34:32 -07:00
Conrad Kramer
c8aa036ade Add Tailscale Authentik OIDC app 2026-04-04 23:53:33 -07:00
Conrad Kramer
b15b6624cb Add Forgejo namespace release workflow 2026-04-04 22:21:03 -07:00
Conrad Kramer
9e3e8fa783 Use upstream nsc-autoscaler on burrow forge 2026-04-04 22:20:55 -07:00
Conrad Kramer
3d80e772c8 Add tailnet connectivity smoke path 2026-04-03 17:49:11 -07:00
Conrad Kramer
5079786515 Allow local UI test secret decryption 2026-04-03 03:08:06 -07:00
Conrad Kramer
75bcfaf655 Add Tailnet UI auth test flow 2026-04-03 03:03:17 -07:00
Conrad Kramer
0c660acd1e Add daemon-owned Tailnet login flow 2026-04-03 02:09:58 -07:00
Conrad Kramer
d1e28b8817 Route Tailnet Apple flows through daemon gRPC 2026-04-03 01:36:55 -07:00
Conrad Kramer
f6a7f0922d Add governance and identity registry scaffolding 2026-04-03 01:36:10 -07:00
46 changed files with 4547 additions and 788 deletions

View file

@ -0,0 +1,27 @@
# Lint Governance: validates Burrow Evolution Proposal (BEP) metadata on
# pushes to main, all pull requests, and manual dispatch.
name: Lint Governance
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - "**"
  workflow_dispatch:
jobs:
  governance:
    name: BEP Metadata
    # Runs on the self-hosted burrow-forge runner pool.
    runs-on: [self-hosted, linux, x86_64, burrow-forge]
    steps:
      - name: Checkout
        uses: https://code.forgejo.org/actions/checkout@v4
        with:
          token: ${{ github.token }}
          # Full history — presumably needed by the metadata checker to
          # inspect prior commits; confirm against the script.
          fetch-depth: 0
      - name: Validate BEP metadata
        shell: bash
        run: |
          set -euo pipefail
          python3 Scripts/check-bep-metadata.py

View file

@ -0,0 +1,60 @@
# Release: builds Burrow release artifacts and publishes a Forgejo release.
# Triggered by pushing a v* tag, or manually via workflow_dispatch.
name: Release
on:
  push:
    tags:
      - "v*"
  workflow_dispatch:
# One release build per ref at a time; never cancel an in-flight release.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: false
jobs:
  release:
    name: Release Build
    runs-on: namespace-profile-linux-medium
    steps:
      - name: Checkout
        uses: https://code.forgejo.org/actions/checkout@v4
        with:
          token: ${{ github.token }}
          # Full history — presumably used for version derivation; confirm
          # against the build script.
          fetch-depth: 0
      - name: Bootstrap Nix
        shell: bash
        run: |
          set -euo pipefail
          chmod +x Scripts/ci/ensure-nix.sh
          Scripts/ci/ensure-nix.sh
      - name: Build release artifacts
        shell: bash
        env:
          RELEASE_REF: ${{ github.ref_name }}
        run: |
          set -euo pipefail
          # Fall back to a short-SHA-derived ref if RELEASE_REF is unset/empty.
          ref="${RELEASE_REF:-manual-${GITHUB_SHA::7}}"
          export RELEASE_REF="${ref}"
          chmod +x Scripts/ci/build-release-artifacts.sh
          nix develop .#ci -c Scripts/ci/build-release-artifacts.sh
      - name: Upload release artifacts
        uses: https://code.forgejo.org/actions/upload-artifact@v4
        with:
          name: burrow-release-${{ github.ref_name }}
          path: dist/*
          # Fail loudly when the build produced nothing to upload.
          if-no-files-found: error
      - name: Publish Forgejo release
        # Only tag pushes publish a release; manual runs stop at artifacts.
        if: startsWith(github.ref, 'refs/tags/')
        shell: bash
        env:
          RELEASE_TAG: ${{ github.ref_name }}
          API_URL: ${{ github.api_url }}
          REPOSITORY: ${{ github.repository }}
          TOKEN: ${{ github.token }}
        run: |
          set -euo pipefail
          chmod +x Scripts/ci/publish-forgejo-release.sh
          nix develop .#ci -c Scripts/ci/publish-forgejo-release.sh

23
.github/workflows/lint-governance.yml vendored Normal file
View file

@ -0,0 +1,23 @@
# Governance Lint: validates BEP metadata on pull requests
# (GitHub-hosted counterpart of the Forgejo "Lint Governance" workflow).
name: Governance Lint
on:
  pull_request:
    branches:
      - "*"
jobs:
  governance:
    name: BEP Metadata
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # Check out the PR head commit rather than the synthetic merge commit.
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
      - name: Validate BEP metadata
        shell: bash
        run: |
          set -euo pipefail
          python3 Scripts/check-bep-metadata.py

14
AGENTS.md Normal file
View file

@ -0,0 +1,14 @@
# Instructions for Agents
1. Spell the project name as `Burrow` in user-facing copy and `burrow` in code, package, and protocol identifiers unless an existing integration requires a different literal.
2. Read [CONSTITUTION.md](CONSTITUTION.md) before changing Apple clients, the daemon, the control plane, forge infrastructure, identity, or security-sensitive code.
3. Anchor non-trivial changes in a Burrow Evolution Proposal (BEP) under [evolution/](evolution/README.md) so future contributors can inherit the rationale, safeguards, and rollout shape.
4. Before touching the Apple app, daemon IPC, or Tailnet flows, review:
- [evolution/proposals/BEP-0002-control-plane-bootstrap-and-local-auth.md](evolution/proposals/BEP-0002-control-plane-bootstrap-and-local-auth.md)
- [evolution/proposals/BEP-0003-connect-ip-and-negotiation-roadmap.md](evolution/proposals/BEP-0003-connect-ip-and-negotiation-roadmap.md)
- [evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md](evolution/proposals/BEP-0005-daemon-ipc-and-apple-boundary.md)
- [evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md](evolution/proposals/BEP-0006-tailnet-authority-first-control-plane.md)
5. Apple clients must talk only to the daemon over gRPC. Do not add direct HTTP, control-plane, or helper-process calls from Swift UI code.
6. Treat Tailnet as one protocol family. Tailscale-managed and self-hosted Headscale-style deployments differ by authority, policy, and auth details, not by a separate user-facing protocol surface.
7. Maintain canonical identity and operator metadata in [contributors.nix](contributors.nix). If Burrow forge, Authentik, Headscale, or admin/group mappings need to change, edit that registry first and derive runtime configuration from it.
8. When process or architecture is unclear, stop and draft or update a BEP instead of improvising durable behavior in code.

View file

@ -0,0 +1,232 @@
import XCTest
/// End-to-end UI test that drives the Burrow app through the Tailnet sign-in
/// flow: email-based discovery, an authority connectivity probe, and an
/// Authentik-backed web authentication session.
@MainActor
final class BurrowTailnetLoginUITests: XCTestCase {
    override func setUpWithError() throws {
        // The flow is strictly sequential; a failed step invalidates everything after it.
        continueAfterFailure = false
    }

    /// Walks the full Tailnet login flow. Skips (rather than fails) when the
    /// required credential environment variables are not provided.
    func testTailnetLoginThroughAuthentikWebSession() throws {
        let email = try requiredEnvironment("BURROW_UI_TEST_EMAIL")
        // Username defaults to the email when no separate username is supplied.
        let username = ProcessInfo.processInfo.environment["BURROW_UI_TEST_USERNAME"] ?? email
        let password = try requiredEnvironment("BURROW_UI_TEST_PASSWORD")
        let app = XCUIApplication()
        app.launch()

        // Step 1: open the Tailnet add flow.
        let tailnetButton = app.buttons["quick-add-tailnet"]
        XCTAssertTrue(tailnetButton.waitForExistence(timeout: 15), "Tailnet add button did not appear")
        tailnetButton.tap()

        // Step 2: discover the Tailnet authority from the account email.
        let discoveryField = app.textFields["tailnet-discovery-email"]
        XCTAssertTrue(discoveryField.waitForExistence(timeout: 10), "Tailnet discovery email field did not appear")
        replaceText(in: discoveryField, with: email)
        let findServerButton = app.buttons["tailnet-find-server"]
        XCTAssertTrue(findServerButton.waitForExistence(timeout: 5), "Find Server button did not appear")
        findServerButton.tap()
        let discoveryCard = app.otherElements["tailnet-discovery-card"]
        XCTAssertTrue(discoveryCard.waitForExistence(timeout: 20), "Tailnet discovery result did not appear")
        let authorityField = app.textFields["tailnet-authority"]
        XCTAssertTrue(authorityField.waitForExistence(timeout: 10), "Tailnet authority field did not appear")
        XCTAssertTrue(
            waitForFieldValue(authorityField, containing: "ts.burrow.net", timeout: 20),
            "Tailnet authority was not populated from discovery"
        )

        // Step 3: probe connectivity to the discovered authority.
        let probeButton = app.buttons["tailnet-check-connection"]
        XCTAssertTrue(probeButton.waitForExistence(timeout: 5), "Check Connection button did not appear")
        probeButton.tap()
        let probeCard = app.otherElements["tailnet-authority-probe-card"]
        XCTAssertTrue(probeCard.waitForExistence(timeout: 20), "Tailnet connection probe did not complete")

        // Step 4: start sign-in and complete the Authentik web session.
        let signInButton = app.buttons["tailnet-start-sign-in"]
        XCTAssertTrue(signInButton.waitForExistence(timeout: 10), "Tailnet sign-in button did not appear")
        signInButton.tap()
        acceptAuthenticationPromptIfNeeded(in: app)
        let webSession = webAuthenticationSession()
        XCTAssertTrue(webSession.waitForExistence(timeout: 20), "Safari authentication session did not appear")
        signIntoAuthentik(in: webSession, username: username, password: password)

        // Step 5: return to the app and wait for the signed-in state.
        app.activate()
        XCTAssertTrue(
            waitForButtonLabel(app.buttons["tailnet-start-sign-in"], equals: "Signed In", timeout: 60),
            "Tailnet sign-in never reached the running state"
        )
    }

    /// Dismisses the system web-authentication consent prompt if one is shown,
    /// checking both SpringBoard-hosted and app-hosted buttons.
    private func acceptAuthenticationPromptIfNeeded(in app: XCUIApplication) {
        let springboard = XCUIApplication(bundleIdentifier: "com.apple.springboard")
        let promptCandidates = [
            springboard.buttons["Continue"],
            springboard.buttons["Allow"],
            app.buttons["Continue"],
            app.buttons["Allow"],
        ]
        // Tap the first prompt button that appears; absence of all of them is fine.
        for button in promptCandidates where button.waitForExistence(timeout: 3) {
            button.tap()
            return
        }
    }

    /// Returns the process hosting the web-auth UI: SafariViewService when it
    /// appears in time, otherwise full Safari.
    private func webAuthenticationSession() -> XCUIApplication {
        let safariViewService = XCUIApplication(bundleIdentifier: "com.apple.SafariViewService")
        if safariViewService.waitForExistence(timeout: 5) {
            return safariViewService
        }
        let safari = XCUIApplication(bundleIdentifier: "com.apple.mobilesafari")
        _ = safari.waitForExistence(timeout: 5)
        return safari
    }

    /// Completes the Authentik login form, handling both single-page layouts
    /// (username and password together) and two-step layouts.
    private func signIntoAuthentik(in webSession: XCUIApplication, username: String, password: String) {
        // The username field label varies across Authentik flows; try known
        // labels first, then fall back to any text field.
        let usernameField = firstExistingElement(
            in: webSession,
            queries: [
                { $0.textFields["Username"] },
                { $0.textFields["Email or Username"] },
                { $0.textFields["Email address"] },
                { $0.textFields["Email"] },
                { $0.webViews.textFields["Username"] },
                { $0.webViews.textFields["Email or Username"] },
                { $0.descendants(matching: .textField).firstMatch },
            ],
            timeout: 25
        )
        XCTAssertTrue(usernameField.exists, "Authentik username field did not appear")
        replaceText(in: usernameField, with: username)
        // Single-page form: the password field is already visible.
        let immediatePasswordField = firstExistingSecureField(in: webSession, timeout: 2)
        if immediatePasswordField.exists {
            replaceSecureText(in: immediatePasswordField, with: password)
            tapFirstExistingButton(
                in: webSession,
                titles: ["Continue", "Sign In", "Log in", "Login"],
                timeout: 5
            )
            return
        }
        // Two-step form: submit the username, then wait for the password page.
        tapFirstExistingButton(
            in: webSession,
            titles: ["Continue", "Next", "Sign In", "Log in", "Login"],
            timeout: 5
        )
        let passwordField = firstExistingSecureField(in: webSession, timeout: 20)
        XCTAssertTrue(passwordField.exists, "Authentik password field did not appear")
        replaceSecureText(in: passwordField, with: password)
        tapFirstExistingButton(
            in: webSession,
            titles: ["Continue", "Sign In", "Log in", "Login"],
            timeout: 5
        )
    }

    /// Finds the first secure text field matching known Authentik password
    /// labels, falling back to any secure field in the hierarchy.
    private func firstExistingSecureField(in app: XCUIApplication, timeout: TimeInterval) -> XCUIElement {
        let candidates = [
            app.secureTextFields["Password"],
            app.secureTextFields["Password or Token"],
            app.webViews.secureTextFields["Password"],
            app.webViews.secureTextFields["Password or Token"],
            app.descendants(matching: .secureTextField).firstMatch,
        ]
        return firstExistingElement(from: candidates, timeout: timeout)
    }

    /// Taps the first button whose title matches any of `titles`, searching
    /// both native and web-view hierarchies; asserts if no button appears.
    private func tapFirstExistingButton(
        in app: XCUIApplication,
        titles: [String],
        timeout: TimeInterval
    ) {
        // NOTE(review): the trailing firstMatch fallback can tap an unrelated
        // button when none of the expected titles exist — confirm this is the
        // intended best-effort behavior.
        let candidates = titles.flatMap { title in
            [
                app.buttons[title],
                app.webViews.buttons[title],
            ]
        } + [app.descendants(matching: .button).firstMatch]
        let button = firstExistingElement(from: candidates, timeout: timeout)
        XCTAssertTrue(button.exists, "Expected one of \(titles.joined(separator: ", ")) to appear")
        button.tap()
    }

    /// Reads a required environment variable, skipping the test when it is
    /// missing or blank.
    private func requiredEnvironment(_ key: String) throws -> String {
        guard let value = ProcessInfo.processInfo.environment[key],
              !value.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
        else {
            throw XCTSkip("Missing required UI test environment variable \(key)")
        }
        return value
    }

    /// Waits until the element's `value` contains `substring`.
    private func waitForFieldValue(
        _ field: XCUIElement,
        containing substring: String,
        timeout: TimeInterval
    ) -> Bool {
        let predicate = NSPredicate(format: "value CONTAINS %@", substring)
        let expectation = XCTNSPredicateExpectation(predicate: predicate, object: field)
        return XCTWaiter.wait(for: [expectation], timeout: timeout) == .completed
    }

    /// Waits until the button's label equals `expected`.
    private func waitForButtonLabel(
        _ button: XCUIElement,
        equals expected: String,
        timeout: TimeInterval
    ) -> Bool {
        let predicate = NSPredicate(format: "label == %@", expected)
        let expectation = XCTNSPredicateExpectation(predicate: predicate, object: button)
        return XCTWaiter.wait(for: [expectation], timeout: timeout) == .completed
    }

    /// Convenience overload that materializes `queries` against `app` before
    /// polling for the first existing element.
    private func firstExistingElement(
        in app: XCUIApplication,
        queries: [(XCUIApplication) -> XCUIElement],
        timeout: TimeInterval
    ) -> XCUIElement {
        firstExistingElement(from: queries.map { $0(app) }, timeout: timeout)
    }

    /// Polls `candidates` until one exists or the deadline passes. Returns the
    /// first candidate even on timeout so callers can assert on `.exists`.
    private func firstExistingElement(from candidates: [XCUIElement], timeout: TimeInterval) -> XCUIElement {
        let deadline = Date().addingTimeInterval(timeout)
        repeat {
            for candidate in candidates where candidate.exists {
                return candidate
            }
            // Pump the run loop briefly between polls instead of sleeping.
            RunLoop.current.run(until: Date().addingTimeInterval(0.2))
        } while Date() < deadline
        return candidates[0]
    }

    /// Focuses the field, clears any current text, and types `value`.
    private func replaceText(in element: XCUIElement, with value: String) {
        element.tap()
        clearText(in: element)
        element.typeText(value)
    }

    /// Secure-field variant of `replaceText(in:with:)`.
    private func replaceSecureText(in element: XCUIElement, with value: String) {
        element.tap()
        clearText(in: element)
        element.typeText(value)
    }

    /// Deletes the element's current string value one keystroke at a time.
    private func clearText(in element: XCUIElement) {
        guard let currentValue = element.value as? String, !currentValue.isEmpty else {
            return
        }
        let deleteSequence = String(repeating: XCUIKeyboardKey.delete.rawValue, count: currentValue.count)
        element.typeText(deleteSequence)
    }
}

View file

@ -8,6 +8,7 @@
/* Begin PBXBuildFile section */ /* Begin PBXBuildFile section */
D00AA8972A4669BC005C8102 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = D00AA8962A4669BC005C8102 /* AppDelegate.swift */; }; D00AA8972A4669BC005C8102 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = D00AA8962A4669BC005C8102 /* AppDelegate.swift */; };
D11000012F70000100112233 /* BurrowUITests.swift in Sources */ = {isa = PBXBuildFile; fileRef = D11000042F70000100112233 /* BurrowUITests.swift */; };
D020F65829E4A697002790F6 /* PacketTunnelProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = D020F65729E4A697002790F6 /* PacketTunnelProvider.swift */; }; D020F65829E4A697002790F6 /* PacketTunnelProvider.swift in Sources */ = {isa = PBXBuildFile; fileRef = D020F65729E4A697002790F6 /* PacketTunnelProvider.swift */; };
D020F65D29E4A697002790F6 /* BurrowNetworkExtension.appex in Embed Foundation Extensions */ = {isa = PBXBuildFile; fileRef = D020F65329E4A697002790F6 /* BurrowNetworkExtension.appex */; settings = {ATTRIBUTES = (RemoveHeadersOnCopy, ); }; }; D020F65D29E4A697002790F6 /* BurrowNetworkExtension.appex in Embed Foundation Extensions */ = {isa = PBXBuildFile; fileRef = D020F65329E4A697002790F6 /* BurrowNetworkExtension.appex */; settings = {ATTRIBUTES = (RemoveHeadersOnCopy, ); }; };
D03383AD2C8E67E300F7C44E /* SwiftProtobuf in Frameworks */ = {isa = PBXBuildFile; productRef = D078F7E22C8DA375008A8CEC /* SwiftProtobuf */; }; D03383AD2C8E67E300F7C44E /* SwiftProtobuf in Frameworks */ = {isa = PBXBuildFile; productRef = D078F7E22C8DA375008A8CEC /* SwiftProtobuf */; };
@ -49,6 +50,13 @@
/* End PBXBuildFile section */ /* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */ /* Begin PBXContainerItemProxy section */
D11000022F70000100112233 /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = D05B9F6A29E39EEC008CB1F9 /* Project object */;
proxyType = 1;
remoteGlobalIDString = D05B9F7129E39EEC008CB1F9;
remoteInfo = App;
};
D020F65B29E4A697002790F6 /* PBXContainerItemProxy */ = { D020F65B29E4A697002790F6 /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy; isa = PBXContainerItemProxy;
containerPortal = D05B9F6A29E39EEC008CB1F9 /* Project object */; containerPortal = D05B9F6A29E39EEC008CB1F9 /* Project object */;
@ -130,6 +138,9 @@
/* Begin PBXFileReference section */ /* Begin PBXFileReference section */
D00117422B30348D00D87C25 /* Configuration.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = Configuration.xcconfig; sourceTree = "<group>"; }; D00117422B30348D00D87C25 /* Configuration.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = Configuration.xcconfig; sourceTree = "<group>"; };
D00AA8962A4669BC005C8102 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = "<group>"; }; D00AA8962A4669BC005C8102 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = "<group>"; };
D11000032F70000100112233 /* BurrowUITests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = BurrowUITests.xctest; sourceTree = BUILT_PRODUCTS_DIR; };
D11000042F70000100112233 /* BurrowUITests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = BurrowUITests.swift; sourceTree = "<group>"; };
D11000052F70000100112233 /* UITests.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = UITests.xcconfig; sourceTree = "<group>"; };
D020F63D29E4A1FF002790F6 /* Identity.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Identity.xcconfig; sourceTree = "<group>"; }; D020F63D29E4A1FF002790F6 /* Identity.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Identity.xcconfig; sourceTree = "<group>"; };
D020F64029E4A1FF002790F6 /* Compiler.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Compiler.xcconfig; sourceTree = "<group>"; }; D020F64029E4A1FF002790F6 /* Compiler.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Compiler.xcconfig; sourceTree = "<group>"; };
D020F64229E4A1FF002790F6 /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; }; D020F64229E4A1FF002790F6 /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; };
@ -182,6 +193,13 @@
/* End PBXFileReference section */ /* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */ /* Begin PBXFrameworksBuildPhase section */
D11000062F70000100112233 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
D020F65029E4A697002790F6 /* Frameworks */ = { D020F65029E4A697002790F6 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase; isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647; buildActionMask = 2147483647;
@ -243,6 +261,7 @@
D0D4E4F72C8D941D007F820A /* Framework.xcconfig */, D0D4E4F72C8D941D007F820A /* Framework.xcconfig */,
D020F64029E4A1FF002790F6 /* Compiler.xcconfig */, D020F64029E4A1FF002790F6 /* Compiler.xcconfig */,
D0D4E4F62C8D932D007F820A /* Debug.xcconfig */, D0D4E4F62C8D932D007F820A /* Debug.xcconfig */,
D11000052F70000100112233 /* UITests.xcconfig */,
D04A3E1D2BAF465F0043EC85 /* Version.xcconfig */, D04A3E1D2BAF465F0043EC85 /* Version.xcconfig */,
D020F64229E4A1FF002790F6 /* Info.plist */, D020F64229E4A1FF002790F6 /* Info.plist */,
D0D4E5912C8D9D0A007F820A /* Constants */, D0D4E5912C8D9D0A007F820A /* Constants */,
@ -268,6 +287,7 @@
isa = PBXGroup; isa = PBXGroup;
children = ( children = (
D05B9F7429E39EEC008CB1F9 /* App */, D05B9F7429E39EEC008CB1F9 /* App */,
D11000072F70000100112233 /* AppUITests */,
D020F65629E4A697002790F6 /* NetworkExtension */, D020F65629E4A697002790F6 /* NetworkExtension */,
D0D4E49C2C8D921A007F820A /* Core */, D0D4E49C2C8D921A007F820A /* Core */,
D0D4E4AD2C8D921A007F820A /* UI */, D0D4E4AD2C8D921A007F820A /* UI */,
@ -281,6 +301,7 @@
isa = PBXGroup; isa = PBXGroup;
children = ( children = (
D05B9F7229E39EEC008CB1F9 /* Burrow.app */, D05B9F7229E39EEC008CB1F9 /* Burrow.app */,
D11000032F70000100112233 /* BurrowUITests.xctest */,
D020F65329E4A697002790F6 /* BurrowNetworkExtension.appex */, D020F65329E4A697002790F6 /* BurrowNetworkExtension.appex */,
D0BCC6032A09535900AD070D /* libburrow.a */, D0BCC6032A09535900AD070D /* libburrow.a */,
D0D4E5312C8D996F007F820A /* BurrowCore.framework */, D0D4E5312C8D996F007F820A /* BurrowCore.framework */,
@ -303,6 +324,14 @@
path = App; path = App;
sourceTree = "<group>"; sourceTree = "<group>";
}; };
D11000072F70000100112233 /* AppUITests */ = {
isa = PBXGroup;
children = (
D11000042F70000100112233 /* BurrowUITests.swift */,
);
path = AppUITests;
sourceTree = "<group>";
};
D0B98FD729FDDB57004E7149 /* libburrow */ = { D0B98FD729FDDB57004E7149 /* libburrow */ = {
isa = PBXGroup; isa = PBXGroup;
children = ( children = (
@ -375,6 +404,24 @@
/* End PBXGroup section */ /* End PBXGroup section */
/* Begin PBXNativeTarget section */ /* Begin PBXNativeTarget section */
D11000082F70000100112233 /* BurrowUITests */ = {
isa = PBXNativeTarget;
buildConfigurationList = D110000E2F70000100112233 /* Build configuration list for PBXNativeTarget "BurrowUITests" */;
buildPhases = (
D110000A2F70000100112233 /* Sources */,
D11000062F70000100112233 /* Frameworks */,
D11000092F70000100112233 /* Resources */,
);
buildRules = (
);
dependencies = (
D110000B2F70000100112233 /* PBXTargetDependency */,
);
name = BurrowUITests;
productName = BurrowUITests;
productReference = D11000032F70000100112233 /* BurrowUITests.xctest */;
productType = "com.apple.product-type.bundle.ui-testing";
};
D020F65229E4A697002790F6 /* NetworkExtension */ = { D020F65229E4A697002790F6 /* NetworkExtension */ = {
isa = PBXNativeTarget; isa = PBXNativeTarget;
buildConfigurationList = D020F65E29E4A697002790F6 /* Build configuration list for PBXNativeTarget "NetworkExtension" */; buildConfigurationList = D020F65E29E4A697002790F6 /* Build configuration list for PBXNativeTarget "NetworkExtension" */;
@ -490,6 +537,10 @@
LastSwiftUpdateCheck = 1600; LastSwiftUpdateCheck = 1600;
LastUpgradeCheck = 1520; LastUpgradeCheck = 1520;
TargetAttributes = { TargetAttributes = {
D11000082F70000100112233 = {
CreatedOnToolsVersion = 16.0;
TestTargetID = D05B9F7129E39EEC008CB1F9;
};
D020F65229E4A697002790F6 = { D020F65229E4A697002790F6 = {
CreatedOnToolsVersion = 14.3; CreatedOnToolsVersion = 14.3;
}; };
@ -522,6 +573,7 @@
projectRoot = ""; projectRoot = "";
targets = ( targets = (
D05B9F7129E39EEC008CB1F9 /* App */, D05B9F7129E39EEC008CB1F9 /* App */,
D11000082F70000100112233 /* BurrowUITests */,
D020F65229E4A697002790F6 /* NetworkExtension */, D020F65229E4A697002790F6 /* NetworkExtension */,
D0D4E5502C8D9BF2007F820A /* UI */, D0D4E5502C8D9BF2007F820A /* UI */,
D0D4E5302C8D996F007F820A /* Core */, D0D4E5302C8D996F007F820A /* Core */,
@ -531,6 +583,13 @@
/* End PBXProject section */ /* End PBXProject section */
/* Begin PBXResourcesBuildPhase section */ /* Begin PBXResourcesBuildPhase section */
D11000092F70000100112233 /* Resources */ = {
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
D05B9F7029E39EEC008CB1F9 /* Resources */ = { D05B9F7029E39EEC008CB1F9 /* Resources */ = {
isa = PBXResourcesBuildPhase; isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647; buildActionMask = 2147483647;
@ -594,6 +653,14 @@
/* End PBXShellScriptBuildPhase section */ /* End PBXShellScriptBuildPhase section */
/* Begin PBXSourcesBuildPhase section */ /* Begin PBXSourcesBuildPhase section */
D110000A2F70000100112233 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
D11000012F70000100112233 /* BurrowUITests.swift in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
D020F64F29E4A697002790F6 /* Sources */ = { D020F64F29E4A697002790F6 /* Sources */ = {
isa = PBXSourcesBuildPhase; isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647; buildActionMask = 2147483647;
@ -652,6 +719,11 @@
/* End PBXSourcesBuildPhase section */ /* End PBXSourcesBuildPhase section */
/* Begin PBXTargetDependency section */ /* Begin PBXTargetDependency section */
D110000B2F70000100112233 /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
target = D05B9F7129E39EEC008CB1F9 /* App */;
targetProxy = D11000022F70000100112233 /* PBXContainerItemProxy */;
};
D020F65C29E4A697002790F6 /* PBXTargetDependency */ = { D020F65C29E4A697002790F6 /* PBXTargetDependency */ = {
isa = PBXTargetDependency; isa = PBXTargetDependency;
target = D020F65229E4A697002790F6 /* NetworkExtension */; target = D020F65229E4A697002790F6 /* NetworkExtension */;
@ -694,6 +766,20 @@
/* End PBXTargetDependency section */ /* End PBXTargetDependency section */
/* Begin XCBuildConfiguration section */ /* Begin XCBuildConfiguration section */
D110000C2F70000100112233 /* Debug */ = {
isa = XCBuildConfiguration;
baseConfigurationReference = D11000052F70000100112233 /* UITests.xcconfig */;
buildSettings = {
};
name = Debug;
};
D110000D2F70000100112233 /* Release */ = {
isa = XCBuildConfiguration;
baseConfigurationReference = D11000052F70000100112233 /* UITests.xcconfig */;
buildSettings = {
};
name = Release;
};
D020F65F29E4A697002790F6 /* Debug */ = { D020F65F29E4A697002790F6 /* Debug */ = {
isa = XCBuildConfiguration; isa = XCBuildConfiguration;
baseConfigurationReference = D020F66229E4A6E5002790F6 /* NetworkExtension.xcconfig */; baseConfigurationReference = D020F66229E4A6E5002790F6 /* NetworkExtension.xcconfig */;
@ -781,6 +867,15 @@
/* End XCBuildConfiguration section */ /* End XCBuildConfiguration section */
/* Begin XCConfigurationList section */ /* Begin XCConfigurationList section */
D110000E2F70000100112233 /* Build configuration list for PBXNativeTarget "BurrowUITests" */ = {
isa = XCConfigurationList;
buildConfigurations = (
D110000C2F70000100112233 /* Debug */,
D110000D2F70000100112233 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
D020F65E29E4A697002790F6 /* Build configuration list for PBXNativeTarget "NetworkExtension" */ = { D020F65E29E4A697002790F6 /* Build configuration list for PBXNativeTarget "NetworkExtension" */ = {
isa = XCConfigurationList; isa = XCConfigurationList;
buildConfigurations = ( buildConfigurations = (

View file

@ -28,7 +28,20 @@
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
shouldUseLaunchSchemeArgsEnv = "YES" shouldUseLaunchSchemeArgsEnv = "YES"
shouldAutocreateTestPlan = "YES"> shouldAutocreateTestPlan = "NO">
<Testables>
<TestableReference
skipped = "NO"
parallelizable = "YES">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "D11000082F70000100112233"
BuildableName = "BurrowUITests.xctest"
BlueprintName = "BurrowUITests"
ReferencedContainer = "container:Burrow.xcodeproj">
</BuildableReference>
</TestableReference>
</Testables>
</TestAction> </TestAction>
<LaunchAction <LaunchAction
buildConfiguration = "Debug" buildConfiguration = "Debug"

View file

@ -0,0 +1,14 @@
// Build settings for the BurrowUITests UI-testing bundle.
#include "Compiler.xcconfig"

// iOS only (device and simulator); iPhone and iPad.
SUPPORTED_PLATFORMS = iphonesimulator iphoneos
TARGETED_DEVICE_FAMILY[sdk=iphone*] = 1,2

PRODUCT_NAME = $(TARGET_NAME)
// Bundle identifier is derived from the host app's identifier.
PRODUCT_BUNDLE_IDENTIFIER = $(APP_BUNDLE_IDENTIFIER).uitests

STRING_CATALOG_GENERATE_SYMBOLS = NO
SWIFT_EMIT_LOC_STRINGS = NO

// Embed the Swift runtime and search for frameworks next to the binary —
// presumably required for the test bundle to load standalone; confirm.
ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES
LD_RUNPATH_SEARCH_PATHS = $(inherited) @executable_path/Frameworks @loader_path/Frameworks

// The UI tests launch and drive the "App" target.
TEST_TARGET_NAME = App

View file

@ -1,5 +1,7 @@
import Foundation
import GRPC import GRPC
import NIOTransportServices import NIOTransportServices
import SwiftProtobuf
public typealias TunnelClient = Burrow_TunnelAsyncClient public typealias TunnelClient = Burrow_TunnelAsyncClient
public typealias NetworksClient = Burrow_NetworksAsyncClient public typealias NetworksClient = Burrow_NetworksAsyncClient
@ -30,3 +32,427 @@ extension NetworksClient: Client {
self.init(channel: channel, defaultCallOptions: .init(), interceptors: .none) self.init(channel: channel, defaultCallOptions: .init(), interceptors: .none)
} }
} }
/// Request to discover a Tailnet control server, keyed by account email.
public struct Burrow_TailnetDiscoverRequest: Sendable {
    /// Email address used as the discovery input.
    public var email: String = ""

    // Preserves fields this build of the schema does not recognize.
    public var unknownFields = SwiftProtobuf.UnknownStorage()

    public init() {}
}
/// Result of Tailnet discovery for an email's domain.
public struct Burrow_TailnetDiscoverResponse: Sendable {
    /// Domain the discovery resolved.
    public var domain: String = ""
    /// Control-server authority for the tailnet.
    public var authority: String = ""
    /// OIDC issuer URL associated with the authority.
    public var oidcIssuer: String = ""
    /// Whether the deployment is managed — presumably distinguishes
    /// Tailscale-managed from self-hosted authorities; confirm with the daemon.
    public var managed: Bool = false

    // Preserves fields this build of the schema does not recognize.
    public var unknownFields = SwiftProtobuf.UnknownStorage()

    public init() {}
}
/// Request to probe reachability of a Tailnet control authority.
public struct Burrow_TailnetProbeRequest: Sendable {
    /// Authority (control-server endpoint) to probe.
    public var authority: String = ""

    // Preserves fields this build of the schema does not recognize.
    public var unknownFields = SwiftProtobuf.UnknownStorage()

    public init() {}
}
/// Outcome of probing a Tailnet control authority.
public struct Burrow_TailnetProbeResponse: Sendable {
    /// Authority that was probed.
    public var authority: String = ""
    /// Status code from the probe (presumably HTTP — confirm); 0 when unset.
    public var statusCode: Int32 = 0
    /// Short human-readable outcome.
    public var summary: String = ""
    /// Longer diagnostic detail.
    public var detail: String = ""
    /// True when the authority was reachable.
    public var reachable: Bool = false

    // Preserves fields this build of the schema does not recognize.
    public var unknownFields = SwiftProtobuf.UnknownStorage()

    public init() {}
}
/// Request to start a Tailnet login session.
public struct Burrow_TailnetLoginStartRequest: Sendable {
    /// Account the login belongs to.
    public var accountName: String = ""
    /// Identity to log in as.
    public var identityName: String = ""
    /// Hostname to register for this node.
    public var hostname: String = ""
    /// Control-server authority to log in against.
    public var authority: String = ""

    // Preserves fields this build of the schema does not recognize.
    public var unknownFields = SwiftProtobuf.UnknownStorage()

    public init() {}
}
/// Request for the status of an in-flight Tailnet login session.
public struct Burrow_TailnetLoginStatusRequest: Sendable {
    /// Identifier of the login session being queried.
    public var sessionID: String = ""

    // Preserves fields this build of the schema does not recognize.
    public var unknownFields = SwiftProtobuf.UnknownStorage()

    public init() {}
}
/// Request to cancel an in-flight Tailnet login session.
public struct Burrow_TailnetLoginCancelRequest: Sendable {
    /// Identifier of the login session to cancel.
    public var sessionID: String = ""

    // Preserves fields this build of the schema does not recognize.
    public var unknownFields = SwiftProtobuf.UnknownStorage()

    public init() {}
}
/// Snapshot of a Tailnet login session's state.
public struct Burrow_TailnetLoginStatusResponse: Sendable {
    /// Identifier of the login session.
    public var sessionID: String = ""
    /// Backend state string — presumably the Tailscale backend state name
    /// (e.g. "Running", "NeedsLogin"); confirm with the daemon.
    public var backendState: String = ""
    /// Browser URL the user must visit to complete authentication, if any.
    public var authURL: String = ""
    /// True when the tailnet connection is up and running.
    public var running: Bool = false
    /// True when interactive login is still required.
    public var needsLogin: Bool = false
    /// Name of the tailnet once joined.
    public var tailnetName: String = ""
    /// MagicDNS suffix for the tailnet.
    public var magicDNSSuffix: String = ""
    /// This node's DNS name within the tailnet.
    public var selfDNSName: String = ""
    /// Tailnet IP addresses assigned to this node.
    public var tailnetIPs: [String] = []
    /// Health messages reported for the session.
    public var health: [String] = []

    // Preserves fields this build of the schema does not recognize.
    public var unknownFields = SwiftProtobuf.UnknownStorage()

    public init() {}
}
// MARK: - Burrow_TailnetDiscoverRequest + SwiftProtobuf.Message
extension Burrow_TailnetDiscoverRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
    public static let protoMessageName: String = "burrow.TailnetDiscoverRequest"
    public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
        1: .same(proto: "email")
    ]

    /// Decodes the message, reading field 1 (`email`) and skipping any other
    /// field numbers.
    public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
        while let number = try decoder.nextFieldNumber() {
            guard number == 1 else { continue }
            try decoder.decodeSingularStringField(value: &self.email)
        }
    }

    /// Visits the non-default `email` field, then any preserved unknown fields.
    public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
        if self.email.isEmpty == false {
            try visitor.visitSingularStringField(value: self.email, fieldNumber: 1)
        }
        try unknownFields.traverse(visitor: &visitor)
    }
}
// MARK: - Burrow_TailnetDiscoverResponse + SwiftProtobuf.Message
extension Burrow_TailnetDiscoverResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
    public static let protoMessageName: String = "burrow.TailnetDiscoverResponse"
    // Use `.standard` for snake_case proto names so the proto3 JSON name is
    // derived correctly ("oidcIssuer", with "oidc_issuer" also accepted on
    // parse) — matching protoc-gen-swift output. `.same` would advertise the
    // JSON name as "oidc_issuer" and reject "oidcIssuer" from conformant
    // JSON encoders. Binary (gRPC) encoding is unaffected.
    public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
        1: .same(proto: "domain"),
        2: .same(proto: "authority"),
        3: .standard(proto: "oidc_issuer"),
        4: .same(proto: "managed"),
    ]

    /// Decodes fields 1–4, skipping unknown field numbers.
    public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
        while let fieldNumber = try decoder.nextFieldNumber() {
            switch fieldNumber {
            case 1: try decoder.decodeSingularStringField(value: &self.domain)
            case 2: try decoder.decodeSingularStringField(value: &self.authority)
            case 3: try decoder.decodeSingularStringField(value: &self.oidcIssuer)
            case 4: try decoder.decodeSingularBoolField(value: &self.managed)
            default: break
            }
        }
    }

    /// Emits non-default fields in field-number order, then unknown fields.
    public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
        if !self.domain.isEmpty {
            try visitor.visitSingularStringField(value: self.domain, fieldNumber: 1)
        }
        if !self.authority.isEmpty {
            try visitor.visitSingularStringField(value: self.authority, fieldNumber: 2)
        }
        if !self.oidcIssuer.isEmpty {
            try visitor.visitSingularStringField(value: self.oidcIssuer, fieldNumber: 3)
        }
        if self.managed {
            try visitor.visitSingularBoolField(value: self.managed, fieldNumber: 4)
        }
        try unknownFields.traverse(visitor: &visitor)
    }
}
// MARK: - Burrow_TailnetProbeRequest + SwiftProtobuf.Message
extension Burrow_TailnetProbeRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
    public static let protoMessageName: String = "burrow.TailnetProbeRequest"
    public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
        1: .same(proto: "authority")
    ]

    /// Decodes the message, reading field 1 (`authority`) and skipping any
    /// other field numbers.
    public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
        while let number = try decoder.nextFieldNumber() {
            guard number == 1 else { continue }
            try decoder.decodeSingularStringField(value: &self.authority)
        }
    }

    /// Visits the non-default `authority` field, then any preserved unknown
    /// fields.
    public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
        if self.authority.isEmpty == false {
            try visitor.visitSingularStringField(value: self.authority, fieldNumber: 1)
        }
        try unknownFields.traverse(visitor: &visitor)
    }
}
extension Burrow_TailnetProbeResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
  public static let protoMessageName: String = "burrow.TailnetProbeResponse"

  /// Field-number → name map.
  /// Fix: `status_code` now uses `.standard` (was `.same`), so SwiftProtobuf
  /// derives the camelCase JSON name `statusCode` per the proto3 JSON mapping.
  /// This matches the `.standard` entries used for every other snake_case
  /// field in this file.
  public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
    1: .same(proto: "authority"),
    2: .standard(proto: "status_code"),
    3: .same(proto: "summary"),
    4: .same(proto: "detail"),
    5: .same(proto: "reachable"),
  ]

  /// Decodes fields 1–5; unrecognized field numbers are skipped.
  public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
    while let fieldNumber = try decoder.nextFieldNumber() {
      switch fieldNumber {
      case 1: try decoder.decodeSingularStringField(value: &self.authority)
      case 2: try decoder.decodeSingularInt32Field(value: &self.statusCode)
      case 3: try decoder.decodeSingularStringField(value: &self.summary)
      case 4: try decoder.decodeSingularStringField(value: &self.detail)
      case 5: try decoder.decodeSingularBoolField(value: &self.reachable)
      default: break
      }
    }
  }

  /// Emits non-default fields in ascending field-number order, then any
  /// preserved unknown fields.
  public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
    if !self.authority.isEmpty {
      try visitor.visitSingularStringField(value: self.authority, fieldNumber: 1)
    }
    if self.statusCode != 0 {
      try visitor.visitSingularInt32Field(value: self.statusCode, fieldNumber: 2)
    }
    if !self.summary.isEmpty {
      try visitor.visitSingularStringField(value: self.summary, fieldNumber: 3)
    }
    if !self.detail.isEmpty {
      try visitor.visitSingularStringField(value: self.detail, fieldNumber: 4)
    }
    if self.reachable {
      try visitor.visitSingularBoolField(value: self.reachable, fieldNumber: 5)
    }
    try unknownFields.traverse(visitor: &visitor)
  }
}
/// Wire-format conformance for the login-start request
/// (four singular string fields).
extension Burrow_TailnetLoginStartRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
  public static let protoMessageName: String = "burrow.TailnetLoginStartRequest"
  public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
    1: .standard(proto: "account_name"),
    2: .standard(proto: "identity_name"),
    3: .same(proto: "hostname"),
    4: .same(proto: "authority"),
  ]

  /// Decodes fields 1–4; unrecognized field numbers are skipped.
  public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
    while let number = try decoder.nextFieldNumber() {
      switch number {
      case 1: try decoder.decodeSingularStringField(value: &self.accountName)
      case 2: try decoder.decodeSingularStringField(value: &self.identityName)
      case 3: try decoder.decodeSingularStringField(value: &self.hostname)
      case 4: try decoder.decodeSingularStringField(value: &self.authority)
      default: break
      }
    }
  }

  /// Emits each non-empty string field in ascending field-number order,
  /// then any preserved unknown fields.
  public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
    let stringFields: [(Int, String)] = [
      (1, self.accountName),
      (2, self.identityName),
      (3, self.hostname),
      (4, self.authority),
    ]
    for (number, value) in stringFields where !value.isEmpty {
      try visitor.visitSingularStringField(value: value, fieldNumber: number)
    }
    try unknownFields.traverse(visitor: &visitor)
  }
}
/// Wire-format conformance for the login-status request (single string
/// field: `session_id`, field 1).
extension Burrow_TailnetLoginStatusRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
  public static let protoMessageName: String = "burrow.TailnetLoginStatusRequest"
  public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
    1: .standard(proto: "session_id")
  ]

  /// Reads field 1 into `sessionID`; other field numbers are ignored.
  public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
    while let tag = try decoder.nextFieldNumber() {
      if tag == 1 {
        try decoder.decodeSingularStringField(value: &self.sessionID)
      }
    }
  }

  /// Writes `sessionID` when non-empty, then any preserved unknown fields.
  public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
    let sessionValue = self.sessionID
    if !sessionValue.isEmpty {
      try visitor.visitSingularStringField(value: sessionValue, fieldNumber: 1)
    }
    try unknownFields.traverse(visitor: &visitor)
  }
}
/// Wire-format conformance for the login-cancel request (single string
/// field: `session_id`, field 1).
extension Burrow_TailnetLoginCancelRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
  public static let protoMessageName: String = "burrow.TailnetLoginCancelRequest"
  public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
    1: .standard(proto: "session_id")
  ]

  /// Reads field 1 into `sessionID`; other field numbers are ignored.
  public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
    while let tag = try decoder.nextFieldNumber() {
      if tag == 1 {
        try decoder.decodeSingularStringField(value: &self.sessionID)
      }
    }
  }

  /// Writes `sessionID` when non-empty, then any preserved unknown fields.
  public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
    let sessionValue = self.sessionID
    if !sessionValue.isEmpty {
      try visitor.visitSingularStringField(value: sessionValue, fieldNumber: 1)
    }
    try unknownFields.traverse(visitor: &visitor)
  }
}
// Wire-format conformance for the login-status response. Ten fields:
// strings (1-3, 6-8), bools (4-5), and repeated strings (9-10). The
// traverse order below is ascending by field number, which the
// SwiftProtobuf visitor contract relies on — do not reorder.
extension Burrow_TailnetLoginStatusResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
public static let protoMessageName: String = "burrow.TailnetLoginStatusResponse"
// `.standard` derives camelCase JSON names from the snake_case proto names
// (e.g. session_id -> sessionId); `.same` keeps the name as-is.
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .standard(proto: "session_id"),
2: .standard(proto: "backend_state"),
3: .standard(proto: "auth_url"),
4: .same(proto: "running"),
5: .standard(proto: "needs_login"),
6: .standard(proto: "tailnet_name"),
7: .standard(proto: "magic_dns_suffix"),
8: .standard(proto: "self_dns_name"),
9: .standard(proto: "tailnet_ips"),
10: .same(proto: "health"),
]
// Decodes fields 1-10 from the wire format; unrecognized field numbers are
// skipped (the decoder preserves them as unknown fields).
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
switch fieldNumber {
case 1: try decoder.decodeSingularStringField(value: &self.sessionID)
case 2: try decoder.decodeSingularStringField(value: &self.backendState)
case 3: try decoder.decodeSingularStringField(value: &self.authURL)
case 4: try decoder.decodeSingularBoolField(value: &self.running)
case 5: try decoder.decodeSingularBoolField(value: &self.needsLogin)
case 6: try decoder.decodeSingularStringField(value: &self.tailnetName)
case 7: try decoder.decodeSingularStringField(value: &self.magicDNSSuffix)
case 8: try decoder.decodeSingularStringField(value: &self.selfDNSName)
case 9: try decoder.decodeRepeatedStringField(value: &self.tailnetIPs)
case 10: try decoder.decodeRepeatedStringField(value: &self.health)
default: break
}
}
}
// Serializes every non-default field (proto3 semantics: empty strings,
// false bools, and empty arrays are omitted), then any unknown fields.
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
if !self.sessionID.isEmpty {
try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1)
}
if !self.backendState.isEmpty {
try visitor.visitSingularStringField(value: self.backendState, fieldNumber: 2)
}
if !self.authURL.isEmpty {
try visitor.visitSingularStringField(value: self.authURL, fieldNumber: 3)
}
if self.running {
try visitor.visitSingularBoolField(value: self.running, fieldNumber: 4)
}
if self.needsLogin {
try visitor.visitSingularBoolField(value: self.needsLogin, fieldNumber: 5)
}
if !self.tailnetName.isEmpty {
try visitor.visitSingularStringField(value: self.tailnetName, fieldNumber: 6)
}
if !self.magicDNSSuffix.isEmpty {
try visitor.visitSingularStringField(value: self.magicDNSSuffix, fieldNumber: 7)
}
if !self.selfDNSName.isEmpty {
try visitor.visitSingularStringField(value: self.selfDNSName, fieldNumber: 8)
}
if !self.tailnetIPs.isEmpty {
try visitor.visitRepeatedStringField(value: self.tailnetIPs, fieldNumber: 9)
}
if !self.health.isEmpty {
try visitor.visitRepeatedStringField(value: self.health, fieldNumber: 10)
}
try unknownFields.traverse(visitor: &visitor)
}
}
/// Async unary gRPC client for the `burrow.TailnetControl` service.
/// Each method resolves effective call options (per-call override falls
/// back to `defaultCallOptions`) and performs a single unary RPC with no
/// interceptors.
public struct TailnetClient: Client, GRPCClient {
  public let channel: GRPCChannel
  public var defaultCallOptions: CallOptions

  /// - Parameter channel: Transport used for every RPC issued by this client.
  public init(channel: any GRPCChannel) {
    self.channel = channel
    self.defaultCallOptions = .init()
  }

  /// Calls `TailnetControl/Discover` to look up a tailnet server.
  public func discover(
    _ request: Burrow_TailnetDiscoverRequest,
    callOptions: CallOptions? = nil
  ) async throws -> Burrow_TailnetDiscoverResponse {
    let options = callOptions ?? defaultCallOptions
    return try await performAsyncUnaryCall(
      path: "/burrow.TailnetControl/Discover",
      request: request,
      callOptions: options,
      interceptors: []
    )
  }

  /// Calls `TailnetControl/Probe` to check an authority's reachability.
  public func probe(
    _ request: Burrow_TailnetProbeRequest,
    callOptions: CallOptions? = nil
  ) async throws -> Burrow_TailnetProbeResponse {
    let options = callOptions ?? defaultCallOptions
    return try await performAsyncUnaryCall(
      path: "/burrow.TailnetControl/Probe",
      request: request,
      callOptions: options,
      interceptors: []
    )
  }

  /// Calls `TailnetControl/LoginStart` to begin a login session.
  public func loginStart(
    _ request: Burrow_TailnetLoginStartRequest,
    callOptions: CallOptions? = nil
  ) async throws -> Burrow_TailnetLoginStatusResponse {
    let options = callOptions ?? defaultCallOptions
    return try await performAsyncUnaryCall(
      path: "/burrow.TailnetControl/LoginStart",
      request: request,
      callOptions: options,
      interceptors: []
    )
  }

  /// Calls `TailnetControl/LoginStatus` to poll an existing login session.
  public func loginStatus(
    _ request: Burrow_TailnetLoginStatusRequest,
    callOptions: CallOptions? = nil
  ) async throws -> Burrow_TailnetLoginStatusResponse {
    let options = callOptions ?? defaultCallOptions
    return try await performAsyncUnaryCall(
      path: "/burrow.TailnetControl/LoginStatus",
      request: request,
      callOptions: options,
      interceptors: []
    )
  }

  /// Calls `TailnetControl/LoginCancel` to abandon a login session.
  public func loginCancel(
    _ request: Burrow_TailnetLoginCancelRequest,
    callOptions: CallOptions? = nil
  ) async throws -> Burrow_Empty {
    let options = callOptions ?? defaultCallOptions
    return try await performAsyncUnaryCall(
      path: "/burrow.TailnetControl/LoginCancel",
      request: request,
      callOptions: options,
      interceptors: []
    )
  }
}

View file

@ -1,7 +1,9 @@
import AuthenticationServices
import BurrowConfiguration import BurrowConfiguration
import Foundation import Foundation
import SwiftUI import SwiftUI
#if canImport(AuthenticationServices)
import AuthenticationServices
#endif
#if canImport(UIKit) #if canImport(UIKit)
import UIKit import UIKit
#elseif canImport(AppKit) #elseif canImport(AppKit)
@ -204,7 +206,7 @@ private enum ConfigurationSheet: String, CaseIterable, Identifiable {
switch self { switch self {
case .wireGuard: .wireGuard case .wireGuard: .wireGuard
case .tor: .tor case .tor: .tor
case .tailnet: .headscale case .tailnet: .tailnet
} }
} }
@ -274,6 +276,7 @@ private struct QuickAddButton: View {
} }
.frame(maxWidth: .infinity, minHeight: 64, alignment: .leading) .frame(maxWidth: .infinity, minHeight: 64, alignment: .leading)
} }
.accessibilityIdentifier("quick-add-\(sheet.rawValue)")
.buttonStyle(.floating(color: sheet.quickActionColor, cornerRadius: 18)) .buttonStyle(.floating(color: sheet.quickActionColor, cornerRadius: 18))
} }
} }
@ -285,13 +288,12 @@ private struct AccountDraft {
var wireGuardConfig = "" var wireGuardConfig = ""
var discoveryEmail = "" var discoveryEmail = ""
var tailnetProvider: TailnetProvider = .tailscale
var authority = "" var authority = ""
var tailnet = "" var tailnet = ""
var hostname = ProcessInfo.processInfo.hostName var hostname = ProcessInfo.processInfo.hostName
var username = "" var username = ""
var secret = "" var secret = ""
var authMode: AccountAuthMode = .web var authMode: AccountAuthMode = .none
var torAddresses = "100.64.0.2/32" var torAddresses = "100.64.0.2/32"
var torDNS = "1.1.1.1, 1.0.0.1" var torDNS = "1.1.1.1, 1.0.0.1"
@ -311,13 +313,13 @@ private struct AccountDraft {
accountName = "default" accountName = "default"
identityName = "apple" identityName = "apple"
authority = TailnetProvider.tailscale.defaultAuthority ?? "" authority = TailnetProvider.tailscale.defaultAuthority ?? ""
authMode = .web
} }
} }
} }
private struct ConfigurationSheetView: View { private struct ConfigurationSheetView: View {
@Environment(\.dismiss) private var dismiss @Environment(\.dismiss) private var dismiss
@Environment(\.webAuthenticationSession) private var webAuthenticationSession
let sheet: ConfigurationSheet let sheet: ConfigurationSheet
let networkViewModel: NetworkViewModel let networkViewModel: NetworkViewModel
@ -326,17 +328,21 @@ private struct ConfigurationSheetView: View {
@State private var draft: AccountDraft @State private var draft: AccountDraft
@State private var isSubmitting = false @State private var isSubmitting = false
@State private var errorMessage: String? @State private var errorMessage: String?
@State private var loginSessionID: String?
@State private var loginStatus: TailnetLoginStatus?
@State private var discoveryStatus: TailnetDiscoveryResponse? @State private var discoveryStatus: TailnetDiscoveryResponse?
@State private var discoveryError: String? @State private var discoveryError: String?
@State private var isDiscoveringTailnet = false @State private var isDiscoveringTailnet = false
@State private var authorityProbeStatus: TailnetAuthorityProbeStatus? @State private var authorityProbeStatus: TailnetAuthorityProbeStatus?
@State private var authorityProbeError: String? @State private var authorityProbeError: String?
@State private var isProbingAuthority = false @State private var isProbingAuthority = false
@State private var pollingTask: Task<Void, Never>? @State private var tailnetLoginStatus: TailnetLoginStatus?
@State private var tailnetLoginError: String?
@State private var tailnetLoginSessionID: String?
@State private var isStartingTailnetLogin = false
@State private var tailnetPresentedAuthURL: URL?
@State private var preserveTailnetLoginSession = false
@State private var browserAuthenticator = TailnetBrowserAuthenticator()
@State private var tailnetLoginPollTask: Task<Void, Never>?
@State private var didRunAutomation = false @State private var didRunAutomation = false
@State private var webAuthenticationTask: Task<Void, Never>?
init( init(
sheet: ConfigurationSheet, sheet: ConfigurationSheet,
@ -404,9 +410,12 @@ private struct ConfigurationSheetView: View {
.toolbar { .toolbar {
ToolbarItem(placement: .cancellationAction) { ToolbarItem(placement: .cancellationAction) {
Button("Cancel") { Button("Cancel") {
Task { @MainActor in
await cancelTailnetLoginIfNeeded()
dismiss() dismiss()
} }
} }
}
#if os(iOS) #if os(iOS)
ToolbarItem(placement: .topBarTrailing) { ToolbarItem(placement: .topBarTrailing) {
Menu { Menu {
@ -447,19 +456,26 @@ private struct ConfigurationSheetView: View {
.onAppear { .onAppear {
runAutomationIfNeeded() runAutomationIfNeeded()
} }
.onChange(of: draft.tailnetProvider) { _, _ in
resetAuthorityProbe()
}
.onChange(of: draft.authority) { _, _ in .onChange(of: draft.authority) { _, _ in
resetAuthorityProbe() resetAuthorityProbe()
} }
.onChange(of: draft.discoveryEmail) { _, _ in .onChange(of: draft.discoveryEmail) { _, _ in
resetTailnetDiscoveryFeedback() resetTailnetDiscoveryFeedback()
} }
.onChange(of: draft.authMode) { _, newMode in
guard newMode != .web else { return }
Task { @MainActor in
await cancelTailnetLoginIfNeeded()
}
}
.onDisappear { .onDisappear {
pollingTask?.cancel() tailnetLoginPollTask?.cancel()
webAuthenticationTask?.cancel() browserAuthenticator.cancel()
webAuthenticationTask = nil if !preserveTailnetLoginSession {
Task { @MainActor in
await cancelTailnetLoginIfNeeded()
}
}
} }
} }
@ -467,10 +483,10 @@ private struct ConfigurationSheetView: View {
private var tailnetSections: some View { private var tailnetSections: some View {
Section("Connection") { Section("Connection") {
TextField("Email address", text: $draft.discoveryEmail) TextField("Email address", text: $draft.discoveryEmail)
.textInputAutocapitalization(.never) .burrowEmailField()
.keyboardType(.emailAddress)
.burrowLoginField() .burrowLoginField()
.autocorrectionDisabled() .autocorrectionDisabled()
.accessibilityIdentifier("tailnet-discovery-email")
Button { Button {
discoverTailnetAuthority() discoverTailnetAuthority()
@ -483,6 +499,7 @@ private struct ConfigurationSheetView: View {
} }
.buttonStyle(.borderless) .buttonStyle(.borderless)
.disabled(isDiscoveringTailnet || normalizedOptional(draft.discoveryEmail) == nil) .disabled(isDiscoveringTailnet || normalizedOptional(draft.discoveryEmail) == nil)
.accessibilityIdentifier("tailnet-find-server")
if let discoveryStatus { if let discoveryStatus {
tailnetDiscoveryCard(status: discoveryStatus, failure: nil) tailnetDiscoveryCard(status: discoveryStatus, failure: nil)
@ -490,25 +507,14 @@ private struct ConfigurationSheetView: View {
tailnetDiscoveryCard(status: nil, failure: discoveryError) tailnetDiscoveryCard(status: nil, failure: discoveryError)
} }
Picker( TextField("Authority URL", text: $draft.authority)
"Provider",
selection: Binding(
get: { draft.tailnetProvider },
set: { applyTailnetProvider($0) }
)
) {
ForEach(TailnetProvider.allCases) { provider in
Text(provider.title).tag(provider)
}
}
.pickerStyle(.menu)
tailnetProviderCard
if draft.tailnetProvider.requiresControlURL {
TextField("Server URL", text: $draft.authority)
.burrowLoginField() .burrowLoginField()
.autocorrectionDisabled() .autocorrectionDisabled()
.accessibilityIdentifier("tailnet-authority")
Text("Use the managed Tailnet authority or enter a custom Tailnet control server.")
.font(.footnote)
.foregroundStyle(.secondary)
Button { Button {
probeTailnetAuthority() probeTailnetAuthority()
@ -521,49 +527,64 @@ private struct ConfigurationSheetView: View {
} }
.buttonStyle(.borderless) .buttonStyle(.borderless)
.disabled(isProbingAuthority || normalizedOptional(draft.authority) == nil) .disabled(isProbingAuthority || normalizedOptional(draft.authority) == nil)
.accessibilityIdentifier("tailnet-check-connection")
if let authorityProbeStatus { if let authorityProbeStatus {
tailnetAuthorityProbeCard(status: authorityProbeStatus, failure: nil) tailnetAuthorityProbeCard(status: authorityProbeStatus, failure: nil)
} else if let authorityProbeError { } else if let authorityProbeError {
tailnetAuthorityProbeCard(status: nil, failure: authorityProbeError) tailnetAuthorityProbeCard(status: nil, failure: authorityProbeError)
} }
} else {
LabeledContent("Server") {
Text("Tailscale managed")
.foregroundStyle(.secondary)
}
}
TextField("Tailnet", text: $draft.tailnet) TextField("Tailnet", text: $draft.tailnet)
.burrowLoginField() .burrowLoginField()
.autocorrectionDisabled() .autocorrectionDisabled()
.accessibilityIdentifier("tailnet-name")
} }
Section("Authentication") { Section("Authentication") {
if tailnetUsesWebLogin {
tailnetWebLoginCard
} else {
TextField("Username", text: $draft.username)
.burrowLoginField()
.autocorrectionDisabled()
Picker("Authentication", selection: $draft.authMode) { Picker("Authentication", selection: $draft.authMode) {
ForEach(availableTailnetAuthModes) { mode in ForEach(availableTailnetAuthModes) { mode in
Text(mode.title).tag(mode) Text(mode.title).tag(mode)
} }
} }
.pickerStyle(.menu) .pickerStyle(.menu)
if draft.authMode == .web {
Button {
startTailnetLogin()
} label: {
Label {
Text(isStartingTailnetLogin ? "Starting Sign-In" : tailnetSignInActionTitle)
} icon: {
Image(systemName: isStartingTailnetLogin ? "hourglass" : "person.badge.key")
}
}
.buttonStyle(.borderless)
.disabled(isStartingTailnetLogin || normalizedOptional(draft.authority) == nil)
.accessibilityIdentifier("tailnet-start-sign-in")
if let tailnetLoginStatus {
tailnetLoginCard(status: tailnetLoginStatus, failure: nil)
} else if let tailnetLoginError {
tailnetLoginCard(status: nil, failure: tailnetLoginError)
}
} else {
TextField("Username", text: $draft.username)
.burrowLoginField()
.autocorrectionDisabled()
if draft.authMode != .none { if draft.authMode != .none {
SecureField( SecureField(
draft.authMode == .password ? "Password" : "Preauth Key", draft.authMode == .password ? "Password" : "Preauth Key",
text: $draft.secret text: $draft.secret
) )
} }
Text("Credentials stay on-device. Burrow uses them when it needs to register or refresh this identity.") }
Text(tailnetAuthenticationFootnote)
.font(.footnote) .font(.footnote)
.foregroundStyle(.secondary) .foregroundStyle(.secondary)
} }
} }
}
private var sheetSummaryCard: some View { private var sheetSummaryCard: some View {
VStack(alignment: .leading, spacing: 10) { VStack(alignment: .leading, spacing: 10) {
@ -618,10 +639,11 @@ private struct ConfigurationSheetView: View {
if sheet == .tailnet { if sheet == .tailnet {
HStack(spacing: 8) { HStack(spacing: 8) {
summaryBadge(draft.tailnetProvider.title) summaryBadge(isManagedTailnetAuthority ? "Managed" : "Custom")
summaryBadge( summaryBadge(draft.authMode.title)
tailnetUsesWebLogin ? "Web Sign-In" : draft.authMode.title if tailnetLoginStatus?.running == true {
) summaryBadge("Signed In")
}
} }
} }
} }
@ -632,79 +654,6 @@ private struct ConfigurationSheetView: View {
) )
} }
private var tailnetProviderCard: some View {
VStack(alignment: .leading, spacing: 6) {
HStack(spacing: 10) {
Image(systemName: tailnetProviderIconName)
.font(.headline)
.foregroundStyle(sheetAccentColor)
.frame(width: 28, height: 28)
.background(
Circle()
.fill(sheetAccentColor.opacity(0.14))
)
VStack(alignment: .leading, spacing: 2) {
Text(draft.tailnetProvider.title)
.font(.headline)
Text(draft.tailnetProvider.subtitle)
.font(.footnote)
.foregroundStyle(.secondary)
}
Spacer()
}
}
.padding(12)
.background(
RoundedRectangle(cornerRadius: 16)
.fill(.thinMaterial)
)
}
@ViewBuilder
private var tailnetWebLoginCard: some View {
VStack(alignment: .leading, spacing: 10) {
Text("Sign in with the shared browser session.")
.font(.subheadline.weight(.medium))
if let loginStatus {
labeledValue("State", loginStatus.backendState)
if let tailnetName = loginStatus.tailnetName {
labeledValue("Tailnet", tailnetName)
}
if let dnsName = loginStatus.selfDNSName {
labeledValue("Device", dnsName)
}
if !loginStatus.tailscaleIPs.isEmpty {
labeledValue("Addresses", loginStatus.tailscaleIPs.joined(separator: ", "))
}
if let authURL = loginStatus.authURL {
Button("Resume Sign-In") {
if let url = URL(string: authURL) {
openLoginURL(url)
}
}
.buttonStyle(.borderless)
}
if !loginStatus.health.isEmpty {
Text(loginStatus.health.joined(separator: ""))
.font(.footnote)
.foregroundStyle(.secondary)
}
} else {
Text("Burrow launches the local bridge, then opens the real provider sign-in page in-app.")
.font(.footnote)
.foregroundStyle(.secondary)
}
}
.padding(12)
.background(
RoundedRectangle(cornerRadius: 16)
.fill(.thinMaterial)
)
}
private func tailnetAuthorityProbeCard( private func tailnetAuthorityProbeCard(
status: TailnetAuthorityProbeStatus?, status: TailnetAuthorityProbeStatus?,
failure: String? failure: String?
@ -731,6 +680,7 @@ private struct ConfigurationSheetView: View {
RoundedRectangle(cornerRadius: 16) RoundedRectangle(cornerRadius: 16)
.fill(.thinMaterial) .fill(.thinMaterial)
) )
.accessibilityIdentifier("tailnet-authority-probe-card")
} }
private func tailnetDiscoveryCard( private func tailnetDiscoveryCard(
@ -739,12 +689,15 @@ private struct ConfigurationSheetView: View {
) -> some View { ) -> some View {
VStack(alignment: .leading, spacing: 6) { VStack(alignment: .leading, spacing: 6) {
if let status { if let status {
Text("Discovered \(status.provider.title)") Text("Discovered Tailnet Server")
.font(.subheadline.weight(.medium)) .font(.subheadline.weight(.medium))
Text(status.authority) Text(status.authority)
.font(.footnote.monospaced()) .font(.footnote.monospaced())
.foregroundStyle(.secondary) .foregroundStyle(.secondary)
.textSelection(.enabled) .textSelection(.enabled)
Text(status.provider == .tailscale ? "Managed authority" : "Custom authority")
.font(.footnote)
.foregroundStyle(.secondary)
if let oidcIssuer = status.oidcIssuer { if let oidcIssuer = status.oidcIssuer {
Text("OIDC: \(oidcIssuer)") Text("OIDC: \(oidcIssuer)")
.font(.footnote) .font(.footnote)
@ -766,6 +719,54 @@ private struct ConfigurationSheetView: View {
RoundedRectangle(cornerRadius: 16) RoundedRectangle(cornerRadius: 16)
.fill(.thinMaterial) .fill(.thinMaterial)
) )
.accessibilityIdentifier("tailnet-discovery-card")
}
private func tailnetLoginCard(
status: TailnetLoginStatus?,
failure: String?
) -> some View {
VStack(alignment: .leading, spacing: 6) {
if let status {
Text(status.running ? "Signed In" : status.needsLogin ? "Browser Sign-In Required" : "Checking Sign-In")
.font(.subheadline.weight(.medium))
if let tailnetName = status.tailnetName, !tailnetName.isEmpty {
Text("Tailnet: \(tailnetName)")
.font(.footnote)
.foregroundStyle(.secondary)
}
if let selfDNSName = status.selfDNSName, !selfDNSName.isEmpty {
Text(selfDNSName)
.font(.footnote.monospaced())
.foregroundStyle(.secondary)
.textSelection(.enabled)
}
if !status.tailnetIPs.isEmpty {
Text(status.tailnetIPs.joined(separator: ", "))
.font(.footnote.monospaced())
.foregroundStyle(.secondary)
.textSelection(.enabled)
}
if !status.health.isEmpty {
Text(status.health.joined(separator: ""))
.font(.footnote)
.foregroundStyle(.secondary)
}
} else if let failure {
Text("Sign-In failed")
.font(.subheadline.weight(.medium))
.foregroundStyle(.red)
Text(failure)
.font(.footnote)
.foregroundStyle(.secondary)
}
}
.padding(12)
.background(
RoundedRectangle(cornerRadius: 16)
.fill(.thinMaterial)
)
.accessibilityIdentifier("tailnet-login-card")
} }
private func summaryBadge(_ label: String) -> some View { private func summaryBadge(_ label: String) -> some View {
@ -826,12 +827,8 @@ private struct ConfigurationSheetView: View {
} }
case .tailnet: case .tailnet:
Menu("Provider") { Button("Use Tailscale Managed Server") {
ForEach(TailnetProvider.allCases) { provider in applyTailnetDefaults(for: .tailscale)
Button(provider.title) {
applyTailnetProvider(provider)
}
}
} }
if availableTailnetAuthModes.count > 1 { if availableTailnetAuthModes.count > 1 {
@ -839,7 +836,7 @@ private struct ConfigurationSheetView: View {
ForEach(availableTailnetAuthModes) { mode in ForEach(availableTailnetAuthModes) { mode in
Button(mode.title) { Button(mode.title) {
draft.authMode = mode draft.authMode = mode
if mode == .none || mode == .web { if mode == .none {
draft.secret = "" draft.secret = ""
} }
} }
@ -847,8 +844,8 @@ private struct ConfigurationSheetView: View {
} }
} }
Button("Restore Provider Defaults") { Button("Clear Discovery Result") {
applyTailnetDefaults(for: draft.tailnetProvider) resetTailnetDiscoveryFeedback()
} }
} }
} }
@ -886,17 +883,6 @@ private struct ConfigurationSheetView: View {
} }
} }
private var tailnetProviderIconName: String {
switch draft.tailnetProvider {
case .tailscale:
"globe.badge.chevron.backward"
case .headscale:
"server.rack"
case .burrow:
"shield"
}
}
private var showsBottomActionButton: Bool { private var showsBottomActionButton: Bool {
#if os(iOS) #if os(iOS)
true true
@ -920,9 +906,6 @@ private struct ConfigurationSheetView: View {
case .tor: case .tor:
return "Save Account" return "Save Account"
case .tailnet: case .tailnet:
if tailnetUsesWebLogin {
return loginStatus?.running == true ? "Save Account" : "Start Sign-In"
}
return "Save Account" return "Save Account"
} }
} }
@ -937,11 +920,11 @@ private struct ConfigurationSheetView: View {
if normalizedOptional(draft.accountName) == nil || normalizedOptional(draft.identityName) == nil { if normalizedOptional(draft.accountName) == nil || normalizedOptional(draft.identityName) == nil {
return true return true
} }
if draft.tailnetProvider.requiresControlURL && normalizedOptional(draft.authority) == nil { if normalizedOptional(draft.authority) == nil {
return true return true
} }
if tailnetUsesWebLogin { if draft.authMode == .web {
return false return tailnetLoginStatus?.running != true
} }
if draft.authMode != .none && normalizedOptional(draft.secret) == nil { if draft.authMode != .none && normalizedOptional(draft.secret) == nil {
return true return true
@ -1027,41 +1010,13 @@ private struct ConfigurationSheetView: View {
} }
private func submitTailnet() async throws { private func submitTailnet() async throws {
if tailnetUsesWebLogin { let secret = (draft.authMode == .none || draft.authMode == .web) ? nil : draft.secret
if loginStatus?.running == true {
webAuthenticationTask?.cancel()
webAuthenticationTask = nil
try await saveTailnetAccount(secret: nil, username: nil)
dismiss()
} else {
try await startTailnetLogin()
}
return
}
let secret = draft.authMode == .none ? nil : draft.secret
let username = normalizedOptional(draft.username) let username = normalizedOptional(draft.username)
preserveTailnetLoginSession = draft.authMode == .web && tailnetLoginStatus?.running == true
try await saveTailnetAccount(secret: secret, username: username) try await saveTailnetAccount(secret: secret, username: username)
dismiss() dismiss()
} }
private func startTailnetLogin() async throws {
let response = try await TailnetBridgeClient.startLogin(
TailnetLoginStartRequest(
accountName: normalized(draft.accountName, fallback: "default"),
identityName: normalized(draft.identityName, fallback: "apple"),
hostname: normalizedOptional(draft.hostname),
controlURL: normalizedOptional(draft.authority) ?? draft.tailnetProvider.defaultAuthority
)
)
loginSessionID = response.sessionID
loginStatus = response.status
if let authURL = response.status.authURL, let url = URL(string: authURL) {
openLoginURL(url)
}
startPollingTailscaleLogin()
}
private func runAutomationIfNeeded() { private func runAutomationIfNeeded() {
guard !didRunAutomation, guard !didRunAutomation,
sheet == .tailnet, sheet == .tailnet,
@ -1080,79 +1035,19 @@ private struct ConfigurationSheetView: View {
Task { @MainActor in Task { @MainActor in
switch automation.action { switch automation.action {
case .tailnetLogin: case .tailnetLogin:
draft.tailnetProvider = .tailscale applyTailnetDefaults(for: .tailscale)
do { startTailnetLogin()
try await startTailnetLogin()
} catch {
errorMessage = error.localizedDescription
}
case .headscaleProbe: case .headscaleProbe:
applyTailnetProvider(.headscale)
draft.authority = automation.authority ?? TailnetProvider.headscale.defaultAuthority ?? draft.authority draft.authority = automation.authority ?? TailnetProvider.headscale.defaultAuthority ?? draft.authority
probeTailnetAuthority() probeTailnetAuthority()
} }
} }
} }
private func startPollingTailscaleLogin() {
pollingTask?.cancel()
guard let loginSessionID else { return }
pollingTask = Task { @MainActor in
while !Task.isCancelled {
do {
let status = try await TailnetBridgeClient.status(sessionID: loginSessionID)
let previousAuthURL = loginStatus?.authURL
loginStatus = status
if previousAuthURL == nil,
let authURL = status.authURL,
let url = URL(string: authURL)
{
openLoginURL(url)
}
if status.running {
webAuthenticationTask?.cancel()
webAuthenticationTask = nil
return
}
} catch {
errorMessage = error.localizedDescription
return
}
try? await Task.sleep(for: .seconds(2))
}
}
}
private func openLoginURL(_ url: URL) {
webAuthenticationTask?.cancel()
webAuthenticationTask = Task { @MainActor in
try? await Task.sleep(for: .milliseconds(300))
do {
_ = try await webAuthenticationSession.authenticate(
using: url,
callbackURLScheme: "burrow",
preferredBrowserSession: .shared
)
} catch is CancellationError {
return
} catch let error as ASWebAuthenticationSessionError
where error.code == .canceledLogin
{
return
} catch {
errorMessage = error.localizedDescription
}
webAuthenticationTask = nil
}
}
private func saveTailnetAccount(secret: String?, username: String?) async throws { private func saveTailnetAccount(secret: String?, username: String?) async throws {
let provider = draft.tailnetProvider let provider = inferredTailnetProvider
let title = titleOrFallback( let title = titleOrFallback(
hostnameFallback( hostnameFallback(from: draft.authority, fallback: "Tailnet")
from: tailnetUsesWebLogin ? (loginStatus?.tailnetName ?? "") : draft.authority,
fallback: provider.title
)
) )
let payload = TailnetNetworkPayload( let payload = TailnetNetworkPayload(
@ -1160,21 +1055,17 @@ private struct ConfigurationSheetView: View {
authority: normalizedOptional(draft.authority) ?? normalizedOptional(provider.defaultAuthority ?? ""), authority: normalizedOptional(draft.authority) ?? normalizedOptional(provider.defaultAuthority ?? ""),
account: normalized(draft.accountName, fallback: "default"), account: normalized(draft.accountName, fallback: "default"),
identity: normalized(draft.identityName, fallback: "apple"), identity: normalized(draft.identityName, fallback: "apple"),
tailnet: normalizedOptional(loginStatus?.tailnetName ?? draft.tailnet), tailnet: normalizedOptional(draft.tailnet),
hostname: normalizedOptional(draft.hostname) hostname: normalizedOptional(draft.hostname)
) )
var noteParts: [String] = [ var noteParts: [String] = [
provider.title, isManagedTailnetAuthority ? "Managed Tailnet" : "Custom Tailnet",
tailnetUsesWebLogin "Auth: \(draft.authMode.title)",
? "State: \(loginStatus?.backendState ?? "NeedsLogin")"
: "Auth: \(draft.authMode.title)",
] ]
if let dnsName = loginStatus?.selfDNSName {
noteParts.append("Device: \(dnsName)") if draft.authMode == .web, tailnetLoginStatus?.running == true {
} noteParts.append("Browser sign-in complete")
if let magicDNSSuffix = loginStatus?.magicDNSSuffix {
noteParts.append("MagicDNS: \(magicDNSSuffix)")
} }
do { do {
@ -1186,7 +1077,7 @@ private struct ConfigurationSheetView: View {
let record = NetworkAccountRecord( let record = NetworkAccountRecord(
id: UUID(), id: UUID(),
kind: .headscale, kind: .tailnet,
title: title, title: title,
authority: payload.authority, authority: payload.authority,
provider: provider, provider: provider,
@ -1195,7 +1086,7 @@ private struct ConfigurationSheetView: View {
hostname: payload.hostname, hostname: payload.hostname,
username: username, username: username,
tailnet: payload.tailnet, tailnet: payload.tailnet,
authMode: tailnetUsesWebLogin ? .web : draft.authMode, authMode: draft.authMode,
note: noteParts.joined(separator: ""), note: noteParts.joined(separator: ""),
createdAt: .now, createdAt: .now,
updatedAt: .now updatedAt: .now
@ -1226,33 +1117,44 @@ private struct ConfigurationSheetView: View {
draft.torListen = defaults.torListen draft.torListen = defaults.torListen
} }
private func applyTailnetProvider(_ provider: TailnetProvider) { private func applyTailnetDefaults(for provider: TailnetProvider) {
resetTailnetDiscoveryFeedback() resetTailnetDiscoveryFeedback()
draft.tailnetProvider = provider draft.authority = provider.defaultAuthority ?? ""
applyTailnetDefaults(for: provider) if !availableTailnetAuthModes.contains(draft.authMode) {
draft.authMode = .web
}
} }
private func applyTailnetDefaults(for provider: TailnetProvider) { private func startTailnetLogin() {
draft.authority = provider.defaultAuthority ?? "" guard let authority = normalizedOptional(draft.authority) else {
loginStatus = nil tailnetLoginStatus = nil
loginSessionID = nil tailnetLoginError = "Enter a server URL first."
pollingTask?.cancel() return
if provider == .tailscale {
draft.authMode = .web
draft.username = ""
draft.secret = ""
} else {
if !availableTailnetAuthModes.contains(draft.authMode) {
draft.authMode = provider.supportsWebLogin ? .web : .none
} }
if draft.authMode == .web && !provider.supportsWebLogin {
draft.authMode = .none isStartingTailnetLogin = true
tailnetLoginError = nil
preserveTailnetLoginSession = false
Task { @MainActor in
defer { isStartingTailnetLogin = false }
do {
let status = try await networkViewModel.startTailnetLogin(
accountName: normalized(draft.accountName, fallback: "default"),
identityName: normalized(draft.identityName, fallback: "apple"),
hostname: normalizedOptional(draft.hostname),
authority: authority
)
tailnetLoginSessionID = status.sessionID
updateTailnetLoginStatus(status)
beginTailnetLoginPolling(sessionID: status.sessionID)
} catch {
tailnetLoginError = error.localizedDescription
} }
} }
} }
private func probeTailnetAuthority() { private func probeTailnetAuthority() {
guard draft.tailnetProvider.requiresControlURL else { return }
guard let authority = normalizedOptional(draft.authority) else { guard let authority = normalizedOptional(draft.authority) else {
authorityProbeStatus = nil authorityProbeStatus = nil
authorityProbeError = "Enter a server URL first." authorityProbeError = "Enter a server URL first."
@ -1266,10 +1168,7 @@ private struct ConfigurationSheetView: View {
Task { @MainActor in Task { @MainActor in
defer { isProbingAuthority = false } defer { isProbingAuthority = false }
do { do {
authorityProbeStatus = try await TailnetAuthorityProbeClient.probe( authorityProbeStatus = try await networkViewModel.probeTailnetAuthority(authority)
provider: draft.tailnetProvider,
authority: authority
)
} catch { } catch {
authorityProbeError = error.localizedDescription authorityProbeError = error.localizedDescription
} }
@ -1279,6 +1178,7 @@ private struct ConfigurationSheetView: View {
private func resetAuthorityProbe() { private func resetAuthorityProbe() {
authorityProbeStatus = nil authorityProbeStatus = nil
authorityProbeError = nil authorityProbeError = nil
tailnetLoginError = nil
} }
private func resetTailnetDiscoveryFeedback() { private func resetTailnetDiscoveryFeedback() {
@ -1300,15 +1200,9 @@ private struct ConfigurationSheetView: View {
Task { @MainActor in Task { @MainActor in
defer { isDiscoveringTailnet = false } defer { isDiscoveringTailnet = false }
do { do {
let discovery = try await TailnetDiscoveryClient.discover(email: email) let discovery = try await networkViewModel.discoverTailnet(email: email)
discoveryStatus = discovery discoveryStatus = discovery
draft.tailnetProvider = discovery.provider
draft.authority = discovery.authority draft.authority = discovery.authority
if discovery.provider.supportsWebLogin, discovery.oidcIssuer != nil {
draft.authMode = .web
draft.username = ""
draft.secret = ""
}
probeTailnetAuthority() probeTailnetAuthority()
} catch { } catch {
discoveryError = error.localizedDescription discoveryError = error.localizedDescription
@ -1316,6 +1210,76 @@ private struct ConfigurationSheetView: View {
} }
} }
/// Starts a one-second polling loop against the daemon for the given login
/// session. Polling stops — and the task reference is cleared — once the
/// tailnet reports running or a status call fails (error surfaces via
/// `tailnetLoginError`).
private func beginTailnetLoginPolling(sessionID: String) {
    tailnetLoginPollTask?.cancel()
    tailnetLoginPollTask = Task { @MainActor in
        while !Task.isCancelled {
            let snapshot: TailnetLoginStatus
            do {
                snapshot = try await networkViewModel.tailnetLoginStatus(sessionID: sessionID)
            } catch {
                tailnetLoginError = error.localizedDescription
                tailnetLoginPollTask = nil
                return
            }
            updateTailnetLoginStatus(snapshot)
            if snapshot.running {
                tailnetLoginPollTask = nil
                return
            }
            try? await Task.sleep(for: .seconds(1))
        }
    }
}
/// Records the latest daemon-reported login status and drives the browser UI:
/// a running tailnet tears the browser session down; a new auth URL presents
/// one. Re-presenting is suppressed while the URL is unchanged.
private func updateTailnetLoginStatus(_ status: TailnetLoginStatus) {
    tailnetLoginStatus = status
    tailnetLoginError = nil
    tailnetLoginSessionID = status.sessionID
    if status.running {
        // Sign-in finished — close any visible browser session.
        browserAuthenticator.cancel()
        tailnetPresentedAuthURL = nil
        return
    }
    guard let authURL = status.authURL else {
        return
    }
    if tailnetPresentedAuthURL != authURL {
        tailnetPresentedAuthURL = authURL
        browserAuthenticator.start(url: authURL) { [sessionID = status.sessionID] in
            Task { @MainActor in
                // Browser dismissed before the daemon reported running; keep
                // the session ID so the user can resume this sign-in attempt.
                if tailnetLoginStatus?.running != true {
                    tailnetLoginSessionID = sessionID
                }
            }
        }
    }
}
/// Tears down local polling and browser UI; when a sign-in is still pending,
/// also asks the daemon to cancel the session and clears the local login
/// state (a best-effort — cancellation errors surface via `tailnetLoginError`).
private func cancelTailnetLoginIfNeeded() async {
    tailnetLoginPollTask?.cancel()
    tailnetLoginPollTask = nil
    browserAuthenticator.cancel()
    tailnetPresentedAuthURL = nil
    // A completed sign-in needs no daemon-side cancellation, and without a
    // session ID there is nothing to cancel.
    if tailnetLoginStatus?.running == true { return }
    guard let sessionID = tailnetLoginSessionID else { return }
    do {
        try await networkViewModel.cancelTailnetLogin(sessionID: sessionID)
    } catch {
        tailnetLoginError = error.localizedDescription
    }
    tailnetLoginStatus = nil
    tailnetLoginSessionID = nil
}
private func pasteWireGuardConfiguration() { private func pasteWireGuardConfiguration() {
guard let clipboardString else { return } guard let clipboardString else { return }
draft.wireGuardConfig = clipboardString draft.wireGuardConfig = clipboardString
@ -1361,19 +1325,40 @@ private struct ConfigurationSheetView: View {
return host return host
} }
private var tailnetUsesWebLogin: Bool { private var availableTailnetAuthModes: [AccountAuthMode] {
draft.authMode == .web && draft.tailnetProvider.supportsWebLogin [.web, .none, .password, .preauthKey]
} }
private var availableTailnetAuthModes: [AccountAuthMode] { private var tailnetSignInActionTitle: String {
switch draft.tailnetProvider { if tailnetLoginStatus?.running == true {
case .tailscale: return "Signed In"
[.web]
case .headscale:
[.web, .none, .password, .preauthKey]
case .burrow:
[.none, .password, .preauthKey]
} }
if tailnetLoginSessionID != nil {
return "Resume Sign-In"
}
return "Start Sign-In"
}
private var tailnetAuthenticationFootnote: String {
switch draft.authMode {
case .web:
return "Burrow asks the daemon to start a Tailnet browser sign-in session, then closes it locally once the daemon reports the device is running."
case .none:
return "Save the authority only. Useful when the control plane handles authentication elsewhere."
case .password, .preauthKey:
return "Tailnet account material stays on-device. Burrow stores the authority and credentials for daemon-managed registration and refresh."
}
}
private var inferredTailnetProvider: TailnetProvider {
TailnetProvider.inferred(
authority: normalizedOptional(draft.authority),
explicit: discoveryStatus?.provider
)
}
private var isManagedTailnetAuthority: Bool {
TailnetProvider.isManagedTailscaleAuthority(normalizedOptional(draft.authority))
} }
@ViewBuilder @ViewBuilder
@ -1469,8 +1454,65 @@ private extension View {
self self
#endif #endif
} }
/// Applies email-friendly input traits on iOS (no autocapitalization, email
/// keyboard layout); returns the view unchanged on other platforms.
@ViewBuilder
func burrowEmailField() -> some View {
#if os(iOS)
textInputAutocapitalization(.never)
.keyboardType(.emailAddress)
#else
self
#endif
}
} }
#if canImport(AuthenticationServices)
/// Presents Tailnet browser sign-in pages through ASWebAuthenticationSession,
/// keeping at most one session alive at a time.
@MainActor
private final class TailnetBrowserAuthenticator: NSObject {
    private var session: ASWebAuthenticationSession?

    /// Opens `url` in a web-authentication session, cancelling any session
    /// already in flight. `onDismiss` fires whenever the browser UI goes away:
    /// the user finished, the user cancelled, or presentation failed to start.
    func start(url: URL, onDismiss: @escaping @Sendable () -> Void) {
        cancel()
        let session = ASWebAuthenticationSession(url: url, callbackURLScheme: nil) { _, _ in
            onDismiss()
        }
        session.presentationContextProvider = self
        session.prefersEphemeralWebBrowserSession = false
        self.session = session
        // `start()` returns false when the session cannot be presented (e.g.
        // no usable anchor). The previous code discarded the result, leaving
        // a dead session and a caller that never heard back; treat failure as
        // an immediate dismissal so the caller's state machine advances.
        if !session.start() {
            self.session = nil
            onDismiss()
        }
    }

    /// Cancels and releases the current session, if any.
    func cancel() {
        session?.cancel()
        session = nil
    }
}

extension TailnetBrowserAuthenticator: ASWebAuthenticationPresentationContextProviding {
    func presentationAnchor(for session: ASWebAuthenticationSession) -> ASPresentationAnchor {
        #if canImport(AppKit)
        // Anchor to the key window so the sheet attaches to the active scene.
        return NSApplication.shared.keyWindow
            ?? NSApplication.shared.windows.first
            ?? ASPresentationAnchor()
        #elseif canImport(UIKit)
        // NOTE(review): an empty anchor relies on the system choosing a
        // window — confirm this presents correctly in multi-scene iPad apps.
        return ASPresentationAnchor()
        #else
        return ASPresentationAnchor()
        #endif
    }
}
#else
/// Fallback for platforms without AuthenticationServices: reports dismissal
/// immediately so callers' state machines still advance.
@MainActor
private final class TailnetBrowserAuthenticator {
    func start(url: URL, onDismiss: @escaping @Sendable () -> Void) {
        _ = url
        onDismiss()
    }

    func cancel() {}
}
#endif
private struct BurrowAutomationConfig { private struct BurrowAutomationConfig {
enum Action: String { enum Action: String {
case tailnetLogin = "tailnet-login" case tailnetLogin = "tailnet-login"

View file

@ -26,13 +26,6 @@ struct TailnetNetworkPayload: Codable, Sendable {
} }
} }
struct TailnetLoginStartRequest: Codable, Sendable {
var accountName: String
var identityName: String
var hostname: String?
var controlURL: String?
}
struct TailnetDiscoveryResponse: Codable, Sendable { struct TailnetDiscoveryResponse: Codable, Sendable {
var domain: String var domain: String
var provider: TailnetProvider var provider: TailnetProvider
@ -40,23 +33,6 @@ struct TailnetDiscoveryResponse: Codable, Sendable {
var oidcIssuer: String? var oidcIssuer: String?
} }
struct TailnetLoginStatus: Codable, Sendable {
var backendState: String
var authURL: String?
var running: Bool
var needsLogin: Bool
var tailnetName: String?
var magicDNSSuffix: String?
var selfDNSName: String?
var tailscaleIPs: [String]
var health: [String]
}
struct TailnetLoginStartResponse: Codable, Sendable {
var sessionID: String
var status: TailnetLoginStatus
}
struct TailnetAuthorityProbeStatus: Sendable { struct TailnetAuthorityProbeStatus: Sendable {
var authority: String var authority: String
var statusCode: Int var statusCode: Int
@ -64,147 +40,102 @@ struct TailnetAuthorityProbeStatus: Sendable {
var detail: String? var detail: String?
} }
enum TailnetBridgeClient { struct TailnetLoginStatus: Sendable {
private static let baseURL = URL(string: "http://127.0.0.1:8080")! var sessionID: String
var backendState: String
static func startLogin(_ request: TailnetLoginStartRequest) async throws -> TailnetLoginStartResponse { var authURL: URL?
var urlRequest = URLRequest( var running: Bool
url: baseURL.appendingPathComponent("v1/tailscale/login/start") var needsLogin: Bool
) var tailnetName: String?
urlRequest.httpMethod = "POST" var magicDNSSuffix: String?
urlRequest.setValue("application/json", forHTTPHeaderField: "Content-Type") var selfDNSName: String?
var tailnetIPs: [String]
let encoder = JSONEncoder() var health: [String]
encoder.keyEncodingStrategy = .convertToSnakeCase
urlRequest.httpBody = try encoder.encode(request)
let (data, response) = try await URLSession.shared.data(for: urlRequest)
try validate(response: response, data: data)
let decoder = JSONDecoder()
decoder.keyDecodingStrategy = .convertFromSnakeCase
return try decoder.decode(TailnetLoginStartResponse.self, from: data)
}
static func status(sessionID: String) async throws -> TailnetLoginStatus {
let url = baseURL
.appendingPathComponent("v1/tailscale/login")
.appendingPathComponent(sessionID)
let (data, response) = try await URLSession.shared.data(from: url)
try validate(response: response, data: data)
let decoder = JSONDecoder()
decoder.keyDecodingStrategy = .convertFromSnakeCase
return try decoder.decode(TailnetLoginStatus.self, from: data)
}
fileprivate static func validate(response: URLResponse, data: Data) throws {
guard let http = response as? HTTPURLResponse else {
throw URLError(.badServerResponse)
}
guard (200..<300).contains(http.statusCode) else {
let message = String(data: data, encoding: .utf8)?.trimmingCharacters(
in: .whitespacesAndNewlines
)
throw TailnetBridgeError.server(message?.ifEmpty("HTTP \(http.statusCode)") ?? "HTTP \(http.statusCode)")
}
}
} }
enum TailnetDiscoveryClient { enum TailnetDiscoveryClient {
private static let baseURL = URL(string: "http://127.0.0.1:8080")! static func discover(email: String, socketURL: URL) async throws -> TailnetDiscoveryResponse {
var request = Burrow_TailnetDiscoverRequest()
request.email = email
static func discover(email: String) async throws -> TailnetDiscoveryResponse { let response = try await TailnetClient.unix(socketURL: socketURL).discover(request)
guard var components = URLComponents( return TailnetDiscoveryResponse(
url: baseURL.appendingPathComponent("v1/tailnet/discover"), domain: response.domain,
resolvingAgainstBaseURL: false provider: response.managed ? .tailscale : .headscale,
) else { authority: response.authority,
throw URLError(.badURL) oidcIssuer: response.oidcIssuer.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
} ? nil
components.queryItems = [ : response.oidcIssuer
URLQueryItem(name: "email", value: email) )
]
guard let url = components.url else {
throw URLError(.badURL)
}
let (data, response) = try await URLSession.shared.data(from: url)
try TailnetBridgeClient.validate(response: response, data: data)
let decoder = JSONDecoder()
decoder.keyDecodingStrategy = .convertFromSnakeCase
return try decoder.decode(TailnetDiscoveryResponse.self, from: data)
} }
} }
enum TailnetAuthorityProbeClient { enum TailnetAuthorityProbeClient {
static func probe(provider: TailnetProvider, authority: String) async throws -> TailnetAuthorityProbeStatus { static func probe(authority: String, socketURL: URL) async throws -> TailnetAuthorityProbeStatus {
let normalizedAuthority = normalizeAuthority(authority) var request = Burrow_TailnetProbeRequest()
let baseURL = try validatedBaseURL(normalizedAuthority) request.authority = authority
let probeURL = probeURL(for: provider, baseURL: baseURL)
var request = URLRequest(url: probeURL)
request.timeoutInterval = 10
request.setValue("application/json", forHTTPHeaderField: "Accept")
let (data, response) = try await URLSession.shared.data(for: request)
guard let http = response as? HTTPURLResponse else {
throw URLError(.badServerResponse)
}
guard (200..<300).contains(http.statusCode) else {
let message = String(data: data, encoding: .utf8)?.trimmingCharacters(
in: .whitespacesAndNewlines
)
throw TailnetBridgeError.server(message?.ifEmpty("HTTP \(http.statusCode)") ?? "HTTP \(http.statusCode)")
}
let body = String(data: data, encoding: .utf8)?
.trimmingCharacters(in: .whitespacesAndNewlines)
let detail = body.flatMap { $0.isEmpty ? nil : $0 }
let response = try await TailnetClient.unix(socketURL: socketURL).probe(request)
return TailnetAuthorityProbeStatus( return TailnetAuthorityProbeStatus(
authority: normalizedAuthority, authority: response.authority,
statusCode: http.statusCode, statusCode: Int(response.statusCode),
summary: "\(provider.title) reachable", summary: response.summary,
detail: detail detail: response.detail.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
? nil
: response.detail
) )
} }
private static func normalizeAuthority(_ authority: String) -> String {
let trimmed = authority.trimmingCharacters(in: .whitespacesAndNewlines)
if trimmed.contains("://") {
return trimmed
}
return "https://\(trimmed)"
}
private static func validatedBaseURL(_ authority: String) throws -> URL {
guard let url = URL(string: authority), url.host != nil else {
throw TailnetBridgeError.server("Invalid server URL")
}
return url
}
private static func probeURL(for provider: TailnetProvider, baseURL: URL) -> URL {
switch provider {
case .headscale:
baseURL.appendingPathComponent("health")
case .burrow:
baseURL.appendingPathComponent("healthz")
case .tailscale:
baseURL
}
}
} }
enum TailnetBridgeError: LocalizedError { enum TailnetLoginClient {
case server(String) static func start(
accountName: String,
var errorDescription: String? { identityName: String,
switch self { hostname: String?,
case .server(let message): authority: String,
message socketURL: URL
) async throws -> TailnetLoginStatus {
var request = Burrow_TailnetLoginStartRequest()
request.accountName = accountName
request.identityName = identityName
request.hostname = hostname ?? ""
request.authority = authority
let response = try await TailnetClient.unix(socketURL: socketURL).loginStart(request)
return decode(response)
} }
static func status(sessionID: String, socketURL: URL) async throws -> TailnetLoginStatus {
var request = Burrow_TailnetLoginStatusRequest()
request.sessionID = sessionID
let response = try await TailnetClient.unix(socketURL: socketURL).loginStatus(request)
return decode(response)
}
static func cancel(sessionID: String, socketURL: URL) async throws {
var request = Burrow_TailnetLoginCancelRequest()
request.sessionID = sessionID
_ = try await TailnetClient.unix(socketURL: socketURL).loginCancel(request)
}
private static func decode(_ response: Burrow_TailnetLoginStatusResponse) -> TailnetLoginStatus {
TailnetLoginStatus(
sessionID: response.sessionID,
backendState: response.backendState,
authURL: URL(string: response.authURL.trimmingCharacters(in: .whitespacesAndNewlines)),
running: response.running,
needsLogin: response.needsLogin,
tailnetName: response.tailnetName.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
? nil
: response.tailnetName,
magicDNSSuffix: response.magicDNSSuffix.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
? nil
: response.magicDNSSuffix,
selfDNSName: response.selfDNSName.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty
? nil
: response.selfDNSName,
tailnetIPs: response.tailnetIPs,
health: response.health
)
} }
} }
@ -215,7 +146,7 @@ final class NetworkViewModel: Sendable {
private(set) var connectionError: String? private(set) var connectionError: String?
private let socketURLResult: Result<URL, Error> private let socketURLResult: Result<URL, Error>
nonisolated(unsafe) private var task: Task<Void, Never>? @ObservationIgnored private var task: Task<Void, Never>?
init(socketURLResult: Result<URL, Error>) { init(socketURLResult: Result<URL, Error>) {
self.socketURLResult = socketURLResult self.socketURLResult = socketURLResult
@ -242,6 +173,42 @@ final class NetworkViewModel: Sendable {
try await addNetwork(type: .tailnet, payload: payload.encoded()) try await addNetwork(type: .tailnet, payload: payload.encoded())
} }
func discoverTailnet(email: String) async throws -> TailnetDiscoveryResponse {
let socketURL = try socketURLResult.get()
return try await TailnetDiscoveryClient.discover(email: email, socketURL: socketURL)
}
func probeTailnetAuthority(_ authority: String) async throws -> TailnetAuthorityProbeStatus {
let socketURL = try socketURLResult.get()
return try await TailnetAuthorityProbeClient.probe(authority: authority, socketURL: socketURL)
}
func startTailnetLogin(
accountName: String,
identityName: String,
hostname: String?,
authority: String
) async throws -> TailnetLoginStatus {
let socketURL = try socketURLResult.get()
return try await TailnetLoginClient.start(
accountName: accountName,
identityName: identityName,
hostname: hostname,
authority: authority,
socketURL: socketURL
)
}
func tailnetLoginStatus(sessionID: String) async throws -> TailnetLoginStatus {
let socketURL = try socketURLResult.get()
return try await TailnetLoginClient.status(sessionID: sessionID, socketURL: socketURL)
}
func cancelTailnetLogin(sessionID: String) async throws {
let socketURL = try socketURLResult.get()
try await TailnetLoginClient.cancel(sessionID: sessionID, socketURL: socketURL)
}
private func addNetwork(type: Burrow_NetworkType, payload: Data) async throws -> Int32 { private func addNetwork(type: Burrow_NetworkType, payload: Data) async throws -> Int32 {
let socketURL = try socketURLResult.get() let socketURL = try socketURLResult.get()
let networkID = nextNetworkID let networkID = nextNetworkID
@ -341,19 +308,6 @@ enum TailnetProvider: String, CaseIterable, Codable, Identifiable, Sendable {
} }
} }
var supportsWebLogin: Bool {
switch self {
case .tailscale, .headscale:
true
case .burrow:
false
}
}
var requiresControlURL: Bool {
self != .tailscale
}
var defaultAuthority: String? { var defaultAuthority: String? {
switch self { switch self {
case .tailscale: case .tailscale:
@ -368,19 +322,44 @@ enum TailnetProvider: String, CaseIterable, Codable, Identifiable, Sendable {
var subtitle: String { var subtitle: String {
switch self { switch self {
case .tailscale: case .tailscale:
"Use Tailscale's real browser login flow." "Managed Tailnet authority."
case .headscale: case .headscale:
"Use your Headscale control plane with browser or key-based sign-in." "Custom Tailnet control server."
case .burrow: case .burrow:
"Store Burrow control-plane credentials." "Burrow-native Tailnet authority."
} }
} }
static func inferred(authority: String?, explicit: TailnetProvider?) -> TailnetProvider {
if explicit == .burrow {
return .burrow
}
if isManagedTailscaleAuthority(authority) {
return .tailscale
}
return .headscale
}
static func isManagedTailscaleAuthority(_ authority: String?) -> Bool {
guard let normalized = authority?
.trimmingCharacters(in: .whitespacesAndNewlines)
.lowercased()
.trimmingCharacters(in: CharacterSet(charactersIn: "/")),
!normalized.isEmpty
else {
return false
}
return normalized == "https://controlplane.tailscale.com"
|| normalized == "http://controlplane.tailscale.com"
|| normalized == "controlplane.tailscale.com"
}
} }
enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable { enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable {
case wireGuard case wireGuard
case tor case tor
case headscale case tailnet
var id: String { rawValue } var id: String { rawValue }
@ -388,7 +367,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable {
switch self { switch self {
case .wireGuard: "WireGuard" case .wireGuard: "WireGuard"
case .tor: "Tor" case .tor: "Tor"
case .headscale: "Tailnet" case .tailnet: "Tailnet"
} }
} }
@ -396,7 +375,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable {
switch self { switch self {
case .wireGuard: "Import a tunnel and optional account metadata." case .wireGuard: "Import a tunnel and optional account metadata."
case .tor: "Store Arti account and identity preferences." case .tor: "Store Arti account and identity preferences."
case .headscale: "Save Tailscale, Headscale, or Burrow control-plane identities." case .tailnet: "Save Tailnet authority, identity, and login material."
} }
} }
@ -404,7 +383,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable {
switch self { switch self {
case .wireGuard: .init("WireGuard") case .wireGuard: .init("WireGuard")
case .tor: .orange case .tor: .orange
case .headscale: .mint case .tailnet: .mint
} }
} }
@ -412,7 +391,7 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable {
switch self { switch self {
case .wireGuard: "Add Network" case .wireGuard: "Add Network"
case .tor: "Save Account" case .tor: "Save Account"
case .headscale: "Save Account" case .tailnet: "Save Account"
} }
} }
@ -422,15 +401,15 @@ enum AccountNetworkKind: String, CaseIterable, Codable, Identifiable, Sendable {
nil nil
case .tor: case .tor:
"Tor account preferences are stored on Apple now. The managed Tor runtime is not wired on Apple in this branch yet." "Tor account preferences are stored on Apple now. The managed Tor runtime is not wired on Apple in this branch yet."
case .headscale: case .tailnet:
"Tailnet accounts can sign in from Apple now. The managed Apple runtime is still pending, but Tailnet networks can be stored in the daemon." "Tailnet accounts can sign in from Apple now. The managed Apple runtime is still pending, but Tailnet networks can be stored in the daemon."
} }
} }
} }
enum AccountAuthMode: String, CaseIterable, Codable, Identifiable, Sendable { enum AccountAuthMode: String, CaseIterable, Codable, Identifiable, Sendable {
case none
case web case web
case none
case password case password
case preauthKey case preauthKey
@ -438,8 +417,8 @@ enum AccountAuthMode: String, CaseIterable, Codable, Identifiable, Sendable {
var title: String { var title: String {
switch self { switch self {
case .web: "Browser Sign-In"
case .none: "None" case .none: "None"
case .web: "Web Login"
case .password: "Password" case .password: "Password"
case .preauthKey: "Preauth Key" case .preauthKey: "Preauth Key"
} }
@ -465,17 +444,15 @@ struct NetworkAccountRecord: Codable, Identifiable, Hashable, Sendable {
struct TailnetCard { struct TailnetCard {
var id: Int32 var id: Int32
var provider: String
var title: String var title: String
var detail: String var detail: String
init(network: Burrow_Network) { init(network: Burrow_Network) {
let payload = (try? JSONDecoder().decode(TailnetNetworkPayload.self, from: network.payload)) let payload = (try? JSONDecoder().decode(TailnetNetworkPayload.self, from: network.payload))
id = network.id id = network.id
provider = payload?.provider.title ?? "Tailnet"
title = payload?.tailnet ?? payload?.hostname ?? "Tailnet" title = payload?.tailnet ?? payload?.hostname ?? "Tailnet"
detail = [ detail = [
payload?.provider.title, payload?.authority.flatMap { URL(string: $0)?.host } ?? payload?.authority,
payload?.authority, payload?.authority,
payload.map { "Account: \($0.account)" }, payload.map { "Account: \($0.account)" },
] ]
@ -492,7 +469,7 @@ struct TailnetCard {
VStack(alignment: .leading, spacing: 12) { VStack(alignment: .leading, spacing: 12) {
HStack { HStack {
VStack(alignment: .leading, spacing: 4) { VStack(alignment: .leading, spacing: 4) {
Text(provider) Text("Tailnet")
.font(.headline) .font(.headline)
.foregroundStyle(.white.opacity(0.85)) .foregroundStyle(.white.opacity(0.85))
Text(title) Text(title)

View file

@ -10,6 +10,12 @@ check:
build: build:
@cargo build @cargo build
bep-check:
@python3 Scripts/check-bep-metadata.py
bep-list:
@Scripts/bep list
daemon-console: daemon-console:
@$(sudo_cargo_console) daemon @$(sudo_cargo_console) daemon

View file

@ -10,6 +10,7 @@ Routine verification now runs unprivileged with `cargo test --workspace --all-fe
The repository now carries its own design and deployment record: The repository now carries its own design and deployment record:
- [Constitution](./CONSTITUTION.md) - [Constitution](./CONSTITUTION.md)
- [Agent Instructions](./AGENTS.md)
- [Burrow Evolution](./evolution/README.md) - [Burrow Evolution](./evolution/README.md)
- [WireGuard Rust Lineage](./docs/WIREGUARD_LINEAGE.md) - [WireGuard Rust Lineage](./docs/WIREGUARD_LINEAGE.md)
- [Protocol Roadmap](./docs/PROTOCOL_ROADMAP.md) - [Protocol Roadmap](./docs/PROTOCOL_ROADMAP.md)
@ -19,6 +20,8 @@ The repository now carries its own design and deployment record:
Burrow is fully open source, you can fork the repo and start contributing easily. For more information and in-depth discussions, visit the `#burrow` channel on the [Hack Club Slack](https://hackclub.com/slack/), here you can ask for help and talk with other people interested in burrow. Checkout [GETTING_STARTED.md](./docs/GETTING_STARTED.md) for build instructions and [GTK_APP.md](./docs/GTK_APP.md) for the Linux app. Forge and deployment scaffolding live in [`flake.nix`](./flake.nix), [`nixos/`](./nixos), and [`.forgejo/workflows/`](./.forgejo/workflows/). Hosted mail backup operations live in [`docs/FORWARDEMAIL.md`](./docs/FORWARDEMAIL.md) and [`Tools/forwardemail-custom-s3.sh`](./Tools/forwardemail-custom-s3.sh). Burrow is fully open source, you can fork the repo and start contributing easily. For more information and in-depth discussions, visit the `#burrow` channel on the [Hack Club Slack](https://hackclub.com/slack/), here you can ask for help and talk with other people interested in burrow. Checkout [GETTING_STARTED.md](./docs/GETTING_STARTED.md) for build instructions and [GTK_APP.md](./docs/GTK_APP.md) for the Linux app. Forge and deployment scaffolding live in [`flake.nix`](./flake.nix), [`nixos/`](./nixos), and [`.forgejo/workflows/`](./.forgejo/workflows/). Hosted mail backup operations live in [`docs/FORWARDEMAIL.md`](./docs/FORWARDEMAIL.md) and [`Tools/forwardemail-custom-s3.sh`](./Tools/forwardemail-custom-s3.sh).
Agent and governance-sensitive work should start with [AGENTS.md](./AGENTS.md), [CONSTITUTION.md](./CONSTITUTION.md), and the relevant BEPs under [`evolution/proposals/`](./evolution/proposals/). Identity and bootstrap metadata now live in [`contributors.nix`](./contributors.nix).
The project structure is divided in the following folders: The project structure is divided in the following folders:
``` ```

View file

@ -116,7 +116,7 @@ lookup_user_pk() {
ensure_user() { ensure_user() {
local user_spec="$1" local user_spec="$1"
local username name email is_admin groups_json effective_groups_json group_name local username name email is_admin groups_json password_file effective_groups_json group_name
local group_pks_json payload user_pk local group_pks_json payload user_pk
username="$(printf '%s\n' "$user_spec" | jq -r '.username')" username="$(printf '%s\n' "$user_spec" | jq -r '.username')"
@ -124,6 +124,7 @@ ensure_user() {
email="$(printf '%s\n' "$user_spec" | jq -r '.email')" email="$(printf '%s\n' "$user_spec" | jq -r '.email')"
is_admin="$(printf '%s\n' "$user_spec" | jq -r '.isAdmin // false')" is_admin="$(printf '%s\n' "$user_spec" | jq -r '.isAdmin // false')"
groups_json="$(printf '%s\n' "$user_spec" | jq -c '.groups // []')" groups_json="$(printf '%s\n' "$user_spec" | jq -c '.groups // []')"
password_file="$(printf '%s\n' "$user_spec" | jq -r '.passwordFile // empty')"
if [[ -z "$username" || "$username" == "null" || -z "$email" || "$email" == "null" ]]; then if [[ -z "$username" || "$username" == "null" || -z "$email" || "$email" == "null" ]]; then
echo "error: each Burrow Authentik user requires username and email" >&2 echo "error: each Burrow Authentik user requires username and email" >&2
@ -178,6 +179,19 @@ ensure_user() {
echo "error: could not create Authentik user ${username}" >&2 echo "error: could not create Authentik user ${username}" >&2
exit 1 exit 1
fi fi
if [[ -n "$password_file" ]]; then
if [[ ! -s "$password_file" ]]; then
echo "error: password file for Authentik user ${username} is missing: ${password_file}" >&2
exit 1
fi
api POST "/api/v3/core/users/${user_pk}/set_password/" "$(
jq -cn \
--arg password "$(tr -d '\r\n' < "$password_file")" \
'{password: $password}'
)" >/dev/null
fi
} }
lookup_application_pk() { lookup_application_pk() {

View file

@ -0,0 +1,309 @@
#!/usr/bin/env bash
# Reconcile the Burrow Tailnet authentication flow in Authentik
# (identification -> password -> user-login stages) and bind it to the
# Tailnet OAuth2 provider(s). Idempotent: existing objects are PATCHed.
set -euo pipefail

# Connection and object-naming configuration; every value can be
# overridden through the environment variables listed in usage().
authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}"
bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}"
provider_slug="${AUTHENTIK_TAILNET_PROVIDER_SLUG:-ts}"
# Optional JSON array of provider slugs; when set it takes precedence
# over the single provider_slug above.
provider_slugs_json="${AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON:-}"
authentication_flow_name="${AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME:-Burrow Tailnet Authentication}"
authentication_flow_slug="${AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG:-burrow-tailnet-authentication}"
identification_stage_name="${AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME:-burrow-tailnet-identification-stage}"
password_stage_name="${AUTHENTIK_TAILNET_PASSWORD_STAGE_NAME:-burrow-tailnet-password-stage}"
user_login_stage_name="${AUTHENTIK_TAILNET_USER_LOGIN_STAGE_NAME:-burrow-tailnet-user-login-stage}"
google_source_slug="${AUTHENTIK_TAILNET_GOOGLE_SOURCE_SLUG:-google}"

# Print a summary of the required/optional environment variables.
usage() {
cat <<'EOF'
Usage: Scripts/authentik-sync-tailnet-auth-flow.sh
Required environment:
AUTHENTIK_BOOTSTRAP_TOKEN
Optional environment:
AUTHENTIK_URL
AUTHENTIK_TAILNET_PROVIDER_SLUG
AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON
AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME
AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG
AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME
AUTHENTIK_TAILNET_PASSWORD_STAGE_NAME
AUTHENTIK_TAILNET_USER_LOGIN_STAGE_NAME
AUTHENTIK_TAILNET_GOOGLE_SOURCE_SLUG
EOF
}

if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
usage
exit 0
fi

# The bootstrap token is the only hard requirement.
if [[ -z "$bootstrap_token" ]]; then
echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2
exit 1
fi

# Validate the explicit slug list when provided; otherwise derive a
# single-element array from provider_slug.
if [[ -n "$provider_slugs_json" ]]; then
if ! printf '%s' "$provider_slugs_json" | jq -e 'type == "array" and length > 0 and all(.[]; type == "string" and length > 0)' >/dev/null; then
echo "error: AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON must be a non-empty JSON array of strings" >&2
exit 1
fi
else
provider_slugs_json="$(jq -cn --arg slug "$provider_slug" '[$slug]')"
fi
# Issue an authenticated request against the Authentik API.
#   $1 - HTTP method, $2 - path appended to $authentik_url,
#   $3 - optional JSON body (adds a Content-Type header and -d).
# Fails (non-zero) on transport errors or HTTP >= 400 thanks to -f.
api() {
  local method="$1" path="$2" body="${3:-}"
  local -a curl_args=(
    -fsS
    -X "$method"
    -H "Authorization: Bearer ${bootstrap_token}"
  )
  if [[ -n "$body" ]]; then
    curl_args+=(-H "Content-Type: application/json" -d "$body")
  fi
  curl "${curl_args[@]}" "${authentik_url}${path}"
}
# Block until Authentik's readiness endpoint answers, polling every 2s
# for up to 90 attempts (~3 minutes). Exits the script on timeout.
wait_for_authentik() {
  local attempt=0
  while (( attempt < 90 )); do
    if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then
      return 0
    fi
    attempt=$((attempt + 1))
    sleep 2
  done
  echo "error: Authentik did not become ready at ${authentik_url}" >&2
  exit 1
}
# Print the first stage object (compact JSON) listed under API path $1
# whose name equals $2; prints nothing when no stage matches.
lookup_stage_by_name() {
local path="$1"
local name="$2"
api GET "${path}?page_size=200" \
| jq -c --arg name "$name" '.results[]? | select(.name == $name)' \
| head -n1
}

# Print the pk of the flow instance with slug $1, or nothing when absent.
lookup_flow_pk() {
local slug="$1"
api GET "/api/v3/flows/instances/?slug=${slug}" \
| jq -r '.results[]? | select(.slug != null) | .pk // empty' \
| head -n1
}

# Print the pk of the OAuth source with slug $1, or nothing when absent.
lookup_source_pk() {
local slug="$1"
api GET "/api/v3/sources/oauth/?page_size=200&slug=${slug}" \
| jq -r --arg slug "$slug" '.results[]? | select(.slug == $slug) | .pk // empty' \
| head -n1
}
# Create or update the Tailnet password stage and print its pk.
# Existing stages (matched by name) are PATCHed to the desired payload.
ensure_password_stage() {
local existing payload stage_pk
existing="$(lookup_stage_by_name "/api/v3/stages/password/" "$password_stage_name")"
payload="$(
jq -cn \
--arg name "$password_stage_name" \
'{
name: $name,
backends: [
"authentik.core.auth.InbuiltBackend",
"authentik.core.auth.TokenBackend"
],
allow_show_password: false,
failed_attempts_before_cancel: 5
}'
)"
if [[ -n "$existing" ]]; then
stage_pk="$(printf '%s\n' "$existing" | jq -r '.pk')"
api PATCH "/api/v3/stages/password/${stage_pk}/" "$payload" >/dev/null
else
stage_pk="$(
api POST "/api/v3/stages/password/" "$payload" \
| jq -r '.pk // empty'
)"
fi
printf '%s\n' "$stage_pk"
}
# Create or update the Tailnet identification stage and print its pk.
#   $1 - pk of the password stage embedded into the identification form
#   $2 - pk of the Google OAuth source, or empty to offer no sources
ensure_identification_stage() {
local password_stage_pk="$1"
local google_source_pk="$2"
local existing payload stage_pk sources_json
existing="$(lookup_stage_by_name "/api/v3/stages/identification/" "$identification_stage_name")"
# Only attach the Google source when it exists in this Authentik instance.
if [[ -n "$google_source_pk" ]]; then
sources_json="$(jq -cn --arg source "$google_source_pk" '[$source]')"
else
sources_json='[]'
fi
payload="$(
jq -cn \
--arg name "$identification_stage_name" \
--arg password_stage "$password_stage_pk" \
--argjson sources "$sources_json" \
'{
name: $name,
user_fields: ["username", "email"],
password_stage: $password_stage,
case_insensitive_matching: true,
show_matched_user: true,
sources: $sources,
show_source_labels: true,
pretend_user_exists: false,
enable_remember_me: false
}'
)"
if [[ -n "$existing" ]]; then
stage_pk="$(printf '%s\n' "$existing" | jq -r '.pk')"
api PATCH "/api/v3/stages/identification/${stage_pk}/" "$payload" >/dev/null
else
stage_pk="$(
api POST "/api/v3/stages/identification/" "$payload" \
| jq -r '.pk // empty'
)"
fi
printf '%s\n' "$stage_pk"
}
# Create or update the Tailnet user-login stage (12h sessions, no
# remember-me, no network/geoip binding) and print its pk.
ensure_user_login_stage() {
local existing payload stage_pk
existing="$(lookup_stage_by_name "/api/v3/stages/user_login/" "$user_login_stage_name")"
payload="$(
jq -cn \
--arg name "$user_login_stage_name" \
'{
name: $name,
session_duration: "hours=12",
terminate_other_sessions: false,
remember_me_offset: "seconds=0",
network_binding: "no_binding",
geoip_binding: "no_binding"
}'
)"
if [[ -n "$existing" ]]; then
stage_pk="$(printf '%s\n' "$existing" | jq -r '.pk')"
api PATCH "/api/v3/stages/user_login/${stage_pk}/" "$payload" >/dev/null
else
stage_pk="$(
api POST "/api/v3/stages/user_login/" "$payload" \
| jq -r '.pk // empty'
)"
fi
printf '%s\n' "$stage_pk"
}
# Create or update the Tailnet authentication flow and print its pk.
# NOTE(review): the PATCH addresses the flow detail endpoint by slug,
# while POST responses are read for .pk — assumes Authentik keys flow
# instances by slug; confirm against the API schema.
ensure_authentication_flow() {
local existing_pk payload
existing_pk="$(lookup_flow_pk "$authentication_flow_slug")"
payload="$(
jq -cn \
--arg name "$authentication_flow_name" \
--arg slug "$authentication_flow_slug" \
'{
name: $name,
title: $name,
slug: $slug,
designation: "authentication",
policy_engine_mode: "any",
layout: "stacked"
}'
)"
if [[ -n "$existing_pk" ]]; then
api PATCH "/api/v3/flows/instances/${authentication_flow_slug}/" "$payload" >/dev/null
printf '%s\n' "$existing_pk"
else
api POST "/api/v3/flows/instances/" "$payload" \
| jq -r '.pk // empty'
fi
}
# Bind stage $2 into flow $1 at order $3, creating the binding when it
# does not exist and PATCHing the first matching binding otherwise.
ensure_flow_binding() {
local flow_pk="$1"
local stage_pk="$2"
local order="$3"
local existing payload binding_pk
existing="$(
api GET "/api/v3/flows/bindings/?target=${flow_pk}&stage=${stage_pk}&page_size=200" \
| jq -c '.results[]?' \
| head -n1
)"
payload="$(
jq -cn \
--arg target "$flow_pk" \
--arg stage "$stage_pk" \
--argjson order "$order" \
'{
target: $target,
stage: $stage,
order: $order,
policy_engine_mode: "any"
}'
)"
if [[ -n "$existing" ]]; then
binding_pk="$(printf '%s\n' "$existing" | jq -r '.pk')"
api PATCH "/api/v3/flows/bindings/${binding_pk}/" "$payload" >/dev/null
else
api POST "/api/v3/flows/bindings/" "$payload" >/dev/null
fi
}
wait_for_authentik

# Resolve the pks of all OAuth2 providers matching the configured slugs,
# either by the application slug they are assigned to or by their own slug.
mapfile -t provider_pks < <(
api GET "/api/v3/providers/oauth2/?page_size=200" \
| jq -r --argjson provider_slugs "$provider_slugs_json" '
.results[]?
| select(
((.assigned_application_slug // empty) as $assigned | ($provider_slugs | index($assigned)) != null)
or ((.slug // empty) as $slug | ($provider_slugs | index($slug)) != null)
)
| .pk // empty
'
)
if [[ "${#provider_pks[@]}" -eq 0 ]]; then
echo "error: could not resolve any Authentik Tailnet OAuth providers from ${provider_slugs_json}" >&2
exit 1
fi

# The Google source is optional; an empty pk means "offer no sources".
google_source_pk="$(lookup_source_pk "$google_source_slug" || true)"

# Reconcile stages, the flow, and its bindings (orders 10 and 30; the
# password stage participates via the identification stage, not its own
# binding).
password_stage_pk="$(ensure_password_stage)"
identification_stage_pk="$(ensure_identification_stage "$password_stage_pk" "$google_source_pk")"
user_login_stage_pk="$(ensure_user_login_stage)"
authentication_flow_pk="$(ensure_authentication_flow)"
ensure_flow_binding "$authentication_flow_pk" "$identification_stage_pk" 10
ensure_flow_binding "$authentication_flow_pk" "$user_login_stage_pk" 30

# Point every resolved provider at the reconciled authentication flow.
for provider_pk in "${provider_pks[@]}"; do
api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$(
jq -cn --arg flow "$authentication_flow_pk" '{authentication_flow: $flow}'
)" >/dev/null
done
echo "Synced Burrow Tailnet authentication flow for providers ${provider_slugs_json}."

View file

@ -0,0 +1,251 @@
#!/usr/bin/env bash
# Reconcile the Tailscale OIDC provider and application in Authentik,
# cloning flow/mapping/signing settings from an existing template
# provider. Skips silently when no client secret is configured yet.
set -euo pipefail

# Configuration, overridable through the environment (see usage()).
authentik_url="${AUTHENTIK_URL:-https://auth.burrow.net}"
bootstrap_token="${AUTHENTIK_BOOTSTRAP_TOKEN:-}"
application_slug="${AUTHENTIK_TAILSCALE_APPLICATION_SLUG:-tailscale}"
application_name="${AUTHENTIK_TAILSCALE_APPLICATION_NAME:-Tailscale}"
provider_name="${AUTHENTIK_TAILSCALE_PROVIDER_NAME:-Tailscale}"
# Application slug of an existing provider whose flows/mappings/key are copied.
template_slug="${AUTHENTIK_TAILSCALE_TEMPLATE_SLUG:-ts}"
client_id="${AUTHENTIK_TAILSCALE_CLIENT_ID:-tailscale.burrow.net}"
client_secret="${AUTHENTIK_TAILSCALE_CLIENT_SECRET:-}"
launch_url="${AUTHENTIK_TAILSCALE_LAUNCH_URL:-https://login.tailscale.com/start/oidc}"
redirect_uris_json="${AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON:-[
\"https://login.tailscale.com/a/oauth_response\"
]}"

# Print a summary of the required/optional environment variables.
usage() {
cat <<'EOF'
Usage: Scripts/authentik-sync-tailscale-oidc.sh
Required environment:
AUTHENTIK_BOOTSTRAP_TOKEN
AUTHENTIK_TAILSCALE_CLIENT_SECRET
Optional environment:
AUTHENTIK_URL
AUTHENTIK_TAILSCALE_APPLICATION_SLUG
AUTHENTIK_TAILSCALE_APPLICATION_NAME
AUTHENTIK_TAILSCALE_PROVIDER_NAME
AUTHENTIK_TAILSCALE_TEMPLATE_SLUG
AUTHENTIK_TAILSCALE_CLIENT_ID
AUTHENTIK_TAILSCALE_LAUNCH_URL
AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON
EOF
}

if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
usage
exit 0
fi

if [[ -z "$bootstrap_token" ]]; then
echo "error: AUTHENTIK_BOOTSTRAP_TOKEN is required" >&2
exit 1
fi

# A missing or placeholder ("PENDING...") secret is a successful no-op,
# so bootstrap can run before the Tailscale side is configured.
if [[ -z "$client_secret" || "$client_secret" == PENDING* ]]; then
echo "Tailscale OIDC client secret is not configured; skipping Authentik Tailscale sync." >&2
exit 0
fi

if ! printf '%s' "$redirect_uris_json" | jq -e 'type == "array" and length > 0' >/dev/null; then
echo "error: AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON must be a non-empty JSON array" >&2
exit 1
fi
# Issue an authenticated request against the Authentik API.
#   $1 - HTTP method, $2 - path appended to $authentik_url,
#   $3 - optional JSON body (adds a Content-Type header and -d).
# Fails (non-zero) on transport errors or HTTP >= 400 thanks to -f.
api() {
  local method="$1" path="$2" body="${3:-}"
  local -a curl_args=(
    -fsS
    -X "$method"
    -H "Authorization: Bearer ${bootstrap_token}"
  )
  if [[ -n "$body" ]]; then
    curl_args+=(-H "Content-Type: application/json" -d "$body")
  fi
  curl "${curl_args[@]}" "${authentik_url}${path}"
}
# Like api(), but never fails on HTTP errors: prints the HTTP status
# code on the first output line followed by the raw response body, so
# callers can branch on specific statuses (e.g. 400 duplicates).
api_with_status() {
local method="$1"
local path="$2"
local data="${3:-}"
local response_file status
response_file="$(mktemp)"
# RETURN trap cleans the temp file on every exit path of this function.
trap 'rm -f "$response_file"' RETURN
if [[ -n "$data" ]]; then
status="$(
curl -sS \
-o "$response_file" \
-w '%{http_code}' \
-X "$method" \
-H "Authorization: Bearer ${bootstrap_token}" \
-H "Content-Type: application/json" \
-d "$data" \
"${authentik_url}${path}"
)"
else
status="$(
curl -sS \
-o "$response_file" \
-w '%{http_code}' \
-X "$method" \
-H "Authorization: Bearer ${bootstrap_token}" \
"${authentik_url}${path}"
)"
fi
# Line 1: status code; remaining lines: response body.
printf '%s\n' "$status"
cat "$response_file"
}
# Block until Authentik's readiness endpoint answers, polling every 2s
# for up to 90 attempts (~3 minutes). Exits the script on timeout.
wait_for_authentik() {
  local attempt=0
  while (( attempt < 90 )); do
    if curl -fsS "${authentik_url}/-/health/ready/" >/dev/null 2>&1; then
      return 0
    fi
    attempt=$((attempt + 1))
    sleep 2
  done
  echo "error: Authentik did not become ready at ${authentik_url}" >&2
  exit 1
}
wait_for_authentik

# Load the template provider (matched by its assigned application slug)
# whose flows, property mappings, and signing key are reused below.
template_provider="$(
api GET "/api/v3/providers/oauth2/?page_size=200" \
| jq -c --arg template_slug "$template_slug" '.results[]? | select(.assigned_application_slug == $template_slug)' \
| head -n1
)"
if [[ -z "$template_provider" ]]; then
echo "error: could not resolve the Authentik OAuth provider template ${template_slug}" >&2
exit 1
fi
authorization_flow="$(printf '%s\n' "$template_provider" | jq -r '.authorization_flow')"
invalidation_flow="$(printf '%s\n' "$template_provider" | jq -r '.invalidation_flow')"
property_mappings="$(printf '%s\n' "$template_provider" | jq -c '.property_mappings')"
signing_key="$(printf '%s\n' "$template_provider" | jq -r '.signing_key')"

# Desired state of the Tailscale OIDC provider.
provider_payload="$(
jq -n \
--arg name "$provider_name" \
--arg authorization_flow "$authorization_flow" \
--arg invalidation_flow "$invalidation_flow" \
--arg client_id "$client_id" \
--arg client_secret "$client_secret" \
--arg signing_key "$signing_key" \
--argjson property_mappings "$property_mappings" \
--argjson redirect_uris "$redirect_uris_json" \
'{
name: $name,
authorization_flow: $authorization_flow,
invalidation_flow: $invalidation_flow,
client_type: "confidential",
client_id: $client_id,
client_secret: $client_secret,
include_claims_in_id_token: true,
redirect_uris: ($redirect_uris | map({matching_mode: "strict", url: .})),
property_mappings: $property_mappings,
signing_key: $signing_key,
issuer_mode: "per_provider",
sub_mode: "hashed_user_id"
}'
)"

# PATCH an existing provider (matched by application slug or name),
# otherwise create a new one and capture its pk.
existing_provider="$(
api GET "/api/v3/providers/oauth2/?page_size=200" \
| jq -c \
--arg application_slug "$application_slug" \
--arg provider_name "$provider_name" \
'.results[]? | select(.assigned_application_slug == $application_slug or .name == $provider_name)' \
| head -n1
)"
if [[ -n "$existing_provider" ]]; then
provider_pk="$(printf '%s\n' "$existing_provider" | jq -r '.pk')"
api PATCH "/api/v3/providers/oauth2/${provider_pk}/" "$provider_payload" >/dev/null
else
provider_pk="$(
api POST "/api/v3/providers/oauth2/" "$provider_payload" \
| jq -r '.pk // empty'
)"
fi
if [[ -z "${provider_pk:-}" ]]; then
echo "error: Tailscale OIDC provider did not return a primary key" >&2
exit 1
fi
# Desired state of the Tailscale application, pointing at the provider
# reconciled above.
application_payload="$(
jq -n \
--arg name "$application_name" \
--arg slug "$application_slug" \
--arg provider "$provider_pk" \
--arg launch_url "$launch_url" \
'{
name: $name,
slug: $slug,
provider: ($provider | tonumber),
meta_launch_url: $launch_url,
open_in_new_tab: true,
policy_engine_mode: "any"
}'
)"
existing_application="$(
api GET "/api/v3/core/applications/?page_size=200" \
| jq -c --arg slug "$application_slug" '.results[]? | select(.slug == $slug)' \
| head -n1
)"
if [[ -n "$existing_application" ]]; then
application_pk="$(printf '%s\n' "$existing_application" | jq -r '.pk')"
else
# Creation can race with another reconciler: a 400 body naming a
# duplicate slug/provider is treated as "already reconciled".
create_application_result="$(
api_with_status POST "/api/v3/core/applications/" "$application_payload"
)"
create_application_status="$(printf '%s\n' "$create_application_result" | sed -n '1p')"
create_application_body="$(printf '%s\n' "$create_application_result" | sed '1d')"
if [[ "$create_application_status" =~ ^20[01]$ ]]; then
application_pk="$(printf '%s\n' "$create_application_body" | jq -r '.pk // empty')"
elif [[ "$create_application_status" == "400" ]] && printf '%s\n' "$create_application_body" | jq -e '
(.slug // [] | index("Application with this slug already exists.")) != null
or (.provider // [] | index("Application with this provider already exists.")) != null
' >/dev/null; then
# Sentinel value: the application exists but its pk is unknown/unneeded.
application_pk="existing-duplicate"
else
printf '%s\n' "$create_application_body" >&2
echo "error: could not reconcile Authentik application ${application_slug}" >&2
exit 1
fi
fi
if [[ -z "${application_pk:-}" ]]; then
echo "error: Tailscale OIDC application did not return a primary key" >&2
exit 1
fi

# Best-effort wait (~60s) for the issuer discovery document to appear;
# a timeout only downgrades to a warning, the config stays reconciled.
for _ in $(seq 1 30); do
if curl -fsS "${authentik_url}/application/o/${application_slug}/.well-known/openid-configuration" >/dev/null 2>&1; then
echo "Synced Authentik Tailscale OIDC application ${application_slug} (${application_name})."
exit 0
fi
sleep 2
done
echo "warning: Tailscale OIDC issuer document for ${application_slug} was not immediately readable; keeping reconciled config." >&2
echo "Synced Authentik Tailscale OIDC application ${application_slug} (${application_name})."

133
Scripts/bep Executable file
View file

@ -0,0 +1,133 @@
#!/usr/bin/env bash
# bep: helper for browsing, listing, and opening Burrow Evolution
# Proposals stored under evolution/proposals/. Must run inside the repo.
set -euo pipefail

repo_root=$(git rev-parse --show-toplevel)
proposals_dir="$repo_root/evolution/proposals"

# Default action: open the `wisu` file browser on evolution/ when
# installed, otherwise fall back to a plain directory listing.
auto_browse() {
if command -v wisu >/dev/null 2>&1; then
exec wisu -i -g --icons "$repo_root/evolution"
fi
exec ls -la "$repo_root/evolution"
}
# Print command-line help for the bep tool.
usage() {
cat <<'USAGE'
Usage: bep [command]
Commands:
list [--status <Status>] List BEPs, optionally filtered by status.
open <BEP-XXXX|XXXX|X> Open a BEP in $EDITOR.
help Show this help.
If no command is provided, bep launches a simple browser for evolution/.
USAGE
}
# Normalize a user-supplied proposal id to canonical "BEP-NNNN" form.
# Accepts an already-canonical "BEP-<digits>" (printed unchanged) or a
# bare number, which is zero-padded to at least four digits.
# Returns 1 (printing nothing) for any other input.
normalize_id() {
  local raw="$1"
  if [[ "$raw" =~ ^BEP-[0-9]+$ ]]; then
    printf '%s' "$raw"
    return 0
  fi
  if [[ "$raw" =~ ^[0-9]+$ ]]; then
    # Force base-10: a bare printf %d would treat a leading zero
    # ("0010") as octal and emit BEP-0008 instead of BEP-0010.
    printf 'BEP-%04d' "$((10#$raw))"
    return 0
  fi
  return 1
}
# Print the value of the first "Status:" line in proposal file $1.
read_status() {
local file="$1"
awk -F ': ' '/^Status:/ {print $2; exit}' "$file"
}

# Print the proposal title: the first line of $1 with the leading
# "# `BEP-XXXX`" marker (and any other leading non-alphanumerics) stripped.
read_title() {
local file="$1"
local line
line=$(head -n 1 "$file" || true)
printf '%s' "$line" | sed -E 's/^# `[^`]+`[[:space:]]+//; s/^[^A-Za-z0-9]+//'
}
# Print a sorted table of BEPs (id, status, title), optionally filtered
# by a case-insensitive status match in $1.
list_bep() {
local filter="${1:-}"
local filter_lower=""
if [[ -n "$filter" ]]; then
filter_lower=$(printf '%s' "$filter" | tr '[:upper:]' '[:lower:]')
fi
printf '%-10s %-18s %s\n' "BEP" "Status" "Title"
local file
local entries=()
for file in "$proposals_dir"/BEP-*.md; do
# With nullglob unset a non-matching glob stays literal; skip it.
[[ -e "$file" ]] || continue
local base
base=$(basename "$file")
local id
# "BEP-0001-some-title.md" -> "BEP-0001".
id=$(printf '%s' "$base" | cut -d- -f1-2)
local status
status=$(read_status "$file")
local status_lower
status_lower=$(printf '%s' "$status" | tr '[:upper:]' '[:lower:]')
if [[ -n "$filter_lower" && "$status_lower" != "$filter_lower" ]]; then
continue
fi
local title
title=$(read_title "$file")
entries+=("$(printf '%-10s %-18s %s' "$id" "$status" "$title")")
done
# Sort rows by id (leading column) for stable output.
if [[ ${#entries[@]} -gt 0 ]]; then
printf '%s\n' "${entries[@]}" | sort
fi
}
# Open the proposal matching id $1 in $EDITOR (default vi). Exits with
# an error when the id is malformed, unmatched, or ambiguous.
open_bep() {
local raw="$1"
local id
if ! id=$(normalize_id "$raw"); then
echo "Unknown BEP id: $raw" >&2
exit 1
fi
local matches
matches=("$proposals_dir"/"$id"-*.md)
# Unmatched globs stay literal, so also check the first entry exists.
if [[ ${#matches[@]} -eq 0 || ! -e "${matches[0]}" ]]; then
echo "No proposal found for $id" >&2
exit 1
fi
if [[ ${#matches[@]} -gt 1 ]]; then
echo "Multiple proposals match $id:" >&2
printf ' %s\n' "${matches[@]}" >&2
exit 1
fi
local editor="${EDITOR:-vi}"
exec "$editor" "${matches[0]}"
}
# Command dispatch: no argument browses, otherwise list/open/help.
command=${1:-}
case "$command" in
"")
auto_browse
;;
list)
# Optional "--status <Status>" filter.
if [[ ${2:-} == "--status" && -n ${3:-} ]]; then
list_bep "$3"
else
list_bep
fi
;;
open)
if [[ -z ${2:-} ]]; then
echo "bep open requires an id" >&2
exit 1
fi
open_bep "$2"
;;
help|-h|--help)
usage
;;
*)
echo "Unknown command: $command" >&2
usage
exit 1
;;
esac

94
Scripts/check-bep-metadata.py Executable file
View file

@ -0,0 +1,94 @@
#!/usr/bin/env python3
from __future__ import annotations
import pathlib
import re
import sys
REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent
PROPOSALS_DIR = REPO_ROOT / "evolution" / "proposals"
ALLOWED_STATUSES = {
"Pitch",
"Draft",
"In Review",
"Accepted",
"Implemented",
"Rejected",
"Returned for Revision",
"Superseded",
"Archived",
}
REQUIRED_FIELDS = [
"Status",
"Proposal",
"Authors",
"Coordinator",
"Reviewers",
"Constitution Sections",
"Implementation PRs",
"Decision Date",
]
def text_block_lines(path: pathlib.Path) -> list[str]:
content = path.read_text(encoding="utf-8")
match = re.search(r"```text\n(.*?)\n```", content, re.DOTALL)
if not match:
raise ValueError("missing leading ```text metadata block")
return [line.rstrip() for line in match.group(1).splitlines() if line.strip()]
def validate(path: pathlib.Path) -> list[str]:
errors: list[str] = []
proposal_id = path.name.split("-", 2)[:2]
expected_id = "-".join(proposal_id).removesuffix(".md")
try:
lines = text_block_lines(path)
except ValueError as exc:
return [f"{path}: {exc}"]
field_names = [line.split(":", 1)[0] for line in lines]
if field_names != REQUIRED_FIELDS:
errors.append(
f"{path}: metadata fields must appear in order {', '.join(REQUIRED_FIELDS)}"
)
return errors
fields = dict(line.split(":", 1) for line in lines)
fields = {key.strip(): value.strip() for key, value in fields.items()}
if fields["Status"] not in ALLOWED_STATUSES:
errors.append(f"{path}: invalid Status {fields['Status']!r}")
if fields["Proposal"] != expected_id:
errors.append(
f"{path}: Proposal field {fields['Proposal']!r} does not match filename id {expected_id!r}"
)
if fields["Status"] in {"Accepted", "Implemented", "Superseded", "Rejected", "Archived"} and fields["Decision Date"] == "Pending":
errors.append(
f"{path}: Decision Date must not be Pending once status is {fields['Status']}"
)
return errors
def main() -> int:
errors: list[str] = []
for path in sorted(PROPOSALS_DIR.glob("BEP-*.md")):
errors.extend(validate(path))
if errors:
for error in errors:
print(error, file=sys.stderr)
return 1
print(f"checked {len(list(PROPOSALS_DIR.glob('BEP-*.md')))} BEPs")
return 0
if __name__ == "__main__":
raise SystemExit(main())

View file

@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Build a release tarball of the burrow CLI for x86_64 Linux and write a
# SHA-256 checksum next to it. RELEASE_REF names the artifact, falling
# back to the triggering commit SHA (or "unknown") when unset.
set -euo pipefail

repo_root="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "${repo_root}"

release_ref="${RELEASE_REF:-manual-${GITHUB_SHA:-unknown}}"
target="x86_64-unknown-linux-gnu"
out_dir="${repo_root}/dist"
staging="${out_dir}/burrow-${release_ref}-${target}"

mkdir -p "${staging}"

cargo build --locked --release -p burrow --bin burrow
install -m 0755 target/release/burrow "${staging}/burrow"
cp README.md "${staging}/README.md"

tarball="${out_dir}/burrow-${release_ref}-${target}.tar.gz"
tar -C "${out_dir}" -czf "${tarball}" "$(basename "${staging}")"

# Prefer coreutils sha256sum (present on the Linux release runners);
# shasum is a Perl tool that minimal Linux images often do not ship.
if command -v sha256sum >/dev/null 2>&1; then
  sha256sum "${tarball}" > "${tarball}.sha256"
else
  shasum -a 256 "${tarball}" > "${tarball}.sha256"
fi

157
Scripts/ci/ensure-nix.sh Executable file
View file

@ -0,0 +1,157 @@
#!/usr/bin/env bash
set -euo pipefail
source_nix_profile() {
local candidate
for candidate in \
"/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" \
"${HOME}/.nix-profile/etc/profile.d/nix.sh"
do
if [[ -f "${candidate}" ]]; then
# shellcheck disable=SC1090
. "${candidate}"
return 0
fi
done
return 1
}
linux_cp_supports_preserve() {
cp --help 2>&1 | grep -q -- '--preserve'
}
ensure_root_owned_home() {
if [[ "$(id -u)" -ne 0 ]]; then
return 0
fi
if [[ ! -d "${HOME}" ]] || [[ ! -O "${HOME}" ]]; then
export HOME="/root"
fi
mkdir -p "${HOME}"
}
ensure_linux_nixbld_accounts() {
if [[ "$(id -u)" -ne 0 ]]; then
return 0
fi
if command -v getent >/dev/null 2>&1 && getent group nixbld >/dev/null 2>&1; then
return 0
fi
if command -v addgroup >/dev/null 2>&1 && ! command -v groupadd >/dev/null 2>&1; then
addgroup -S nixbld >/dev/null 2>&1 || true
for i in $(seq 1 10); do
adduser -S -D -H -h /var/empty -s /sbin/nologin -G nixbld "nixbld${i}" >/dev/null 2>&1 || true
done
return 0
fi
if command -v groupadd >/dev/null 2>&1; then
groupadd -r nixbld >/dev/null 2>&1 || true
for i in $(seq 1 10); do
useradd \
--system \
--no-create-home \
--home-dir /var/empty \
--shell /usr/sbin/nologin \
--gid nixbld \
"nixbld${i}" >/dev/null 2>&1 || true
done
return 0
fi
echo "linux nix bootstrap requires nixbld group creation support" >&2
exit 1
}
ensure_linux_nix_bootstrap_prereqs() {
if linux_cp_supports_preserve; then
ensure_root_owned_home
ensure_linux_nixbld_accounts
return 0
fi
if command -v apk >/dev/null 2>&1; then
apk add --no-cache coreutils xz >/dev/null
elif command -v apt-get >/dev/null 2>&1; then
export DEBIAN_FRONTEND=noninteractive
apt-get update -y >/dev/null
apt-get install -y coreutils xz-utils >/dev/null
elif command -v dnf >/dev/null 2>&1; then
dnf install -y coreutils xz >/dev/null
elif command -v yum >/dev/null 2>&1; then
yum install -y coreutils xz >/dev/null
else
echo "linux nix bootstrap requires GNU cp but no supported package manager was found" >&2
exit 1
fi
linux_cp_supports_preserve || {
echo "linux nix bootstrap still lacks GNU cp after installing prerequisites" >&2
exit 1
}
ensure_root_owned_home
ensure_linux_nixbld_accounts
}
if ! command -v nix >/dev/null 2>&1; then
if ! command -v curl >/dev/null 2>&1; then
echo "curl is required to install nix" >&2
exit 1
fi
case "$(uname -s)" in
Linux)
ensure_linux_nix_bootstrap_prereqs
curl -fsSL https://nixos.org/nix/install | sh -s -- --no-daemon
;;
Darwin)
installer="$(mktemp -t burrow-nix.XXXXXX)"
trap 'rm -f "${installer}"' EXIT
curl -fsSL -o "${installer}" https://install.determinate.systems/nix
chmod +x "${installer}"
if command -v sudo >/dev/null 2>&1; then
if sudo -n true 2>/dev/null; then
sudo -n sh "${installer}" install --no-confirm
else
sudo sh "${installer}" install --no-confirm
fi
else
sh "${installer}" install --no-confirm
fi
;;
*)
echo "unsupported platform for nix bootstrap: $(uname -s)" >&2
exit 1
;;
esac
fi
source_nix_profile || true
export PATH="${HOME}/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}"
config_root="${XDG_CONFIG_HOME:-$HOME/.config}"
config_file="${config_root}/nix/nix.conf"
if [[ -e "${config_file}" && ! -w "${config_file}" ]]; then
config_root="$(mktemp -d -t burrow-nix-config.XXXXXX)"
export XDG_CONFIG_HOME="${config_root}"
config_file="${XDG_CONFIG_HOME}/nix/nix.conf"
fi
mkdir -p "$(dirname -- "${config_file}")"
cat > "${config_file}" <<'EOF'
experimental-features = nix-command flakes
sandbox = true
fallback = true
substituters = https://cache.nixos.org
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
EOF
command -v nix >/dev/null 2>&1 || {
echo "nix is still unavailable after bootstrap" >&2
exit 1
}

View file

@ -0,0 +1,65 @@
#!/usr/bin/env bash
# Create (or reuse) the Forgejo release for RELEASE_TAG and upload every
# artifact file in dist/ as a release asset, replacing same-named assets.
set -euo pipefail

# All four inputs are mandatory; fail fast with a clear message.
: "${API_URL:?API_URL is required}"
: "${REPOSITORY:?REPOSITORY is required}"
: "${RELEASE_TAG:?RELEASE_TAG is required}"
: "${TOKEN:?TOKEN is required}"

release_api="${API_URL}/repos/${REPOSITORY}/releases"
tag_api="${release_api}/tags/${RELEASE_TAG}"

release_json="$(mktemp)"
create_json="$(mktemp)"
trap 'rm -f "${release_json}" "${create_json}"' EXIT

# Look up an existing release for the tag; a 404 means we create one.
status="$(
  curl -sS -o "${release_json}" -w '%{http_code}' \
    -H "Authorization: token ${TOKEN}" \
    "${tag_api}"
)"
if [[ "${status}" == "404" ]]; then
  jq -n \
    --arg tag "${RELEASE_TAG}" \
    --arg name "Burrow ${RELEASE_TAG}" \
    '{
      tag_name: $tag,
      target_commitish: $tag,
      name: $name,
      body: "Automated prerelease built on Forgejo Namespace runners.",
      draft: false,
      prerelease: true
    }' > "${create_json}"
  curl -fsS \
    -H "Authorization: token ${TOKEN}" \
    -H "Content-Type: application/json" \
    -d @"${create_json}" \
    "${release_api}" > "${release_json}"
elif [[ "${status}" != "200" ]]; then
  echo "failed to query Forgejo release for ${RELEASE_TAG} (HTTP ${status})" >&2
  cat "${release_json}" >&2
  exit 1
fi

release_id="$(jq -r '.id' "${release_json}")"
if [[ -z "${release_id}" || "${release_id}" == "null" ]]; then
  echo "Forgejo release payload is missing an id" >&2
  cat "${release_json}" >&2
  exit 1
fi

for file in dist/*; do
  # dist/ also contains the unpacked staging directory left behind by
  # the build script (and the literal glob when dist/ is empty); only
  # regular files can be uploaded, so skip everything else instead of
  # letting curl -F abort the whole upload on a directory.
  [[ -f "${file}" ]] || continue
  name="$(basename "${file}")"
  # Delete any existing asset with the same name so re-runs replace it.
  asset_id="$(jq -r --arg name "${name}" '.assets[]? | select(.name == $name) | .id' "${release_json}" | head -n1)"
  if [[ -n "${asset_id}" ]]; then
    curl -fsS -X DELETE \
      -H "Authorization: token ${TOKEN}" \
      "${release_api}/${release_id}/assets/${asset_id}" >/dev/null
  fi
  curl -fsS \
    -H "Authorization: token ${TOKEN}" \
    -F "attachment=@${file}" \
    "${release_api}/${release_id}/assets?name=${name}" >/dev/null
done

View file

@ -0,0 +1,73 @@
#!/usr/bin/env bash
# Run the Burrow iOS UI tests against a locally built daemon: builds the
# burrow binary, starts it on a simulator-fallback unix socket, then
# drives xcodebuild against an iOS simulator.
set -euo pipefail

repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"

# Simulator/build configuration, overridable via environment.
bundle_id="${BURROW_UI_TEST_APP_BUNDLE_ID:-com.hackclub.burrow}"
simulator_name="${BURROW_UI_TEST_SIMULATOR_NAME:-iPhone 17 Pro}"
simulator_os="${BURROW_UI_TEST_SIMULATOR_OS:-26.4}"
derived_data_path="${BURROW_UI_TEST_DERIVED_DATA_PATH:-/tmp/burrow-ui-tests-deriveddata}"
source_packages_path="${BURROW_UI_TEST_SOURCE_PACKAGES_PATH:-/tmp/burrow-ui-tests-sourcepackages}"

# The app's simulator fallback directory is where the daemon socket lives.
fallback_dir="${HOME}/Library/Application Support/${bundle_id}/SimulatorFallback"
socket_path="${fallback_dir}/burrow.sock"
daemon_log="${BURROW_UI_TEST_DAEMON_LOG:-/tmp/burrow-ui-test-daemon.log}"

# Test-account credentials; the password may come from an age secret.
ui_test_email="${BURROW_UI_TEST_EMAIL:-ui-test@burrow.net}"
ui_test_username="${BURROW_UI_TEST_USERNAME:-ui-test}"
password_secret="${repo_root}/secrets/infra/authentik-ui-test-password.age"
age_identity="${BURROW_UI_TEST_AGE_IDENTITY:-${HOME}/.ssh/id_ed25519}"
ui_test_password="${BURROW_UI_TEST_PASSWORD:-}"
# Fall back to decrypting the committed secret with a local SSH identity
# when the password is not supplied directly.
if [[ -z "$ui_test_password" ]]; then
if [[ -f "$password_secret" && -f "$age_identity" ]]; then
ui_test_password="$(age -d -i "$age_identity" "$password_secret" | tr -d '\r\n')"
else
echo "error: BURROW_UI_TEST_PASSWORD is unset and ${password_secret} could not be decrypted" >&2
exit 1
fi
fi

mkdir -p "$fallback_dir" "$derived_data_path" "$source_packages_path"
# Remove any stale socket so the daemon-readiness wait below is reliable.
rm -f "$socket_path"

# Stop the background daemon on every exit path.
cleanup() {
if [[ -n "${daemon_pid:-}" ]]; then
kill "$daemon_pid" >/dev/null 2>&1 || true
wait "$daemon_pid" >/dev/null 2>&1 || true
fi
}
trap cleanup EXIT

# Build and launch the daemon with its socket in the fallback directory.
cargo build -p burrow --bin burrow
(
cd "$fallback_dir"
BURROW_SOCKET_PATH="burrow.sock" \
"${repo_root}/target/debug/burrow" daemon >"$daemon_log" 2>&1
) &
daemon_pid=$!

# Wait up to ~10s (50 * 0.2s) for the daemon socket to appear.
for _ in $(seq 1 50); do
[[ -S "$socket_path" ]] && break
sleep 0.2
done
if [[ ! -S "$socket_path" ]]; then
echo "error: Burrow daemon did not create ${socket_path}" >&2
[[ -f "$daemon_log" ]] && cat "$daemon_log" >&2
exit 1
fi

# Run only the BurrowUITests bundle, passing the test credentials through
# the environment; code signing is disabled for simulator builds.
BURROW_UI_TEST_EMAIL="$ui_test_email" \
BURROW_UI_TEST_USERNAME="$ui_test_username" \
BURROW_UI_TEST_PASSWORD="$ui_test_password" \
xcodebuild \
-quiet \
-skipPackagePluginValidation \
-project "${repo_root}/Apple/Burrow.xcodeproj" \
-scheme App \
-configuration Debug \
-destination "platform=iOS Simulator,name=${simulator_name},OS=${simulator_os}" \
-derivedDataPath "$derived_data_path" \
-clonedSourcePackagesDirPath "$source_packages_path" \
-only-testing:BurrowUITests \
CODE_SIGNING_ALLOWED=NO \
test

View file

@ -0,0 +1,186 @@
#!/usr/bin/env bash
# Tailnet connectivity smoke test: starts the burrow daemon plus a
# remote tailscale-login-bridge helper and checks traffic between them.
set -euo pipefail

repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"

# Working directories, file locations, and tailnet parameters; all
# overridable through BURROW_TAILNET_SMOKE_* environment variables.
bundle_id="${BURROW_UI_TEST_APP_BUNDLE_ID:-com.hackclub.burrow}"
smoke_root="${BURROW_TAILNET_SMOKE_ROOT:-/tmp/burrow-tailnet-connectivity}"
socket_path="${smoke_root}/burrow.sock"
db_path="${smoke_root}/burrow.db"
daemon_log="${BURROW_TAILNET_SMOKE_DAEMON_LOG:-${smoke_root}/daemon.log}"
payload_path="${smoke_root}/tailnet.json"
authority="${BURROW_TAILNET_SMOKE_AUTHORITY:-https://ts.burrow.net}"
account_name="${BURROW_TAILNET_SMOKE_ACCOUNT:-ui-test}"
identity_name="${BURROW_TAILNET_SMOKE_IDENTITY:-apple}"
hostname="${BURROW_TAILNET_SMOKE_HOSTNAME:-burrow-apple}"
message="${BURROW_TAILNET_SMOKE_MESSAGE:-burrow-tailnet-smoke}"
timeout_ms="${BURROW_TAILNET_SMOKE_TIMEOUT_MS:-8000}"
remote_ip="${BURROW_TAILNET_SMOKE_REMOTE_IP:-}"
remote_port="${BURROW_TAILNET_SMOKE_REMOTE_PORT:-18081}"
remote_hostname="${BURROW_TAILNET_SMOKE_REMOTE_HOSTNAME:-burrow-echo}"
remote_authkey="${BURROW_TAILNET_SMOKE_REMOTE_AUTHKEY:-}"
helper_bin="${BURROW_TAILNET_SMOKE_HELPER_BIN:-${smoke_root}/tailscale-login-bridge}"
remote_state_root="${BURROW_TAILNET_SMOKE_REMOTE_STATE_ROOT:-${smoke_root}/remote-state}"
remote_stdout="${smoke_root}/remote-helper.stdout"
remote_stderr="${BURROW_TAILNET_SMOKE_REMOTE_LOG:-${smoke_root}/remote-helper.log}"

# With an auth key we use throwaway local state; otherwise reuse the
# simulator's persisted tailnet login state.
if [[ -n "${TS_AUTHKEY:-}" ]]; then
default_tailnet_state_root="${smoke_root}/local-state"
else
default_tailnet_state_root="/tmp/${bundle_id}/SimulatorTailnetState"
fi
tailnet_state_root="${BURROW_TAILNET_STATE_ROOT:-${default_tailnet_state_root}}"

# No auth key and no existing login state -> bootstrap a login via the
# iOS UI test flow first.
need_login=0
if [[ -z "${TS_AUTHKEY:-}" ]] && { [[ ! -d "$tailnet_state_root" ]] || [[ -z "$(find "$tailnet_state_root" -mindepth 1 -maxdepth 2 -print -quit 2>/dev/null)" ]]; }; then
need_login=1
fi
if [[ "$need_login" -eq 1 ]]; then
echo "Tailnet state root is empty; running iOS login bootstrap first..."
"${repo_root}/Scripts/run-ios-tailnet-ui-tests.sh"
fi

# Start from a clean smoke workspace.
rm -rf "$smoke_root"
mkdir -p "$smoke_root"

# Tear down the payload file and any background daemon/helper on exit.
cleanup() {
rm -f "$payload_path"
if [[ -n "${daemon_pid:-}" ]]; then
kill "$daemon_pid" >/dev/null 2>&1 || true
wait "$daemon_pid" >/dev/null 2>&1 || true
fi
if [[ -n "${remote_pid:-}" ]]; then
kill "$remote_pid" >/dev/null 2>&1 || true
wait "$remote_pid" >/dev/null 2>&1 || true
fi
}
trap cleanup EXIT
# Wait up to 20s for the helper's stdout file ($1) to contain its JSON
# startup line, then print that line's listen_addr.
wait_for_helper_listen() {
python3 - <<'PY' "$1"
import json
import pathlib
import sys
import time

path = pathlib.Path(sys.argv[1])
deadline = time.time() + 20
while time.time() < deadline:
    if path.exists():
        with path.open("r", encoding="utf-8") as handle:
            line = handle.readline().strip()
        if line:
            hello = json.loads(line)
            print(hello["listen_addr"])
            raise SystemExit(0)
    time.sleep(0.1)
raise SystemExit("timed out waiting for helper startup line")
PY
}
# Poll the helper's /status endpoint (URL in $1) for up to 30s until it
# reports a running tailnet with at least one Tailscale IP, then print
# the first IP. Connection failures and partial responses are retried
# until the deadline; previously a refused connection (helper not yet
# listening) raised out of the loop and aborted the whole smoke run.
wait_for_helper_ip() {
python3 - <<'PY' "$1"
import json
import sys
import time
import urllib.error
import urllib.request

url = sys.argv[1]
deadline = time.time() + 30
while time.time() < deadline:
    try:
        with urllib.request.urlopen(url, timeout=5) as response:
            status = json.load(response)
    except (urllib.error.URLError, OSError, json.JSONDecodeError):
        status = {}
    if status.get("running") and status.get("tailscale_ips"):
        print(status["tailscale_ips"][0])
        raise SystemExit(0)
    time.sleep(0.25)
raise SystemExit("timed out waiting for helper to become ready")
PY
}
# Render the JSON payload consumed by `burrow network-add` below.
# NOTE(review): payload_path, authority, account_name, identity_name, and
# hostname are defined earlier in this script -- confirm before reuse.
python3 - <<'PY' "$payload_path" "$authority" "$account_name" "$identity_name" "$hostname"
import json
import pathlib
import sys
path = pathlib.Path(sys.argv[1])
payload = {
    "authority": sys.argv[2],
    "account": sys.argv[3],
    "identity": sys.argv[4],
    "hostname": sys.argv[5],
}
path.write_text(json.dumps(payload, indent=2) + "\n", encoding="utf-8")
PY
# Build the burrow CLI/daemon and the Go login-bridge helper binary.
cargo build -p burrow --bin burrow
(
cd "${repo_root}/Tools/tailscale-login-bridge"
GOWORK=off go build -o "$helper_bin" .
)
# Without an explicit remote IP, launch a local helper instance to act as the
# remote echo peer, using either a fresh auth key or pre-seeded login state.
if [[ -z "$remote_ip" ]]; then
if [[ -z "$remote_authkey" ]] && { [[ ! -d "$remote_state_root" ]] || [[ -z "$(find "$remote_state_root" -mindepth 1 -maxdepth 1 -print -quit 2>/dev/null)" ]]; }; then
echo "error: set BURROW_TAILNET_SMOKE_REMOTE_IP, BURROW_TAILNET_SMOKE_REMOTE_AUTHKEY, or BURROW_TAILNET_SMOKE_REMOTE_STATE_ROOT to an existing logged-in helper state" >&2
exit 1
fi
# An auth key implies a fresh login, so discard any stale helper state first.
if [[ -n "$remote_authkey" ]]; then
rm -rf "$remote_state_root"
mkdir -p "$remote_state_root"
fi
# Run the helper in the background; its stdout hello line and stderr log go
# to separate files so the hello JSON stays machine-parsable.
(
cd "$repo_root"
if [[ -n "$remote_authkey" ]]; then
export TS_AUTHKEY="$remote_authkey"
fi
"$helper_bin" \
--listen 127.0.0.1:0 \
--state-dir "$remote_state_root" \
--hostname "$remote_hostname" \
--control-url "$authority" \
--udp-echo-port "$remote_port" \
>"$remote_stdout" 2>"$remote_stderr"
) &
remote_pid=$!
# Resolve the helper's HTTP address from its hello line, then wait for it to
# join the tailnet and report its tailscale IP.
remote_listen_addr="$(wait_for_helper_listen "$remote_stdout")"
remote_ip="$(wait_for_helper_ip "http://${remote_listen_addr}/status")"
fi
# Launch the burrow daemon in the background, logging to daemon_log.
# NOTE(review): socket_path, daemon_log, message, and timeout_ms come from
# earlier in this script -- confirm before reuse.
(
cd "$smoke_root"
RUST_LOG="${BURROW_TAILNET_SMOKE_RUST_LOG:-info,burrow=debug}" \
BURROW_SOCKET_PATH="$socket_path" \
BURROW_TAILSCALE_STATE_ROOT="$tailnet_state_root" \
"${repo_root}/target/debug/burrow" daemon >"$daemon_log" 2>&1
) &
daemon_pid=$!
# Give the daemon up to ~10s (50 x 0.2s) to create its control socket.
for _ in $(seq 1 50); do
[[ -S "$socket_path" ]] && break
sleep 0.2
done
if [[ ! -S "$socket_path" ]]; then
echo "error: Burrow daemon did not create ${socket_path}" >&2
[[ -f "$daemon_log" ]] && cat "$daemon_log" >&2
exit 1
fi
# Thin wrapper so every CLI call targets the daemon's socket and state root.
run_burrow() {
BURROW_SOCKET_PATH="$socket_path" \
BURROW_TAILSCALE_STATE_ROOT="$tailnet_state_root" \
"${repo_root}/target/debug/burrow" "$@"
}
# Register the network, start the tunnel, and exercise UDP echo end-to-end.
run_burrow network-add 1 1 "$payload_path"
run_burrow start
run_burrow tunnel-config
run_burrow tailnet-udp-echo "${remote_ip}:${remote_port}" --message "$message" --timeout-ms "$timeout_ms"
echo
echo "Tailnet connectivity smoke passed."
echo "State root: $tailnet_state_root"
echo "Remote: ${remote_ip}:${remote_port}"

View file

@ -2,17 +2,26 @@ package main
import ( import (
"context" "context"
"encoding/binary"
"encoding/json" "encoding/json"
"errors"
"flag" "flag"
"fmt" "fmt"
"io"
"log" "log"
"net" "net"
"net/netip"
"net/http" "net/http"
"os" "os"
"strconv"
"sync"
"time" "time"
"github.com/tailscale/wireguard-go/tun"
"tailscale.com/client/local" "tailscale.com/client/local"
"tailscale.com/ipn" "tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
"tailscale.com/tailcfg"
"tailscale.com/tsnet" "tailscale.com/tsnet"
) )
@ -26,13 +35,123 @@ type statusResponse struct {
SelfDNSName string `json:"self_dns_name,omitempty"` SelfDNSName string `json:"self_dns_name,omitempty"`
TailscaleIPs []string `json:"tailscale_ips,omitempty"` TailscaleIPs []string `json:"tailscale_ips,omitempty"`
Health []string `json:"health,omitempty"` Health []string `json:"health,omitempty"`
Peers []peerSummary `json:"peers,omitempty"`
} }
// peerSummary is the per-peer slice of the /status response: identity,
// addresses, and link liveness taken from the local Tailscale status.
type peerSummary struct {
	Name         string   `json:"name,omitempty"`
	DNSName      string   `json:"dns_name,omitempty"`
	TailscaleIPs []string `json:"tailscale_ips,omitempty"`
	Online       bool     `json:"online"`
	Active       bool     `json:"active"`
	Relay        string   `json:"relay,omitempty"`
	CurAddr      string   `json:"cur_addr,omitempty"`
	LastSeenUnix int64    `json:"last_seen_unix,omitempty"`
}
// pingResponse wraps the raw Tailscale ping result for the /ping endpoint.
type pingResponse struct {
	Result *ipnstate.PingResult `json:"result,omitempty"`
}
// helperHello is the single JSON line printed to stdout at startup so the
// parent process can discover the HTTP listen address and, when enabled,
// the unix socket used for raw packet bridging.
type helperHello struct {
	ListenAddr   string `json:"listen_addr"`
	PacketSocket string `json:"packet_socket,omitempty"`
}
// helperState caches the most recently observed interactive-login URL so the
// /status handler can keep reporting it between IPN bus notifications.
type helperState struct {
	mu      sync.RWMutex
	authURL string
}

// authURLSnapshot returns the cached auth URL ("" when none is cached).
func (s *helperState) authURLSnapshot() string {
	s.mu.RLock()
	cached := s.authURL
	s.mu.RUnlock()
	return cached
}

// setAuthURL replaces the cached auth URL.
func (s *helperState) setAuthURL(url string) {
	s.mu.Lock()
	s.authURL = url
	s.mu.Unlock()
}

// clearAuthURL drops any cached auth URL.
func (s *helperState) clearAuthURL() {
	s.setAuthURL("")
}
// chanTUN is a tun.Device backed by channels so another process can feed and
// consume raw IP packets while tsnet handles the Tailnet control/data plane.
//
// Direction convention (relative to tsnet):
//   - Outbound carries packets supplied by the bridge peer for tsnet to Read.
//   - Inbound carries packets tsnet Writes, to be drained by the bridge peer.
type chanTUN struct {
	Inbound  chan []byte
	Outbound chan []byte
	closed   chan struct{}
	events   chan tun.Event
}

// newChanTUN builds a chanTUN with buffered packet queues and immediately
// queues tun.EventUp so tsnet treats the device as ready.
func newChanTUN() *chanTUN {
	t := &chanTUN{
		Inbound:  make(chan []byte, 1024),
		Outbound: make(chan []byte, 1024),
		closed:   make(chan struct{}),
		events:   make(chan tun.Event, 1),
	}
	t.events <- tun.EventUp
	return t
}

// File returns nil: this device is purely in-memory with no backing fd.
func (t *chanTUN) File() *os.File { return nil }

// Close marks the device as closed, idempotently. It deliberately does NOT
// close the Inbound channel: Write may be concurrently selecting a send on
// Inbound, and a send on a closed channel panics (Go spec). Consumers of
// Inbound must also select on t.closed to observe shutdown.
func (t *chanTUN) Close() error {
	select {
	case <-t.closed:
		// Already closed; Close is safe to call more than once.
	default:
		close(t.closed)
	}
	return nil
}

// Read blocks for the next bridged packet, copies it into bufs[0] at offset,
// and reports one packet per call. Returns io.EOF once the device is closed.
func (t *chanTUN) Read(bufs [][]byte, sizes []int, offset int) (int, error) {
	select {
	case <-t.closed:
		return 0, io.EOF
	case pkt, ok := <-t.Outbound:
		if !ok {
			return 0, io.EOF
		}
		sizes[0] = copy(bufs[0][offset:], pkt)
		return 1, nil
	}
}

// Write queues each non-empty packet onto Inbound for the bridge peer. When
// the queue is full the packet is dropped (the select's default case) rather
// than blocking tsnet's datapath.
func (t *chanTUN) Write(bufs [][]byte, offset int) (int, error) {
	for _, buf := range bufs {
		pkt := buf[offset:]
		if len(pkt) == 0 {
			continue
		}
		select {
		case <-t.closed:
			return 0, errors.New("closed")
		case t.Inbound <- append([]byte(nil), pkt...): // copy: buf may be reused by the caller
		default:
			// Queue full: drop the packet instead of blocking.
		}
	}
	return len(bufs), nil
}

func (t *chanTUN) MTU() (int, error)        { return 1280, nil }
func (t *chanTUN) Name() (string, error)    { return "burrow-tailnet", nil }
func (t *chanTUN) Events() <-chan tun.Event { return t.events }
func (t *chanTUN) BatchSize() int           { return 1 }
func main() { func main() {
listen := flag.String("listen", "127.0.0.1:0", "local listen address") listen := flag.String("listen", "127.0.0.1:0", "local listen address")
stateDir := flag.String("state-dir", "", "persistent state directory") stateDir := flag.String("state-dir", "", "persistent state directory")
hostname := flag.String("hostname", "burrow-apple", "tailnet hostname") hostname := flag.String("hostname", "burrow-apple", "tailnet hostname")
controlURL := flag.String("control-url", "", "optional control URL") controlURL := flag.String("control-url", "", "optional control URL")
packetSocket := flag.String("packet-socket", "", "optional unix socket path for raw packet bridging")
udpEchoPort := flag.Int("udp-echo-port", 0, "optional tailnet UDP echo port")
flag.Parse() flag.Parse()
if *stateDir == "" { if *stateDir == "" {
@ -48,6 +167,24 @@ func main() {
Hostname: *hostname, Hostname: *hostname,
UserLogf: log.Printf, UserLogf: log.Printf,
} }
var tunDevice *chanTUN
var packetListener net.Listener
if *packetSocket != "" {
_ = os.Remove(*packetSocket)
ln, err := net.Listen("unix", *packetSocket)
if err != nil {
log.Fatalf("packet listen: %v", err)
}
packetListener = ln
defer func() {
packetListener.Close()
_ = os.Remove(*packetSocket)
}()
tunDevice = newChanTUN()
server.Tun = tunDevice
}
if *controlURL != "" { if *controlURL != "" {
server.ControlURL = *controlURL server.ControlURL = *controlURL
} }
@ -61,6 +198,7 @@ func main() {
if err != nil { if err != nil {
log.Fatalf("local client: %v", err) log.Fatalf("local client: %v", err)
} }
state := &helperState{}
ln, err := net.Listen("tcp", *listen) ln, err := net.Listen("tcp", *listen)
if err != nil { if err != nil {
@ -68,12 +206,27 @@ func main() {
} }
defer ln.Close() defer ln.Close()
fmt.Printf("{\"listen_addr\":%q}\n", ln.Addr().String()) if packetListener != nil {
go servePacketBridge(packetListener, tunDevice)
}
if *udpEchoPort > 0 {
go serveUDPEcho(context.Background(), server, localClient, *udpEchoPort)
}
hello := helperHello{
ListenAddr: ln.Addr().String(),
}
if *packetSocket != "" {
hello.PacketSocket = *packetSocket
}
if err := json.NewEncoder(os.Stdout).Encode(hello); err != nil {
log.Fatalf("write hello: %v", err)
}
_ = os.Stdout.Sync() _ = os.Stdout.Sync()
mux := http.NewServeMux() mux := http.NewServeMux()
mux.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) { mux.HandleFunc("/status", func(w http.ResponseWriter, r *http.Request) {
status, err := snapshot(r.Context(), localClient) status, err := snapshot(r.Context(), localClient, state)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusBadGateway) http.Error(w, err.Error(), http.StatusBadGateway)
return return
@ -81,6 +234,40 @@ func main() {
w.Header().Set("content-type", "application/json") w.Header().Set("content-type", "application/json")
_ = json.NewEncoder(w).Encode(status) _ = json.NewEncoder(w).Encode(status)
}) })
mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
ip := r.URL.Query().Get("ip")
if ip == "" {
http.Error(w, "missing ip", http.StatusBadRequest)
return
}
target, err := netip.ParseAddr(ip)
if err != nil {
http.Error(w, fmt.Sprintf("invalid ip: %v", err), http.StatusBadRequest)
return
}
pingType := tailcfg.PingTSMP
switch r.URL.Query().Get("type") {
case "", "tsmp", "TSMP":
pingType = tailcfg.PingTSMP
case "icmp", "ICMP":
pingType = tailcfg.PingICMP
case "peerapi":
pingType = tailcfg.PingPeerAPI
default:
http.Error(w, "unsupported ping type", http.StatusBadRequest)
return
}
result, err := localClient.Ping(r.Context(), target, pingType)
if err != nil {
http.Error(w, err.Error(), http.StatusBadGateway)
return
}
w.Header().Set("content-type", "application/json")
_ = json.NewEncoder(w).Encode(&pingResponse{Result: result})
})
mux.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) { mux.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNoContent) w.WriteHeader(http.StatusNoContent)
go func() { go func() {
@ -96,16 +283,110 @@ func main() {
log.Fatal(httpServer.Serve(ln)) log.Fatal(httpServer.Serve(ln))
} }
func snapshot(ctx context.Context, localClient *local.Client) (*statusResponse, error) { func servePacketBridge(listener net.Listener, device *chanTUN) {
status, err := localClient.StatusWithoutPeers(ctx) for {
conn, err := listener.Accept()
if err != nil {
if errors.Is(err, net.ErrClosed) {
return
}
log.Printf("packet accept: %v", err)
continue
}
log.Printf("packet bridge connected")
if err := bridgePacketConn(conn, device); err != nil && !errors.Is(err, io.EOF) {
log.Printf("packet bridge error: %v", err)
}
_ = conn.Close()
log.Printf("packet bridge disconnected")
}
}
// bridgePacketConn shuttles length-prefixed IP packets between a bridge
// connection and the channel-backed TUN device in both directions until
// either side fails or the device closes. Returns the first error observed
// (io.EOF signals an orderly shutdown).
func bridgePacketConn(conn net.Conn, device *chanTUN) error {
	// Buffered for both goroutines so neither blocks forever on report.
	errCh := make(chan error, 2)
	// conn -> device: frames from the peer become packets for tsnet to Read.
	go func() {
		for {
			pkt, err := readFrame(conn)
			if err != nil {
				errCh <- err
				return
			}
			select {
			case <-device.closed:
				errCh <- io.EOF
				return
			case device.Outbound <- pkt:
			}
		}
	}()
	// device -> conn: packets tsnet Wrote are framed back to the peer.
	go func() {
		for {
			select {
			case <-device.closed:
				errCh <- io.EOF
				return
			case pkt, ok := <-device.Inbound:
				if !ok {
					errCh <- io.EOF
					return
				}
				if err := writeFrame(conn, pkt); err != nil {
					errCh <- err
					return
				}
			}
		}
	}()
	// First goroutine to finish decides the result; the caller closes conn,
	// which unblocks the other goroutine's conn I/O.
	return <-errCh
}
func readFrame(r io.Reader) ([]byte, error) {
var size [4]byte
if _, err := io.ReadFull(r, size[:]); err != nil {
return nil, err
}
length := binary.BigEndian.Uint32(size[:])
if length == 0 {
return []byte{}, nil
}
packet := make([]byte, length)
if _, err := io.ReadFull(r, packet); err != nil {
return nil, err
}
return packet, nil
}
func writeFrame(w io.Writer, packet []byte) error {
var size [4]byte
binary.BigEndian.PutUint32(size[:], uint32(len(packet)))
if _, err := w.Write(size[:]); err != nil {
return err
}
if len(packet) == 0 {
return nil
}
_, err := w.Write(packet)
return err
}
func snapshot(ctx context.Context, localClient *local.Client, state *helperState) (*statusResponse, error) {
status, err := localClient.Status(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if (status.BackendState == ipn.NeedsLogin.String() || status.BackendState == ipn.NoState.String()) && status.AuthURL == "" {
if err := localClient.StartLoginInteractive(ctx); err != nil { authURL := status.AuthURL
return nil, err if authURL == "" {
authURL = state.authURLSnapshot()
} }
status, err = localClient.StatusWithoutPeers(ctx) if status.BackendState == ipn.Running.String() {
state.clearAuthURL()
authURL = ""
} else if (status.BackendState == ipn.NeedsLogin.String() || status.BackendState == ipn.NoState.String()) && authURL == "" {
authURL, err = awaitAuthURL(ctx, localClient, state)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -113,7 +394,7 @@ func snapshot(ctx context.Context, localClient *local.Client) (*statusResponse,
response := &statusResponse{ response := &statusResponse{
BackendState: status.BackendState, BackendState: status.BackendState,
AuthURL: status.AuthURL, AuthURL: authURL,
Running: status.BackendState == ipn.Running.String(), Running: status.BackendState == ipn.Running.String(),
NeedsLogin: status.BackendState == ipn.NeedsLogin.String(), NeedsLogin: status.BackendState == ipn.NeedsLogin.String(),
Health: append([]string(nil), status.Health...), Health: append([]string(nil), status.Health...),
@ -129,5 +410,114 @@ func snapshot(ctx context.Context, localClient *local.Client) (*statusResponse,
for _, ip := range status.TailscaleIPs { for _, ip := range status.TailscaleIPs {
response.TailscaleIPs = append(response.TailscaleIPs, ip.String()) response.TailscaleIPs = append(response.TailscaleIPs, ip.String())
} }
for _, key := range status.Peers() {
peer := status.Peer[key]
if peer == nil {
continue
}
summary := peerSummary{
Name: peer.HostName,
DNSName: peer.DNSName,
Online: peer.Online,
Active: peer.Active,
Relay: peer.Relay,
CurAddr: peer.CurAddr,
LastSeenUnix: peer.LastSeen.Unix(),
}
for _, ip := range peer.TailscaleIPs {
summary.TailscaleIPs = append(summary.TailscaleIPs, ip.String())
}
response.Peers = append(response.Peers, summary)
}
return response, nil return response, nil
} }
// serveUDPEcho binds a UDP socket on the node's tailnet address at the given
// port and echoes every datagram back to its sender. Runs until the first
// read/write failure; setup failures are logged and abort silently.
func serveUDPEcho(ctx context.Context, server *tsnet.Server, localClient *local.Client, port int) {
	addr, err := awaitTailscaleIP(ctx, localClient)
	if err != nil {
		log.Printf("udp echo setup failed: %v", err)
		return
	}
	listenAddr := net.JoinHostPort(addr.String(), strconv.Itoa(port))
	conn, err := server.ListenPacket("udp", listenAddr)
	if err != nil {
		log.Printf("udp echo listen failed on %s: %v", listenAddr, err)
		return
	}
	defer conn.Close()
	log.Printf("udp echo listening on %s", conn.LocalAddr())

	// One reusable 64 KiB buffer covers the maximum UDP payload size.
	scratch := make([]byte, 64<<10)
	for {
		n, from, readErr := conn.ReadFrom(scratch)
		if readErr != nil {
			// Orderly shutdown (closed socket / EOF) is not worth logging.
			if !errors.Is(readErr, net.ErrClosed) && !errors.Is(readErr, io.EOF) {
				log.Printf("udp echo read failed: %v", readErr)
			}
			return
		}
		if _, writeErr := conn.WriteTo(scratch[:n], from); writeErr != nil {
			log.Printf("udp echo write failed: %v", writeErr)
			return
		}
	}
}
// awaitTailscaleIP polls the local client (60 attempts, 250ms apart) until a
// tailnet address is assigned, preferring IPv4 over IPv6. Returns ctx.Err()
// if the context ends first, or a timeout error after the final attempt.
func awaitTailscaleIP(ctx context.Context, localClient *local.Client) (netip.Addr, error) {
	const attempts = 60
	for attempt := 0; attempt < attempts; attempt++ {
		if status, statusErr := localClient.StatusWithoutPeers(ctx); statusErr == nil {
			var v6Fallback netip.Addr
			for _, candidate := range status.TailscaleIPs {
				if candidate.Is4() {
					return candidate, nil
				}
				if candidate.Is6() && !v6Fallback.IsValid() {
					v6Fallback = candidate
				}
			}
			if v6Fallback.IsValid() {
				return v6Fallback, nil
			}
		}
		select {
		case <-ctx.Done():
			return netip.Addr{}, ctx.Err()
		case <-time.After(250 * time.Millisecond):
		}
	}
	return netip.Addr{}, errors.New("timed out waiting for tailscale IP")
}
// awaitAuthURL starts an interactive Tailscale login and waits (up to 8s) on
// the IPN notification bus for the browser auth URL.
//
// Returns the URL (also cached in state for later /status calls), or "" if
// the backend reached Running before a URL was issued. On watch timeout or
// cancellation it falls back to any already-cached URL instead of failing.
func awaitAuthURL(ctx context.Context, localClient *local.Client, state *helperState) (string, error) {
	watchCtx, cancel := context.WithTimeout(ctx, 8*time.Second)
	defer cancel()
	watcher, err := localClient.WatchIPNBus(watchCtx, ipn.NotifyInitialState)
	if err != nil {
		return "", err
	}
	defer watcher.Close()
	// Start the login only after the watcher is attached so the BrowseToURL
	// notification cannot be missed.
	if err := localClient.StartLoginInteractive(ctx); err != nil {
		return "", err
	}
	for {
		notify, err := watcher.Next()
		if err != nil {
			if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
				// Watch window elapsed: best effort, return any cached URL.
				return state.authURLSnapshot(), nil
			}
			return "", err
		}
		if notify.BrowseToURL != nil && *notify.BrowseToURL != "" {
			state.setAuthURL(*notify.BrowseToURL)
			return *notify.BrowseToURL, nil
		}
		if notify.State != nil && *notify.State == ipn.Running {
			// Already logged in; no auth URL is needed.
			state.clearAuthURL()
			return "", nil
		}
	}
}

View file

@ -82,12 +82,23 @@ impl TailscaleBridgeManager {
let key = session_key(&request.account_name, &request.identity_name); let key = session_key(&request.account_name, &request.identity_name);
if let Some(existing) = self.sessions.lock().await.get(&key).cloned() { if let Some(existing) = self.sessions.lock().await.get(&key).cloned() {
let status = self.fetch_status(existing.as_ref()).await?; match self.fetch_status(existing.as_ref()).await {
Ok(status) => {
return Ok(TailscaleLoginStartResponse { return Ok(TailscaleLoginStartResponse {
session_id: existing.session_id.clone(), session_id: existing.session_id.clone(),
status, status,
}); });
} }
Err(err) => {
log::warn!(
"tailscale login session {} is stale, restarting: {err}",
existing.session_id
);
self.sessions.lock().await.remove(&key);
let _ = self.shutdown_session(existing.as_ref()).await;
}
}
}
let state_dir = state_root().join(session_dir_name(&request)); let state_dir = state_root().join(session_dir_name(&request));
tokio::fs::create_dir_all(&state_dir) tokio::fs::create_dir_all(&state_dir)
@ -155,11 +166,28 @@ impl TailscaleBridgeManager {
}; };
match session { match session {
Some(session) => self.fetch_status(session.as_ref()).await.map(Some), Some(session) => match self.fetch_status(session.as_ref()).await {
Ok(status) => Ok(Some(status)),
Err(err) => {
self.remove_session_by_id(session_id).await;
Err(err)
}
},
None => Ok(None), None => Ok(None),
} }
} }
/// Cancels a tailnet login session and shuts down its helper process.
///
/// Returns `Ok(true)` when a session with `session_id` was found and stopped,
/// `Ok(false)` when no such session exists.
pub async fn cancel(&self, session_id: &str) -> Result<bool> {
    if let Some(session) = self.remove_session_by_id(session_id).await {
        self.shutdown_session(session.as_ref()).await?;
        Ok(true)
    } else {
        Ok(false)
    }
}
async fn wait_for_status(&self, session: &ManagedSession) -> Result<TailscaleLoginStatus> { async fn wait_for_status(&self, session: &ManagedSession) -> Result<TailscaleLoginStatus> {
let mut last_error = None; let mut last_error = None;
let mut last_status = None; let mut last_status = None;
@ -201,6 +229,38 @@ impl TailscaleBridgeManager {
.await .await
.context("invalid tailscale helper status response") .context("invalid tailscale helper status response")
} }
/// Detaches and returns the managed session whose id matches `session_id`,
/// leaving the session map untouched when no match exists.
async fn remove_session_by_id(&self, session_id: &str) -> Option<Arc<ManagedSession>> {
    let mut sessions = self.sessions.lock().await;
    let matching_key = sessions.iter().find_map(|(key, session)| {
        if session.session_id == session_id {
            Some(key.clone())
        } else {
            None
        }
    })?;
    sessions.remove(&matching_key)
}
/// Stops a helper process: asks it to exit via its `/shutdown` endpoint,
/// waits up to ~1s (10 x 100ms) for a graceful exit, then force-kills it.
async fn shutdown_session(&self, session: &ManagedSession) -> Result<()> {
    // Best-effort graceful shutdown request; errors are ignored because the
    // helper may already be gone.
    let _ = self
        .client
        .post(format!("{}/shutdown", session.listen_url))
        .send()
        .await;
    for _ in 0..10 {
        let mut child = session.child.lock().await;
        if child.try_wait()?.is_some() {
            return Ok(());
        }
        // Release the lock while sleeping so other tasks can reach the child.
        drop(child);
        tokio::time::sleep(Duration::from_millis(100)).await;
    }
    // Grace period elapsed: kill and reap so the process does not linger.
    let mut child = session.child.lock().await;
    child
        .start_kill()
        .context("failed to kill tailscale helper")?;
    let _ = child.wait().await;
    Ok(())
}
} }
fn helper_command(request: &TailscaleLoginStartRequest, state_dir: &Path) -> Result<Command> { fn helper_command(request: &TailscaleLoginStartRequest, state_dir: &Path) -> Result<Command> {
@ -249,7 +309,10 @@ fn state_root() -> PathBuf {
.join("Burrow") .join("Burrow")
.join("tailscale"); .join("tailscale");
} }
home.join(".local").join("share").join("burrow").join("tailscale") home.join(".local")
.join("share")
.join("burrow")
.join("tailscale")
} }
fn session_dir_name(request: &TailscaleLoginStartRequest) -> String { fn session_dir_name(request: &TailscaleLoginStartRequest) -> String {

View file

@ -7,6 +7,7 @@ use super::TailnetProvider;
pub const TAILNET_DISCOVERY_REL: &str = "https://burrow.net/rel/tailnet-control-server"; pub const TAILNET_DISCOVERY_REL: &str = "https://burrow.net/rel/tailnet-control-server";
const TAILNET_DISCOVERY_PATH: &str = "/.well-known/burrow-tailnet"; const TAILNET_DISCOVERY_PATH: &str = "/.well-known/burrow-tailnet";
const WEBFINGER_PATH: &str = "/.well-known/webfinger"; const WEBFINGER_PATH: &str = "/.well-known/webfinger";
const MANAGED_TAILSCALE_AUTHORITY: &str = "controlplane.tailscale.com";
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct TailnetDiscovery { pub struct TailnetDiscovery {
@ -17,6 +18,15 @@ pub struct TailnetDiscovery {
pub oidc_issuer: Option<String>, pub oidc_issuer: Option<String>,
} }
/// Result of probing a tailnet control-server authority: the normalized
/// authority, the HTTP status observed, and human-readable summary/detail
/// text for display.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct TailnetAuthorityProbe {
    pub authority: String,
    pub status_code: i32,
    pub summary: String,
    pub detail: String,
    pub reachable: bool,
}
#[derive(Clone, Debug, Default, Deserialize)] #[derive(Clone, Debug, Default, Deserialize)]
struct WebFingerDocument { struct WebFingerDocument {
#[serde(default)] #[serde(default)]
@ -43,6 +53,63 @@ pub async fn discover_tailnet(email: &str) -> Result<TailnetDiscovery> {
discover_tailnet_at(&client, email, &base_url).await discover_tailnet_at(&client, email, &base_url).await
} }
/// Canonicalizes a user-supplied tailnet authority: trims surrounding
/// whitespace and defaults to an `https://` scheme when none is present.
pub fn normalize_authority(authority: &str) -> String {
    let candidate = authority.trim();
    match candidate.find("://") {
        Some(_) => candidate.to_owned(),
        None => format!("https://{candidate}"),
    }
}
/// True when `authority` refers to Tailscale's managed control plane
/// (`controlplane.tailscale.com`) over either scheme.
pub fn is_managed_tailscale_authority(authority: &str) -> bool {
    let canonical = normalize_authority(authority)
        .trim_end_matches('/')
        .to_ascii_lowercase();
    ["https://", "http://"]
        .iter()
        .any(|scheme| canonical == format!("{scheme}{MANAGED_TAILSCALE_AUTHORITY}"))
}
/// Checks whether a tailnet control server is reachable.
///
/// The managed Tailscale control plane is reported reachable without a
/// network round trip. For self-hosted authorities the probe tries `/health`
/// first and then the authority root, returning the first successful
/// response; if neither succeeds an error is returned.
pub async fn probe_tailnet_authority(authority: &str) -> Result<TailnetAuthorityProbe> {
    let authority = normalize_authority(authority);
    if is_managed_tailscale_authority(&authority) {
        return Ok(TailnetAuthorityProbe {
            authority,
            status_code: 200,
            summary: "Tailscale-managed control plane".to_owned(),
            detail: "Using Tailscale's default login server.".to_owned(),
            reachable: true,
        });
    }

    let base_url =
        Url::parse(&authority).with_context(|| format!("invalid tailnet authority {authority}"))?;
    let client = Client::builder()
        .user_agent("burrow-tailnet-probe")
        .timeout(std::time::Duration::from_secs(10))
        .build()
        .context("failed to build tailnet authority probe client")?;

    // Probe /health first, then fall back to the authority root.
    let candidates = [base_url.join("/health")?, base_url];
    for url in candidates {
        if let Some(status) =
            probe_url(&client, url, &authority, "Tailnet server reachable").await?
        {
            return Ok(status);
        }
    }
    Err(anyhow!("could not connect to the server"))
}
pub async fn discover_tailnet_at( pub async fn discover_tailnet_at(
client: &Client, client: &Client,
email: &str, email: &str,
@ -57,7 +124,7 @@ pub async fn discover_tailnet_at(
if let Some(authority) = discover_webfinger(client, email, base_url).await? { if let Some(authority) = discover_webfinger(client, email, base_url).await? {
return Ok(TailnetDiscovery { return Ok(TailnetDiscovery {
domain, domain,
provider: TailnetProvider::Headscale, provider: inferred_provider(Some(&authority), None),
authority, authority,
oidc_issuer: None, oidc_issuer: None,
}); });
@ -78,6 +145,19 @@ pub fn email_domain(email: &str) -> Result<String> {
Ok(domain) Ok(domain)
} }
pub fn inferred_provider(
authority: Option<&str>,
explicit: Option<&TailnetProvider>,
) -> TailnetProvider {
if matches!(explicit, Some(TailnetProvider::Burrow)) {
return TailnetProvider::Burrow;
}
if authority.is_some_and(is_managed_tailscale_authority) {
return TailnetProvider::Tailscale;
}
TailnetProvider::Headscale
}
async fn discover_well_known(client: &Client, base_url: &Url) -> Result<Option<TailnetDiscovery>> { async fn discover_well_known(client: &Client, base_url: &Url) -> Result<Option<TailnetDiscovery>> {
let url = base_url let url = base_url
.join(TAILNET_DISCOVERY_PATH) .join(TAILNET_DISCOVERY_PATH)
@ -133,6 +213,37 @@ async fn discover_webfinger(client: &Client, email: &str, base_url: &Url) -> Res
} }
} }
/// Issues one GET against `url` and converts a 2xx reply into a probe result.
///
/// Transport failures and non-success statuses deliberately map to
/// `Ok(None)` so the caller can fall through to the next candidate URL.
async fn probe_url(
    client: &Client,
    url: Url,
    authority: &str,
    summary: &str,
) -> Result<Option<TailnetAuthorityProbe>> {
    let Ok(response) = client
        .get(url)
        .header("accept", "application/json")
        .send()
        .await
    else {
        return Ok(None);
    };

    if !response.status().is_success() {
        return Ok(None);
    }
    // Capture the status before text() consumes the response body.
    let status_code = i32::from(response.status().as_u16());
    let detail = response.text().await.unwrap_or_default().trim().to_owned();
    Ok(Some(TailnetAuthorityProbe {
        authority: authority.to_owned(),
        status_code,
        summary: summary.to_owned(),
        detail,
        reachable: true,
    }))
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use axum::{routing::get, Router}; use axum::{routing::get, Router};
@ -147,6 +258,13 @@ mod tests {
assert!(email_domain("contact").is_err()); assert!(email_domain("contact").is_err());
} }
#[test]
fn detects_managed_tailscale_authority() {
assert!(is_managed_tailscale_authority("controlplane.tailscale.com"));
assert!(is_managed_tailscale_authority("https://controlplane.tailscale.com/"));
assert!(!is_managed_tailscale_authority("https://ts.burrow.net"));
}
#[tokio::test] #[tokio::test]
async fn discovers_from_well_known_document() -> Result<()> { async fn discovers_from_well_known_document() -> Result<()> {
let router = Router::new().route( let router = Router::new().route(
@ -209,4 +327,20 @@ mod tests {
server.abort(); server.abort();
Ok(()) Ok(())
} }
// Spins up a local HTTP server exposing /health and verifies that
// probe_tailnet_authority reports it reachable with a 200 status.
#[tokio::test]
async fn probes_custom_authority() -> Result<()> {
    let router = Router::new().route("/health", get(|| async { "ok" }));
    let listener = TcpListener::bind("127.0.0.1:0").await?;
    let authority = format!("http://{}", listener.local_addr()?);
    let server = tokio::spawn(async move { axum::serve(listener, router).await });
    let status = probe_tailnet_authority(&authority).await?;
    assert_eq!(status.authority, authority);
    assert_eq!(status.status_code, 200);
    assert!(status.reachable);
    server.abort();
    Ok(())
}
} }

View file

@ -13,13 +13,20 @@ use tun::tokio::TunInterface;
use super::{ use super::{
rpc::grpc_defs::{ rpc::grpc_defs::{
networks_server::Networks, tunnel_server::Tunnel, Empty, Network, NetworkDeleteRequest, networks_server::Networks, tailnet_control_server::TailnetControl, tunnel_server::Tunnel,
NetworkListResponse, NetworkReorderRequest, State as RPCTunnelState, Empty, Network, NetworkDeleteRequest, NetworkListResponse, NetworkReorderRequest,
TunnelConfigurationResponse, TunnelStatusResponse, State as RPCTunnelState, TailnetDiscoverRequest, TailnetDiscoverResponse,
TailnetProbeRequest, TailnetProbeResponse, TunnelConfigurationResponse,
TunnelStatusResponse,
}, },
runtime::{ActiveTunnel, ResolvedTunnel}, runtime::{ActiveTunnel, ResolvedTunnel},
}; };
use crate::{ use crate::{
auth::server::tailscale::{
TailscaleBridgeManager, TailscaleLoginStartRequest as BridgeLoginStartRequest,
TailscaleLoginStatus,
},
control::discovery,
daemon::rpc::ServerConfig, daemon::rpc::ServerConfig,
database::{add_network, delete_network, get_connection, list_networks, reorder_network}, database::{add_network, delete_network, get_connection, list_networks, reorder_network},
}; };
@ -46,6 +53,7 @@ pub struct DaemonRPCServer {
wg_state_chan: (watch::Sender<RunState>, watch::Receiver<RunState>), wg_state_chan: (watch::Sender<RunState>, watch::Receiver<RunState>),
network_update_chan: (watch::Sender<()>, watch::Receiver<()>), network_update_chan: (watch::Sender<()>, watch::Receiver<()>),
active_tunnel: Arc<RwLock<Option<ActiveTunnel>>>, active_tunnel: Arc<RwLock<Option<ActiveTunnel>>>,
tailnet_login: TailscaleBridgeManager,
} }
impl DaemonRPCServer { impl DaemonRPCServer {
@ -56,6 +64,7 @@ impl DaemonRPCServer {
wg_state_chan: watch::channel(RunState::Idle), wg_state_chan: watch::channel(RunState::Idle),
network_update_chan: watch::channel(()), network_update_chan: watch::channel(()),
active_tunnel: Arc::new(RwLock::new(None)), active_tunnel: Arc::new(RwLock::new(None)),
tailnet_login: TailscaleBridgeManager::default(),
}) })
} }
@ -127,6 +136,11 @@ impl DaemonRPCServer {
Ok(()) Ok(())
} }
/// Maps a user-facing authority to the helper's control-URL argument: the
/// managed Tailscale control plane uses the helper's default (`None`), any
/// other authority is passed through in normalized form.
fn tailnet_control_url(authority: &str) -> Option<String> {
    let normalized = discovery::normalize_authority(authority);
    if discovery::is_managed_tailscale_authority(&normalized) {
        None
    } else {
        Some(normalized)
    }
}
} }
#[tonic::async_trait] #[tonic::async_trait]
@ -266,6 +280,101 @@ impl Networks for DaemonRPCServer {
} }
} }
#[tonic::async_trait]
impl TailnetControl for DaemonRPCServer {
    /// Resolves an email address to its tailnet authority via discovery,
    /// reporting whether the result is the managed Tailscale control plane.
    async fn discover(
        &self,
        request: Request<TailnetDiscoverRequest>,
    ) -> Result<Response<TailnetDiscoverResponse>, RspStatus> {
        let request = request.into_inner();
        let discovery = discovery::discover_tailnet(&request.email)
            .await
            .map_err(proc_err)?;
        Ok(Response::new(TailnetDiscoverResponse {
            domain: discovery.domain,
            authority: discovery.authority.clone(),
            oidc_issuer: discovery.oidc_issuer.unwrap_or_default(),
            managed: matches!(
                discovery::inferred_provider(Some(&discovery.authority), Some(&discovery.provider)),
                crate::control::TailnetProvider::Tailscale
            ),
        }))
    }

    /// Probes a control-server authority for reachability and relays the
    /// probe result verbatim.
    async fn probe(
        &self,
        request: Request<TailnetProbeRequest>,
    ) -> Result<Response<TailnetProbeResponse>, RspStatus> {
        let request = request.into_inner();
        let status = discovery::probe_tailnet_authority(&request.authority)
            .await
            .map_err(proc_err)?;
        Ok(Response::new(TailnetProbeResponse {
            authority: status.authority,
            status_code: status.status_code,
            summary: status.summary,
            detail: status.detail,
            reachable: status.reachable,
        }))
    }

    /// Starts (or reuses) a daemon-owned tailnet login session. An empty or
    /// whitespace-only hostname falls back to the bridge default; the managed
    /// Tailscale authority maps to no explicit control URL.
    async fn login_start(
        &self,
        request: Request<super::rpc::grpc_defs::TailnetLoginStartRequest>,
    ) -> Result<Response<super::rpc::grpc_defs::TailnetLoginStatusResponse>, RspStatus> {
        let request = request.into_inner();
        let response = self
            .tailnet_login
            .start_login(BridgeLoginStartRequest {
                account_name: request.account_name,
                identity_name: request.identity_name,
                hostname: (!request.hostname.trim().is_empty()).then_some(request.hostname),
                control_url: Self::tailnet_control_url(&request.authority),
            })
            .await
            .map_err(proc_err)?;
        Ok(Response::new(tailnet_login_rsp(
            response.session_id,
            response.status,
        )))
    }

    /// Reports the current status of an existing login session; an unknown
    /// session id yields NOT_FOUND.
    async fn login_status(
        &self,
        request: Request<super::rpc::grpc_defs::TailnetLoginStatusRequest>,
    ) -> Result<Response<super::rpc::grpc_defs::TailnetLoginStatusResponse>, RspStatus> {
        let request = request.into_inner();
        let status = self
            .tailnet_login
            .status(&request.session_id)
            .await
            .map_err(proc_err)?;
        let Some(status) = status else {
            return Err(RspStatus::not_found("tailnet login session not found"));
        };
        Ok(Response::new(tailnet_login_rsp(request.session_id, status)))
    }

    /// Cancels a login session and tears down its helper; an unknown session
    /// id yields NOT_FOUND.
    async fn login_cancel(
        &self,
        request: Request<super::rpc::grpc_defs::TailnetLoginCancelRequest>,
    ) -> Result<Response<Empty>, RspStatus> {
        let request = request.into_inner();
        let canceled = self
            .tailnet_login
            .cancel(&request.session_id)
            .await
            .map_err(proc_err)?;
        if !canceled {
            return Err(RspStatus::not_found("tailnet login session not found"));
        }
        Ok(Response::new(Empty {}))
    }
}
fn proc_err(err: impl ToString) -> RspStatus { fn proc_err(err: impl ToString) -> RspStatus {
RspStatus::internal(err.to_string()) RspStatus::internal(err.to_string())
} }
@ -283,3 +392,21 @@ fn status_rsp(state: RunState) -> TunnelStatusResponse {
start: None, // TODO: Add timestamp start: None, // TODO: Add timestamp
} }
} }
/// Flattens a bridge-side login status into the gRPC response shape,
/// substituting empty strings for absent optional fields.
fn tailnet_login_rsp(
    session_id: String,
    status: TailscaleLoginStatus,
) -> super::rpc::grpc_defs::TailnetLoginStatusResponse {
    super::rpc::grpc_defs::TailnetLoginStatusResponse {
        session_id,
        backend_state: status.backend_state,
        auth_url: status.auth_url.unwrap_or_default(),
        running: status.running,
        needs_login: status.needs_login,
        tailnet_name: status.tailnet_name.unwrap_or_default(),
        magic_dns_suffix: status.magic_dns_suffix.unwrap_or_default(),
        self_dns_name: status.self_dns_name.unwrap_or_default(),
        tailnet_ips: status.tailscale_ips,
        health: status.health,
    }
}

View file

@ -16,7 +16,10 @@ use tonic::transport::Server;
use tracing::info; use tracing::info;
use crate::{ use crate::{
daemon::rpc::grpc_defs::{networks_server::NetworksServer, tunnel_server::TunnelServer}, daemon::rpc::grpc_defs::{
networks_server::NetworksServer, tailnet_control_server::TailnetControlServer,
tunnel_server::TunnelServer,
},
database::get_connection, database::get_connection,
}; };
@ -36,9 +39,11 @@ pub async fn daemon_main(
let uds = UnixListener::bind(sock_path)?; let uds = UnixListener::bind(sock_path)?;
let serve_job = tokio::spawn(async move { let serve_job = tokio::spawn(async move {
let uds_stream = UnixListenerStream::new(uds); let uds_stream = UnixListenerStream::new(uds);
let tailnet_server = burrow_server.clone();
let _srv = Server::builder() let _srv = Server::builder()
.add_service(TunnelServer::new(burrow_server.clone())) .add_service(TunnelServer::new(burrow_server.clone()))
.add_service(NetworksServer::new(burrow_server)) .add_service(NetworksServer::new(burrow_server))
.add_service(TailnetControlServer::new(tailnet_server))
.serve_with_incoming(uds_stream) .serve_with_incoming(uds_stream)
.await?; .await?;
Ok::<(), AhError>(()) Ok::<(), AhError>(())

View file

@ -5,11 +5,15 @@ use tokio::net::UnixStream;
use tonic::transport::{Endpoint, Uri}; use tonic::transport::{Endpoint, Uri};
use tower::service_fn; use tower::service_fn;
use super::grpc_defs::{networks_client::NetworksClient, tunnel_client::TunnelClient}; use super::grpc_defs::{
networks_client::NetworksClient, tailnet_control_client::TailnetControlClient,
tunnel_client::TunnelClient,
};
use crate::daemon::get_socket_path; use crate::daemon::get_socket_path;
pub struct BurrowClient<T> { pub struct BurrowClient<T> {
pub networks_client: NetworksClient<T>, pub networks_client: NetworksClient<T>,
pub tailnet_client: TailnetControlClient<T>,
pub tunnel_client: TunnelClient<T>, pub tunnel_client: TunnelClient<T>,
} }
@ -31,9 +35,11 @@ impl BurrowClient<tonic::transport::Channel> {
})) }))
.await?; .await?;
let nw_client = NetworksClient::new(channel.clone()); let nw_client = NetworksClient::new(channel.clone());
let tailnet_client = TailnetControlClient::new(channel.clone());
let tun_client = TunnelClient::new(channel.clone()); let tun_client = TunnelClient::new(channel.clone());
Ok(BurrowClient { Ok(BurrowClient {
networks_client: nw_client, networks_client: nw_client,
tailnet_client,
tunnel_client: tun_client, tunnel_client: tun_client,
}) })
} }

View file

@ -72,6 +72,14 @@ enum Commands {
NetworkReorder(NetworkReorderArgs), NetworkReorder(NetworkReorderArgs),
/// Delete Network /// Delete Network
NetworkDelete(NetworkDeleteArgs), NetworkDelete(NetworkDeleteArgs),
/// Discover a Tailnet authority through the daemon
TailnetDiscover(TailnetDiscoverArgs),
/// Probe a Tailnet authority through the daemon
TailnetProbe(TailnetProbeArgs),
/// Send an ICMP echo probe through the active Tailnet tunnel over daemon packet streaming
TailnetPing(TailnetPingArgs),
/// Send a UDP echo probe through the active Tailnet tunnel over daemon packet streaming
TailnetUdpEcho(TailnetUdpEchoArgs),
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
/// Run a command in an unshared Linux namespace using a Burrow backend /// Run a command in an unshared Linux namespace using a Burrow backend
Exec(ExecArgs), Exec(ExecArgs),
@ -110,6 +118,36 @@ struct NetworkDeleteArgs {
id: i32, id: i32,
} }
// Arguments for `tailnet-discover`: look up the authority for an account email.
#[derive(Args)]
struct TailnetDiscoverArgs {
    email: String,
}

// Arguments for `tailnet-probe`: probe a control-plane authority via the daemon.
#[derive(Args)]
struct TailnetProbeArgs {
    authority: String,
}

// NOTE(review): the two arg structs below are cfg-gated to linux/apple, but the
// matching `Commands` variants and main() match arms appear unconditional —
// confirm this compiles on other targets or gate the command set consistently.
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
#[derive(Args)]
struct TailnetPingArgs {
    // Remote tailnet IP to ping (IPv4 only; the probe builder rejects IPv6).
    remote: String,
    // Payload text carried inside the ICMP echo request.
    #[arg(long, default_value = "burrow-tailnet-smoke")]
    payload: String,
    // How long to wait for the echo reply, in milliseconds.
    #[arg(long, default_value_t = 5000)]
    timeout_ms: u64,
}

#[cfg(any(target_os = "linux", target_vendor = "apple"))]
#[derive(Args)]
struct TailnetUdpEchoArgs {
    // Remote `ip:port` to send the UDP echo probe to.
    remote: String,
    // Message text expected to be echoed back verbatim.
    #[arg(long, default_value = "burrow-tailnet-smoke")]
    message: String,
    // How long to wait for the echoed datagram, in milliseconds.
    #[arg(long, default_value_t = 5000)]
    timeout_ms: u64,
}
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
#[derive(Args)] #[derive(Args)]
struct TorExecArgs { struct TorExecArgs {
@ -240,6 +278,393 @@ async fn try_network_delete(id: i32) -> Result<()> {
Ok(()) Ok(())
} }
/// CLI: asks the daemon to discover the Tailnet authority for `email` and
/// prints the raw gRPC response.
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
async fn try_tailnet_discover(email: &str) -> Result<()> {
    let discover_request = crate::daemon::rpc::grpc_defs::TailnetDiscoverRequest {
        email: email.to_owned(),
    };
    let mut daemon = BurrowClient::from_uds().await?;
    let reply = daemon
        .tailnet_client
        .discover(discover_request)
        .await?
        .into_inner();
    println!("Tailnet Discover Response: {:?}", reply);
    Ok(())
}
/// CLI: asks the daemon to probe the given Tailnet control-plane `authority`
/// and prints the raw gRPC response.
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
async fn try_tailnet_probe(authority: &str) -> Result<()> {
    let probe_request = crate::daemon::rpc::grpc_defs::TailnetProbeRequest {
        authority: authority.to_owned(),
    };
    let mut daemon = BurrowClient::from_uds().await?;
    let reply = daemon
        .tailnet_client
        .probe(probe_request)
        .await?
        .into_inner();
    println!("Tailnet Probe Response: {:?}", reply);
    Ok(())
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// Smoke-tests the active tunnel by sending one ICMPv4 echo request to `remote`
/// through the daemon's packet-streaming RPC and waiting up to `timeout_ms`
/// milliseconds for the matching reply.
async fn try_tailnet_ping(remote: &str, payload: &str, timeout_ms: u64) -> Result<()> {
    use std::net::IpAddr;
    use anyhow::Context;
    use rand::Rng;
    use tokio::{
        sync::mpsc,
        time::{timeout, Duration},
    };
    use tokio_stream::wrappers::ReceiverStream;
    use crate::daemon::rpc::grpc_defs::{Empty, TunnelPacket};
    let remote_ip: IpAddr = remote
        .parse()
        .with_context(|| format!("invalid remote IP address {remote}"))?;
    let message = payload.as_bytes().to_vec();
    // Ensure the tunnel is running, then read one configuration snapshot so we
    // know which local address to stamp on the outgoing probe.
    let mut client = BurrowClient::from_uds().await?;
    client.tunnel_client.tunnel_start(Empty {}).await?;
    let mut config_stream = client
        .tunnel_client
        .tunnel_configuration(Empty {})
        .await?
        .into_inner();
    let config = config_stream
        .message()
        .await?
        .context("tunnel configuration stream ended before yielding a config")?;
    let local_ip = select_tailnet_local_ip(&config.addresses, remote_ip)?;
    // Random identifier lets us discriminate our reply from unrelated ICMP traffic.
    let identifier = rand::thread_rng().gen::<u16>();
    let sequence = 1_u16;
    let packet = build_icmp_echo_request(local_ip, remote_ip, identifier, sequence, &message)?;
    let (outbound_tx, outbound_rx) = mpsc::channel::<TunnelPacket>(128);
    let mut tunnel_packets = client
        .tunnel_client
        .tunnel_packets(ReceiverStream::new(outbound_rx))
        .await?
        .into_inner();
    outbound_tx
        .send(TunnelPacket { payload: packet })
        .await
        .context("failed to send ICMP echo probe into daemon packet stream")?;
    log::debug!(
        "tailnet ping probe queued from {local_ip} to {remote_ip} identifier={identifier} sequence={sequence}"
    );
    // Close the outbound side; only a single probe is sent.
    drop(outbound_tx);
    // Poll inbound packets until the matching echo reply arrives or we time out.
    let reply = timeout(Duration::from_millis(timeout_ms), async {
        loop {
            let packet = tunnel_packets
                .message()
                .await
                .context("failed to read packet from daemon packet stream")?
                .context("daemon packet stream ended before returning a reply")?;
            log::debug!(
                "tailnet ping received {} bytes from daemon packet stream",
                packet.payload.len()
            );
            // Non-matching packets come back as Ok(None) and are skipped.
            if let Some(reply) = parse_icmp_echo_reply(
                &packet.payload,
                local_ip,
                remote_ip,
                identifier,
                sequence,
            )? {
                break Ok::<_, anyhow::Error>(reply);
            }
        }
    })
    .await
    .with_context(|| format!("timed out waiting for ICMP echo reply from {remote_ip}"))??;
    println!("Tailnet Ping Source: {}", reply.source);
    println!("Tailnet Ping Destination: {}", reply.destination);
    println!(
        "Tailnet Ping Payload: {}",
        String::from_utf8_lossy(&reply.payload)
    );
    Ok(())
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// Smoke-tests the active tunnel by sending one UDP datagram to `remote`
/// through a userspace netstack bridged onto the daemon's packet-streaming
/// RPC, then waiting up to `timeout_ms` milliseconds for the echoed reply.
async fn try_tailnet_udp_echo(remote: &str, message: &str, timeout_ms: u64) -> Result<()> {
    use std::net::SocketAddr;
    use anyhow::{bail, Context};
    use futures::{SinkExt, StreamExt};
    use netstack_smoltcp::StackBuilder;
    use tokio::{
        sync::mpsc,
        time::{timeout, Duration},
    };
    use tokio_stream::wrappers::ReceiverStream;
    use crate::daemon::rpc::grpc_defs::{Empty, TunnelPacket};
    let remote_addr: SocketAddr = remote
        .parse()
        .with_context(|| format!("invalid remote socket address {remote}"))?;
    // Ensure the tunnel is running and fetch one config snapshot for local addressing.
    let mut client = BurrowClient::from_uds().await?;
    client.tunnel_client.tunnel_start(Empty {}).await?;
    let mut config_stream = client
        .tunnel_client
        .tunnel_configuration(Empty {})
        .await?
        .into_inner();
    let config = config_stream
        .message()
        .await?
        .context("tunnel configuration stream ended before yielding a config")?;
    let local_addr = select_tailnet_local_socket(&config.addresses, remote_addr.ip())?;
    // Userspace stack: we feed it raw IP packets and it hands us socket endpoints.
    let (stack, runner, udp_socket, _) = StackBuilder::default()
        .enable_udp(true)
        .enable_tcp(true)
        .build()
        .context("failed to build userspace UDP stack")?;
    let runner = runner.context("userspace UDP stack runner unavailable")?;
    let udp_socket = udp_socket.context("userspace UDP stack socket unavailable")?;
    let (mut stack_sink, mut stack_stream) = stack.split();
    let (mut udp_reader, mut udp_writer) = udp_socket.split();
    let (outbound_tx, outbound_rx) = mpsc::channel::<TunnelPacket>(128);
    let mut tunnel_packets = client
        .tunnel_client
        .tunnel_packets(ReceiverStream::new(outbound_rx))
        .await?
        .into_inner();
    // Daemon -> userspace stack: forward inbound tunnel packets.
    let ingress_task = tokio::spawn(async move {
        loop {
            match tunnel_packets.message().await? {
                Some(packet) => {
                    log::debug!(
                        "tailnet udp echo received {} bytes from daemon packet stream",
                        packet.payload.len()
                    );
                    stack_sink
                        .send(packet.payload)
                        .await
                        .context("failed to feed inbound tailnet packet into userspace stack")?;
                }
                None => break,
            }
        }
        Result::<()>::Ok(())
    });
    // Userspace stack -> daemon: forward packets the stack wants to emit.
    let egress_task = tokio::spawn(async move {
        while let Some(packet) = stack_stream.next().await {
            let payload =
                packet.context("failed to read outbound packet from userspace stack")?;
            log::debug!(
                "tailnet udp echo sending {} bytes into daemon packet stream",
                payload.len()
            );
            outbound_tx
                .send(TunnelPacket { payload })
                .await
                .context("failed to forward outbound tailnet packet to daemon")?;
        }
        Result::<()>::Ok(())
    });
    // The runner drives the netstack event loop until aborted below.
    let runner_task = tokio::spawn(async move { runner.await.map_err(anyhow::Error::from) });
    udp_writer
        .send((message.as_bytes().to_vec(), local_addr, remote_addr))
        .await
        .context("failed to send UDP echo probe into userspace stack")?;
    log::debug!(
        "tailnet udp echo probe queued from {local_addr} to {remote_addr}"
    );
    let response = timeout(Duration::from_millis(timeout_ms), udp_reader.next())
        .await
        .with_context(|| format!("timed out waiting for UDP echo from {remote_addr}"))?
        .context("userspace UDP stack ended before returning a reply")?;
    let (payload, reply_source, reply_destination) = response;
    let response_text = String::from_utf8_lossy(&payload);
    // Tear down the bridge tasks before validating.
    // NOTE(review): on the timeout/error paths above these tasks are not aborted
    // explicitly; the CLI process exits shortly after, so they should not leak in
    // practice — confirm if this function is ever reused in a long-lived process.
    ingress_task.abort();
    egress_task.abort();
    runner_task.abort();
    if reply_source != remote_addr {
        bail!("received UDP reply from unexpected source {reply_source}");
    }
    if reply_destination != local_addr {
        bail!("received UDP reply for unexpected local socket {reply_destination}");
    }
    if payload != message.as_bytes() {
        bail!("UDP echo payload mismatch");
    }
    println!("Tailnet UDP Echo Source: {reply_source}");
    println!("Tailnet UDP Echo Destination: {reply_destination}");
    println!("Tailnet UDP Echo Payload: {response_text}");
    Ok(())
}
/// Picks the first daemon-reported tunnel address whose IP family matches
/// `remote_ip`. Entries in `addresses` are CIDR strings; only the address part
/// before the '/' is parsed. Errors when no address of the right family exists.
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
fn select_tailnet_local_ip(addresses: &[String], remote_ip: std::net::IpAddr) -> Result<std::net::IpAddr> {
    use anyhow::Context;
    let want_v4 = remote_ip.is_ipv4();
    let mut chosen = None;
    for cidr in addresses {
        let Some(ip_text) = cidr.split('/').next() else {
            continue;
        };
        let Ok(candidate) = ip_text.parse::<std::net::IpAddr>() else {
            continue;
        };
        if candidate.is_ipv4() == want_v4 {
            chosen = Some(candidate);
            break;
        }
    }
    chosen.with_context(|| {
        format!(
            "no local {} tailnet address found in daemon config {:?}",
            if want_v4 { "IPv4" } else { "IPv6" },
            addresses
        )
    })
}
/// Pairs a family-matching local tunnel IP with a random source port drawn from
/// [40000, 50000).
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
fn select_tailnet_local_socket(
    addresses: &[String],
    remote_ip: std::net::IpAddr,
) -> Result<std::net::SocketAddr> {
    use rand::Rng;
    let ip = select_tailnet_local_ip(addresses, remote_ip)?;
    let port: u16 = rand::thread_rng().gen_range(40000..50000);
    Ok(std::net::SocketAddr::new(ip, port))
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// Parsed view of a matching ICMPv4 echo reply: the peer addresses from the IP
/// header and the echoed payload bytes that followed the 8-byte ICMP header.
struct IcmpEchoReply {
    source: std::net::IpAddr,
    destination: std::net::IpAddr,
    payload: Vec<u8>,
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// Hand-assembles a raw IPv4 datagram carrying an ICMP echo request (ping)
/// from `source` to `destination` with the given `identifier`, `sequence`,
/// and `payload`. Errors when either address is not IPv4.
fn build_icmp_echo_request(
    source: std::net::IpAddr,
    destination: std::net::IpAddr,
    identifier: u16,
    sequence: u16,
    payload: &[u8],
) -> Result<Vec<u8>> {
    use anyhow::bail;
    let (source, destination) = match (source, destination) {
        (std::net::IpAddr::V4(source), std::net::IpAddr::V4(destination)) => (source, destination),
        _ => bail!("tailnet ping currently supports IPv4 only"),
    };
    // ICMP message: type, code, checksum, identifier, sequence, payload.
    let mut icmp = Vec::with_capacity(8 + payload.len());
    icmp.push(8); // type 8: echo request
    icmp.push(0); // code 0
    icmp.extend_from_slice(&[0, 0]); // checksum placeholder, patched below
    icmp.extend_from_slice(&identifier.to_be_bytes());
    icmp.extend_from_slice(&sequence.to_be_bytes());
    icmp.extend_from_slice(payload);
    // ICMP checksum covers the whole message, computed with the field zeroed.
    let icmp_checksum = internet_checksum(&icmp);
    icmp[2..4].copy_from_slice(&icmp_checksum.to_be_bytes());
    // Minimal 20-byte IPv4 header (no options) in front of the ICMP message.
    let total_len = 20 + icmp.len();
    let mut packet = Vec::with_capacity(total_len);
    packet.push(0x45); // version 4, IHL 5 words
    packet.push(0); // DSCP/ECN
    packet.extend_from_slice(&(total_len as u16).to_be_bytes());
    packet.extend_from_slice(&0u16.to_be_bytes()); // identification
    packet.extend_from_slice(&0u16.to_be_bytes()); // flags + fragment offset
    packet.push(64); // TTL
    packet.push(1); // protocol 1: ICMP
    packet.extend_from_slice(&[0, 0]); // header checksum placeholder
    packet.extend_from_slice(&source.octets());
    packet.extend_from_slice(&destination.octets());
    // Header checksum covers only the 20 header bytes pushed so far; the ICMP
    // body is appended afterwards, so it is excluded as required.
    let header_checksum = internet_checksum(&packet);
    packet[10..12].copy_from_slice(&header_checksum.to_be_bytes());
    packet.extend_from_slice(&icmp);
    Ok(packet)
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// Attempts to interpret `packet` as the ICMPv4 echo reply matching the probe
/// we sent (peer addresses, identifier, and sequence all must agree).
///
/// Returns `Ok(None)` for any packet that is not the expected reply — wrong
/// protocol, wrong peer, wrong identifier/sequence, or malformed — so callers
/// can keep polling the packet stream. Errors only for unsupported families.
fn parse_icmp_echo_reply(
    packet: &[u8],
    local_ip: std::net::IpAddr,
    remote_ip: std::net::IpAddr,
    identifier: u16,
    sequence: u16,
) -> Result<Option<IcmpEchoReply>> {
    use anyhow::bail;
    let (local_ip, remote_ip) = match (local_ip, remote_ip) {
        (std::net::IpAddr::V4(local_ip), std::net::IpAddr::V4(remote_ip)) => (local_ip, remote_ip),
        _ => bail!("tailnet ping currently supports IPv4 only"),
    };
    // Too short to hold even an option-less IPv4 header.
    if packet.len() < 20 {
        return Ok(None);
    }
    let version = packet[0] >> 4;
    if version != 4 {
        return Ok(None);
    }
    let ihl = (packet[0] & 0x0f) as usize * 4;
    // A valid IPv4 IHL is at least 5 words (20 bytes); anything smaller is
    // malformed and would make the ICMP slice below start inside the IP header.
    if ihl < 20 {
        return Ok(None);
    }
    if packet.len() < ihl + 8 {
        return Ok(None);
    }
    // Protocol 1 == ICMP.
    if packet[9] != 1 {
        return Ok(None);
    }
    let source = std::net::Ipv4Addr::new(packet[12], packet[13], packet[14], packet[15]);
    let destination = std::net::Ipv4Addr::new(packet[16], packet[17], packet[18], packet[19]);
    if source != remote_ip || destination != local_ip {
        return Ok(None);
    }
    let icmp = &packet[ihl..];
    // ICMP type 0 / code 0 == echo reply.
    if icmp[0] != 0 || icmp[1] != 0 {
        return Ok(None);
    }
    let reply_identifier = u16::from_be_bytes([icmp[4], icmp[5]]);
    let reply_sequence = u16::from_be_bytes([icmp[6], icmp[7]]);
    if reply_identifier != identifier || reply_sequence != sequence {
        return Ok(None);
    }
    Ok(Some(IcmpEchoReply {
        source: std::net::IpAddr::V4(source),
        destination: std::net::IpAddr::V4(destination),
        payload: icmp[8..].to_vec(),
    }))
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
/// RFC 1071 Internet checksum: ones'-complement sum of big-endian 16-bit
/// words, with a trailing odd byte treated as the high byte of a final word.
fn internet_checksum(bytes: &[u8]) -> u16 {
    let mut acc: u32 = 0;
    for pair in bytes.chunks(2) {
        let word = if pair.len() == 2 {
            u16::from_be_bytes([pair[0], pair[1]]) as u32
        } else {
            (pair[0] as u32) << 8
        };
        acc += word;
    }
    // Fold carries back into the low 16 bits until none remain.
    while acc >> 16 != 0 {
        acc = (acc & 0xffff) + (acc >> 16);
    }
    !(acc as u16)
}
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
async fn try_tor_exec(payload_path: &str, command: Vec<String>) -> Result<()> { async fn try_tor_exec(payload_path: &str, command: Vec<String>) -> Result<()> {
let exit_code = usernet::run_exec(usernet::ExecInvocation { let exit_code = usernet::run_exec(usernet::ExecInvocation {
@ -348,6 +773,14 @@ async fn main() -> Result<()> {
Commands::NetworkList => try_network_list().await?, Commands::NetworkList => try_network_list().await?,
Commands::NetworkReorder(args) => try_network_reorder(args.id, args.index).await?, Commands::NetworkReorder(args) => try_network_reorder(args.id, args.index).await?,
Commands::NetworkDelete(args) => try_network_delete(args.id).await?, Commands::NetworkDelete(args) => try_network_delete(args.id).await?,
Commands::TailnetDiscover(args) => try_tailnet_discover(&args.email).await?,
Commands::TailnetProbe(args) => try_tailnet_probe(&args.authority).await?,
Commands::TailnetPing(args) => {
try_tailnet_ping(&args.remote, &args.payload, args.timeout_ms).await?
}
Commands::TailnetUdpEcho(args) => {
try_tailnet_udp_echo(&args.remote, &args.message, args.timeout_ms).await?
}
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
Commands::Exec(args) => { Commands::Exec(args) => {
try_exec( try_exec(

60
contributors.nix Normal file
View file

@ -0,0 +1,60 @@
# Canonical Burrow identity registry: usernames, display names, emails,
# admin/bootstrap flags, forge SSH keys, and roles. Host and service modules
# derive Forgejo/Authentik/group configuration from this file; secrets
# themselves stay in agenix paths, not here.
{
  groups = {
    # Group names used for Burrow user/admin membership mapping.
    users = "burrow-users";
    admins = "burrow-admins";
  };
  identities = {
    # Shared project contact identity; forge-authorized bootstrap admin.
    contact = {
      displayName = "Burrow";
      canonicalEmail = "contact@burrow.net";
      sourceEmail = "net.burrow@gmail.com";
      isAdmin = true;
      forgeAuthorized = true;
      bootstrapAuthentik = true;
      sshPublicKeyPath = ./nixos/keys/contact_at_burrow_net.pub;
      roles = [
        "operator"
        "forge-admin"
      ];
    };
    # Human operator; Authentik bootstrap but no forge SSH authorization.
    conrad = {
      displayName = "Conrad Kramer";
      canonicalEmail = "conrad@burrow.net";
      sourceEmail = "ckrames1234@gmail.com";
      isAdmin = true;
      forgeAuthorized = false;
      bootstrapAuthentik = true;
      roles = [
        "operator"
        "founder"
      ];
    };
    # Non-admin automation identity with forge SSH access only.
    agent = {
      displayName = "Burrow Agent";
      canonicalEmail = "agent@burrow.net";
      isAdmin = false;
      forgeAuthorized = true;
      bootstrapAuthentik = false;
      sshPublicKeyPath = ./nixos/keys/agent_at_burrow_net.pub;
      roles = [
        "automation"
      ];
    };
    # UI-test identity; its Authentik password comes from the named secret.
    ui-test = {
      displayName = "Burrow UI Test";
      canonicalEmail = "ui-test@burrow.net";
      isAdmin = false;
      forgeAuthorized = false;
      bootstrapAuthentik = true;
      authentikPasswordSecret = "burrowAuthentikUiTestPassword";
      roles = [
        "testing"
        "apple-ui"
      ];
    };
  };
}

View file

@ -58,3 +58,17 @@ evolution/
``` ```
Use ASCII Markdown. Keep metadata at the top of each proposal so tooling and future agents can parse it quickly. Use ASCII Markdown. Keep metadata at the top of each proposal so tooling and future agents can parse it quickly.
## BEP Helper
Use the `bep` helper under `Scripts/` to browse or list proposals:
- `Scripts/bep` opens a quick browser for `evolution/`.
- `Scripts/bep list --status Draft` lists proposals by status.
- `Scripts/bep open BEP-0005` opens a proposal in `$EDITOR`.
Validate proposal metadata with:
```bash
python3 Scripts/check-bep-metadata.py
```

View file

@ -0,0 +1,78 @@
# `BEP-0005` - Daemon IPC and Apple Boundary
```text
Status: Draft
Proposal: BEP-0005
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: II, III, IV, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should formalize one Apple/runtime boundary: Apple clients speak only to the daemon over gRPC on the app-group Unix socket, and the daemon owns all external control-plane, helper-process, and runtime coordination work. This prevents UI code from accreting side HTTP paths or ad hoc control-plane integrations that bypass the system Burrow is supposed to own.
## Motivation
- The current Tailnet work already showed the failure mode: Swift UI code started reaching around the daemon boundary to talk to helper HTTP endpoints directly.
- Apple-specific process ownership is easy to blur between the app, the network extension, and helper daemons unless the contract is explicit.
- If Burrow wants a durable multi-runtime architecture, the daemon must remain the only orchestration boundary between clients and control/data-plane behavior.
## Detailed Design
- Apple UI and Apple support libraries may call only daemon gRPC methods over the declared Burrow Unix socket.
- Direct Swift calls to external control-plane HTTP APIs, localhost helper HTTP servers, or runtime-specific subprocesses are forbidden.
- The daemon is responsible for:
- discovery of Tailnet authorities and related metadata
- control-plane session setup and tracking
- login/session lifecycle brokering
- runtime start/stop/reconcile
- translating helper or bridge processes into stable daemon RPCs
- `burrow/src/control/` owns transport-neutral control-plane semantics such as discovery, authority normalization, and request/response shaping.
- Apple UI owns presentation only:
- forms
- local state
- presenting returned auth URLs or statuses
- surfacing daemon availability and errors
- Any new Apple-facing runtime capability requires a daemon RPC first.
## Security and Operational Considerations
- Keeping control-plane I/O out of Swift UI reduces accidental secret, token, and callback sprawl across app code.
- The daemon boundary makes testing and kill-switch behavior tractable because runtime integration is localized.
- Apple daemon lifecycle ownership must be explicit: either the app ensures the daemon is running before RPC or the extension owns it and the UI surfaces daemon-unavailable state clearly.
## Contributor Playbook
- Before adding a new Apple-side workflow, identify the daemon RPC that should own it.
- If the RPC does not exist, add the protocol shape in `proto/burrow.proto`, implement it in the daemon, and only then wire Swift UI.
- Verify that no Swift UI or support code calls external control-plane HTTP endpoints directly.
- For Tailnet and similar flows, test:
- daemon unavailable behavior
- successful RPC path
- error propagation through the UI
## Alternatives Considered
- Let Apple UI call control-plane endpoints directly for convenience. Rejected because it creates parallel orchestration paths and breaks the daemon contract.
- Allow one-off exceptions for login helpers. Rejected because those exceptions become the architecture.
## Impact on Other Work
- Governs the Tailnet refactor and future Apple runtime work.
- Interacts with BEP-0002 control-plane bootstrap and BEP-0003 transport refactoring.
## Decision
Pending.
## References
- `Apple/UI/`
- `Apple/Core/`
- `Apple/NetworkExtension/`
- `burrow/src/daemon/`
- `burrow/src/control/`

View file

@ -0,0 +1,71 @@
# `BEP-0006` - Tailnet Authority-First Control Plane
```text
Status: Draft
Proposal: BEP-0006
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: I, II, IV, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should treat Tailnet as one protocol family. Tailscale-managed and self-hosted Headscale-style deployments differ by authority, policy, and auth details, not by a distinct user-facing protocol. Burrow's config and UI should therefore be authority-first rather than provider-first.
## Motivation
- Splitting Tailscale and Headscale into separate user-facing providers causes fake architectural divergence.
- Discovery already naturally returns an authority and optional issuer; that is the stable contract users actually need.
- Future managed or enterprise deployments should fit the same model without requiring another protocol picker.
## Detailed Design
- Tailnet configuration is centered on:
- account
- identity
- authority/login server URL
- optional tailnet name
- optional hostname
- auth method/material
- User-facing surfaces should not force a protocol choice between Tailscale and Headscale.
- Provider inference may remain internal metadata for compatibility and diagnostics:
- default managed Tailscale authority
- custom self-hosted authority
- Burrow-owned authority when explicitly applicable
- Discovery returns authority and related metadata; editing the authority is the mechanism that moves a configuration from managed default to custom control server.
- The daemon and control layer own provider inference; the UI should primarily present “Tailnet” plus the selected authority.
## Security and Operational Considerations
- Authority-first config reduces UI complexity and makes misconfiguration easier to reason about.
- Provider-specific assumptions must not leak into packet or control-plane semantics unless the authority actually requires them.
- Auth material must remain authority-scoped and identity-scoped in daemon storage.
## Contributor Playbook
- Remove provider pickers from Tailnet UI unless a concrete protocol difference requires one.
- Store the authority explicitly in payloads and infer provider internally only when needed.
- Prefer tests that validate authority normalization and discovery behavior over UI-provider branching.
## Alternatives Considered
- Keep separate user-facing providers for Tailscale and Headscale. Rejected because it models deployment shape as protocol shape.
- Collapse all control planes into one opaque Burrow provider. Rejected because the authority still matters operationally and diagnostically.
## Impact on Other Work
- Refines BEP-0002's Tailscale-shaped control-plane work.
- Constrains the Tailnet Apple refactor and future daemon control-plane storage.
## Decision
Pending.
## References
- `burrow/src/control/`
- `Apple/UI/Networks/`
- `proto/burrow.proto`

View file

@ -0,0 +1,73 @@
# `BEP-0007` - Identity Registry and Operator Bootstrap
```text
Status: Draft
Proposal: BEP-0007
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: II, III, IV, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should maintain one canonical registry for project identities, aliases, bootstrap users, SSH keys, and admin-group mappings. Forgejo, Authentik, and related bootstrap configuration should derive from that registry instead of hardcoding overlapping identity facts in multiple modules.
## Motivation
- Burrow currently hardcodes operator and admin/bootstrap user facts directly in host configuration.
- Multi-account and self-hosted identity are becoming core architecture, not incidental infra details.
- A single registry reduces drift across Forgejo, Authentik, Headscale, SSH authorization, and future control-plane bootstrap.
## Detailed Design
- Add a root-level identity registry (`contributors.nix`) as the canonical source of truth for:
- usernames
- display names
- canonical emails
- external source emails or aliases
- admin scope
- bootstrap eligibility
- forge authorized SSH keys
- named roles
- Consume that registry from host configuration for:
- Forgejo authorized keys
- Forgejo bootstrap admin defaults
- Authentik bootstrap users
- Burrow user/admin group names
- Future work may derive contributor docs, OIDC bootstrap, and additional runtime configuration from the same registry.
## Security and Operational Considerations
- Identity drift is a security bug when it affects admin groups, bootstrap accounts, or SSH authorization.
- The registry stores metadata only; secrets remain in agenix or other declared secret paths.
- Changes to the registry should receive explicit review because they affect access and governance.
## Contributor Playbook
- Edit `contributors.nix` first when changing operator, admin, alias, or bootstrap identity state.
- Derive runtime configuration from the registry instead of duplicating the same facts elsewhere.
- Keep secret references separate from identity metadata.
## Alternatives Considered
- Continue hardcoding users in module options. Rejected because drift is inevitable once Forgejo, Authentik, and Headscale all depend on the same identities.
- Create separate per-service user lists. Rejected because it duplicates governance facts and weakens review.
## Impact on Other Work
- Supports forge auth, Authentik group sync, and future multi-account Burrow control-plane work.
- Creates the basis for stronger contributor and operator provenance later.
## Decision
Pending.
## References
- `contributors.nix`
- `nixos/hosts/burrow-forge/default.nix`
- `nixos/modules/burrow-authentik.nix`
- `nixos/modules/burrow-forge.nix`

26
flake.lock generated
View file

@ -123,13 +123,37 @@
"url": "https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable" "url": "https://codeload.github.com/NixOS/nixpkgs/tar.gz/nixos-unstable"
} }
}, },
"nsc-autoscaler": {
"inputs": {
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1775221037,
"narHash": "sha256-tv6Y3cqn76PEyZpSMMItVW96KKIboovBWTOv5Lt7PXg=",
"ref": "refs/heads/main",
"rev": "2c485752fde28ec3be2f228b571d1906f4bcf917",
"revCount": 10,
"type": "git",
"url": "https://compatible.systems/conrad/nsc-autoscaler.git"
},
"original": {
"type": "git",
"url": "https://compatible.systems/conrad/nsc-autoscaler.git"
}
},
"root": { "root": {
"inputs": { "inputs": {
"agenix": "agenix", "agenix": "agenix",
"disko": "disko", "disko": "disko",
"flake-utils": "flake-utils", "flake-utils": "flake-utils",
"hcloud-upload-image-src": "hcloud-upload-image-src", "hcloud-upload-image-src": "hcloud-upload-image-src",
"nixpkgs": "nixpkgs" "nixpkgs": "nixpkgs",
"nsc-autoscaler": "nsc-autoscaler"
} }
}, },
"systems": { "systems": {

View file

@ -12,13 +12,18 @@
url = "tarball+https://codeload.github.com/nix-community/disko/tar.gz/master"; url = "tarball+https://codeload.github.com/nix-community/disko/tar.gz/master";
inputs.nixpkgs.follows = "nixpkgs"; inputs.nixpkgs.follows = "nixpkgs";
}; };
nsc-autoscaler = {
url = "git+https://compatible.systems/conrad/nsc-autoscaler.git";
inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-utils.follows = "flake-utils";
};
hcloud-upload-image-src = { hcloud-upload-image-src = {
url = "tarball+https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0"; url = "tarball+https://codeload.github.com/apricote/hcloud-upload-image/tar.gz/v1.3.0";
flake = false; flake = false;
}; };
}; };
outputs = { self, nixpkgs, flake-utils, agenix, disko, hcloud-upload-image-src }: outputs = { self, nixpkgs, flake-utils, agenix, disko, nsc-autoscaler, hcloud-upload-image-src }:
let let
supportedSystems = [ supportedSystems = [
"x86_64-linux" "x86_64-linux"
@ -175,7 +180,7 @@
// { // {
nixosModules.burrow-forge = import ./nixos/modules/burrow-forge.nix; nixosModules.burrow-forge = import ./nixos/modules/burrow-forge.nix;
nixosModules.burrow-forge-runner = import ./nixos/modules/burrow-forge-runner.nix; nixosModules.burrow-forge-runner = import ./nixos/modules/burrow-forge-runner.nix;
nixosModules.burrow-forgejo-nsc = import ./nixos/modules/burrow-forgejo-nsc.nix; nixosModules.burrow-forgejo-nsc = nsc-autoscaler.nixosModules.default;
nixosModules.burrow-authentik = import ./nixos/modules/burrow-authentik.nix; nixosModules.burrow-authentik = import ./nixos/modules/burrow-authentik.nix;
nixosModules.burrow-headscale = import ./nixos/modules/burrow-headscale.nix; nixosModules.burrow-headscale = import ./nixos/modules/burrow-headscale.nix;

View file

@ -9,7 +9,7 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B
- `hosts/burrow-forge/default.nix`: host entrypoint - `hosts/burrow-forge/default.nix`: host entrypoint
- `modules/burrow-forge.nix`: Forgejo, Caddy, PostgreSQL, and admin bootstrap module - `modules/burrow-forge.nix`: Forgejo, Caddy, PostgreSQL, and admin bootstrap module
- `modules/burrow-forge-runner.nix`: Forgejo Actions runner and agent identity bootstrap - `modules/burrow-forge-runner.nix`: Forgejo Actions runner and agent identity bootstrap
- `modules/burrow-forgejo-nsc.nix`: Namespace-backed ephemeral Forgejo runner services - upstream `compatible.systems/conrad/nsc-autoscaler`: Namespace-backed ephemeral Forgejo runner module consumed via the Burrow flake input
- `modules/burrow-authentik.nix`: minimal Authentik IdP for Burrow control planes - `modules/burrow-authentik.nix`: minimal Authentik IdP for Burrow control planes
- `modules/burrow-headscale.nix`: Headscale control plane rooted in Authentik OIDC - `modules/burrow-headscale.nix`: Headscale control plane rooted in Authentik OIDC
- `../secrets.nix`: agenix recipient map for tracked Burrow forge secrets - `../secrets.nix`: agenix recipient map for tracked Burrow forge secrets
@ -32,7 +32,7 @@ Mail hosting is intentionally not part of this NixOS host in the current plan. B
3. Run `Scripts/bootstrap-forge-intake.sh` to place the Forgejo bootstrap password file and automation SSH key under `/var/lib/burrow/intake/`. 3. Run `Scripts/bootstrap-forge-intake.sh` to place the Forgejo bootstrap password file and automation SSH key under `/var/lib/burrow/intake/`.
4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account. 4. Let `burrow-forgejo-bootstrap.service` create or rotate the initial Forgejo admin account.
5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent <agent@burrow.net>`. 5. Let `burrow-forgejo-runner-bootstrap.service` register the self-hosted Forgejo runner and seed Git identity as `agent <agent@burrow.net>`.
6. Run `Scripts/provision-forgejo-nsc.sh` locally, then `Scripts/sync-forgejo-nsc-config.sh` to place the Namespace dispatcher/autoscaler runtime inputs under `/var/lib/burrow/intake/`. 6. Run `Scripts/provision-forgejo-nsc.sh` locally, then `Scripts/sync-forgejo-nsc-config.sh` to place the raw Namespace dispatcher/autoscaler runtime inputs under `/var/lib/burrow/intake/` for the upstream `services.forgejo-nsc` module.
7. Ensure `/var/lib/agenix/agenix.key` exists on the host, encrypt `secrets/infra/authentik.env.age`, `secrets/infra/authentik-google-client-id.age`, `secrets/infra/authentik-google-client-secret.age`, `secrets/infra/forgejo-oidc-client-secret.age`, and `secrets/infra/headscale-oidc-client-secret.age`, and let agenix materialize them under `/run/agenix/`. 7. Ensure `/var/lib/agenix/agenix.key` exists on the host, encrypt `secrets/infra/authentik.env.age`, `secrets/infra/authentik-google-client-id.age`, `secrets/infra/authentik-google-client-secret.age`, `secrets/infra/forgejo-oidc-client-secret.age`, and `secrets/infra/headscale-oidc-client-secret.age`, and let agenix materialize them under `/run/agenix/`.
8. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, `auth.burrow.net`, `ts.burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME. 8. Use `Scripts/cloudflare-upsert-a-record.sh` to point `git.burrow.net`, `burrow.net`, `auth.burrow.net`, `ts.burrow.net`, and `nsc-autoscaler.burrow.net` at the host with Cloudflare proxying disabled for ACME.
9. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace. 9. Use `Scripts/forge-deploy.sh --allow-dirty` for subsequent remote `nixos-rebuild` runs from the live workspace.

View file

@ -1,4 +1,28 @@
{ config, self, ... }: { config, lib, self, ... }:
let
contributors = import ../../../contributors.nix;
identities = contributors.identities;
authentikPasswordSecretPath = identity:
if identity ? authentikPasswordSecret
then config.age.secrets.${identity.authentikPasswordSecret}.path
else null;
bootstrapUsers = lib.mapAttrsToList
(
username: identity: {
inherit username;
name = identity.displayName;
email = identity.canonicalEmail;
sourceEmail = identity.sourceEmail or null;
isAdmin = identity.isAdmin or false;
passwordFile = authentikPasswordSecretPath identity;
}
)
(lib.filterAttrs (_: identity: identity.bootstrapAuthentik or false) identities);
forgeAuthorizedKeys = map
(username: builtins.readFile identities.${username}.sshPublicKeyPath)
(builtins.attrNames (lib.filterAttrs (_: identity: identity.forgeAuthorized or false) identities));
in
{ {
imports = [ imports = [
@ -39,6 +63,12 @@
group = "forgejo"; group = "forgejo";
mode = "0440"; mode = "0440";
}; };
age.secrets.burrowTailscaleOidcClientSecret = {
file = ../../../secrets/infra/tailscale-oidc-client-secret.age;
owner = "root";
group = "root";
mode = "0400";
};
age.secrets.burrowAuthentikGoogleClientId = { age.secrets.burrowAuthentikGoogleClientId = {
file = ../../../secrets/infra/authentik-google-client-id.age; file = ../../../secrets/infra/authentik-google-client-id.age;
owner = "root"; owner = "root";
@ -51,6 +81,12 @@
group = "root"; group = "root";
mode = "0400"; mode = "0400";
}; };
age.secrets.burrowAuthentikUiTestPassword = {
file = ../../../secrets/infra/authentik-ui-test-password.age;
owner = "root";
group = "root";
mode = "0400";
};
networking.extraHosts = '' networking.extraHosts = ''
127.0.0.1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net 127.0.0.1 burrow.net git.burrow.net auth.burrow.net ts.burrow.net nsc-autoscaler.burrow.net
@ -59,12 +95,14 @@
services.burrow.forge = { services.burrow.forge = {
enable = true; enable = true;
contactEmail = identities.contact.canonicalEmail;
adminUsername = "contact";
adminEmail = identities.contact.canonicalEmail;
adminPasswordFile = "/var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt"; adminPasswordFile = "/var/lib/burrow/intake/forgejo_pass_contact_at_burrow_net.txt";
oidcAdminGroup = contributors.groups.admins;
oidcRestrictedGroup = contributors.groups.users;
oidcClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path; oidcClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path;
authorizedKeys = [ authorizedKeys = forgeAuthorizedKeys;
(builtins.readFile ../../keys/contact_at_burrow_net.pub)
(builtins.readFile ../../keys/agent_at_burrow_net.pub)
];
}; };
services.burrow.forgeRunner = { services.burrow.forgeRunner = {
@ -72,7 +110,7 @@
sshPrivateKeyFile = "/var/lib/burrow/intake/agent_at_burrow_net_ed25519"; sshPrivateKeyFile = "/var/lib/burrow/intake/agent_at_burrow_net_ed25519";
}; };
services.burrow.forgejoNsc = { services.forgejo-nsc = {
enable = true; enable = true;
nscTokenFile = "/var/lib/burrow/intake/forgejo_nsc_token.txt"; nscTokenFile = "/var/lib/burrow/intake/forgejo_nsc_token.txt";
dispatcher = { dispatcher = {
@ -89,25 +127,13 @@
envFile = config.age.secrets.burrowAuthentikEnv.path; envFile = config.age.secrets.burrowAuthentikEnv.path;
forgejoClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path; forgejoClientSecretFile = config.age.secrets.burrowForgejoOidcClientSecret.path;
headscaleClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path; headscaleClientSecretFile = config.age.secrets.burrowHeadscaleOidcClientSecret.path;
tailscaleClientSecretFile = config.age.secrets.burrowTailscaleOidcClientSecret.path;
googleClientIDFile = config.age.secrets.burrowAuthentikGoogleClientId.path; googleClientIDFile = config.age.secrets.burrowAuthentikGoogleClientId.path;
googleClientSecretFile = config.age.secrets.burrowAuthentikGoogleClientSecret.path; googleClientSecretFile = config.age.secrets.burrowAuthentikGoogleClientSecret.path;
googleLoginMode = "redirect"; googleLoginMode = "redirect";
bootstrapUsers = [ userGroupName = contributors.groups.users;
{ adminGroupName = contributors.groups.admins;
username = "contact"; bootstrapUsers = bootstrapUsers;
name = "Burrow";
email = "contact@burrow.net";
sourceEmail = "net.burrow@gmail.com";
isAdmin = true;
}
{
username = "conrad";
name = "Conrad Kramer";
email = "conrad@burrow.net";
sourceEmail = "ckrames1234@gmail.com";
isAdmin = true;
}
];
}; };
services.burrow.headscale = { services.burrow.headscale = {

View file

@ -10,7 +10,9 @@ let
dataVolume = "burrow-authentik-data:/data"; dataVolume = "burrow-authentik-data:/data";
directorySyncScript = ../../Scripts/authentik-sync-burrow-directory.sh; directorySyncScript = ../../Scripts/authentik-sync-burrow-directory.sh;
forgejoOidcSyncScript = ../../Scripts/authentik-sync-forgejo-oidc.sh; forgejoOidcSyncScript = ../../Scripts/authentik-sync-forgejo-oidc.sh;
tailscaleOidcSyncScript = ../../Scripts/authentik-sync-tailscale-oidc.sh;
googleSourceSyncScript = ../../Scripts/authentik-sync-google-source.sh; googleSourceSyncScript = ../../Scripts/authentik-sync-google-source.sh;
tailnetAuthFlowSyncScript = ../../Scripts/authentik-sync-tailnet-auth-flow.sh;
authentikBlueprint = pkgs.writeText "burrow-authentik-blueprint.yaml" '' authentikBlueprint = pkgs.writeText "burrow-authentik-blueprint.yaml" ''
version: 1 version: 1
metadata: metadata:
@ -130,6 +132,24 @@ in
description = "Authentik application slug for Forgejo."; description = "Authentik application slug for Forgejo.";
}; };
tailscaleProviderSlug = lib.mkOption {
type = lib.types.str;
default = "tailscale";
description = "Authentik application slug for Tailscale custom OIDC sign-in.";
};
tailscaleClientId = lib.mkOption {
type = lib.types.str;
default = "tailscale.burrow.net";
description = "Client ID Authentik should present to Tailscale.";
};
tailscaleClientSecretFile = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Host-local file containing the Authentik Tailscale OIDC client secret.";
};
forgejoClientId = lib.mkOption { forgejoClientId = lib.mkOption {
type = lib.types.str; type = lib.types.str;
default = "git.burrow.net"; default = "git.burrow.net";
@ -175,6 +195,36 @@ in
description = "Identification-stage behavior for the Google Authentik source."; description = "Identification-stage behavior for the Google Authentik source.";
}; };
headscaleAuthenticationFlowSlug = lib.mkOption {
type = lib.types.str;
default = "burrow-tailnet-authentication";
description = "Authentik authentication flow slug used for Burrow Tailnet sign-in.";
};
headscaleAuthenticationFlowName = lib.mkOption {
type = lib.types.str;
default = "Burrow Tailnet Authentication";
description = "Authentik authentication flow name used for Burrow Tailnet sign-in.";
};
headscaleIdentificationStageName = lib.mkOption {
type = lib.types.str;
default = "burrow-tailnet-identification-stage";
description = "Authentik identification stage used for Burrow Tailnet sign-in.";
};
headscalePasswordStageName = lib.mkOption {
type = lib.types.str;
default = "burrow-tailnet-password-stage";
description = "Authentik password stage used for Burrow Tailnet sign-in.";
};
headscaleUserLoginStageName = lib.mkOption {
type = lib.types.str;
default = "burrow-tailnet-user-login-stage";
description = "Authentik user-login stage used for Burrow Tailnet sign-in.";
};
userGroupName = lib.mkOption { userGroupName = lib.mkOption {
type = lib.types.str; type = lib.types.str;
default = "burrow-users"; default = "burrow-users";
@ -217,6 +267,11 @@ in
default = false; default = false;
description = "Whether this user should be in the Burrow admin group."; description = "Whether this user should be in the Burrow admin group.";
}; };
passwordFile = lib.mkOption {
type = nullOr str;
default = null;
description = "Optional host-local file containing a bootstrap password for this user.";
};
}; };
}); });
default = [ ]; default = [ ];
@ -277,6 +332,13 @@ in
fi fi
''} ''}
${lib.optionalString (cfg.tailscaleClientSecretFile != null) ''
if [ ! -s ${lib.escapeShellArg cfg.tailscaleClientSecretFile} ]; then
echo "Tailscale client secret missing: ${cfg.tailscaleClientSecretFile}" >&2
exit 1
fi
''}
install -d -m 0750 -o root -g root ${runtimeDir} ${blueprintDir} install -d -m 0750 -o root -g root ${runtimeDir} ${blueprintDir}
install -m 0644 -o root -g root ${authentikBlueprint} ${blueprintFile} install -m 0644 -o root -g root ${authentikBlueprint} ${blueprintFile}
@ -468,7 +530,7 @@ EOF
restartTriggers = [ restartTriggers = [
directorySyncScript directorySyncScript
cfg.envFile cfg.envFile
]; ] ++ lib.concatMap (user: lib.optional (user.passwordFile != null) user.passwordFile) cfg.bootstrapUsers;
path = [ path = [
pkgs.bash pkgs.bash
pkgs.coreutils pkgs.coreutils
@ -491,7 +553,7 @@ EOF
export AUTHENTIK_BURROW_ADMINS_GROUP=${lib.escapeShellArg cfg.adminGroupName} export AUTHENTIK_BURROW_ADMINS_GROUP=${lib.escapeShellArg cfg.adminGroupName}
export AUTHENTIK_FORGEJO_APPLICATION_SLUG=${lib.escapeShellArg cfg.forgejoProviderSlug} export AUTHENTIK_FORGEJO_APPLICATION_SLUG=${lib.escapeShellArg cfg.forgejoProviderSlug}
export AUTHENTIK_BURROW_DIRECTORY_JSON='${builtins.toJSON (map (user: { export AUTHENTIK_BURROW_DIRECTORY_JSON='${builtins.toJSON (map (user: {
inherit (user) username name email isAdmin; inherit (user) username name email isAdmin passwordFile;
groups = user.groups; groups = user.groups;
}) cfg.bootstrapUsers)}' }) cfg.bootstrapUsers)}'
@ -499,6 +561,60 @@ EOF
''; '';
}; };
# Oneshot reconciler that applies the Burrow Tailnet authentication flow
# (flow, stages, and provider bindings) to Authentik via the sync script.
systemd.services.burrow-authentik-tailnet-auth-flow = {
  description = "Reconcile the Burrow Tailnet authentication flow";
  # Run only once Authentik is ready; when the Google source is configured,
  # also order after its reconciler so the flow can reference that source.
  after =
    [
      "burrow-authentik-ready.service"
      "network-online.target"
    ]
    ++ lib.optionals (
      cfg.googleClientIDFile != null && cfg.googleClientSecretFile != null
    ) [ "burrow-authentik-google-source.service" ];
  wants =
    [
      "burrow-authentik-ready.service"
      "network-online.target"
    ]
    ++ lib.optionals (
      cfg.googleClientIDFile != null && cfg.googleClientSecretFile != null
    ) [ "burrow-authentik-google-source.service" ];
  wantedBy = [ "multi-user.target" ];
  # Re-run the reconciler when the sync script or credentials env change.
  restartTriggers = [
    tailnetAuthFlowSyncScript
    cfg.envFile
  ];
  path = [
    pkgs.bash
    pkgs.coreutils
    pkgs.curl
    pkgs.jq
  ];
  serviceConfig = {
    Type = "oneshot";
    User = "root";
    Group = "root";
  };
  script = ''
    set -euo pipefail
    # Export Authentik API credentials from the env file into the environment.
    set -a
    source ${lib.escapeShellArg cfg.envFile}
    set +a
    export AUTHENTIK_URL=https://${cfg.domain}
    export AUTHENTIK_TAILNET_PROVIDER_SLUG=${lib.escapeShellArg cfg.headscaleProviderSlug}
    # Bind the flow to both the Headscale and Tailscale providers. Build the
    # JSON list with builtins.toJSON and shell-escape it (consistent with the
    # other exports) so slug values can never break the shell quoting.
    export AUTHENTIK_TAILNET_PROVIDER_SLUGS_JSON=${lib.escapeShellArg (builtins.toJSON [ cfg.headscaleProviderSlug cfg.tailscaleProviderSlug ])}
    export AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_NAME=${lib.escapeShellArg cfg.headscaleAuthenticationFlowName}
    export AUTHENTIK_TAILNET_AUTHENTICATION_FLOW_SLUG=${lib.escapeShellArg cfg.headscaleAuthenticationFlowSlug}
    export AUTHENTIK_TAILNET_IDENTIFICATION_STAGE_NAME=${lib.escapeShellArg cfg.headscaleIdentificationStageName}
    export AUTHENTIK_TAILNET_PASSWORD_STAGE_NAME=${lib.escapeShellArg cfg.headscalePasswordStageName}
    export AUTHENTIK_TAILNET_USER_LOGIN_STAGE_NAME=${lib.escapeShellArg cfg.headscaleUserLoginStageName}
    export AUTHENTIK_TAILNET_GOOGLE_SOURCE_SLUG=${lib.escapeShellArg cfg.googleSourceSlug}
    ${pkgs.bash}/bin/bash ${tailnetAuthFlowSyncScript}
  '';
};
systemd.services.burrow-authentik-forgejo-oidc = lib.mkIf (cfg.forgejoClientSecretFile != null) { systemd.services.burrow-authentik-forgejo-oidc = lib.mkIf (cfg.forgejoClientSecretFile != null) {
description = "Reconcile the Burrow Authentik Forgejo OIDC application"; description = "Reconcile the Burrow Authentik Forgejo OIDC application";
after = [ after = [
@ -545,6 +661,53 @@ EOF
''; '';
}; };
# Oneshot reconciler for the Tailscale OIDC application in Authentik.
# Only defined when a client secret file is configured on the host.
systemd.services.burrow-authentik-tailscale-oidc = lib.mkIf (cfg.tailscaleClientSecretFile != null) {
  description = "Reconcile the Burrow Authentik Tailscale OIDC application";
  after = [
    "burrow-authentik-ready.service"
    "network-online.target"
  ];
  wants = [
    "burrow-authentik-ready.service"
    "network-online.target"
  ];
  wantedBy = [ "multi-user.target" ];
  # Re-run when the sync script, credentials env, or client secret change.
  restartTriggers = [
    tailscaleOidcSyncScript
    cfg.envFile
    cfg.tailscaleClientSecretFile
  ];
  path = [
    pkgs.bash
    pkgs.coreutils
    pkgs.curl
    pkgs.jq
  ];
  serviceConfig = {
    Type = "oneshot";
    User = "root";
    Group = "root";
  };
  # The script hands the application/provider parameters to the sync script
  # via environment variables; the client secret is read from the host file
  # with CR/LF stripped so trailing newlines never leak into the secret.
  script = ''
    set -euo pipefail
    set -a
    source ${lib.escapeShellArg cfg.envFile}
    set +a
    export AUTHENTIK_URL=https://${cfg.domain}
    export AUTHENTIK_TAILSCALE_APPLICATION_SLUG=${lib.escapeShellArg cfg.tailscaleProviderSlug}
    export AUTHENTIK_TAILSCALE_APPLICATION_NAME=Tailscale
    export AUTHENTIK_TAILSCALE_PROVIDER_NAME=Tailscale
    export AUTHENTIK_TAILSCALE_TEMPLATE_SLUG=${lib.escapeShellArg cfg.headscaleProviderSlug}
    export AUTHENTIK_TAILSCALE_CLIENT_ID=${lib.escapeShellArg cfg.tailscaleClientId}
    export AUTHENTIK_TAILSCALE_CLIENT_SECRET="$(tr -d '\r\n' < ${lib.escapeShellArg cfg.tailscaleClientSecretFile})"
    export AUTHENTIK_TAILSCALE_LAUNCH_URL=https://login.tailscale.com/start/oidc
    export AUTHENTIK_TAILSCALE_REDIRECT_URIS_JSON='["https://login.tailscale.com/a/oauth_response"]'
    ${pkgs.bash}/bin/bash ${tailscaleOidcSyncScript}
  '';
};
services.caddy.virtualHosts."${cfg.domain}".extraConfig = '' services.caddy.virtualHosts."${cfg.domain}".extraConfig = ''
encode gzip zstd encode gzip zstd
reverse_proxy 127.0.0.1:${toString cfg.port} reverse_proxy 127.0.0.1:${toString cfg.port}

View file

@ -258,20 +258,20 @@ in
"${cfg.siteDomain}".extraConfig = '' "${cfg.siteDomain}".extraConfig = ''
encode gzip zstd encode gzip zstd
@oidcConfig path /.well-known/openid-configuration @oidcConfig path /.well-known/openid-configuration
redir @oidcConfig https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.forgejoProviderSlug}/.well-known/openid-configuration 308 redir @oidcConfig https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.tailscaleProviderSlug}/.well-known/openid-configuration 308
@tailnetConfig path /.well-known/burrow-tailnet @tailnetConfig path /.well-known/burrow-tailnet
header @tailnetConfig Content-Type application/json header @tailnetConfig Content-Type application/json
respond @tailnetConfig "{\"domain\":\"${cfg.siteDomain}\",\"provider\":\"headscale\",\"authority\":\"https://${config.services.burrow.headscale.domain}\",\"oidc_issuer\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.headscaleProviderSlug}/\"}" 200 respond @tailnetConfig "{\"domain\":\"${cfg.siteDomain}\",\"provider\":\"headscale\",\"authority\":\"https://${config.services.burrow.headscale.domain}\",\"oidc_issuer\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.headscaleProviderSlug}/\"}" 200
@webfinger path /.well-known/webfinger @webfinger path /.well-known/webfinger
header @webfinger Content-Type application/jrd+json header @webfinger Content-Type application/jrd+json
respond @webfinger "{\"subject\":\"{query.resource}\",\"links\":[{\"rel\":\"http://openid.net/specs/connect/1.0/issuer\",\"href\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.forgejoProviderSlug}/\"},{\"rel\":\"https://burrow.net/rel/tailnet-control-server\",\"href\":\"https://${config.services.burrow.headscale.domain}\"}]}" 200 respond @webfinger "{\"subject\":\"{query.resource}\",\"links\":[{\"rel\":\"http://openid.net/specs/connect/1.0/issuer\",\"href\":\"https://${config.services.burrow.authentik.domain}/application/o/${config.services.burrow.authentik.tailscaleProviderSlug}/\"},{\"rel\":\"https://burrow.net/rel/tailnet-control-server\",\"href\":\"https://${config.services.burrow.headscale.domain}\"}]}" 200
@root path / @root path /
redir @root ${homeRepoUrl} 308 redir @root ${homeRepoUrl} 308
respond 404 respond 404
''; '';
} }
// lib.optionalAttrs ( // lib.optionalAttrs (
config.services.burrow.forgejoNsc.enable && config.services.burrow.forgejoNsc.autoscaler.enable config.services.forgejo-nsc.enable && config.services.forgejo-nsc.autoscaler.enable
) { ) {
"${cfg.nscAutoscalerDomain}".extraConfig = '' "${cfg.nscAutoscalerDomain}".extraConfig = ''
encode gzip zstd encode gzip zstd

View file

@ -1,234 +0,0 @@
# NixOS module running the Forgejo Namespace Cloud (NSC) dispatcher and
# autoscaler as systemd services, fed by host-local YAML configs and an
# optional NSC token that are copied into a private state directory.
{ config, lib, pkgs, self, ... }:
let
  inherit (lib)
    mkEnableOption
    mkIf
    mkOption
    types
    mkAfter
    mkDefault
    optional
    optionalAttrs
    optionalString
    ;
  cfg = config.services.burrow.forgejoNsc;
  # Runtime copies of the host-provided configs inside the state directory.
  dispatcherRuntimeConfig = "${cfg.stateDir}/dispatcher.yaml";
  autoscalerRuntimeConfig = "${cfg.stateDir}/autoscaler.yaml";
  # Pre-start guard: refuse to launch while the config still contains
  # "PENDING-" placeholder values left by the provisioning scripts.
  pendingCheck = configPath: pkgs.writeShellScript "forgejo-nsc-check-pending" ''
    set -euo pipefail
    if ${pkgs.gnugrep}/bin/grep -q 'PENDING-' '${configPath}'; then
      echo "forgejo-nsc config still contains placeholder values (PENDING-); update ${configPath} before starting." >&2
      exit 1
    fi
  '';
  nscTokenPath = "${cfg.stateDir}/nsc.token";
  # Sync snippets: copy the token/configs into the state dir with tight
  # permissions and service-user ownership; empty when the source is unset.
  tokenSync = optionalString (cfg.nscTokenFile != null) ''
    install -m 600 ${lib.escapeShellArg cfg.nscTokenFile} ${lib.escapeShellArg nscTokenPath}
    chown ${cfg.user}:${cfg.group} ${nscTokenPath}
    chmod 600 ${nscTokenPath}
  '';
  dispatcherConfigSync = optionalString (cfg.dispatcher.configFile != null) ''
    install -m 400 ${lib.escapeShellArg cfg.dispatcher.configFile} ${lib.escapeShellArg dispatcherRuntimeConfig}
    chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg dispatcherRuntimeConfig}
    chmod 400 ${lib.escapeShellArg dispatcherRuntimeConfig}
  '';
  autoscalerConfigSync = optionalString (cfg.autoscaler.configFile != null) ''
    install -m 400 ${lib.escapeShellArg cfg.autoscaler.configFile} ${lib.escapeShellArg autoscalerRuntimeConfig}
    chown ${cfg.user}:${cfg.group} ${lib.escapeShellArg autoscalerRuntimeConfig}
    chmod 400 ${lib.escapeShellArg autoscalerRuntimeConfig}
  '';
  # Environment shared by both services; only set the NSC variables whose
  # backing option was actually configured.
  dispatcherEnv =
    cfg.extraEnv
    // optionalAttrs (cfg.nscTokenFile != null) { NSC_TOKEN_FILE = nscTokenPath; }
    // optionalAttrs (cfg.nscTokenSpecFile != null) { NSC_TOKEN_SPEC_FILE = cfg.nscTokenSpecFile; }
    // optionalAttrs (cfg.nscEndpoint != null) { NSC_ENDPOINT = cfg.nscEndpoint; };
in {
  options.services.burrow.forgejoNsc = {
    enable = mkEnableOption "Forgejo Namespace Cloud runner dispatcher";

    user = mkOption {
      type = types.str;
      default = "forgejo-nsc";
      description = "System user that runs the forgejo-nsc services.";
    };
    group = mkOption {
      type = types.str;
      default = "forgejo-nsc";
      description = "System group for the forgejo-nsc services.";
    };
    stateDir = mkOption {
      type = types.str;
      default = "/var/lib/forgejo-nsc";
      description = "State directory for the dispatcher/autoscaler.";
    };
    nscTokenFile = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Optional NSC token file (exported as NSC_TOKEN_FILE).";
    };
    nscTokenSpecFile = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Optional NSC token spec file (exported as NSC_TOKEN_SPEC_FILE).";
    };
    nscEndpoint = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Optional NSC endpoint override (exported as NSC_ENDPOINT).";
    };
    extraEnv = mkOption {
      type = types.attrsOf types.str;
      default = { };
      description = "Extra environment variables injected into the services.";
    };
    nscPackage = mkOption {
      type = types.nullOr types.package;
      # Falls back to null when the flake does not expose an nsc package
      # for this platform.
      default = self.packages.${pkgs.stdenv.hostPlatform.system}.nsc or null;
      description = "Optional nsc CLI package added to the service PATH.";
    };

    dispatcher = {
      enable = mkOption {
        type = types.bool;
        default = true;
        description = "Enable the forgejo-nsc dispatcher service.";
      };
      package = mkOption {
        type = types.package;
        default = self.packages.${pkgs.stdenv.hostPlatform.system}.forgejo-nsc-dispatcher;
        description = "Package providing the forgejo-nsc dispatcher binary.";
      };
      configFile = mkOption {
        type = types.nullOr types.str;
        default = null;
        description = "Host-local YAML config file for the dispatcher.";
      };
      allowPending = mkOption {
        type = types.bool;
        default = false;
        description = "Allow placeholder values (PENDING-) in the dispatcher config.";
      };
    };

    autoscaler = {
      enable = mkOption {
        type = types.bool;
        default = false;
        description = "Enable the forgejo-nsc autoscaler service.";
      };
      package = mkOption {
        type = types.package;
        default = self.packages.${pkgs.stdenv.hostPlatform.system}.forgejo-nsc-autoscaler;
        description = "Package providing the forgejo-nsc autoscaler binary.";
      };
      configFile = mkOption {
        type = types.nullOr types.str;
        default = null;
        description = "Host-local YAML config file for the autoscaler.";
      };
      allowPending = mkOption {
        type = types.bool;
        default = false;
        description = "Allow placeholder values (PENDING-) in the autoscaler config.";
      };
    };
  };

  config = mkIf cfg.enable {
    # Each enabled service must have a config file; fail evaluation early
    # rather than producing a unit that can never start.
    assertions = [
      {
        assertion = (!cfg.dispatcher.enable) || cfg.dispatcher.configFile != null;
        message = "services.burrow.forgejoNsc.dispatcher.configFile must be set when the dispatcher is enabled.";
      }
      {
        assertion = (!cfg.autoscaler.enable) || cfg.autoscaler.configFile != null;
        message = "services.burrow.forgejoNsc.autoscaler.configFile must be set when the autoscaler is enabled.";
      }
    ];

    users.groups.${cfg.group} = { };
    users.users.${cfg.user} = {
      uid = mkDefault 2011;
      isSystemUser = true;
      group = cfg.group;
      description = "Forgejo Namespace Cloud runner services";
      home = cfg.stateDir;
      createHome = true;
      shell = pkgs.bashInteractive;
    };

    # Keep the state dir private to the service user/group.
    systemd.tmpfiles.rules = mkAfter [
      "d ${cfg.stateDir} 0750 ${cfg.user} ${cfg.group} - -"
    ];

    systemd.services.forgejo-nsc-dispatcher = mkIf cfg.dispatcher.enable {
      description = "Forgejo Namespace Cloud dispatcher";
      wantedBy = [ "multi-user.target" ];
      after = [ "network-online.target" ];
      wants = [ "network-online.target" ];
      # Skip start entirely if the host-side inputs are missing.
      unitConfig.ConditionPathExists =
        optional (cfg.dispatcher.configFile != null) cfg.dispatcher.configFile
        ++ optional (cfg.nscTokenFile != null) cfg.nscTokenFile;
      serviceConfig = {
        Type = "simple";
        User = cfg.user;
        Group = cfg.group;
        WorkingDirectory = cfg.stateDir;
        ExecStart = "${cfg.dispatcher.package}/bin/forgejo-nsc-dispatcher --config ${dispatcherRuntimeConfig}";
        Restart = "on-failure";
        RestartSec = 5;
      };
      path = lib.optional (cfg.nscPackage != null) cfg.nscPackage;
      environment = dispatcherEnv;
      # Validate placeholders first, then stage configs/token into stateDir.
      preStart = lib.concatStringsSep "\n" (lib.filter (s: s != "") [
        (optionalString (!cfg.dispatcher.allowPending) (pendingCheck cfg.dispatcher.configFile))
        dispatcherConfigSync
        tokenSync
      ]);
    };

    systemd.services.forgejo-nsc-autoscaler = mkIf cfg.autoscaler.enable {
      description = "Forgejo Namespace Cloud autoscaler";
      wantedBy = [ "multi-user.target" ];
      # Ordered after the dispatcher (when both are enabled) as well as the
      # network coming online.
      after = [ "network-online.target" "forgejo-nsc-dispatcher.service" ];
      wants = [ "network-online.target" ];
      unitConfig.ConditionPathExists =
        optional (cfg.autoscaler.configFile != null) cfg.autoscaler.configFile
        ++ optional (cfg.nscTokenFile != null) cfg.nscTokenFile;
      serviceConfig = {
        Type = "simple";
        User = cfg.user;
        Group = cfg.group;
        WorkingDirectory = cfg.stateDir;
        ExecStart = "${cfg.autoscaler.package}/bin/forgejo-nsc-autoscaler --config ${autoscalerRuntimeConfig}";
        Restart = "on-failure";
        RestartSec = 5;
      };
      path = lib.optional (cfg.nscPackage != null) cfg.nscPackage;
      # Shares the dispatcher's environment (NSC token/endpoint variables).
      environment = dispatcherEnv;
      preStart = lib.concatStringsSep "\n" (lib.filter (s: s != "") [
        (optionalString (!cfg.autoscaler.allowPending) (pendingCheck cfg.autoscaler.configFile))
        autoscalerConfigSync
        tokenSync
      ]);
    };
  };
}

View file

@ -17,6 +17,14 @@ service Networks {
rpc NetworkDelete (NetworkDeleteRequest) returns (Empty); rpc NetworkDelete (NetworkDeleteRequest) returns (Empty);
} }
// TailnetControl exposes daemon-owned Tailnet discovery and login flows
// over gRPC.
service TailnetControl {
  // Resolve tailnet settings (domain, authority, OIDC issuer) for an email.
  rpc Discover (TailnetDiscoverRequest) returns (TailnetDiscoverResponse);
  // Probe a control-server authority and report reachability.
  rpc Probe (TailnetProbeRequest) returns (TailnetProbeResponse);
  // Start a login session and return its initial status.
  rpc LoginStart (TailnetLoginStartRequest) returns (TailnetLoginStatusResponse);
  // Poll the status of an existing login session by its session ID.
  rpc LoginStatus (TailnetLoginStatusRequest) returns (TailnetLoginStatusResponse);
  // Cancel an in-progress login session.
  rpc LoginCancel (TailnetLoginCancelRequest) returns (Empty);
}
message NetworkReorderRequest { message NetworkReorderRequest {
int32 id = 1; int32 id = 1;
int32 index = 2; int32 index = 2;
@ -56,6 +64,57 @@ message Empty {
} }
// Input to TailnetControl.Discover.
message TailnetDiscoverRequest {
  // Email address used to discover tailnet settings.
  string email = 1;
}
// Result of TailnetControl.Discover.
message TailnetDiscoverResponse {
  string domain = 1;
  // Control-server authority; also accepted by Probe and LoginStart.
  string authority = 2;
  string oidc_issuer = 3;
  // Whether this tailnet is managed (semantics defined by the daemon).
  bool managed = 4;
}
// Input to TailnetControl.Probe.
message TailnetProbeRequest {
  // Control-server authority to probe.
  string authority = 1;
}
// Result of TailnetControl.Probe.
message TailnetProbeResponse {
  // Echo of the probed authority.
  string authority = 1;
  int32 status_code = 2;
  // Short human-readable outcome; detail carries any longer explanation.
  string summary = 3;
  string detail = 4;
  bool reachable = 5;
}
// Input to TailnetControl.LoginStart.
message TailnetLoginStartRequest {
  string account_name = 1;
  string identity_name = 2;
  string hostname = 3;
  // Control-server authority to log in against.
  string authority = 4;
}
// Input to TailnetControl.LoginStatus, keyed by session ID.
message TailnetLoginStatusRequest {
  string session_id = 1;
}
// Input to TailnetControl.LoginCancel, keyed by session ID.
message TailnetLoginCancelRequest {
  string session_id = 1;
}
// Snapshot of a login session, returned by LoginStart and LoginStatus.
message TailnetLoginStatusResponse {
  string session_id = 1;
  string backend_state = 2;
  // URL the user must visit to complete interactive authentication,
  // when one is available.
  string auth_url = 3;
  bool running = 4;
  bool needs_login = 5;
  string tailnet_name = 6;
  string magic_dns_suffix = 7;
  string self_dns_name = 8;
  repeated string tailnet_ips = 9;
  // Free-form health messages reported by the backend.
  repeated string health = 10;
}
enum State { enum State {
Stopped = 0; Stopped = 0;
Running = 1; Running = 1;

View file

@ -1,4 +1,5 @@
let let
conradev = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBueQxNbP2246pxr/m7au4zNVm+ShC96xuOcfEcpIjWZ";
contact = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa"; contact = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO42guJ5QvNMw3k6YKWlQnjcTsc+X4XI9F2GBtl8aHOa";
agent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net"; agent = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEN0+tRJy7Y2DW0uGYHb86N2t02WyU5lDNX6FaxBF/G8 agent@burrow.net";
burrowForgeHost = "age1quxf27gnun0xghlnxf3jrmqr3h3a3fzd8qxpallsaztd2u74pdfq9e7w9l"; burrowForgeHost = "age1quxf27gnun0xghlnxf3jrmqr3h3a3fzd8qxpallsaztd2u74pdfq9e7w9l";
@ -7,11 +8,14 @@ let
agent agent
burrowForgeHost burrowForgeHost
]; ];
uiTestRecipients = burrowForgeRecipients ++ [ conradev ];
in in
{ {
"secrets/infra/authentik.env.age".publicKeys = burrowForgeRecipients; "secrets/infra/authentik.env.age".publicKeys = burrowForgeRecipients;
"secrets/infra/authentik-google-client-id.age".publicKeys = burrowForgeRecipients; "secrets/infra/authentik-google-client-id.age".publicKeys = burrowForgeRecipients;
"secrets/infra/authentik-google-client-secret.age".publicKeys = burrowForgeRecipients; "secrets/infra/authentik-google-client-secret.age".publicKeys = burrowForgeRecipients;
"secrets/infra/authentik-ui-test-password.age".publicKeys = uiTestRecipients;
"secrets/infra/forgejo-oidc-client-secret.age".publicKeys = burrowForgeRecipients; "secrets/infra/forgejo-oidc-client-secret.age".publicKeys = burrowForgeRecipients;
"secrets/infra/headscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients; "secrets/infra/headscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients;
"secrets/infra/tailscale-oidc-client-secret.age".publicKeys = burrowForgeRecipients;
} }

View file

@ -0,0 +1,14 @@
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IHNzaC1lZDI1NTE5IHRibTVDdyBSVUdQ
Tko1VEZzRDZReWt0T0dxaW56Y1Vaa2xlYjF2YjNJbVA5S1R0bUNjCjV4S0FOOE0z
UUcxRHhGN0orRXhLWFZEMnduK0dJQ1VGNnEybXkzVzgvZlEKLT4gc3NoLWVkMjU1
MTkgdXg0TjhRIHFBR1B1alJMek1iOVhnVFNOZ2M0bktITU1FWTBEY1JVQUFDRmNO
b1NmekEKUjM1VDEyaUlIYUsvUXFnVTlDWDNvMW50Z2R6blZEM3IzdmtOblRXZTAw
UQotPiBzc2gtZWQyNTUxOSBJclptQWcgMEZoSW9ZdDZpeS8zVHdyK3BsNTloaFUy
aExDVGkxMFEzOFNKYURVQXB3NApTY1cxTlVGb2tXRWhySXZFYnBMdzJraCsweHQv
ZUlRTkQ3VlZiL0NqOTJzCi0+IFgyNTUxOSBHckgvbjN4NkFqN2tDREliU2RYRlRn
Yk5aWUtialplcWZ2b2ZCTDc1VUZZCkh1cFF4TkZmUjJmTTlvWjBsbHZiYVlmVE5K
Q29FOHhUd0NuK1dRTXhEWUkKLS0tIG5rWkdSa2VhT3c0Y2dwTW45c3o5YTBmR3Fx
cHNkeVB6NU5ScGd5bC9hcncKkd74xLid/7HzY7vVQmcrAO7Y/PRMjE6Tvd1VCf4W
T0nt+3kJHshawMTa2Lnjwp1bG2j6THJWhXp5vKxde1s1mg==
-----END AGE ENCRYPTED FILE-----

View file

@ -0,0 +1,10 @@
age-encryption.org/v1
-> ssh-ed25519 ux4N8Q KfvLMiH7JHE6v74Pp//SqzBP8WU1MNy1/EcqsONTTQQ
Y6SFXWe/5Pru6+3vU6e67bRZDWDkukdfgEX7uQjB4Uw
-> ssh-ed25519 IrZmAg AFn7BP4FktUYH9QvNJPVDdNcEpJjYqmOrisvX9XGV08
Zho+KNtk1vUQZ55j1xUHdswAj0T0Soji/HC6p1tsVcA
-> X25519 sv50iZjBijWKfp6I+LfRlEJ2sqnj5/2m0hRWz5NqLTk
Hdfvo+87zemSCFWDSlzkpmvHLuvc0tjxEt0ociTPrCg
--- BkQd4O2m/i98rlBcNhczU6Wj0htoiNLQDn0W6yKn1/c
 ªîº¿"ÁWÓLØï€§\š#ŸzDæö“ÿðRq6.¹ç«‚Òæ}#8²kâoÜyq>ÂLǸñ<E28093>\`wÆ”õ>f/ïñƒÈ®·Ñ´ý^,#
hD<>]C