Commit remaining Burrow platform work

This commit is contained in:
Conrad Kramer 2026-03-31 23:35:36 -07:00
parent fff5475914
commit 7f280c08cf
48 changed files with 2508 additions and 1864 deletions

View file

@ -1,6 +1,3 @@
[target.'cfg(unix)']
runner = "sudo -E"
[alias] # command aliases [alias] # command aliases
rr = "run --release" rr = "run --release"
bb = "build --release" bb = "build --release"

View file

@ -0,0 +1,31 @@
# Forge CI: run the full Rust workspace test suite inside the pinned nix shell.
name: Build Rust
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - "**"
  workflow_dispatch:
# Cancel superseded runs for the same ref so the runner queue stays short.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  rust:
    name: Cargo Test
    # Dedicated self-hosted runner labelled for the Burrow forge.
    runs-on: [self-hosted, linux, x86_64, burrow-forge]
    steps:
      - name: Checkout
        uses: https://code.forgejo.org/actions/checkout@v4
        with:
          token: ${{ github.token }}
          fetch-depth: 0
      - name: Test
        shell: bash
        run: |
          set -euo pipefail
          nix develop .#ci -c cargo test --workspace --all-features

View file

@ -0,0 +1,31 @@
# Forge CI: build the Next.js site from site/ inside the pinned nix shell.
name: Build Site
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - "**"
  workflow_dispatch:
# Cancel superseded runs for the same ref so the runner queue stays short.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  site:
    name: Next.js Build
    # Dedicated self-hosted runner labelled for the Burrow forge.
    runs-on: [self-hosted, linux, x86_64, burrow-forge]
    steps:
      - name: Checkout
        uses: https://code.forgejo.org/actions/checkout@v4
        with:
          token: ${{ github.token }}
          fetch-depth: 0
      - name: Build
        shell: bash
        run: |
          set -euo pipefail
          nix develop .#ci -c bash -lc 'cd site && npm install && npm run build'

View file

@ -54,6 +54,7 @@ jobs:
- name: Install Rust - name: Install Rust
uses: dtolnay/rust-toolchain@stable uses: dtolnay/rust-toolchain@stable
with: with:
toolchain: 1.85.0
targets: ${{ join(matrix.rust-targets, ', ') }} targets: ${{ join(matrix.rust-targets, ', ') }}
- name: Install Protobuf - name: Install Protobuf
shell: bash shell: bash

View file

@ -6,6 +6,9 @@ on:
pull_request: pull_request:
branches: branches:
- "*" - "*"
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs: jobs:
build: build:
name: Build Crate (${{ matrix.platform }}) name: Build Crate (${{ matrix.platform }})
@ -72,14 +75,14 @@ jobs:
- name: Install Rust - name: Install Rust
uses: dtolnay/rust-toolchain@stable uses: dtolnay/rust-toolchain@stable
with: with:
toolchain: stable toolchain: 1.85.0
components: rustfmt components: rustfmt
targets: ${{ join(matrix.targets, ', ') }} targets: ${{ join(matrix.targets, ', ') }}
- name: Setup Rust Cache - name: Setup Rust Cache
uses: Swatinem/rust-cache@v2 uses: Swatinem/rust-cache@v2
- name: Build - name: Build
shell: bash shell: bash
run: cargo build --verbose --workspace --all-features --target ${{ join(matrix.targets, ' --target ') }} --target ${{ join(matrix.test-targets, ' --target ') }} run: cargo build --locked --verbose --workspace --all-features --target ${{ join(matrix.targets, ' --target ') }} --target ${{ join(matrix.test-targets, ' --target ') }}
- name: Test - name: Test
shell: bash shell: bash
run: cargo test --verbose --workspace --all-features --target ${{ join(matrix.test-targets, ' --target ') }} run: cargo test --locked --verbose --workspace --all-features --target ${{ join(matrix.test-targets, ' --target ') }}

View file

@ -47,6 +47,7 @@ jobs:
- name: Install Rust - name: Install Rust
uses: dtolnay/rust-toolchain@stable uses: dtolnay/rust-toolchain@stable
with: with:
toolchain: 1.85.0
targets: ${{ join(matrix.rust-targets, ', ') }} targets: ${{ join(matrix.rust-targets, ', ') }}
- name: Install Protobuf - name: Install Protobuf
shell: bash shell: bash

3
.gitignore vendored
View file

@ -1,5 +1,6 @@
# Xcode # Xcode
xcuserdata xcuserdata
Apple/build/
# Swift # Swift
Apple/Package/.swiftpm/ Apple/Package/.swiftpm/
@ -12,6 +13,8 @@ target/
.idea/ .idea/
tmp/ tmp/
intake/
*.db *.db
*.sqlite3
*.sock *.sock

38
CONSTITUTION.md Normal file
View file

@ -0,0 +1,38 @@
# Burrow Constitution
1. Mission
Burrow exists to build a proper VPN: fast, inspectable, deployable on infrastructure the project controls, and legible enough that future contributors can extend it without guesswork.
2. Commitments
- Protocol work must favor correctness over novelty. Burrow does not claim support for a transport or control-plane feature until the wire format, state handling, and recovery behavior are implemented and tested.
- Security is a design constraint, not a cleanup phase. Key material, bootstrap credentials, control-plane tokens, and routing policy must have explicit storage and rotation paths.
- Performance matters. Burrow should avoid needless copies, hidden blocking, and ad hoc process graphs that make packet forwarding or control-plane convergence harder to reason about.
- Source, infrastructure, and release logic live in the repository. If the forge cannot be rebuilt from the tree, the work is incomplete.
- Non-trivial changes require a Burrow Evolution Proposal. Durable rationale belongs in the repository, not only in chat.
3. Infrastructure
Burrow controls its own forge, runners, deployment automation, and edge configuration for `burrow.net` and `burrow.rs`.
- Dedicated compute is preferred over SaaS dependencies when the dependency would hold release, source, or identity authority.
- Secrets may be bootstrapped from local intake for initial bring-up, but long-lived operation must converge on encrypted, versioned secret handling.
- Production access must be attributable. Automation identities, SSH keys, and service accounts must be named and documented.
4. Contributors
- Read this constitution before drafting product, protocol, or infrastructure changes.
- Capture intent, testing expectations, and rollback procedures in proposals.
- Prefer reversible migrations. If a change is destructive, document the preconditions and teardown plan first.
- Security-sensitive work requires explicit reviewer attention, even when the implementation is performed by an agent.
5. Governance
- Burrow Evolution Proposals (BEPs) are the primary design record for architectural, protocol, forge, and deployment changes.
- Accepted proposals are authoritative until superseded.
- Constitutional changes require a dedicated proposal that quotes the affected text and records the decision.
6. Origin
Burrow started as a firewall-burrowing client and now carries its own transport, daemon, mesh, and control-plane work. This constitution exists so the project can finish that evolution coherently.

1937
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,4 @@
FROM docker.io/library/rust:1.79-slim-bookworm AS builder FROM docker.io/library/rust:1.85-slim-bookworm AS builder
ARG TARGETPLATFORM ARG TARGETPLATFORM
ARG LLVM_VERSION=16 ARG LLVM_VERSION=16

View file

@ -1,21 +1,23 @@
tun := $(shell ifconfig -l | sed 's/ /\n/g' | grep utun | tail -n 1) tun := $(shell ifconfig -l | sed 's/ /\n/g' | grep utun | tail -n 1)
cargo_console := RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features cargo_console := env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features --
cargo_norm := RUST_BACKTRACE=1 RUST_LOG=debug cargo run cargo_norm := env RUST_BACKTRACE=1 RUST_LOG=debug cargo run --
sudo_cargo_console := sudo -E env RUST_BACKTRACE=1 RUST_LOG=debug RUSTFLAGS='--cfg tokio_unstable' cargo run --all-features --
sudo_cargo_norm := sudo -E env RUST_BACKTRACE=1 RUST_LOG=debug cargo run --
check: check:
@cargo check @cargo check
build: build:
@cargo run build @cargo build
daemon-console: daemon-console:
@$(cargo_console) daemon @$(sudo_cargo_console) daemon
daemon: daemon:
@$(cargo_norm) daemon @$(sudo_cargo_norm) daemon
start: start:
@$(cargo_norm) start @$(sudo_cargo_norm) start
stop: stop:
@$(cargo_norm) stop @$(cargo_norm) stop

View file

@ -5,10 +5,19 @@
Burrow is an open source tool for burrowing through firewalls, built by teenagers at [Hack Club](https://hackclub.com/). Burrow is an open source tool for burrowing through firewalls, built by teenagers at [Hack Club](https://hackclub.com/).
`burrow` provides a simple command-line tool to open virtual interfaces and direct traffic through them. `burrow` provides a simple command-line tool to open virtual interfaces and direct traffic through them.
Routine verification now runs unprivileged with `cargo test --workspace --all-features`; only tunnel startup needs elevation.
The repository now carries its own design and deployment record:
- [Constitution](./CONSTITUTION.md)
- [Burrow Evolution](./evolution/README.md)
- [WireGuard Rust Lineage](./docs/WIREGUARD_LINEAGE.md)
- [Protocol Roadmap](./docs/PROTOCOL_ROADMAP.md)
- [Forward Email Runbook](./docs/FORWARDEMAIL.md)
## Contributing ## Contributing
Burrow is fully open source, you can fork the repo and start contributing easily. For more information and in-depth discussions, visit the `#burrow` channel on the [Hack Club Slack](https://hackclub.com/slack/), here you can ask for help and talk with other people interested in burrow! Checkout [GETTING_STARTED.md](./docs/GETTING_STARTED.md) for build instructions and [GTK_APP.md](./docs/GTK_APP.md) for the Linux app. Burrow is fully open source, you can fork the repo and start contributing easily. For more information and in-depth discussions, visit the `#burrow` channel on the [Hack Club Slack](https://hackclub.com/slack/), here you can ask for help and talk with other people interested in burrow. Checkout [GETTING_STARTED.md](./docs/GETTING_STARTED.md) for build instructions and [GTK_APP.md](./docs/GTK_APP.md) for the Linux app. Forge and deployment scaffolding live in [`flake.nix`](./flake.nix), [`nixos/`](./nixos), and [`.forgejo/workflows/`](./.forgejo/workflows/). Hosted mail backup operations live in [`docs/FORWARDEMAIL.md`](./docs/FORWARDEMAIL.md) and [`Tools/forwardemail-custom-s3.sh`](./Tools/forwardemail-custom-s3.sh).
The project structure is divided in the following folders: The project structure is divided in the following folders:

171
Tools/forwardemail-custom-s3.sh Executable file
View file

@ -0,0 +1,171 @@
#!/usr/bin/env bash
# Configure Forward Email's custom S3 backup target for a domain, then verify
# the saved connection via /test-s3-connection. See usage() for options.
set -euo pipefail
# Temp files created below carry secrets; make them owner-readable only.
umask 077
# Print the full CLI help text to stdout.
usage() {
  cat <<'EOF'
Usage:
  Tools/forwardemail-custom-s3.sh \
    --domain burrow.net \
    --api-token-file intake/forwardemail_api_token.txt \
    --s3-endpoint https://<endpoint> \
    --s3-region <region> \
    --s3-bucket <bucket> \
    --s3-access-key-file intake/hetzner-s3-user.txt \
    --s3-secret-key-file intake/hetzner-s3-secret.txt

Options:
  --domain <domain>            Forward Email domain to update.
  --api-token-file <path>      File containing the Forward Email API token.
  --s3-endpoint <url>          S3-compatible endpoint URL.
  --s3-region <region>         S3 region string expected by Forward Email.
  --s3-bucket <name>           Bucket used for alias backup uploads.
  --s3-access-key-file <path>  File containing the S3 access key id.
  --s3-secret-key-file <path>  File containing the S3 secret access key.
  --test-only                  Skip the update call and only test the saved connection.
  --help                       Show this help text.

Notes:
  - Secrets are passed to curl through a temporary config file to avoid putting
    them in the process list.
  - By default the script updates the domain settings and then calls
    /test-s3-connection.
  - For Hetzner Object Storage, use the regional S3 endpoint such as
    https://hel1.your-objectstorage.com, not an account alias endpoint.
EOF
}
# Emit "error: <message>" on stderr and terminate the script with status 1.
fail() {
  local message="$*"
  printf 'error: %s\n' "$message" >&2
  exit 1
}
# Abort unless the given path names an existing regular file.
require_file() {
  local candidate="$1"
  if [[ ! -f "$candidate" ]]; then
    fail "missing file: $candidate"
  fi
}
# Print a secret file's contents with all CR/LF characters stripped.
# Aborts when the file is empty after stripping.
read_secret() {
  local secret_file="$1"
  local secret
  secret="$(tr -d '\r\n' < "$secret_file")"
  if [[ -z "$secret" ]]; then
    fail "empty secret file: $secret_file"
  fi
  printf '%s' "$secret"
}
# Option state, populated by the argument loop below.
domain=""
api_token_file=""
s3_endpoint=""
s3_region=""
s3_bucket=""
s3_access_key_file=""
s3_secret_key_file=""
test_only=false
# Parse long options; every value-taking flag consumes the following argument.
# "${2:-}" keeps `set -u` from aborting when a value is missing; the required
# checks below report it instead.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --domain)
      domain="${2:-}"
      shift 2
      ;;
    --api-token-file)
      api_token_file="${2:-}"
      shift 2
      ;;
    --s3-endpoint)
      s3_endpoint="${2:-}"
      shift 2
      ;;
    --s3-region)
      s3_region="${2:-}"
      shift 2
      ;;
    --s3-bucket)
      s3_bucket="${2:-}"
      shift 2
      ;;
    --s3-access-key-file)
      s3_access_key_file="${2:-}"
      shift 2
      ;;
    --s3-secret-key-file)
      s3_secret_key_file="${2:-}"
      shift 2
      ;;
    --test-only)
      test_only=true
      shift
      ;;
    --help|-h)
      usage
      exit 0
      ;;
    *)
      fail "unknown argument: $1"
      ;;
  esac
done
# Domain and API token are always required. The S3 parameters may be omitted
# only in --test-only mode, where the previously saved settings are tested.
[[ -n "$domain" ]] || fail "--domain is required"
[[ -n "$api_token_file" ]] || fail "--api-token-file is required"
[[ -n "$s3_endpoint" || "$test_only" == true ]] || fail "--s3-endpoint is required unless --test-only is set"
[[ -n "$s3_region" || "$test_only" == true ]] || fail "--s3-region is required unless --test-only is set"
[[ -n "$s3_bucket" || "$test_only" == true ]] || fail "--s3-bucket is required unless --test-only is set"
[[ -n "$s3_access_key_file" || "$test_only" == true ]] || fail "--s3-access-key-file is required unless --test-only is set"
[[ -n "$s3_secret_key_file" || "$test_only" == true ]] || fail "--s3-secret-key-file is required unless --test-only is set"
require_file "$api_token_file"
api_token="$(read_secret "$api_token_file")"
if [[ "$test_only" == false ]]; then
  require_file "$s3_access_key_file"
  require_file "$s3_secret_key_file"
  s3_access_key_id="$(read_secret "$s3_access_key_file")"
  s3_secret_access_key="$(read_secret "$s3_secret_key_file")"
  # Sanity-check the endpoint scheme before sending it upstream.
  case "$s3_endpoint" in
    http://*|https://*)
      ;;
    *)
      fail "--s3-endpoint must start with http:// or https://"
      ;;
  esac
fi
# Secrets go into a curl config file (created under umask 077) so they never
# appear in the process list; the trap removes it on every exit path.
curl_config="$(mktemp)"
trap 'rm -f "$curl_config"' EXIT
if [[ "$test_only" == false ]]; then
  # PUT the domain's custom-S3 settings.
  # NOTE(review): values are interpolated into the curl config unescaped — a
  # secret containing a double quote would break the config syntax; confirm
  # upstream credentials cannot contain quotes. `fail-with-body` needs
  # curl >= 7.76.
  cat >"$curl_config" <<EOF
silent
show-error
fail-with-body
url = "https://api.forwardemail.net/v1/domains/${domain}"
request = "PUT"
user = "${api_token}:"
data = "has_custom_s3=true"
data-urlencode = "s3_endpoint=${s3_endpoint}"
data-urlencode = "s3_access_key_id=${s3_access_key_id}"
data-urlencode = "s3_secret_access_key=${s3_secret_access_key}"
data-urlencode = "s3_region=${s3_region}"
data-urlencode = "s3_bucket=${s3_bucket}"
EOF
  printf 'Configuring Forward Email custom S3 for %s\n' "$domain" >&2
  curl --config "$curl_config"
  printf '\n' >&2
fi
# Always finish by asking Forward Email to test the stored S3 connection.
cat >"$curl_config" <<EOF
silent
show-error
fail-with-body
url = "https://api.forwardemail.net/v1/domains/${domain}/test-s3-connection"
request = "POST"
user = "${api_token}:"
EOF
printf 'Testing Forward Email custom S3 for %s\n' "$domain" >&2
curl --config "$curl_config"
printf '\n' >&2

View file

@ -0,0 +1,261 @@
#!/usr/bin/env python3
from __future__ import annotations

import argparse
import datetime as dt
import hashlib
import hmac
import sys
import textwrap
from pathlib import Path
from urllib.parse import quote, urlencode, urlparse

import requests
def read_secret(path: str) -> str:
    """Return the secret stored in *path*, stripped of surrounding whitespace.

    Exits with an error message when the file is empty.
    """
    secret = Path(path).read_text(encoding="utf-8").strip()
    if not secret:
        raise SystemExit(f"error: empty secret file: {path}")
    return secret
def sign(key: bytes, msg: str) -> bytes:
    """Return the HMAC-SHA256 digest of *msg* (UTF-8) keyed with *key*.

    One link in the AWS Signature V4 signing-key derivation chain.
    """
    mac = hmac.new(key, digestmod=hashlib.sha256)
    mac.update(msg.encode("utf-8"))
    return mac.digest()
def request(
    *,
    method: str,
    endpoint: str,
    region: str,
    access_key: str,
    secret_key: str,
    bucket: str,
    query: dict[str, str] | None = None,
    body: bytes = b"",
    content_type: str | None = None,
) -> requests.Response:
    """Issue a SigV4-signed S3 request against a path-style bucket URL.

    Builds the AWS Signature Version 4 canonical request, derives the signing
    key via `sign`, and sends the request immediately. Returns the raw
    ``requests.Response``; the caller decides which status codes are ok.

    Raises SystemExit when the endpoint is not https.
    """
    parsed = urlparse(endpoint)
    if parsed.scheme != "https":
        raise SystemExit("error: endpoint must use https")
    host = parsed.netloc
    # Path-style addressing: https://<endpoint>/<bucket>. S3 bucket-naming
    # rules keep the name URI-safe, so no extra encoding is applied here.
    canonical_uri = f"/{bucket}"
    query = query or {}
    # SigV4 mandates RFC 3986 percent-encoding in the canonical query string.
    # Plain urlencode() uses quote_plus semantics (space -> '+', '/' left
    # unescaped), which would yield an invalid signature for such values, so
    # force `quote` with only '~' marked safe.
    canonical_querystring = urlencode(
        sorted(query.items()), doseq=True, safe="~", quote_via=quote
    )
    now = dt.datetime.now(dt.timezone.utc)
    amz_date = now.strftime("%Y%m%dT%H%M%SZ")
    date_stamp = now.strftime("%Y%m%d")
    payload_hash = hashlib.sha256(body).hexdigest()
    headers = {
        "host": host,
        "x-amz-content-sha256": payload_hash,
        "x-amz-date": amz_date,
    }
    if content_type:
        headers["content-type"] = content_type
    signed_headers = ";".join(sorted(headers.keys()))
    canonical_headers = "".join(f"{name}:{headers[name]}\n" for name in sorted(headers.keys()))
    canonical_request = "\n".join(
        [
            method,
            canonical_uri,
            canonical_querystring,
            canonical_headers,
            signed_headers,
            payload_hash,
        ]
    )
    algorithm = "AWS4-HMAC-SHA256"
    credential_scope = f"{date_stamp}/{region}/s3/aws4_request"
    string_to_sign = "\n".join(
        [
            algorithm,
            amz_date,
            credential_scope,
            hashlib.sha256(canonical_request.encode("utf-8")).hexdigest(),
        ]
    )
    # SigV4 key-derivation chain: date -> region -> service -> request.
    k_date = sign(("AWS4" + secret_key).encode("utf-8"), date_stamp)
    k_region = sign(k_date, region)
    k_service = sign(k_region, "s3")
    signing_key = sign(k_service, "aws4_request")
    signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
    auth_header = (
        f"{algorithm} Credential={access_key}/{credential_scope}, "
        f"SignedHeaders={signed_headers}, Signature={signature}"
    )
    url = f"{parsed.scheme}://{host}{canonical_uri}"
    if canonical_querystring:
        url = f"{url}?{canonical_querystring}"
    response = requests.request(
        method,
        url,
        headers={**headers, "Authorization": auth_header},
        data=body,
        timeout=30,
    )
    return response
def ensure_bucket(args: argparse.Namespace, bucket: str) -> None:
    """Create *bucket* at the configured endpoint if it does not already exist.

    A HEAD request probes for the bucket first; 200 means it exists, 404 means
    it must be created, and anything else aborts the run.
    """
    probe = request(
        method="HEAD",
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
    )
    if probe.status_code == 200:
        print(f"{bucket}: exists")
        return
    if probe.status_code != 404:
        raise SystemExit(f"error: HEAD {bucket} returned {probe.status_code}: {probe.text[:200]}")
    # Creation pins the bucket to the configured region.
    payload = textwrap.dedent(
        f"""\
        <?xml version="1.0" encoding="UTF-8"?>
        <CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        <LocationConstraint>{args.region}</LocationConstraint>
        </CreateBucketConfiguration>
        """
    ).encode("utf-8")
    creation = request(
        method="PUT",
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
        body=payload,
        content_type="application/xml",
    )
    if creation.status_code not in (200, 204):
        raise SystemExit(f"error: PUT {bucket} returned {creation.status_code}: {creation.text[:200]}")
    print(f"{bucket}: created")
def put_lifecycle(args: argparse.Namespace, bucket: str) -> None:
    # Install (or overwrite) a single lifecycle rule: the empty <Prefix>
    # matches every object, so all backups expire after args.expire_days days.
    body = textwrap.dedent(
        f"""\
        <?xml version="1.0" encoding="UTF-8"?>
        <LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        <Rule>
        <ID>expire-forwardemail-backups-after-{args.expire_days}-days</ID>
        <Status>Enabled</Status>
        <Filter>
        <Prefix></Prefix>
        </Filter>
        <Expiration>
        <Days>{args.expire_days}</Days>
        </Expiration>
        </Rule>
        </LifecycleConfiguration>
        """
    ).encode("utf-8")
    response = request(
        method="PUT",
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
        query={"lifecycle": ""},
        body=body,
        content_type="application/xml",
    )
    # Accept both 200 and 204 as success across S3 implementations.
    if response.status_code not in (200, 204):
        raise SystemExit(
            f"error: PUT lifecycle for {bucket} returned {response.status_code}: {response.text[:200]}"
        )
    print(f"{bucket}: lifecycle set to {args.expire_days} days")
def get_lifecycle(args: argparse.Namespace, bucket: str) -> None:
    """Fetch and print the bucket's current lifecycle configuration."""
    reply = request(
        method="GET",
        endpoint=args.endpoint,
        region=args.region,
        access_key=args.access_key,
        secret_key=args.secret_key,
        bucket=bucket,
        query={"lifecycle": ""},
    )
    if reply.status_code != 200:
        raise SystemExit(
            f"error: GET lifecycle for {bucket} returned {reply.status_code}: {reply.text[:200]}"
        )
    print(f"=== {bucket} lifecycle ===")
    print(reply.text.strip())
def parse_args() -> argparse.Namespace:
    """Define and evaluate the command-line interface for this tool."""
    ap = argparse.ArgumentParser(
        description="Provision Hetzner object-storage buckets for Forward Email backups."
    )
    ap.add_argument(
        "--endpoint",
        default="https://hel1.your-objectstorage.com",
        help="Public S3-compatible endpoint URL. For Hetzner, use the regional endpoint, not the account alias.",
    )
    ap.add_argument("--region", default="hel1", help="S3 region.")
    ap.add_argument(
        "--access-key-file",
        default="intake/hetzner-s3-user.txt",
        help="File containing the S3 access key id.",
    )
    ap.add_argument(
        "--secret-key-file",
        default="intake/hetzner-s3-secret.txt",
        help="File containing the S3 secret key.",
    )
    ap.add_argument(
        "--bucket",
        action="append",
        required=True,
        help="Bucket to provision. Repeat for multiple buckets.",
    )
    ap.add_argument("--expire-days", type=int, default=90, help="Lifecycle expiry window in days.")
    ap.add_argument(
        "--verify-only",
        action="store_true",
        help="Skip create/update and only read the current lifecycle.",
    )
    return ap.parse_args()
def main() -> None:
    # Load secrets once and cache them on the args namespace so every
    # request() call can reuse them.
    args = parse_args()
    args.access_key = read_secret(args.access_key_file)
    args.secret_key = read_secret(args.secret_key_file)
    for bucket in args.bucket:
        if args.verify_only:
            # Read-only mode: report the current lifecycle and move on.
            get_lifecycle(args, bucket)
            continue
        ensure_bucket(args, bucket)
        put_lifecycle(args, bucket)
        # Read back what was just written as a sanity check.
        get_lifecycle(args, bucket)
if __name__ == "__main__":
    try:
        main()
    except requests.RequestException as err:
        # Network-level failures (DNS, TLS, timeouts) become a clean exit
        # message instead of a traceback.
        raise SystemExit(f"error: request failed: {err}") from err

View file

@ -11,11 +11,7 @@ use tokio::{
use tracing::{debug, error, info}; use tracing::{debug, error, info};
use crate::daemon::rpc::{ use crate::daemon::rpc::{
DaemonCommand, DaemonCommand, DaemonMessage, DaemonNotification, DaemonRequest, DaemonResponse,
DaemonMessage,
DaemonNotification,
DaemonRequest,
DaemonResponse,
DaemonResponseData, DaemonResponseData,
}; };

View file

@ -1,5 +1,6 @@
use anyhow::Result; use anyhow::Result;
use hyper_util::rt::TokioIo; use hyper_util::rt::TokioIo;
use std::path::Path;
use tokio::net::UnixStream; use tokio::net::UnixStream;
use tonic::transport::{Endpoint, Uri}; use tonic::transport::{Endpoint, Uri};
use tower::service_fn; use tower::service_fn;
@ -15,10 +16,18 @@ pub struct BurrowClient<T> {
impl BurrowClient<tonic::transport::Channel> { impl BurrowClient<tonic::transport::Channel> {
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub async fn from_uds() -> Result<Self> { pub async fn from_uds() -> Result<Self> {
Self::from_uds_path(get_socket_path()).await
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub async fn from_uds_path(path: impl AsRef<Path>) -> Result<Self> {
let socket_path = path.as_ref().to_owned();
let channel = Endpoint::try_from("http://[::]:50051")? // NOTE: this is a hack(?) let channel = Endpoint::try_from("http://[::]:50051")? // NOTE: this is a hack(?)
.connect_with_connector(service_fn(|_: Uri| async { .connect_with_connector(service_fn(move |_: Uri| {
let sock_path = get_socket_path(); let socket_path = socket_path.clone();
Ok::<_, std::io::Error>(TokioIo::new(UnixStream::connect(sock_path).await?)) async move {
Ok::<_, std::io::Error>(TokioIo::new(UnixStream::connect(&socket_path).await?))
}
})) }))
.await?; .await?;
let nw_client = NetworksClient::new(channel.clone()); let nw_client = NetworksClient::new(channel.clone());

View file

@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize};
use tun::TunOptions; use tun::TunOptions;
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
#[serde(tag="method", content="params")] #[serde(tag = "method", content = "params")]
pub enum DaemonCommand { pub enum DaemonCommand {
Start(DaemonStartOptions), Start(DaemonStartOptions),
ServerInfo, ServerInfo,

View file

@ -10,11 +10,11 @@ mod auth;
mod daemon; mod daemon;
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub mod database; pub mod database;
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub mod mesh;
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
pub mod tor; pub mod tor;
pub(crate) mod tracing; pub(crate) mod tracing;
#[cfg(target_os = "linux")]
pub mod usernet;
#[cfg(target_vendor = "apple")] #[cfg(target_vendor = "apple")]
pub use daemon::apple::spawn_in_process; pub use daemon::apple::spawn_in_process;

View file

@ -11,11 +11,10 @@ mod wireguard;
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
mod auth; mod auth;
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
mod mesh;
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
mod tor; mod tor;
#[cfg(target_os = "linux")]
mod usernet;
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
use daemon::{DaemonClient, DaemonCommand}; use daemon::{DaemonClient, DaemonCommand};
@ -74,6 +73,9 @@ enum Commands {
/// Delete Network /// Delete Network
NetworkDelete(NetworkDeleteArgs), NetworkDelete(NetworkDeleteArgs),
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
/// Run a command in an unshared Linux namespace using a Burrow backend
Exec(ExecArgs),
#[cfg(target_os = "linux")]
/// Run a command in a Linux user namespace with Tor-backed networking /// Run a command in a Linux user namespace with Tor-backed networking
TorExec(TorExecArgs), TorExec(TorExecArgs),
} }
@ -116,6 +118,17 @@ struct TorExecArgs {
command: Vec<String>, command: Vec<String>,
} }
#[cfg(target_os = "linux")]
#[derive(Args)]
struct ExecArgs {
#[arg(long, value_enum)]
backend: usernet::ExecBackendKind,
#[arg(long)]
payload: Option<String>,
#[arg(required = true, num_args = 1.., trailing_var_arg = true)]
command: Vec<String>,
}
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
async fn try_start() -> Result<()> { async fn try_start() -> Result<()> {
let mut client = BurrowClient::from_uds().await?; let mut client = BurrowClient::from_uds().await?;
@ -229,9 +242,30 @@ async fn try_network_delete(id: i32) -> Result<()> {
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
async fn try_tor_exec(payload_path: &str, command: Vec<String>) -> Result<()> { async fn try_tor_exec(payload_path: &str, command: Vec<String>) -> Result<()> {
let payload = tokio::fs::read(payload_path).await?; let exit_code = usernet::run_exec(usernet::ExecInvocation {
let config = tor::Config::from_payload(&payload)?; backend: usernet::ExecBackendKind::Tor,
let exit_code = tor::run_exec(config, command).await?; payload_path: Some(payload_path.into()),
command,
})
.await?;
if exit_code != 0 {
std::process::exit(exit_code);
}
Ok(())
}
#[cfg(target_os = "linux")]
async fn try_exec(
backend: usernet::ExecBackendKind,
payload: Option<String>,
command: Vec<String>,
) -> Result<()> {
let exit_code = usernet::run_exec(usernet::ExecInvocation {
backend,
payload_path: payload.map(Into::into),
command,
})
.await?;
if exit_code != 0 { if exit_code != 0 {
std::process::exit(exit_code); std::process::exit(exit_code);
} }
@ -315,6 +349,15 @@ async fn main() -> Result<()> {
Commands::NetworkReorder(args) => try_network_reorder(args.id, args.index).await?, Commands::NetworkReorder(args) => try_network_reorder(args.id, args.index).await?,
Commands::NetworkDelete(args) => try_network_delete(args.id).await?, Commands::NetworkDelete(args) => try_network_delete(args.id).await?,
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
Commands::Exec(args) => {
try_exec(
args.backend.clone(),
args.payload.clone(),
args.command.clone(),
)
.await?
}
#[cfg(target_os = "linux")]
Commands::TorExec(args) => try_tor_exec(&args.payload_path, args.command.clone()).await?, Commands::TorExec(args) => try_tor_exec(&args.payload_path, args.command.clone()).await?,
} }

View file

@ -76,13 +76,10 @@ pub async fn spawn(
} }
}); });
Ok(TorDnsHandle { Ok(TorDnsHandle { shutdown: shutdown_tx, task })
shutdown: shutdown_tx,
task,
})
} }
async fn build_response( pub(crate) async fn build_response(
packet: &[u8], packet: &[u8],
tor_client: &TorClient<PreferredRuntime>, tor_client: &TorClient<PreferredRuntime>,
) -> Result<Vec<u8>> { ) -> Result<Vec<u8>> {
@ -133,9 +130,11 @@ fn record_for_address(
addr: IpAddr, addr: IpAddr,
) -> Option<Record> { ) -> Option<Record> {
match (record_type, addr) { match (record_type, addr) {
(RecordType::A, IpAddr::V4(ip)) => { (RecordType::A, IpAddr::V4(ip)) => Some(Record::from_rdata(
Some(Record::from_rdata(name, DNS_TTL_SECS, RData::A(A::from(ip)))) name,
} DNS_TTL_SECS,
RData::A(A::from(ip)),
)),
(RecordType::AAAA, IpAddr::V6(ip)) => Some(Record::from_rdata( (RecordType::AAAA, IpAddr::V6(ip)) => Some(Record::from_rdata(
name, name,
DNS_TTL_SECS, DNS_TTL_SECS,

View file

@ -1,5 +1,5 @@
mod config; mod config;
mod dns; pub(crate) mod dns;
mod exec; mod exec;
mod runtime; mod runtime;
mod system; mod system;

View file

@ -118,10 +118,7 @@ pub async fn spawn_with_client(
}), }),
}; };
Ok(TorHandle { Ok(TorHandle { shutdown: shutdown_tx, task })
shutdown: shutdown_tx,
task,
})
} }
fn join_error(err: JoinError) -> anyhow::Error { fn join_error(err: JoinError) -> anyhow::Error {

View file

@ -118,7 +118,10 @@ mod tests {
}; };
let parsed = socket_addr_from_storage(&storage, size_of::<libc::sockaddr_in>()).unwrap(); let parsed = socket_addr_from_storage(&storage, size_of::<libc::sockaddr_in>()).unwrap();
assert_eq!(parsed, SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 9040))); assert_eq!(
parsed,
SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 9040))
);
} }
#[test] #[test]

935
burrow/src/usernet/mod.rs Normal file
View file

@ -0,0 +1,935 @@
use std::{
collections::HashMap,
env,
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
os::fd::{AsRawFd, FromRawFd, RawFd},
os::unix::net::UnixStream as StdUnixStream,
os::unix::process::ExitStatusExt,
path::{Path, PathBuf},
process::{Command as StdCommand, ExitStatus},
str,
sync::Arc,
time::Duration,
};
use anyhow::{anyhow, bail, Context, Result};
use clap::ValueEnum;
use futures::{SinkExt, StreamExt};
use ipnetwork::IpNetwork;
use netstack_smoltcp::{
StackBuilder, TcpListener as StackTcpListener, TcpStream as StackTcpStream,
UdpSocket as StackUdpSocket,
};
use nix::{
cmsg_space,
fcntl::{fcntl, FcntlArg, FdFlag},
sys::socket::{recvmsg, sendmsg, ControlMessage, ControlMessageOwned, MsgFlags},
};
use serde::{Deserialize, Serialize};
use tokio::{
io::copy_bidirectional,
net::{TcpStream, UdpSocket},
process::{Child, Command},
sync::{mpsc, Mutex, RwLock},
task::JoinSet,
};
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tracing::{debug, warn};
use tun::{tokio::TunInterface as TokioTunInterface, TunOptions};
use crate::{
tor::{bootstrap_client, dns::build_response as build_tor_dns_response, Config as TorConfig},
wireguard::{Config as WireGuardConfig, Interface as WireGuardInterface},
};
/// Marker env var: when set, this process is the namespaced inner child
/// (see the dispatch in `run_exec`).
const INNER_ENV: &str = "BURROW_USERNET_INNER";
/// Env var naming the control-socket fd handed to the inner child.
/// NOTE(review): inferred from the name — confirm against the spawn path.
const INNER_CONTROL_FD_ENV: &str = "BURROW_USERNET_CONTROL_FD";
/// Env var carrying serialized TUN parameters for the inner child.
/// NOTE(review): inferred from the name — confirm against the spawn path.
const INNER_TUN_CONFIG_ENV: &str = "BURROW_USERNET_TUN_CONFIG";
/// Fallback MTU when a backend payload does not specify one.
const DEFAULT_MTU: u32 = 1500;
/// Default IPv4/IPv6 CIDRs assigned to the in-namespace TUN device.
const DEFAULT_TUN_V4: &str = "100.64.0.2/24";
const DEFAULT_TUN_V6: &str = "fd00:64::2/64";
/// Idle window before a forwarded UDP flow is presumably torn down
/// (usage site is outside this chunk).
const UDP_IDLE_TIMEOUT: Duration = Duration::from_secs(30);
/// Single-byte readiness acknowledgement exchanged over the control socket.
const READY_ACK: &[u8; 1] = b"1";
/// Network backend selectable for the `exec` subcommand (clap `ValueEnum`).
#[derive(Clone, Debug, Eq, PartialEq, ValueEnum)]
pub enum ExecBackendKind {
    /// Proxy flows over plain host sockets.
    Direct,
    /// Route flows through a bootstrapped arti Tor client.
    Tor,
    /// Tunnel traffic through a WireGuard interface.
    Wireguard,
}

impl ExecBackendKind {
    /// Lowercase CLI spelling matching the `ValueEnum` names.
    fn cli_name(&self) -> &'static str {
        match self {
            Self::Direct => "direct",
            Self::Tor => "tor",
            Self::Wireguard => "wireguard",
        }
    }
}
/// One `exec` run: the backend to use, an optional payload file configuring
/// it, and the command line to execute inside the namespace.
#[derive(Clone, Debug)]
pub struct ExecInvocation {
    pub backend: ExecBackendKind,
    /// Backend configuration file. Required for the tor backend (see
    /// `prepare_backend`); the direct backend falls back to defaults.
    pub payload_path: Option<PathBuf>,
    pub command: Vec<String>,
}
/// User-supplied payload for the `direct` backend. All fields are optional;
/// an empty payload means "use defaults".
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct DirectConfig {
    // TUN addresses in CIDR notation; defaults apply when empty.
    #[serde(default)]
    pub address: Vec<String>,
    // DNS servers. NOTE(review): not referenced in this chunk — confirm use.
    #[serde(default)]
    pub dns: Vec<String>,
    #[serde(default)]
    pub mtu: Option<u32>,
    #[serde(default)]
    pub tun_name: Option<String>,
}

impl DirectConfig {
    /// Parse a payload, accepting JSON first and then TOML; an empty payload
    /// yields the all-default configuration.
    pub fn from_payload(payload: &[u8]) -> Result<Self> {
        if payload.is_empty() {
            return Ok(Self::default());
        }
        // Try JSON first; on failure fall through to TOML so both formats work.
        if let Ok(config) = serde_json::from_slice(payload) {
            return Ok(config);
        }
        let payload = str::from_utf8(payload).context("direct payload must be valid UTF-8")?;
        toml::from_str(payload).context("failed to parse direct payload as JSON or TOML")
    }
}
/// TUN device parameters shared between supervisor and inner child.
/// NOTE(review): serialization suggests transfer via `INNER_TUN_CONFIG_ENV`;
/// confirm against the spawn path (outside this chunk).
#[derive(Debug, Clone, Serialize, Deserialize)]
struct TunNetworkConfig {
    tun_name: String,
    addresses: Vec<IpNetwork>,
    mtu: u32,
}
/// Backend state resolved by `prepare_backend` before the child is spawned.
enum PreparedBackend {
    /// Socket-proxied backends (direct and tor).
    Socket {
        backend: SocketBackend,
        tun_config: TunNetworkConfig,
    },
    /// WireGuard tunnel driven from the supervisor side.
    Wireguard {
        config: WireGuardConfig,
        tun_config: TunNetworkConfig,
    },
}

impl PreparedBackend {
    /// TUN parameters, common to both variants.
    fn tun_config(&self) -> &TunNetworkConfig {
        match self {
            Self::Socket { tun_config, .. } => tun_config,
            Self::Wireguard { tun_config, .. } => tun_config,
        }
    }
}
/// Handle to the re-exec'd process inside the new namespaces, plus the
/// supervisor's end of the socketpair used to receive the tun fd and send the
/// readiness ack.
struct NamespaceChild {
    child: Child,
    control: StdUnixStream,
}
/// Outbound connector used by the userspace-stack backends.
#[derive(Clone)]
enum SocketBackend {
    /// Dial the host network directly.
    Direct,
    /// Dial through a bootstrapped Tor client shared across flows.
    Tor(Arc<arti_client::TorClient<tor_rtcompat::PreferredRuntime>>),
}
/// A UDP datagram heading back into the userspace stack.
#[derive(Debug)]
struct UdpReply {
    payload: Vec<u8>,
    // Remote peer the reply came from.
    source: SocketAddr,
    // Stack-side address that sent the original datagram.
    destination: SocketAddr,
}
/// Identifies one UDP flow: the stack-side source paired with the remote peer.
#[derive(Debug, Clone, Eq, Hash, PartialEq)]
struct UdpFlowKey {
    local: SocketAddr,
    remote: SocketAddr,
}
/// Entry point for `burrow exec`.
///
/// The same binary runs twice: once as the supervisor on the host, and once —
/// re-exec'd under `unshare` with `INNER_ENV` set — inside the namespace.
pub async fn run_exec(invocation: ExecInvocation) -> Result<i32> {
    if invocation.command.is_empty() {
        bail!("exec requires a command to run");
    }
    let inside_namespace = env::var_os(INNER_ENV).is_some();
    if inside_namespace {
        run_inner(invocation.command).await
    } else {
        run_supervisor(invocation).await
    }
}
/// Host-side role: prepare the backend, spawn the namespaced child, adopt its
/// tun device, then run the matching data path until the child exits.
async fn run_supervisor(invocation: ExecInvocation) -> Result<i32> {
    // Prepare first so configuration errors surface before we unshare.
    let backend = prepare_backend(&invocation).await?;
    let mut child = spawn_namespaced_child(&invocation, backend.tun_config())?;
    let tun = child.receive_tun().await?;
    match backend {
        PreparedBackend::Wireguard { config, .. } => {
            run_wireguard_backend(config, tun, child).await
        }
        PreparedBackend::Socket { backend, .. } => run_socket_backend(backend, tun, child).await,
    }
}
/// Load and validate the selected backend's configuration, bootstrapping any
/// long-lived clients (e.g. Tor) before the namespace is created.
async fn prepare_backend(invocation: &ExecInvocation) -> Result<PreparedBackend> {
    match invocation.backend {
        ExecBackendKind::Direct => {
            // Direct works without a payload: defaults cover everything.
            let payload = read_optional_payload(invocation.payload_path.as_deref()).await?;
            let config = DirectConfig::from_payload(&payload)?;
            let tun_config = socket_tun_config(
                &config.address,
                config.mtu,
                config.tun_name.as_deref(),
                "burrow-direct",
            )?;
            Ok(PreparedBackend::Socket {
                backend: SocketBackend::Direct,
                tun_config,
            })
        }
        ExecBackendKind::Tor => {
            let payload = read_required_payload(invocation.payload_path.as_deref(), "tor").await?;
            let mut config = TorConfig::from_payload(&payload)?;
            // Per-PID state/cache dirs — presumably to isolate concurrent
            // execs; see TorConfig::runtime_dirs for the layout.
            let (state_dir, cache_dir) = config.runtime_dirs(std::process::id() as i32);
            config.arti.state_dir = state_dir;
            config.arti.cache_dir = cache_dir;
            let tun_config = socket_tun_config(
                &config.address,
                config.mtu,
                config.tun_name.as_deref(),
                "burrow-tor",
            )?;
            // Bootstrap now so failures happen before the child is spawned.
            let tor_client = bootstrap_client(&config).await?;
            Ok(PreparedBackend::Socket {
                backend: SocketBackend::Tor(tor_client),
                tun_config,
            })
        }
        ExecBackendKind::Wireguard => {
            // WireGuard cannot run without keys/peers, so the payload is required.
            let payload =
                read_required_payload(invocation.payload_path.as_deref(), "wireguard").await?;
            let config = parse_wireguard_payload(&payload, invocation.payload_path.as_deref())?;
            let tun_config = wireguard_tun_config(&config)?;
            Ok(PreparedBackend::Wireguard { config, tun_config })
        }
    }
}
/// Re-exec this binary under `unshare` in fresh user/net/mount/pid namespaces.
///
/// The child's end of a socketpair is made inheritable and its fd number,
/// together with the JSON tun config, is passed via environment variables so
/// the inner process can hand back the tun fd and wait for readiness.
fn spawn_namespaced_child(
    invocation: &ExecInvocation,
    tun_config: &TunNetworkConfig,
) -> Result<NamespaceChild> {
    // Fail fast if the host tooling we exec below is missing.
    ensure_tool("unshare")?;
    ensure_tool("ip")?;
    let (parent_control, child_control) =
        StdUnixStream::pair().context("failed to create namespace control socket")?;
    // Clear FD_CLOEXEC so the fd survives the exec into unshare.
    set_inheritable(child_control.as_raw_fd())?;
    let current_exe = env::current_exe().context("failed to locate current burrow binary")?;
    let mut cmd = Command::new("unshare");
    cmd.args([
        "--user",
        "--map-root-user",
        "--net",
        "--mount",
        "--pid",
        "--fork",
        "--kill-child",
        "--mount-proc",
    ]);
    // INNER_ENV flips run_exec into the inner role on re-entry.
    cmd.env(INNER_ENV, "1");
    cmd.env(INNER_CONTROL_FD_ENV, child_control.as_raw_fd().to_string());
    cmd.env(
        INNER_TUN_CONFIG_ENV,
        serde_json::to_string(tun_config).context("failed to encode namespace tun config")?,
    );
    // Rebuild our own command line for the inner invocation.
    cmd.arg(current_exe);
    cmd.arg("exec");
    cmd.args(["--backend", invocation.backend.cli_name()]);
    if let Some(payload_path) = &invocation.payload_path {
        cmd.arg("--payload");
        cmd.arg(payload_path);
    }
    cmd.arg("--");
    cmd.args(&invocation.command);
    let child = cmd
        .spawn()
        .context("failed to enter unshared Linux namespace")?;
    // Close our copy of the child's end; the child holds the only live one now.
    drop(child_control);
    Ok(NamespaceChild { child, control: parent_control })
}
/// Namespace-side role: bring up loopback and the tun device, install default
/// routes, hand the tun fd to the supervisor, wait for its ready ack, then run
/// the user's command and propagate its exit code.
async fn run_inner(command: Vec<String>) -> Result<i32> {
    run_ip(["link", "set", "lo", "up"])?;
    let tun_config = read_inner_tun_config()?;
    let tun = open_tun_device(&tun_config)?;
    configure_tun_addresses(&tun, &tun_config.addresses, tun_config.mtu)?;
    let name = tun.name().context("failed to retrieve tun device name")?;
    run_ip(["link", "set", "dev", &name, "up"])?;
    install_default_routes(&name, &tun_config.addresses)?;
    let control_fd = env::var(INNER_CONTROL_FD_ENV)
        .context("missing namespace control fd")?
        .parse::<RawFd>()
        .context("invalid namespace control fd")?;
    // Ship the tun fd to the supervisor, then block until it reports that the
    // backend data path is wired up. await_parent_ready consumes (and closes)
    // the control fd.
    send_tun_fd(control_fd, tun.as_raw_fd())?;
    await_parent_ready(control_fd).await?;
    // The supervisor now holds its own duplicate of the tun fd (via
    // SCM_RIGHTS); our copy is no longer needed.
    drop(tun);
    let status = spawn_child(&command).await?;
    child_exit_code(status)
}
impl NamespaceChild {
    /// Wait for the inner process to pass its tun fd over the control socket
    /// and wrap it for async use. The blocking recvmsg runs on a worker thread.
    async fn receive_tun(&mut self) -> Result<TokioTunInterface> {
        let control = self
            .control
            .try_clone()
            .context("failed to clone namespace control socket")?;
        let fd = tokio::task::spawn_blocking(move || recv_tun_fd(&control))
            .await
            .context("failed to join namespace tun receive task")??;
        tokio_tun_from_fd(fd)
    }
    /// Write the one-byte ready ack so the inner process proceeds to exec the
    /// user command. The blocking write runs on a worker thread.
    async fn signal_ready(&self) -> Result<()> {
        let mut control = self
            .control
            .try_clone()
            .context("failed to clone namespace control socket")?;
        tokio::task::spawn_blocking(move || -> Result<()> {
            std::io::Write::write_all(&mut control, READY_ACK)
                .context("failed to acknowledge namespace readiness")?;
            Ok(())
        })
        .await
        .context("failed to join namespace ready task")??;
        Ok(())
    }
    /// Wait for the namespaced child (and therefore the user command) to exit.
    async fn wait(mut self) -> Result<ExitStatus> {
        self.child
            .wait()
            .await
            .context("failed to wait for namespace child")
    }
}
/// Drive the direct/tor data path: pump packets between the tun device and a
/// userspace netstack, and bridge the stack's TCP/UDP flows through `backend`.
/// Runs until the namespaced child exits; returns its exit code.
async fn run_socket_backend(
    backend: SocketBackend,
    tun: TokioTunInterface,
    child: NamespaceChild,
) -> Result<i32> {
    let tun = Arc::new(tun);
    let (stack, runner, udp_socket, tcp_listener) = StackBuilder::default()
        .stack_buffer_size(1024)
        .udp_buffer_size(1024)
        .tcp_buffer_size(1024)
        .enable_udp(true)
        .enable_tcp(true)
        .enable_icmp(true)
        .build()
        .context("failed to build userspace netstack")?;
    let (mut stack_sink, mut stack_stream) = stack.split();
    let mut tasks = JoinSet::new();
    if let Some(runner) = runner {
        tasks.spawn(async move { runner.await.map_err(anyhow::Error::from) });
    }
    // tun -> stack: feed raw packets from the device into the netstack.
    {
        let tun = tun.clone();
        tasks.spawn(async move {
            let mut buf = vec![0u8; 65_535];
            loop {
                let len = tun
                    .recv(&mut buf)
                    .await
                    .context("failed to read packet from tun")?;
                if len == 0 {
                    continue;
                }
                stack_sink
                    .send(buf[..len].to_vec())
                    .await
                    .context("failed to send tun packet into userspace stack")?;
            }
            // The loop above only exits by returning an error.
            #[allow(unreachable_code)]
            Result::<()>::Ok(())
        });
    }
    // stack -> tun: write packets emitted by the netstack back to the device.
    {
        let tun = tun.clone();
        tasks.spawn(async move {
            while let Some(packet) = stack_stream.next().await {
                let packet = packet.context("failed to receive packet from userspace stack")?;
                tun.send(&packet)
                    .await
                    .context("failed to write userspace stack packet to tun")?;
            }
            Result::<()>::Ok(())
        });
    }
    if let Some(tcp_listener) = tcp_listener {
        let backend = backend.clone();
        tasks.spawn(async move { tcp_dispatch_loop(tcp_listener, backend).await });
    }
    if let Some(udp_socket) = udp_socket {
        tasks.spawn(async move { udp_dispatch_loop(udp_socket, backend).await });
    }
    // Networking is live: unblock the child, then wait for the user command.
    child.signal_ready().await?;
    let status = child.wait().await?;
    // Tear down the data path; task errors at this point are only logged.
    tasks.abort_all();
    while let Some(joined) = tasks.join_next().await {
        match joined {
            Ok(Ok(())) => {}
            Ok(Err(err)) => debug!(?err, "usernet background task exited with error"),
            Err(err) if err.is_cancelled() => {}
            Err(err) => debug!(?err, "usernet background task panicked"),
        }
    }
    child_exit_code(status)
}
/// Drive the WireGuard data path: attach the tun to the interface, run it
/// until the namespaced child exits, then detach and return the exit code.
async fn run_wireguard_backend(
    config: WireGuardConfig,
    tun: TokioTunInterface,
    child: NamespaceChild,
) -> Result<i32> {
    let interface: WireGuardInterface = config.try_into()?;
    interface.set_tun(tun).await;
    let interface = Arc::new(interface);
    let runner = {
        let interface = interface.clone();
        tokio::spawn(async move { interface.run().await })
    };
    // Tunnel is attached: unblock the child command, then wait it out.
    child.signal_ready().await?;
    let status = child.wait().await?;
    // Detach the tun; runtime errors after the child exits are only logged.
    interface.remove_tun().await;
    match runner.await {
        Ok(Ok(())) => {}
        Ok(Err(err)) => debug!(?err, "wireguard exec runtime exited with error"),
        Err(err) if err.is_cancelled() => {}
        Err(err) => debug!(?err, "wireguard exec runtime panicked"),
    }
    child_exit_code(status)
}
/// Accept TCP streams from the userspace stack and bridge each one on its own
/// task until the listener is exhausted.
async fn tcp_dispatch_loop(mut listener: StackTcpListener, backend: SocketBackend) -> Result<()> {
    let mut tasks = JoinSet::new();
    loop {
        tokio::select! {
            // Reap finished bridge tasks as we go; per-flow failures are
            // logged, not fatal to the loop.
            Some(result) = tasks.join_next(), if !tasks.is_empty() => {
                match result {
                    Ok(Ok(())) => {}
                    Ok(Err(err)) => warn!(?err, "tcp bridge task failed"),
                    Err(err) if err.is_cancelled() => {}
                    Err(err) => warn!(?err, "tcp bridge task panicked"),
                }
            }
            next = listener.next() => match next {
                Some((stream, local_addr, remote_addr)) => {
                    debug!(%local_addr, %remote_addr, "accepted userspace tcp stream");
                    let backend = backend.clone();
                    tasks.spawn(async move {
                        bridge_tcp(backend, stream, local_addr, remote_addr).await
                    });
                }
                None => break,
            }
        }
    }
    // Listener closed: cancel remaining bridges and drain their results.
    tasks.abort_all();
    while let Some(result) = tasks.join_next().await {
        match result {
            Ok(Ok(())) => {}
            Ok(Err(err)) => debug!(?err, "tcp bridge task exited during shutdown"),
            Err(err) if err.is_cancelled() => {}
            Err(err) => debug!(?err, "tcp bridge task panicked during shutdown"),
        }
    }
    Ok(())
}
/// Copy bytes both ways between a stack-side TCP stream and an outbound
/// connection dialed through the selected backend, until either side closes.
async fn bridge_tcp(
    backend: SocketBackend,
    mut inbound: StackTcpStream,
    _local_addr: SocketAddr,
    remote_addr: SocketAddr,
) -> Result<()> {
    match backend {
        SocketBackend::Direct => {
            debug!(%remote_addr, "dialing direct outbound tcp");
            let mut outbound = TcpStream::connect(remote_addr)
                .await
                .with_context(|| format!("failed to connect to {remote_addr}"))?;
            copy_bidirectional(&mut inbound, &mut outbound)
                .await
                .with_context(|| format!("failed to bridge tcp stream for {remote_addr}"))?;
        }
        SocketBackend::Tor(tor_client) => {
            debug!(%remote_addr, "dialing tor outbound tcp");
            let tor_stream = tor_client
                .connect((remote_addr.ip().to_string(), remote_addr.port()))
                .await
                .with_context(|| format!("failed to connect to {remote_addr} over tor"))?;
            // `.compat()` adapts the Tor stream to tokio's AsyncRead/AsyncWrite
            // so copy_bidirectional can drive it.
            let mut tor_stream = tor_stream.compat();
            copy_bidirectional(&mut inbound, &mut tor_stream)
                .await
                .with_context(|| format!("failed to bridge tor stream for {remote_addr}"))?;
        }
    }
    Ok(())
}
/// Bridge the stack's UDP traffic. Direct flows get a per-flow host socket;
/// tor only answers DNS (port 53) queries and drops everything else. All
/// replies funnel back through one mpsc channel so the stack writer has a
/// single owner.
async fn udp_dispatch_loop(socket: StackUdpSocket, backend: SocketBackend) -> Result<()> {
    let (mut udp_reader, mut udp_writer) = socket.split();
    let (reply_tx, mut reply_rx) = mpsc::channel::<UdpReply>(128);
    // Live direct sessions, keyed by (stack-local, remote) flow.
    let direct_sessions = Arc::new(Mutex::new(
        HashMap::<UdpFlowKey, mpsc::Sender<Vec<u8>>>::new(),
    ));
    let mut session_tasks = JoinSet::new();
    loop {
        tokio::select! {
            // Reap finished session tasks so the JoinSet doesn't grow unbounded.
            Some(result) = session_tasks.join_next(), if !session_tasks.is_empty() => {
                match result {
                    Ok(Ok(())) => {}
                    Ok(Err(err)) => warn!(?err, "udp session task failed"),
                    Err(err) if err.is_cancelled() => {}
                    Err(err) => warn!(?err, "udp session task panicked"),
                }
            }
            maybe_reply = reply_rx.recv() => match maybe_reply {
                Some(reply) => {
                    udp_writer
                        .send((reply.payload, reply.source, reply.destination))
                        .await
                        .context("failed to write udp reply into userspace stack")?;
                }
                None => break,
            },
            maybe_datagram = udp_reader.next() => match maybe_datagram {
                Some((payload, local_addr, remote_addr)) => {
                    match &backend {
                        SocketBackend::Direct => {
                            dispatch_direct_udp(
                                payload,
                                local_addr,
                                remote_addr,
                                reply_tx.clone(),
                                direct_sessions.clone(),
                                &mut session_tasks,
                            ).await?;
                        }
                        SocketBackend::Tor(tor_client) => {
                            // Tor cannot carry arbitrary UDP; only DNS queries
                            // are answered (synthesized via the tor resolver).
                            if remote_addr.port() != 53 {
                                debug!(%remote_addr, "dropping non-DNS UDP datagram for tor backend");
                                continue;
                            }
                            let response = build_tor_dns_response(&payload, tor_client.as_ref()).await?;
                            reply_tx
                                .send(UdpReply {
                                    payload: response,
                                    source: remote_addr,
                                    destination: local_addr,
                                })
                                .await
                                .context("failed to enqueue tor dns response")?;
                        }
                    }
                }
                None => break,
            }
        }
    }
    // Reader or reply channel closed: cancel sessions and drain their results.
    session_tasks.abort_all();
    while let Some(result) = session_tasks.join_next().await {
        match result {
            Ok(Ok(())) => {}
            Ok(Err(err)) => debug!(?err, "udp session task exited during shutdown"),
            Err(err) if err.is_cancelled() => {}
            Err(err) => debug!(?err, "udp session task panicked during shutdown"),
        }
    }
    Ok(())
}
/// Route one outbound UDP datagram from the stack to its per-flow session,
/// creating the session task on first use.
///
/// If an existing session's channel is closed (the task already exited, e.g.
/// after the idle timeout), the stale entry is removed and a fresh session is
/// started with this datagram.
async fn dispatch_direct_udp(
    payload: Vec<u8>,
    local_addr: SocketAddr,
    remote_addr: SocketAddr,
    reply_tx: mpsc::Sender<UdpReply>,
    sessions: Arc<Mutex<HashMap<UdpFlowKey, mpsc::Sender<Vec<u8>>>>>,
    session_tasks: &mut JoinSet<Result<()>>,
) -> Result<()> {
    let key = UdpFlowKey {
        local: local_addr,
        remote: remote_addr,
    };
    // Clone the sender out so the map lock is not held across the send below.
    let existing = { sessions.lock().await.get(&key).cloned() };
    let payload = match existing {
        Some(sender) => match sender.send(payload).await {
            Ok(()) => return Ok(()),
            Err(err) => {
                // Session task is gone: drop the stale entry and recover the
                // payload from the SendError instead of cloning it up front.
                sessions.lock().await.remove(&key);
                err.0
            }
        },
        None => payload,
    };
    let (tx, rx) = mpsc::channel::<Vec<u8>>(32);
    tx.send(payload)
        .await
        .context("failed to enqueue outbound udp payload")?;
    sessions.lock().await.insert(key.clone(), tx);
    session_tasks.spawn(async move { run_direct_udp_session(key, rx, reply_tx, sessions).await });
    Ok(())
}
/// One direct UDP flow: a connected host socket that relays datagrams both
/// ways until the outbound channel closes or the flow idles out.
async fn run_direct_udp_session(
    key: UdpFlowKey,
    mut outbound_rx: mpsc::Receiver<Vec<u8>>,
    reply_tx: mpsc::Sender<UdpReply>,
    sessions: Arc<Mutex<HashMap<UdpFlowKey, mpsc::Sender<Vec<u8>>>>>,
) -> Result<()> {
    // Bind on the matching address family so connect() below cannot mismatch.
    let bind_addr = match key.remote {
        SocketAddr::V4(_) => SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
        SocketAddr::V6(_) => SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0),
    };
    let socket = UdpSocket::bind(bind_addr)
        .await
        .with_context(|| format!("failed to bind udp socket for {}", key.remote))?;
    socket
        .connect(key.remote)
        .await
        .with_context(|| format!("failed to connect udp socket to {}", key.remote))?;
    let mut buf = vec![0u8; 65_535];
    loop {
        tokio::select! {
            maybe_payload = outbound_rx.recv() => match maybe_payload {
                Some(payload) => {
                    socket
                        .send(&payload)
                        .await
                        .with_context(|| format!("failed to send udp payload to {}", key.remote))?;
                }
                // Dispatcher dropped the sender: the flow is finished.
                None => break,
            },
            recv = tokio::time::timeout(UDP_IDLE_TIMEOUT, socket.recv(&mut buf)) => match recv {
                Ok(Ok(len)) => {
                    reply_tx
                        .send(UdpReply {
                            payload: buf[..len].to_vec(),
                            source: key.remote,
                            destination: key.local,
                        })
                        .await
                        .context("failed to enqueue inbound udp reply")?;
                }
                Ok(Err(err)) => return Err(err).with_context(|| format!("failed to receive udp response from {}", key.remote)),
                // No inbound traffic within the idle window: expire the session.
                Err(_) => break,
            }
        }
    }
    // Deregister so a later datagram for this flow starts a fresh session.
    sessions.lock().await.remove(&key);
    Ok(())
}
/// Derive the namespace tunnel parameters from a WireGuard interface config.
fn wireguard_tun_config(config: &WireGuardConfig) -> Result<TunNetworkConfig> {
    let iface = &config.interface;
    parse_tun_config(&iface.address, iface.mtu, Some("burrow-wireguard"))
}
/// Build the tunnel parameters for a socket backend, substituting dual-stack
/// default addresses when none are configured and `default_name` when no tun
/// name override is given.
fn socket_tun_config(
    addresses: &[String],
    mtu: Option<u32>,
    tun_name: Option<&str>,
    default_name: &str,
) -> Result<TunNetworkConfig> {
    let fallback;
    let effective = match addresses {
        [] => {
            fallback = [DEFAULT_TUN_V4.to_string(), DEFAULT_TUN_V6.to_string()];
            &fallback[..]
        }
        configured => configured,
    };
    let name = tun_name.unwrap_or(default_name);
    parse_tun_config(effective, mtu, Some(name))
}
/// Parse the CIDR address strings and assemble the tunnel config, applying the
/// "burrow-exec" name and `DEFAULT_MTU` fallbacks.
fn parse_tun_config(
    addresses: &[String],
    mtu: Option<u32>,
    tun_name: Option<&str>,
) -> Result<TunNetworkConfig> {
    let mut parsed = Vec::with_capacity(addresses.len());
    for addr in addresses {
        let network = addr
            .parse::<IpNetwork>()
            .with_context(|| format!("invalid tunnel address '{addr}'"))?;
        parsed.push(network);
    }
    Ok(TunNetworkConfig {
        tun_name: tun_name.unwrap_or("burrow-exec").to_string(),
        addresses: parsed,
        mtu: mtu.unwrap_or(DEFAULT_MTU),
    })
}
/// Create the tun device inside the namespace with the configured name.
fn open_tun_device(config: &TunNetworkConfig) -> Result<tun::TunInterface> {
    let tun = TunOptions::new()
        .name(&config.tun_name)
        // No packet-info header; we exchange raw IP packets.
        .no_pi(true)
        // Exclusive creation: fail rather than attach to an existing device.
        .tun_excl(true)
        .open()
        .context("failed to create tun device")?;
    // Unwrap the synchronous interface; async wrapping happens supervisor-side.
    Ok(tun.inner.into_inner())
}
/// Wrap a raw tun fd (received over the control socket) in an async interface.
fn tokio_tun_from_fd(fd: RawFd) -> Result<TokioTunInterface> {
    // SAFETY: `fd` comes from recv_tun_fd, where the kernel installed it as a
    // fresh descriptor owned by this process (SCM_RIGHTS); nothing else closes
    // it, so adopting ownership here cannot double-close.
    let tun = unsafe { tun::TunInterface::from_raw_fd(fd) };
    TokioTunInterface::new(tun).context("failed to wrap tun fd in tokio interface")
}
/// Decode the tun parameters the supervisor passed via the environment.
fn read_inner_tun_config() -> Result<TunNetworkConfig> {
    let encoded = env::var(INNER_TUN_CONFIG_ENV).context("missing namespace tun config")?;
    let config = serde_json::from_str(&encoded).context("invalid namespace tun config")?;
    Ok(config)
}
/// Assign the configured addresses to the tun device and set its MTU.
fn configure_tun_addresses(
    iface: &tun::TunInterface,
    networks: &[IpNetwork],
    mtu: u32,
) -> Result<()> {
    for network in networks {
        match network {
            // NOTE(review): set_ipv4_addr/set_netmask replace rather than add,
            // so multiple V4 networks overwrite each other — confirm the
            // single-V4 assumption holds for all callers.
            IpNetwork::V4(net) => {
                iface.set_ipv4_addr(net.ip())?;
                let netmask = prefix_to_netmask_v4(net.prefix());
                iface.set_netmask(netmask)?;
                iface.set_broadcast_addr(broadcast_v4(net.ip(), netmask))?;
            }
            // IPv6 addresses are added, so several can coexist.
            IpNetwork::V6(net) => iface.add_ipv6_addr(net.ip(), net.prefix())?,
        }
    }
    iface.set_mtu(mtu as i32)?;
    Ok(())
}
/// Point the namespace's default routes at the tun device, for whichever
/// address families appear in `networks`.
fn install_default_routes(name: &str, networks: &[IpNetwork]) -> Result<()> {
    let has_v4 = networks.iter().any(|n| matches!(n, IpNetwork::V4(_)));
    let has_v6 = networks.iter().any(|n| matches!(n, IpNetwork::V6(_)));
    if has_v4 {
        run_ip(["route", "replace", "default", "dev", name])?;
    }
    if has_v6 {
        run_ip(["-6", "route", "replace", "default", "dev", name])?;
    }
    Ok(())
}
/// Run `ip` with the given arguments, failing on a non-zero exit status.
fn run_ip<const N: usize>(args: [&str; N]) -> Result<()> {
    let status = StdCommand::new("ip")
        .args(args)
        .status()
        .context("failed to execute ip command")?;
    if status.success() {
        Ok(())
    } else {
        bail!("ip {} failed with status {}", args.join(" "), status)
    }
}
/// Clear FD_CLOEXEC on `fd` so it survives the exec into `unshare` and is
/// inherited by the namespaced child.
fn set_inheritable(fd: RawFd) -> Result<()> {
    let flags = FdFlag::from_bits_truncate(
        fcntl(fd, FcntlArg::F_GETFD).context("failed to query descriptor flags")?,
    );
    // Preserve any other descriptor flags; only drop close-on-exec.
    let flags = flags & !FdFlag::FD_CLOEXEC;
    fcntl(fd, FcntlArg::F_SETFD(flags)).context("failed to clear close-on-exec")?;
    Ok(())
}
/// Block (on a worker thread) until the supervisor writes the ready ack,
/// signalling that the backend data path is wired up.
///
/// Consumes `control_fd`: the stream wrapper closes the descriptor on return.
async fn await_parent_ready(control_fd: RawFd) -> Result<()> {
    tokio::task::spawn_blocking(move || -> Result<()> {
        // SAFETY: the inner process uses the control fd for exactly two
        // operations (send_tun_fd, then this read) and nothing else owns it,
        // so adopting and closing it here is sound.
        let mut control = unsafe { StdUnixStream::from_raw_fd(control_fd) };
        let mut ack = [0u8; 1];
        std::io::Read::read_exact(&mut control, &mut ack)
            .context("failed to read namespace ready ack")?;
        if ack != *READY_ACK {
            bail!("unexpected namespace ready ack");
        }
        Ok(())
    })
    .await
    .context("failed to join namespace ready wait task")??;
    Ok(())
}
/// Pass the tun fd to the supervisor over the control socket via SCM_RIGHTS.
fn send_tun_fd(control_fd: RawFd, tun_fd: RawFd) -> Result<()> {
    // One placeholder data byte; the fd travels in the ancillary message.
    let buf = [0u8; 1];
    let iov = [std::io::IoSlice::new(&buf)];
    let fds = [tun_fd];
    sendmsg::<()>(
        control_fd,
        &iov,
        &[ControlMessage::ScmRights(&fds)],
        MsgFlags::empty(),
        None,
    )
    .context("failed to send tun fd to parent")?;
    Ok(())
}
/// Receive the tun fd the namespaced child sent via SCM_RIGHTS.
///
/// Returns the first fd found in the ancillary data; the kernel installs it
/// as a fresh descriptor owned by this process.
fn recv_tun_fd(control: &StdUnixStream) -> Result<RawFd> {
    // Matches the single placeholder byte written by send_tun_fd.
    let mut buf = [0u8; 1];
    let mut iov = [std::io::IoSliceMut::new(&mut buf)];
    let mut cmsgspace = cmsg_space!([RawFd; 1]);
    let msg = recvmsg::<()>(
        control.as_raw_fd(),
        &mut iov,
        Some(&mut cmsgspace),
        MsgFlags::empty(),
    )
    .context("failed to receive tun fd from namespace child")?;
    for cmsg in msg.cmsgs() {
        if let ControlMessageOwned::ScmRights(fds) = cmsg {
            if let Some(fd) = fds.first() {
                return Ok(*fd);
            }
        }
    }
    bail!("namespace child did not send a tun fd")
}
/// Verify that a required external tool is resolvable before we depend on it.
///
/// Uses `sh -c` (not `-l`): a login shell would source profile scripts, which
/// is slow and can emit noise or mutate PATH during a simple probe. `tool` is
/// only ever a compile-time constant here ("unshare", "ip"); if it ever
/// becomes user-supplied, the interpolation below must be escaped.
fn ensure_tool(tool: &str) -> Result<()> {
    let status = StdCommand::new("sh")
        .args(["-c", &format!("command -v {tool} >/dev/null")])
        .status()
        .with_context(|| format!("failed to probe required tool '{tool}'"))?;
    if !status.success() {
        bail!("required host tool '{tool}' is not available");
    }
    Ok(())
}
/// Read the payload file if a path was supplied; otherwise yield no bytes.
async fn read_optional_payload(path: Option<&Path>) -> Result<Vec<u8>> {
    let Some(path) = path else {
        return Ok(Vec::new());
    };
    tokio::fs::read(path)
        .await
        .with_context(|| format!("failed to read payload from {}", path.display()))
}
/// Read the payload file for a backend that cannot run without one.
async fn read_required_payload(path: Option<&Path>, backend: &str) -> Result<Vec<u8>> {
    match path {
        Some(path) => tokio::fs::read(path)
            .await
            .with_context(|| format!("failed to read payload from {}", path.display())),
        None => Err(anyhow!("{backend} exec requires --payload")),
    }
}
/// Parse a WireGuard config payload, using the file extension as a format hint
/// when available; without one, try TOML first and fall back to INI.
fn parse_wireguard_payload(payload: &[u8], path: Option<&Path>) -> Result<WireGuardConfig> {
    let payload = str::from_utf8(payload).context("wireguard payload must be valid UTF-8")?;
    if let Some(path) = path {
        if let Some(ext) = path.extension().and_then(|ext| ext.to_str()) {
            return WireGuardConfig::from_content_fmt(payload, ext);
        }
    }
    WireGuardConfig::from_toml(payload).or_else(|_| WireGuardConfig::from_ini(payload))
}
async fn spawn_child(command: &[String]) -> Result<ExitStatus> {
let mut cmd = Command::new(&command[0]);
if command.len() > 1 {
cmd.args(&command[1..]);
}
cmd.stdin(std::process::Stdio::inherit());
cmd.stdout(std::process::Stdio::inherit());
cmd.stderr(std::process::Stdio::inherit());
cmd.kill_on_drop(true);
cmd.status()
.await
.with_context(|| format!("failed to spawn '{}'", command[0]))
}
/// Map a child's exit status to a shell-style exit code: the code itself when
/// present, or 128 + signal number when the child was killed by a signal.
fn child_exit_code(status: ExitStatus) -> Result<i32> {
    match (status.code(), status.signal()) {
        (Some(code), _) => Ok(code),
        (None, Some(signal)) => Ok(128 + signal),
        (None, None) => bail!("child process terminated without an exit code"),
    }
}
/// Convert a CIDR prefix length into a dotted-quad IPv4 netmask.
///
/// Prefixes longer than 32 are clamped to a full /32 mask: the original shift
/// `(32 - prefix)` would overflow for such inputs (panic in debug builds).
/// `IpNetwork` already guarantees in-range prefixes, so the clamp is purely
/// defensive. A zero prefix is special-cased because a 32-bit shift by 32 is
/// itself undefined for `u32`.
fn prefix_to_netmask_v4(prefix: u8) -> Ipv4Addr {
    let prefix = u32::from(prefix.min(32));
    if prefix == 0 {
        Ipv4Addr::new(0, 0, 0, 0)
    } else {
        Ipv4Addr::from(u32::MAX << (32 - prefix))
    }
}
/// Broadcast address of `ip`'s subnet: all host bits (the complement of the
/// netmask) set to one.
fn broadcast_v4(ip: Ipv4Addr, netmask: Ipv4Addr) -> Ipv4Addr {
    let host_bits = !u32::from(netmask);
    Ipv4Addr::from(u32::from(ip) | host_bits)
}
#[cfg(test)]
mod tests {
    use super::*;
    // A JSON direct payload should populate every optional field.
    #[test]
    fn parses_direct_json_payload() {
        let payload = br#"{"address":["10.0.0.2/24"],"mtu":1400,"tun_name":"burrow0"}"#;
        let config = DirectConfig::from_payload(payload).unwrap();
        assert_eq!(config.address, vec!["10.0.0.2/24"]);
        assert_eq!(config.mtu, Some(1400));
        assert_eq!(config.tun_name.as_deref(), Some("burrow0"));
    }
    // With no addresses configured, both the v4 and v6 defaults are applied.
    #[test]
    fn socket_tun_config_uses_dual_stack_defaults() {
        let config = socket_tun_config(&[], None, None, "burrow-test").unwrap();
        assert_eq!(config.tun_name, "burrow-test");
        assert!(config
            .addresses
            .iter()
            .any(|network| matches!(network, IpNetwork::V4(_))));
        assert!(config
            .addresses
            .iter()
            .any(|network| matches!(network, IpNetwork::V6(_))));
    }
}

View file

@ -148,7 +148,7 @@ impl Interface {
debug!("Routing packet to {}", dst_addr); debug!("Routing packet to {}", dst_addr);
let Some(idx) = pcbs.find(dst_addr) else { let Some(idx) = pcbs.find(dst_addr) else {
continue continue;
}; };
debug!("Found peer:{}", idx); debug!("Found peer:{}", idx);

View file

@ -9,20 +9,15 @@ use std::{
use aead::{Aead, Payload}; use aead::{Aead, Payload};
use blake2::{ use blake2::{
digest::{FixedOutput, KeyInit}, digest::{FixedOutput, KeyInit},
Blake2s256, Blake2s256, Blake2sMac, Digest,
Blake2sMac,
Digest,
}; };
use chacha20poly1305::XChaCha20Poly1305; use chacha20poly1305::XChaCha20Poly1305;
use rand_core::OsRng; use rand_core::OsRng;
use ring::aead::{Aad, LessSafeKey, Nonce, UnboundKey, CHACHA20_POLY1305}; use ring::aead::{Aad, LessSafeKey, Nonce, UnboundKey, CHACHA20_POLY1305};
use subtle::ConstantTimeEq;
use super::{ use super::{
errors::WireGuardError, errors::WireGuardError, session::Session, x25519, HandshakeInit, HandshakeResponse,
session::Session,
x25519,
HandshakeInit,
HandshakeResponse,
PacketCookieReply, PacketCookieReply,
}; };
@ -209,7 +204,7 @@ impl Tai64N {
/// Parse a timestamp from a 12 byte u8 slice /// Parse a timestamp from a 12 byte u8 slice
fn parse(buf: &[u8; 12]) -> Result<Tai64N, WireGuardError> { fn parse(buf: &[u8; 12]) -> Result<Tai64N, WireGuardError> {
if buf.len() < 12 { if buf.len() < 12 {
return Err(WireGuardError::InvalidTai64nTimestamp) return Err(WireGuardError::InvalidTai64nTimestamp);
} }
let (sec_bytes, nano_bytes) = buf.split_at(std::mem::size_of::<u64>()); let (sec_bytes, nano_bytes) = buf.split_at(std::mem::size_of::<u64>());
@ -534,11 +529,14 @@ impl Handshake {
&hash, &hash,
)?; )?;
ring::constant_time::verify_slices_are_equal( if !bool::from(
self.params.peer_static_public.as_bytes(), self.params
&peer_static_public_decrypted, .peer_static_public
) .as_bytes()
.map_err(|_| WireGuardError::WrongKey)?; .ct_eq(&peer_static_public_decrypted),
) {
return Err(WireGuardError::WrongKey);
}
// initiator.hash = HASH(initiator.hash || msg.encrypted_static) // initiator.hash = HASH(initiator.hash || msg.encrypted_static)
hash = b2s_hash(&hash, packet.encrypted_static); hash = b2s_hash(&hash, packet.encrypted_static);
@ -556,19 +554,22 @@ impl Handshake {
let timestamp = Tai64N::parse(&timestamp)?; let timestamp = Tai64N::parse(&timestamp)?;
if !timestamp.after(&self.last_handshake_timestamp) { if !timestamp.after(&self.last_handshake_timestamp) {
// Possibly a replay // Possibly a replay
return Err(WireGuardError::WrongTai64nTimestamp) return Err(WireGuardError::WrongTai64nTimestamp);
} }
self.last_handshake_timestamp = timestamp; self.last_handshake_timestamp = timestamp;
// initiator.hash = HASH(initiator.hash || msg.encrypted_timestamp) // initiator.hash = HASH(initiator.hash || msg.encrypted_timestamp)
hash = b2s_hash(&hash, packet.encrypted_timestamp); hash = b2s_hash(&hash, packet.encrypted_timestamp);
self.previous = std::mem::replace(&mut self.state, HandshakeState::InitReceived { self.previous = std::mem::replace(
chaining_key, &mut self.state,
hash, HandshakeState::InitReceived {
peer_ephemeral_public, chaining_key,
peer_index, hash,
}); peer_ephemeral_public,
peer_index,
},
);
self.format_handshake_response(dst) self.format_handshake_response(dst)
} }
@ -669,7 +670,7 @@ impl Handshake {
let local_index = self.cookies.index; let local_index = self.cookies.index;
if packet.receiver_idx != local_index { if packet.receiver_idx != local_index {
return Err(WireGuardError::WrongIndex) return Err(WireGuardError::WrongIndex);
} }
// msg.encrypted_cookie = XAEAD(HASH(LABEL_COOKIE || responder.static_public), // msg.encrypted_cookie = XAEAD(HASH(LABEL_COOKIE || responder.static_public),
// msg.nonce, cookie, last_received_msg.mac1) // msg.nonce, cookie, last_received_msg.mac1)
@ -725,7 +726,7 @@ impl Handshake {
dst: &'a mut [u8], dst: &'a mut [u8],
) -> Result<&'a mut [u8], WireGuardError> { ) -> Result<&'a mut [u8], WireGuardError> {
if dst.len() < super::HANDSHAKE_INIT_SZ { if dst.len() < super::HANDSHAKE_INIT_SZ {
return Err(WireGuardError::DestinationBufferTooSmall) return Err(WireGuardError::DestinationBufferTooSmall);
} }
let (message_type, rest) = dst.split_at_mut(4); let (message_type, rest) = dst.split_at_mut(4);
@ -808,7 +809,7 @@ impl Handshake {
dst: &'a mut [u8], dst: &'a mut [u8],
) -> Result<(&'a mut [u8], Session), WireGuardError> { ) -> Result<(&'a mut [u8], Session), WireGuardError> {
if dst.len() < super::HANDSHAKE_RESP_SZ { if dst.len() < super::HANDSHAKE_RESP_SZ {
return Err(WireGuardError::DestinationBufferTooSmall) return Err(WireGuardError::DestinationBufferTooSmall);
} }
let state = std::mem::replace(&mut self.state, HandshakeState::None); let state = std::mem::replace(&mut self.state, HandshakeState::None);

View file

@ -133,9 +133,9 @@ pub enum Packet<'a> {
impl Tunnel { impl Tunnel {
#[inline(always)] #[inline(always)]
pub fn parse_incoming_packet(src: &[u8]) -> Result<Packet, WireGuardError> { pub fn parse_incoming_packet(src: &[u8]) -> Result<Packet<'_>, WireGuardError> {
if src.len() < 4 { if src.len() < 4 {
return Err(WireGuardError::InvalidPacket) return Err(WireGuardError::InvalidPacket);
} }
// Checks the type, as well as the reserved zero fields // Checks the type, as well as the reserved zero fields
@ -177,7 +177,7 @@ impl Tunnel {
pub fn dst_address(packet: &[u8]) -> Option<IpAddr> { pub fn dst_address(packet: &[u8]) -> Option<IpAddr> {
if packet.is_empty() { if packet.is_empty() {
return None return None;
} }
match packet[0] >> 4 { match packet[0] >> 4 {
@ -201,7 +201,7 @@ impl Tunnel {
pub fn src_address(packet: &[u8]) -> Option<IpAddr> { pub fn src_address(packet: &[u8]) -> Option<IpAddr> {
if packet.is_empty() { if packet.is_empty() {
return None return None;
} }
match packet[0] >> 4 { match packet[0] >> 4 {
@ -296,7 +296,7 @@ impl Tunnel {
self.timer_tick(TimerName::TimeLastDataPacketSent); self.timer_tick(TimerName::TimeLastDataPacketSent);
} }
self.tx_bytes += src.len(); self.tx_bytes += src.len();
return TunnResult::WriteToNetwork(packet) return TunnResult::WriteToNetwork(packet);
} }
// If there is no session, queue the packet for future retry // If there is no session, queue the packet for future retry
@ -320,7 +320,7 @@ impl Tunnel {
) -> TunnResult<'a> { ) -> TunnResult<'a> {
if datagram.is_empty() { if datagram.is_empty() {
// Indicates a repeated call // Indicates a repeated call
return self.send_queued_packet(dst) return self.send_queued_packet(dst);
} }
let mut cookie = [0u8; COOKIE_REPLY_SZ]; let mut cookie = [0u8; COOKIE_REPLY_SZ];
@ -331,7 +331,7 @@ impl Tunnel {
Ok(packet) => packet, Ok(packet) => packet,
Err(TunnResult::WriteToNetwork(cookie)) => { Err(TunnResult::WriteToNetwork(cookie)) => {
dst[..cookie.len()].copy_from_slice(cookie); dst[..cookie.len()].copy_from_slice(cookie);
return TunnResult::WriteToNetwork(&mut dst[..cookie.len()]) return TunnResult::WriteToNetwork(&mut dst[..cookie.len()]);
} }
Err(TunnResult::Err(e)) => return TunnResult::Err(e), Err(TunnResult::Err(e)) => return TunnResult::Err(e),
_ => unreachable!(), _ => unreachable!(),
@ -435,7 +435,7 @@ impl Tunnel {
let cur_idx = self.current; let cur_idx = self.current;
if cur_idx == new_idx { if cur_idx == new_idx {
// There is nothing to do, already using this session, this is the common case // There is nothing to do, already using this session, this is the common case
return return;
} }
if self.sessions[cur_idx % N_SESSIONS].is_none() if self.sessions[cur_idx % N_SESSIONS].is_none()
|| self.timers.session_timers[new_idx % N_SESSIONS] || self.timers.session_timers[new_idx % N_SESSIONS]
@ -481,7 +481,7 @@ impl Tunnel {
force_resend: bool, force_resend: bool,
) -> TunnResult<'a> { ) -> TunnResult<'a> {
if self.handshake.is_in_progress() && !force_resend { if self.handshake.is_in_progress() && !force_resend {
return TunnResult::Done return TunnResult::Done;
} }
if self.handshake.is_expired() { if self.handshake.is_expired() {
@ -540,7 +540,7 @@ impl Tunnel {
}; };
if computed_len > packet.len() { if computed_len > packet.len() {
return TunnResult::Err(WireGuardError::InvalidPacket) return TunnResult::Err(WireGuardError::InvalidPacket);
} }
self.timer_tick(TimerName::TimeLastDataPacketReceived); self.timer_tick(TimerName::TimeLastDataPacketReceived);

View file

@ -8,23 +8,13 @@ use aead::{generic_array::GenericArray, AeadInPlace, KeyInit};
use chacha20poly1305::{Key, XChaCha20Poly1305}; use chacha20poly1305::{Key, XChaCha20Poly1305};
use parking_lot::Mutex; use parking_lot::Mutex;
use rand_core::{OsRng, RngCore}; use rand_core::{OsRng, RngCore};
use ring::constant_time::verify_slices_are_equal; use subtle::ConstantTimeEq;
use super::{ use super::{
handshake::{ handshake::{
b2s_hash, b2s_hash, b2s_keyed_mac_16, b2s_keyed_mac_16_2, b2s_mac_24, LABEL_COOKIE, LABEL_MAC1,
b2s_keyed_mac_16,
b2s_keyed_mac_16_2,
b2s_mac_24,
LABEL_COOKIE,
LABEL_MAC1,
}, },
HandshakeInit, HandshakeInit, HandshakeResponse, Packet, TunnResult, Tunnel, WireGuardError,
HandshakeResponse,
Packet,
TunnResult,
Tunnel,
WireGuardError,
}; };
const COOKIE_REFRESH: u64 = 128; // Use 128 and not 120 so the compiler can optimize out the division const COOKIE_REFRESH: u64 = 128; // Use 128 and not 120 so the compiler can optimize out the division
@ -136,7 +126,7 @@ impl RateLimiter {
dst: &'a mut [u8], dst: &'a mut [u8],
) -> Result<&'a mut [u8], WireGuardError> { ) -> Result<&'a mut [u8], WireGuardError> {
if dst.len() < super::COOKIE_REPLY_SZ { if dst.len() < super::COOKIE_REPLY_SZ {
return Err(WireGuardError::DestinationBufferTooSmall) return Err(WireGuardError::DestinationBufferTooSmall);
} }
let (message_type, rest) = dst.split_at_mut(4); let (message_type, rest) = dst.split_at_mut(4);
@ -185,8 +175,9 @@ impl RateLimiter {
let (mac1, mac2) = macs.split_at(16); let (mac1, mac2) = macs.split_at(16);
let computed_mac1 = b2s_keyed_mac_16(&self.mac1_key, msg); let computed_mac1 = b2s_keyed_mac_16(&self.mac1_key, msg);
verify_slices_are_equal(&computed_mac1[..16], mac1) if !bool::from(computed_mac1[..16].ct_eq(mac1)) {
.map_err(|_| TunnResult::Err(WireGuardError::InvalidMac))?; return Err(TunnResult::Err(WireGuardError::InvalidMac));
}
if self.is_under_load() { if self.is_under_load() {
let addr = match src_addr { let addr = match src_addr {
@ -198,11 +189,11 @@ impl RateLimiter {
let cookie = self.current_cookie(addr); let cookie = self.current_cookie(addr);
let computed_mac2 = b2s_keyed_mac_16_2(&cookie, msg, mac1); let computed_mac2 = b2s_keyed_mac_16_2(&cookie, msg, mac1);
if verify_slices_are_equal(&computed_mac2[..16], mac2).is_err() { if !bool::from(computed_mac2[..16].ct_eq(mac2)) {
let cookie_packet = self let cookie_packet = self
.format_cookie_reply(sender_idx, cookie, mac1, dst) .format_cookie_reply(sender_idx, cookie, mac1, dst)
.map_err(TunnResult::Err)?; .map_err(TunnResult::Err)?;
return Err(TunnResult::WriteToNetwork(cookie_packet)) return Err(TunnResult::WriteToNetwork(cookie_packet));
} }
} }
} }

View file

@ -88,11 +88,11 @@ impl ReceivingKeyCounterValidator {
fn will_accept(&self, counter: u64) -> Result<(), WireGuardError> { fn will_accept(&self, counter: u64) -> Result<(), WireGuardError> {
if counter >= self.next { if counter >= self.next {
// As long as the counter is growing no replay took place for sure // As long as the counter is growing no replay took place for sure
return Ok(()) return Ok(());
} }
if counter + N_BITS < self.next { if counter + N_BITS < self.next {
// Drop if too far back // Drop if too far back
return Err(WireGuardError::InvalidCounter) return Err(WireGuardError::InvalidCounter);
} }
if !self.check_bit(counter) { if !self.check_bit(counter) {
Ok(()) Ok(())
@ -107,22 +107,22 @@ impl ReceivingKeyCounterValidator {
fn mark_did_receive(&mut self, counter: u64) -> Result<(), WireGuardError> { fn mark_did_receive(&mut self, counter: u64) -> Result<(), WireGuardError> {
if counter + N_BITS < self.next { if counter + N_BITS < self.next {
// Drop if too far back // Drop if too far back
return Err(WireGuardError::InvalidCounter) return Err(WireGuardError::InvalidCounter);
} }
if counter == self.next { if counter == self.next {
// Usually the packets arrive in order, in that case we simply mark the bit and // Usually the packets arrive in order, in that case we simply mark the bit and
// increment the counter // increment the counter
self.set_bit(counter); self.set_bit(counter);
self.next += 1; self.next += 1;
return Ok(()) return Ok(());
} }
if counter < self.next { if counter < self.next {
// A packet arrived out of order, check if it is valid, and mark // A packet arrived out of order, check if it is valid, and mark
if self.check_bit(counter) { if self.check_bit(counter) {
return Err(WireGuardError::InvalidCounter) return Err(WireGuardError::InvalidCounter);
} }
self.set_bit(counter); self.set_bit(counter);
return Ok(()) return Ok(());
} }
// Packets where dropped, or maybe reordered, skip them and mark unused // Packets where dropped, or maybe reordered, skip them and mark unused
if counter - self.next >= N_BITS { if counter - self.next >= N_BITS {
@ -247,7 +247,7 @@ impl Session {
panic!("The destination buffer is too small"); panic!("The destination buffer is too small");
} }
if packet.receiver_idx != self.receiving_index { if packet.receiver_idx != self.receiving_index {
return Err(WireGuardError::WrongIndex) return Err(WireGuardError::WrongIndex);
} }
// Don't reuse counters, in case this is a replay attack we want to quickly // Don't reuse counters, in case this is a replay attack we want to quickly
// check the counter without running expensive decryption // check the counter without running expensive decryption

View file

@ -190,7 +190,7 @@ impl Tunnel {
{ {
if self.handshake.is_expired() { if self.handshake.is_expired() {
return TunnResult::Err(WireGuardError::ConnectionExpired) return TunnResult::Err(WireGuardError::ConnectionExpired);
} }
// Clear cookie after COOKIE_EXPIRATION_TIME // Clear cookie after COOKIE_EXPIRATION_TIME
@ -206,7 +206,7 @@ impl Tunnel {
tracing::error!("CONNECTION_EXPIRED(REJECT_AFTER_TIME * 3)"); tracing::error!("CONNECTION_EXPIRED(REJECT_AFTER_TIME * 3)");
self.handshake.set_expired(); self.handshake.set_expired();
self.clear_all(); self.clear_all();
return TunnResult::Err(WireGuardError::ConnectionExpired) return TunnResult::Err(WireGuardError::ConnectionExpired);
} }
if let Some(time_init_sent) = self.handshake.timer() { if let Some(time_init_sent) = self.handshake.timer() {
@ -219,7 +219,7 @@ impl Tunnel {
tracing::error!("CONNECTION_EXPIRED(REKEY_ATTEMPT_TIME)"); tracing::error!("CONNECTION_EXPIRED(REKEY_ATTEMPT_TIME)");
self.handshake.set_expired(); self.handshake.set_expired();
self.clear_all(); self.clear_all();
return TunnResult::Err(WireGuardError::ConnectionExpired) return TunnResult::Err(WireGuardError::ConnectionExpired);
} }
if time_init_sent.elapsed() >= REKEY_TIMEOUT { if time_init_sent.elapsed() >= REKEY_TIMEOUT {
@ -299,11 +299,11 @@ impl Tunnel {
} }
if handshake_initiation_required { if handshake_initiation_required {
return self.format_handshake_initiation(dst, true) return self.format_handshake_initiation(dst, true);
} }
if keepalive_required { if keepalive_required {
return self.encapsulate(&[], dst) return self.encapsulate(&[], dst);
} }
TunnResult::Done TunnResult::Done

View file

@ -64,7 +64,7 @@ impl PeerPcb {
let guard = self.socket.read().await; let guard = self.socket.read().await;
let Some(socket) = guard.as_ref() else { let Some(socket) = guard.as_ref() else {
self.open_if_closed().await?; self.open_if_closed().await?;
continue continue;
}; };
let mut res_buf = [0; 1500]; let mut res_buf = [0; 1500];
// tracing::debug!("{} : waiting for readability on {:?}", rid, socket); // tracing::debug!("{} : waiting for readability on {:?}", rid, socket);
@ -72,7 +72,7 @@ impl PeerPcb {
Ok(l) => l, Ok(l) => l,
Err(e) => { Err(e) => {
log::error!("{}: error reading from socket: {:?}", rid, e); log::error!("{}: error reading from socket: {:?}", rid, e);
continue continue;
} }
}; };
let mut res_dat = &res_buf[..len]; let mut res_dat = &res_buf[..len];
@ -88,7 +88,7 @@ impl PeerPcb {
TunnResult::Done => break, TunnResult::Done => break,
TunnResult::Err(e) => { TunnResult::Err(e) => {
tracing::error!(message = "Decapsulate error", error = ?e); tracing::error!(message = "Decapsulate error", error = ?e);
break break;
} }
TunnResult::WriteToNetwork(packet) => { TunnResult::WriteToNetwork(packet) => {
tracing::debug!("WriteToNetwork: {:?}", packet); tracing::debug!("WriteToNetwork: {:?}", packet);
@ -102,17 +102,29 @@ impl PeerPcb {
.await?; .await?;
tracing::debug!("WriteToNetwork done"); tracing::debug!("WriteToNetwork done");
res_dat = &[]; res_dat = &[];
continue continue;
} }
TunnResult::WriteToTunnelV4(packet, addr) => { TunnResult::WriteToTunnelV4(packet, addr) => {
tracing::debug!("WriteToTunnelV4: {:?}, {:?}", packet, addr); tracing::debug!("WriteToTunnelV4: {:?}, {:?}", packet, addr);
tun_interface.read().await.as_ref().ok_or(anyhow::anyhow!("tun interface does not exist"))?.send(packet).await?; tun_interface
break .read()
.await
.as_ref()
.ok_or(anyhow::anyhow!("tun interface does not exist"))?
.send(packet)
.await?;
break;
} }
TunnResult::WriteToTunnelV6(packet, addr) => { TunnResult::WriteToTunnelV6(packet, addr) => {
tracing::debug!("WriteToTunnelV6: {:?}, {:?}", packet, addr); tracing::debug!("WriteToTunnelV6: {:?}, {:?}", packet, addr);
tun_interface.read().await.as_ref().ok_or(anyhow::anyhow!("tun interface does not exist"))?.send(packet).await?; tun_interface
break .read()
.await
.as_ref()
.ok_or(anyhow::anyhow!("tun interface does not exist"))?
.send(packet)
.await?;
break;
} }
} }
} }
@ -134,7 +146,7 @@ impl PeerPcb {
let handle = self.socket.read().await; let handle = self.socket.read().await;
let Some(socket) = handle.as_ref() else { let Some(socket) = handle.as_ref() else {
tracing::error!("No socket for peer"); tracing::error!("No socket for peer");
return Ok(()) return Ok(());
}; };
tracing::debug!("Our Encapsulated packet: {:?}", packet); tracing::debug!("Our Encapsulated packet: {:?}", packet);
socket.send(packet).await?; socket.send(packet).await?;
@ -157,7 +169,7 @@ impl PeerPcb {
let handle = self.socket.read().await; let handle = self.socket.read().await;
let Some(socket) = handle.as_ref() else { let Some(socket) = handle.as_ref() else {
tracing::error!("No socket for peer"); tracing::error!("No socket for peer");
return Ok(()) return Ok(());
}; };
socket.send(packet).await?; socket.send(packet).await?;
tracing::debug!("Sent Packet for timer update"); tracing::debug!("Sent Packet for timer update");

101
docs/FORWARDEMAIL.md Normal file
View file

@ -0,0 +1,101 @@
# Forward Email Backups
Burrow's mail direction is hosted mail on [Forward Email](https://forwardemail.net/), with domain-owned backup retention in our own S3-compatible object storage.
This is the first mail path to operationalize for `burrow.net` and `burrow.rs`. It keeps SMTP/IMAP hosting off the first forge host while still giving Burrow control over backup retention and object ownership.
## What Forward Email Requires
Forward Email exposes custom backup storage per domain. The documented API shape is:
- `PUT /v1/domains/{domain}` with:
- `has_custom_s3=true`
- `s3_endpoint`
- `s3_access_key_id`
- `s3_secret_access_key`
- `s3_region`
- `s3_bucket`
- `POST /v1/domains/{domain}/test-s3-connection`
Forward Email also documents these operational constraints:
- the bucket must remain private
- credentials are validated with `HeadBucket`
- failed or public-bucket configurations fall back to Forward Email's default storage and notify domain administrators
- custom S3 keeps every backup version, so lifecycle expiration is our responsibility
## Burrow Secret Layout
Present in `intake/` today:
- `intake/forwardemail_api_token.txt`
- `intake/hetzner-s3-user.txt`
- `intake/hetzner-s3-secret.txt`
- Hetzner public S3 endpoint for Forward Email: `https://hel1.your-objectstorage.com`
- Hetzner object storage region: `hel1`
- Hetzner bucket used for Forward Email backups: `burrow`
## Verified Storage State
As of March 15, 2026, Burrow's Forward Email custom S3 configuration is live:
- endpoint: `https://hel1.your-objectstorage.com`
- region: `hel1`
- bucket: `burrow`
- `burrow.net` has `has_custom_s3=true`
- `burrow.rs` has `has_custom_s3=true`
- Forward Email's `/test-s3-connection` succeeded for both domains
- the `burrow` bucket enforces lifecycle expiration after `90` days
Forward Email performs bucket validation with bucket-style addressing. For Hetzner Object Storage, this means the working endpoint is the regional S3 endpoint (`https://hel1.your-objectstorage.com`), not the account alias (`https://burrow.hel1.your-objectstorage.com`). Using the account alias causes TLS hostname mismatches when the vendor prepends the bucket name.
## Helper
Use [`Tools/forwardemail-custom-s3.sh`](../Tools/forwardemail-custom-s3.sh) to configure or retest the domain setting without putting secrets on the process list.
Use [`Tools/forwardemail-hetzner-storage.py`](../Tools/forwardemail-hetzner-storage.py) to ensure the Hetzner backup bucket exists and to apply lifecycle expiry before enabling custom S3 on the Forward Email side.
Bucket bootstrap example:
```sh
Tools/forwardemail-hetzner-storage.py \
--endpoint https://hel1.your-objectstorage.com \
--bucket burrow \
--expire-days 90
```
Example:
```sh
Tools/forwardemail-custom-s3.sh \
--domain burrow.net \
--api-token-file intake/forwardemail_api_token.txt \
--s3-endpoint https://hel1.your-objectstorage.com \
--s3-region hel1 \
--s3-bucket burrow \
--s3-access-key-file intake/hetzner-s3-user.txt \
--s3-secret-key-file intake/hetzner-s3-secret.txt
```
Retest an existing domain configuration without rewriting it:
```sh
Tools/forwardemail-custom-s3.sh \
--domain burrow.net \
--api-token-file intake/forwardemail_api_token.txt \
--test-only
```
## Retention
Forward Email preserves every backup object when custom S3 is enabled. Configure lifecycle expiration on the bucket itself. A 30-day or 90-day expiry window is the baseline recommendation from the vendor docs; Burrow should choose explicitly per domain instead of letting the bucket grow without bound. The current Burrow bootstrap helper defaults to `90` days.
## Identity Direction
Hosted mail and SaaS identity are separate concerns:
- mail hosting/backups: Forward Email + Burrow-owned S3-compatible storage
- interactive identity: Authentik as the long-term IdP
- future SaaS SSO target: Linear via SAML once the workspace and plan are ready
This means the forge host does not need to become the first mail server just to give Burrow mailboxes or retention control.

View file

@ -98,10 +98,14 @@ code burrow
You can run burrow on the command line with cargo: You can run burrow on the command line with cargo:
``` ```
cargo run sudo -E cargo run -- start
``` ```
Cargo will ask for your password because burrow needs permission in order to create a tunnel. Creating the tunnel requires elevated privileges. Regular checks and tests can run without `sudo`:
```
cargo test --workspace --all-features
```
</details> </details>

31
docs/PROTOCOL_ROADMAP.md Normal file
View file

@ -0,0 +1,31 @@
# Protocol Roadmap
Burrow currently has two tunnel paths in-tree:
- a WireGuard data plane
- a Tor-backed userspace TCP path
What it does not have yet is a transport-neutral control plane that can honestly claim full MASQUE `CONNECT-IP` or full Tailscale-style negotiation parity. This repository now contains the beginnings of that layer:
- control-plane data structures in `burrow/src/control/mod.rs`
- local auth bootstrap and persistent node/session storage in `burrow/src/auth/server/`
- governance documents under `evolution/` for the bigger protocol work
## `CONNECT-IP`
Full RFC 9484 support requires more than packet forwarding. It needs HTTP/3 session management, Capsule handling, HTTP Datagram context identifiers, address assignment, route advertisement, and request-scope enforcement. Burrow does not implement those end to end yet.
## Tailscale-Style Negotiation
Burrow now has register/map request and response types plus persistent node records, but it does not yet implement the full Tailscale capability surface, peer delta protocol, DERP coordination, or Noise-based control transport.
## Current Direction
The intended sequence is:
1. Stabilize the control-plane data model and bootstrap auth.
2. Introduce transport-neutral route and address abstractions.
3. Add MASQUE framing and HTTP/3 transport support.
4. Expand policy, relay, and interoperability testing.
This keeps Burrow honest about what is running today while creating a clean path for the rest.

30
docs/WIREGUARD_LINEAGE.md Normal file
View file

@ -0,0 +1,30 @@
# WireGuard Rust Lineage
Burrow's in-tree WireGuard engine is not a greenfield implementation. It was lifted from the Rust WireGuard lineage around Cloudflare's BoringTun, then cut down and reshaped to fit Burrow's own daemon and tunnel abstractions.
## What Was Lifted
- The repository history includes `1b39eca` (`boringtun wip`) and `28af9003` (`merge boringtun into burrow`).
- The current `burrow/src/wireguard/noise/*` files still carry the original Cloudflare copyright and SPDX headers.
- Core protocol machinery such as the Noise handshake, session state, rate limiter, and timer logic came from that imported body of work.
## What Changed in Burrow
Burrow does not embed BoringTun unchanged.
- The original device layer was replaced with Burrow-specific interface and peer control blocks in `burrow/src/wireguard/iface.rs` and `burrow/src/wireguard/pcb.rs`.
- Configuration handling was rewritten around Burrow's own INI parser and config model in `burrow/src/wireguard/config.rs`.
- The daemon now resolves the active runtime from the database-backed network list rather than from a single static WireGuard payload.
- Burrow added its own runtime switching path so WireGuard can share one daemon lifecycle with the rest of the managed runtime system.
## What Was Improved
The lifted code has been tightened further in-repo.
- Deprecated constant-time comparisons were replaced with `subtle`.
- Network ordering and runtime selection are now deterministic and test-covered.
- The Burrow runtime can swap between WireGuard configurations without restarting the daemon process itself.
## Why This Matters
This project should be explicit about lineage. Burrow benefits from proven Rust WireGuard work, but it owns the integration surface, runtime behavior, and future maintenance burden. That is why the code should be documented as lifted, modified, and improved rather than described as wholly original.

60
evolution/README.md Normal file
View file

@ -0,0 +1,60 @@
# Burrow Evolution
Burrow Evolution Proposals (BEPs) are the repository's durable design record for protocol work, control-plane changes, forge infrastructure, and operational policy.
## Goals
1. Capture intent before implementation outruns the architecture.
2. Give contributors and agents enough context to work safely without re-discovering prior decisions.
3. Tie ambitious work to concrete validation, rollout, and rollback criteria.
## When a BEP is required
Open a BEP for:
- new transports or protocol families
- control-plane and identity changes
- deployment, forge, runner, or secrets changes
- data model migrations
- user-visible behavior that changes security or routing semantics
Small bug fixes and isolated refactors do not need a BEP unless they materially change one of the areas above.
## Lifecycle
1. Pitch
Capture the problem and why it matters now.
2. Draft
Copy `evolution/proposals/0000-template.md` to `evolution/proposals/BEP-XXXX-short-slug.md`.
3. Review
Collect feedback, tighten the design, and document unresolved concerns.
4. Decision
Mark the proposal `Accepted`, `Rejected`, or `Returned for Revision`.
5. Implementation
Link code changes, tests, and rollout evidence.
6. Supersession
Keep historical proposals in-tree and point forward to the replacing BEP.
## Status Values
- `Pitch`
- `Draft`
- `In Review`
- `Accepted`
- `Implemented`
- `Rejected`
- `Returned for Revision`
- `Superseded`
- `Archived`
## Layout
```text
evolution/
README.md
proposals/
0000-template.md
BEP-0001-...
```
Use ASCII Markdown. Keep metadata at the top of each proposal so tooling and future agents can parse it quickly.

View file

@ -0,0 +1,57 @@
# `BEP-XXXX` - Title Case Summary
```text
Status: Pitch | Draft | In Review | Accepted | Implemented | Rejected | Returned for Revision | Superseded | Archived
Proposal: BEP-XXXX
Authors: <name(s) or agent ids>
Coordinator: <name>
Reviewers: <people, bots, contributors>
Constitution Sections: <II, III, IV, etc.>
Implementation PRs: <link(s)> (optional while drafting)
Decision Date: <YYYY-MM-DD or Pending>
```
## Summary
One or two paragraphs that state the desired outcome and why it matters.
## Motivation
- What problem exists today?
- Why should Burrow solve it now?
- Which issues, incidents, or constraints support the change?
## Detailed Design
- Architecture and boundaries
- Data model and migration plan
- Protocol or API changes
- Observability, testing, and failure handling
## Security and Operational Considerations
- Access and secret handling
- Abuse, downgrade, or supply-chain risks
- Rollback and kill-switch plans
## Contributor Playbook
Give the concrete steps, commands, checks, and evidence a contributor should produce while implementing or rolling out the change.
## Alternatives Considered
List alternatives and why they were rejected.
## Impact on Other Work
- follow-up tasks
- dependencies
- compatibility constraints
## Decision
Record the final call, who made it, and any conditions.
## References
Link relevant issues, specs, transcripts, and external research.

View file

@ -0,0 +1,61 @@
# `BEP-0001` - Sovereign Forge and Governance Bootstrap
```text
Status: Draft
Proposal: BEP-0001
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: II, III, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should own its forge, deployment logic, and operational context under `burrow.net`. This proposal establishes the repository-local governance and forge bootstrap required to move build, release, and infrastructure control out of GitHub-centric assumptions and into a self-hosted operating model.
## Motivation
- The repository currently keeps CI definitions under `.github/workflows/` but has no first-class self-hosted forge layout.
- Infrastructure changes and protocol work are already entangled; without a design record, the project risks landing irreversible operations without enough context.
- A self-hosted forge is a prerequisite for durable autonomy over source, runners, and release pipelines.
## Detailed Design
- Add a project constitution and BEP process under `evolution/`.
- Introduce a Nix flake and NixOS host/module layout for `burrow-forge`.
- Add Forgejo-native workflows under `.forgejo/workflows/` for repository-local CI.
- Bootstrap the initial forge identity around `contact@burrow.net` and an agent-owned SSH workflow.
## Security and Operational Considerations
- Initial bootstrap may read credentials from local intake, but production must converge on encrypted secret handling.
- The first forge host replacement must preserve rollback information before deleting any existing VM.
- DNS for `burrow.net` is currently pending activation; the forge rollout must not assume public reachability until nameserver cutover completes.
## Contributor Playbook
- Keep destructive host operations behind explicit verification of the current Hetzner state.
- Build and test repository-local workflows before using them for deployment.
- Record the active server id, image, IPs, and SSH path before replacement.
## Alternatives Considered
- Continue relying on GitHub Actions while separately hosting services. Rejected because it leaves source authority and CI policy split across systems.
- Stand up Forgejo without a repository-local operating model. Rejected because the repo would still be missing deployment truth.
## Impact on Other Work
- Blocks long-term migration of workflows away from GitHub.
- Provides the governance anchor for protocol and control-plane proposals.
## Decision
Pending.
## References
- `CONSTITUTION.md`
- `.github/workflows/`
- `.forgejo/workflows/`

View file

@ -0,0 +1,60 @@
# `BEP-0002` - Control-Plane Bootstrap and Local Auth
```text
Status: Draft
Proposal: BEP-0002
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: I, II, III, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow needs a repository-owned control-plane model instead of ad hoc network payload storage plus third-party-only auth. This proposal introduces a local username/password bootstrap for `contact@burrow.net`, plus a register/map data model shaped to support a Tailscale-style control server without claiming full parity yet.
## Motivation
- Current auth support is limited and does not provide a plain local bootstrap path for the project's own operator identity.
- The existing database stores network payloads, but not a durable model for users, nodes, sessions, or control-plane negotiation state.
- Future work on route policy, device coordination, and richer negotiation needs a real data model now.
## Detailed Design
- Add control-plane types for users, nodes, register requests, and map responses.
- Extend the auth server schema with local credentials, sessions, provider logins, and control nodes.
- Expose JSON endpoints for local login, node registration, and map retrieval.
- Seed the initial operator account from intake-backed bootstrap credentials.
## Security and Operational Considerations
- Passwords are stored with Argon2id hashes only.
- Session tokens are bearer credentials and must be treated as sensitive.
- The bootstrap credential path is a short-term path; follow-up work should move it into encrypted secret management before public deployment.
## Contributor Playbook
- Verify bootstrap account creation in an isolated test database.
- Exercise login, register, and map end to end with integration tests.
- Do not advertise protocol parity beyond the implemented request/response contract.
## Alternatives Considered
- Wait for full external identity-provider integration first. Rejected because the forge needs an operator account now.
- Keep control-plane state implicit in daemon-local configuration. Rejected because it cannot express multi-device coordination.
## Impact on Other Work
- Unblocks forge bootstrap and future device control-plane work.
- Creates the storage model needed for richer policy and transport proposals.
## Decision
Pending.
## References
- `burrow/src/auth/server/`
- `burrow/src/control/`

View file

@ -0,0 +1,61 @@
# `BEP-0003` - CONNECT-IP and Negotiation Roadmap
```text
Status: Draft
Proposal: BEP-0003
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: I, II, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should grow from a WireGuard-first tunnel runner into a transport stack that can support HTTP/3 MASQUE `CONNECT-IP` and a richer node negotiation model. This proposal stages that work so Burrow can adopt the right abstractions instead of stapling QUIC-era semantics onto a WireGuard-only daemon.
## Motivation
- `CONNECT-IP` introduces HTTP/3 sessions, context identifiers, address assignment, and route advertisements that do not fit the current daemon model.
- A Tailscale-style control plane requires explicit node, endpoint, and session state rather than raw network blobs.
- The project needs a roadmap that distinguishes data-model work, control-plane work, and actual transport implementation.
## Detailed Design
- Stage 1: land control-plane types and persistent auth/session/node storage.
- Stage 2: add transport-agnostic route, address-assignment, and policy abstractions in Burrow.
- Stage 3: implement MASQUE `CONNECT-IP` framing and HTTP Datagram handling.
- Stage 4: connect the transport layer to real relay, policy, and observability paths.
## Security and Operational Considerations
- `CONNECT-IP` changes the trust boundary from WireGuard peers to HTTP/3 peers and relays; authentication, replay handling, and scope restriction must be explicit.
- Route advertisements and delegated prefixes must be validated before touching the data plane.
- Control-plane capability claims must not imply support that the transport layer does not yet implement.
## Contributor Playbook
- Keep protocol codecs independently testable before integrating them into live transports.
- Add interoperability tests for every new capsule or datagram type.
- Separate request parsing, policy validation, and packet forwarding so regressions stay localized.
## Alternatives Considered
- Implement MASQUE directly in the daemon without control-plane refactoring. Rejected because the current daemon has no transport-neutral contract for routes or prefixes.
- Treat Tailscale negotiation as a one-off compatibility shim. Rejected because Burrow needs first-class control-plane concepts either way.
## Impact on Other Work
- Depends on BEP-0002.
- Informs future relay, policy, and node coordination work.
## Decision
Pending.
## References
- RFC 9484
- `burrow/src/daemon/`
- `burrow/src/control/`

View file

@ -0,0 +1,68 @@
# `BEP-0004` - Hosted Mail Backups and SaaS Identity
```text
Status: Draft
Proposal: BEP-0004
Authors: gpt-5.4
Coordinator: gpt-5.4
Reviewers: Pending
Constitution Sections: II, III, V
Implementation PRs: Pending
Decision Date: Pending
```
## Summary
Burrow should start with hosted mail on Forward Email instead of self-hosting SMTP and IMAP on the first forge machine. Backup retention should still be controlled by Burrow through custom S3-compatible storage backed by Burrow-owned object storage. In parallel, Burrow should treat SaaS identity as a separate track and converge on Authentik as the long-term IdP, with Linear SAML SSO as a planned downstream integration rather than an immediate bootstrap dependency.
## Motivation
- The first forge host already carries source control, CI, and deployment bootstrap risk. Adding a self-hosted mail stack increases operational scope before the forge is stable.
- Forward Email already exposes SMTP and IMAP while allowing per-domain custom S3 backup storage, which preserves Burrow's data ownership over mailbox backups.
- The repository needs a durable decision record that separates hosted mail operations from future SaaS SSO work.
## Detailed Design
- Use Forward Email as the operational mail provider for `burrow.net` and `burrow.rs`.
- Configure custom S3-compatible storage per domain using Burrow-controlled object storage credentials.
- Keep one backup bucket per domain and enforce lifecycle expiration at the bucket layer.
- Add repository-owned tooling and documentation for applying and testing the Forward Email custom S3 configuration.
- Treat Authentik as the future identity authority for SaaS applications, but keep Linear SAML as a later rollout once the workspace and vendor prerequisites are available. Linear's current docs place SAML and SCIM behind higher-tier workspace security settings, so Burrow should treat plan availability as an explicit precondition.
## Security and Operational Considerations
- Forward Email API tokens and S3 credentials must stay in secret files and must not be passed directly on the shell command line.
- Buckets must remain private. Public bucket detection by the vendor should be treated as a hard failure, not a warning.
- Backup growth is unbounded without lifecycle rules. Retention policy is part of the rollout, not optional cleanup.
- Hosted mail reduces MTA attack surface on the forge host, but it adds third-party dependency risk; keeping backups in Burrow-owned storage limits that blast radius.
## Contributor Playbook
- Put the Forward Email API token in `intake/forwardemail_api_token.txt`.
- Use `Tools/forwardemail-custom-s3.sh` to configure `burrow.net` and `burrow.rs`.
- Run the helper again with `--test-only` after any credential rotation.
- Record the chosen endpoint, region, bucket names, and lifecycle policy alongside rollout evidence.
- Do not claim Linear SAML is live until the Authentik app, Linear workspace settings, workspace plan prerequisites, and end-to-end login flow are verified.
## Alternatives Considered
- Self-host Stalwart on the forge host immediately. Rejected for the first rollout because it expands host scope before source control and CI are stable.
- Rely on Forward Email default backup storage only. Rejected because it gives Burrow less control over retention and data location.
- Delay all SaaS identity planning until after forge cutover. Rejected because Linear and other SaaS integrations will otherwise accrete without an agreed authority.
## Impact on Other Work
- Narrows the first forge host scope.
- Creates a clean mail path for `contact@burrow.net` without requiring self-hosted SMTP and IMAP.
- Leaves Authentik and Linear SAML as explicit follow-up work instead of hidden assumptions.
## Decision
Pending.
## References
- `docs/FORWARDEMAIL.md`
- `Tools/forwardemail-custom-s3.sh`
- Forward Email FAQ: custom S3-compatible storage for backups
- Linear docs: SAML SSO

View file

@ -26,7 +26,7 @@ async fn generate(out_dir: &std::path::Path) -> anyhow::Result<()> {
println!("cargo:rerun-if-changed={}", binary_path.to_str().unwrap()); println!("cargo:rerun-if-changed={}", binary_path.to_str().unwrap());
if let (Ok(..), Ok(..)) = (File::open(&bindings_path), File::open(&binary_path)) { if let (Ok(..), Ok(..)) = (File::open(&bindings_path), File::open(&binary_path)) {
return Ok(()) return Ok(());
}; };
let archive = download(out_dir) let archive = download(out_dir)

View file

@ -33,7 +33,7 @@ impl TunInterface {
Ok(result) => return result, Ok(result) => return result,
Err(_would_block) => { Err(_would_block) => {
tracing::debug!("WouldBlock"); tracing::debug!("WouldBlock");
continue continue;
} }
} }
} }

View file

@ -114,6 +114,10 @@ impl TunInterface {
ifname_to_string(buf) ifname_to_string(buf)
} }
pub(crate) fn packet_information_size(&self) -> usize {
4
}
#[throws] #[throws]
#[instrument] #[instrument]
fn ifreq(&self) -> sys::ifreq { fn ifreq(&self) -> sys::ifreq {

View file

@ -73,6 +73,21 @@ impl TunInterface {
ifname_to_string(iff.ifr_name) ifname_to_string(iff.ifr_name)
} }
pub(crate) fn packet_information_size(&self) -> usize {
let mut iff = unsafe { mem::zeroed::<libc::ifreq>() };
match unsafe { sys::tun_get_iff(self.socket.as_raw_fd(), &mut iff) } {
Ok(_) => {
let flags = unsafe { iff.ifr_ifru.ifru_flags };
if flags & libc::IFF_NO_PI as i16 != 0 {
0
} else {
4
}
}
Err(_) => 4,
}
}
#[throws] #[throws]
#[instrument] #[instrument]
fn ifreq(&self) -> sys::ifreq { fn ifreq(&self) -> sys::ifreq {
@ -283,6 +298,16 @@ impl TunInterface {
#[throws] #[throws]
#[instrument] #[instrument]
pub fn send(&self, buf: &[u8]) -> usize { pub fn send(&self, buf: &[u8]) -> usize {
self.socket.send(buf)? let len = unsafe {
libc::write(
self.as_raw_fd(),
buf.as_ptr().cast::<libc::c_void>(),
buf.len(),
)
};
if len < 0 {
Err(Error::last_os_error())?;
}
len as usize
} }
} }

View file

@ -48,12 +48,26 @@ impl TunInterface {
#[throws] #[throws]
#[instrument] #[instrument]
pub fn recv(&self, buf: &mut [u8]) -> usize { pub fn recv(&self, buf: &mut [u8]) -> usize {
// Use IoVec to read directly into target buffer let packet_information_size = self.packet_information_size();
let mut tmp_buf = [MaybeUninit::uninit(); 1500]; let mut tmp_buf = [MaybeUninit::uninit(); 1504];
let len = self.socket.recv(&mut tmp_buf)?; let len = unsafe {
let result_buf = unsafe { assume_init(&tmp_buf[4..len]) }; libc::read(
buf[..len - 4].copy_from_slice(result_buf); self.as_raw_fd(),
len - 4 tmp_buf.as_mut_ptr().cast::<libc::c_void>(),
tmp_buf.len(),
)
};
if len < 0 {
Err(Error::last_os_error())?;
}
let len = len as usize;
if len < packet_information_size {
return 0;
}
let result_buf = unsafe { assume_init(&tmp_buf[packet_information_size..len]) };
buf[..len - packet_information_size].copy_from_slice(result_buf);
len - packet_information_size
} }
#[throws] #[throws]

View file

@ -3,17 +3,33 @@ use std::{io::Error, net::Ipv4Addr};
use fehler::throws; use fehler::throws;
use tun::TunInterface; use tun::TunInterface;
/// Open a TUN interface for a test, returning `Ok(None)` when the
/// process lacks the privileges needed to create one, so the caller
/// can skip the test instead of failing on unprivileged runners.
fn open_tun() -> Result<Option<TunInterface>, Error> {
    match TunInterface::new() {
        Ok(tun) => Ok(Some(tun)),
        Err(err) => {
            // EPERM (1) / EACCES (13), or a PermissionDenied kind, all
            // mean "not allowed to create tunnels here".
            let unprivileged = err.kind() == std::io::ErrorKind::PermissionDenied
                || matches!(err.raw_os_error(), Some(1 | 13));
            if unprivileged {
                eprintln!("skipping tun test without tunnel privileges: {err}");
                Ok(None)
            } else {
                Err(err)
            }
        }
    }
}
#[test] #[test]
#[throws] #[throws]
fn test_create() { fn test_create() {
TunInterface::new()?; let _ = open_tun()?;
} }
#[test] #[test]
#[throws] #[throws]
#[cfg(not(any(target_os = "windows", target_vendor = "apple")))] #[cfg(not(any(target_os = "windows", target_vendor = "apple")))]
fn test_set_get_broadcast_addr() { fn test_set_get_broadcast_addr() {
let tun = TunInterface::new()?; let Some(tun) = open_tun()? else {
return Ok(());
};
let addr = Ipv4Addr::new(10, 0, 0, 1); let addr = Ipv4Addr::new(10, 0, 0, 1);
tun.set_ipv4_addr(addr)?; tun.set_ipv4_addr(addr)?;
@ -28,7 +44,9 @@ fn test_set_get_broadcast_addr() {
#[throws] #[throws]
#[cfg(not(target_os = "windows"))] #[cfg(not(target_os = "windows"))]
fn test_set_get_ipv4() { fn test_set_get_ipv4() {
let tun = TunInterface::new()?; let Some(tun) = open_tun()? else {
return Ok(());
};
let addr = Ipv4Addr::new(10, 0, 0, 1); let addr = Ipv4Addr::new(10, 0, 0, 1);
tun.set_ipv4_addr(addr)?; tun.set_ipv4_addr(addr)?;
@ -43,7 +61,9 @@ fn test_set_get_ipv4() {
fn test_set_get_ipv6() { fn test_set_get_ipv6() {
use std::net::Ipv6Addr; use std::net::Ipv6Addr;
let tun = TunInterface::new()?; let Some(tun) = open_tun()? else {
return Ok(());
};
let addr = Ipv6Addr::new(1, 1, 1, 1, 1, 1, 1, 1); let addr = Ipv6Addr::new(1, 1, 1, 1, 1, 1, 1, 1);
tun.add_ipv6_addr(addr, 128)?; tun.add_ipv6_addr(addr, 128)?;
@ -56,7 +76,9 @@ fn test_set_get_ipv6() {
#[throws] #[throws]
#[cfg(not(target_os = "windows"))] #[cfg(not(target_os = "windows"))]
fn test_set_get_mtu() { fn test_set_get_mtu() {
let interf = TunInterface::new()?; let Some(interf) = open_tun()? else {
return Ok(());
};
interf.set_mtu(500)?; interf.set_mtu(500)?;
@ -67,7 +89,9 @@ fn test_set_get_mtu() {
#[throws] #[throws]
#[cfg(not(target_os = "windows"))] #[cfg(not(target_os = "windows"))]
fn test_set_get_netmask() { fn test_set_get_netmask() {
let interf = TunInterface::new()?; let Some(interf) = open_tun()? else {
return Ok(());
};
let netmask = Ipv4Addr::new(255, 0, 0, 0); let netmask = Ipv4Addr::new(255, 0, 0, 0);
let addr = Ipv4Addr::new(192, 168, 1, 1); let addr = Ipv4Addr::new(192, 168, 1, 1);

View file

@ -1,10 +1,27 @@
#[cfg(all(feature = "tokio", not(target_os = "windows")))] #[cfg(all(feature = "tokio", not(target_os = "windows")))]
use std::net::Ipv4Addr; use std::net::Ipv4Addr;
/// Try to create a TUN interface for a tokio test.
///
/// Returns `None` (letting the caller skip the test) when the process
/// lacks tunnel privileges; panics on any other creation failure.
#[cfg(all(feature = "tokio", not(target_os = "windows")))]
fn open_tun() -> Option<tun::TunInterface> {
    let err = match tun::TunInterface::new() {
        Ok(tun) => return Some(tun),
        Err(err) => err,
    };
    // EPERM (1) / EACCES (13), or a PermissionDenied kind, indicate an
    // unprivileged environment rather than a real failure.
    if err.kind() == std::io::ErrorKind::PermissionDenied
        || matches!(err.raw_os_error(), Some(1 | 13))
    {
        eprintln!("skipping tokio tun test without tunnel privileges: {err}");
        None
    } else {
        panic!("failed to create tun interface: {err}");
    }
}
#[tokio::test] #[tokio::test]
#[cfg(all(feature = "tokio", not(target_os = "windows")))] #[cfg(all(feature = "tokio", not(target_os = "windows")))]
async fn test_create() { async fn test_create() {
let tun = tun::TunInterface::new().unwrap(); let Some(tun) = open_tun() else {
return;
};
let _ = tun::tokio::TunInterface::new(tun).unwrap(); let _ = tun::tokio::TunInterface::new(tun).unwrap();
} }
@ -12,7 +29,9 @@ async fn test_create() {
#[ignore = "requires interactivity"] #[ignore = "requires interactivity"]
#[cfg(all(feature = "tokio", not(target_os = "windows")))] #[cfg(all(feature = "tokio", not(target_os = "windows")))]
async fn test_write() { async fn test_write() {
let tun = tun::TunInterface::new().unwrap(); let Some(tun) = open_tun() else {
return;
};
tun.set_ipv4_addr(Ipv4Addr::from([192, 168, 1, 10])) tun.set_ipv4_addr(Ipv4Addr::from([192, 168, 1, 10]))
.unwrap(); .unwrap();
let async_tun = tun::tokio::TunInterface::new(tun).unwrap(); let async_tun = tun::tokio::TunInterface::new(tun).unwrap();