Compare commits

..

No commits in common. "97c569fb35688a5b89f0a20f938a7bb6d1afe8d7" and "eb9327a99fcb18ecc763644a0ce2b0068a7b0dd9" have entirely different histories.

13 changed files with 115 additions and 1944 deletions

View file

@ -11,8 +11,6 @@ relm4 = { version = "0.6", features = ["libadwaita", "gnome_44"]}
burrow = { version = "*", path = "../burrow/" } burrow = { version = "*", path = "../burrow/" }
tokio = { version = "1.35.0", features = ["time", "sync"] } tokio = { version = "1.35.0", features = ["time", "sync"] }
gettext-rs = { version = "0.7.0", features = ["gettext-system"] } gettext-rs = { version = "0.7.0", features = ["gettext-system"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
[build-dependencies] [build-dependencies]
anyhow = "1.0" anyhow = "1.0"

View file

@ -1,139 +0,0 @@
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use std::{
path::PathBuf,
time::{SystemTime, UNIX_EPOCH},
};
/// A single saved account/network entry as persisted in the JSON store.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AccountRecord {
    /// Unique identifier, e.g. `"wireguard-<unix-seconds>"` (see `new_record`).
    pub id: String,
    /// Which protocol family this record belongs to.
    pub kind: AccountKind,
    /// Display title shown to the user.
    pub title: String,
    /// Optional control-server authority URL (used by Tailnet records — confirm).
    pub authority: Option<String>,
    /// Account name.
    pub account: String,
    /// Identity name.
    pub identity: String,
    /// Optional device hostname.
    pub hostname: Option<String>,
    /// Optional tailnet name.
    pub tailnet: Option<String>,
    /// Free-form user note.
    pub note: Option<String>,
    /// Unix seconds when the record was first stored (0 = never stamped).
    pub created_at: u64,
    /// Unix seconds of the last modification.
    pub updated_at: u64,
}
/// Protocol family of a saved account. Serialized in lowercase
/// ("wireguard", "tor", "tailnet") in the JSON store.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AccountKind {
    WireGuard,
    Tor,
    Tailnet,
}
impl AccountKind {
pub fn title(self) -> &'static str {
match self {
Self::WireGuard => "WireGuard",
Self::Tor => "Tor",
Self::Tailnet => "Tailnet",
}
}
fn sort_rank(self) -> u8 {
match self {
Self::Tailnet => 0,
Self::Tor => 1,
Self::WireGuard => 2,
}
}
}
/// Load all stored accounts from disk.
///
/// Returns an empty list when the store file has not been created yet;
/// fails when the file exists but cannot be read or parsed.
pub fn load() -> Result<Vec<AccountRecord>> {
    let path = storage_path()?;
    if !path.exists() {
        return Ok(Vec::new());
    }
    let bytes =
        std::fs::read(&path).with_context(|| format!("failed to read {}", path.display()))?;
    let accounts = serde_json::from_slice(&bytes)
        .with_context(|| format!("failed to parse {}", path.display()))?;
    Ok(accounts)
}
/// Insert a new account or replace the existing one with the same `id`,
/// then persist the full, re-sorted list to disk.
///
/// `updated_at` is always refreshed; `created_at` is backfilled only when
/// the record has never been stamped (i.e. it is still `0`).
pub fn upsert(mut record: AccountRecord) -> Result<Vec<AccountRecord>> {
    let mut accounts = load()?;
    let now = timestamp();
    record.updated_at = now;
    if record.created_at == 0 {
        record.created_at = now;
    }
    match accounts.iter().position(|existing| existing.id == record.id) {
        Some(slot) => accounts[slot] = record,
        None => accounts.push(record),
    }
    // Group by kind rank first, then alphabetically (case-insensitive) by title.
    accounts.sort_by_key(|account| (account.kind.sort_rank(), account.title.to_lowercase()));
    persist(&accounts)?;
    Ok(accounts)
}
/// Build a fresh `AccountRecord` stamped with the current time.
///
/// The id is derived from the kind label plus the creation timestamp,
/// e.g. `wireguard-1700000000`.
// NOTE(review): two records of the same kind created within the same second
// would share an id — confirm this cannot happen in practice.
pub fn new_record(
    kind: AccountKind,
    title: String,
    authority: Option<String>,
    account: String,
    identity: String,
    hostname: Option<String>,
    tailnet: Option<String>,
    note: Option<String>,
) -> AccountRecord {
    let created = timestamp();
    let id = format!("{}-{created}", kind.title().to_ascii_lowercase());
    AccountRecord {
        id,
        kind,
        title,
        authority,
        account,
        identity,
        hostname,
        tailnet,
        note,
        created_at: created,
        updated_at: created,
    }
}
/// Serialize the account list as pretty JSON and write it to the store file,
/// creating parent directories as needed.
fn persist(accounts: &[AccountRecord]) -> Result<()> {
    let encoded = serde_json::to_vec_pretty(accounts).context("failed to encode account store")?;
    let path = storage_path()?;
    if let Some(parent) = path.parent() {
        std::fs::create_dir_all(parent)
            .with_context(|| format!("failed to create {}", parent.display()))?;
    }
    std::fs::write(&path, encoded).with_context(|| format!("failed to write {}", path.display()))
}
/// Resolve the on-disk location of the accounts store.
///
/// Preference order: `$XDG_DATA_HOME/burrow/accounts.json`, then
/// `$HOME/.local/share/burrow/accounts.json`, then a file in the temp dir.
fn storage_path() -> Result<PathBuf> {
    if let Some(data_home) = std::env::var_os("XDG_DATA_HOME") {
        let path = PathBuf::from(data_home).join("burrow").join("accounts.json");
        return Ok(path);
    }
    if let Some(home) = std::env::var_os("HOME") {
        let mut path = PathBuf::from(home);
        for part in [".local", "share", "burrow", "accounts.json"] {
            path.push(part);
        }
        return Ok(path);
    }
    Ok(std::env::temp_dir().join("burrow-accounts.json"))
}
/// Seconds since the Unix epoch, or 0 if the system clock reads before it.
fn timestamp() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}

View file

@ -1,19 +1,24 @@
use super::*; use super::*;
use anyhow::Context; use anyhow::Context;
use std::time::Duration;
const RECONNECT_POLL_TIME: Duration = Duration::from_secs(5);
pub struct App { pub struct App {
_home_screen: AsyncController<home_screen::HomeScreen>, daemon_client: Arc<Mutex<Option<DaemonClient>>>,
settings_screen: Controller<settings_screen::SettingsScreen>,
switch_screen: AsyncController<switch_screen::SwitchScreen>,
} }
#[derive(Debug)] #[derive(Debug)]
pub enum AppMsg { pub enum AppMsg {
None, None,
PostInit,
} }
impl App { impl App {
pub fn run() { pub fn run() {
let app = RelmApp::new(config::ID); let app = RelmApp::new(config::ID);
relm4::set_global_css(APP_CSS);
Self::setup_gresources().unwrap(); Self::setup_gresources().unwrap();
Self::setup_i18n().unwrap(); Self::setup_i18n().unwrap();
@ -44,7 +49,7 @@ impl AsyncComponent for App {
view! { view! {
adw::Window { adw::Window {
set_title: Some("Burrow"), set_title: Some("Burrow"),
set_default_size: (900, 760), set_default_size: (640, 480),
} }
} }
@ -53,84 +58,100 @@ impl AsyncComponent for App {
root: Self::Root, root: Self::Root,
sender: AsyncComponentSender<Self>, sender: AsyncComponentSender<Self>,
) -> AsyncComponentParts<Self> { ) -> AsyncComponentParts<Self> {
let home_screen = home_screen::HomeScreen::builder() let daemon_client = Arc::new(Mutex::new(DaemonClient::new().await.ok()));
.launch(())
let switch_screen = switch_screen::SwitchScreen::builder()
.launch(switch_screen::SwitchScreenInit {
daemon_client: Arc::clone(&daemon_client),
})
.forward(sender.input_sender(), |_| AppMsg::None);
let settings_screen = settings_screen::SettingsScreen::builder()
.launch(settings_screen::SettingsScreenInit {
daemon_client: Arc::clone(&daemon_client),
})
.forward(sender.input_sender(), |_| AppMsg::None); .forward(sender.input_sender(), |_| AppMsg::None);
let widgets = view_output!(); let widgets = view_output!();
let view_stack = adw::ViewStack::new();
view_stack.add_titled(switch_screen.widget(), None, "Switch");
view_stack.add_titled(settings_screen.widget(), None, "Settings");
let view_switcher_bar = adw::ViewSwitcherBar::builder().stack(&view_stack).build();
view_switcher_bar.set_reveal(true);
// When libadwaita 1.4 support becomes more available, this approach is more appropriate
//
// let toolbar = adw::ToolbarView::new();
// toolbar.add_top_bar(
// &adw::HeaderBar::builder()
// .title_widget(&gtk::Label::new(Some("Burrow")))
// .build(),
// );
// toolbar.add_bottom_bar(&view_switcher_bar);
// toolbar.set_content(Some(&view_stack));
// root.set_content(Some(&toolbar));
let content = gtk::Box::new(gtk::Orientation::Vertical, 0); let content = gtk::Box::new(gtk::Orientation::Vertical, 0);
content.append( content.append(
&adw::HeaderBar::builder() &adw::HeaderBar::builder()
.title_widget(&gtk::Label::new(Some("Burrow"))) .title_widget(&gtk::Label::new(Some("Burrow")))
.build(), .build(),
); );
content.append(home_screen.widget()); content.append(&view_stack);
content.append(&view_switcher_bar);
root.set_content(Some(&content)); root.set_content(Some(&content));
let model = App { _home_screen: home_screen }; sender.input(AppMsg::PostInit);
let model = App {
daemon_client,
switch_screen,
settings_screen,
};
AsyncComponentParts { model, widgets } AsyncComponentParts { model, widgets }
} }
async fn update( async fn update(
&mut self, &mut self,
msg: Self::Input, _msg: Self::Input,
_sender: AsyncComponentSender<Self>, _sender: AsyncComponentSender<Self>,
_root: &Self::Root, _root: &Self::Root,
) { ) {
match msg { loop {
AppMsg::None => {} tokio::time::sleep(RECONNECT_POLL_TIME).await;
} {
let mut daemon_client = self.daemon_client.lock().await;
let mut disconnected_daemon_client = false;
if let Some(daemon_client) = daemon_client.as_mut() {
if let Err(_e) = daemon_client.send_command(DaemonCommand::ServerInfo).await {
disconnected_daemon_client = true;
self.switch_screen
.emit(switch_screen::SwitchScreenMsg::DaemonDisconnect);
self.settings_screen
.emit(settings_screen::SettingsScreenMsg::DaemonStateChange)
} }
} }
const APP_CSS: &str = r#" if disconnected_daemon_client || daemon_client.is_none() {
.empty-state { match DaemonClient::new().await {
border-radius: 18px; Ok(new_daemon_client) => {
padding: 22px; *daemon_client = Some(new_daemon_client);
background: alpha(@card_bg_color, 0.72); self.switch_screen
.emit(switch_screen::SwitchScreenMsg::DaemonReconnect);
self.settings_screen
.emit(settings_screen::SettingsScreenMsg::DaemonStateChange)
} }
Err(_e) => {
.summary-card { // TODO: Handle Error
border-radius: 18px;
padding: 14px;
background: alpha(@card_bg_color, 0.72);
} }
.network-card {
border-radius: 10px;
padding: 16px;
box-shadow: 0 2px 6px alpha(black, 0.14);
} }
.wireguard-card {
background: linear-gradient(135deg, #3277d8, #174ea6);
} }
.tailnet-card {
background: linear-gradient(135deg, #31b891, #147d69);
} }
.network-card-kind,
.network-card-title,
.network-card-detail {
color: white;
} }
.network-card-kind {
opacity: 0.86;
font-weight: 700;
} }
.network-card-title {
font-size: 1.22em;
font-weight: 700;
} }
.network-card-detail {
opacity: 0.92;
font-family: monospace;
}
"#;

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,6 @@
use super::*; use super::*;
use crate::daemon_api;
use adw::prelude::*; use adw::prelude::*;
use burrow::{DaemonClient, DaemonCommand, DaemonResponseData};
use gtk::Align; use gtk::Align;
use relm4::{ use relm4::{
component::{ component::{
@ -9,9 +9,13 @@ use relm4::{
}, },
prelude::*, prelude::*,
}; };
use std::sync::Arc;
use tokio::sync::Mutex;
mod app; mod app;
mod home_screen; mod settings;
mod settings_screen;
mod switch_screen;
pub use app::*; pub use app::*;
pub use home_screen::{HomeScreen, HomeScreenMsg}; pub use settings::{DaemonGroupMsg, DiagGroupMsg};

View file

@ -1,420 +0,0 @@
use anyhow::{anyhow, Context, Result};
use burrow::{
control::{TailnetConfig, TailnetProvider},
grpc_defs::{
Empty, Network, NetworkType, State, TailnetDiscoverRequest, TailnetLoginCancelRequest,
TailnetLoginStartRequest, TailnetLoginStatusRequest, TailnetProbeRequest,
},
BurrowClient,
};
use std::{
    path::{Path, PathBuf},
    sync::OnceLock,
};
use tokio::time::{timeout, Duration};
/// Per-RPC deadline applied to every daemon call in this module.
const RPC_TIMEOUT: Duration = Duration::from_secs(3);
/// Authority URL of the hosted (Tailscale-managed) control plane.
const MANAGED_TAILSCALE_AUTHORITY: &str = "https://controlplane.tailscale.com";
/// Set once the embedded in-process daemon has been spawned (see `ensure_daemon`).
static EMBEDDED_DAEMON_STARTED: OnceLock<()> = OnceLock::new();
/// Coarse tunnel status reported by the daemon.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TunnelState {
    /// The tunnel is up.
    Running,
    /// The tunnel is down.
    Stopped,
}
/// Display-ready summary of a stored network.
#[derive(Debug, Clone)]
pub struct NetworkSummary {
    /// Daemon-assigned network id.
    pub id: i32,
    /// Short display title.
    pub title: String,
    /// One-line detail (config excerpt for WireGuard, authority/account for Tailnet).
    pub detail: String,
}
/// Result of a Tailnet authority discovery RPC.
#[derive(Debug, Clone)]
pub struct TailnetDiscovery {
    /// Control-server authority URL discovered for the account.
    pub authority: String,
    /// True when the authority is the managed (hosted) control plane.
    pub managed: bool,
    /// OIDC issuer, when the discovery response reported a non-blank one.
    pub oidc_issuer: Option<String>,
}
/// Result of probing a Tailnet control-server authority.
#[derive(Debug, Clone)]
pub struct TailnetProbe {
    /// Human-readable probe outcome.
    pub summary: String,
    /// Extra detail, when the probe response included a non-blank one.
    pub detail: Option<String>,
    /// Raw status code from the probe response.
    pub status_code: i32,
}
/// Snapshot of an in-progress (or completed) Tailnet browser sign-in.
#[derive(Debug, Clone)]
pub struct TailnetLoginStatus {
    /// Identifier of the sign-in session; pass back to poll or cancel it.
    pub session_id: String,
    /// Backend state string as reported by the daemon.
    pub backend_state: String,
    /// URL the user must visit to authenticate, when one is pending.
    pub auth_url: Option<String>,
    /// Whether the backend reports itself running.
    pub running: bool,
    /// Whether the user still needs to complete the login.
    pub needs_login: bool,
    /// Tailnet name, once known.
    pub tailnet_name: Option<String>,
    /// This device's DNS name within the tailnet, once known.
    pub self_dns_name: Option<String>,
    /// Addresses assigned to this device on the tailnet.
    pub tailnet_ips: Vec<String>,
    /// Health messages reported by the backend.
    pub health: Vec<String>,
}
/// Authority used when the user has not picked a custom control server.
pub fn default_tailnet_authority() -> &'static str {
    MANAGED_TAILSCALE_AUTHORITY
}
/// Point the Burrow client library at the user-scoped daemon socket.
///
/// Sets `BURROW_SOCKET_PATH` to the per-user default unless the environment
/// already provides one.
pub fn configure_client_paths() -> Result<()> {
    let already_set = std::env::var_os("BURROW_SOCKET_PATH").is_some();
    if !already_set {
        std::env::set_var("BURROW_SOCKET_PATH", default_socket_path()?);
    }
    Ok(())
}
/// Make sure a Burrow daemon is reachable, starting an embedded in-process
/// daemon when no external one answers.
///
/// Flow: configure the client socket path, probe the daemon, and on failure
/// spawn the embedded daemon with user-scoped socket/database paths, then
/// verify it accepts tunnel-status RPCs.
///
/// # Errors
/// Fails when paths cannot be prepared, the spawn task cannot be joined, or
/// the daemon never answers a status RPC.
// NOTE(review): the `EMBEDDED_DAEMON_STARTED` get-then-set is not atomic —
// two concurrent callers could both attempt the spawn. Presumably the spawn
// itself is idempotent; confirm.
pub async fn ensure_daemon() -> Result<()> {
    configure_client_paths()?;
    if daemon_available().await {
        return Ok(());
    }
    let socket_path = socket_path()?;
    let db_path = database_path()?;
    ensure_parent(&socket_path)?;
    ensure_parent(&db_path)?;
    if EMBEDDED_DAEMON_STARTED.get().is_none() {
        // Run on the blocking pool: the embedded spawn is a blocking call.
        tokio::task::spawn_blocking(move || {
            burrow::spawn_in_process_with_paths(Some(socket_path), Some(db_path));
        })
        .await
        .context("failed to join embedded daemon startup")?;
        let _ = EMBEDDED_DAEMON_STARTED.set(());
    }
    tunnel_state()
        .await
        .map(|_| ())
        .context("Burrow daemon started but did not accept tunnel status RPCs")
}
/// Decide which Tailnet provider an authority URL refers to.
///
/// The hosted Tailscale control plane — with or without scheme, trailing
/// slash, or different casing — maps to `Tailscale`; any other authority is
/// treated as a self-hosted Headscale server.
pub fn infer_tailnet_provider(authority: &str) -> TailnetProvider {
    let normalized = authority.trim().trim_end_matches('/').to_ascii_lowercase();
    let is_managed = matches!(
        normalized.as_str(),
        "controlplane.tailscale.com"
            | "http://controlplane.tailscale.com"
            | "https://controlplane.tailscale.com"
    );
    if is_managed {
        TailnetProvider::Tailscale
    } else {
        TailnetProvider::Headscale
    }
}
/// True when the daemon answers a tunnel-status RPC.
pub async fn daemon_available() -> bool {
    matches!(tunnel_state().await, Ok(_))
}
/// Socket path for the daemon: `BURROW_SOCKET_PATH` when set, otherwise the
/// per-user default.
fn socket_path() -> Result<PathBuf> {
    match std::env::var_os("BURROW_SOCKET_PATH") {
        Some(path) => Ok(PathBuf::from(path)),
        None => default_socket_path(),
    }
}
/// Default per-user socket location: `$XDG_RUNTIME_DIR/burrow.sock`, or a
/// uid-suffixed file under `/tmp` when no runtime dir is available.
// NOTE(review): `UID` is a shell variable and is often not exported to the
// environment, so the fallback will usually resolve to `burrow-1000.sock`
// regardless of the real uid — confirm this is acceptable.
fn default_socket_path() -> Result<PathBuf> {
    match std::env::var_os("XDG_RUNTIME_DIR") {
        Some(runtime_dir) => Ok(PathBuf::from(runtime_dir).join("burrow.sock")),
        None => {
            let uid = std::env::var("UID").unwrap_or_else(|_| "1000".to_owned());
            Ok(PathBuf::from(format!("/tmp/burrow-{uid}.sock")))
        }
    }
}
/// Database location, in preference order: `BURROW_DB_PATH`,
/// `$XDG_DATA_HOME/burrow/burrow.db`, `$HOME/.local/share/burrow/burrow.db`,
/// then a file in the temp dir.
fn database_path() -> Result<PathBuf> {
    if let Some(explicit) = std::env::var_os("BURROW_DB_PATH") {
        return Ok(PathBuf::from(explicit));
    }
    if let Some(data_home) = std::env::var_os("XDG_DATA_HOME") {
        return Ok(PathBuf::from(data_home).join("burrow").join("burrow.db"));
    }
    if let Some(home) = std::env::var_os("HOME") {
        let mut path = PathBuf::from(home);
        for part in [".local", "share", "burrow", "burrow.db"] {
            path.push(part);
        }
        return Ok(path);
    }
    Ok(std::env::temp_dir().join("burrow.db"))
}
fn ensure_parent(path: &PathBuf) -> Result<()> {
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent)
.with_context(|| format!("failed to create {}", parent.display()))?;
}
Ok(())
}
/// Query the daemon for the current tunnel state.
///
/// Opens a UDS client, starts the `tunnel_status` streaming RPC, and reads a
/// single message from the stream; both the connect and the read are bounded
/// by `RPC_TIMEOUT`.
///
/// # Errors
/// Fails on timeouts, RPC errors, or when the daemon ends the stream without
/// sending a state.
pub async fn tunnel_state() -> Result<TunnelState> {
    let mut client = BurrowClient::from_uds().await?;
    let mut stream = timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_status(Empty {}))
        .await
        .context("timed out connecting to Burrow daemon")??
        .into_inner();
    let status = timeout(RPC_TIMEOUT, stream.message())
        .await
        .context("timed out reading Burrow tunnel status")??
        .context("Burrow daemon ended the status stream without a state")?;
    Ok(match status.state() {
        State::Running => TunnelState::Running,
        State::Stopped => TunnelState::Stopped,
    })
}
/// Ask the daemon to bring the tunnel up, bounded by `RPC_TIMEOUT`.
pub async fn start_tunnel() -> Result<()> {
    let mut client = BurrowClient::from_uds().await?;
    let call = client.tunnel_client.tunnel_start(Empty {});
    timeout(RPC_TIMEOUT, call)
        .await
        .context("timed out starting Burrow tunnel")??;
    Ok(())
}
/// Ask the daemon to bring the tunnel down, bounded by `RPC_TIMEOUT`.
pub async fn stop_tunnel() -> Result<()> {
    let mut client = BurrowClient::from_uds().await?;
    let call = client.tunnel_client.tunnel_stop(Empty {});
    timeout(RPC_TIMEOUT, call)
        .await
        .context("timed out stopping Burrow tunnel")??;
    Ok(())
}
/// Fetch the daemon's stored networks as display summaries.
///
/// Starts the `network_list` streaming RPC and reads one snapshot message;
/// both the connect and the read are bounded by `RPC_TIMEOUT`.
pub async fn list_networks() -> Result<Vec<NetworkSummary>> {
    let mut client = BurrowClient::from_uds().await?;
    let mut stream = timeout(RPC_TIMEOUT, client.networks_client.network_list(Empty {}))
        .await
        .context("timed out connecting to Burrow network list")??
        .into_inner();
    let response = timeout(RPC_TIMEOUT, stream.message())
        .await
        .context("timed out reading Burrow network list")??
        .context("Burrow daemon ended the network stream without a snapshot")?;
    Ok(response.network.iter().map(summarize_network).collect())
}
/// Store a raw WireGuard configuration with the daemon, returning the id the
/// new network was saved under.
pub async fn add_wireguard(config: String) -> Result<i32> {
    add_network(NetworkType::WireGuard, config.into_bytes()).await
}
/// Store a Tailnet network with the daemon, returning its new id.
///
/// The provider is inferred from the authority; the fields are serialized
/// into a pretty-printed `TailnetConfig` JSON payload.
pub async fn add_tailnet(
    authority: String,
    account: String,
    identity: String,
    hostname: Option<String>,
    tailnet: Option<String>,
) -> Result<i32> {
    let config = TailnetConfig {
        provider: infer_tailnet_provider(&authority),
        authority: Some(authority),
        account: Some(account),
        identity: Some(identity),
        hostname,
        tailnet,
    };
    let encoded = serde_json::to_vec_pretty(&config)?;
    add_network(NetworkType::Tailnet, encoded).await
}
/// Ask the daemon to discover the control authority for an email address.
pub async fn discover_tailnet(email: String) -> Result<TailnetDiscovery> {
    let mut client = BurrowClient::from_uds().await?;
    let request = TailnetDiscoverRequest { email };
    let reply = timeout(RPC_TIMEOUT, client.tailnet_client.discover(request))
        .await
        .context("timed out discovering Tailnet authority")??
        .into_inner();
    Ok(TailnetDiscovery {
        authority: reply.authority,
        managed: reply.managed,
        oidc_issuer: optional(reply.oidc_issuer),
    })
}
/// Probe a Tailnet control-server authority and report what was found.
pub async fn probe_tailnet(authority: String) -> Result<TailnetProbe> {
    let mut client = BurrowClient::from_uds().await?;
    let request = TailnetProbeRequest { authority };
    let reply = timeout(RPC_TIMEOUT, client.tailnet_client.probe(request))
        .await
        .context("timed out probing Tailnet authority")??
        .into_inner();
    Ok(TailnetProbe {
        summary: reply.summary,
        detail: optional(reply.detail),
        status_code: reply.status_code,
    })
}
/// Kick off a browser-based Tailnet sign-in and return its initial status.
///
/// An absent hostname is sent to the daemon as an empty string.
pub async fn start_tailnet_login(
    authority: String,
    account_name: String,
    identity_name: String,
    hostname: Option<String>,
) -> Result<TailnetLoginStatus> {
    let mut client = BurrowClient::from_uds().await?;
    let request = TailnetLoginStartRequest {
        account_name,
        identity_name,
        hostname: hostname.unwrap_or_default(),
        authority,
    };
    let reply = timeout(RPC_TIMEOUT, client.tailnet_client.login_start(request))
        .await
        .context("timed out starting Tailnet sign-in")??
        .into_inner();
    Ok(decode_tailnet_status(reply))
}
/// Poll the status of an in-flight Tailnet sign-in session.
pub async fn tailnet_login_status(session_id: String) -> Result<TailnetLoginStatus> {
    let mut client = BurrowClient::from_uds().await?;
    let request = TailnetLoginStatusRequest { session_id };
    let reply = timeout(RPC_TIMEOUT, client.tailnet_client.login_status(request))
        .await
        .context("timed out reading Tailnet sign-in status")??
        .into_inner();
    Ok(decode_tailnet_status(reply))
}
/// Abort an in-flight Tailnet sign-in session.
pub async fn cancel_tailnet_login(session_id: String) -> Result<()> {
    let mut client = BurrowClient::from_uds().await?;
    let request = TailnetLoginCancelRequest { session_id };
    timeout(RPC_TIMEOUT, client.tailnet_client.login_cancel(request))
        .await
        .context("timed out cancelling Tailnet sign-in")??;
    Ok(())
}
/// Save a network payload with the daemon under a freshly allocated id, and
/// return that id.
// NOTE(review): the id allocation is not atomic with the add — two
// concurrent adds could race to the same id; confirm callers serialize adds.
async fn add_network(network_type: NetworkType, payload: Vec<u8>) -> Result<i32> {
    let id = next_network_id().await?;
    let network = Network {
        id,
        r#type: network_type.into(),
        payload,
    };
    let mut client = BurrowClient::from_uds().await?;
    timeout(RPC_TIMEOUT, client.networks_client.network_add(network))
        .await
        .context("timed out saving network to Burrow daemon")??;
    Ok(id)
}
/// Allocate the next network id: one past the largest stored id (1 when no
/// networks exist yet).
async fn next_network_id() -> Result<i32> {
    let highest = list_networks()
        .await?
        .into_iter()
        .map(|network| network.id)
        .max()
        .unwrap_or(0);
    Ok(highest + 1)
}
/// Convert a stored `Network` into a display summary, dispatching on its type.
fn summarize_network(network: &Network) -> NetworkSummary {
    match network.r#type() {
        NetworkType::WireGuard => summarize_wireguard(network),
        NetworkType::Tailnet => summarize_tailnet(network),
    }
}
/// Summarize a stored WireGuard network.
///
/// The detail line is the first non-empty line of the config that is not a
/// section header (a line starting with `[`), or a placeholder when the
/// payload has none.
fn summarize_wireguard(network: &Network) -> NetworkSummary {
    let text = String::from_utf8_lossy(&network.payload);
    let first_setting = text
        .lines()
        .map(str::trim)
        .find(|line| !line.is_empty() && !line.starts_with('['));
    NetworkSummary {
        id: network.id,
        title: format!("WireGuard {}", network.id),
        detail: first_setting
            .unwrap_or("Stored WireGuard configuration")
            .to_owned(),
    }
}
/// Summarize a stored Tailnet network from its JSON payload.
///
/// The title prefers the tailnet name, then the hostname, then a generic
/// "Tailnet"; the detail shows the authority and account. A payload that
/// cannot be decoded yields an error summary instead of failing.
fn summarize_tailnet(network: &Network) -> NetworkSummary {
    match TailnetConfig::from_slice(&network.payload) {
        Ok(config) => {
            // `config` is owned here, so its fields can be moved out directly;
            // the previous `.clone().or(.clone())` allocations were redundant.
            let title = config
                .tailnet
                .or(config.hostname)
                .unwrap_or_else(|| "Tailnet".to_owned());
            let authority = config
                .authority
                .unwrap_or_else(|| "default authority".to_owned());
            let account = config.account.unwrap_or_else(|| "default".to_owned());
            NetworkSummary {
                id: network.id,
                title,
                detail: format!("{authority} - account {account}"),
            }
        }
        Err(error) => NetworkSummary {
            id: network.id,
            title: "Tailnet".to_owned(),
            detail: format!("Unable to read Tailnet payload: {error}"),
        },
    }
}
/// Translate a raw login-status RPC response into the UI-facing struct,
/// mapping blank optional strings to `None`.
fn decode_tailnet_status(
    response: burrow::grpc_defs::TailnetLoginStatusResponse,
) -> TailnetLoginStatus {
    let auth_url = optional(response.auth_url);
    let tailnet_name = optional(response.tailnet_name);
    let self_dns_name = optional(response.self_dns_name);
    TailnetLoginStatus {
        session_id: response.session_id,
        backend_state: response.backend_state,
        auth_url,
        running: response.running,
        needs_login: response.needs_login,
        tailnet_name,
        self_dns_name,
        tailnet_ips: response.tailnet_ips,
        health: response.health,
    }
}
/// Trim a string and return `Some` only when characters remain.
fn optional(value: String) -> Option<String> {
    match value.trim() {
        "" => None,
        trimmed => Some(trimmed.to_owned()),
    }
}
/// Trim `value`; when nothing remains, fall back to `fallback`.
pub fn normalized(value: &str, fallback: &str) -> String {
    let trimmed = value.trim();
    let chosen = if trimmed.is_empty() { fallback } else { trimmed };
    chosen.to_owned()
}
/// Trim `value` and return `Some` only when characters remain.
pub fn normalized_optional(value: &str) -> Option<String> {
    let trimmed = value.trim();
    (!trimmed.is_empty()).then(|| trimmed.to_owned())
}
/// Like `normalized_optional`, but a blank value is an error naming `label`.
pub fn require_value(value: &str, label: &str) -> Result<String> {
    match normalized_optional(value) {
        Some(cleaned) => Ok(cleaned),
        None => Err(anyhow!("{label} is required")),
    }
}

View file

@ -1,15 +1,11 @@
use anyhow::Result; use anyhow::Result;
pub mod components; pub mod components;
mod account_store; mod diag;
mod daemon_api;
// Generated using meson // Generated using meson
mod config; mod config;
fn main() { fn main() {
if let Err(error) = daemon_api::configure_client_paths() {
eprintln!("failed to configure Burrow daemon paths: {error}");
}
components::App::run(); components::App::run();
} }

View file

@ -1,11 +1,11 @@
use std::{ use std::{
ffi::{c_char, CStr}, ffi::{c_char, CStr},
path::PathBuf, path::PathBuf,
sync::{Arc, Mutex}, sync::Arc,
thread, thread,
}; };
use once_cell::sync::{Lazy, OnceCell}; use once_cell::sync::OnceCell;
use tokio::{ use tokio::{
runtime::{Builder, Handle}, runtime::{Builder, Handle},
sync::Notify, sync::Notify,
@ -14,12 +14,15 @@ use tracing::error;
use crate::daemon::daemon_main; use crate::daemon::daemon_main;
static BURROW_NOTIFY: OnceCell<Arc<Notify>> = OnceCell::new();
static BURROW_HANDLE: OnceCell<Handle> = OnceCell::new(); static BURROW_HANDLE: OnceCell<Handle> = OnceCell::new();
static BURROW_READY: OnceCell<()> = OnceCell::new();
static BURROW_SPAWN_LOCK: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));
#[no_mangle] #[no_mangle]
pub unsafe extern "C" fn spawn_in_process(path: *const c_char, db_path: *const c_char) { pub unsafe extern "C" fn spawn_in_process(path: *const c_char, db_path: *const c_char) {
crate::tracing::initialize();
let notify = BURROW_NOTIFY.get_or_init(|| Arc::new(Notify::new()));
let handle = BURROW_HANDLE.get_or_init(|| {
let path_buf = if path.is_null() { let path_buf = if path.is_null() {
None None
} else { } else {
@ -30,19 +33,6 @@ pub unsafe extern "C" fn spawn_in_process(path: *const c_char, db_path: *const c
} else { } else {
Some(PathBuf::from(CStr::from_ptr(db_path).to_str().unwrap())) Some(PathBuf::from(CStr::from_ptr(db_path).to_str().unwrap()))
}; };
spawn_in_process_with_paths(path_buf, db_path_buf);
}
pub fn spawn_in_process_with_paths(path_buf: Option<PathBuf>, db_path_buf: Option<PathBuf>) {
crate::tracing::initialize();
let _guard = BURROW_SPAWN_LOCK.lock().unwrap();
if BURROW_READY.get().is_some() {
return;
}
let notify = Arc::new(Notify::new());
let handle = BURROW_HANDLE.get_or_init(|| {
let sender = notify.clone(); let sender = notify.clone();
let (handle_tx, handle_rx) = tokio::sync::oneshot::channel(); let (handle_tx, handle_rx) = tokio::sync::oneshot::channel();
@ -72,5 +62,4 @@ pub fn spawn_in_process_with_paths(path_buf: Option<PathBuf>, db_path_buf: Optio
let receiver = notify.clone(); let receiver = notify.clone();
handle.block_on(async move { receiver.notified().await }); handle.block_on(async move { receiver.notified().await });
let _ = BURROW_READY.set(());
} }

View file

@ -16,10 +16,10 @@ pub(crate) mod tracing;
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
pub mod usernet; pub mod usernet;
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(target_vendor = "apple")]
pub use daemon::apple::{spawn_in_process, spawn_in_process_with_paths}; pub use daemon::apple::spawn_in_process;
#[cfg(any(target_os = "linux", target_vendor = "apple"))] #[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub use daemon::{ pub use daemon::{
rpc::grpc_defs, rpc::BurrowClient, rpc::DaemonResponse, rpc::ServerInfo, DaemonClient, rpc::DaemonResponse, rpc::ServerInfo, DaemonClient, DaemonCommand, DaemonResponseData,
DaemonCommand, DaemonResponseData, DaemonStartOptions, DaemonStartOptions,
}; };

View file

@ -15,7 +15,7 @@ Note that the flatpak version can compile but will not run properly!
1. Install build dependencies 1. Install build dependencies
``` ```
sudo apt install -y clang meson cmake pkg-config libssl-dev libgtk-4-dev libadwaita-1-dev gettext desktop-file-utils sudo apt install -y clang meson cmake pkg-config libgtk-4-dev libadwaita-1-dev gettext desktop-file-utils
``` ```
2. Install flatpak builder (Optional) 2. Install flatpak builder (Optional)
@ -38,7 +38,7 @@ Note that the flatpak version can compile but will not run properly!
1. Install build dependencies 1. Install build dependencies
``` ```
sudo dnf install -y clang ninja-build cmake meson openssl-devel gtk4-devel glib2-devel libadwaita-devel desktop-file-utils libappstream-glib sudo dnf install -y clang ninja-build cmake meson gtk4-devel glib2-devel libadwaita-devel desktop-file-utils libappstream-glib
``` ```
2. Install flatpak builder (Optional) 2. Install flatpak builder (Optional)
@ -61,7 +61,7 @@ Note that the flatpak version can compile but will not run properly!
1. Install build dependencies 1. Install build dependencies
``` ```
sudo xbps-install -Sy gcc clang meson cmake pkg-config openssl-devel gtk4-devel gettext desktop-file-utils gtk4-update-icon-cache appstream-glib sudo xbps-install -Sy gcc clang meson cmake pkg-config gtk4-devel gettext desktop-file-utils gtk4-update-icon-cache appstream-glib
``` ```
2. Install flatpak builder (Optional) 2. Install flatpak builder (Optional)
@ -88,12 +88,6 @@ flatpak install --user \
## Building ## Building
With Nix, enter the focused GTK shell before running the Meson build:
```bash
nix develop .#gtk
```
<details> <details>
<summary>General</summary> <summary>General</summary>
@ -145,16 +139,6 @@ nix develop .#gtk
## Running ## Running
The GTK app mirrors the Apple home surface: a Burrow header, Networks carousel,
Accounts section, Tunnel action, and the same add flows for WireGuard, Tor, and
Tailnet. It talks to the daemon over the same gRPC API used by Apple clients for
network storage, tunnel state, Tailnet discovery, authority probing, browser
sign-in, and Tailnet payloads.
On Linux the GTK app first looks for a daemon on the configured gRPC socket. If
none is reachable, it starts an embedded user-scoped daemon with a socket under
`XDG_RUNTIME_DIR` and a database under `XDG_DATA_HOME` before refreshing the UI.
<details> <details>
<summary>General</summary> <summary>General</summary>

View file

@ -44,7 +44,6 @@ Burrow should formalize one Apple/runtime boundary: Apple clients speak only to
- Keeping control-plane I/O out of Swift UI reduces accidental secret, token, and callback sprawl across app code. - Keeping control-plane I/O out of Swift UI reduces accidental secret, token, and callback sprawl across app code.
- The daemon boundary makes testing and kill-switch behavior tractable because runtime integration is localized. - The daemon boundary makes testing and kill-switch behavior tractable because runtime integration is localized.
- Apple daemon lifecycle ownership must be explicit: either the app ensures the daemon is running before RPC or the extension owns it and the UI surfaces daemon-unavailable state clearly. - Apple daemon lifecycle ownership must be explicit: either the app ensures the daemon is running before RPC or the extension owns it and the UI surfaces daemon-unavailable state clearly.
- Non-Apple presentation clients should follow the same daemon-first lifecycle pattern: connect to a managed daemon when present, or start a user-scoped embedded daemon before issuing RPCs, without adding platform-local control-plane paths.
## Contributor Playbook ## Contributor Playbook
@ -55,7 +54,6 @@ Burrow should formalize one Apple/runtime boundary: Apple clients speak only to
- daemon unavailable behavior - daemon unavailable behavior
- successful RPC path - successful RPC path
- error propagation through the UI - error propagation through the UI
- Keep Linux GTK and Apple clients visually and functionally aligned around the same daemon-backed home surface: Networks, Accounts, Tunnel, and add flows should remain corresponding views over the daemon API.
## Alternatives Considered ## Alternatives Considered
@ -65,7 +63,6 @@ Burrow should formalize one Apple/runtime boundary: Apple clients speak only to
## Impact on Other Work ## Impact on Other Work
- Governs the Tailnet refactor and future Apple runtime work. - Governs the Tailnet refactor and future Apple runtime work.
- Governs Linux GTK daemon startup parity where the same daemon API is reused from a user-scoped presentation process.
- Interacts with BEP-0002 control-plane bootstrap and BEP-0003 transport refactoring. - Interacts with BEP-0002 control-plane bootstrap and BEP-0003 transport refactoring.
## Decision ## Decision

View file

@ -37,7 +37,6 @@ Burrow should treat Tailnet as one protocol family. Tailscale-managed and self-h
- Burrow-owned authority when explicitly applicable - Burrow-owned authority when explicitly applicable
- Discovery returns authority and related metadata; editing the authority is the mechanism that moves a configuration from managed default to custom control server. - Discovery returns authority and related metadata; editing the authority is the mechanism that moves a configuration from managed default to custom control server.
- The daemon and control layer own provider inference; the UI should primarily present “Tailnet” plus the selected authority. - The daemon and control layer own provider inference; the UI should primarily present “Tailnet” plus the selected authority.
- Platform clients consume the same daemon gRPC surface for Tailnet discovery, authority probing, browser sign-in, and saved network payloads. macOS/iOS SwiftUI and Linux GTK may differ in presentation and local credential stores, but neither should introduce a second control-plane path.
## Security and Operational Considerations ## Security and Operational Considerations
@ -49,7 +48,6 @@ Burrow should treat Tailnet as one protocol family. Tailscale-managed and self-h
- Remove provider pickers from Tailnet UI unless a concrete protocol difference requires one. - Remove provider pickers from Tailnet UI unless a concrete protocol difference requires one.
- Store the authority explicitly in payloads and infer provider internally only when needed. - Store the authority explicitly in payloads and infer provider internally only when needed.
- Keep Linux GTK and Apple clients at functional parity by routing Tailnet add/discover/probe/login through `TailnetControl` and `Networks` RPCs instead of platform-local HTTP or legacy JSON daemon commands.
- Prefer tests that validate authority normalization and discovery behavior over UI-provider branching. - Prefer tests that validate authority normalization and discovery behavior over UI-provider branching.
## Alternatives Considered ## Alternatives Considered
@ -60,7 +58,7 @@ Burrow should treat Tailnet as one protocol family. Tailscale-managed and self-h
## Impact on Other Work ## Impact on Other Work
- Refines BEP-0002's Tailscale-shaped control-plane work. - Refines BEP-0002's Tailscale-shaped control-plane work.
- Constrains the Tailnet Apple and Linux GTK refactors plus future daemon control-plane storage. - Constrains the Tailnet Apple refactor and future daemon control-plane storage.
## Decision ## Decision
@ -70,5 +68,4 @@ Pending.
- `burrow/src/control/` - `burrow/src/control/`
- `Apple/UI/Networks/` - `Apple/UI/Networks/`
- `burrow-gtk/src/`
- `proto/burrow.proto` - `proto/burrow.proto`

View file

@ -2,11 +2,6 @@
let let
cfg = config.services.burrow.zulip; cfg = config.services.burrow.zulip;
realmSignupDomain =
let
parts = lib.splitString "@" cfg.administratorEmail;
in
if builtins.length parts == 2 then builtins.elemAt parts 1 else cfg.domain;
yamlFormat = pkgs.formats.yaml { }; yamlFormat = pkgs.formats.yaml { };
composeFile = yamlFormat.generate "burrow-zulip-compose.yaml" { composeFile = yamlFormat.generate "burrow-zulip-compose.yaml" {
services = { services = {
@ -357,7 +352,6 @@ services:
USE_X_FORWARDED_HOST = True USE_X_FORWARDED_HOST = True
SESSION_COOKIE_SECURE = True SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True CSRF_COOKIE_SECURE = True
CSRF_TRUSTED_ORIGINS = ["https://${cfg.domain}"]
SOCIAL_AUTH_REDIRECT_IS_HTTPS = True SOCIAL_AUTH_REDIRECT_IS_HTTPS = True
SOCIAL_AUTH_SAML_REDIRECT_IS_HTTPS = True SOCIAL_AUTH_SAML_REDIRECT_IS_HTTPS = True
SOCIAL_AUTH_SAML_SP_ENTITY_ID = "https://${cfg.domain}" SOCIAL_AUTH_SAML_SP_ENTITY_ID = "https://${cfg.domain}"
@ -390,7 +384,7 @@ services:
}, },
} }
SOCIAL_AUTH_SYNC_ATTRS_DICT = { SOCIAL_AUTH_SYNC_ATTRS_DICT = {
"": { "authentik": {
"saml": { "saml": {
"role": "zulip_role", "role": "zulip_role",
}, },
@ -460,70 +454,17 @@ EOF
chmod 0600 "$zulip_data_dir/secrets/bootstrap-owner-password" chmod 0600 "$zulip_data_dir/secrets/bootstrap-owner-password"
} }
wait_for_zulip_supervisor() { bootstrap_realm_if_needed() {
local realm_exists
local attempts=0 local attempts=0
while ! podman exec burrow-zulip_zulip_1 supervisorctl status >/dev/null 2>&1; do while ! podman exec burrow-zulip_zulip_1 test -r /etc/zulip/zulip-secrets.conf >/dev/null 2>&1; do
attempts=$((attempts + 1)) attempts=$((attempts + 1))
if [ "$attempts" -ge 90 ]; then if [ "$attempts" -ge 90 ]; then
echo "error: Zulip supervisor did not become ready" >&2 echo "error: Zulip did not finish generating production secrets" >&2
exit 1 exit 1
fi fi
sleep 2 sleep 2
done done
}
patch_uwsgi_scheme_handling() {
wait_for_zulip_supervisor
podman exec burrow-zulip_zulip_1 bash -lc "cat > /etc/nginx/zulip-include/trusted-proto <<'EOF'
map \$remote_addr \$trusted_x_forwarded_proto {
default \$scheme;
127.0.0.1 \$http_x_forwarded_proto;
::1 \$http_x_forwarded_proto;
172.31.1.1 \$http_x_forwarded_proto;
}
map \$remote_addr \$trusted_x_forwarded_for {
default \"\";
127.0.0.1 \$http_x_forwarded_for;
::1 \$http_x_forwarded_for;
172.31.1.1 \$http_x_forwarded_for;
}
map \$remote_addr \$x_proxy_misconfiguration {
default \"\";
}
EOF
cat > /etc/nginx/uwsgi_params <<'EOF'
uwsgi_param QUERY_STRING \$query_string;
uwsgi_param REQUEST_METHOD \$request_method;
uwsgi_param CONTENT_TYPE \$content_type;
uwsgi_param CONTENT_LENGTH \$content_length;
uwsgi_param REQUEST_URI \$request_uri;
uwsgi_param PATH_INFO \$document_uri;
uwsgi_param DOCUMENT_ROOT \$document_root;
uwsgi_param SERVER_PROTOCOL \$server_protocol;
uwsgi_param REQUEST_SCHEME \$trusted_x_forwarded_proto;
uwsgi_param HTTPS on;
uwsgi_param REMOTE_ADDR \$remote_addr;
uwsgi_param REMOTE_PORT \$remote_port;
uwsgi_param SERVER_ADDR \$server_addr;
uwsgi_param SERVER_PORT \$server_port;
uwsgi_param SERVER_NAME \$server_name;
uwsgi_param HTTP_X_REAL_IP \$remote_addr;
uwsgi_param HTTP_X_FORWARDED_PROTO \$trusted_x_forwarded_proto;
uwsgi_param HTTP_X_FORWARDED_SSL \"\";
uwsgi_param HTTP_X_PROXY_MISCONFIGURATION \$x_proxy_misconfiguration;
# This value is the default, and is provided for explicitness; it must
# be longer than the configured 55s harakiri timeout in uwsgi
uwsgi_read_timeout 60s;
uwsgi_pass django;
EOF
supervisorctl restart nginx zulip-django >/dev/null"
}
bootstrap_realm_if_needed() {
wait_for_zulip_supervisor
local realm_exists
realm_exists="$( realm_exists="$(
podman exec burrow-zulip_zulip_1 bash -lc \ podman exec burrow-zulip_zulip_1 bash -lc \
@ -553,23 +494,6 @@ supervisorctl restart nginx zulip-django >/dev/null"
podman exec burrow-zulip_zulip_1 su zulip -c "$create_realm_cmd" podman exec burrow-zulip_zulip_1 su zulip -c "$create_realm_cmd"
} }
reconcile_realm_policy() {
wait_for_zulip_supervisor
local realm_id
realm_id="$(
podman exec burrow-zulip_zulip_1 bash -lc \
"su zulip -c '/home/zulip/deployments/current/manage.py list_realms'" \
| awk '$NF == "https://${cfg.domain}" { print $1 }'
)"
podman exec burrow-zulip_zulip_1 su zulip -c \
"/home/zulip/deployments/current/manage.py realm_domain --op add -r $realm_id ${realmSignupDomain} --allow-subdomains --automated" \
>/dev/null 2>&1 || true
podman exec burrow-zulip_zulip_1 su zulip -c \
"/home/zulip/deployments/current/manage.py shell -c 'from zerver.models import Realm; realm = Realm.objects.get(id=$realm_id); realm.invite_required = False; realm.save(update_fields=[\"invite_required\"])'"
}
if [ ! -e .initialized ]; then if [ ! -e .initialized ]; then
compose pull compose pull
compose run --rm -T zulip app:init compose run --rm -T zulip app:init
@ -579,8 +503,6 @@ supervisorctl restart nginx zulip-django >/dev/null"
ensure_zulip_data_layout ensure_zulip_data_layout
compose up -d zulip compose up -d zulip
bootstrap_realm_if_needed bootstrap_realm_if_needed
reconcile_realm_policy
patch_uwsgi_scheme_handling
''; '';
}; };
}; };