Align GTK app with Apple home surface
Some checks failed
Build Rust / Cargo Test (push) Successful in 3m50s
Build Site / Next.js Build (push) Failing after 2s
Lint Governance / BEP Metadata (push) Successful in 0s

Add the GTK home screen, local account store, daemon gRPC wrapper, and embedded Linux daemon startup path so the Linux app follows the Apple client UX and daemon boundary.

Document the GTK parity expectations and update the daemon IPC and Tailnet BEPs with the cross-platform client model.
This commit is contained in:
Conrad Kramer 2026-05-03 17:36:55 -07:00
parent 9244a0476a
commit 97c569fb35
12 changed files with 1861 additions and 110 deletions

View file

@ -11,6 +11,8 @@ relm4 = { version = "0.6", features = ["libadwaita", "gnome_44"]}
burrow = { version = "*", path = "../burrow/" }
tokio = { version = "1.35.0", features = ["time", "sync"] }
gettext-rs = { version = "0.7.0", features = ["gettext-system"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
[build-dependencies]
anyhow = "1.0"

View file

@ -0,0 +1,139 @@
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use std::{
path::PathBuf,
time::{SystemTime, UNIX_EPOCH},
};
/// One locally persisted account entry shown on the GTK home surface.
///
/// Serialized as JSON by `load`/`persist` in this module, so field names are
/// part of the on-disk format.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AccountRecord {
    // Stable identifier generated by `new_record`.
    pub id: String,
    pub kind: AccountKind,
    pub title: String,
    // Control-server authority (Tailnet); None for kinds that have none.
    pub authority: Option<String>,
    pub account: String,
    pub identity: String,
    pub hostname: Option<String>,
    pub tailnet: Option<String>,
    pub note: Option<String>,
    // Unix timestamps in whole seconds (see `timestamp`).
    pub created_at: u64,
    pub updated_at: u64,
}
/// The protocol family an account belongs to.
///
/// Serialized in lowercase ("wireguard", "tor", "tailnet") — part of the
/// on-disk JSON format.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AccountKind {
    WireGuard,
    Tor,
    Tailnet,
}
impl AccountKind {
pub fn title(self) -> &'static str {
match self {
Self::WireGuard => "WireGuard",
Self::Tor => "Tor",
Self::Tailnet => "Tailnet",
}
}
fn sort_rank(self) -> u8 {
match self {
Self::Tailnet => 0,
Self::Tor => 1,
Self::WireGuard => 2,
}
}
}
/// Read the persisted account list, returning an empty list when the store
/// file does not exist yet.
pub fn load() -> Result<Vec<AccountRecord>> {
    let path = storage_path()?;
    if !path.exists() {
        return Ok(Vec::new());
    }
    let bytes =
        std::fs::read(&path).with_context(|| format!("failed to read {}", path.display()))?;
    let accounts = serde_json::from_slice(&bytes)
        .with_context(|| format!("failed to parse {}", path.display()))?;
    Ok(accounts)
}
/// Insert `record`, or replace the stored record with the same `id`, then
/// persist and return the full sorted account list.
///
/// `updated_at` is always refreshed; `created_at` is backfilled only when the
/// record is new (zero).
pub fn upsert(mut record: AccountRecord) -> Result<Vec<AccountRecord>> {
    let mut accounts = load()?;
    let now = timestamp();
    record.updated_at = now;
    if record.created_at == 0 {
        record.created_at = now;
    }
    if let Some(index) = accounts.iter().position(|account| account.id == record.id) {
        accounts[index] = record;
    } else {
        accounts.push(record);
    }
    // Sort by kind rank, then case-insensitively by title. `sort_by_cached_key`
    // lowercases each title once instead of reallocating on every comparison.
    accounts
        .sort_by_cached_key(|account| (account.kind.sort_rank(), account.title.to_lowercase()));
    persist(&accounts)?;
    Ok(accounts)
}
pub fn new_record(
kind: AccountKind,
title: String,
authority: Option<String>,
account: String,
identity: String,
hostname: Option<String>,
tailnet: Option<String>,
note: Option<String>,
) -> AccountRecord {
let now = timestamp();
AccountRecord {
id: format!("{}-{now}", kind.title().to_ascii_lowercase()),
kind,
title,
authority,
account,
identity,
hostname,
tailnet,
note,
created_at: now,
updated_at: now,
}
}
/// Serialize the account list as pretty-printed JSON and write it to the
/// store file, creating the parent directory first when needed.
fn persist(accounts: &[AccountRecord]) -> Result<()> {
    let path = storage_path()?;
    if let Some(parent) = path.parent() {
        std::fs::create_dir_all(parent)
            .with_context(|| format!("failed to create {}", parent.display()))?;
    }
    let encoded =
        serde_json::to_vec_pretty(accounts).context("failed to encode account store")?;
    std::fs::write(&path, encoded)
        .with_context(|| format!("failed to write {}", path.display()))
}
/// Resolve the path of the JSON account store.
///
/// Follows the XDG base-directory convention: `$XDG_DATA_HOME/burrow/accounts.json`,
/// falling back to `~/.local/share/burrow/accounts.json`, and finally to the
/// system temp directory when neither variable is usable. Empty environment
/// values are treated as unset, as the XDG spec requires.
fn storage_path() -> Result<PathBuf> {
    if let Some(data_home) = std::env::var_os("XDG_DATA_HOME").filter(|v| !v.is_empty()) {
        return Ok(PathBuf::from(data_home)
            .join("burrow")
            .join("accounts.json"));
    }
    if let Some(home) = std::env::var_os("HOME").filter(|v| !v.is_empty()) {
        return Ok(PathBuf::from(home)
            .join(".local")
            .join("share")
            .join("burrow")
            .join("accounts.json"));
    }
    Ok(std::env::temp_dir().join("burrow-accounts.json"))
}
/// Current Unix time in whole seconds; 0 when the system clock reads before
/// the epoch.
fn timestamp() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(duration) => duration.as_secs(),
        Err(_) => 0,
    }
}

View file

@ -1,24 +1,19 @@
use super::*;
use anyhow::Context;
use std::time::Duration;
const RECONNECT_POLL_TIME: Duration = Duration::from_secs(5);
pub struct App {
daemon_client: Arc<Mutex<Option<DaemonClient>>>,
settings_screen: Controller<settings_screen::SettingsScreen>,
switch_screen: AsyncController<switch_screen::SwitchScreen>,
_home_screen: AsyncController<home_screen::HomeScreen>,
}
#[derive(Debug)]
pub enum AppMsg {
None,
PostInit,
}
impl App {
pub fn run() {
let app = RelmApp::new(config::ID);
relm4::set_global_css(APP_CSS);
Self::setup_gresources().unwrap();
Self::setup_i18n().unwrap();
@ -49,7 +44,7 @@ impl AsyncComponent for App {
view! {
adw::Window {
set_title: Some("Burrow"),
set_default_size: (640, 480),
set_default_size: (900, 760),
}
}
@ -58,100 +53,84 @@ impl AsyncComponent for App {
root: Self::Root,
sender: AsyncComponentSender<Self>,
) -> AsyncComponentParts<Self> {
let daemon_client = Arc::new(Mutex::new(DaemonClient::new().await.ok()));
let switch_screen = switch_screen::SwitchScreen::builder()
.launch(switch_screen::SwitchScreenInit {
daemon_client: Arc::clone(&daemon_client),
})
.forward(sender.input_sender(), |_| AppMsg::None);
let settings_screen = settings_screen::SettingsScreen::builder()
.launch(settings_screen::SettingsScreenInit {
daemon_client: Arc::clone(&daemon_client),
})
let home_screen = home_screen::HomeScreen::builder()
.launch(())
.forward(sender.input_sender(), |_| AppMsg::None);
let widgets = view_output!();
let view_stack = adw::ViewStack::new();
view_stack.add_titled(switch_screen.widget(), None, "Switch");
view_stack.add_titled(settings_screen.widget(), None, "Settings");
let view_switcher_bar = adw::ViewSwitcherBar::builder().stack(&view_stack).build();
view_switcher_bar.set_reveal(true);
// When libadwaita 1.4 support becomes more available, this approach is more appropriate
//
// let toolbar = adw::ToolbarView::new();
// toolbar.add_top_bar(
// &adw::HeaderBar::builder()
// .title_widget(&gtk::Label::new(Some("Burrow")))
// .build(),
// );
// toolbar.add_bottom_bar(&view_switcher_bar);
// toolbar.set_content(Some(&view_stack));
// root.set_content(Some(&toolbar));
let content = gtk::Box::new(gtk::Orientation::Vertical, 0);
content.append(
&adw::HeaderBar::builder()
.title_widget(&gtk::Label::new(Some("Burrow")))
.build(),
);
content.append(&view_stack);
content.append(&view_switcher_bar);
content.append(home_screen.widget());
root.set_content(Some(&content));
sender.input(AppMsg::PostInit);
let model = App {
daemon_client,
switch_screen,
settings_screen,
};
let model = App { _home_screen: home_screen };
AsyncComponentParts { model, widgets }
}
async fn update(
&mut self,
_msg: Self::Input,
msg: Self::Input,
_sender: AsyncComponentSender<Self>,
_root: &Self::Root,
) {
loop {
tokio::time::sleep(RECONNECT_POLL_TIME).await;
{
let mut daemon_client = self.daemon_client.lock().await;
let mut disconnected_daemon_client = false;
if let Some(daemon_client) = daemon_client.as_mut() {
if let Err(_e) = daemon_client.send_command(DaemonCommand::ServerInfo).await {
disconnected_daemon_client = true;
self.switch_screen
.emit(switch_screen::SwitchScreenMsg::DaemonDisconnect);
self.settings_screen
.emit(settings_screen::SettingsScreenMsg::DaemonStateChange)
}
}
if disconnected_daemon_client || daemon_client.is_none() {
match DaemonClient::new().await {
Ok(new_daemon_client) => {
*daemon_client = Some(new_daemon_client);
self.switch_screen
.emit(switch_screen::SwitchScreenMsg::DaemonReconnect);
self.settings_screen
.emit(settings_screen::SettingsScreenMsg::DaemonStateChange)
}
Err(_e) => {
// TODO: Handle Error
}
}
}
}
match msg {
AppMsg::None => {}
}
}
}
// Global stylesheet installed via `relm4::set_global_css` in `App::run`:
// rounded card styling for the home surface (empty state, summary card, and
// per-network cards with WireGuard/Tailnet gradient backgrounds).
const APP_CSS: &str = r#"
.empty-state {
    border-radius: 18px;
    padding: 22px;
    background: alpha(@card_bg_color, 0.72);
}
.summary-card {
    border-radius: 18px;
    padding: 14px;
    background: alpha(@card_bg_color, 0.72);
}
.network-card {
    border-radius: 10px;
    padding: 16px;
    box-shadow: 0 2px 6px alpha(black, 0.14);
}
.wireguard-card {
    background: linear-gradient(135deg, #3277d8, #174ea6);
}
.tailnet-card {
    background: linear-gradient(135deg, #31b891, #147d69);
}
.network-card-kind,
.network-card-title,
.network-card-detail {
    color: white;
}
.network-card-kind {
    opacity: 0.86;
    font-weight: 700;
}
.network-card-title {
    font-size: 1.22em;
    font-weight: 700;
}
.network-card-detail {
    opacity: 0.92;
    font-family: monospace;
}
"#;

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,6 @@
use super::*;
use crate::daemon_api;
use adw::prelude::*;
use burrow::{DaemonClient, DaemonCommand, DaemonResponseData};
use gtk::Align;
use relm4::{
component::{
@ -9,13 +9,9 @@ use relm4::{
},
prelude::*,
};
use std::sync::Arc;
use tokio::sync::Mutex;
mod app;
mod settings;
mod settings_screen;
mod switch_screen;
mod home_screen;
pub use app::*;
pub use settings::{DaemonGroupMsg, DiagGroupMsg};
pub use home_screen::{HomeScreen, HomeScreenMsg};

View file

@ -0,0 +1,420 @@
use anyhow::{anyhow, Context, Result};
use burrow::{
    control::{TailnetConfig, TailnetProvider},
    grpc_defs::{
        Empty, Network, NetworkType, State, TailnetDiscoverRequest, TailnetLoginCancelRequest,
        TailnetLoginStartRequest, TailnetLoginStatusRequest, TailnetProbeRequest,
    },
    BurrowClient,
};
use std::{
    path::{Path, PathBuf},
    sync::OnceLock,
};
use tokio::time::{timeout, Duration};
const RPC_TIMEOUT: Duration = Duration::from_secs(3);
const MANAGED_TAILSCALE_AUTHORITY: &str = "https://controlplane.tailscale.com";
static EMBEDDED_DAEMON_STARTED: OnceLock<()> = OnceLock::new();
/// Coarse daemon tunnel state as reported by the `tunnel_status` RPC.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TunnelState {
    Running,
    Stopped,
}

/// One saved network reduced to what a home-screen card displays.
#[derive(Debug, Clone)]
pub struct NetworkSummary {
    pub id: i32,
    pub title: String,
    pub detail: String,
}

/// Result of Tailnet authority discovery for an email address.
#[derive(Debug, Clone)]
pub struct TailnetDiscovery {
    pub authority: String,
    // Mirrors the RPC's `managed` flag — presumably true for the managed
    // Tailscale control plane; confirm against the daemon proto.
    pub managed: bool,
    pub oidc_issuer: Option<String>,
}

/// Result of probing a Tailnet control-server authority.
#[derive(Debug, Clone)]
pub struct TailnetProbe {
    pub summary: String,
    pub detail: Option<String>,
    pub status_code: i32,
}

/// Snapshot of an in-progress or completed Tailnet browser sign-in session.
#[derive(Debug, Clone)]
pub struct TailnetLoginStatus {
    pub session_id: String,
    pub backend_state: String,
    // URL to open in a browser when the daemon requests interactive login.
    pub auth_url: Option<String>,
    pub running: bool,
    pub needs_login: bool,
    pub tailnet_name: Option<String>,
    pub self_dns_name: Option<String>,
    pub tailnet_ips: Vec<String>,
    pub health: Vec<String>,
}
/// The default Tailnet control-plane authority (managed Tailscale).
pub fn default_tailnet_authority() -> &'static str {
    MANAGED_TAILSCALE_AUTHORITY
}
/// Point the Burrow client library at the user-scoped daemon socket, unless
/// the caller already set `BURROW_SOCKET_PATH` explicitly.
pub fn configure_client_paths() -> Result<()> {
    if std::env::var_os("BURROW_SOCKET_PATH").is_none() {
        std::env::set_var("BURROW_SOCKET_PATH", default_socket_path()?);
    }
    Ok(())
}
/// Make sure a Burrow daemon is reachable, starting an embedded user-scoped
/// daemon when none answers on the configured socket.
///
/// Errors when startup joining fails or when the freshly started daemon does
/// not answer the tunnel-status RPC.
///
/// NOTE(review): the `get()`-then-`set()` pair on `EMBEDDED_DAEMON_STARTED`
/// is not atomic — two concurrent callers could both enter the spawn branch.
/// `spawn_in_process_with_paths` appears to guard against double-start
/// internally; confirm before relying on it.
pub async fn ensure_daemon() -> Result<()> {
    configure_client_paths()?;
    if daemon_available().await {
        return Ok(());
    }
    let socket_path = socket_path()?;
    let db_path = database_path()?;
    ensure_parent(&socket_path)?;
    ensure_parent(&db_path)?;
    if EMBEDDED_DAEMON_STARTED.get().is_none() {
        // Daemon bootstrap blocks until ready, so run it off the async runtime.
        tokio::task::spawn_blocking(move || {
            burrow::spawn_in_process_with_paths(Some(socket_path), Some(db_path));
        })
        .await
        .context("failed to join embedded daemon startup")?;
        let _ = EMBEDDED_DAEMON_STARTED.set(());
    }
    tunnel_state()
        .await
        .map(|_| ())
        .context("Burrow daemon started but did not accept tunnel status RPCs")
}
/// Map a control-server authority onto a provider: the managed Tailscale
/// control plane (with or without scheme, case-insensitive, ignoring
/// trailing slashes) is Tailscale; any other authority is treated as
/// Headscale.
pub fn infer_tailnet_provider(authority: &str) -> TailnetProvider {
    let normalized = authority.trim().trim_end_matches('/').to_ascii_lowercase();
    let managed = matches!(
        normalized.as_str(),
        "controlplane.tailscale.com" | "http://controlplane.tailscale.com"
    ) || normalized == MANAGED_TAILSCALE_AUTHORITY;
    if managed {
        TailnetProvider::Tailscale
    } else {
        TailnetProvider::Headscale
    }
}
/// True when a daemon answers the tunnel-status RPC on the configured socket.
pub async fn daemon_available() -> bool {
    tunnel_state().await.is_ok()
}
/// The daemon socket path: the `BURROW_SOCKET_PATH` override when set,
/// otherwise the user-scoped default.
fn socket_path() -> Result<PathBuf> {
    match std::env::var_os("BURROW_SOCKET_PATH") {
        Some(path) => Ok(PathBuf::from(path)),
        None => default_socket_path(),
    }
}
/// Default user-scoped daemon socket: `$XDG_RUNTIME_DIR/burrow.sock`, falling
/// back to a per-uid path under `/tmp`. An empty `XDG_RUNTIME_DIR` is treated
/// as unset, as the XDG spec requires.
fn default_socket_path() -> Result<PathBuf> {
    if let Some(runtime_dir) = std::env::var_os("XDG_RUNTIME_DIR").filter(|v| !v.is_empty()) {
        return Ok(PathBuf::from(runtime_dir).join("burrow.sock"));
    }
    // NOTE(review): `UID` is a shell variable and is usually not exported, so
    // this fallback will often resolve to `/tmp/burrow-1000.sock` regardless
    // of the real uid — confirm whether a libc getuid() call is wanted here.
    let uid = std::env::var("UID").unwrap_or_else(|_| "1000".to_owned());
    Ok(PathBuf::from(format!("/tmp/burrow-{uid}.sock")))
}
/// Daemon database path: the `BURROW_DB_PATH` override, else
/// `$XDG_DATA_HOME/burrow/burrow.db`, else `~/.local/share/burrow/burrow.db`,
/// else the system temp directory. Empty XDG/HOME values are treated as
/// unset, as the XDG spec requires.
fn database_path() -> Result<PathBuf> {
    if let Some(path) = std::env::var_os("BURROW_DB_PATH") {
        return Ok(PathBuf::from(path));
    }
    if let Some(data_home) = std::env::var_os("XDG_DATA_HOME").filter(|v| !v.is_empty()) {
        return Ok(PathBuf::from(data_home).join("burrow").join("burrow.db"));
    }
    if let Some(home) = std::env::var_os("HOME").filter(|v| !v.is_empty()) {
        return Ok(PathBuf::from(home)
            .join(".local")
            .join("share")
            .join("burrow")
            .join("burrow.db"));
    }
    Ok(std::env::temp_dir().join("burrow.db"))
}
fn ensure_parent(path: &PathBuf) -> Result<()> {
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent)
.with_context(|| format!("failed to create {}", parent.display()))?;
}
Ok(())
}
/// Query the daemon's tunnel state via the streaming `tunnel_status` RPC,
/// reading exactly one status message.
///
/// Errors when connecting, starting the RPC, or reading the first message
/// exceeds `RPC_TIMEOUT`, or when the stream ends without a state.
pub async fn tunnel_state() -> Result<TunnelState> {
    let mut client = BurrowClient::from_uds().await?;
    let mut stream = timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_status(Empty {}))
        .await
        .context("timed out connecting to Burrow daemon")??
        .into_inner();
    let status = timeout(RPC_TIMEOUT, stream.message())
        .await
        .context("timed out reading Burrow tunnel status")??
        .context("Burrow daemon ended the status stream without a state")?;
    Ok(match status.state() {
        State::Running => TunnelState::Running,
        State::Stopped => TunnelState::Stopped,
    })
}
/// Ask the daemon to start the tunnel, bounded by `RPC_TIMEOUT`.
pub async fn start_tunnel() -> Result<()> {
    let mut client = BurrowClient::from_uds().await?;
    timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_start(Empty {}))
        .await
        .context("timed out starting Burrow tunnel")??;
    Ok(())
}
/// Ask the daemon to stop the tunnel, bounded by `RPC_TIMEOUT`.
pub async fn stop_tunnel() -> Result<()> {
    let mut client = BurrowClient::from_uds().await?;
    timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_stop(Empty {}))
        .await
        .context("timed out stopping Burrow tunnel")??;
    Ok(())
}
/// Fetch the saved networks as display-ready summaries, reading a single
/// snapshot from the streaming `network_list` RPC.
pub async fn list_networks() -> Result<Vec<NetworkSummary>> {
    let mut client = BurrowClient::from_uds().await?;
    let mut stream = timeout(RPC_TIMEOUT, client.networks_client.network_list(Empty {}))
        .await
        .context("timed out connecting to Burrow network list")??
        .into_inner();
    let response = timeout(RPC_TIMEOUT, stream.message())
        .await
        .context("timed out reading Burrow network list")??
        .context("Burrow daemon ended the network stream without a snapshot")?;
    Ok(response.network.iter().map(summarize_network).collect())
}
/// Save a raw WireGuard configuration as a new network; returns the new id.
pub async fn add_wireguard(config: String) -> Result<i32> {
    add_network(NetworkType::WireGuard, config.into_bytes()).await
}
/// Save a Tailnet network; returns the new id.
///
/// The provider is inferred from the authority and the configuration is
/// stored as a pretty-printed JSON `TailnetConfig` payload.
pub async fn add_tailnet(
    authority: String,
    account: String,
    identity: String,
    hostname: Option<String>,
    tailnet: Option<String>,
) -> Result<i32> {
    let provider = infer_tailnet_provider(&authority);
    let config = TailnetConfig {
        provider,
        authority: Some(authority),
        account: Some(account),
        identity: Some(identity),
        hostname,
        tailnet,
    };
    let payload = serde_json::to_vec_pretty(&config)?;
    add_network(NetworkType::Tailnet, payload).await
}
/// Ask the daemon to discover the Tailnet authority for `email`.
pub async fn discover_tailnet(email: String) -> Result<TailnetDiscovery> {
    let mut client = BurrowClient::from_uds().await?;
    let response = timeout(
        RPC_TIMEOUT,
        client
            .tailnet_client
            .discover(TailnetDiscoverRequest { email }),
    )
    .await
    .context("timed out discovering Tailnet authority")??
    .into_inner();
    Ok(TailnetDiscovery {
        authority: response.authority,
        managed: response.managed,
        // Empty issuer strings are normalized to None.
        oidc_issuer: optional(response.oidc_issuer),
    })
}
/// Ask the daemon to probe a Tailnet control-server authority.
pub async fn probe_tailnet(authority: String) -> Result<TailnetProbe> {
    let mut client = BurrowClient::from_uds().await?;
    let response = timeout(
        RPC_TIMEOUT,
        client
            .tailnet_client
            .probe(TailnetProbeRequest { authority }),
    )
    .await
    .context("timed out probing Tailnet authority")??
    .into_inner();
    Ok(TailnetProbe {
        summary: response.summary,
        // Empty detail strings are normalized to None.
        detail: optional(response.detail),
        status_code: response.status_code,
    })
}
/// Start a Tailnet browser sign-in session on the daemon and return its
/// initial status (including the session id for polling/cancellation).
///
/// A missing hostname is sent as an empty string, matching the RPC contract.
pub async fn start_tailnet_login(
    authority: String,
    account_name: String,
    identity_name: String,
    hostname: Option<String>,
) -> Result<TailnetLoginStatus> {
    let mut client = BurrowClient::from_uds().await?;
    let response = timeout(
        RPC_TIMEOUT,
        client.tailnet_client.login_start(TailnetLoginStartRequest {
            account_name,
            identity_name,
            hostname: hostname.unwrap_or_default(),
            authority,
        }),
    )
    .await
    .context("timed out starting Tailnet sign-in")??
    .into_inner();
    Ok(decode_tailnet_status(response))
}
/// Poll the status of an in-progress Tailnet sign-in session.
pub async fn tailnet_login_status(session_id: String) -> Result<TailnetLoginStatus> {
    let mut client = BurrowClient::from_uds().await?;
    let response = timeout(
        RPC_TIMEOUT,
        client
            .tailnet_client
            .login_status(TailnetLoginStatusRequest { session_id }),
    )
    .await
    .context("timed out reading Tailnet sign-in status")??
    .into_inner();
    Ok(decode_tailnet_status(response))
}
/// Cancel an in-progress Tailnet sign-in session.
pub async fn cancel_tailnet_login(session_id: String) -> Result<()> {
    let mut client = BurrowClient::from_uds().await?;
    timeout(
        RPC_TIMEOUT,
        client
            .tailnet_client
            .login_cancel(TailnetLoginCancelRequest { session_id }),
    )
    .await
    .context("timed out cancelling Tailnet sign-in")??;
    Ok(())
}
/// Store a network payload under a freshly allocated id and return that id.
///
/// NOTE(review): the id is read via `next_network_id` and written in a second
/// RPC, so two concurrent adds could race to the same id — confirm whether
/// the daemon rejects or deduplicates duplicate ids.
async fn add_network(network_type: NetworkType, payload: Vec<u8>) -> Result<i32> {
    let id = next_network_id().await?;
    let mut client = BurrowClient::from_uds().await?;
    timeout(
        RPC_TIMEOUT,
        client.networks_client.network_add(Network {
            id,
            r#type: network_type.into(),
            payload,
        }),
    )
    .await
    .context("timed out saving network to Burrow daemon")??;
    Ok(id)
}
/// Allocate the next network id: one past the largest saved id, starting at 1
/// when no networks exist yet.
async fn next_network_id() -> Result<i32> {
    let networks = list_networks().await?;
    let highest = networks.iter().map(|network| network.id).max();
    Ok(highest.map_or(1, |id| id + 1))
}
/// Dispatch to the per-protocol summary builder for a saved network.
fn summarize_network(network: &Network) -> NetworkSummary {
    match network.r#type() {
        NetworkType::WireGuard => summarize_wireguard(network),
        NetworkType::Tailnet => summarize_tailnet(network),
    }
}
/// Build a card summary for a stored WireGuard network.
///
/// Picks the first meaningful config line as the detail, skipping blank
/// lines, section headers, comments, and — importantly — key material: the
/// first non-section line of a WireGuard config is typically
/// `PrivateKey = …`, which must never be surfaced in the UI.
fn summarize_wireguard(network: &Network) -> NetworkSummary {
    let payload = String::from_utf8_lossy(&network.payload);
    let detail = payload
        .lines()
        .map(str::trim)
        .filter(|line| !line.is_empty() && !line.starts_with('[') && !line.starts_with('#'))
        .find(|line| {
            let lower = line.to_ascii_lowercase();
            !lower.starts_with("privatekey") && !lower.starts_with("presharedkey")
        })
        .unwrap_or("Stored WireGuard configuration")
        .to_owned();
    NetworkSummary {
        id: network.id,
        title: format!("WireGuard {}", network.id),
        detail,
    }
}
/// Build a card summary for a stored Tailnet network, falling back to an
/// error detail when the payload cannot be decoded.
fn summarize_tailnet(network: &Network) -> NetworkSummary {
    let config = match TailnetConfig::from_slice(&network.payload) {
        Ok(config) => config,
        Err(error) => {
            return NetworkSummary {
                id: network.id,
                title: "Tailnet".to_owned(),
                detail: format!("Unable to read Tailnet payload: {error}"),
            }
        }
    };
    let title = config
        .tailnet
        .clone()
        .or(config.hostname.clone())
        .unwrap_or_else(|| "Tailnet".to_owned());
    let authority = config
        .authority
        .unwrap_or_else(|| "default authority".to_owned());
    let account = config.account.unwrap_or_else(|| "default".to_owned());
    NetworkSummary {
        id: network.id,
        title,
        detail: format!("{authority} - account {account}"),
    }
}
/// Convert the raw gRPC login-status response into the UI-facing struct,
/// normalizing empty strings to `None` via `optional`.
fn decode_tailnet_status(
    response: burrow::grpc_defs::TailnetLoginStatusResponse,
) -> TailnetLoginStatus {
    TailnetLoginStatus {
        session_id: response.session_id,
        backend_state: response.backend_state,
        auth_url: optional(response.auth_url),
        running: response.running,
        needs_login: response.needs_login,
        tailnet_name: optional(response.tailnet_name),
        self_dns_name: optional(response.self_dns_name),
        tailnet_ips: response.tailnet_ips,
        health: response.health,
    }
}
/// `Some(trimmed)` when `value` has non-whitespace content, otherwise `None`.
fn optional(value: String) -> Option<String> {
    let trimmed = value.trim();
    (!trimmed.is_empty()).then(|| trimmed.to_owned())
}
/// Trimmed `value`, or an owned copy of `fallback` when `value` is blank.
pub fn normalized(value: &str, fallback: &str) -> String {
    match value.trim() {
        "" => fallback.to_owned(),
        trimmed => trimmed.to_owned(),
    }
}
/// `Some(trimmed)` when `value` has non-whitespace content, otherwise `None`.
pub fn normalized_optional(value: &str) -> Option<String> {
    let trimmed = value.trim();
    (!trimmed.is_empty()).then(|| trimmed.to_owned())
}
/// Trimmed `value`, or an error naming `label` when the input is blank.
pub fn require_value(value: &str, label: &str) -> Result<String> {
    normalized_optional(value).ok_or_else(|| anyhow!("{label} is required"))
}

View file

@ -1,11 +1,15 @@
use anyhow::Result;
pub mod components;
mod diag;
mod account_store;
mod daemon_api;
// Generated using meson
mod config;
fn main() {
    // Best-effort: configure the daemon socket path before the UI starts.
    // Failure is reported but not fatal — the app still launches.
    if let Err(error) = daemon_api::configure_client_paths() {
        eprintln!("failed to configure Burrow daemon paths: {error}");
    }
    components::App::run();
}

View file

@ -1,11 +1,11 @@
use std::{
ffi::{c_char, CStr},
path::PathBuf,
sync::Arc,
sync::{Arc, Mutex},
thread,
};
use once_cell::sync::OnceCell;
use once_cell::sync::{Lazy, OnceCell};
use tokio::{
runtime::{Builder, Handle},
sync::Notify,
@ -14,15 +14,12 @@ use tracing::error;
use crate::daemon::daemon_main;
static BURROW_NOTIFY: OnceCell<Arc<Notify>> = OnceCell::new();
static BURROW_HANDLE: OnceCell<Handle> = OnceCell::new();
static BURROW_READY: OnceCell<()> = OnceCell::new();
static BURROW_SPAWN_LOCK: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));
#[no_mangle]
pub unsafe extern "C" fn spawn_in_process(path: *const c_char, db_path: *const c_char) {
crate::tracing::initialize();
let notify = BURROW_NOTIFY.get_or_init(|| Arc::new(Notify::new()));
let handle = BURROW_HANDLE.get_or_init(|| {
let path_buf = if path.is_null() {
None
} else {
@ -33,6 +30,19 @@ pub unsafe extern "C" fn spawn_in_process(path: *const c_char, db_path: *const c
} else {
Some(PathBuf::from(CStr::from_ptr(db_path).to_str().unwrap()))
};
spawn_in_process_with_paths(path_buf, db_path_buf);
}
pub fn spawn_in_process_with_paths(path_buf: Option<PathBuf>, db_path_buf: Option<PathBuf>) {
crate::tracing::initialize();
let _guard = BURROW_SPAWN_LOCK.lock().unwrap();
if BURROW_READY.get().is_some() {
return;
}
let notify = Arc::new(Notify::new());
let handle = BURROW_HANDLE.get_or_init(|| {
let sender = notify.clone();
let (handle_tx, handle_rx) = tokio::sync::oneshot::channel();
@ -62,4 +72,5 @@ pub unsafe extern "C" fn spawn_in_process(path: *const c_char, db_path: *const c
let receiver = notify.clone();
handle.block_on(async move { receiver.notified().await });
let _ = BURROW_READY.set(());
}

View file

@ -16,10 +16,10 @@ pub(crate) mod tracing;
#[cfg(target_os = "linux")]
pub mod usernet;
#[cfg(target_vendor = "apple")]
pub use daemon::apple::spawn_in_process;
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub use daemon::apple::{spawn_in_process, spawn_in_process_with_paths};
#[cfg(any(target_os = "linux", target_vendor = "apple"))]
pub use daemon::{
rpc::DaemonResponse, rpc::ServerInfo, DaemonClient, DaemonCommand, DaemonResponseData,
DaemonStartOptions,
rpc::grpc_defs, rpc::BurrowClient, rpc::DaemonResponse, rpc::ServerInfo, DaemonClient,
DaemonCommand, DaemonResponseData, DaemonStartOptions,
};

View file

@ -15,7 +15,7 @@ Note that the flatpak version can compile but will not run properly!
1. Install build dependencies
```
sudo apt install -y clang meson cmake pkg-config libgtk-4-dev libadwaita-1-dev gettext desktop-file-utils
sudo apt install -y clang meson cmake pkg-config libssl-dev libgtk-4-dev libadwaita-1-dev gettext desktop-file-utils
```
2. Install flatpak builder (Optional)
@ -38,7 +38,7 @@ Note that the flatpak version can compile but will not run properly!
1. Install build dependencies
```
sudo dnf install -y clang ninja-build cmake meson gtk4-devel glib2-devel libadwaita-devel desktop-file-utils libappstream-glib
sudo dnf install -y clang ninja-build cmake meson openssl-devel gtk4-devel glib2-devel libadwaita-devel desktop-file-utils libappstream-glib
```
2. Install flatpak builder (Optional)
@ -61,7 +61,7 @@ Note that the flatpak version can compile but will not run properly!
1. Install build dependencies
```
sudo xbps-install -Sy gcc clang meson cmake pkg-config gtk4-devel gettext desktop-file-utils gtk4-update-icon-cache appstream-glib
sudo xbps-install -Sy gcc clang meson cmake pkg-config openssl-devel gtk4-devel gettext desktop-file-utils gtk4-update-icon-cache appstream-glib
```
2. Install flatpak builder (Optional)
@ -88,6 +88,12 @@ flatpak install --user \
## Building
With Nix, enter the focused GTK shell before running the Meson build:
```bash
nix develop .#gtk
```
<details>
<summary>General</summary>
@ -139,6 +145,16 @@ flatpak install --user \
## Running
The GTK app mirrors the Apple home surface: a Burrow header, Networks carousel,
Accounts section, Tunnel action, and the same add flows for WireGuard, Tor, and
Tailnet. It talks to the daemon over the same gRPC API used by Apple clients for
network storage, tunnel state, Tailnet discovery, authority probing, browser
sign-in, and Tailnet payloads.
On Linux the GTK app first looks for a daemon on the configured gRPC socket. If
none is reachable, it starts an embedded user-scoped daemon with a socket under
`XDG_RUNTIME_DIR` and a database under `XDG_DATA_HOME` before refreshing the UI.
<details>
<summary>General</summary>

View file

@ -44,6 +44,7 @@ Burrow should formalize one Apple/runtime boundary: Apple clients speak only to
- Keeping control-plane I/O out of Swift UI reduces accidental secret, token, and callback sprawl across app code.
- The daemon boundary makes testing and kill-switch behavior tractable because runtime integration is localized.
- Apple daemon lifecycle ownership must be explicit: either the app ensures the daemon is running before RPC or the extension owns it and the UI surfaces daemon-unavailable state clearly.
- Non-Apple presentation clients should follow the same daemon-first lifecycle pattern: connect to a managed daemon when present, or start a user-scoped embedded daemon before issuing RPCs, without adding platform-local control-plane paths.
## Contributor Playbook
@ -54,6 +55,7 @@ Burrow should formalize one Apple/runtime boundary: Apple clients speak only to
- daemon unavailable behavior
- successful RPC path
- error propagation through the UI
- Keep Linux GTK and Apple clients visually and functionally aligned around the same daemon-backed home surface: Networks, Accounts, Tunnel, and add flows should remain corresponding views over the daemon API.
## Alternatives Considered
@ -63,6 +65,7 @@ Burrow should formalize one Apple/runtime boundary: Apple clients speak only to
## Impact on Other Work
- Governs the Tailnet refactor and future Apple runtime work.
- Governs Linux GTK daemon startup parity where the same daemon API is reused from a user-scoped presentation process.
- Interacts with BEP-0002 control-plane bootstrap and BEP-0003 transport refactoring.
## Decision

View file

@ -37,6 +37,7 @@ Burrow should treat Tailnet as one protocol family. Tailscale-managed and self-h
- Burrow-owned authority when explicitly applicable
- Discovery returns authority and related metadata; editing the authority is the mechanism that moves a configuration from managed default to custom control server.
- The daemon and control layer own provider inference; the UI should primarily present “Tailnet” plus the selected authority.
- Platform clients consume the same daemon gRPC surface for Tailnet discovery, authority probing, browser sign-in, and saved network payloads. macOS/iOS SwiftUI and Linux GTK may differ in presentation and local credential stores, but neither should introduce a second control-plane path.
## Security and Operational Considerations
@ -48,6 +49,7 @@ Burrow should treat Tailnet as one protocol family. Tailscale-managed and self-h
- Remove provider pickers from Tailnet UI unless a concrete protocol difference requires one.
- Store the authority explicitly in payloads and infer provider internally only when needed.
- Keep Linux GTK and Apple clients at functional parity by routing Tailnet add/discover/probe/login through `TailnetControl` and `Networks` RPCs instead of platform-local HTTP or legacy JSON daemon commands.
- Prefer tests that validate authority normalization and discovery behavior over UI-provider branching.
## Alternatives Considered
@ -58,7 +60,7 @@ Burrow should treat Tailnet as one protocol family. Tailscale-managed and self-h
## Impact on Other Work
- Refines BEP-0002's Tailscale-shaped control-plane work.
- Constrains the Tailnet Apple refactor and future daemon control-plane storage.
- Constrains the Tailnet Apple and Linux GTK refactors plus future daemon control-plane storage.
## Decision
@ -68,4 +70,5 @@ Pending.
- `burrow/src/control/`
- `Apple/UI/Networks/`
- `burrow-gtk/src/`
- `proto/burrow.proto`