Align GTK app with Apple home surface
Add the GTK home screen, local account store, daemon gRPC wrapper, and embedded Linux daemon startup path so the Linux app follows the Apple client UX and daemon boundary. Document the GTK parity expectations and update the daemon IPC and Tailnet BEPs with the cross-platform client model.
This commit is contained in:
parent
9244a0476a
commit
97c569fb35
12 changed files with 1861 additions and 110 deletions
|
|
@ -11,6 +11,8 @@ relm4 = { version = "0.6", features = ["libadwaita", "gnome_44"]}
|
|||
burrow = { version = "*", path = "../burrow/" }
|
||||
tokio = { version = "1.35.0", features = ["time", "sync"] }
|
||||
gettext-rs = { version = "0.7.0", features = ["gettext-system"] }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
|
||||
[build-dependencies]
|
||||
anyhow = "1.0"
|
||||
|
|
|
|||
139
burrow-gtk/src/account_store.rs
Normal file
139
burrow-gtk/src/account_store.rs
Normal file
|
|
@ -0,0 +1,139 @@
|
|||
use anyhow::{Context, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
path::PathBuf,
|
||||
time::{SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
|
||||
/// A locally persisted account entry shown on the GTK home screen.
///
/// Records are stored as pretty-printed JSON under the path returned by
/// `storage_path` and kept sorted by kind rank, then title (see `upsert`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AccountRecord {
    /// Stable identifier; `upsert` keys on this to replace vs. append.
    pub id: String,
    /// Which backend this account belongs to.
    pub kind: AccountKind,
    /// Human-readable display name.
    pub title: String,
    /// Control-plane URL, when the backend uses one.
    pub authority: Option<String>,
    /// Account name within the backend.
    pub account: String,
    /// Identity name within the account.
    pub identity: String,
    /// Optional device hostname override.
    pub hostname: Option<String>,
    /// Optional tailnet name — presumably only set for Tailnet accounts.
    pub tailnet: Option<String>,
    /// Free-form user note.
    pub note: Option<String>,
    /// Unix seconds when the record was first created (0 means "unset";
    /// `upsert` fills it in).
    pub created_at: u64,
    /// Unix seconds when the record was last written by `upsert`.
    pub updated_at: u64,
}
|
||||
|
||||
/// The kind of network account a record represents.
///
/// Serialized in lowercase (e.g. "wireguard") in the JSON store.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AccountKind {
    WireGuard,
    Tor,
    Tailnet,
}

impl AccountKind {
    /// Human-readable label for this kind.
    pub fn title(self) -> &'static str {
        match self {
            Self::WireGuard => "WireGuard",
            Self::Tor => "Tor",
            Self::Tailnet => "Tailnet",
        }
    }

    /// Ordering weight used when sorting the account list:
    /// Tailnet first, then Tor, then WireGuard.
    fn sort_rank(self) -> u8 {
        match self {
            Self::Tailnet => 0,
            Self::Tor => 1,
            Self::WireGuard => 2,
        }
    }
}
|
||||
|
||||
pub fn load() -> Result<Vec<AccountRecord>> {
|
||||
let path = storage_path()?;
|
||||
if !path.exists() {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
let data =
|
||||
std::fs::read(&path).with_context(|| format!("failed to read {}", path.display()))?;
|
||||
serde_json::from_slice(&data).with_context(|| format!("failed to parse {}", path.display()))
|
||||
}
|
||||
|
||||
pub fn upsert(mut record: AccountRecord) -> Result<Vec<AccountRecord>> {
|
||||
let mut accounts = load()?;
|
||||
let now = timestamp();
|
||||
record.updated_at = now;
|
||||
if record.created_at == 0 {
|
||||
record.created_at = now;
|
||||
}
|
||||
|
||||
if let Some(index) = accounts.iter().position(|account| account.id == record.id) {
|
||||
accounts[index] = record;
|
||||
} else {
|
||||
accounts.push(record);
|
||||
}
|
||||
accounts.sort_by(|lhs, rhs| {
|
||||
lhs.kind
|
||||
.sort_rank()
|
||||
.cmp(&rhs.kind.sort_rank())
|
||||
.then_with(|| lhs.title.to_lowercase().cmp(&rhs.title.to_lowercase()))
|
||||
});
|
||||
persist(&accounts)?;
|
||||
Ok(accounts)
|
||||
}
|
||||
|
||||
pub fn new_record(
|
||||
kind: AccountKind,
|
||||
title: String,
|
||||
authority: Option<String>,
|
||||
account: String,
|
||||
identity: String,
|
||||
hostname: Option<String>,
|
||||
tailnet: Option<String>,
|
||||
note: Option<String>,
|
||||
) -> AccountRecord {
|
||||
let now = timestamp();
|
||||
AccountRecord {
|
||||
id: format!("{}-{now}", kind.title().to_ascii_lowercase()),
|
||||
kind,
|
||||
title,
|
||||
authority,
|
||||
account,
|
||||
identity,
|
||||
hostname,
|
||||
tailnet,
|
||||
note,
|
||||
created_at: now,
|
||||
updated_at: now,
|
||||
}
|
||||
}
|
||||
|
||||
fn persist(accounts: &[AccountRecord]) -> Result<()> {
|
||||
let path = storage_path()?;
|
||||
if let Some(parent) = path.parent() {
|
||||
std::fs::create_dir_all(parent)
|
||||
.with_context(|| format!("failed to create {}", parent.display()))?;
|
||||
}
|
||||
let data = serde_json::to_vec_pretty(accounts).context("failed to encode account store")?;
|
||||
std::fs::write(&path, data).with_context(|| format!("failed to write {}", path.display()))
|
||||
}
|
||||
|
||||
fn storage_path() -> Result<PathBuf> {
|
||||
if let Some(data_home) = std::env::var_os("XDG_DATA_HOME") {
|
||||
return Ok(PathBuf::from(data_home)
|
||||
.join("burrow")
|
||||
.join("accounts.json"));
|
||||
}
|
||||
if let Some(home) = std::env::var_os("HOME") {
|
||||
return Ok(PathBuf::from(home)
|
||||
.join(".local")
|
||||
.join("share")
|
||||
.join("burrow")
|
||||
.join("accounts.json"));
|
||||
}
|
||||
Ok(std::env::temp_dir().join("burrow-accounts.json"))
|
||||
}
|
||||
|
||||
/// Current Unix time in whole seconds; 0 if the system clock reads
/// before the epoch (so callers never see an error for a bad clock).
fn timestamp() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
|
||||
|
|
@ -1,24 +1,19 @@
|
|||
use super::*;
|
||||
use anyhow::Context;
|
||||
use std::time::Duration;
|
||||
|
||||
const RECONNECT_POLL_TIME: Duration = Duration::from_secs(5);
|
||||
|
||||
pub struct App {
|
||||
daemon_client: Arc<Mutex<Option<DaemonClient>>>,
|
||||
settings_screen: Controller<settings_screen::SettingsScreen>,
|
||||
switch_screen: AsyncController<switch_screen::SwitchScreen>,
|
||||
_home_screen: AsyncController<home_screen::HomeScreen>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum AppMsg {
|
||||
None,
|
||||
PostInit,
|
||||
}
|
||||
|
||||
impl App {
|
||||
pub fn run() {
|
||||
let app = RelmApp::new(config::ID);
|
||||
relm4::set_global_css(APP_CSS);
|
||||
Self::setup_gresources().unwrap();
|
||||
Self::setup_i18n().unwrap();
|
||||
|
||||
|
|
@ -49,7 +44,7 @@ impl AsyncComponent for App {
|
|||
view! {
|
||||
adw::Window {
|
||||
set_title: Some("Burrow"),
|
||||
set_default_size: (640, 480),
|
||||
set_default_size: (900, 760),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -58,100 +53,84 @@ impl AsyncComponent for App {
|
|||
root: Self::Root,
|
||||
sender: AsyncComponentSender<Self>,
|
||||
) -> AsyncComponentParts<Self> {
|
||||
let daemon_client = Arc::new(Mutex::new(DaemonClient::new().await.ok()));
|
||||
|
||||
let switch_screen = switch_screen::SwitchScreen::builder()
|
||||
.launch(switch_screen::SwitchScreenInit {
|
||||
daemon_client: Arc::clone(&daemon_client),
|
||||
})
|
||||
.forward(sender.input_sender(), |_| AppMsg::None);
|
||||
|
||||
let settings_screen = settings_screen::SettingsScreen::builder()
|
||||
.launch(settings_screen::SettingsScreenInit {
|
||||
daemon_client: Arc::clone(&daemon_client),
|
||||
})
|
||||
let home_screen = home_screen::HomeScreen::builder()
|
||||
.launch(())
|
||||
.forward(sender.input_sender(), |_| AppMsg::None);
|
||||
|
||||
let widgets = view_output!();
|
||||
|
||||
let view_stack = adw::ViewStack::new();
|
||||
view_stack.add_titled(switch_screen.widget(), None, "Switch");
|
||||
view_stack.add_titled(settings_screen.widget(), None, "Settings");
|
||||
|
||||
let view_switcher_bar = adw::ViewSwitcherBar::builder().stack(&view_stack).build();
|
||||
view_switcher_bar.set_reveal(true);
|
||||
|
||||
// When libadwaita 1.4 support becomes more available, this approach is more appropriate
|
||||
//
|
||||
// let toolbar = adw::ToolbarView::new();
|
||||
// toolbar.add_top_bar(
|
||||
// &adw::HeaderBar::builder()
|
||||
// .title_widget(>k::Label::new(Some("Burrow")))
|
||||
// .build(),
|
||||
// );
|
||||
// toolbar.add_bottom_bar(&view_switcher_bar);
|
||||
// toolbar.set_content(Some(&view_stack));
|
||||
// root.set_content(Some(&toolbar));
|
||||
|
||||
let content = gtk::Box::new(gtk::Orientation::Vertical, 0);
|
||||
content.append(
|
||||
&adw::HeaderBar::builder()
|
||||
.title_widget(>k::Label::new(Some("Burrow")))
|
||||
.build(),
|
||||
);
|
||||
content.append(&view_stack);
|
||||
content.append(&view_switcher_bar);
|
||||
content.append(home_screen.widget());
|
||||
|
||||
root.set_content(Some(&content));
|
||||
|
||||
sender.input(AppMsg::PostInit);
|
||||
|
||||
let model = App {
|
||||
daemon_client,
|
||||
switch_screen,
|
||||
settings_screen,
|
||||
};
|
||||
let model = App { _home_screen: home_screen };
|
||||
|
||||
AsyncComponentParts { model, widgets }
|
||||
}
|
||||
|
||||
async fn update(
|
||||
&mut self,
|
||||
_msg: Self::Input,
|
||||
msg: Self::Input,
|
||||
_sender: AsyncComponentSender<Self>,
|
||||
_root: &Self::Root,
|
||||
) {
|
||||
loop {
|
||||
tokio::time::sleep(RECONNECT_POLL_TIME).await;
|
||||
{
|
||||
let mut daemon_client = self.daemon_client.lock().await;
|
||||
let mut disconnected_daemon_client = false;
|
||||
|
||||
if let Some(daemon_client) = daemon_client.as_mut() {
|
||||
if let Err(_e) = daemon_client.send_command(DaemonCommand::ServerInfo).await {
|
||||
disconnected_daemon_client = true;
|
||||
self.switch_screen
|
||||
.emit(switch_screen::SwitchScreenMsg::DaemonDisconnect);
|
||||
self.settings_screen
|
||||
.emit(settings_screen::SettingsScreenMsg::DaemonStateChange)
|
||||
}
|
||||
}
|
||||
|
||||
if disconnected_daemon_client || daemon_client.is_none() {
|
||||
match DaemonClient::new().await {
|
||||
Ok(new_daemon_client) => {
|
||||
*daemon_client = Some(new_daemon_client);
|
||||
self.switch_screen
|
||||
.emit(switch_screen::SwitchScreenMsg::DaemonReconnect);
|
||||
self.settings_screen
|
||||
.emit(settings_screen::SettingsScreenMsg::DaemonStateChange)
|
||||
}
|
||||
Err(_e) => {
|
||||
// TODO: Handle Error
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
match msg {
|
||||
AppMsg::None => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const APP_CSS: &str = r#"
|
||||
.empty-state {
|
||||
border-radius: 18px;
|
||||
padding: 22px;
|
||||
background: alpha(@card_bg_color, 0.72);
|
||||
}
|
||||
|
||||
.summary-card {
|
||||
border-radius: 18px;
|
||||
padding: 14px;
|
||||
background: alpha(@card_bg_color, 0.72);
|
||||
}
|
||||
|
||||
.network-card {
|
||||
border-radius: 10px;
|
||||
padding: 16px;
|
||||
box-shadow: 0 2px 6px alpha(black, 0.14);
|
||||
}
|
||||
|
||||
.wireguard-card {
|
||||
background: linear-gradient(135deg, #3277d8, #174ea6);
|
||||
}
|
||||
|
||||
.tailnet-card {
|
||||
background: linear-gradient(135deg, #31b891, #147d69);
|
||||
}
|
||||
|
||||
.network-card-kind,
|
||||
.network-card-title,
|
||||
.network-card-detail {
|
||||
color: white;
|
||||
}
|
||||
|
||||
.network-card-kind {
|
||||
opacity: 0.86;
|
||||
font-weight: 700;
|
||||
}
|
||||
|
||||
.network-card-title {
|
||||
font-size: 1.22em;
|
||||
font-weight: 700;
|
||||
}
|
||||
|
||||
.network-card-detail {
|
||||
opacity: 0.92;
|
||||
font-family: monospace;
|
||||
}
|
||||
"#;
|
||||
|
|
|
|||
1178
burrow-gtk/src/components/home_screen.rs
Normal file
1178
burrow-gtk/src/components/home_screen.rs
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -1,6 +1,6 @@
|
|||
use super::*;
|
||||
use crate::daemon_api;
|
||||
use adw::prelude::*;
|
||||
use burrow::{DaemonClient, DaemonCommand, DaemonResponseData};
|
||||
use gtk::Align;
|
||||
use relm4::{
|
||||
component::{
|
||||
|
|
@ -9,13 +9,9 @@ use relm4::{
|
|||
},
|
||||
prelude::*,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
mod app;
|
||||
mod settings;
|
||||
mod settings_screen;
|
||||
mod switch_screen;
|
||||
mod home_screen;
|
||||
|
||||
pub use app::*;
|
||||
pub use settings::{DaemonGroupMsg, DiagGroupMsg};
|
||||
pub use home_screen::{HomeScreen, HomeScreenMsg};
|
||||
|
|
|
|||
420
burrow-gtk/src/daemon_api.rs
Normal file
420
burrow-gtk/src/daemon_api.rs
Normal file
|
|
@ -0,0 +1,420 @@
|
|||
use anyhow::{anyhow, Context, Result};
|
||||
use burrow::{
|
||||
control::{TailnetConfig, TailnetProvider},
|
||||
grpc_defs::{
|
||||
Empty, Network, NetworkType, State, TailnetDiscoverRequest, TailnetLoginCancelRequest,
|
||||
TailnetLoginStartRequest, TailnetLoginStatusRequest, TailnetProbeRequest,
|
||||
},
|
||||
BurrowClient,
|
||||
};
|
||||
use std::{
    path::{Path, PathBuf},
    sync::OnceLock,
};
|
||||
use tokio::time::{timeout, Duration};
|
||||
|
||||
/// Upper bound applied to each individual daemon RPC round-trip.
const RPC_TIMEOUT: Duration = Duration::from_secs(3);
/// The hosted Tailscale control plane, treated as the "managed" authority.
const MANAGED_TAILSCALE_AUTHORITY: &str = "https://controlplane.tailscale.com";
/// Set once the embedded in-process daemon has been spawned, so it is
/// started at most once per process.
static EMBEDDED_DAEMON_STARTED: OnceLock<()> = OnceLock::new();
|
||||
|
||||
/// Coarse tunnel state decoded from the daemon's tunnel-status stream.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TunnelState {
    Running,
    Stopped,
}
|
||||
|
||||
/// Display-oriented summary of one saved network (built by
/// `summarize_network` for the home-screen cards).
#[derive(Debug, Clone)]
pub struct NetworkSummary {
    /// Daemon-assigned network id.
    pub id: i32,
    /// Card title, e.g. "WireGuard 3" or the tailnet/hostname.
    pub title: String,
    /// One-line detail shown under the title.
    pub detail: String,
}

/// Result of asking the daemon to discover a Tailnet control plane for
/// an e-mail address.
#[derive(Debug, Clone)]
pub struct TailnetDiscovery {
    /// Control-plane URL to use.
    pub authority: String,
    /// Whether the daemon considers this a managed deployment —
    /// presumably the hosted Tailscale control plane; semantics live in
    /// the daemon.
    pub managed: bool,
    /// OIDC issuer when the daemon reported one (blank → `None`).
    pub oidc_issuer: Option<String>,
}

/// Result of probing a Tailnet authority.
#[derive(Debug, Clone)]
pub struct TailnetProbe {
    pub summary: String,
    /// Extra detail when present (blank → `None`).
    pub detail: Option<String>,
    /// Numeric status code reported by the daemon — NOTE(review): looks
    /// like an HTTP status; confirm against the daemon's probe RPC.
    pub status_code: i32,
}

/// Snapshot of an interactive Tailnet sign-in session, decoded from the
/// gRPC login-status response (blank strings become `None`).
#[derive(Debug, Clone)]
pub struct TailnetLoginStatus {
    pub session_id: String,
    pub backend_state: String,
    /// URL the user must visit to finish authentication, when pending.
    pub auth_url: Option<String>,
    pub running: bool,
    pub needs_login: bool,
    pub tailnet_name: Option<String>,
    pub self_dns_name: Option<String>,
    pub tailnet_ips: Vec<String>,
    pub health: Vec<String>,
}
|
||||
|
||||
pub fn default_tailnet_authority() -> &'static str {
|
||||
MANAGED_TAILSCALE_AUTHORITY
|
||||
}
|
||||
|
||||
pub fn configure_client_paths() -> Result<()> {
|
||||
if std::env::var_os("BURROW_SOCKET_PATH").is_none() {
|
||||
std::env::set_var("BURROW_SOCKET_PATH", default_socket_path()?);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn ensure_daemon() -> Result<()> {
|
||||
configure_client_paths()?;
|
||||
if daemon_available().await {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let socket_path = socket_path()?;
|
||||
let db_path = database_path()?;
|
||||
ensure_parent(&socket_path)?;
|
||||
ensure_parent(&db_path)?;
|
||||
|
||||
if EMBEDDED_DAEMON_STARTED.get().is_none() {
|
||||
tokio::task::spawn_blocking(move || {
|
||||
burrow::spawn_in_process_with_paths(Some(socket_path), Some(db_path));
|
||||
})
|
||||
.await
|
||||
.context("failed to join embedded daemon startup")?;
|
||||
let _ = EMBEDDED_DAEMON_STARTED.set(());
|
||||
}
|
||||
|
||||
tunnel_state()
|
||||
.await
|
||||
.map(|_| ())
|
||||
.context("Burrow daemon started but did not accept tunnel status RPCs")
|
||||
}
|
||||
|
||||
pub fn infer_tailnet_provider(authority: &str) -> TailnetProvider {
|
||||
let normalized = authority.trim().trim_end_matches('/').to_ascii_lowercase();
|
||||
if normalized == "controlplane.tailscale.com"
|
||||
|| normalized == "http://controlplane.tailscale.com"
|
||||
|| normalized == MANAGED_TAILSCALE_AUTHORITY
|
||||
{
|
||||
TailnetProvider::Tailscale
|
||||
} else {
|
||||
TailnetProvider::Headscale
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn daemon_available() -> bool {
|
||||
tunnel_state().await.is_ok()
|
||||
}
|
||||
|
||||
fn socket_path() -> Result<PathBuf> {
|
||||
if let Some(path) = std::env::var_os("BURROW_SOCKET_PATH") {
|
||||
return Ok(PathBuf::from(path));
|
||||
}
|
||||
default_socket_path()
|
||||
}
|
||||
|
||||
fn default_socket_path() -> Result<PathBuf> {
|
||||
if let Some(runtime_dir) = std::env::var_os("XDG_RUNTIME_DIR") {
|
||||
return Ok(PathBuf::from(runtime_dir).join("burrow.sock"));
|
||||
}
|
||||
let uid = std::env::var("UID").unwrap_or_else(|_| "1000".to_owned());
|
||||
Ok(PathBuf::from(format!("/tmp/burrow-{uid}.sock")))
|
||||
}
|
||||
|
||||
fn database_path() -> Result<PathBuf> {
|
||||
if let Some(path) = std::env::var_os("BURROW_DB_PATH") {
|
||||
return Ok(PathBuf::from(path));
|
||||
}
|
||||
if let Some(data_home) = std::env::var_os("XDG_DATA_HOME") {
|
||||
return Ok(PathBuf::from(data_home).join("burrow").join("burrow.db"));
|
||||
}
|
||||
if let Some(home) = std::env::var_os("HOME") {
|
||||
return Ok(PathBuf::from(home)
|
||||
.join(".local")
|
||||
.join("share")
|
||||
.join("burrow")
|
||||
.join("burrow.db"));
|
||||
}
|
||||
Ok(std::env::temp_dir().join("burrow.db"))
|
||||
}
|
||||
|
||||
fn ensure_parent(path: &PathBuf) -> Result<()> {
|
||||
if let Some(parent) = path.parent() {
|
||||
std::fs::create_dir_all(parent)
|
||||
.with_context(|| format!("failed to create {}", parent.display()))?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn tunnel_state() -> Result<TunnelState> {
|
||||
let mut client = BurrowClient::from_uds().await?;
|
||||
let mut stream = timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_status(Empty {}))
|
||||
.await
|
||||
.context("timed out connecting to Burrow daemon")??
|
||||
.into_inner();
|
||||
let status = timeout(RPC_TIMEOUT, stream.message())
|
||||
.await
|
||||
.context("timed out reading Burrow tunnel status")??
|
||||
.context("Burrow daemon ended the status stream without a state")?;
|
||||
Ok(match status.state() {
|
||||
State::Running => TunnelState::Running,
|
||||
State::Stopped => TunnelState::Stopped,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn start_tunnel() -> Result<()> {
|
||||
let mut client = BurrowClient::from_uds().await?;
|
||||
timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_start(Empty {}))
|
||||
.await
|
||||
.context("timed out starting Burrow tunnel")??;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn stop_tunnel() -> Result<()> {
|
||||
let mut client = BurrowClient::from_uds().await?;
|
||||
timeout(RPC_TIMEOUT, client.tunnel_client.tunnel_stop(Empty {}))
|
||||
.await
|
||||
.context("timed out stopping Burrow tunnel")??;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn list_networks() -> Result<Vec<NetworkSummary>> {
|
||||
let mut client = BurrowClient::from_uds().await?;
|
||||
let mut stream = timeout(RPC_TIMEOUT, client.networks_client.network_list(Empty {}))
|
||||
.await
|
||||
.context("timed out connecting to Burrow network list")??
|
||||
.into_inner();
|
||||
let response = timeout(RPC_TIMEOUT, stream.message())
|
||||
.await
|
||||
.context("timed out reading Burrow network list")??
|
||||
.context("Burrow daemon ended the network stream without a snapshot")?;
|
||||
Ok(response.network.iter().map(summarize_network).collect())
|
||||
}
|
||||
|
||||
pub async fn add_wireguard(config: String) -> Result<i32> {
|
||||
add_network(NetworkType::WireGuard, config.into_bytes()).await
|
||||
}
|
||||
|
||||
pub async fn add_tailnet(
|
||||
authority: String,
|
||||
account: String,
|
||||
identity: String,
|
||||
hostname: Option<String>,
|
||||
tailnet: Option<String>,
|
||||
) -> Result<i32> {
|
||||
let provider = infer_tailnet_provider(&authority);
|
||||
let config = TailnetConfig {
|
||||
provider,
|
||||
authority: Some(authority),
|
||||
account: Some(account),
|
||||
identity: Some(identity),
|
||||
hostname,
|
||||
tailnet,
|
||||
};
|
||||
let payload = serde_json::to_vec_pretty(&config)?;
|
||||
add_network(NetworkType::Tailnet, payload).await
|
||||
}
|
||||
|
||||
pub async fn discover_tailnet(email: String) -> Result<TailnetDiscovery> {
|
||||
let mut client = BurrowClient::from_uds().await?;
|
||||
let response = timeout(
|
||||
RPC_TIMEOUT,
|
||||
client
|
||||
.tailnet_client
|
||||
.discover(TailnetDiscoverRequest { email }),
|
||||
)
|
||||
.await
|
||||
.context("timed out discovering Tailnet authority")??
|
||||
.into_inner();
|
||||
|
||||
Ok(TailnetDiscovery {
|
||||
authority: response.authority,
|
||||
managed: response.managed,
|
||||
oidc_issuer: optional(response.oidc_issuer),
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn probe_tailnet(authority: String) -> Result<TailnetProbe> {
|
||||
let mut client = BurrowClient::from_uds().await?;
|
||||
let response = timeout(
|
||||
RPC_TIMEOUT,
|
||||
client
|
||||
.tailnet_client
|
||||
.probe(TailnetProbeRequest { authority }),
|
||||
)
|
||||
.await
|
||||
.context("timed out probing Tailnet authority")??
|
||||
.into_inner();
|
||||
|
||||
Ok(TailnetProbe {
|
||||
summary: response.summary,
|
||||
detail: optional(response.detail),
|
||||
status_code: response.status_code,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn start_tailnet_login(
|
||||
authority: String,
|
||||
account_name: String,
|
||||
identity_name: String,
|
||||
hostname: Option<String>,
|
||||
) -> Result<TailnetLoginStatus> {
|
||||
let mut client = BurrowClient::from_uds().await?;
|
||||
let response = timeout(
|
||||
RPC_TIMEOUT,
|
||||
client.tailnet_client.login_start(TailnetLoginStartRequest {
|
||||
account_name,
|
||||
identity_name,
|
||||
hostname: hostname.unwrap_or_default(),
|
||||
authority,
|
||||
}),
|
||||
)
|
||||
.await
|
||||
.context("timed out starting Tailnet sign-in")??
|
||||
.into_inner();
|
||||
Ok(decode_tailnet_status(response))
|
||||
}
|
||||
|
||||
pub async fn tailnet_login_status(session_id: String) -> Result<TailnetLoginStatus> {
|
||||
let mut client = BurrowClient::from_uds().await?;
|
||||
let response = timeout(
|
||||
RPC_TIMEOUT,
|
||||
client
|
||||
.tailnet_client
|
||||
.login_status(TailnetLoginStatusRequest { session_id }),
|
||||
)
|
||||
.await
|
||||
.context("timed out reading Tailnet sign-in status")??
|
||||
.into_inner();
|
||||
Ok(decode_tailnet_status(response))
|
||||
}
|
||||
|
||||
pub async fn cancel_tailnet_login(session_id: String) -> Result<()> {
|
||||
let mut client = BurrowClient::from_uds().await?;
|
||||
timeout(
|
||||
RPC_TIMEOUT,
|
||||
client
|
||||
.tailnet_client
|
||||
.login_cancel(TailnetLoginCancelRequest { session_id }),
|
||||
)
|
||||
.await
|
||||
.context("timed out cancelling Tailnet sign-in")??;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn add_network(network_type: NetworkType, payload: Vec<u8>) -> Result<i32> {
|
||||
let id = next_network_id().await?;
|
||||
let mut client = BurrowClient::from_uds().await?;
|
||||
timeout(
|
||||
RPC_TIMEOUT,
|
||||
client.networks_client.network_add(Network {
|
||||
id,
|
||||
r#type: network_type.into(),
|
||||
payload,
|
||||
}),
|
||||
)
|
||||
.await
|
||||
.context("timed out saving network to Burrow daemon")??;
|
||||
Ok(id)
|
||||
}
|
||||
|
||||
async fn next_network_id() -> Result<i32> {
|
||||
let networks = list_networks().await?;
|
||||
Ok(networks.iter().map(|network| network.id).max().unwrap_or(0) + 1)
|
||||
}
|
||||
|
||||
fn summarize_network(network: &Network) -> NetworkSummary {
|
||||
match network.r#type() {
|
||||
NetworkType::WireGuard => summarize_wireguard(network),
|
||||
NetworkType::Tailnet => summarize_tailnet(network),
|
||||
}
|
||||
}
|
||||
|
||||
fn summarize_wireguard(network: &Network) -> NetworkSummary {
|
||||
let payload = String::from_utf8_lossy(&network.payload);
|
||||
let detail = payload
|
||||
.lines()
|
||||
.map(str::trim)
|
||||
.find(|line| !line.is_empty() && !line.starts_with('['))
|
||||
.unwrap_or("Stored WireGuard configuration")
|
||||
.to_owned();
|
||||
NetworkSummary {
|
||||
id: network.id,
|
||||
title: format!("WireGuard {}", network.id),
|
||||
detail,
|
||||
}
|
||||
}
|
||||
|
||||
fn summarize_tailnet(network: &Network) -> NetworkSummary {
|
||||
match TailnetConfig::from_slice(&network.payload) {
|
||||
Ok(config) => {
|
||||
let title = config
|
||||
.tailnet
|
||||
.clone()
|
||||
.or(config.hostname.clone())
|
||||
.unwrap_or_else(|| "Tailnet".to_owned());
|
||||
let authority = config
|
||||
.authority
|
||||
.unwrap_or_else(|| "default authority".to_owned());
|
||||
let account = config.account.unwrap_or_else(|| "default".to_owned());
|
||||
NetworkSummary {
|
||||
id: network.id,
|
||||
title,
|
||||
detail: format!("{authority} - account {account}"),
|
||||
}
|
||||
}
|
||||
Err(error) => NetworkSummary {
|
||||
id: network.id,
|
||||
title: "Tailnet".to_owned(),
|
||||
detail: format!("Unable to read Tailnet payload: {error}"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn decode_tailnet_status(
|
||||
response: burrow::grpc_defs::TailnetLoginStatusResponse,
|
||||
) -> TailnetLoginStatus {
|
||||
TailnetLoginStatus {
|
||||
session_id: response.session_id,
|
||||
backend_state: response.backend_state,
|
||||
auth_url: optional(response.auth_url),
|
||||
running: response.running,
|
||||
needs_login: response.needs_login,
|
||||
tailnet_name: optional(response.tailnet_name),
|
||||
self_dns_name: optional(response.self_dns_name),
|
||||
tailnet_ips: response.tailnet_ips,
|
||||
health: response.health,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert a daemon-supplied string into `None` when blank.
///
/// Delegates to `normalized_optional`: the original carried three
/// byte-identical copies of the trim-or-blank logic across `optional`,
/// `normalized`, and `normalized_optional`.
fn optional(value: String) -> Option<String> {
    normalized_optional(&value)
}

/// Trimmed `value`, or `fallback` when `value` is blank.
pub fn normalized(value: &str, fallback: &str) -> String {
    normalized_optional(value).unwrap_or_else(|| fallback.to_owned())
}

/// Trimmed `value`, or `None` when blank. Single source of truth for
/// the blank-string convention used throughout this module.
pub fn normalized_optional(value: &str) -> Option<String> {
    let trimmed = value.trim();
    if trimmed.is_empty() {
        None
    } else {
        Some(trimmed.to_owned())
    }
}
|
||||
|
||||
pub fn require_value(value: &str, label: &str) -> Result<String> {
|
||||
normalized_optional(value).ok_or_else(|| anyhow!("{label} is required"))
|
||||
}
|
||||
|
|
@ -1,11 +1,15 @@
|
|||
use anyhow::Result;
|
||||
|
||||
pub mod components;
|
||||
mod diag;
|
||||
mod account_store;
|
||||
mod daemon_api;
|
||||
|
||||
// Generated using meson
|
||||
mod config;
|
||||
|
||||
/// GTK client entry point.
fn main() {
    // Path configuration is best-effort: the app can still launch and
    // show a disconnected state, so log the error and continue rather
    // than aborting startup.
    if let Err(error) = daemon_api::configure_client_paths() {
        eprintln!("failed to configure Burrow daemon paths: {error}");
    }
    components::App::run();
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue