feat: Agent Orchestrator — multi-project agent dashboard

Tauri + Svelte 5 + Rust application for orchestrating multiple AI coding agents.
Includes Claude, Aider, Codex, and Ollama provider support, multi-agent
communication (btmsg/bttask), session anchors, plugin sandbox, FTS5 search,
Landlock sandboxing, and 507 vitest + 110 cargo tests.
This commit is contained in:
DexterFromLab 2026-03-15 15:45:27 +01:00
commit 3672e92b7e
272 changed files with 68600 additions and 0 deletions

4
src-tauri/.gitignore vendored Normal file
View file

@@ -0,0 +1,4 @@
# Generated by Cargo
# will have compiled files and executables
/target/
/gen/schemas

49
src-tauri/Cargo.toml Normal file
View file

@@ -0,0 +1,49 @@
[package]
name = "agent-orchestrator"
version = "0.1.0"
description = "Multi-session Claude agent dashboard"
authors = ["DexterFromLab"]
license = "MIT"
edition = "2021"
rust-version = "1.77.2"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
name = "agent_orchestrator_lib"
crate-type = ["staticlib", "cdylib", "rlib"]
[build-dependencies]
tauri-build = { version = "2.5.6", features = [] }
[dependencies]
bterminal-core = { path = "../bterminal-core" }
serde_json = "1.0"
serde = { version = "1.0", features = ["derive"] }
log = "0.4"
tauri = { version = "2.10.3", features = [] }
rusqlite = { version = "0.31", features = ["bundled-full"] }
dirs = "5"
notify = { version = "6", features = ["macos_fsevent"] }
tauri-plugin-updater = "2.10.0"
tauri-plugin-dialog = "2"
rfd = { version = "0.16", default-features = false, features = ["gtk3"] }
uuid = { version = "1", features = ["v4"] }
tokio-tungstenite = { version = "0.21", features = ["native-tls"] }
tokio = { version = "1", features = ["full"] }
futures-util = "0.3"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] }
opentelemetry = "0.28"
opentelemetry_sdk = { version = "0.28", features = ["rt-tokio"] }
opentelemetry-otlp = { version = "0.28", features = ["http-proto", "reqwest-client"] }
tracing-opentelemetry = "0.29"
keyring = { version = "3", features = ["linux-native"] }
notify-rust = "4"
native-tls = "0.2"
tokio-native-tls = "0.3"
sha2 = "0.10"
hex = "0.4"
[dev-dependencies]
tempfile = "3"

3
src-tauri/build.rs Normal file
View file

@@ -0,0 +1,3 @@
// Cargo build script: generates the Tauri glue code (permissions, schemas)
// required before compiling the app crate.
fn main() {
    tauri_build::build()
}

View file

@@ -0,0 +1,12 @@
{
"$schema": "../gen/schemas/desktop-schema.json",
"identifier": "default",
"description": "enables the default permissions",
"windows": [
"main"
],
"permissions": [
"core:default",
"dialog:default"
]
}

BIN
src-tauri/icons/128x128.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.5 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

BIN
src-tauri/icons/32x32.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.7 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 9 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 13 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.9 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.9 KiB

BIN
src-tauri/icons/icon.icns Normal file

Binary file not shown.

BIN
src-tauri/icons/icon.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

BIN
src-tauri/icons/icon.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 46 KiB

1896
src-tauri/src/btmsg.rs Normal file

File diff suppressed because it is too large Load diff

766
src-tauri/src/bttask.rs Normal file
View file

@@ -0,0 +1,766 @@
// bttask — Read access to task board SQLite tables in btmsg.db
// Tasks table created by bttask CLI, shared DB with btmsg
// Path configurable via init() for test isolation.
use rusqlite::{params, Connection, OpenFlags};
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::sync::OnceLock;
// Process-wide override for the btmsg.db location; written once via `init`.
static DB_PATH: OnceLock<PathBuf> = OnceLock::new();
/// Set the bttask database path. Must be called before any db access.
/// Called from lib.rs setup with AppConfig-resolved path.
/// Subsequent calls are no-ops: `OnceLock::set` keeps the first value.
pub fn init(path: PathBuf) {
    // Ignore the Err from a second call — first writer wins.
    let _ = DB_PATH.set(path);
}
/// Resolve the btmsg.db location: the `init`-configured path when present,
/// otherwise `<platform data dir>/bterminal/btmsg.db` (falling back to the
/// current directory when no data dir is available).
fn db_path() -> PathBuf {
    if let Some(configured) = DB_PATH.get() {
        return configured.clone();
    }
    let base = dirs::data_dir().unwrap_or_else(|| PathBuf::from("."));
    base.join("bterminal").join("btmsg.db")
}
/// Open the shared btmsg.db read-write, configure pragmas, and run the
/// lightweight `version`-column migration.
///
/// Errors when the database file does not exist yet (it is created by the
/// btmsg/bttask CLIs, not by this app) or when any pragma/migration fails.
fn open_db() -> Result<Connection, String> {
    let path = db_path();
    if !path.exists() {
        return Err("btmsg database not found".into());
    }
    let conn = Connection::open_with_flags(&path, OpenFlags::SQLITE_OPEN_READ_WRITE)
        .map_err(|e| format!("Failed to open btmsg.db: {e}"))?;
    // These PRAGMAs return a result row, so query_row (not execute) is used;
    // the returned value is deliberately discarded.
    conn.query_row("PRAGMA journal_mode=WAL", [], |_| Ok(()))
        .map_err(|e| format!("Failed to set WAL mode: {e}"))?;
    conn.query_row("PRAGMA busy_timeout = 5000", [], |_| Ok(()))
        .map_err(|e| format!("Failed to set busy_timeout: {e}"))?;
    // Migration: add version column if missing.
    // pragma_table_info lets us probe the schema without parsing CREATE TABLE;
    // unwrap_or(0) treats a probe failure as "column missing" and lets the
    // ALTER below surface the real error, if any.
    let has_version: i64 = conn
        .query_row(
            "SELECT COUNT(*) FROM pragma_table_info('tasks') WHERE name='version'",
            [],
            |row| row.get(0),
        )
        .unwrap_or(0);
    if has_version == 0 {
        conn.execute("ALTER TABLE tasks ADD COLUMN version INTEGER DEFAULT 1", [])
            .map_err(|e| format!("Migration (version column) failed: {e}"))?;
    }
    Ok(conn)
}
/// A row from the shared `tasks` table, serialized to the frontend in camelCase.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Task {
    pub id: String,
    pub title: String,
    pub description: String,
    // Workflow state: one of "todo", "progress", "review", "done", "blocked"
    // (enforced by update_task_status).
    pub status: String,
    // Defaults to "medium" when the column is NULL/unreadable.
    pub priority: String,
    // Agent currently assigned, if any.
    pub assigned_to: Option<String>,
    pub created_by: String,
    pub group_id: String,
    // Parent task for subtasks; None for top-level tasks.
    pub parent_task_id: Option<String>,
    // Manual ordering key within the board (ascending).
    pub sort_order: i32,
    pub created_at: String,
    pub updated_at: String,
    // Optimistic-locking counter; bumped by every status update.
    pub version: i64,
}
/// A row from the `task_comments` table (camelCase over the Tauri bridge).
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TaskComment {
    pub id: String,
    pub task_id: String,
    // Author of the comment.
    pub agent_id: String,
    pub content: String,
    pub created_at: String,
}
/// Get all tasks for a group.
///
/// Results are ordered by `sort_order` ascending, then newest-created first.
/// Columns that carry SQL defaults fall back to sensible values when NULL so
/// a partially-migrated row never aborts the whole listing.
pub fn list_tasks(group_id: &str) -> Result<Vec<Task>, String> {
    let db = open_db()?;
    let mut stmt = db
        .prepare(
            "SELECT id, title, description, status, priority, assigned_to,
                    created_by, group_id, parent_task_id, sort_order,
                    created_at, updated_at, version
             FROM tasks WHERE group_id = ?1
             ORDER BY sort_order ASC, created_at DESC",
        )
        .map_err(|e| format!("Query error: {e}"))?;
    let rows = stmt
        .query_map(params![group_id], |row| {
            Ok(Task {
                id: row.get("id")?,
                title: row.get("title")?,
                // Optional/defaulted columns tolerate NULL rather than erroring.
                description: row.get::<_, String>("description").unwrap_or_default(),
                status: row.get::<_, String>("status").unwrap_or_else(|_| "todo".into()),
                priority: row.get::<_, String>("priority").unwrap_or_else(|_| "medium".into()),
                assigned_to: row.get("assigned_to")?,
                created_by: row.get("created_by")?,
                group_id: row.get("group_id")?,
                parent_task_id: row.get("parent_task_id")?,
                sort_order: row.get::<_, i32>("sort_order").unwrap_or(0),
                created_at: row.get::<_, String>("created_at").unwrap_or_default(),
                updated_at: row.get::<_, String>("updated_at").unwrap_or_default(),
                // Pre-migration rows report version 1.
                version: row.get::<_, i64>("version").unwrap_or(1),
            })
        })
        .map_err(|e| format!("Query error: {e}"))?;
    rows.collect::<Result<Vec<_>, _>>()
        .map_err(|e| format!("Row error: {e}"))
}
/// Fetch every comment on a task, oldest first.
pub fn task_comments(task_id: &str) -> Result<Vec<TaskComment>, String> {
    let db = open_db()?;
    let sql = "SELECT id, task_id, agent_id, content, created_at
               FROM task_comments WHERE task_id = ?1
               ORDER BY created_at ASC";
    let mut stmt = db.prepare(sql).map_err(|e| format!("Query error: {e}"))?;
    let mapped = stmt
        .query_map(params![task_id], |row| {
            // created_at carries a SQL default; tolerate NULL just in case.
            let created_at = row.get::<_, String>("created_at").unwrap_or_default();
            Ok(TaskComment {
                id: row.get("id")?,
                task_id: row.get("task_id")?,
                agent_id: row.get("agent_id")?,
                content: row.get("content")?,
                created_at,
            })
        })
        .map_err(|e| format!("Query error: {e}"))?;
    let mut comments = Vec::new();
    for comment in mapped {
        comments.push(comment.map_err(|e| format!("Row error: {e}"))?);
    }
    Ok(comments)
}
/// Update task status with optimistic locking.
/// `expected_version` must match the current version in the database.
/// Returns the new version on success.
/// When transitioning to 'review', auto-posts to the #review-queue channel,
/// creating the channel first if it does not exist yet (best-effort).
pub fn update_task_status(task_id: &str, status: &str, expected_version: i64) -> Result<i64, String> {
    // Whitelist of workflow states; anything else is rejected up front.
    let valid = ["todo", "progress", "review", "done", "blocked"];
    if !valid.contains(&status) {
        return Err(format!("Invalid status '{}'. Valid: {:?}", status, valid));
    }
    let db = open_db()?;
    // Fetch task info before update (for channel notification); a lookup
    // failure simply suppresses the notification.
    let task_title: Option<(String, String)> = if status == "review" {
        db.query_row(
            "SELECT title, group_id FROM tasks WHERE id = ?1",
            params![task_id],
            |row| Ok((row.get::<_, String>("title")?, row.get::<_, String>("group_id")?)),
        ).ok()
    } else {
        None
    };
    // The WHERE clause enforces the optimistic lock: a stale version matches
    // zero rows and leaves the task untouched.
    let rows_affected = db.execute(
        "UPDATE tasks SET status = ?1, version = version + 1, updated_at = datetime('now')
         WHERE id = ?2 AND version = ?3",
        params![status, task_id, expected_version],
    )
    .map_err(|e| format!("Update error: {e}"))?;
    if rows_affected == 0 {
        return Err("Task was modified by another agent (version conflict)".into());
    }
    let new_version = expected_version + 1;
    // Auto-post to #review-queue channel on review transition
    if let Some((title, group_id)) = task_title {
        notify_review_channel(&db, &group_id, task_id, &title);
    }
    Ok(new_version)
}
/// Post a notification to #review-queue channel (best-effort, never fails the parent operation)
fn notify_review_channel(db: &Connection, group_id: &str, task_id: &str, title: &str) {
    // Look up the group's #review-queue channel, creating it on demand.
    let existing: Option<String> = db
        .query_row(
            "SELECT id FROM channels WHERE name = 'review-queue' AND group_id = ?1",
            params![group_id],
            |row| row.get(0),
        )
        .ok();
    let Some(channel_id) = existing.or_else(|| ensure_review_channels(db, group_id)) else {
        // Channel could not be found or created — drop the notification silently.
        return;
    };
    let msg_id = uuid::Uuid::new_v4().to_string();
    let content = format!("📋 Task ready for review: **{}** (`{}`)", title, task_id);
    // Best-effort insert: failures are ignored so the status update still succeeds.
    let _ = db.execute(
        "INSERT INTO channel_messages (id, channel_id, from_agent, content) VALUES (?1, ?2, 'system', ?3)",
        params![msg_id, channel_id, content],
    );
}
/// Ensure #review-queue and #review-log channels exist for a group.
/// Returns the review-queue channel ID if created/found.
fn ensure_review_channels(db: &Connection, group_id: &str) -> Option<String> {
    for name in ["review-queue", "review-log"] {
        // Skip channels that already exist so repeated calls stay idempotent.
        let already_there = db
            .query_row(
                "SELECT COUNT(*) > 0 FROM channels WHERE name = ?1 AND group_id = ?2",
                params![name, group_id],
                |row| row.get::<_, bool>(0),
            )
            .unwrap_or(false);
        if already_there {
            continue;
        }
        let id = uuid::Uuid::new_v4().to_string();
        // Best-effort insert; a failure falls through to the lookup below.
        let _ = db.execute(
            "INSERT INTO channels (id, name, group_id, created_by) VALUES (?1, ?2, ?3, 'system')",
            params![id, name, group_id],
        );
    }
    // Return the review-queue channel ID
    db.query_row(
        "SELECT id FROM channels WHERE name = 'review-queue' AND group_id = ?1",
        params![group_id],
        |row| row.get(0),
    )
    .ok()
}
/// Count tasks in 'review' status for a group.
pub fn review_queue_count(group_id: &str) -> Result<i64, String> {
    let db = open_db()?;
    let count = db.query_row(
        "SELECT COUNT(*) FROM tasks WHERE group_id = ?1 AND status = 'review'",
        params![group_id],
        |row| row.get::<_, i64>(0),
    );
    count.map_err(|e| format!("Query error: {e}"))
}
/// Add a comment to a task; returns the generated comment ID.
pub fn add_comment(task_id: &str, agent_id: &str, content: &str) -> Result<String, String> {
    let db = open_db()?;
    let comment_id = uuid::Uuid::new_v4().to_string();
    let result = db.execute(
        "INSERT INTO task_comments (id, task_id, agent_id, content) VALUES (?1, ?2, ?3, ?4)",
        params![comment_id, task_id, agent_id, content],
    );
    match result {
        Ok(_) => Ok(comment_id),
        Err(e) => Err(format!("Insert error: {e}")),
    }
}
/// Create a new task and return its generated ID.
/// Status, sort order, and timestamps come from the table defaults.
pub fn create_task(
    title: &str,
    description: &str,
    priority: &str,
    group_id: &str,
    created_by: &str,
    assigned_to: Option<&str>,
) -> Result<String, String> {
    let db = open_db()?;
    let task_id = uuid::Uuid::new_v4().to_string();
    let inserted = db.execute(
        "INSERT INTO tasks (id, title, description, priority, group_id, created_by, assigned_to)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
        params![task_id, title, description, priority, group_id, created_by, assigned_to],
    );
    match inserted {
        Ok(_) => Ok(task_id),
        Err(e) => Err(format!("Insert error: {e}")),
    }
}
/// Delete a task and its comments atomically.
///
/// Both DELETEs run inside a single transaction so a failure on the second
/// statement cannot leave the comments gone while the task row survives
/// (the original ran them as two independent statements).
pub fn delete_task(task_id: &str) -> Result<(), String> {
    let mut db = open_db()?;
    let tx = db
        .transaction()
        .map_err(|e| format!("Transaction error: {e}"))?;
    tx.execute("DELETE FROM task_comments WHERE task_id = ?1", params![task_id])
        .map_err(|e| format!("Delete comments error: {e}"))?;
    tx.execute("DELETE FROM tasks WHERE id = ?1", params![task_id])
        .map_err(|e| format!("Delete task error: {e}"))?;
    // Commit is the only point of no return; an error here rolls nothing out.
    tx.commit().map_err(|e| format!("Commit error: {e}"))
}
#[cfg(test)]
mod tests {
    use super::*;
    use rusqlite::Connection;
    /// Build an in-memory database mirroring the schema the bttask CLI
    /// creates, so queries can be exercised without touching the real file.
    fn test_db() -> Connection {
        let conn = Connection::open_in_memory().unwrap();
        conn.execute_batch(
            "CREATE TABLE tasks (
                id TEXT PRIMARY KEY,
                title TEXT NOT NULL,
                description TEXT DEFAULT '',
                status TEXT DEFAULT 'todo',
                priority TEXT DEFAULT 'medium',
                assigned_to TEXT,
                created_by TEXT NOT NULL,
                group_id TEXT NOT NULL,
                parent_task_id TEXT,
                sort_order INTEGER DEFAULT 0,
                created_at TEXT DEFAULT (datetime('now')),
                updated_at TEXT DEFAULT (datetime('now')),
                version INTEGER DEFAULT 1
            );
            CREATE TABLE task_comments (
                id TEXT PRIMARY KEY,
                task_id TEXT NOT NULL,
                agent_id TEXT NOT NULL,
                content TEXT NOT NULL,
                created_at TEXT DEFAULT (datetime('now'))
            );
            CREATE TABLE channels (
                id TEXT PRIMARY KEY,
                name TEXT NOT NULL,
                group_id TEXT NOT NULL,
                created_by TEXT NOT NULL,
                created_at TEXT DEFAULT (datetime('now'))
            );
            CREATE TABLE channel_messages (
                id TEXT PRIMARY KEY,
                channel_id TEXT NOT NULL,
                from_agent TEXT NOT NULL,
                content TEXT NOT NULL,
                created_at TEXT DEFAULT (datetime('now'))
            );",
        )
        .unwrap();
        conn
    }
    // ---- REGRESSION: list_tasks named column access ----
    // These tests duplicate the production SQL/row-mapping against the
    // in-memory schema because list_tasks itself goes through open_db().
    #[test]
    fn test_list_tasks_named_column_access() {
        let conn = test_db();
        conn.execute(
            "INSERT INTO tasks (id, title, description, status, priority, assigned_to, created_by, group_id, sort_order)
             VALUES ('t1', 'Fix bug', 'Critical fix', 'progress', 'high', 'a1', 'admin', 'g1', 1)",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO tasks (id, title, description, status, priority, assigned_to, created_by, group_id, sort_order)
             VALUES ('t2', 'Add tests', '', 'todo', 'medium', NULL, 'a1', 'g1', 2)",
            [],
        ).unwrap();
        let mut stmt = conn.prepare(
            "SELECT id, title, description, status, priority, assigned_to,
                    created_by, group_id, parent_task_id, sort_order,
                    created_at, updated_at, version
             FROM tasks WHERE group_id = ?1
             ORDER BY sort_order ASC, created_at DESC",
        ).unwrap();
        let tasks: Vec<Task> = stmt.query_map(params!["g1"], |row| {
            Ok(Task {
                id: row.get("id")?,
                title: row.get("title")?,
                description: row.get::<_, String>("description").unwrap_or_default(),
                status: row.get::<_, String>("status").unwrap_or_else(|_| "todo".into()),
                priority: row.get::<_, String>("priority").unwrap_or_else(|_| "medium".into()),
                assigned_to: row.get("assigned_to")?,
                created_by: row.get("created_by")?,
                group_id: row.get("group_id")?,
                parent_task_id: row.get("parent_task_id")?,
                sort_order: row.get::<_, i32>("sort_order").unwrap_or(0),
                created_at: row.get::<_, String>("created_at").unwrap_or_default(),
                updated_at: row.get::<_, String>("updated_at").unwrap_or_default(),
                version: row.get::<_, i64>("version").unwrap_or(1),
            })
        }).unwrap().collect::<Result<Vec<_>, _>>().unwrap();
        assert_eq!(tasks.len(), 2);
        assert_eq!(tasks[0].id, "t1");
        assert_eq!(tasks[0].title, "Fix bug");
        assert_eq!(tasks[0].status, "progress");
        assert_eq!(tasks[0].priority, "high");
        assert_eq!(tasks[0].assigned_to, Some("a1".to_string()));
        assert_eq!(tasks[0].sort_order, 1);
        assert_eq!(tasks[1].id, "t2");
        assert_eq!(tasks[1].assigned_to, None);
        assert_eq!(tasks[1].parent_task_id, None);
    }
    // ---- REGRESSION: task_comments named column access ----
    #[test]
    fn test_task_comments_named_column_access() {
        let conn = test_db();
        conn.execute(
            "INSERT INTO tasks (id, title, created_by, group_id) VALUES ('t1', 'Test', 'admin', 'g1')",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO task_comments (id, task_id, agent_id, content) VALUES ('c1', 't1', 'a1', 'Working on it')",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO task_comments (id, task_id, agent_id, content) VALUES ('c2', 't1', 'a2', 'Looks good')",
            [],
        ).unwrap();
        let mut stmt = conn.prepare(
            "SELECT id, task_id, agent_id, content, created_at
             FROM task_comments WHERE task_id = ?1
             ORDER BY created_at ASC",
        ).unwrap();
        let comments: Vec<TaskComment> = stmt.query_map(params!["t1"], |row| {
            Ok(TaskComment {
                id: row.get("id")?,
                task_id: row.get("task_id")?,
                agent_id: row.get("agent_id")?,
                content: row.get("content")?,
                created_at: row.get::<_, String>("created_at").unwrap_or_default(),
            })
        }).unwrap().collect::<Result<Vec<_>, _>>().unwrap();
        assert_eq!(comments.len(), 2);
        assert_eq!(comments[0].agent_id, "a1");
        assert_eq!(comments[0].content, "Working on it");
        assert_eq!(comments[1].agent_id, "a2");
    }
    // ---- serde camelCase serialization ----
    #[test]
    fn test_task_serializes_to_camel_case() {
        let task = Task {
            id: "t1".into(),
            title: "Test".into(),
            description: "desc".into(),
            status: "todo".into(),
            priority: "high".into(),
            assigned_to: Some("a1".into()),
            created_by: "admin".into(),
            group_id: "g1".into(),
            parent_task_id: None,
            sort_order: 0,
            created_at: "2026-01-01".into(),
            updated_at: "2026-01-01".into(),
            version: 1,
        };
        let json = serde_json::to_value(&task).unwrap();
        assert!(json.get("assignedTo").is_some(), "expected camelCase 'assignedTo'");
        assert!(json.get("createdBy").is_some(), "expected camelCase 'createdBy'");
        assert!(json.get("groupId").is_some(), "expected camelCase 'groupId'");
        assert!(json.get("parentTaskId").is_some(), "expected camelCase 'parentTaskId'");
        assert!(json.get("sortOrder").is_some(), "expected camelCase 'sortOrder'");
        assert!(json.get("createdAt").is_some(), "expected camelCase 'createdAt'");
        assert!(json.get("updatedAt").is_some(), "expected camelCase 'updatedAt'");
        // Ensure no snake_case leaks
        assert!(json.get("assigned_to").is_none());
        assert!(json.get("created_by").is_none());
        assert!(json.get("group_id").is_none());
    }
    #[test]
    fn test_task_comment_serializes_to_camel_case() {
        let comment = TaskComment {
            id: "c1".into(),
            task_id: "t1".into(),
            agent_id: "a1".into(),
            content: "note".into(),
            created_at: "2026-01-01".into(),
        };
        let json = serde_json::to_value(&comment).unwrap();
        assert!(json.get("taskId").is_some(), "expected camelCase 'taskId'");
        assert!(json.get("agentId").is_some(), "expected camelCase 'agentId'");
        assert!(json.get("createdAt").is_some(), "expected camelCase 'createdAt'");
        assert!(json.get("task_id").is_none());
    }
    // ---- update_task_status validation ----
    #[test]
    fn test_update_task_status_rejects_invalid() {
        // Can't call update_task_status directly (uses open_db), but we can test the validation logic
        let valid = ["todo", "progress", "review", "done", "blocked"];
        assert!(valid.contains(&"todo"));
        assert!(valid.contains(&"done"));
        assert!(!valid.contains(&"invalid"));
        assert!(!valid.contains(&"cancelled"));
    }
    // ---- Review channel auto-creation ----
    #[test]
    fn test_ensure_review_channels_creates_both() {
        let conn = test_db();
        let result = ensure_review_channels(&conn, "g1");
        assert!(result.is_some(), "should return review-queue channel ID");
        // Verify both channels exist
        let queue_count: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM channels WHERE name = 'review-queue' AND group_id = 'g1'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(queue_count, 1);
        let log_count: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM channels WHERE name = 'review-log' AND group_id = 'g1'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(log_count, 1);
    }
    #[test]
    fn test_ensure_review_channels_idempotent() {
        let conn = test_db();
        let id1 = ensure_review_channels(&conn, "g1").unwrap();
        let id2 = ensure_review_channels(&conn, "g1").unwrap();
        assert_eq!(id1, id2, "should return same channel ID on repeated calls");
        // Verify no duplicates
        let count: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM channels WHERE name = 'review-queue' AND group_id = 'g1'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(count, 1);
    }
    #[test]
    fn test_notify_review_channel_posts_message() {
        let conn = test_db();
        // Insert a task
        conn.execute(
            "INSERT INTO tasks (id, title, created_by, group_id) VALUES ('t1', 'Fix login bug', 'admin', 'g1')",
            [],
        ).unwrap();
        // Trigger notification (should auto-create channel)
        notify_review_channel(&conn, "g1", "t1", "Fix login bug");
        // Verify message was posted
        let msg_count: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM channel_messages cm
                 JOIN channels c ON cm.channel_id = c.id
                 WHERE c.name = 'review-queue' AND c.group_id = 'g1'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(msg_count, 1);
        // Verify message content
        let content: String = conn
            .query_row(
                "SELECT cm.content FROM channel_messages cm
                 JOIN channels c ON cm.channel_id = c.id
                 WHERE c.name = 'review-queue'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        assert!(content.contains("Fix login bug"));
        assert!(content.contains("t1"));
    }
    // ---- Review queue count ----
    #[test]
    fn test_review_queue_count_via_sql() {
        let conn = test_db();
        // Insert tasks with various statuses
        conn.execute(
            "INSERT INTO tasks (id, title, status, created_by, group_id) VALUES ('t1', 'A', 'review', 'admin', 'g1')",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO tasks (id, title, status, created_by, group_id) VALUES ('t2', 'B', 'review', 'admin', 'g1')",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO tasks (id, title, status, created_by, group_id) VALUES ('t3', 'C', 'progress', 'admin', 'g1')",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO tasks (id, title, status, created_by, group_id) VALUES ('t4', 'D', 'review', 'admin', 'g2')",
            [],
        ).unwrap();
        // Count review tasks for g1
        let count: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM tasks WHERE group_id = ?1 AND status = 'review'",
                params!["g1"],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(count, 2, "should count only review tasks in g1");
        // Count review tasks for g2
        let count_g2: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM tasks WHERE group_id = ?1 AND status = 'review'",
                params!["g2"],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(count_g2, 1, "should count only review tasks in g2");
    }
    // ---- Optimistic locking (version column) ----
    #[test]
    fn test_version_column_defaults_to_1() {
        let conn = test_db();
        conn.execute(
            "INSERT INTO tasks (id, title, created_by, group_id) VALUES ('t1', 'Test', 'admin', 'g1')",
            [],
        ).unwrap();
        let version: i64 = conn
            .query_row("SELECT version FROM tasks WHERE id = 't1'", [], |row| row.get(0))
            .unwrap();
        assert_eq!(version, 1);
    }
    #[test]
    fn test_optimistic_lock_success() {
        let conn = test_db();
        conn.execute(
            "INSERT INTO tasks (id, title, status, created_by, group_id) VALUES ('t1', 'Test', 'todo', 'admin', 'g1')",
            [],
        ).unwrap();
        // Update with correct version (1)
        let rows = conn.execute(
            "UPDATE tasks SET status = 'progress', version = version + 1, updated_at = datetime('now')
             WHERE id = 't1' AND version = 1",
            [],
        ).unwrap();
        assert_eq!(rows, 1, "should affect 1 row");
        let new_version: i64 = conn
            .query_row("SELECT version FROM tasks WHERE id = 't1'", [], |row| row.get(0))
            .unwrap();
        assert_eq!(new_version, 2);
    }
    #[test]
    fn test_optimistic_lock_conflict() {
        let conn = test_db();
        conn.execute(
            "INSERT INTO tasks (id, title, status, created_by, group_id) VALUES ('t1', 'Test', 'todo', 'admin', 'g1')",
            [],
        ).unwrap();
        // First update succeeds
        conn.execute(
            "UPDATE tasks SET status = 'progress', version = version + 1, updated_at = datetime('now')
             WHERE id = 't1' AND version = 1",
            [],
        ).unwrap();
        // Second update with stale version (1) should affect 0 rows
        let rows = conn.execute(
            "UPDATE tasks SET status = 'review', version = version + 1, updated_at = datetime('now')
             WHERE id = 't1' AND version = 1",
            [],
        ).unwrap();
        assert_eq!(rows, 0, "stale version should affect 0 rows");
        // Task should still be in 'progress' state
        let status: String = conn
            .query_row("SELECT status FROM tasks WHERE id = 't1'", [], |row| row.get(0))
            .unwrap();
        assert_eq!(status, "progress");
    }
    #[test]
    fn test_version_in_list_tasks_query() {
        let conn = test_db();
        conn.execute(
            "INSERT INTO tasks (id, title, created_by, group_id, sort_order) VALUES ('t1', 'V1', 'admin', 'g1', 1)",
            [],
        ).unwrap();
        // Bump version to 3
        conn.execute("UPDATE tasks SET version = 3 WHERE id = 't1'", []).unwrap();
        let mut stmt = conn.prepare(
            "SELECT id, title, description, status, priority, assigned_to,
                    created_by, group_id, parent_task_id, sort_order,
                    created_at, updated_at, version
             FROM tasks WHERE group_id = ?1",
        ).unwrap();
        let tasks: Vec<Task> = stmt.query_map(params!["g1"], |row| {
            Ok(Task {
                id: row.get("id")?,
                title: row.get("title")?,
                description: row.get::<_, String>("description").unwrap_or_default(),
                status: row.get::<_, String>("status").unwrap_or_else(|_| "todo".into()),
                priority: row.get::<_, String>("priority").unwrap_or_else(|_| "medium".into()),
                assigned_to: row.get("assigned_to")?,
                created_by: row.get("created_by")?,
                group_id: row.get("group_id")?,
                parent_task_id: row.get("parent_task_id")?,
                sort_order: row.get::<_, i32>("sort_order").unwrap_or(0),
                created_at: row.get::<_, String>("created_at").unwrap_or_default(),
                updated_at: row.get::<_, String>("updated_at").unwrap_or_default(),
                version: row.get::<_, i64>("version").unwrap_or(1),
            })
        }).unwrap().collect::<Result<Vec<_>, _>>().unwrap();
        assert_eq!(tasks.len(), 1);
        assert_eq!(tasks[0].version, 3);
    }
    #[test]
    fn test_version_serializes_to_camel_case() {
        let task = Task {
            id: "t1".into(),
            title: "Test".into(),
            description: "".into(),
            status: "todo".into(),
            priority: "medium".into(),
            assigned_to: None,
            created_by: "admin".into(),
            group_id: "g1".into(),
            parent_task_id: None,
            sort_order: 0,
            created_at: "2026-01-01".into(),
            updated_at: "2026-01-01".into(),
            version: 5,
        };
        let json = serde_json::to_value(&task).unwrap();
        assert_eq!(json.get("version").unwrap(), 5);
    }
}

View file

@@ -0,0 +1,58 @@
use tauri::State;
use crate::AppState;
use crate::sidecar::AgentQueryOptions;
use bterminal_core::sandbox::SandboxConfig;
/// Forward an agent query to the sidecar for the session in `options`.
#[tauri::command]
#[tracing::instrument(skip(state, options), fields(session_id = %options.session_id))]
pub fn agent_query(
    state: State<'_, AppState>,
    options: AgentQueryOptions,
) -> Result<(), String> {
    state.sidecar_manager.query(&options)
}
/// Stop the sidecar session identified by `session_id`.
#[tauri::command]
#[tracing::instrument(skip(state))]
pub fn agent_stop(state: State<'_, AppState>, session_id: String) -> Result<(), String> {
    state.sidecar_manager.stop_session(&session_id)
}
/// Report whether the sidecar is up and ready to accept queries.
#[tauri::command]
pub fn agent_ready(state: State<'_, AppState>) -> bool {
    state.sidecar_manager.is_ready()
}
/// Restart the sidecar process.
#[tauri::command]
#[tracing::instrument(skip(state))]
pub fn agent_restart(state: State<'_, AppState>) -> Result<(), String> {
    state.sidecar_manager.restart()
}
/// Update sidecar sandbox configuration and restart to apply.
/// `project_cwds` — directories needing read+write access.
/// `worktree_roots` — optional worktree directories.
/// `enabled` — whether Landlock sandboxing is active.
#[tauri::command]
#[tracing::instrument(skip(state))]
pub fn agent_set_sandbox(
    state: State<'_, AppState>,
    project_cwds: Vec<String>,
    worktree_roots: Vec<String>,
    enabled: bool,
) -> Result<(), String> {
    // SandboxConfig::for_projects takes &str slices, so borrow the owned Strings.
    let cwd_slices: Vec<&str> = project_cwds.iter().map(String::as_str).collect();
    let worktree_slices: Vec<&str> = worktree_roots.iter().map(String::as_str).collect();
    let mut config = SandboxConfig::for_projects(&cwd_slices, &worktree_slices);
    config.enabled = enabled;
    state.sidecar_manager.set_sandbox(config);
    // Restart sidecar so Landlock restrictions take effect on the new process;
    // nothing to restart when the sidecar is not running yet.
    if !state.sidecar_manager.is_ready() {
        return Ok(());
    }
    state.sidecar_manager.restart()
}

View file

@@ -0,0 +1,152 @@
use crate::btmsg;
use crate::groups;
// Thin Tauri command wrappers over the btmsg module: each borrows the owned
// String arguments the bridge deserializes and delegates to the matching fn.
/// List agents registered in a group.
#[tauri::command]
pub fn btmsg_get_agents(group_id: String) -> Result<Vec<btmsg::BtmsgAgent>, String> {
    btmsg::get_agents(&group_id)
}
/// Count unread messages addressed to an agent.
#[tauri::command]
pub fn btmsg_unread_count(agent_id: String) -> Result<i32, String> {
    btmsg::unread_count(&agent_id)
}
/// Fetch the unread messages addressed to an agent.
#[tauri::command]
pub fn btmsg_unread_messages(agent_id: String) -> Result<Vec<btmsg::BtmsgMessage>, String> {
    btmsg::unread_messages(&agent_id)
}
/// Fetch up to `limit` messages of the conversation between two agents.
#[tauri::command]
pub fn btmsg_history(agent_id: String, other_id: String, limit: i32) -> Result<Vec<btmsg::BtmsgMessage>, String> {
    btmsg::history(&agent_id, &other_id, limit)
}
/// Send a direct message; returns the new message ID.
#[tauri::command]
pub fn btmsg_send(from_agent: String, to_agent: String, content: String) -> Result<String, String> {
    btmsg::send_message(&from_agent, &to_agent, &content)
}
/// Set an agent's presence/status string.
#[tauri::command]
pub fn btmsg_set_status(agent_id: String, status: String) -> Result<(), String> {
    btmsg::set_status(&agent_id, &status)
}
/// Ensure the admin agent exists for a group.
#[tauri::command]
pub fn btmsg_ensure_admin(group_id: String) -> Result<(), String> {
    btmsg::ensure_admin(&group_id)
}
/// Fetch the combined message feed for a group, newest-limited to `limit`.
#[tauri::command]
pub fn btmsg_all_feed(group_id: String, limit: i32) -> Result<Vec<btmsg::BtmsgFeedMessage>, String> {
    btmsg::all_feed(&group_id, limit)
}
/// Mark a whole conversation from `sender_id` as read by `reader_id`.
#[tauri::command]
pub fn btmsg_mark_read(reader_id: String, sender_id: String) -> Result<(), String> {
    btmsg::mark_read_conversation(&reader_id, &sender_id)
}
/// List the channels of a group.
#[tauri::command]
pub fn btmsg_get_channels(group_id: String) -> Result<Vec<btmsg::BtmsgChannel>, String> {
    btmsg::get_channels(&group_id)
}
/// Fetch up to `limit` messages from a channel.
#[tauri::command]
pub fn btmsg_channel_messages(channel_id: String, limit: i32) -> Result<Vec<btmsg::BtmsgChannelMessage>, String> {
    btmsg::get_channel_messages(&channel_id, limit)
}
/// Post a message to a channel; returns the new message ID.
#[tauri::command]
pub fn btmsg_channel_send(channel_id: String, from_agent: String, content: String) -> Result<String, String> {
    btmsg::send_channel_message(&channel_id, &from_agent, &content)
}
/// Create a channel in a group; returns the new channel ID.
#[tauri::command]
pub fn btmsg_create_channel(name: String, group_id: String, created_by: String) -> Result<String, String> {
    btmsg::create_channel(&name, &group_id, &created_by)
}
/// Add an agent to a channel's member list.
#[tauri::command]
pub fn btmsg_add_channel_member(channel_id: String, agent_id: String) -> Result<(), String> {
    btmsg::add_channel_member(&channel_id, &agent_id)
}
/// Register all agents from a GroupsFile into the btmsg database.
/// Creates/updates agent records, sets up contact permissions, ensures review channels.
#[tauri::command]
pub fn btmsg_register_agents(config: groups::GroupsFile) -> Result<(), String> {
    btmsg::register_agents_from_groups(&config)
}
// ---- Per-message acknowledgment (seen_messages) ----
/// Fetch messages for `agent_id` not yet marked seen by this session.
#[tauri::command]
pub fn btmsg_unseen_messages(agent_id: String, session_id: String) -> Result<Vec<btmsg::BtmsgMessage>, String> {
    btmsg::unseen_messages(&agent_id, &session_id)
}
/// Mark the given message IDs as seen by a session.
#[tauri::command]
pub fn btmsg_mark_seen(session_id: String, message_ids: Vec<String>) -> Result<(), String> {
    btmsg::mark_messages_seen(&session_id, &message_ids)
}
/// Prune old seen-message records; returns the number removed.
#[tauri::command]
pub fn btmsg_prune_seen() -> Result<u64, String> {
    // Retention window of 7 days (in seconds) and a 200k row cap.
    btmsg::prune_seen_messages(7 * 24 * 3600, 200_000)
}
// ---- Heartbeat monitoring ----
/// Record a liveness heartbeat for an agent.
#[tauri::command]
pub fn btmsg_record_heartbeat(agent_id: String) -> Result<(), String> {
    btmsg::record_heartbeat(&agent_id)
}
/// List agents in a group whose last heartbeat is older than `threshold_secs`.
#[tauri::command]
pub fn btmsg_get_stale_agents(group_id: String, threshold_secs: i64) -> Result<Vec<String>, String> {
    btmsg::get_stale_agents(&group_id, threshold_secs)
}
/// Fetch the latest heartbeat record for each agent in a group.
#[tauri::command]
pub fn btmsg_get_agent_heartbeats(group_id: String) -> Result<Vec<btmsg::AgentHeartbeat>, String> {
    btmsg::get_agent_heartbeats(&group_id)
}
// ---- Dead letter queue ----
/// Fetch up to `limit` dead-lettered messages for a group.
#[tauri::command]
pub fn btmsg_get_dead_letters(group_id: String, limit: i32) -> Result<Vec<btmsg::DeadLetter>, String> {
    btmsg::get_dead_letters(&group_id, limit)
}
/// Delete all dead letters for a group.
#[tauri::command]
pub fn btmsg_clear_dead_letters(group_id: String) -> Result<(), String> {
    btmsg::clear_dead_letters(&group_id)
}
/// Queue an undeliverable message, recording the delivery `error`.
#[tauri::command]
pub fn btmsg_queue_dead_letter(
    from_agent: String,
    to_agent: String,
    content: String,
    error: String,
) -> Result<(), String> {
    btmsg::queue_dead_letter(&from_agent, &to_agent, &content, &error)
}
// ---- Audit log ----
/// Append an event to the audit log.
#[tauri::command]
pub fn audit_log_event(agent_id: String, event_type: String, detail: String) -> Result<(), String> {
    btmsg::log_audit_event(&agent_id, &event_type, &detail)
}
/// Page through a group's audit log (`limit` rows from `offset`).
#[tauri::command]
pub fn audit_log_list(group_id: String, limit: i32, offset: i32) -> Result<Vec<btmsg::AuditEntry>, String> {
    btmsg::get_audit_log(&group_id, limit, offset)
}
/// Fetch up to `limit` audit entries for a single agent.
#[tauri::command]
pub fn audit_log_for_agent(agent_id: String, limit: i32) -> Result<Vec<btmsg::AuditEntry>, String> {
    btmsg::get_audit_log_for_agent(&agent_id, limit)
}

View file

@ -0,0 +1,43 @@
// Tauri command wrappers over the bttask task-board backend.
use crate::bttask;
/// List all tasks belonging to `group_id`.
#[tauri::command]
pub fn bttask_list(group_id: String) -> Result<Vec<bttask::Task>, String> {
    bttask::list_tasks(&group_id)
}
/// List the comments attached to a task.
#[tauri::command]
pub fn bttask_comments(task_id: String) -> Result<Vec<bttask::TaskComment>, String> {
    bttask::task_comments(&task_id)
}
/// Update a task's status with optimistic concurrency; `version` must match
/// the stored version. Returns the new version on success.
#[tauri::command]
pub fn bttask_update_status(task_id: String, status: String, version: i64) -> Result<i64, String> {
    bttask::update_task_status(&task_id, &status, version)
}
/// Add a comment to a task; returns the new comment's id.
#[tauri::command]
pub fn bttask_add_comment(task_id: String, agent_id: String, content: String) -> Result<String, String> {
    bttask::add_comment(&task_id, &agent_id, &content)
}
/// Create a task (optionally pre-assigned); returns the new task's id.
#[tauri::command]
pub fn bttask_create(
    title: String,
    description: String,
    priority: String,
    group_id: String,
    created_by: String,
    assigned_to: Option<String>,
) -> Result<String, String> {
    bttask::create_task(&title, &description, &priority, &group_id, &created_by, assigned_to.as_deref())
}
/// Delete a task by id.
#[tauri::command]
pub fn bttask_delete(task_id: String) -> Result<(), String> {
    bttask::delete_task(&task_id)
}
/// Count tasks currently waiting in the review queue for `group_id`.
#[tauri::command]
pub fn bttask_review_queue_count(group_id: String) -> Result<i64, String> {
    bttask::review_queue_count(&group_id)
}

View file

@ -0,0 +1,158 @@
// Claude profile and skill discovery commands
/// A Claude account profile discovered from the switcher config directory.
#[derive(serde::Serialize)]
pub struct ClaudeProfile {
    /// Profile directory name (also the display identifier).
    pub name: String,
    /// `email` field from profile.toml, when present.
    pub email: Option<String>,
    /// `subscription_type` field from profile.toml, when present.
    pub subscription_type: Option<String>,
    /// `display_name` field from profile.toml, when present.
    pub display_name: Option<String>,
    /// Path to the Claude config dir used for this profile.
    pub config_dir: String,
}
/// A user-installed Claude skill discovered under `~/.claude/skills`.
#[derive(serde::Serialize)]
pub struct ClaudeSkill {
    /// Skill name (directory name or markdown file stem).
    pub name: String,
    /// First content line of the skill file, truncated to 120 chars.
    pub description: String,
    /// Absolute path to the skill's markdown file.
    pub source_path: String,
}
/// Enumerate Claude profiles managed by the switcher tool.
///
/// Scans `<config>/switcher/profiles/*` for profile directories, extracting
/// `email`, `subscription_type` and `display_name` from each `profile.toml`
/// when present. The profile's config dir is `<config>/switcher-claude/<name>`
/// if that directory exists, otherwise the shared `~/.claude`. When no
/// profiles are found, a single "default" profile is returned so the UI
/// always has something to select.
#[tauri::command]
pub fn claude_list_profiles() -> Vec<ClaudeProfile> {
    let base = dirs::config_dir().unwrap_or_default();
    let profile_root = base.join("switcher").join("profiles");
    let claude_root = base.join("switcher-claude");
    // Shared fallback config dir: ~/.claude
    let shared_claude_dir = || {
        dirs::home_dir()
            .unwrap_or_default()
            .join(".claude")
            .to_string_lossy()
            .to_string()
    };
    let mut out = Vec::new();
    if let Ok(dir) = std::fs::read_dir(&profile_root) {
        for entry in dir.flatten() {
            if !entry.path().is_dir() {
                continue;
            }
            let name = entry.file_name().to_string_lossy().to_string();
            // Pull optional metadata out of profile.toml when the file exists;
            // a read failure is logged and treated like an empty file.
            let toml_path = entry.path().join("profile.toml");
            let mut email = None;
            let mut subscription_type = None;
            let mut display_name = None;
            if toml_path.exists() {
                let content = std::fs::read_to_string(&toml_path).unwrap_or_else(|e| {
                    log::warn!("Failed to read {}: {e}", toml_path.display());
                    String::new()
                });
                email = extract_toml_value(&content, "email");
                subscription_type = extract_toml_value(&content, "subscription_type");
                display_name = extract_toml_value(&content, "display_name");
            }
            // Prefer the per-profile isolated config dir when one exists.
            let dedicated = claude_root.join(&name);
            let config_dir = if dedicated.exists() {
                dedicated.to_string_lossy().to_string()
            } else {
                shared_claude_dir()
            };
            out.push(ClaudeProfile { name, email, subscription_type, display_name, config_dir });
        }
    }
    // Guarantee at least one selectable profile.
    if out.is_empty() {
        out.push(ClaudeProfile {
            name: "default".to_string(),
            email: None,
            subscription_type: None,
            display_name: None,
            config_dir: shared_claude_dir(),
        });
    }
    out
}
/// Scan `content` line by line for a flat `key = "value"` TOML assignment.
///
/// Returns the unquoted value of the first matching line, or `None` when the
/// key is absent or its value is empty. This is a deliberately minimal parser:
/// it handles only top-level `key = value` lines, not tables or full TOML.
fn extract_toml_value(content: &str, key: &str) -> Option<String> {
    content.lines().find_map(|raw| {
        // Only `key` followed by optional whitespace and '=' counts as a match.
        let (lhs, rhs) = raw.trim().split_once('=')?;
        if lhs.trim_end() != key {
            return None;
        }
        let value = rhs.trim().trim_matches('"');
        if value.is_empty() {
            None
        } else {
            Some(value.to_string())
        }
    })
}
/// Discover user-installed Claude skills under `~/.claude/skills`.
///
/// Supports two layouts: a directory containing `SKILL.md`, or a bare `*.md`
/// file whose stem becomes the skill name. The description is the first
/// non-empty, non-`#`-heading line of the skill file, truncated to 120 chars.
/// Unreadable files yield an empty description rather than an error.
#[tauri::command]
pub fn claude_list_skills() -> Vec<ClaudeSkill> {
    let mut skills = Vec::new();
    let home = dirs::home_dir().unwrap_or_default();
    let skills_dir = home.join(".claude").join("skills");
    if let Ok(entries) = std::fs::read_dir(&skills_dir) {
        for entry in entries.flatten() {
            let path = entry.path();
            // Resolve (skill name, markdown file) for both supported layouts.
            let (name, skill_file) = if path.is_dir() {
                let skill_md = path.join("SKILL.md");
                if skill_md.exists() {
                    (entry.file_name().to_string_lossy().to_string(), skill_md)
                } else {
                    continue;
                }
            } else if path.extension().map_or(false, |e| e == "md") {
                let stem = path.file_stem().unwrap_or_default().to_string_lossy().to_string();
                (stem, path.clone())
            } else {
                continue;
            };
            // First content line (skipping blanks and '#' headings) as a short
            // description. `find` replaces the original `filter(..).next()`
            // (clippy::filter_next) — identical semantics.
            let description = if let Ok(content) = std::fs::read_to_string(&skill_file) {
                content
                    .lines()
                    .find(|l| !l.trim().is_empty() && !l.starts_with('#'))
                    .unwrap_or("")
                    .trim()
                    .chars()
                    .take(120)
                    .collect()
            } else {
                String::new()
            };
            skills.push(ClaudeSkill {
                name,
                description,
                source_path: skill_file.to_string_lossy().to_string(),
            });
        }
    }
    skills
}
/// Read a skill file's contents, restricted to `~/.claude/skills`.
///
/// Canonicalizes both the skills directory and the requested path (which
/// also resolves symlinks) and rejects any path that escapes the skills
/// directory, preventing path traversal from the frontend.
///
/// # Errors
/// Fails when the home or skills directory is unavailable, the path cannot
/// be canonicalized (e.g. does not exist), the path lies outside the skills
/// directory, or the file cannot be read.
#[tauri::command]
pub fn claude_read_skill(path: String) -> Result<String, String> {
    let skills_dir = dirs::home_dir()
        .ok_or("Cannot determine home directory")?
        .join(".claude")
        .join("skills");
    let canonical_skills = skills_dir.canonicalize()
        .map_err(|_| "Skills directory does not exist".to_string())?;
    let canonical_path = std::path::Path::new(&path).canonicalize()
        .map_err(|e| format!("Invalid skill path: {e}"))?;
    // Containment check happens on canonical paths, so `..` segments and
    // symlink tricks cannot escape the skills directory.
    if !canonical_path.starts_with(&canonical_skills) {
        return Err("Access denied: path is outside skills directory".to_string());
    }
    std::fs::read_to_string(&canonical_path).map_err(|e| format!("Failed to read skill: {e}"))
}

View file

@ -0,0 +1,130 @@
// File browser commands (Files tab)
/// One entry in a directory listing returned to the Files tab.
#[derive(serde::Serialize)]
pub struct DirEntry {
    /// File or directory name (no path).
    pub name: String,
    /// Full path as a lossily-converted string.
    pub path: String,
    /// True when the entry is a directory.
    pub is_dir: bool,
    /// Size in bytes from fs metadata (directories report their metadata len).
    pub size: u64,
    /// Lowercased extension; empty for directories or extensionless files.
    pub ext: String,
}
/// Content types for file viewer routing
/// Serialized with a `"type"` tag so the frontend can switch on the variant.
#[derive(serde::Serialize)]
#[serde(tag = "type")]
pub enum FileContent {
    /// UTF-8 text plus a syntax-highlighting language id.
    Text { content: String, lang: String },
    /// Binary content the viewer cannot render; `message` describes it.
    Binary { message: String },
    /// File exceeds the viewer's size cap; `size` is the byte count.
    TooLarge { size: u64 },
}
/// List the immediate children of `path` for the file browser.
///
/// Hidden entries (dot-prefixed names) are skipped. Results are ordered
/// directories-first, then case-insensitively by name.
///
/// # Errors
/// Fails when `path` is not a directory, or when reading the directory,
/// an entry, or its metadata fails.
#[tauri::command]
pub fn list_directory_children(path: String) -> Result<Vec<DirEntry>, String> {
    let dir = std::path::Path::new(&path);
    if !dir.is_dir() {
        return Err(format!("Not a directory: {path}"));
    }
    let mut out = Vec::new();
    for item in std::fs::read_dir(dir).map_err(|e| format!("Failed to read directory: {e}"))? {
        let item = item.map_err(|e| format!("Failed to read entry: {e}"))?;
        let meta = item.metadata().map_err(|e| format!("Failed to read metadata: {e}"))?;
        let name = item.file_name().to_string_lossy().into_owned();
        // Hidden entries are not shown in the Files tab.
        if name.starts_with('.') {
            continue;
        }
        let is_dir = meta.is_dir();
        let ext = if is_dir {
            String::new()
        } else {
            std::path::Path::new(&name)
                .extension()
                .map(|e| e.to_string_lossy().to_lowercase())
                .unwrap_or_default()
        };
        out.push(DirEntry {
            name,
            path: item.path().to_string_lossy().into_owned(),
            is_dir,
            size: meta.len(),
            ext,
        });
    }
    // Directories first (false sorts before true via `!is_dir`), then
    // case-insensitive alphabetical order.
    out.sort_by_key(|e| (!e.is_dir, e.name.to_lowercase()));
    Ok(out)
}
/// Read a file for the viewer, classifying it as text, binary, or too large.
///
/// Files over 10 MiB return `TooLarge`. Files with a known binary extension,
/// or whose bytes are not valid UTF-8, return `Binary`. Otherwise the text is
/// returned with a syntax-highlighting language id derived from the extension.
///
/// # Errors
/// Fails when `path` is not a regular file or its metadata cannot be read.
#[tauri::command]
pub fn read_file_content(path: String) -> Result<FileContent, String> {
    let file_path = std::path::Path::new(&path);
    if !file_path.is_file() {
        return Err(format!("Not a file: {path}"));
    }
    let metadata = std::fs::metadata(&path).map_err(|e| format!("Failed to read metadata: {e}"))?;
    let size = metadata.len();
    // Hard cap: never load more than 10 MiB into the webview.
    if size > 10 * 1024 * 1024 {
        return Ok(FileContent::TooLarge { size });
    }
    let ext = file_path
        .extension()
        .map(|e| e.to_string_lossy().to_lowercase())
        .unwrap_or_default();
    // Extensions never rendered as text.
    // NOTE(review): "svg" is XML text but is treated as binary here — confirm intended.
    let binary_exts = ["png", "jpg", "jpeg", "gif", "webp", "svg", "ico", "bmp",
        "pdf", "zip", "tar", "gz", "7z", "rar",
        "mp3", "mp4", "wav", "ogg", "webm", "avi",
        "woff", "woff2", "ttf", "otf", "eot",
        "exe", "dll", "so", "dylib", "wasm"];
    if binary_exts.contains(&ext.as_str()) {
        return Ok(FileContent::Binary { message: format!("Binary file ({ext}), {size} bytes") });
    }
    // read_to_string fails on invalid UTF-8; report that as a binary file.
    // (Fixed clippy::useless_format: no-arg format! replaced with to_string.)
    let content = std::fs::read_to_string(&path)
        .map_err(|_| "Binary or non-UTF-8 file".to_string())?;
    let lang = match ext.as_str() {
        "rs" => "rust",
        "ts" | "tsx" => "typescript",
        "js" | "jsx" | "mjs" | "cjs" => "javascript",
        "py" => "python",
        "svelte" => "svelte",
        "html" | "htm" => "html",
        "css" | "scss" | "less" => "css",
        "json" => "json",
        "toml" => "toml",
        "yaml" | "yml" => "yaml",
        "md" | "markdown" => "markdown",
        "sh" | "bash" | "zsh" => "bash",
        "sql" => "sql",
        "xml" => "xml",
        "csv" => "csv",
        "dockerfile" => "dockerfile",
        "lock" => "text",
        _ => "text",
    }.to_string();
    Ok(FileContent::Text { content, lang })
}
/// Overwrite an existing file with `content`.
///
/// Refuses to create new files: the target must already exist as a regular
/// file, so the frontend cannot write to arbitrary new paths.
///
/// # Errors
/// Fails when the target is not an existing file or the write fails.
#[tauri::command]
pub fn write_file_content(path: String, content: String) -> Result<(), String> {
    let target = std::path::Path::new(&path);
    if target.is_file() {
        std::fs::write(target, content.as_bytes())
            .map_err(|e| format!("Failed to write file: {e}"))
    } else {
        Err(format!("Not an existing file: {path}"))
    }
}
/// Open a native folder-picker dialog parented to `window`.
///
/// Returns `Ok(None)` when the user cancels; otherwise the selected
/// directory path as a lossily-converted string.
#[tauri::command]
pub async fn pick_directory(window: tauri::Window) -> Result<Option<String>, String> {
    let dialog = rfd::AsyncFileDialog::new()
        .set_title("Select Directory")
        .set_parent(&window);
    let folder = dialog.pick_folder().await;
    Ok(folder.map(|f| f.path().to_string_lossy().into_owned()))
}

View file

@ -0,0 +1,16 @@
// Tauri command wrappers for agent-group configuration.
use crate::groups::{GroupsFile, MdFileEntry};
/// Load the groups configuration file.
#[tauri::command]
pub fn groups_load() -> Result<GroupsFile, String> {
    crate::groups::load_groups()
}
/// Persist the groups configuration file.
#[tauri::command]
pub fn groups_save(config: GroupsFile) -> Result<(), String> {
    crate::groups::save_groups(&config)
}
/// Discover markdown files under `cwd` (e.g. for instruction-file pickers).
#[tauri::command]
pub fn discover_markdown_files(cwd: String) -> Result<Vec<MdFileEntry>, String> {
    crate::groups::discover_markdown_files(&cwd)
}

View file

@ -0,0 +1,67 @@
// Tauri command wrappers over the ctx (context manager) and Memora databases.
use tauri::State;
use crate::AppState;
use crate::{ctx, memora};
// --- ctx commands ---
/// Create the ctx database schema and open the read-only connection.
#[tauri::command]
pub fn ctx_init_db(state: State<'_, AppState>) -> Result<(), String> {
    state.ctx_db.init_db()
}
/// Register a project in the ctx database (no-op if it already exists).
#[tauri::command]
pub fn ctx_register_project(state: State<'_, AppState>, name: String, description: String, work_dir: Option<String>) -> Result<(), String> {
    state.ctx_db.register_project(&name, &description, work_dir.as_deref())
}
/// Fetch all context entries for a project, ordered by key.
#[tauri::command]
pub fn ctx_get_context(state: State<'_, AppState>, project: String) -> Result<Vec<ctx::CtxEntry>, String> {
    state.ctx_db.get_context(&project)
}
/// Fetch the shared (cross-project) context entries.
#[tauri::command]
pub fn ctx_get_shared(state: State<'_, AppState>) -> Result<Vec<ctx::CtxEntry>, String> {
    state.ctx_db.get_shared()
}
/// Fetch the most recent `limit` summaries for a project.
#[tauri::command]
pub fn ctx_get_summaries(state: State<'_, AppState>, project: String, limit: i64) -> Result<Vec<ctx::CtxSummary>, String> {
    state.ctx_db.get_summaries(&project, limit)
}
/// Full-text search over context entries (FTS5 query syntax).
#[tauri::command]
pub fn ctx_search(state: State<'_, AppState>, query: String) -> Result<Vec<ctx::CtxEntry>, String> {
    state.ctx_db.search(&query)
}
// --- Memora commands (read-only) ---
/// True when the Memora database is present and readable.
#[tauri::command]
pub fn memora_available(state: State<'_, AppState>) -> bool {
    state.memora_db.is_available()
}
/// Page through Memora nodes, optionally filtered by tags (defaults: 50/0).
#[tauri::command]
pub fn memora_list(
    state: State<'_, AppState>,
    tags: Option<Vec<String>>,
    limit: Option<i64>,
    offset: Option<i64>,
) -> Result<memora::MemoraSearchResult, String> {
    state.memora_db.list(tags, limit.unwrap_or(50), offset.unwrap_or(0))
}
/// Search Memora nodes by query text, optionally filtered by tags (default limit 50).
#[tauri::command]
pub fn memora_search(
    state: State<'_, AppState>,
    query: String,
    tags: Option<Vec<String>>,
    limit: Option<i64>,
) -> Result<memora::MemoraSearchResult, String> {
    state.memora_db.search(&query, tags, limit.unwrap_or(50))
}
/// Fetch a single Memora node by id, if it exists.
#[tauri::command]
pub fn memora_get(state: State<'_, AppState>, id: i64) -> Result<Option<memora::MemoraNode>, String> {
    state.memora_db.get(id)
}

View file

@ -0,0 +1,46 @@
// Miscellaneous commands — CLI args, URL opening, frontend telemetry
/// Return the value of a `--group <name>` or `--group=<name>` CLI argument,
/// if one was supplied; otherwise `None`. Scanning starts after argv[0].
#[tauri::command]
pub fn cli_get_group() -> Option<String> {
    let args: Vec<String> = std::env::args().collect();
    for (i, arg) in args.iter().enumerate().skip(1) {
        if arg == "--group" {
            // Space-separated form: the value is the following argument.
            // A trailing bare `--group` with no value is ignored.
            if let Some(value) = args.get(i + 1) {
                return Some(value.clone());
            }
        } else if let Some(value) = arg.strip_prefix("--group=") {
            // Equals-separated form.
            return Some(value.to_string());
        }
    }
    None
}
/// Open a URL in the user's default browser via `xdg-open`.
///
/// Only `http://` / `https://` URLs are accepted, so the frontend cannot
/// launch arbitrary local schemes (file:, etc.).
/// NOTE(review): `xdg-open` exists only on Linux/freedesktop systems — this
/// command will fail on macOS/Windows; confirm the app is Linux-only.
#[tauri::command]
pub fn open_url(url: String) -> Result<(), String> {
    if !url.starts_with("http://") && !url.starts_with("https://") {
        return Err("Only http/https URLs are allowed".into());
    }
    std::process::Command::new("xdg-open")
        .arg(&url)
        .spawn()
        .map_err(|e| format!("Failed to open URL: {e}"))?;
    Ok(())
}
/// True when the `BTERMINAL_TEST=1` env var marks this as a test run.
#[tauri::command]
pub fn is_test_mode() -> bool {
    std::env::var("BTERMINAL_TEST").map_or(false, |v| v == "1")
}
/// Forward a frontend log record into the backend `tracing` pipeline.
///
/// `level` selects the tracing level (unknown values fall through to trace);
/// `context` is attached as a structured field.
#[tauri::command]
pub fn frontend_log(level: String, message: String, context: Option<serde_json::Value>) {
    match level.as_str() {
        "error" => tracing::error!(source = "frontend", ?context, "{message}"),
        "warn" => tracing::warn!(source = "frontend", ?context, "{message}"),
        "info" => tracing::info!(source = "frontend", ?context, "{message}"),
        "debug" => tracing::debug!(source = "frontend", ?context, "{message}"),
        _ => tracing::trace!(source = "frontend", ?context, "{message}"),
    }
}

View file

@ -0,0 +1,17 @@
pub mod pty;
pub mod agent;
pub mod watcher;
pub mod session;
pub mod persistence;
pub mod knowledge;
pub mod claude;
pub mod groups;
pub mod files;
pub mod remote;
pub mod misc;
pub mod btmsg;
pub mod bttask;
pub mod notifications;
pub mod search;
pub mod plugins;
pub mod secrets;

View file

@ -0,0 +1,8 @@
// Notification commands — desktop notification via notify-rust
use crate::notifications;
/// Show a desktop notification with the given title, body and urgency level.
#[tauri::command]
pub fn notify_desktop(title: String, body: String, urgency: String) -> Result<(), String> {
    notifications::send_desktop_notification(&title, &body, &urgency)
}

View file

@ -0,0 +1,109 @@
// Tauri command wrappers over the session persistence database.
use tauri::State;
use crate::AppState;
use crate::session::{AgentMessageRecord, ProjectAgentState, SessionMetric, SessionAnchorRecord};
// --- Agent message persistence ---
/// Persist a batch of agent messages for a session/project pair.
#[tauri::command]
pub fn agent_messages_save(
    state: State<'_, AppState>,
    session_id: String,
    project_id: String,
    sdk_session_id: Option<String>,
    messages: Vec<AgentMessageRecord>,
) -> Result<(), String> {
    state.session_db.save_agent_messages(
        &session_id,
        &project_id,
        sdk_session_id.as_deref(),
        &messages,
    )
}
/// Load all persisted agent messages for a project.
#[tauri::command]
pub fn agent_messages_load(
    state: State<'_, AppState>,
    project_id: String,
) -> Result<Vec<AgentMessageRecord>, String> {
    state.session_db.load_agent_messages(&project_id)
}
// --- Project agent state ---
/// Persist the agent state snapshot for a project.
#[tauri::command]
pub fn project_agent_state_save(
    state: State<'_, AppState>,
    agent_state: ProjectAgentState,
) -> Result<(), String> {
    state.session_db.save_project_agent_state(&agent_state)
}
/// Load the agent state snapshot for a project, if one was saved.
#[tauri::command]
pub fn project_agent_state_load(
    state: State<'_, AppState>,
    project_id: String,
) -> Result<Option<ProjectAgentState>, String> {
    state.session_db.load_project_agent_state(&project_id)
}
// --- Session metrics ---
/// Persist one session metric record.
#[tauri::command]
pub fn session_metric_save(
    state: State<'_, AppState>,
    metric: SessionMetric,
) -> Result<(), String> {
    state.session_db.save_session_metric(&metric)
}
/// Load up to `limit` session metrics for a project.
#[tauri::command]
pub fn session_metrics_load(
    state: State<'_, AppState>,
    project_id: String,
    limit: i64,
) -> Result<Vec<SessionMetric>, String> {
    state.session_db.load_session_metrics(&project_id, limit)
}
// --- Session anchors ---
/// Persist a batch of session anchors.
#[tauri::command]
pub fn session_anchors_save(
    state: State<'_, AppState>,
    anchors: Vec<SessionAnchorRecord>,
) -> Result<(), String> {
    state.session_db.save_session_anchors(&anchors)
}
/// Load all session anchors for a project.
#[tauri::command]
pub fn session_anchors_load(
    state: State<'_, AppState>,
    project_id: String,
) -> Result<Vec<SessionAnchorRecord>, String> {
    state.session_db.load_session_anchors(&project_id)
}
/// Delete a single session anchor by id.
#[tauri::command]
pub fn session_anchor_delete(
    state: State<'_, AppState>,
    id: String,
) -> Result<(), String> {
    state.session_db.delete_session_anchor(&id)
}
/// Delete every anchor belonging to a project.
#[tauri::command]
pub fn session_anchors_clear(
    state: State<'_, AppState>,
    project_id: String,
) -> Result<(), String> {
    state.session_db.delete_project_anchors(&project_id)
}
/// Change the type/category of an existing anchor.
#[tauri::command]
pub fn session_anchor_update_type(
    state: State<'_, AppState>,
    id: String,
    anchor_type: String,
) -> Result<(), String> {
    state.session_db.update_anchor_type(&id, &anchor_type)
}

View file

@ -0,0 +1,20 @@
// Plugin discovery and file access commands
use crate::AppState;
use crate::plugins;
/// Discover installed plugins by scanning the configured plugins directory.
#[tauri::command]
pub fn plugins_discover(state: tauri::State<'_, AppState>) -> Vec<plugins::PluginMeta> {
    let plugins_dir = state.app_config.plugins_dir();
    plugins::discover_plugins(&plugins_dir)
}
/// Read a file belonging to a plugin (access mediated by the plugins module).
#[tauri::command]
pub fn plugin_read_file(
    state: tauri::State<'_, AppState>,
    plugin_id: String,
    filename: String,
) -> Result<String, String> {
    let plugins_dir = state.app_config.plugins_dir();
    plugins::read_plugin_file(&plugins_dir, &plugin_id, &filename)
}

View file

@ -0,0 +1,33 @@
// Tauri command wrappers over the PTY (pseudo-terminal) manager.
use tauri::State;
use crate::AppState;
use crate::pty::PtyOptions;
/// Spawn a new PTY with the given options; returns its id.
#[tauri::command]
#[tracing::instrument(skip(state), fields(shell = ?options.shell))]
pub fn pty_spawn(
    state: State<'_, AppState>,
    options: PtyOptions,
) -> Result<String, String> {
    state.pty_manager.spawn(options)
}
/// Write input data to a running PTY.
#[tauri::command]
pub fn pty_write(state: State<'_, AppState>, id: String, data: String) -> Result<(), String> {
    state.pty_manager.write(&id, &data)
}
/// Resize a running PTY to `cols` x `rows`.
#[tauri::command]
pub fn pty_resize(
    state: State<'_, AppState>,
    id: String,
    cols: u16,
    rows: u16,
) -> Result<(), String> {
    state.pty_manager.resize(&id, cols, rows)
}
/// Terminate a running PTY.
#[tauri::command]
#[tracing::instrument(skip(state))]
pub fn pty_kill(state: State<'_, AppState>, id: String) -> Result<(), String> {
    state.pty_manager.kill(&id)
}

View file

@ -0,0 +1,85 @@
// Tauri command wrappers for remote-machine management: connection lifecycle,
// remote agent queries, remote PTYs, and SPKI certificate pinning.
use tauri::State;
use crate::AppState;
use crate::remote::{self, RemoteMachineConfig, RemoteMachineInfo};
use crate::pty::PtyOptions;
use crate::sidecar::AgentQueryOptions;
/// List all configured remote machines with their current status.
#[tauri::command]
pub async fn remote_list(state: State<'_, AppState>) -> Result<Vec<RemoteMachineInfo>, String> {
    Ok(state.remote_manager.list_machines().await)
}
/// Register a remote machine; returns its assigned id.
#[tauri::command]
pub async fn remote_add(state: State<'_, AppState>, config: RemoteMachineConfig) -> Result<String, String> {
    Ok(state.remote_manager.add_machine(config).await)
}
/// Remove a remote machine from the configuration.
#[tauri::command]
pub async fn remote_remove(state: State<'_, AppState>, machine_id: String) -> Result<(), String> {
    state.remote_manager.remove_machine(&machine_id).await
}
/// Establish a connection to a remote machine.
#[tauri::command]
#[tracing::instrument(skip(app, state))]
pub async fn remote_connect(app: tauri::AppHandle, state: State<'_, AppState>, machine_id: String) -> Result<(), String> {
    state.remote_manager.connect(&app, &machine_id).await
}
/// Disconnect from a remote machine.
#[tauri::command]
#[tracing::instrument(skip(state))]
pub async fn remote_disconnect(state: State<'_, AppState>, machine_id: String) -> Result<(), String> {
    state.remote_manager.disconnect(&machine_id).await
}
/// Start an agent query on a remote machine.
#[tauri::command]
#[tracing::instrument(skip(state, options), fields(session_id = %options.session_id))]
pub async fn remote_agent_query(state: State<'_, AppState>, machine_id: String, options: AgentQueryOptions) -> Result<(), String> {
    state.remote_manager.agent_query(&machine_id, &options).await
}
/// Stop a running agent session on a remote machine.
#[tauri::command]
#[tracing::instrument(skip(state))]
pub async fn remote_agent_stop(state: State<'_, AppState>, machine_id: String, session_id: String) -> Result<(), String> {
    state.remote_manager.agent_stop(&machine_id, &session_id).await
}
/// Spawn a PTY on a remote machine; returns its id.
#[tauri::command]
#[tracing::instrument(skip(state), fields(shell = ?options.shell))]
pub async fn remote_pty_spawn(state: State<'_, AppState>, machine_id: String, options: PtyOptions) -> Result<String, String> {
    state.remote_manager.pty_spawn(&machine_id, &options).await
}
/// Write input to a remote PTY.
#[tauri::command]
pub async fn remote_pty_write(state: State<'_, AppState>, machine_id: String, id: String, data: String) -> Result<(), String> {
    state.remote_manager.pty_write(&machine_id, &id, &data).await
}
/// Resize a remote PTY.
#[tauri::command]
pub async fn remote_pty_resize(state: State<'_, AppState>, machine_id: String, id: String, cols: u16, rows: u16) -> Result<(), String> {
    state.remote_manager.pty_resize(&machine_id, &id, cols, rows).await
}
/// Kill a remote PTY.
#[tauri::command]
pub async fn remote_pty_kill(state: State<'_, AppState>, machine_id: String, id: String) -> Result<(), String> {
    state.remote_manager.pty_kill(&machine_id, &id).await
}
// --- SPKI certificate pinning ---
/// Probe a server and return its SPKI hash for pinning.
#[tauri::command]
#[tracing::instrument]
pub async fn remote_probe_spki(url: String) -> Result<String, String> {
    remote::probe_spki_hash(&url).await
}
/// Add an SPKI pin for a machine.
#[tauri::command]
#[tracing::instrument(skip(state))]
pub async fn remote_add_pin(state: State<'_, AppState>, machine_id: String, pin: String) -> Result<(), String> {
    state.remote_manager.add_spki_pin(&machine_id, pin).await
}
/// Remove an SPKI pin from a machine.
#[tauri::command]
#[tracing::instrument(skip(state))]
pub async fn remote_remove_pin(state: State<'_, AppState>, machine_id: String, pin: String) -> Result<(), String> {
    state.remote_manager.remove_spki_pin(&machine_id, &pin).await
}

View file

@ -0,0 +1,59 @@
// Tauri command wrappers over the FTS search database.
use crate::AppState;
use crate::search::SearchResult;
use tauri::State;
/// Confirm the search database is ready (initialization happens at app setup).
#[tauri::command]
pub fn search_init(state: State<'_, AppState>) -> Result<(), String> {
    // SearchDb is already initialized during app setup; this is a no-op
    // but allows the frontend to confirm readiness.
    let _db = &state.search_db;
    Ok(())
}
/// Full-text search across all indexed content (default limit 20).
#[tauri::command]
pub fn search_query(
    state: State<'_, AppState>,
    query: String,
    limit: Option<i32>,
) -> Result<Vec<SearchResult>, String> {
    state.search_db.search_all(&query, limit.unwrap_or(20))
}
/// Drop and rebuild the full search index.
#[tauri::command]
pub fn search_rebuild(state: State<'_, AppState>) -> Result<(), String> {
    state.search_db.rebuild_index()
}
/// Index one chat message for search.
#[tauri::command]
pub fn search_index_message(
    state: State<'_, AppState>,
    session_id: String,
    role: String,
    content: String,
) -> Result<(), String> {
    state.search_db.index_message(&session_id, &role, &content)
}
/// Index one task for search.
#[tauri::command]
pub fn search_index_task(
    state: State<'_, AppState>,
    task_id: String,
    title: String,
    description: String,
    status: String,
    assigned_to: String,
) -> Result<(), String> {
    state.search_db.index_task(&task_id, &title, &description, &status, &assigned_to)
}
/// Index one btmsg message for search.
#[tauri::command]
pub fn search_index_btmsg(
    state: State<'_, AppState>,
    msg_id: String,
    from_agent: String,
    to_agent: String,
    content: String,
    channel: String,
) -> Result<(), String> {
    state.search_db.index_btmsg(&msg_id, &from_agent, &to_agent, &content, &channel)
}

View file

@ -0,0 +1,34 @@
// Tauri command wrappers over the OS-keyring secrets manager.
use crate::secrets::SecretsManager;
/// Store a secret under `key` in the keyring.
#[tauri::command]
pub fn secrets_store(key: String, value: String) -> Result<(), String> {
    SecretsManager::store_secret(&key, &value)
}
/// Fetch a secret by key; `Ok(None)` when the key is not present.
#[tauri::command]
pub fn secrets_get(key: String) -> Result<Option<String>, String> {
    SecretsManager::get_secret(&key)
}
/// Delete a secret by key.
#[tauri::command]
pub fn secrets_delete(key: String) -> Result<(), String> {
    SecretsManager::delete_secret(&key)
}
/// List all stored secret keys.
#[tauri::command]
pub fn secrets_list() -> Result<Vec<String>, String> {
    SecretsManager::list_keys()
}
/// True when an OS keyring backend is available.
#[tauri::command]
pub fn secrets_has_keyring() -> bool {
    SecretsManager::has_keyring()
}
/// The well-known secret key names the UI offers by default.
#[tauri::command]
pub fn secrets_known_keys() -> Vec<String> {
    crate::secrets::KNOWN_KEYS
        .iter()
        .map(|s| s.to_string())
        .collect()
}

View file

@ -0,0 +1,81 @@
// Tauri command wrappers for session, layout, settings and SSH persistence.
use tauri::State;
use crate::AppState;
use crate::session::{Session, LayoutState, SshSession};
// --- Session persistence ---
/// List all saved sessions.
#[tauri::command]
pub fn session_list(state: State<'_, AppState>) -> Result<Vec<Session>, String> {
    state.session_db.list_sessions()
}
/// Save (insert or update) a session.
#[tauri::command]
pub fn session_save(state: State<'_, AppState>, session: Session) -> Result<(), String> {
    state.session_db.save_session(&session)
}
/// Delete a session by id.
#[tauri::command]
pub fn session_delete(state: State<'_, AppState>, id: String) -> Result<(), String> {
    state.session_db.delete_session(&id)
}
/// Rename a session.
#[tauri::command]
pub fn session_update_title(state: State<'_, AppState>, id: String, title: String) -> Result<(), String> {
    state.session_db.update_title(&id, &title)
}
/// Bump a session's last-used timestamp.
#[tauri::command]
pub fn session_touch(state: State<'_, AppState>, id: String) -> Result<(), String> {
    state.session_db.touch_session(&id)
}
/// Move a session to a different group.
#[tauri::command]
pub fn session_update_group(state: State<'_, AppState>, id: String, group_name: String) -> Result<(), String> {
    state.session_db.update_group(&id, &group_name)
}
// --- Layout ---
/// Persist the window/pane layout.
#[tauri::command]
pub fn layout_save(state: State<'_, AppState>, layout: LayoutState) -> Result<(), String> {
    state.session_db.save_layout(&layout)
}
/// Load the saved window/pane layout.
#[tauri::command]
pub fn layout_load(state: State<'_, AppState>) -> Result<LayoutState, String> {
    state.session_db.load_layout()
}
// --- Settings ---
/// Fetch one setting by key; `Ok(None)` when unset.
#[tauri::command]
pub fn settings_get(state: State<'_, AppState>, key: String) -> Result<Option<String>, String> {
    state.session_db.get_setting(&key)
}
/// Set one setting key/value pair.
#[tauri::command]
pub fn settings_set(state: State<'_, AppState>, key: String, value: String) -> Result<(), String> {
    state.session_db.set_setting(&key, &value)
}
/// List all settings as (key, value) pairs.
#[tauri::command]
pub fn settings_list(state: State<'_, AppState>) -> Result<Vec<(String, String)>, String> {
    state.session_db.get_all_settings()
}
// --- SSH sessions ---
/// List all saved SSH sessions.
#[tauri::command]
pub fn ssh_session_list(state: State<'_, AppState>) -> Result<Vec<SshSession>, String> {
    state.session_db.list_ssh_sessions()
}
/// Save (insert or update) an SSH session.
#[tauri::command]
pub fn ssh_session_save(state: State<'_, AppState>, session: SshSession) -> Result<(), String> {
    state.session_db.save_ssh_session(&session)
}
/// Delete an SSH session by id.
#[tauri::command]
pub fn ssh_session_delete(state: State<'_, AppState>, id: String) -> Result<(), String> {
    state.session_db.delete_ssh_session(&id)
}

View file

@ -0,0 +1,43 @@
// Tauri command wrappers for single-file and project filesystem watchers.
use tauri::State;
use crate::AppState;
use crate::fs_watcher::FsWatcherStatus;
/// Watch a single file for changes on behalf of pane `pane_id`;
/// returns the watcher token/content per the file_watcher contract.
#[tauri::command]
pub fn file_watch(
    app: tauri::AppHandle,
    state: State<'_, AppState>,
    pane_id: String,
    path: String,
) -> Result<String, String> {
    state.file_watcher.watch(&app, &pane_id, &path)
}
/// Stop watching the file associated with `pane_id`.
#[tauri::command]
pub fn file_unwatch(state: State<'_, AppState>, pane_id: String) {
    state.file_watcher.unwatch(&pane_id);
}
/// Read a file through the file watcher (shares its access rules).
#[tauri::command]
pub fn file_read(state: State<'_, AppState>, path: String) -> Result<String, String> {
    state.file_watcher.read_file(&path)
}
/// Start watching a project directory tree for changes.
#[tauri::command]
pub fn fs_watch_project(
    app: tauri::AppHandle,
    state: State<'_, AppState>,
    project_id: String,
    cwd: String,
) -> Result<(), String> {
    state.fs_watcher.watch_project(&app, &project_id, &cwd)
}
/// Stop watching a project directory tree.
#[tauri::command]
pub fn fs_unwatch_project(state: State<'_, AppState>, project_id: String) {
    state.fs_watcher.unwatch_project(&project_id);
}
/// Report the watcher's current status (watched projects, health).
#[tauri::command]
pub fn fs_watcher_status(state: State<'_, AppState>) -> FsWatcherStatus {
    state.fs_watcher.status()
}

317
src-tauri/src/ctx.rs Normal file
View file

@ -0,0 +1,317 @@
// ctx — Read-only access to the Claude Code context manager database
// Database: ~/.claude-context/context.db (managed by ctx CLI tool)
// Path configurable via new_with_path() for test isolation.
use rusqlite::{Connection, params};
use serde::Serialize;
use std::path::PathBuf;
use std::sync::Mutex;
/// One key/value context entry, scoped to a project (or "shared").
#[derive(Debug, Clone, Serialize)]
pub struct CtxEntry {
    /// Owning project name; "shared" for cross-project entries.
    pub project: String,
    pub key: String,
    pub value: String,
    /// SQLite `datetime('now')` text; empty for FTS search results.
    pub updated_at: String,
}
/// One stored session summary for a project.
#[derive(Debug, Clone, Serialize)]
pub struct CtxSummary {
    pub project: String,
    pub summary: String,
    pub created_at: String,
}
/// Handle to the ctx SQLite database.
pub struct CtxDb {
    /// Read-only connection; `None` until the database file exists/opens.
    conn: Mutex<Option<Connection>>,
    /// Path to the database file (configurable for test isolation).
    path: PathBuf,
}
impl CtxDb {
    /// Default on-disk location: `~/.claude-context/context.db`.
    #[cfg(test)]
    fn default_db_path() -> PathBuf {
        dirs::home_dir()
            .unwrap_or_default()
            .join(".claude-context")
            .join("context.db")
    }
    /// Open the default database path; never panics if the file is missing
    /// (the connection simply stays `None`).
    #[cfg(test)]
    pub fn new() -> Self {
        Self::new_with_path(Self::default_db_path())
    }
    /// Create a CtxDb with a custom database path (for test isolation).
    pub fn new_with_path(db_path: PathBuf) -> Self {
        // Open read-only if the file already exists; otherwise defer until init_db.
        let conn = if db_path.exists() {
            Connection::open_with_flags(
                &db_path,
                rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY | rusqlite::OpenFlags::SQLITE_OPEN_NO_MUTEX,
            ).ok()
        } else {
            None
        };
        Self { conn: Mutex::new(conn), path: db_path }
    }
    /// Create the context database directory and schema, then open a read-only connection.
    pub fn init_db(&self) -> Result<(), String> {
        let db_path = &self.path;
        // Create parent directory
        if let Some(parent) = db_path.parent() {
            std::fs::create_dir_all(parent)
                .map_err(|e| format!("Failed to create directory: {e}"))?;
        }
        // Open read-write to create schema
        let conn = Connection::open(&db_path)
            .map_err(|e| format!("Failed to create database: {e}"))?;
        conn.execute_batch("PRAGMA journal_mode=WAL;").map_err(|e| format!("WAL mode failed: {e}"))?;
        // Schema: base tables, external-content FTS5 mirrors of contexts/shared,
        // and triggers keeping contexts_fts in sync on insert/delete/update.
        conn.execute_batch(
            "CREATE TABLE IF NOT EXISTS sessions (
                name TEXT PRIMARY KEY,
                description TEXT,
                work_dir TEXT,
                created_at TEXT DEFAULT (datetime('now'))
            );
            CREATE TABLE IF NOT EXISTS contexts (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                project TEXT NOT NULL,
                key TEXT NOT NULL,
                value TEXT NOT NULL,
                updated_at TEXT DEFAULT (datetime('now')),
                UNIQUE(project, key)
            );
            CREATE TABLE IF NOT EXISTS shared (
                key TEXT PRIMARY KEY,
                value TEXT NOT NULL,
                updated_at TEXT DEFAULT (datetime('now'))
            );
            CREATE TABLE IF NOT EXISTS summaries (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                project TEXT NOT NULL,
                summary TEXT NOT NULL,
                created_at TEXT DEFAULT (datetime('now'))
            );
            CREATE VIRTUAL TABLE IF NOT EXISTS contexts_fts USING fts5(
                project, key, value, content=contexts, content_rowid=id
            );
            CREATE VIRTUAL TABLE IF NOT EXISTS shared_fts USING fts5(
                key, value, content=shared
            );
            CREATE TRIGGER IF NOT EXISTS contexts_ai AFTER INSERT ON contexts BEGIN
                INSERT INTO contexts_fts(rowid, project, key, value)
                VALUES (new.id, new.project, new.key, new.value);
            END;
            CREATE TRIGGER IF NOT EXISTS contexts_ad AFTER DELETE ON contexts BEGIN
                INSERT INTO contexts_fts(contexts_fts, rowid, project, key, value)
                VALUES ('delete', old.id, old.project, old.key, old.value);
            END;
            CREATE TRIGGER IF NOT EXISTS contexts_au AFTER UPDATE ON contexts BEGIN
                INSERT INTO contexts_fts(contexts_fts, rowid, project, key, value)
                VALUES ('delete', old.id, old.project, old.key, old.value);
                INSERT INTO contexts_fts(rowid, project, key, value)
                VALUES (new.id, new.project, new.key, new.value);
            END;"
        ).map_err(|e| format!("Schema creation failed: {e}"))?;
        drop(conn);
        // Re-open as read-only for normal operation
        let ro_conn = Connection::open_with_flags(
            &db_path,
            rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY | rusqlite::OpenFlags::SQLITE_OPEN_NO_MUTEX,
        ).map_err(|e| format!("Failed to reopen database: {e}"))?;
        let mut lock = self.conn.lock().map_err(|_| "ctx database lock poisoned".to_string())?;
        *lock = Some(ro_conn);
        Ok(())
    }
    /// Register a project in the ctx database (creates if not exists).
    /// Opens a brief read-write connection; the main self.conn stays read-only.
    pub fn register_project(&self, name: &str, description: &str, work_dir: Option<&str>) -> Result<(), String> {
        let db_path = &self.path;
        // NOTE(review): this error message is misleading — Connection::open
        // creates the file if missing; failure here means open failed, not
        // "not found". Consider rewording.
        let conn = Connection::open(&db_path)
            .map_err(|e| format!("ctx database not found: {e}"))?;
        conn.execute(
            "INSERT OR IGNORE INTO sessions (name, description, work_dir) VALUES (?1, ?2, ?3)",
            rusqlite::params![name, description, work_dir],
        ).map_err(|e| format!("Failed to register project: {e}"))?;
        Ok(())
    }
    /// Fetch all context entries for `project`, ordered by key.
    pub fn get_context(&self, project: &str) -> Result<Vec<CtxEntry>, String> {
        let lock = self.conn.lock().map_err(|_| "ctx database lock poisoned".to_string())?;
        let conn = lock.as_ref().ok_or("ctx database not found")?;
        let mut stmt = conn
            .prepare("SELECT project, key, value, updated_at FROM contexts WHERE project = ?1 ORDER BY key")
            .map_err(|e| format!("ctx query failed: {e}"))?;
        let entries = stmt
            .query_map(params![project], |row| {
                Ok(CtxEntry {
                    project: row.get(0)?,
                    key: row.get(1)?,
                    value: row.get(2)?,
                    updated_at: row.get(3)?,
                })
            })
            .map_err(|e| format!("ctx query failed: {e}"))?
            .collect::<Result<Vec<_>, _>>()
            .map_err(|e| format!("ctx row read failed: {e}"))?;
        Ok(entries)
    }
    /// Fetch the shared (cross-project) entries; `project` is set to "shared".
    pub fn get_shared(&self) -> Result<Vec<CtxEntry>, String> {
        let lock = self.conn.lock().map_err(|_| "ctx database lock poisoned".to_string())?;
        let conn = lock.as_ref().ok_or("ctx database not found")?;
        let mut stmt = conn
            .prepare("SELECT key, value, updated_at FROM shared ORDER BY key")
            .map_err(|e| format!("ctx query failed: {e}"))?;
        let entries = stmt
            .query_map([], |row| {
                Ok(CtxEntry {
                    project: "shared".to_string(),
                    key: row.get(0)?,
                    value: row.get(1)?,
                    updated_at: row.get(2)?,
                })
            })
            .map_err(|e| format!("ctx query failed: {e}"))?
            .collect::<Result<Vec<_>, _>>()
            .map_err(|e| format!("ctx row read failed: {e}"))?;
        Ok(entries)
    }
    /// Fetch the newest `limit` summaries for `project`, most recent first.
    pub fn get_summaries(&self, project: &str, limit: i64) -> Result<Vec<CtxSummary>, String> {
        let lock = self.conn.lock().map_err(|_| "ctx database lock poisoned".to_string())?;
        let conn = lock.as_ref().ok_or("ctx database not found")?;
        let mut stmt = conn
            .prepare("SELECT project, summary, created_at FROM summaries WHERE project = ?1 ORDER BY created_at DESC LIMIT ?2")
            .map_err(|e| format!("ctx query failed: {e}"))?;
        let summaries = stmt
            .query_map(params![project, limit], |row| {
                Ok(CtxSummary {
                    project: row.get(0)?,
                    summary: row.get(1)?,
                    created_at: row.get(2)?,
                })
            })
            .map_err(|e| format!("ctx query failed: {e}"))?
            .collect::<Result<Vec<_>, _>>()
            .map_err(|e| format!("ctx row read failed: {e}"))?;
        Ok(summaries)
    }
    /// Full-text search over contexts via the FTS5 mirror (max 50 hits).
    /// `query` uses FTS5 MATCH syntax; malformed queries are reported as such.
    pub fn search(&self, query: &str) -> Result<Vec<CtxEntry>, String> {
        let lock = self.conn.lock().map_err(|_| "ctx database lock poisoned".to_string())?;
        let conn = lock.as_ref().ok_or("ctx database not found")?;
        let mut stmt = conn
            .prepare("SELECT project, key, value FROM contexts_fts WHERE contexts_fts MATCH ?1 LIMIT 50")
            .map_err(|e| format!("ctx search failed: {e}"))?;
        let entries = stmt
            .query_map(params![query], |row| {
                Ok(CtxEntry {
                    project: row.get(0)?,
                    key: row.get(1)?,
                    value: row.get(2)?,
                    updated_at: String::new(), // FTS5 virtual table doesn't store updated_at
                })
            })
            .map_err(|e| {
                // Distinguish user-typed bad MATCH syntax from other failures.
                let msg = e.to_string();
                if msg.contains("fts5") || msg.contains("syntax") {
                    format!("Invalid search query syntax: {e}")
                } else {
                    format!("ctx search failed: {e}")
                }
            })?
            .collect::<Result<Vec<_>, _>>()
            .map_err(|e| format!("ctx row read failed: {e}"))?;
        Ok(entries)
    }
}
#[cfg(test)]
mod tests {
    // Missing-database behavior: every CtxDb accessor must fail with the
    // exact string "ctx database not found" (asserted below) rather than
    // panicking when no context.db was ever opened (conn == None).
    use super::*;
    /// Create a CtxDb with conn set to None, simulating a missing database.
    fn make_missing_db() -> CtxDb {
        CtxDb { conn: Mutex::new(None), path: PathBuf::from("/nonexistent/context.db") }
    }
    #[test]
    fn test_new_does_not_panic() {
        // CtxDb::new() should never panic even if ~/.claude-context/context.db
        // doesn't exist — it just stores None for the connection.
        let _db = CtxDb::new();
    }
    #[test]
    fn test_get_context_missing_db_returns_error() {
        let db = make_missing_db();
        let result = db.get_context("any-project");
        assert!(result.is_err());
        assert_eq!(result.unwrap_err(), "ctx database not found");
    }
    #[test]
    fn test_get_shared_missing_db_returns_error() {
        let db = make_missing_db();
        let result = db.get_shared();
        assert!(result.is_err());
        assert_eq!(result.unwrap_err(), "ctx database not found");
    }
    #[test]
    fn test_get_summaries_missing_db_returns_error() {
        let db = make_missing_db();
        let result = db.get_summaries("any-project", 10);
        assert!(result.is_err());
        assert_eq!(result.unwrap_err(), "ctx database not found");
    }
    #[test]
    fn test_search_missing_db_returns_error() {
        let db = make_missing_db();
        let result = db.search("anything");
        assert!(result.is_err());
        assert_eq!(result.unwrap_err(), "ctx database not found");
    }
    #[test]
    fn test_search_empty_query_missing_db_returns_error() {
        // Even a degenerate empty query must hit the missing-db guard first.
        let db = make_missing_db();
        let result = db.search("");
        assert!(result.is_err());
        assert_eq!(result.unwrap_err(), "ctx database not found");
    }
}

View file

@ -0,0 +1,11 @@
use bterminal_core::event::EventSink;
use tauri::{AppHandle, Emitter};
/// Bridges bterminal-core's EventSink trait to Tauri's event system.
/// Newtype over the app handle; cloneable handles make this cheap to construct.
pub struct TauriEventSink(pub AppHandle);
impl EventSink for TauriEventSink {
    /// Forward a core event to the frontend via Tauri's emit.
    /// Emit failures are deliberately discarded (`let _`) — event delivery is
    /// best-effort and must not propagate errors back into core code.
    fn emit(&self, event: &str, payload: serde_json::Value) {
        let _ = self.0.emit(event, &payload);
    }
}

357
src-tauri/src/fs_watcher.rs Normal file
View file

@ -0,0 +1,357 @@
// Filesystem write detection for project directories
// Uses notify crate (inotify on Linux) to detect file modifications.
// Emits Tauri events so frontend can detect external writes vs agent-managed writes.
use notify::{Config, Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use serde::Serialize;
use std::collections::HashMap;
use std::path::Path;
use std::sync::Mutex;
use std::time::{Duration, Instant};
use tauri::Emitter;
/// Payload emitted on fs-write-detected events
#[derive(Clone, Serialize)]
pub struct FsWritePayload {
    /// The project whose watched directory produced the event.
    pub project_id: String,
    /// Absolute path of the written file (lossily stringified).
    pub file_path: String,
    /// Wall-clock time of the event in milliseconds since the Unix epoch.
    pub timestamp_ms: u64,
}
/// Directories to skip when watching recursively
/// (VCS metadata, dependency caches and build outputs — these churn constantly
/// and would both flood the event stream and exhaust inotify watches).
const IGNORED_DIRS: &[&str] = &[
    ".git",
    "node_modules",
    "target",
    ".svelte-kit",
    "dist",
    "__pycache__",
    ".next",
    ".nuxt",
    ".cache",
    "build",
];
/// Status of inotify watch capacity
/// Serialized to the frontend; on non-Linux `max_watches` is 0 and
/// `usage_ratio` stays 0.0 (see `read_inotify_max_watches`).
#[derive(Clone, Serialize)]
pub struct FsWatcherStatus {
    /// Kernel limit from /proc/sys/fs/inotify/max_user_watches
    pub max_watches: u64,
    /// Estimated directories being watched across all projects
    pub estimated_watches: u64,
    /// Usage ratio (0.0 - 1.0)
    pub usage_ratio: f64,
    /// Number of actively watched projects
    pub active_projects: usize,
    /// Warning message if approaching limit, null otherwise
    pub warning: Option<String>,
}
/// Per-project watcher state. The underscore fields are retained only to keep
/// the watcher alive (dropping the RecommendedWatcher stops the watch).
struct ProjectWatch {
    _watcher: RecommendedWatcher,
    _cwd: String,
    /// Estimated number of directories (inotify watches) for this project
    dir_count: u64,
}
/// Manages one recursive filesystem watcher per project, keyed by project id.
pub struct ProjectFsWatcher {
    // project_id -> live watch; guarded because Tauri commands call in
    // from multiple threads.
    watches: Mutex<HashMap<String, ProjectWatch>>,
}
impl ProjectFsWatcher {
    /// Create a watcher manager with no active watches.
    pub fn new() -> Self {
        Self {
            watches: Mutex::new(HashMap::new()),
        }
    }
    /// Start watching a project's CWD for file writes (Create, Modify, Rename).
    /// Debounces events per-file (100ms) to avoid flooding on rapid writes.
    ///
    /// Emits "fs-write-detected" Tauri events carrying an `FsWritePayload`.
    /// Re-watching an already-watched project replaces the previous watcher.
    /// NOTE(review): the event filter below matches Create|Modify only —
    /// renames presumably arrive as Modify(Name) under notify; confirm on
    /// target platforms.
    ///
    /// # Errors
    /// Returns Err if `cwd` is not a directory or the watcher cannot be
    /// created/attached.
    pub fn watch_project(
        &self,
        app: &tauri::AppHandle,
        project_id: &str,
        cwd: &str,
    ) -> Result<(), String> {
        // In test mode, skip inotify watchers to avoid resource contention and flaky events
        if std::env::var("BTERMINAL_TEST").map_or(false, |v| v == "1") {
            log::info!("Test mode: skipping fs watcher for project {project_id}");
            return Ok(());
        }
        let cwd_path = Path::new(cwd);
        if !cwd_path.is_dir() {
            return Err(format!("Not a directory: {cwd}"));
        }
        let mut watches = self.watches.lock().unwrap();
        // Don't duplicate — unwatch first if already watching.
        // The lock must be released before unwatch_project (which re-locks),
        // then re-acquired; a concurrent watch_project for the same id in
        // this window would race, but callers are serialized by the frontend.
        if watches.contains_key(project_id) {
            drop(watches);
            self.unwatch_project(project_id);
            watches = self.watches.lock().unwrap();
        }
        // Owned copies move into the event-handler closure below.
        let app_handle = app.clone();
        let project_id_owned = project_id.to_string();
        let cwd_owned = cwd.to_string();
        // Per-file debounce state
        let debounce: std::sync::Arc<Mutex<HashMap<String, Instant>>> =
            std::sync::Arc::new(Mutex::new(HashMap::new()));
        let debounce_duration = Duration::from_millis(100);
        // This closure runs on notify's watcher thread for every raw event.
        let mut watcher = RecommendedWatcher::new(
            move |res: Result<Event, notify::Error>| {
                let event = match res {
                    Ok(e) => e,
                    Err(_) => return, // watcher errors are silently dropped
                };
                // Only care about file writes (create, modify, rename-to)
                let is_write = matches!(
                    event.kind,
                    EventKind::Create(_) | EventKind::Modify(_)
                );
                if !is_write {
                    return;
                }
                for path in &event.paths {
                    // Skip directories
                    if path.is_dir() {
                        continue;
                    }
                    let path_str = path.to_string_lossy().to_string();
                    // Skip ignored directories
                    if should_ignore_path(&path_str) {
                        continue;
                    }
                    // Debounce: skip if same file was emitted within debounce window
                    let now = Instant::now();
                    let mut db = debounce.lock().unwrap();
                    if let Some(last) = db.get(&path_str) {
                        if now.duration_since(*last) < debounce_duration {
                            continue;
                        }
                    }
                    db.insert(path_str.clone(), now);
                    // Prune old debounce entries (keep map from growing unbounded)
                    if db.len() > 1000 {
                        let max_age = debounce_duration * 10;
                        db.retain(|_, v| now.duration_since(*v) < max_age);
                    }
                    // Release the debounce lock before emitting to Tauri.
                    drop(db);
                    let timestamp_ms = std::time::SystemTime::now()
                        .duration_since(std::time::UNIX_EPOCH)
                        .unwrap_or_default()
                        .as_millis() as u64;
                    // Best-effort emit; failures are ignored.
                    let _ = app_handle.emit(
                        "fs-write-detected",
                        FsWritePayload {
                            project_id: project_id_owned.clone(),
                            file_path: path_str,
                            timestamp_ms,
                        },
                    );
                }
            },
            Config::default(),
        )
        .map_err(|e| format!("Failed to create fs watcher: {e}"))?;
        watcher
            .watch(cwd_path, RecursiveMode::Recursive)
            .map_err(|e| format!("Failed to watch directory: {e}"))?;
        // Estimate inotify watch consumption for the status() report.
        let dir_count = count_watched_dirs(cwd_path);
        log::info!("Started fs watcher for project {project_id} at {cwd} (~{dir_count} directories)");
        watches.insert(
            project_id.to_string(),
            ProjectWatch {
                _watcher: watcher,
                _cwd: cwd_owned,
                dir_count,
            },
        );
        Ok(())
    }
    /// Stop watching a project's CWD
    /// (dropping the stored RecommendedWatcher detaches the watch).
    pub fn unwatch_project(&self, project_id: &str) {
        let mut watches = self.watches.lock().unwrap();
        if watches.remove(project_id).is_some() {
            log::info!("Stopped fs watcher for project {project_id}");
        }
    }
    /// Get current watcher status including inotify limit check
    /// Warning thresholds: >75% usage warns, >90% is critical; both suggest a
    /// doubled max_user_watches value.
    pub fn status(&self) -> FsWatcherStatus {
        let max_watches = read_inotify_max_watches();
        let watches = self.watches.lock().unwrap();
        let active_projects = watches.len();
        let estimated_watches: u64 = watches.values().map(|w| w.dir_count).sum();
        // max_watches is 0 on non-Linux — avoid dividing by zero.
        let usage_ratio = if max_watches > 0 {
            estimated_watches as f64 / max_watches as f64
        } else {
            0.0
        };
        let warning = if usage_ratio > 0.90 {
            Some(format!(
                "inotify watch limit critical: using ~{estimated_watches}/{max_watches} watches ({:.0}%). \
                Increase with: echo {} | sudo tee /proc/sys/fs/inotify/max_user_watches",
                usage_ratio * 100.0,
                max_watches * 2
            ))
        } else if usage_ratio > 0.75 {
            Some(format!(
                "inotify watch limit warning: using ~{estimated_watches}/{max_watches} watches ({:.0}%). \
                Consider increasing with: echo {} | sudo tee /proc/sys/fs/inotify/max_user_watches",
                usage_ratio * 100.0,
                max_watches * 2
            ))
        } else {
            None
        };
        FsWatcherStatus {
            max_watches,
            estimated_watches,
            usage_ratio,
            active_projects,
            warning,
        }
    }
}
/// Check if a path contains any ignored directory component.
/// Only normal path components are considered; prefixes, root and `..`/`.`
/// markers can never match an ignored name.
fn should_ignore_path(path: &str) -> bool {
    Path::new(path)
        .components()
        .filter_map(|component| match component {
            std::path::Component::Normal(name) => Some(name.to_string_lossy()),
            _ => None,
        })
        .any(|name| IGNORED_DIRS.contains(&name.as_ref()))
}
/// Read the kernel inotify watch limit from /proc/sys/fs/inotify/max_user_watches.
/// Returns 0 on non-Linux or if the file can't be read or parsed.
fn read_inotify_max_watches() -> u64 {
    match std::fs::read_to_string("/proc/sys/fs/inotify/max_user_watches") {
        Ok(contents) => contents.trim().parse().unwrap_or(0),
        Err(_) => 0,
    }
}
/// Count directories under a path that would become inotify watches.
/// Skips ignored directories. Caps the walk at 30,000 to avoid blocking on huge monorepos.
/// Uses an explicit work stack instead of recursion; the count is
/// order-independent, so the result matches a depth-first walk exactly
/// (including the cap, which bounds the result at 30,000).
fn count_watched_dirs(root: &Path) -> u64 {
    const MAX_WALK: u64 = 30_000;
    let mut count: u64 = 1; // the root directory itself holds a watch
    let mut pending = vec![root.to_path_buf()];
    while let Some(dir) = pending.pop() {
        if count >= MAX_WALK {
            break;
        }
        // Unreadable directories are simply skipped (permission errors, races).
        let Ok(entries) = std::fs::read_dir(&dir) else { continue };
        for entry in entries.flatten() {
            if count >= MAX_WALK {
                break;
            }
            let path = entry.path();
            if !path.is_dir() {
                continue;
            }
            if IGNORED_DIRS.contains(&entry.file_name().to_string_lossy().as_ref()) {
                continue;
            }
            count += 1;
            pending.push(path);
        }
    }
    count
}
#[cfg(test)]
mod tests {
    // Pure-function coverage for the watcher helpers: path-ignore rules,
    // the /proc inotify limit reader, directory counting, and the empty
    // status report. No actual notify watchers are created here.
    use super::*;
    #[test]
    fn test_should_ignore_git() {
        assert!(should_ignore_path("/home/user/project/.git/objects/abc"));
        assert!(should_ignore_path("/home/user/project/.git/HEAD"));
    }
    #[test]
    fn test_should_ignore_node_modules() {
        assert!(should_ignore_path("/project/node_modules/pkg/index.js"));
    }
    #[test]
    fn test_should_ignore_target() {
        assert!(should_ignore_path("/project/target/debug/build/foo"));
    }
    #[test]
    fn test_should_not_ignore_src() {
        assert!(!should_ignore_path("/project/src/main.rs"));
        assert!(!should_ignore_path("/project/src/lib/stores/health.svelte.ts"));
    }
    #[test]
    fn test_should_not_ignore_root_file() {
        assert!(!should_ignore_path("/project/Cargo.toml"));
    }
    #[test]
    fn test_read_inotify_max_watches() {
        // On Linux this should return a positive number
        let max = read_inotify_max_watches();
        if cfg!(target_os = "linux") {
            assert!(max > 0, "Expected positive inotify limit on Linux, got {max}");
        }
    }
    #[test]
    fn test_count_watched_dirs_tempdir() {
        // NOTE(review): fixed temp-dir name assumes no parallel run of this
        // exact test binary shares it; cleaned up before and after.
        let tmp = std::env::temp_dir().join("bterminal_test_count_dirs");
        let _ = std::fs::remove_dir_all(&tmp);
        std::fs::create_dir_all(tmp.join("src/lib")).unwrap();
        std::fs::create_dir_all(tmp.join("node_modules/pkg")).unwrap(); // should be skipped
        std::fs::create_dir_all(tmp.join(".git/objects")).unwrap(); // should be skipped
        let count = count_watched_dirs(&tmp);
        // root + src + src/lib = 3 (node_modules and .git skipped)
        assert_eq!(count, 3, "Expected 3 watched dirs, got {count}");
        let _ = std::fs::remove_dir_all(&tmp);
    }
    #[test]
    fn test_watcher_status_no_projects() {
        // A fresh watcher reports zero usage and no warning.
        let watcher = ProjectFsWatcher::new();
        let status = watcher.status();
        assert_eq!(status.active_projects, 0);
        assert_eq!(status.estimated_watches, 0);
        assert!(status.warning.is_none());
    }
}

314
src-tauri/src/groups.rs Normal file
View file

@ -0,0 +1,314 @@
// Project group configuration
// Reads/writes ~/.config/bterminal/groups.json
// Path configurable via init() for test isolation.
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::sync::OnceLock;
// Process-wide groups.json location; set once at startup, read by config_path().
static CONFIG_PATH: OnceLock<PathBuf> = OnceLock::new();
/// Set the groups.json path. Must be called before any config access.
/// Called from lib.rs setup with AppConfig-resolved path.
/// A second call is silently ignored (OnceLock::set's Err is discarded).
pub fn init(path: PathBuf) {
    let _ = CONFIG_PATH.set(path);
}
/// A single project entry inside a group, persisted to groups.json
/// (camelCase keys). Optional fields are omitted from the JSON when None.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProjectConfig {
    pub id: String,
    pub name: String,
    pub identifier: String,
    pub description: String,
    // Icon glyph shown in the UI (stored as a string, e.g. a font codepoint).
    pub icon: String,
    // Working directory for the project's sessions.
    pub cwd: String,
    pub profile: String,
    pub enabled: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub use_worktrees: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sandbox_enabled: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub anchor_budget_scale: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stall_threshold_min: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub is_agent: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub agent_role: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub system_prompt: Option<String>,
}
/// Configuration for a standalone agent attached to a group (not tied to a
/// project entry). Persisted in groups.json with camelCase keys; optional
/// fields are omitted when None.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GroupAgentConfig {
    pub id: String,
    pub name: String,
    pub role: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cwd: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub system_prompt: Option<String>,
    pub enabled: bool,
    // Periodic wake-up scheduling; semantics of strategy/threshold are
    // interpreted by the scheduler elsewhere — not visible in this file.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub wake_interval_min: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub wake_strategy: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub wake_threshold: Option<f64>,
}
/// A named group of projects plus optional group-level agents.
/// No rename_all here: all field names are single lowercase words, so the
/// JSON keys are identical either way. `agents` is omitted when empty for
/// backward-compatible files.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GroupConfig {
    pub id: String,
    pub name: String,
    pub projects: Vec<ProjectConfig>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub agents: Vec<GroupAgentConfig>,
}
/// Root document of groups.json: schema version, all groups, and the id of
/// the currently active group ("" when none).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GroupsFile {
    pub version: u32,
    pub groups: Vec<GroupConfig>,
    pub active_group_id: String,
}
impl Default for GroupsFile {
fn default() -> Self {
Self {
version: 1,
groups: Vec::new(),
active_group_id: String::new(),
}
}
}
/// Resolve the groups.json location: the path injected via `init()` when set,
/// otherwise the platform config dir (falling back to ".") joined with
/// bterminal/groups.json.
fn config_path() -> PathBuf {
    match CONFIG_PATH.get() {
        Some(path) => path.clone(),
        None => dirs::config_dir()
            .unwrap_or_else(|| PathBuf::from("."))
            .join("bterminal")
            .join("groups.json"),
    }
}
/// Load groups.json, returning an empty default config when the file does not
/// exist yet.
///
/// # Errors
/// Returns a message when the file exists but cannot be read or parsed.
pub fn load_groups() -> Result<GroupsFile, String> {
    let path = config_path();
    if !path.exists() {
        return Ok(GroupsFile::default());
    }
    std::fs::read_to_string(&path)
        .map_err(|e| format!("Failed to read groups.json: {e}"))
        .and_then(|content| {
            serde_json::from_str(&content).map_err(|e| format!("Invalid groups.json: {e}"))
        })
}
/// Serialize `config` as pretty JSON and write it to groups.json, creating the
/// parent config directory first if needed.
///
/// # Errors
/// Returns a message when directory creation, serialization, or the write fails.
pub fn save_groups(config: &GroupsFile) -> Result<(), String> {
    let path = config_path();
    if let Some(dir) = path.parent() {
        std::fs::create_dir_all(dir).map_err(|e| format!("Failed to create config dir: {e}"))?;
    }
    serde_json::to_string_pretty(config)
        .map_err(|e| format!("JSON serialize error: {e}"))
        .and_then(|json| {
            std::fs::write(&path, json).map_err(|e| format!("Failed to write groups.json: {e}"))
        })
}
/// Discover markdown files in a project directory for the Docs tab.
/// Returns paths relative to cwd, prioritized: CLAUDE.md, README.md, docs/*.md
///
/// # Errors
/// Fails when `cwd` is not an existing directory.
pub fn discover_markdown_files(cwd: &str) -> Result<Vec<MdFileEntry>, String> {
    const PRIORITY_FILES: [&str; 5] =
        ["CLAUDE.md", "README.md", "CHANGELOG.md", "TODO.md", "SETUP.md"];
    let root = PathBuf::from(cwd);
    if !root.is_dir() {
        return Err(format!("Directory not found: {cwd}"));
    }
    let mut entries = Vec::new();
    // Well-known root-level documents are flagged as priority entries.
    for name in PRIORITY_FILES {
        let candidate = root.join(name);
        if candidate.is_file() {
            entries.push(MdFileEntry {
                name: name.to_string(),
                path: candidate.to_string_lossy().to_string(),
                priority: true,
            });
        }
    }
    // Then scan docs/ or doc/ (max 20 entries, depth 2) for the rest.
    for dir_name in ["docs", "doc"] {
        let docs_dir = root.join(dir_name);
        if docs_dir.is_dir() {
            scan_md_dir(&docs_dir, &mut entries, 2, 20);
        }
    }
    Ok(entries)
}
/// A markdown document found by `discover_markdown_files`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MdFileEntry {
    // File name only (e.g. "README.md").
    pub name: String,
    // Full path as a lossily-stringified OS path.
    pub path: String,
    // True for well-known root-level docs (CLAUDE.md, README.md, …).
    pub priority: bool,
}
/// Recursively collect markdown files under `dir`, up to `max_depth` levels
/// deep and at most `max_count` entries in `entries` overall. Skips well-known
/// build/VCS directories. Collected entries are always non-priority.
///
/// The extension match is case-insensitive so `README.MD` / `notes.Markdown`
/// are also discovered (previously only lowercase `md`/`markdown` matched).
fn scan_md_dir(dir: &PathBuf, entries: &mut Vec<MdFileEntry>, max_depth: u32, max_count: usize) {
    if max_depth == 0 || entries.len() >= max_count {
        return;
    }
    // Unreadable directories are silently skipped — discovery is best-effort.
    let Ok(read_dir) = std::fs::read_dir(dir) else { return };
    for entry in read_dir.flatten() {
        if entries.len() >= max_count {
            break;
        }
        let path = entry.path();
        if path.is_file() {
            let is_markdown = path.extension().is_some_and(|ext| {
                ext.eq_ignore_ascii_case("md") || ext.eq_ignore_ascii_case("markdown")
            });
            if is_markdown {
                let name = path.file_name().unwrap_or_default().to_string_lossy().to_string();
                entries.push(MdFileEntry {
                    name,
                    path: path.to_string_lossy().to_string(),
                    priority: false,
                });
            }
        } else if path.is_dir() {
            // Skip common non-doc directories
            let dir_name = path.file_name().unwrap_or_default().to_string_lossy().to_string();
            if !matches!(dir_name.as_str(), "node_modules" | ".git" | "target" | "dist" | "build" | ".next" | "__pycache__") {
                scan_md_dir(&path, entries, max_depth - 1, max_count);
            }
        }
    }
}
#[cfg(test)]
mod tests {
    // Serde round-trips for the groups.json schema plus filesystem-backed
    // discovery tests using tempfile.
    // NOTE(review): `tempfile` must be declared as a dev-dependency in
    // Cargo.toml — it is not in the [dependencies] section; confirm.
    use super::*;
    #[test]
    fn test_default_groups_file() {
        // Default = empty config at schema version 1.
        let g = GroupsFile::default();
        assert_eq!(g.version, 1);
        assert!(g.groups.is_empty());
        assert!(g.active_group_id.is_empty());
    }
    #[test]
    fn test_groups_roundtrip() {
        // Serialize -> deserialize must preserve nested project data.
        let config = GroupsFile {
            version: 1,
            groups: vec![GroupConfig {
                id: "test".to_string(),
                name: "Test Group".to_string(),
                projects: vec![ProjectConfig {
                    id: "p1".to_string(),
                    name: "Project One".to_string(),
                    identifier: "project-one".to_string(),
                    description: "A test project".to_string(),
                    icon: "\u{f120}".to_string(),
                    cwd: "/tmp/test".to_string(),
                    profile: "default".to_string(),
                    enabled: true,
                    provider: None,
                    model: None,
                    use_worktrees: None,
                    sandbox_enabled: None,
                    anchor_budget_scale: None,
                    stall_threshold_min: None,
                    is_agent: None,
                    agent_role: None,
                    system_prompt: None,
                }],
                agents: vec![],
            }],
            active_group_id: "test".to_string(),
        };
        let json = serde_json::to_string(&config).unwrap();
        let parsed: GroupsFile = serde_json::from_str(&json).unwrap();
        assert_eq!(parsed.groups.len(), 1);
        assert_eq!(parsed.groups[0].projects.len(), 1);
        assert_eq!(parsed.groups[0].projects[0].identifier, "project-one");
    }
    #[test]
    fn test_load_missing_file_returns_default() {
        // config_path() will point to a non-existent file in test
        // We test the default case directly
        let g = GroupsFile::default();
        assert_eq!(g.version, 1);
    }
    #[test]
    fn test_discover_nonexistent_dir() {
        let result = discover_markdown_files("/nonexistent/path/12345");
        assert!(result.is_err());
    }
    #[test]
    fn test_discover_empty_dir() {
        let dir = tempfile::tempdir().unwrap();
        let result = discover_markdown_files(dir.path().to_str().unwrap()).unwrap();
        assert!(result.is_empty());
    }
    #[test]
    fn test_discover_finds_readme() {
        // Root-level README.md is a priority entry.
        let dir = tempfile::tempdir().unwrap();
        std::fs::write(dir.path().join("README.md"), "# Hello").unwrap();
        let result = discover_markdown_files(dir.path().to_str().unwrap()).unwrap();
        assert_eq!(result.len(), 1);
        assert_eq!(result[0].name, "README.md");
        assert!(result[0].priority);
    }
    #[test]
    fn test_discover_finds_docs() {
        // Files inside docs/ are found but never marked priority.
        let dir = tempfile::tempdir().unwrap();
        let docs = dir.path().join("docs");
        std::fs::create_dir(&docs).unwrap();
        std::fs::write(docs.join("guide.md"), "# Guide").unwrap();
        std::fs::write(docs.join("api.md"), "# API").unwrap();
        let result = discover_markdown_files(dir.path().to_str().unwrap()).unwrap();
        assert_eq!(result.len(), 2);
        assert!(result.iter().all(|e| !e.priority));
    }
    #[test]
    fn test_discover_finds_doc_dir() {
        // The singular doc/ directory is scanned too.
        let dir = tempfile::tempdir().unwrap();
        let doc = dir.path().join("doc");
        std::fs::create_dir(&doc).unwrap();
        std::fs::write(doc.join("requirements.md"), "# Req").unwrap();
        let result = discover_markdown_files(dir.path().to_str().unwrap()).unwrap();
        assert_eq!(result.len(), 1);
        assert_eq!(result[0].name, "requirements.md");
        assert!(!result[0].priority);
    }
    #[test]
    fn test_discover_finds_setup_md() {
        let dir = tempfile::tempdir().unwrap();
        std::fs::write(dir.path().join("SETUP.md"), "# Setup").unwrap();
        let result = discover_markdown_files(dir.path().to_str().unwrap()).unwrap();
        assert_eq!(result.len(), 1);
        assert_eq!(result[0].name, "SETUP.md");
        assert!(result[0].priority);
    }
}

420
src-tauri/src/lib.rs Normal file
View file

@ -0,0 +1,420 @@
mod btmsg;
mod bttask;
mod commands;
mod ctx;
mod event_sink;
mod fs_watcher;
mod groups;
mod memora;
mod notifications;
mod plugins;
mod pty;
mod secrets;
mod remote;
mod search;
mod sidecar;
mod session;
mod telemetry;
mod watcher;
use bterminal_core::config::AppConfig;
use event_sink::TauriEventSink;
use pty::PtyManager;
use remote::RemoteManager;
use session::SessionDb;
use sidecar::{SidecarConfig, SidecarManager};
use fs_watcher::ProjectFsWatcher;
use watcher::FileWatcherManager;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tauri::Manager;
/// All long-lived managers and databases shared across Tauri command handlers.
/// Constructed once during setup and stored in Tauri's managed state; Arc
/// fields allow cheap handle cloning into async tasks.
pub(crate) struct AppState {
    pub pty_manager: Arc<PtyManager>,
    pub sidecar_manager: Arc<SidecarManager>,
    pub session_db: Arc<SessionDb>,
    pub file_watcher: Arc<FileWatcherManager>,
    pub fs_watcher: Arc<ProjectFsWatcher>,
    pub ctx_db: Arc<ctx::CtxDb>,
    pub memora_db: Arc<memora::MemoraDb>,
    pub remote_manager: Arc<RemoteManager>,
    pub search_db: Arc<search::SearchDb>,
    pub app_config: Arc<AppConfig>,
    // Held for its Drop: keeps telemetry alive for the process lifetime.
    _telemetry: telemetry::TelemetryGuard,
}
/// Install btmsg/bttask CLI tools to ~/.local/bin/ so agent subprocesses can find them.
/// Sources: bundled resources (production) or repo root (development).
/// Only overwrites if the source is newer or the destination doesn't exist.
/// All failures are logged as warnings and never abort startup.
fn install_cli_tools(resource_dir: &Path, dev_root: &Path) {
    let bin_dir = dirs::home_dir()
        .unwrap_or_default()
        .join(".local")
        .join("bin");
    if let Err(e) = std::fs::create_dir_all(&bin_dir) {
        log::warn!("Failed to create ~/.local/bin: {e}");
        return;
    }
    for tool_name in &["btmsg", "bttask"] {
        // Production bundle takes precedence; fall back to the dev repo root.
        let candidates = [resource_dir.join(tool_name), dev_root.join(tool_name)];
        let Some(source) = candidates.iter().find(|p| p.is_file()) else {
            log::warn!("CLI tool '{tool_name}' not found in resources or dev root");
            continue;
        };
        let dest = bin_dir.join(tool_name);
        // Install when dest is absent, or when the source mtime is strictly
        // newer. Metadata failures on either side force a (re)install.
        let source_is_newer = || match (source.metadata(), dest.metadata()) {
            (Ok(sm), Ok(dm)) => {
                let src_time = sm.modified().unwrap_or(std::time::SystemTime::UNIX_EPOCH);
                let dst_time = dm.modified().unwrap_or(std::time::SystemTime::UNIX_EPOCH);
                src_time > dst_time
            }
            _ => true,
        };
        if !dest.exists() || source_is_newer() {
            match std::fs::copy(source, &dest) {
                Ok(_) => {
                    // Ensure executable permission on Unix
                    #[cfg(unix)]
                    {
                        use std::os::unix::fs::PermissionsExt;
                        let _ = std::fs::set_permissions(&dest, std::fs::Permissions::from_mode(0o755));
                    }
                    log::info!("Installed {tool_name} to {}", dest.display());
                }
                Err(e) => log::warn!("Failed to install {tool_name}: {e}"),
            }
        }
    }
}
/// Run `PRAGMA wal_checkpoint(TRUNCATE)` on a SQLite database to reclaim WAL file space.
/// Returns Ok(()) on success or Err with a diagnostic message.
///
/// Uses `query_row` (not `execute`) for both PRAGMAs because these PRAGMAs
/// return a result row; the row contents are ignored. A 5s busy_timeout is set
/// first so the checkpoint waits out writers holding the lock.
pub(crate) fn checkpoint_wal(path: &Path) -> Result<(), String> {
    use rusqlite::{Connection, OpenFlags};
    if !path.exists() {
        return Ok(()); // DB doesn't exist yet — nothing to checkpoint
    }
    // READ_WRITE without CREATE: never create the DB as a side effect.
    let conn = Connection::open_with_flags(path, OpenFlags::SQLITE_OPEN_READ_WRITE)
        .map_err(|e| format!("WAL checkpoint: failed to open {}: {e}", path.display()))?;
    conn.query_row("PRAGMA busy_timeout = 5000", [], |_| Ok(()))
        .map_err(|e| format!("WAL checkpoint: failed to set busy_timeout: {e}"))?;
    conn.query_row("PRAGMA wal_checkpoint(TRUNCATE)", [], |_| Ok(()))
        .map_err(|e| format!("WAL checkpoint failed on {}: {e}", path.display()))?;
    Ok(())
}
/// Spawn a background task that checkpoints WAL on both databases every 5 minutes.
/// Uses `tauri::async_runtime::spawn` instead of `tokio::spawn` because this runs
/// during Tauri setup where the Tokio runtime may not be directly accessible.
///
/// The task is detached and loops forever; checkpoint failures are logged at
/// warn level and never stop the loop.
fn spawn_wal_checkpoint_task(sessions_db_path: PathBuf, btmsg_db_path: PathBuf) {
    tauri::async_runtime::spawn(async move {
        let interval = std::time::Duration::from_secs(300);
        loop {
            // Sleep first: no checkpoint immediately at startup.
            tokio::time::sleep(interval).await;
            for (label, path) in [("sessions.db", &sessions_db_path), ("btmsg.db", &btmsg_db_path)] {
                match checkpoint_wal(path) {
                    Ok(()) => tracing::info!("WAL checkpoint completed for {label}"),
                    Err(e) => tracing::warn!("WAL checkpoint error for {label}: {e}"),
                }
            }
        }
    });
}
#[cfg_attr(mobile, tauri::mobile_entry_point)]
pub fn run() {
// Force dark GTK theme for native dialogs (file chooser, etc.)
std::env::set_var("GTK_THEME", "Adwaita:dark");
// Resolve all paths via AppConfig (respects BTERMINAL_TEST_* env vars)
let app_config = AppConfig::from_env();
if app_config.is_test_mode() {
log::info!(
"Test mode enabled: data_dir={}, config_dir={}",
app_config.data_dir.display(),
app_config.config_dir.display()
);
}
// Initialize subsystem paths from AppConfig (before any db access)
btmsg::init(app_config.btmsg_db_path());
bttask::init(app_config.btmsg_db_path());
groups::init(app_config.groups_json_path());
// Initialize tracing + optional OTLP export (before any tracing macros)
let telemetry_guard = telemetry::init();
let app_config_arc = Arc::new(app_config);
tauri::Builder::default()
.invoke_handler(tauri::generate_handler![
// PTY
commands::pty::pty_spawn,
commands::pty::pty_write,
commands::pty::pty_resize,
commands::pty::pty_kill,
// Agent/sidecar
commands::agent::agent_query,
commands::agent::agent_stop,
commands::agent::agent_ready,
commands::agent::agent_restart,
commands::agent::agent_set_sandbox,
// File watcher
commands::watcher::file_watch,
commands::watcher::file_unwatch,
commands::watcher::file_read,
commands::watcher::fs_watch_project,
commands::watcher::fs_unwatch_project,
commands::watcher::fs_watcher_status,
// Session/layout/settings/SSH
commands::session::session_list,
commands::session::session_save,
commands::session::session_delete,
commands::session::session_update_title,
commands::session::session_touch,
commands::session::session_update_group,
commands::session::layout_save,
commands::session::layout_load,
commands::session::settings_get,
commands::session::settings_set,
commands::session::settings_list,
commands::session::ssh_session_list,
commands::session::ssh_session_save,
commands::session::ssh_session_delete,
// Agent persistence (messages, state, metrics, anchors)
commands::persistence::agent_messages_save,
commands::persistence::agent_messages_load,
commands::persistence::project_agent_state_save,
commands::persistence::project_agent_state_load,
commands::persistence::session_metric_save,
commands::persistence::session_metrics_load,
commands::persistence::session_anchors_save,
commands::persistence::session_anchors_load,
commands::persistence::session_anchor_delete,
commands::persistence::session_anchors_clear,
commands::persistence::session_anchor_update_type,
// ctx + Memora
commands::knowledge::ctx_init_db,
commands::knowledge::ctx_register_project,
commands::knowledge::ctx_get_context,
commands::knowledge::ctx_get_shared,
commands::knowledge::ctx_get_summaries,
commands::knowledge::ctx_search,
commands::knowledge::memora_available,
commands::knowledge::memora_list,
commands::knowledge::memora_search,
commands::knowledge::memora_get,
// Claude profiles/skills
commands::claude::claude_list_profiles,
commands::claude::claude_list_skills,
commands::claude::claude_read_skill,
// Groups
commands::groups::groups_load,
commands::groups::groups_save,
commands::groups::discover_markdown_files,
// File browser
commands::files::list_directory_children,
commands::files::read_file_content,
commands::files::write_file_content,
commands::files::pick_directory,
// Remote machines
commands::remote::remote_list,
commands::remote::remote_add,
commands::remote::remote_remove,
commands::remote::remote_connect,
commands::remote::remote_disconnect,
commands::remote::remote_agent_query,
commands::remote::remote_agent_stop,
commands::remote::remote_pty_spawn,
commands::remote::remote_pty_write,
commands::remote::remote_pty_resize,
commands::remote::remote_pty_kill,
commands::remote::remote_probe_spki,
commands::remote::remote_add_pin,
commands::remote::remote_remove_pin,
// btmsg (agent messenger)
commands::btmsg::btmsg_get_agents,
commands::btmsg::btmsg_unread_count,
commands::btmsg::btmsg_unread_messages,
commands::btmsg::btmsg_history,
commands::btmsg::btmsg_send,
commands::btmsg::btmsg_set_status,
commands::btmsg::btmsg_ensure_admin,
commands::btmsg::btmsg_all_feed,
commands::btmsg::btmsg_mark_read,
commands::btmsg::btmsg_get_channels,
commands::btmsg::btmsg_channel_messages,
commands::btmsg::btmsg_channel_send,
commands::btmsg::btmsg_create_channel,
commands::btmsg::btmsg_add_channel_member,
commands::btmsg::btmsg_register_agents,
// btmsg per-message acknowledgment
commands::btmsg::btmsg_unseen_messages,
commands::btmsg::btmsg_mark_seen,
commands::btmsg::btmsg_prune_seen,
// btmsg health monitoring
commands::btmsg::btmsg_record_heartbeat,
commands::btmsg::btmsg_get_stale_agents,
commands::btmsg::btmsg_get_agent_heartbeats,
commands::btmsg::btmsg_get_dead_letters,
commands::btmsg::btmsg_clear_dead_letters,
commands::btmsg::btmsg_queue_dead_letter,
// Audit log
commands::btmsg::audit_log_event,
commands::btmsg::audit_log_list,
commands::btmsg::audit_log_for_agent,
// bttask (task board)
commands::bttask::bttask_list,
commands::bttask::bttask_comments,
commands::bttask::bttask_update_status,
commands::bttask::bttask_add_comment,
commands::bttask::bttask_create,
commands::bttask::bttask_delete,
commands::bttask::bttask_review_queue_count,
// Search (FTS5)
commands::search::search_init,
commands::search::search_query,
commands::search::search_rebuild,
commands::search::search_index_message,
commands::search::search_index_task,
commands::search::search_index_btmsg,
// Notifications
commands::notifications::notify_desktop,
// Secrets (system keyring)
commands::secrets::secrets_store,
commands::secrets::secrets_get,
commands::secrets::secrets_delete,
commands::secrets::secrets_list,
commands::secrets::secrets_has_keyring,
commands::secrets::secrets_known_keys,
// Plugins
commands::plugins::plugins_discover,
commands::plugins::plugin_read_file,
// Misc
commands::misc::cli_get_group,
commands::misc::open_url,
commands::misc::is_test_mode,
commands::misc::frontend_log,
])
.plugin(tauri_plugin_updater::Builder::new().build())
.plugin(tauri_plugin_dialog::init())
.setup(move |app| {
// Note: tauri-plugin-log is NOT initialized here because telemetry::init()
// already sets up tracing-subscriber (which bridges the `log` crate via
// tracing's compatibility layer). Adding plugin-log would panic with
// "attempted to set a logger after the logging system was already initialized".
let config = app_config_arc.clone();
// Create TauriEventSink for core managers
let sink: Arc<dyn bterminal_core::event::EventSink> =
Arc::new(TauriEventSink(app.handle().clone()));
// Build sidecar config from Tauri paths
let resource_dir = app
.handle()
.path()
.resource_dir()
.unwrap_or_else(|e| {
log::warn!("Failed to resolve resource_dir: {e}");
std::path::PathBuf::new()
});
let dev_root = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.parent()
.unwrap()
.to_path_buf();
// Install btmsg/bttask CLI tools to ~/.local/bin/
if !config.is_test_mode() {
install_cli_tools(&resource_dir, &dev_root);
}
// Forward test mode env vars to sidecar processes
let mut env_overrides = std::collections::HashMap::new();
if config.is_test_mode() {
env_overrides.insert("BTERMINAL_TEST".into(), "1".into());
if let Ok(v) = std::env::var("BTERMINAL_TEST_DATA_DIR") {
env_overrides.insert("BTERMINAL_TEST_DATA_DIR".into(), v);
}
if let Ok(v) = std::env::var("BTERMINAL_TEST_CONFIG_DIR") {
env_overrides.insert("BTERMINAL_TEST_CONFIG_DIR".into(), v);
}
}
let sidecar_config = SidecarConfig {
search_paths: vec![
resource_dir.join("sidecar"),
dev_root.join("sidecar"),
],
env_overrides,
sandbox: bterminal_core::sandbox::SandboxConfig::default(),
};
let pty_manager = Arc::new(PtyManager::new(sink.clone()));
let sidecar_manager = Arc::new(SidecarManager::new(sink, sidecar_config));
// Initialize session database using AppConfig data_dir
let session_db = Arc::new(
SessionDb::open(config.sessions_db_dir()).expect("Failed to open session database"),
);
let file_watcher = Arc::new(FileWatcherManager::new());
let fs_watcher = Arc::new(ProjectFsWatcher::new());
let ctx_db = Arc::new(ctx::CtxDb::new_with_path(config.ctx_db_path.clone()));
let memora_db = Arc::new(memora::MemoraDb::new_with_path(config.memora_db_path.clone()));
let remote_manager = Arc::new(RemoteManager::new());
// Initialize FTS5 search database
let search_db_path = config.data_dir.join("bterminal").join("search.db");
let search_db = Arc::new(
search::SearchDb::open(&search_db_path).expect("Failed to open search database"),
);
// Start local sidecar
match sidecar_manager.start() {
Ok(()) => log::info!("Sidecar startup initiated"),
Err(e) => log::warn!("Sidecar startup failed (agent features unavailable): {e}"),
}
// Start periodic WAL checkpoint task (every 5 minutes)
let sessions_db_path = config.data_dir.join("sessions.db");
let btmsg_db_path = config.btmsg_db_path();
spawn_wal_checkpoint_task(sessions_db_path, btmsg_db_path);
app.manage(AppState {
pty_manager,
sidecar_manager,
session_db,
file_watcher,
fs_watcher,
ctx_db,
memora_db,
remote_manager,
search_db,
app_config: config,
_telemetry: telemetry_guard,
});
Ok(())
})
.run(tauri::generate_context!())
.expect("error while running tauri application");
}

6
src-tauri/src/main.rs Normal file
View file

@ -0,0 +1,6 @@
// Prevents additional console window on Windows in release, DO NOT REMOVE!!
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]

/// Binary entry point — all application setup lives in the library crate's
/// `run()` (lib.rs), so the binary stays a thin shim.
fn main() {
    agent_orchestrator_lib::run();
}

338
src-tauri/src/memora.rs Normal file
View file

@ -0,0 +1,338 @@
// memora — Read-only access to the Memora memory database
// Database: ~/.local/share/memora/memories.db (managed by Memora MCP server)
use rusqlite::{Connection, params};
use serde::Serialize;
use std::sync::Mutex;
/// One memory row from the `memories` table, serialized for the frontend.
#[derive(Debug, Clone, Serialize)]
pub struct MemoraNode {
    pub id: i64,
    /// Memory body text.
    pub content: String,
    /// Tags decoded from the JSON array stored in the `tags` column
    /// (empty if the column held invalid JSON — see `parse_row`).
    pub tags: Vec<String>,
    /// Arbitrary JSON metadata; `None` when the column is NULL or not valid JSON.
    pub metadata: Option<serde_json::Value>,
    pub created_at: Option<String>,
    pub updated_at: Option<String>,
}

/// A page of memories plus a count. For `list`, `total` is the full match
/// count independent of paging; for `search`, it equals the page size.
#[derive(Debug, Clone, Serialize)]
pub struct MemoraSearchResult {
    pub nodes: Vec<MemoraNode>,
    pub total: i64,
}
/// Read-only handle to the Memora SQLite database.
/// The inner `Option` is `None` when the database file did not exist (or
/// failed to open) at construction time — the app runs without Memora then.
pub struct MemoraDb {
    conn: Mutex<Option<Connection>>,
}
impl MemoraDb {
    /// Default location of the Memora database:
    /// the platform data dir (falling back to `~/.local/share`) + `memora/memories.db`.
    #[cfg(test)]
    fn default_db_path() -> std::path::PathBuf {
        dirs::data_dir()
            .unwrap_or_else(|| dirs::home_dir().unwrap_or_default().join(".local/share"))
            .join("memora")
            .join("memories.db")
    }

    /// Open the database at its default path (tests only).
    #[cfg(test)]
    pub fn new() -> Self {
        Self::new_with_path(Self::default_db_path())
    }

    /// Create a MemoraDb with a custom database path (for test isolation).
    ///
    /// The database is opened read-only. A missing file or failed open yields
    /// an unavailable handle rather than an error, so the app degrades
    /// gracefully when the Memora MCP server is not installed.
    pub fn new_with_path(db_path: std::path::PathBuf) -> Self {
        let conn = if db_path.exists() {
            Connection::open_with_flags(
                &db_path,
                rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY | rusqlite::OpenFlags::SQLITE_OPEN_NO_MUTEX,
            )
            .ok()
        } else {
            None
        };
        Self { conn: Mutex::new(conn) }
    }

    /// Check if the database connection is available.
    pub fn is_available(&self) -> bool {
        // A poisoned lock is recovered here: this is a read-only probe.
        let lock = self.conn.lock().unwrap_or_else(|e| e.into_inner());
        lock.is_some()
    }

    /// Map one row of (id, content, tags, metadata, created_at, updated_at)
    /// into a MemoraNode. Malformed JSON in `tags`/`metadata` degrades to an
    /// empty tag list / `None` metadata instead of failing the whole query.
    fn parse_row(row: &rusqlite::Row) -> rusqlite::Result<MemoraNode> {
        let tags_raw: String = row.get(2)?;
        let tags: Vec<String> = serde_json::from_str(&tags_raw).unwrap_or_default();
        let meta_raw: Option<String> = row.get(3)?;
        let metadata = meta_raw.and_then(|m| serde_json::from_str(&m).ok());
        Ok(MemoraNode {
            id: row.get(0)?,
            content: row.get(1)?,
            tags,
            metadata,
            created_at: row.get(4)?,
            updated_at: row.get(5)?,
        })
    }

    /// Classify a failure from an FTS5 MATCH query: errors in the user's
    /// query syntax surface as "Invalid search query", everything else as a
    /// generic search failure.
    fn classify_fts_error(e: &rusqlite::Error) -> String {
        let msg = e.to_string();
        if msg.contains("fts5") || msg.contains("syntax") {
            format!("Invalid search query: {e}")
        } else {
            format!("memora search failed: {e}")
        }
    }

    /// List memories, newest first (by id), optionally filtered to entries
    /// carrying at least one of `tags`. `total` is the full match count,
    /// independent of `limit`/`offset`.
    pub fn list(
        &self,
        tags: Option<Vec<String>>,
        limit: i64,
        offset: i64,
    ) -> Result<MemoraSearchResult, String> {
        let lock = self.conn.lock().map_err(|_| "memora database lock poisoned".to_string())?;
        let conn = lock.as_ref().ok_or("memora database not found")?;
        if let Some(ref tag_list) = tags {
            if !tag_list.is_empty() {
                return self.list_by_tags(conn, tag_list, limit, offset);
            }
        }
        let total: i64 = conn
            .query_row("SELECT COUNT(*) FROM memories", [], |r| r.get(0))
            .map_err(|e| format!("memora count failed: {e}"))?;
        let mut stmt = conn
            .prepare(
                "SELECT id, content, tags, metadata, created_at, updated_at
                 FROM memories ORDER BY id DESC LIMIT ?1 OFFSET ?2",
            )
            .map_err(|e| format!("memora query failed: {e}"))?;
        let nodes = stmt
            .query_map(params![limit, offset], Self::parse_row)
            .map_err(|e| format!("memora query failed: {e}"))?
            .collect::<Result<Vec<_>, _>>()
            .map_err(|e| format!("memora row read failed: {e}"))?;
        Ok(MemoraSearchResult { nodes, total })
    }

    /// List memories whose JSON `tags` array contains ANY of the given tags.
    /// Uses json_each() to expand the tags array and match against the filter
    /// list. Positional placeholders: ?1..?N are the tags, ?N+1 is the limit,
    /// ?N+2 the offset — the parameter vector below is built in that order.
    fn list_by_tags(
        &self,
        conn: &Connection,
        tags: &[String],
        limit: i64,
        offset: i64,
    ) -> Result<MemoraSearchResult, String> {
        let placeholders: Vec<String> =
            tags.iter().enumerate().map(|(i, _)| format!("?{}", i + 1)).collect();
        let in_clause = placeholders.join(", ");
        let count_sql = format!(
            "SELECT COUNT(DISTINCT m.id) FROM memories m, json_each(m.tags) j WHERE j.value IN ({in_clause})"
        );
        let query_sql = format!(
            "SELECT DISTINCT m.id, m.content, m.tags, m.metadata, m.created_at, m.updated_at
             FROM memories m, json_each(m.tags) j
             WHERE j.value IN ({in_clause})
             ORDER BY m.id DESC LIMIT ?{} OFFSET ?{}",
            tags.len() + 1,
            tags.len() + 2,
        );
        let tag_params: Vec<&dyn rusqlite::ToSql> =
            tags.iter().map(|t| t as &dyn rusqlite::ToSql).collect();
        let count_params = tag_params.clone();
        let total: i64 = conn
            .query_row(&count_sql, count_params.as_slice(), |r| r.get(0))
            .map_err(|e| format!("memora count failed: {e}"))?;
        let mut query_params = tag_params;
        query_params.push(&limit);
        query_params.push(&offset);
        let mut stmt = conn
            .prepare(&query_sql)
            .map_err(|e| format!("memora query failed: {e}"))?;
        let nodes = stmt
            .query_map(query_params.as_slice(), Self::parse_row)
            .map_err(|e| format!("memora query failed: {e}"))?
            .collect::<Result<Vec<_>, _>>()
            .map_err(|e| format!("memora row read failed: {e}"))?;
        Ok(MemoraSearchResult { nodes, total })
    }

    /// Full-text search over memories via the `memories_fts` FTS5 index,
    /// optionally restricted to entries carrying at least one of `tags`.
    /// Results are ranked by FTS5 relevance; `total` reflects only the
    /// returned page (no separate COUNT is issued for searches).
    pub fn search(
        &self,
        query: &str,
        tags: Option<Vec<String>>,
        limit: i64,
    ) -> Result<MemoraSearchResult, String> {
        let lock = self.conn.lock().map_err(|_| "memora database lock poisoned".to_string())?;
        let conn = lock.as_ref().ok_or("memora database not found")?;
        // Use FTS5 for text search with optional tag filter
        let fts_query = query.to_string();
        if let Some(ref tag_list) = tags {
            if !tag_list.is_empty() {
                return self.search_with_tags(conn, &fts_query, tag_list, limit);
            }
        }
        let mut stmt = conn
            .prepare(
                "SELECT m.id, m.content, m.tags, m.metadata, m.created_at, m.updated_at
                 FROM memories_fts f
                 JOIN memories m ON m.id = f.rowid
                 WHERE memories_fts MATCH ?1
                 ORDER BY rank
                 LIMIT ?2",
            )
            .map_err(|e| format!("memora search failed: {e}"))?;
        let nodes = stmt
            .query_map(params![fts_query, limit], Self::parse_row)
            .map_err(|e| Self::classify_fts_error(&e))?
            .collect::<Result<Vec<_>, _>>()
            // FIX: FTS5 MATCH syntax errors are raised by SQLite at step time —
            // i.e. while rows are iterated inside collect(), since the MATCH
            // expression is a bound parameter validated on first step, not at
            // prepare. Classify here as well so a malformed user query surfaces
            // as "Invalid search query" instead of a misleading "row read failed".
            .map_err(|e| Self::classify_fts_error(&e))?;
        let total = nodes.len() as i64;
        Ok(MemoraSearchResult { nodes, total })
    }

    /// FTS5 search restricted to memories carrying at least one of `tags`.
    /// Placeholders: ?1 = MATCH query, ?2 = limit, ?3..?N+2 = tags — the
    /// boxed parameter vector below is built in exactly that order.
    fn search_with_tags(
        &self,
        conn: &Connection,
        query: &str,
        tags: &[String],
        limit: i64,
    ) -> Result<MemoraSearchResult, String> {
        let placeholders: Vec<String> =
            tags.iter().enumerate().map(|(i, _)| format!("?{}", i + 3)).collect();
        let in_clause = placeholders.join(", ");
        let sql = format!(
            "SELECT DISTINCT m.id, m.content, m.tags, m.metadata, m.created_at, m.updated_at
             FROM memories_fts f
             JOIN memories m ON m.id = f.rowid
             JOIN json_each(m.tags) j ON j.value IN ({in_clause})
             WHERE memories_fts MATCH ?1
             ORDER BY rank
             LIMIT ?2"
        );
        let mut params: Vec<Box<dyn rusqlite::ToSql>> = Vec::new();
        params.push(Box::new(query.to_string()));
        params.push(Box::new(limit));
        for tag in tags {
            params.push(Box::new(tag.clone()));
        }
        let param_refs: Vec<&dyn rusqlite::ToSql> = params.iter().map(|p| p.as_ref()).collect();
        let mut stmt = conn
            .prepare(&sql)
            .map_err(|e| format!("memora search failed: {e}"))?;
        let nodes = stmt
            .query_map(param_refs.as_slice(), Self::parse_row)
            .map_err(|e| Self::classify_fts_error(&e))?
            .collect::<Result<Vec<_>, _>>()
            // FIX: same as `search` — MATCH syntax errors surface during row
            // iteration, so they must be classified here too.
            .map_err(|e| Self::classify_fts_error(&e))?;
        let total = nodes.len() as i64;
        Ok(MemoraSearchResult { nodes, total })
    }

    /// Fetch a single memory by id. Returns `Ok(None)` when the id is absent.
    pub fn get(&self, id: i64) -> Result<Option<MemoraNode>, String> {
        let lock = self.conn.lock().map_err(|_| "memora database lock poisoned".to_string())?;
        let conn = lock.as_ref().ok_or("memora database not found")?;
        let mut stmt = conn
            .prepare(
                "SELECT id, content, tags, metadata, created_at, updated_at
                 FROM memories WHERE id = ?1",
            )
            .map_err(|e| format!("memora query failed: {e}"))?;
        let mut rows = stmt
            .query_map(params![id], Self::parse_row)
            .map_err(|e| format!("memora query failed: {e}"))?;
        match rows.next() {
            Some(Ok(node)) => Ok(Some(node)),
            Some(Err(e)) => Err(format!("memora row read failed: {e}")),
            None => Ok(None),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// A MemoraDb whose connection slot is empty — simulates a missing
    /// database file without touching the filesystem.
    fn db_without_connection() -> MemoraDb {
        MemoraDb { conn: Mutex::new(None) }
    }

    /// Assert that an operation failed with the canonical missing-db message.
    fn expect_missing_db_err<T: std::fmt::Debug>(result: Result<T, String>) {
        assert!(result.is_err());
        assert_eq!(result.unwrap_err(), "memora database not found");
    }

    #[test]
    fn test_new_does_not_panic() {
        let _db = MemoraDb::new();
    }

    #[test]
    fn test_missing_db_not_available() {
        assert!(!db_without_connection().is_available());
    }

    #[test]
    fn test_list_missing_db_returns_error() {
        expect_missing_db_err(db_without_connection().list(None, 50, 0));
    }

    #[test]
    fn test_search_missing_db_returns_error() {
        expect_missing_db_err(db_without_connection().search("test", None, 50));
    }

    #[test]
    fn test_get_missing_db_returns_error() {
        expect_missing_db_err(db_without_connection().get(1));
    }

    #[test]
    fn test_list_with_tags_missing_db_returns_error() {
        let tags = Some(vec!["bterminal".to_string()]);
        expect_missing_db_err(db_without_connection().list(tags, 50, 0));
    }

    #[test]
    fn test_search_with_tags_missing_db_returns_error() {
        let tags = Some(vec!["bterminal".to_string()]);
        expect_missing_db_err(db_without_connection().search("test", tags, 50));
    }
}

View file

@ -0,0 +1,31 @@
// Desktop notification support via notify-rust
use notify_rust::{Notification, Urgency};
/// Send an OS desktop notification.
/// Fails gracefully if the notification daemon is unavailable: a failed
/// `show()` is logged and reported as `Ok` so the frontend never errors.
pub fn send_desktop_notification(
    title: &str,
    body: &str,
    urgency: &str,
) -> Result<(), String> {
    // Map the string level onto notify-rust's Urgency; anything
    // unrecognized falls back to Normal.
    let level = if urgency == "critical" {
        Urgency::Critical
    } else if urgency == "low" {
        Urgency::Low
    } else {
        Urgency::Normal
    };
    let outcome = Notification::new()
        .summary(title)
        .body(body)
        .appname("BTerminal")
        .urgency(level)
        .show();
    if let Err(e) = outcome {
        // Graceful — don't propagate to frontend
        tracing::warn!("Desktop notification failed (daemon unavailable?): {e}");
    }
    Ok(())
}

255
src-tauri/src/plugins.rs Normal file
View file

@ -0,0 +1,255 @@
// Plugin discovery and file reading.
// Scans ~/.config/bterminal/plugins/ for plugin.json manifest files.
// Each plugin lives in its own subdirectory with a plugin.json manifest.
use serde::{Deserialize, Serialize};
use std::path::Path;
/// Plugin manifest — parsed from plugin.json.
/// One manifest per plugin subdirectory; see `discover_plugins`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PluginMeta {
    // Unique identifier; must pass `is_valid_plugin_id` (alnum + hyphen, 1-64 chars).
    pub id: String,
    // Human-readable display name.
    pub name: String,
    pub version: String,
    #[serde(default)]
    pub description: String,
    /// Entry JS file relative to plugin directory
    pub main: String,
    /// Permission strings: "palette", "btmsg:read", "bttask:read", "events"
    #[serde(default)]
    pub permissions: Vec<String>,
}
/// The complete set of permission strings a manifest may request.
const VALID_PERMISSIONS: &[&str] = &["palette", "btmsg:read", "bttask:read", "events"];

/// Validate plugin ID: alphanumeric + hyphens only, 1-64 chars.
/// Also serves as a path-safety gate, since a valid ID cannot contain
/// separators or `..` components.
fn is_valid_plugin_id(id: &str) -> bool {
    let length_ok = (1..=64).contains(&id.len());
    length_ok && id.chars().all(|c| c.is_ascii_alphanumeric() || c == '-')
}
/// Discover all plugins in the given plugins directory.
/// Each plugin lives in its own subdirectory and must contain a plugin.json
/// manifest. Plugins with an unreadable/invalid manifest, an invalid ID, or
/// an unknown permission are skipped entirely (with a warning).
pub fn discover_plugins(plugins_dir: &Path) -> Vec<PluginMeta> {
    let mut plugins = Vec::new();
    let entries = match std::fs::read_dir(plugins_dir) {
        Ok(e) => e,
        // Missing or unreadable plugins directory → no plugins, not an error.
        Err(_) => return plugins,
    };
    for entry in entries.flatten() {
        let path = entry.path();
        if !path.is_dir() {
            continue;
        }
        let manifest_path = path.join("plugin.json");
        if !manifest_path.exists() {
            continue;
        }
        let content = match std::fs::read_to_string(&manifest_path) {
            Ok(c) => c,
            Err(e) => {
                log::warn!(
                    "Failed to read plugin manifest {}: {e}",
                    manifest_path.display()
                );
                continue;
            }
        };
        let meta: PluginMeta = match serde_json::from_str(&content) {
            Ok(m) => m,
            Err(e) => {
                log::warn!(
                    "Invalid plugin manifest {}: {e}",
                    manifest_path.display()
                );
                continue;
            }
        };
        // Validate plugin ID
        if !is_valid_plugin_id(&meta.id) {
            log::warn!(
                "Plugin at {} has invalid ID '{}' — skipping",
                path.display(),
                meta.id
            );
            continue;
        }
        // Validate permissions.
        // BUG FIX: the previous `continue` sat inside the inner `for perm`
        // loop, so it only advanced to the next permission — a plugin
        // requesting an unknown permission was still registered despite the
        // "— skipping" log line. Find the offending permission first, then
        // skip the plugin as a whole from the outer loop.
        if let Some(bad_perm) = meta
            .permissions
            .iter()
            .find(|p| !VALID_PERMISSIONS.contains(&p.as_str()))
        {
            log::warn!(
                "Plugin '{}' requests unknown permission '{}' — skipping",
                meta.id,
                bad_perm
            );
            continue;
        }
        plugins.push(meta);
    }
    plugins
}
/// Read a file from a plugin directory, with path traversal prevention.
/// Only files within the plugin's own directory are accessible.
pub fn read_plugin_file(
    plugins_dir: &Path,
    plugin_id: &str,
    filename: &str,
) -> Result<String, String> {
    // Reject IDs containing separators or other illegal characters up front.
    if !is_valid_plugin_id(plugin_id) {
        return Err("Invalid plugin ID".to_string());
    }
    let plugin_dir = plugins_dir.join(plugin_id);
    if !plugin_dir.is_dir() {
        return Err(format!("Plugin directory not found: {}", plugin_id));
    }
    // Canonicalize both sides so the containment check below compares
    // real paths with all symlinks resolved.
    let canonical_plugin_dir = plugin_dir
        .canonicalize()
        .map_err(|e| format!("Failed to resolve plugin directory: {e}"))?;
    let canonical_target = plugin_dir
        .join(filename)
        .canonicalize()
        .map_err(|e| format!("Failed to resolve file path: {e}"))?;
    // Path traversal prevention: target must be within plugin directory
    if canonical_target.starts_with(&canonical_plugin_dir) {
        std::fs::read_to_string(&canonical_target)
            .map_err(|e| format!("Failed to read plugin file: {e}"))
    } else {
        Err("Access denied: path is outside plugin directory".to_string())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs;

    /// Create `<root>/<dir_name>/plugin.json` containing `manifest` and
    /// return the plugin directory path.
    fn write_plugin(root: &Path, dir_name: &str, manifest: &str) -> std::path::PathBuf {
        let plugin_dir = root.join(dir_name);
        fs::create_dir(&plugin_dir).unwrap();
        fs::write(plugin_dir.join("plugin.json"), manifest).unwrap();
        plugin_dir
    }

    #[test]
    fn test_valid_plugin_ids() {
        // Accepted: alphanumerics and hyphens, 1..=64 chars.
        for ok in ["my-plugin", "hello123", "a"] {
            assert!(is_valid_plugin_id(ok), "expected '{ok}' to be accepted");
        }
        assert!(!is_valid_plugin_id(""));
        assert!(!is_valid_plugin_id("my_plugin")); // underscore not allowed
        assert!(!is_valid_plugin_id("my plugin")); // space not allowed
        assert!(!is_valid_plugin_id("../evil")); // path traversal chars
        assert!(!is_valid_plugin_id(&"a".repeat(65))); // too long
    }

    #[test]
    fn test_discover_plugins_empty_dir() {
        let tmp = tempfile::tempdir().unwrap();
        assert!(discover_plugins(tmp.path()).is_empty());
    }

    #[test]
    fn test_discover_plugins_nonexistent_dir() {
        assert!(discover_plugins(Path::new("/nonexistent/path")).is_empty());
    }

    #[test]
    fn test_discover_plugins_valid_manifest() {
        let tmp = tempfile::tempdir().unwrap();
        let plugin_dir = write_plugin(
            tmp.path(),
            "test-plugin",
            r#"{
                "id": "test-plugin",
                "name": "Test Plugin",
                "version": "1.0.0",
                "description": "A test plugin",
                "main": "index.js",
                "permissions": ["palette"]
            }"#,
        );
        fs::write(plugin_dir.join("index.js"), "// test").unwrap();
        let found = discover_plugins(tmp.path());
        assert_eq!(found.len(), 1);
        assert_eq!(found[0].id, "test-plugin");
        assert_eq!(found[0].name, "Test Plugin");
        assert_eq!(found[0].permissions, vec!["palette"]);
    }

    #[test]
    fn test_discover_plugins_invalid_id_skipped() {
        let tmp = tempfile::tempdir().unwrap();
        write_plugin(
            tmp.path(),
            "bad_plugin",
            r#"{
                "id": "bad_plugin",
                "name": "Bad",
                "version": "1.0.0",
                "main": "index.js"
            }"#,
        );
        assert!(discover_plugins(tmp.path()).is_empty());
    }

    #[test]
    fn test_read_plugin_file_success() {
        let tmp = tempfile::tempdir().unwrap();
        let plugin_dir = tmp.path().join("my-plugin");
        fs::create_dir(&plugin_dir).unwrap();
        fs::write(plugin_dir.join("index.js"), "console.log('hello');").unwrap();
        let content = read_plugin_file(tmp.path(), "my-plugin", "index.js");
        assert_eq!(content.unwrap(), "console.log('hello');");
    }

    #[test]
    fn test_read_plugin_file_path_traversal_blocked() {
        let tmp = tempfile::tempdir().unwrap();
        let plugin_dir = tmp.path().join("my-plugin");
        fs::create_dir(&plugin_dir).unwrap();
        fs::write(plugin_dir.join("index.js"), "ok").unwrap();
        let result = read_plugin_file(tmp.path(), "my-plugin", "../../../etc/passwd");
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.contains("outside plugin directory") || err.contains("Failed to resolve"));
    }

    #[test]
    fn test_read_plugin_file_invalid_id() {
        let tmp = tempfile::tempdir().unwrap();
        let result = read_plugin_file(tmp.path(), "../evil", "index.js");
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("Invalid plugin ID"));
    }

    #[test]
    fn test_read_plugin_file_nonexistent_plugin() {
        let tmp = tempfile::tempdir().unwrap();
        assert!(read_plugin_file(tmp.path(), "nonexistent", "index.js").is_err());
    }
}

4
src-tauri/src/pty.rs Normal file
View file

@ -0,0 +1,4 @@
// Thin wrapper — re-exports bterminal_core::pty types.
// PtyManager is now in bterminal-core; this module only re-exports for lib.rs.
// Keeping the module around preserves stable `crate::pty::…` paths for callers.
pub use bterminal_core::pty::{PtyManager, PtyOptions};

590
src-tauri/src/remote.rs Normal file
View file

@ -0,0 +1,590 @@
// Remote machine management — WebSocket client connections to bterminal-relay instances
use bterminal_core::pty::PtyOptions;
use bterminal_core::sidecar::AgentQueryOptions;
use futures_util::{SinkExt, StreamExt};
use serde::{Deserialize, Serialize};
use sha2::{Sha256, Digest};
use std::collections::HashMap;
use std::sync::Arc;
use tauri::{AppHandle, Emitter};
use tokio::sync::{Mutex, mpsc};
use tokio_tungstenite::tungstenite::Message;
/// User-supplied configuration for one remote relay machine.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RemoteMachineConfig {
    // Display label shown in the frontend.
    pub label: String,
    // WebSocket URL of the bterminal-relay instance (ws:// or wss://).
    pub url: String,
    // Bearer token sent in the Authorization header when connecting.
    pub token: String,
    pub auto_connect: bool,
    /// SPKI SHA-256 pin(s) for certificate verification. Empty = TOFU on first connect.
    #[serde(default)]
    pub spki_pins: Vec<String>,
}
/// Frontend-facing snapshot of a machine's configuration and state.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RemoteMachineInfo {
    pub id: String,
    pub label: String,
    pub url: String,
    // One of "disconnected", "connecting", "connected".
    pub status: String,
    pub auto_connect: bool,
    /// Currently stored SPKI pin hashes (hex-encoded SHA-256)
    pub spki_pins: Vec<String>,
}
/// Outbound command sent to the relay as JSON over the WebSocket.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct RelayCommand {
    // Command correlation ID (a fresh UUID v4 per command).
    id: String,
    // Command kind, e.g. "agent_query", "pty_create", "pty_write", "ping".
    #[serde(rename = "type")]
    type_: String,
    payload: serde_json::Value,
}

/// Inbound event parsed from relay WebSocket text frames.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct RelayEvent {
    // Event kind, e.g. "sidecar_message", "pty_data", "pty_exit", "ready", "pong".
    #[serde(rename = "type")]
    type_: String,
    #[serde(rename = "sessionId")]
    session_id: Option<String>,
    // Overwritten locally with our machine ID after parsing (see the reader task).
    #[serde(rename = "machineId")]
    machine_id: Option<String>,
    payload: Option<serde_json::Value>,
}
/// An active WebSocket connection: the outbound message channel plus the
/// combined reader/writer task handle (aborted on disconnect/remove).
struct WsConnection {
    tx: mpsc::UnboundedSender<String>,
    _handle: tokio::task::JoinHandle<()>,
}

/// Internal per-machine state tracked by `RemoteManager`.
struct RemoteMachine {
    id: String,
    config: RemoteMachineConfig,
    // "disconnected" | "connecting" | "connected"
    status: String,
    connection: Option<WsConnection>,
    /// Cancellation signal — set to true to stop reconnect loops for this machine
    cancelled: Arc<std::sync::atomic::AtomicBool>,
}

/// Manages WebSocket client connections to bterminal-relay instances.
pub struct RemoteManager {
    machines: Arc<Mutex<HashMap<String, RemoteMachine>>>,
}
impl RemoteManager {
pub fn new() -> Self {
Self {
machines: Arc::new(Mutex::new(HashMap::new())),
}
}
pub async fn list_machines(&self) -> Vec<RemoteMachineInfo> {
let machines = self.machines.lock().await;
machines.values().map(|m| RemoteMachineInfo {
id: m.id.clone(),
label: m.config.label.clone(),
url: m.config.url.clone(),
status: m.status.clone(),
auto_connect: m.config.auto_connect,
spki_pins: m.config.spki_pins.clone(),
}).collect()
}
pub async fn add_machine(&self, config: RemoteMachineConfig) -> String {
let id = uuid::Uuid::new_v4().to_string();
let machine = RemoteMachine {
id: id.clone(),
config,
status: "disconnected".to_string(),
connection: None,
cancelled: Arc::new(std::sync::atomic::AtomicBool::new(false)),
};
self.machines.lock().await.insert(id.clone(), machine);
id
}
pub async fn remove_machine(&self, machine_id: &str) -> Result<(), String> {
let mut machines = self.machines.lock().await;
if let Some(machine) = machines.get_mut(machine_id) {
// Signal cancellation to stop any reconnect loops
machine.cancelled.store(true, std::sync::atomic::Ordering::Relaxed);
// Abort connection tasks before removing to prevent resource leaks
if let Some(conn) = machine.connection.take() {
conn._handle.abort();
}
}
machines.remove(machine_id)
.ok_or_else(|| format!("Machine {machine_id} not found"))?;
Ok(())
}
/// Add an SPKI pin hash to a machine's trusted pins.
pub async fn add_spki_pin(&self, machine_id: &str, pin: String) -> Result<(), String> {
let mut machines = self.machines.lock().await;
let machine = machines.get_mut(machine_id)
.ok_or_else(|| format!("Machine {machine_id} not found"))?;
if !machine.config.spki_pins.contains(&pin) {
machine.config.spki_pins.push(pin);
}
Ok(())
}
/// Remove an SPKI pin hash from a machine's trusted pins.
pub async fn remove_spki_pin(&self, machine_id: &str, pin: &str) -> Result<(), String> {
let mut machines = self.machines.lock().await;
let machine = machines.get_mut(machine_id)
.ok_or_else(|| format!("Machine {machine_id} not found"))?;
machine.config.spki_pins.retain(|p| p != pin);
Ok(())
}
pub async fn connect(&self, app: &AppHandle, machine_id: &str) -> Result<(), String> {
let (url, token, spki_pins) = {
let mut machines = self.machines.lock().await;
let machine = machines.get_mut(machine_id)
.ok_or_else(|| format!("Machine {machine_id} not found"))?;
if machine.connection.is_some() {
return Err("Already connected".to_string());
}
machine.status = "connecting".to_string();
// Reset cancellation flag for new connection
machine.cancelled.store(false, std::sync::atomic::Ordering::Relaxed);
(machine.config.url.clone(), machine.config.token.clone(), machine.config.spki_pins.clone())
};
// SPKI certificate pin verification for wss:// connections
if url.starts_with("wss://") {
if !spki_pins.is_empty() {
// Verify server certificate against stored pins
let server_hash = probe_spki_hash(&url).await.map_err(|e| {
// Reset status on probe failure
let machines = self.machines.clone();
let mid = machine_id.to_string();
tauri::async_runtime::spawn(async move {
let mut machines = machines.lock().await;
if let Some(machine) = machines.get_mut(&mid) {
machine.status = "disconnected".to_string();
}
});
format!("SPKI probe failed: {e}")
})?;
if !spki_pins.contains(&server_hash) {
// Pin mismatch — possible MITM or certificate rotation
let mut machines = self.machines.lock().await;
if let Some(machine) = machines.get_mut(machine_id) {
machine.status = "disconnected".to_string();
}
return Err(format!(
"SPKI pin mismatch! Server certificate hash '{server_hash}' does not match \
any trusted pin. This may indicate a MITM attack or certificate rotation. \
Update the pin in Settings if this is expected."
));
}
log::info!("SPKI pin verified for machine {machine_id}");
} else {
// TOFU: no pins stored — probe and auto-store on first wss:// connect
match probe_spki_hash(&url).await {
Ok(hash) => {
log::info!("TOFU: storing SPKI pin for machine {machine_id}: {hash}");
let mut machines = self.machines.lock().await;
if let Some(machine) = machines.get_mut(machine_id) {
machine.config.spki_pins.push(hash.clone());
}
let _ = app.emit("remote-spki-tofu", &serde_json::json!({
"machineId": machine_id,
"hash": hash,
}));
}
Err(e) => {
log::warn!("TOFU: failed to probe SPKI hash for {machine_id}: {e}");
// Continue without pinning — non-blocking
}
}
}
}
// Build WebSocket request with auth header
let request = tokio_tungstenite::tungstenite::http::Request::builder()
.uri(&url)
.header("Authorization", format!("Bearer {token}"))
.header("Sec-WebSocket-Key", tokio_tungstenite::tungstenite::handshake::client::generate_key())
.header("Sec-WebSocket-Version", "13")
.header("Connection", "Upgrade")
.header("Upgrade", "websocket")
.header("Host", extract_host(&url).unwrap_or_default())
.body(())
.map_err(|e| format!("Failed to build request: {e}"))?;
let (ws_stream, _) = tokio_tungstenite::connect_async(request)
.await
.map_err(|e| format!("WebSocket connect failed: {e}"))?;
let (mut ws_tx, mut ws_rx) = ws_stream.split();
// Channel for sending messages to the WebSocket
let (send_tx, mut send_rx) = mpsc::unbounded_channel::<String>();
// Writer task — forwards channel messages to WebSocket
let writer_handle = tokio::spawn(async move {
while let Some(msg) = send_rx.recv().await {
if ws_tx.send(Message::Text(msg)).await.is_err() {
break;
}
}
});
// Reader task — forwards WebSocket messages to Tauri events
let app_handle = app.clone();
let mid = machine_id.to_string();
let machines_ref = self.machines.clone();
let cancelled_flag = {
let machines = self.machines.lock().await;
machines.get(machine_id).map(|m| m.cancelled.clone())
.unwrap_or_else(|| Arc::new(std::sync::atomic::AtomicBool::new(false)))
};
let reader_handle = tokio::spawn(async move {
while let Some(msg) = ws_rx.next().await {
match msg {
Ok(Message::Text(text)) => {
if let Ok(mut event) = serde_json::from_str::<RelayEvent>(&text) {
event.machine_id = Some(mid.clone());
// Route relay events to Tauri events
match event.type_.as_str() {
"sidecar_message" => {
if let Some(payload) = &event.payload {
let _ = app_handle.emit("remote-sidecar-message", &serde_json::json!({
"machineId": mid,
"sessionId": event.session_id,
"event": payload,
}));
}
}
"pty_data" => {
if let Some(payload) = &event.payload {
let _ = app_handle.emit("remote-pty-data", &serde_json::json!({
"machineId": mid,
"sessionId": event.session_id,
"data": payload,
}));
}
}
"pty_exit" => {
let _ = app_handle.emit("remote-pty-exit", &serde_json::json!({
"machineId": mid,
"sessionId": event.session_id,
}));
}
"ready" => {
let _ = app_handle.emit("remote-machine-ready", &serde_json::json!({
"machineId": mid,
}));
}
"state_sync" => {
let _ = app_handle.emit("remote-state-sync", &serde_json::json!({
"machineId": mid,
"payload": event.payload,
}));
}
"pty_created" => {
// Relay confirmed PTY spawn — emit with real PTY ID
let _ = app_handle.emit("remote-pty-created", &serde_json::json!({
"machineId": mid,
"ptyId": event.session_id,
"commandId": event.payload.as_ref().and_then(|p| p.get("commandId")).and_then(|v| v.as_str()),
}));
}
"pong" => {} // heartbeat response, ignore
"error" => {
let _ = app_handle.emit("remote-error", &serde_json::json!({
"machineId": mid,
"error": event.payload,
}));
}
_ => {
log::warn!("Unknown relay event type: {}", event.type_);
}
}
}
}
Ok(Message::Close(_)) => break,
Err(e) => {
log::error!("WebSocket read error for machine {mid}: {e}");
break;
}
_ => {}
}
}
// Mark disconnected and clear connection
{
let mut machines = machines_ref.lock().await;
if let Some(machine) = machines.get_mut(&mid) {
machine.status = "disconnected".to_string();
machine.connection = None;
}
}
let _ = app_handle.emit("remote-machine-disconnected", &serde_json::json!({
"machineId": mid,
}));
// Exponential backoff reconnection (1s, 2s, 4s, 8s, 16s, 30s cap)
let reconnect_machines = machines_ref.clone();
let reconnect_app = app_handle.clone();
let reconnect_mid = mid.clone();
let reconnect_cancelled = cancelled_flag.clone();
tokio::spawn(async move {
let mut delay = std::time::Duration::from_secs(1);
let max_delay = std::time::Duration::from_secs(30);
loop {
tokio::time::sleep(delay).await;
// Check cancellation flag first (set by remove_machine/disconnect)
if reconnect_cancelled.load(std::sync::atomic::Ordering::Relaxed) {
log::info!("Reconnection cancelled (machine removed) for {reconnect_mid}");
break;
}
// Check if machine still exists and wants reconnection
let should_reconnect = {
let machines = reconnect_machines.lock().await;
machines.get(&reconnect_mid)
.map(|m| m.status == "disconnected" && m.connection.is_none())
.unwrap_or(false)
};
if !should_reconnect {
log::info!("Reconnection cancelled for machine {reconnect_mid}");
break;
}
log::info!("Attempting reconnection to {reconnect_mid} (backoff: {}s)", delay.as_secs());
let _ = reconnect_app.emit("remote-machine-reconnecting", &serde_json::json!({
"machineId": reconnect_mid,
"backoffSecs": delay.as_secs(),
}));
// Try to get URL for TCP probe
let url = {
let machines = reconnect_machines.lock().await;
machines.get(&reconnect_mid).map(|m| m.config.url.clone())
};
if let Some(url) = url {
if attempt_tcp_probe(&url).await.is_ok() {
log::info!("Reconnection probe succeeded for {reconnect_mid}");
// Mark as ready for reconnection — frontend should call connect()
let _ = reconnect_app.emit("remote-machine-reconnect-ready", &serde_json::json!({
"machineId": reconnect_mid,
}));
break;
}
} else {
break; // Machine removed
}
delay = std::cmp::min(delay * 2, max_delay);
}
});
});
// Combine reader + writer into one handle
let combined_handle = tokio::spawn(async move {
tokio::select! {
_ = reader_handle => {}
_ = writer_handle => {}
}
});
// Store connection
let mut machines = self.machines.lock().await;
if let Some(machine) = machines.get_mut(machine_id) {
machine.status = "connected".to_string();
machine.connection = Some(WsConnection {
tx: send_tx,
_handle: combined_handle,
});
}
// Start heartbeat
let ping_tx = {
let machines = self.machines.lock().await;
machines.get(machine_id).and_then(|m| m.connection.as_ref().map(|c| c.tx.clone()))
};
if let Some(tx) = ping_tx {
let mid = machine_id.to_string();
tokio::spawn(async move {
let mut interval = tokio::time::interval(std::time::Duration::from_secs(15));
loop {
interval.tick().await;
let ping = serde_json::json!({"id": "", "type": "ping", "payload": {}});
if tx.send(ping.to_string()).is_err() {
log::info!("Heartbeat stopped for machine {mid}");
break;
}
}
});
}
Ok(())
}
pub async fn disconnect(&self, machine_id: &str) -> Result<(), String> {
let mut machines = self.machines.lock().await;
let machine = machines.get_mut(machine_id)
.ok_or_else(|| format!("Machine {machine_id} not found"))?;
// Signal cancellation to stop any reconnect loops
machine.cancelled.store(true, std::sync::atomic::Ordering::Relaxed);
if let Some(conn) = machine.connection.take() {
conn._handle.abort();
}
machine.status = "disconnected".to_string();
Ok(())
}
// --- Remote command helpers ---
async fn send_command(&self, machine_id: &str, cmd: RelayCommand) -> Result<(), String> {
let machines = self.machines.lock().await;
let machine = machines.get(machine_id)
.ok_or_else(|| format!("Machine {machine_id} not found"))?;
let conn = machine.connection.as_ref()
.ok_or_else(|| format!("Machine {machine_id} not connected"))?;
let json = serde_json::to_string(&cmd)
.map_err(|e| format!("Serialize error: {e}"))?;
conn.tx.send(json)
.map_err(|_| format!("Send channel closed for machine {machine_id}"))
}
/// Ask the remote relay to start an agent query with the given options.
pub async fn agent_query(&self, machine_id: &str, options: &AgentQueryOptions) -> Result<(), String> {
    let cmd = RelayCommand {
        id: uuid::Uuid::new_v4().to_string(),
        type_: "agent_query".to_string(),
        payload: serde_json::to_value(options).unwrap_or_default(),
    };
    self.send_command(machine_id, cmd).await
}
/// Stop a running agent session on the remote machine.
pub async fn agent_stop(&self, machine_id: &str, session_id: &str) -> Result<(), String> {
    let cmd = RelayCommand {
        id: uuid::Uuid::new_v4().to_string(),
        type_: "agent_stop".to_string(),
        payload: serde_json::json!({ "sessionId": session_id }),
    };
    self.send_command(machine_id, cmd).await
}
/// Ask the relay to create a remote PTY.
///
/// Returns the command ID as a placeholder; the actual PTY ID is delivered
/// asynchronously through a relay event.
pub async fn pty_spawn(&self, machine_id: &str, options: &PtyOptions) -> Result<String, String> {
    let command_id = uuid::Uuid::new_v4().to_string();
    let cmd = RelayCommand {
        id: command_id.clone(),
        type_: "pty_create".to_string(),
        payload: serde_json::to_value(options).unwrap_or_default(),
    };
    self.send_command(machine_id, cmd).await?;
    Ok(command_id)
}
/// Forward input data to a remote PTY identified by `id`.
pub async fn pty_write(&self, machine_id: &str, id: &str, data: &str) -> Result<(), String> {
    let cmd = RelayCommand {
        id: uuid::Uuid::new_v4().to_string(),
        type_: "pty_write".to_string(),
        payload: serde_json::json!({ "id": id, "data": data }),
    };
    self.send_command(machine_id, cmd).await
}
/// Resize a remote PTY to `cols` x `rows`.
pub async fn pty_resize(&self, machine_id: &str, id: &str, cols: u16, rows: u16) -> Result<(), String> {
    let cmd = RelayCommand {
        id: uuid::Uuid::new_v4().to_string(),
        type_: "pty_resize".to_string(),
        payload: serde_json::json!({ "id": id, "cols": cols, "rows": rows }),
    };
    self.send_command(machine_id, cmd).await
}
/// Close a remote PTY (the wire command type is "pty_close").
pub async fn pty_kill(&self, machine_id: &str, id: &str) -> Result<(), String> {
    let cmd = RelayCommand {
        id: uuid::Uuid::new_v4().to_string(),
        type_: "pty_close".to_string(),
        payload: serde_json::json!({ "id": id }),
    };
    self.send_command(machine_id, cmd).await
}
}
/// Probe a relay server's TLS certificate and return its SHA-256 hash (hex-encoded).
///
/// Performs a permissive TLS handshake so the certificate can be read even when
/// its CA is untrusted, then hashes the full DER encoding. Only meaningful for
/// wss:// URLs.
pub async fn probe_spki_hash(url: &str) -> Result<String, String> {
    let host = extract_host(url).ok_or_else(|| "Invalid URL".to_string())?;
    // Host without any port suffix — used as the SNI name for the handshake.
    let hostname = host.split(':').next().unwrap_or(&host).to_string();
    // Socket address: keep an explicit port, otherwise default to 9750.
    let addr = if host.contains(':') {
        host.clone()
    } else {
        format!("{host}:9750")
    };
    // Accept any certificate: the goal is to read it, not to trust it.
    let raw_connector = native_tls::TlsConnector::builder()
        .danger_accept_invalid_certs(true)
        .build()
        .map_err(|e| format!("TLS connector error: {e}"))?;
    let connector = tokio_native_tls::TlsConnector::from(raw_connector);
    let tcp = tokio::time::timeout(
        std::time::Duration::from_secs(5),
        tokio::net::TcpStream::connect(&addr),
    )
    .await
    .map_err(|_| "Connection timeout".to_string())?
    .map_err(|e| format!("TCP connect failed: {e}"))?;
    let tls = connector
        .connect(&hostname, tcp)
        .await
        .map_err(|e| format!("TLS handshake failed: {e}"))?;
    // Pull the peer certificate out of the handshake and encode it as DER.
    let cert_der = tls
        .get_ref()
        .peer_certificate()
        .map_err(|e| format!("Failed to get peer certificate: {e}"))?
        .ok_or_else(|| "No peer certificate presented".to_string())?
        .to_der()
        .map_err(|e| format!("Failed to encode certificate DER: {e}"))?;
    // SHA-256 over the full DER-encoded certificate, rendered as hex.
    let mut hasher = Sha256::new();
    hasher.update(&cert_der);
    Ok(hex::encode(hasher.finalize()))
}
/// Probe whether a relay is reachable via a plain TCP connect (no WS upgrade),
/// so the relay does not allocate per-connection resources (PtyManager,
/// SidecarManager) just for a liveness check.
async fn attempt_tcp_probe(url: &str) -> Result<(), String> {
    let host = extract_host(url).ok_or_else(|| "Invalid URL".to_string())?;
    // Append the default relay port (9750) when the URL carried none.
    let addr = if host.contains(':') {
        host.clone()
    } else {
        format!("{host}:9750")
    };
    let connect = tokio::net::TcpStream::connect(&addr);
    tokio::time::timeout(std::time::Duration::from_secs(5), connect)
        .await
        .map_err(|_| "Connection timeout".to_string())?
        .map_err(|e| format!("TCP connect failed: {e}"))?;
    Ok(())
}
/// Extract the `host[:port]` component from a `ws://` / `wss://` URL.
///
/// Returns `None` when no host component is present (empty input or a bare
/// scheme), so callers surface "Invalid URL" instead of dialing `:9750`.
fn extract_host(url: &str) -> Option<String> {
    // Strip at most one leading scheme. The previous implementation used
    // `str::replace`, which removed the scheme substring *anywhere* in the
    // URL (e.g. inside a path segment) and accepted empty hosts.
    let rest = url
        .strip_prefix("wss://")
        .or_else(|| url.strip_prefix("ws://"))
        .unwrap_or(url);
    rest.split('/')
        .next()
        .filter(|host| !host.is_empty())
        .map(|host| host.to_string())
}

404
src-tauri/src/search.rs Normal file
View file

@ -0,0 +1,404 @@
// search — FTS5 full-text search across messages, tasks, and btmsg
// Uses sessions.db for search index (separate from btmsg.db source tables).
// Index populated via explicit index_* calls; rebuild re-reads from source tables.
use rusqlite::{params, Connection};
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::sync::Mutex;
/// Handle to the FTS5 search database.
pub struct SearchDb {
    // Mutex-wrapped so one handle can serve multiple threads; every method
    // locks for the duration of its statement(s).
    conn: Mutex<Connection>,
}
/// One hit returned by a search; serialized with camelCase keys for the frontend.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SearchResult {
    /// Source table of the hit: "message", "task", or "btmsg".
    pub result_type: String,
    /// Identifier column of the source row (session/task/message id).
    pub id: String,
    /// Short label (role, task title, or sending agent).
    pub title: String,
    /// FTS5 snippet() output with `<b>`/`</b>` highlight markers.
    pub snippet: String,
    /// abs(FTS5 rank); lower values are more relevant.
    pub score: f64,
}
impl SearchDb {
/// Open (or create) the search database and initialize FTS5 tables.
pub fn open(db_path: &PathBuf) -> Result<Self, String> {
if let Some(parent) = db_path.parent() {
std::fs::create_dir_all(parent)
.map_err(|e| format!("Failed to create search db dir: {e}"))?;
}
let conn = Connection::open(db_path)
.map_err(|e| format!("Failed to open search db: {e}"))?;
conn.query_row("PRAGMA journal_mode=WAL", [], |_| Ok(()))
.map_err(|e| format!("Failed to set WAL mode: {e}"))?;
let db = Self {
conn: Mutex::new(conn),
};
db.create_tables()?;
Ok(db)
}
/// Create FTS5 virtual tables if they don't already exist.
fn create_tables(&self) -> Result<(), String> {
let conn = self.conn.lock().unwrap();
conn.execute_batch(
"CREATE VIRTUAL TABLE IF NOT EXISTS search_messages USING fts5(
session_id,
role,
content,
timestamp
);
CREATE VIRTUAL TABLE IF NOT EXISTS search_tasks USING fts5(
task_id,
title,
description,
status,
assigned_to
);
CREATE VIRTUAL TABLE IF NOT EXISTS search_btmsg USING fts5(
message_id,
from_agent,
to_agent,
content,
channel_name
);"
)
.map_err(|e| format!("Failed to create FTS5 tables: {e}"))
}
/// Index an agent message into the search_messages FTS5 table.
pub fn index_message(
&self,
session_id: &str,
role: &str,
content: &str,
) -> Result<(), String> {
let conn = self.conn.lock().unwrap();
let timestamp = chrono_now();
conn.execute(
"INSERT INTO search_messages (session_id, role, content, timestamp)
VALUES (?1, ?2, ?3, ?4)",
params![session_id, role, content, timestamp],
)
.map_err(|e| format!("Index message error: {e}"))?;
Ok(())
}
/// Index a task into the search_tasks FTS5 table.
#[allow(dead_code)] // Called via Tauri IPC command search_index_task
pub fn index_task(
&self,
task_id: &str,
title: &str,
description: &str,
status: &str,
assigned_to: &str,
) -> Result<(), String> {
let conn = self.conn.lock().unwrap();
conn.execute(
"INSERT INTO search_tasks (task_id, title, description, status, assigned_to)
VALUES (?1, ?2, ?3, ?4, ?5)",
params![task_id, title, description, status, assigned_to],
)
.map_err(|e| format!("Index task error: {e}"))?;
Ok(())
}
/// Index a btmsg message into the search_btmsg FTS5 table.
#[allow(dead_code)] // Called via Tauri IPC command search_index_btmsg
pub fn index_btmsg(
&self,
msg_id: &str,
from_agent: &str,
to_agent: &str,
content: &str,
channel: &str,
) -> Result<(), String> {
let conn = self.conn.lock().unwrap();
conn.execute(
"INSERT INTO search_btmsg (message_id, from_agent, to_agent, content, channel_name)
VALUES (?1, ?2, ?3, ?4, ?5)",
params![msg_id, from_agent, to_agent, content, channel],
)
.map_err(|e| format!("Index btmsg error: {e}"))?;
Ok(())
}
/// Search across all FTS5 tables using MATCH, returning results sorted by relevance.
pub fn search_all(&self, query: &str, limit: i32) -> Result<Vec<SearchResult>, String> {
if query.trim().is_empty() {
return Ok(Vec::new());
}
let conn = self.conn.lock().unwrap();
let mut results = Vec::new();
// Search messages
{
let mut stmt = conn
.prepare(
"SELECT session_id, role, snippet(search_messages, 2, '<b>', '</b>', '...', 32) as snip,
rank
FROM search_messages
WHERE search_messages MATCH ?1
ORDER BY rank
LIMIT ?2",
)
.map_err(|e| format!("Search messages query error: {e}"))?;
let rows = stmt
.query_map(params![query, limit], |row| {
Ok(SearchResult {
result_type: "message".into(),
id: row.get::<_, String>("session_id")?,
title: row.get::<_, String>("role")?,
snippet: row.get::<_, String>("snip").unwrap_or_default(),
score: row.get::<_, f64>("rank").unwrap_or(0.0).abs(),
})
})
.map_err(|e| format!("Search messages error: {e}"))?;
for row in rows {
if let Ok(r) = row {
results.push(r);
}
}
}
// Search tasks
{
let mut stmt = conn
.prepare(
"SELECT task_id, title, snippet(search_tasks, 2, '<b>', '</b>', '...', 32) as snip,
rank
FROM search_tasks
WHERE search_tasks MATCH ?1
ORDER BY rank
LIMIT ?2",
)
.map_err(|e| format!("Search tasks query error: {e}"))?;
let rows = stmt
.query_map(params![query, limit], |row| {
Ok(SearchResult {
result_type: "task".into(),
id: row.get::<_, String>("task_id")?,
title: row.get::<_, String>("title")?,
snippet: row.get::<_, String>("snip").unwrap_or_default(),
score: row.get::<_, f64>("rank").unwrap_or(0.0).abs(),
})
})
.map_err(|e| format!("Search tasks error: {e}"))?;
for row in rows {
if let Ok(r) = row {
results.push(r);
}
}
}
// Search btmsg
{
let mut stmt = conn
.prepare(
"SELECT message_id, from_agent, snippet(search_btmsg, 3, '<b>', '</b>', '...', 32) as snip,
rank
FROM search_btmsg
WHERE search_btmsg MATCH ?1
ORDER BY rank
LIMIT ?2",
)
.map_err(|e| format!("Search btmsg query error: {e}"))?;
let rows = stmt
.query_map(params![query, limit], |row| {
Ok(SearchResult {
result_type: "btmsg".into(),
id: row.get::<_, String>("message_id")?,
title: row.get::<_, String>("from_agent")?,
snippet: row.get::<_, String>("snip").unwrap_or_default(),
score: row.get::<_, f64>("rank").unwrap_or(0.0).abs(),
})
})
.map_err(|e| format!("Search btmsg error: {e}"))?;
for row in rows {
if let Ok(r) = row {
results.push(r);
}
}
}
// Sort by score ascending (FTS5 rank is negative, abs() makes lower = more relevant)
results.sort_by(|a, b| a.score.partial_cmp(&b.score).unwrap_or(std::cmp::Ordering::Equal));
results.truncate(limit as usize);
Ok(results)
}
/// Drop and recreate all FTS5 tables (clears the index).
pub fn rebuild_index(&self) -> Result<(), String> {
let conn = self.conn.lock().unwrap();
conn.execute_batch(
"DROP TABLE IF EXISTS search_messages;
DROP TABLE IF EXISTS search_tasks;
DROP TABLE IF EXISTS search_btmsg;"
)
.map_err(|e| format!("Failed to drop FTS5 tables: {e}"))?;
drop(conn);
self.create_tables()?;
Ok(())
}
}
/// Current time as Unix seconds rendered as a string (avoids a chrono dependency).
///
/// The previous implementation returned an empty string, which left the FTS
/// `timestamp` column blank for every indexed message. Falls back to an empty
/// string only if the system clock reads before the Unix epoch.
fn chrono_now() -> String {
    std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map(|d| d.as_secs().to_string())
        .unwrap_or_default()
}
#[cfg(test)]
mod tests {
    use super::*;
    // Builds a fresh SearchDb in a temp directory. The TempDir guard is
    // returned alongside the db so the backing file outlives the test body.
    fn temp_search_db() -> (SearchDb, tempfile::TempDir) {
        let dir = tempfile::tempdir().unwrap();
        let db_path = dir.path().join("search.db");
        let db = SearchDb::open(&db_path).unwrap();
        (db, dir)
    }
    // create_tables uses IF NOT EXISTS, so a second call must be a no-op.
    #[test]
    fn test_create_tables_idempotent() {
        let (db, _dir) = temp_search_db();
        // Second call should not fail
        db.create_tables().unwrap();
    }
    #[test]
    fn test_index_and_search_message() {
        let (db, _dir) = temp_search_db();
        db.index_message("s1", "assistant", "The quick brown fox jumps over the lazy dog")
            .unwrap();
        db.index_message("s2", "user", "Hello world from the user")
            .unwrap();
        let results = db.search_all("quick brown", 10).unwrap();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].result_type, "message");
        assert_eq!(results[0].id, "s1");
    }
    #[test]
    fn test_index_and_search_task() {
        let (db, _dir) = temp_search_db();
        db.index_task("t1", "Fix login bug", "Users cannot log in with SSO", "progress", "agent-1")
            .unwrap();
        db.index_task("t2", "Add dark mode", "Theme support", "todo", "agent-2")
            .unwrap();
        let results = db.search_all("login SSO", 10).unwrap();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].result_type, "task");
        assert_eq!(results[0].id, "t1");
        assert_eq!(results[0].title, "Fix login bug");
    }
    #[test]
    fn test_index_and_search_btmsg() {
        let (db, _dir) = temp_search_db();
        db.index_btmsg("m1", "manager", "architect", "Please review the API design", "general")
            .unwrap();
        db.index_btmsg("m2", "tester", "manager", "All tests passing", "review-queue")
            .unwrap();
        let results = db.search_all("API design", 10).unwrap();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].result_type, "btmsg");
        assert_eq!(results[0].id, "m1");
    }
    // One matching row per table should yield three combined results.
    #[test]
    fn test_search_across_all_tables() {
        let (db, _dir) = temp_search_db();
        db.index_message("s1", "assistant", "Please deploy the auth service now")
            .unwrap();
        db.index_task("t1", "Deploy auth service", "Deploy to production", "todo", "ops")
            .unwrap();
        db.index_btmsg("m1", "manager", "ops", "Please deploy the auth service ASAP", "ops-channel")
            .unwrap();
        let results = db.search_all("deploy auth", 10).unwrap();
        assert_eq!(results.len(), 3, "should find results across all 3 tables");
        let types: Vec<&str> = results.iter().map(|r| r.result_type.as_str()).collect();
        assert!(types.contains(&"message"));
        assert!(types.contains(&"task"));
        assert!(types.contains(&"btmsg"));
    }
    // An empty/whitespace query short-circuits without touching the tables.
    #[test]
    fn test_search_empty_query() {
        let (db, _dir) = temp_search_db();
        db.index_message("s1", "user", "some content").unwrap();
        let results = db.search_all("", 10).unwrap();
        assert!(results.is_empty());
    }
    #[test]
    fn test_search_no_results() {
        let (db, _dir) = temp_search_db();
        db.index_message("s1", "user", "hello world").unwrap();
        let results = db.search_all("nonexistent", 10).unwrap();
        assert!(results.is_empty());
    }
    // The limit applies to the combined result set after merging tables.
    #[test]
    fn test_search_limit() {
        let (db, _dir) = temp_search_db();
        for i in 0..20 {
            db.index_message(&format!("s{i}"), "user", &format!("test message number {i}"))
                .unwrap();
        }
        let results = db.search_all("test message", 5).unwrap();
        assert!(results.len() <= 5);
    }
    // rebuild_index drops and recreates the tables, clearing all entries.
    #[test]
    fn test_rebuild_index() {
        let (db, _dir) = temp_search_db();
        db.index_message("s1", "user", "important data").unwrap();
        let before = db.search_all("important", 10).unwrap();
        assert_eq!(before.len(), 1);
        db.rebuild_index().unwrap();
        let after = db.search_all("important", 10).unwrap();
        assert!(after.is_empty(), "index should be empty after rebuild");
    }
    // Frontend contract: serde renames snake_case fields to camelCase.
    #[test]
    fn test_search_result_serializes_to_camel_case() {
        let result = SearchResult {
            result_type: "message".into(),
            id: "s1".into(),
            title: "user".into(),
            snippet: "test".into(),
            score: 0.5,
        };
        let json = serde_json::to_value(&result).unwrap();
        assert!(json.get("resultType").is_some(), "expected camelCase 'resultType'");
        assert!(json.get("result_type").is_none(), "should not have snake_case");
    }
}

130
src-tauri/src/secrets.rs Normal file
View file

@ -0,0 +1,130 @@
//! Secrets management via system keyring (libsecret on Linux).
//!
//! Stores secrets in the OS keyring (GNOME Keyring / KDE Wallet).
//! A metadata entry "__bterminal_keys__" tracks known key names.
//! If the keyring is unavailable, operations return explicit errors
//! rather than falling back to plaintext storage.
use keyring::Entry;
const SERVICE: &str = "bterminal";
const KEYS_META: &str = "__bterminal_keys__";
/// Known secret key identifiers.
pub const KNOWN_KEYS: &[&str] = &[
"anthropic_api_key",
"openai_api_key",
"openrouter_api_key",
"github_token",
"relay_token",
];
pub struct SecretsManager;
impl SecretsManager {
/// Store a secret value in the system keyring.
pub fn store_secret(key: &str, value: &str) -> Result<(), String> {
let entry = Entry::new(SERVICE, key).map_err(|e| format!("keyring init error: {e}"))?;
entry
.set_password(value)
.map_err(|e| format!("failed to store secret '{key}': {e}"))?;
// Track the key in metadata
Self::add_key_to_meta(key)?;
Ok(())
}
/// Retrieve a secret value from the system keyring.
/// Returns Ok(None) if the key does not exist.
pub fn get_secret(key: &str) -> Result<Option<String>, String> {
let entry = Entry::new(SERVICE, key).map_err(|e| format!("keyring init error: {e}"))?;
match entry.get_password() {
Ok(pw) => Ok(Some(pw)),
Err(keyring::Error::NoEntry) => Ok(None),
Err(e) => Err(format!("failed to get secret '{key}': {e}")),
}
}
/// Delete a secret from the system keyring.
pub fn delete_secret(key: &str) -> Result<(), String> {
let entry = Entry::new(SERVICE, key).map_err(|e| format!("keyring init error: {e}"))?;
match entry.delete_credential() {
Ok(()) => {}
Err(keyring::Error::NoEntry) => {} // already absent, not an error
Err(e) => return Err(format!("failed to delete secret '{key}': {e}")),
}
Self::remove_key_from_meta(key)?;
Ok(())
}
/// List keys that have been stored (read from metadata entry).
pub fn list_keys() -> Result<Vec<String>, String> {
let entry =
Entry::new(SERVICE, KEYS_META).map_err(|e| format!("keyring init error: {e}"))?;
match entry.get_password() {
Ok(raw) => {
let keys: Vec<String> = raw
.split('\n')
.filter(|s| !s.is_empty())
.map(String::from)
.collect();
Ok(keys)
}
Err(keyring::Error::NoEntry) => Ok(Vec::new()),
Err(e) => Err(format!("failed to list secret keys: {e}")),
}
}
/// Check whether the system keyring is available.
pub fn has_keyring() -> bool {
// Attempt to create an entry — this is the cheapest probe.
Entry::new(SERVICE, "__probe__").is_ok()
}
// --- internal helpers ---
fn add_key_to_meta(key: &str) -> Result<(), String> {
let mut keys = Self::list_keys().unwrap_or_default();
if !keys.iter().any(|k| k == key) {
keys.push(key.to_string());
Self::save_meta(&keys)?;
}
Ok(())
}
fn remove_key_from_meta(key: &str) -> Result<(), String> {
let mut keys = Self::list_keys().unwrap_or_default();
keys.retain(|k| k != key);
Self::save_meta(&keys)?;
Ok(())
}
fn save_meta(keys: &[String]) -> Result<(), String> {
let entry =
Entry::new(SERVICE, KEYS_META).map_err(|e| format!("keyring init error: {e}"))?;
let data = keys.join("\n");
entry
.set_password(&data)
.map_err(|e| format!("failed to save key metadata: {e}"))?;
Ok(())
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // The known-key registry must never be empty.
    #[test]
    fn known_keys_are_not_empty() {
        assert!(!KNOWN_KEYS.is_empty());
    }
    // Spot-check the entries other code relies on by name.
    #[test]
    fn known_keys_contains_expected() {
        for expected in ["anthropic_api_key", "openai_api_key", "github_token", "relay_token"] {
            assert!(KNOWN_KEYS.contains(&expected), "missing key: {expected}");
        }
    }
}

View file

@ -0,0 +1,144 @@
// Agent message and project state persistence (agent_messages + project_agent_state tables)
use rusqlite::params;
use serde::{Deserialize, Serialize};
use super::SessionDb;
/// One persisted agent conversation message (row of the agent_messages table).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AgentMessageRecord {
    /// Rowid assigned by SQLite; serde default (0) before the row is stored.
    #[serde(default)]
    pub id: i64,
    pub session_id: String,
    pub project_id: String,
    /// SDK-side session identifier, when one was reported.
    pub sdk_session_id: Option<String>,
    pub message_type: String,
    /// Message payload, stored verbatim as text.
    pub content: String,
    pub parent_id: Option<String>,
    /// Creation timestamp (integer; unit chosen by callers — not shown here).
    pub created_at: i64,
}
/// Per-project snapshot of the agent's latest run (project_agent_state table,
/// keyed by project_id — saves are INSERT OR REPLACE).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProjectAgentState {
    pub project_id: String,
    pub last_session_id: String,
    /// SDK-side session identifier, when one was reported.
    pub sdk_session_id: Option<String>,
    pub status: String,
    /// Accumulated cost in USD.
    pub cost_usd: f64,
    pub input_tokens: i64,
    pub output_tokens: i64,
    /// Most recent user prompt, if any.
    pub last_prompt: Option<String>,
    pub updated_at: i64,
}
impl SessionDb {
    /// Replace the persisted message history for `session_id` with `messages`.
    ///
    /// The `session_id`/`project_id`/`sdk_session_id` arguments are written for
    /// every record (the per-record fields of the same names are ignored), so a
    /// whole batch is stored under one session consistently.
    pub fn save_agent_messages(
        &self,
        session_id: &str,
        project_id: &str,
        sdk_session_id: Option<&str>,
        messages: &[AgentMessageRecord],
    ) -> Result<(), String> {
        let conn = self.conn.lock().unwrap();
        // Wrap DELETE+INSERTs in a transaction to prevent partial writes on crash
        let tx = conn.unchecked_transaction()
            .map_err(|e| format!("Begin transaction failed: {e}"))?;
        // Clear previous messages for this session
        tx.execute(
            "DELETE FROM agent_messages WHERE session_id = ?1",
            params![session_id],
        ).map_err(|e| format!("Delete old messages failed: {e}"))?;
        let mut stmt = tx.prepare(
            "INSERT INTO agent_messages (session_id, project_id, sdk_session_id, message_type, content, parent_id, created_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"
        ).map_err(|e| format!("Prepare insert failed: {e}"))?;
        for msg in messages {
            stmt.execute(params![
                session_id,
                project_id,
                sdk_session_id,
                msg.message_type,
                msg.content,
                msg.parent_id,
                msg.created_at,
            ]).map_err(|e| format!("Insert message failed: {e}"))?;
        }
        // stmt borrows tx; it must be dropped before commit() can consume tx.
        drop(stmt);
        tx.commit().map_err(|e| format!("Commit failed: {e}"))?;
        Ok(())
    }
    /// Load all persisted messages for a project, oldest first (created_at ASC).
    pub fn load_agent_messages(&self, project_id: &str) -> Result<Vec<AgentMessageRecord>, String> {
        let conn = self.conn.lock().unwrap();
        let mut stmt = conn.prepare(
            "SELECT id, session_id, project_id, sdk_session_id, message_type, content, parent_id, created_at
             FROM agent_messages
             WHERE project_id = ?1
             ORDER BY created_at ASC"
        ).map_err(|e| format!("Query prepare failed: {e}"))?;
        let messages = stmt.query_map(params![project_id], |row| {
            Ok(AgentMessageRecord {
                id: row.get(0)?,
                session_id: row.get(1)?,
                project_id: row.get(2)?,
                sdk_session_id: row.get(3)?,
                message_type: row.get(4)?,
                content: row.get(5)?,
                parent_id: row.get(6)?,
                created_at: row.get(7)?,
            })
        }).map_err(|e| format!("Query failed: {e}"))?
        .collect::<Result<Vec<_>, _>>()
        .map_err(|e| format!("Row read failed: {e}"))?;
        Ok(messages)
    }
    /// Upsert the per-project agent state snapshot (keyed by project_id).
    pub fn save_project_agent_state(&self, state: &ProjectAgentState) -> Result<(), String> {
        let conn = self.conn.lock().unwrap();
        conn.execute(
            "INSERT OR REPLACE INTO project_agent_state (project_id, last_session_id, sdk_session_id, status, cost_usd, input_tokens, output_tokens, last_prompt, updated_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
            params![
                state.project_id,
                state.last_session_id,
                state.sdk_session_id,
                state.status,
                state.cost_usd,
                state.input_tokens,
                state.output_tokens,
                state.last_prompt,
                state.updated_at,
            ],
        ).map_err(|e| format!("Save project agent state failed: {e}"))?;
        Ok(())
    }
    /// Load the state snapshot for a project; Ok(None) when none was saved yet.
    pub fn load_project_agent_state(&self, project_id: &str) -> Result<Option<ProjectAgentState>, String> {
        let conn = self.conn.lock().unwrap();
        let mut stmt = conn.prepare(
            "SELECT project_id, last_session_id, sdk_session_id, status, cost_usd, input_tokens, output_tokens, last_prompt, updated_at FROM project_agent_state WHERE project_id = ?1"
        ).map_err(|e| format!("Query prepare failed: {e}"))?;
        let result = stmt.query_row(params![project_id], |row| {
            Ok(ProjectAgentState {
                project_id: row.get(0)?,
                last_session_id: row.get(1)?,
                sdk_session_id: row.get(2)?,
                status: row.get(3)?,
                cost_usd: row.get(4)?,
                input_tokens: row.get(5)?,
                output_tokens: row.get(6)?,
                last_prompt: row.get(7)?,
                updated_at: row.get(8)?,
            })
        });
        // Missing row is a normal outcome, not an error.
        match result {
            Ok(state) => Ok(Some(state)),
            Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
            Err(e) => Err(format!("Load project agent state failed: {e}")),
        }
    }
}

View file

@ -0,0 +1,90 @@
// Session anchor persistence (session_anchors table)
use rusqlite::params;
use serde::{Deserialize, Serialize};
use super::SessionDb;
/// One persisted session anchor (row of the session_anchors table).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionAnchorRecord {
    pub id: String,
    pub project_id: String,
    /// Message this anchor points at.
    pub message_id: String,
    /// Anchor category, stored as free text (see update_anchor_type).
    pub anchor_type: String,
    pub content: String,
    /// Estimated token count for `content` (computed by the caller).
    pub estimated_tokens: i64,
    /// Position of the anchored turn; loads are ordered by this ascending.
    pub turn_index: i64,
    pub created_at: i64,
}
impl SessionDb {
    /// Insert-or-replace a batch of anchors atomically.
    ///
    /// The batch runs inside a transaction so a crash mid-loop cannot leave a
    /// partially written set (consistent with save_agent_messages).
    pub fn save_session_anchors(&self, anchors: &[SessionAnchorRecord]) -> Result<(), String> {
        if anchors.is_empty() {
            return Ok(());
        }
        let conn = self.conn.lock().unwrap();
        let tx = conn.unchecked_transaction()
            .map_err(|e| format!("Begin transaction failed: {e}"))?;
        {
            let mut stmt = tx.prepare(
                "INSERT OR REPLACE INTO session_anchors (id, project_id, message_id, anchor_type, content, estimated_tokens, turn_index, created_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)"
            ).map_err(|e| format!("Prepare anchor insert failed: {e}"))?;
            for anchor in anchors {
                stmt.execute(params![
                    anchor.id,
                    anchor.project_id,
                    anchor.message_id,
                    anchor.anchor_type,
                    anchor.content,
                    anchor.estimated_tokens,
                    anchor.turn_index,
                    anchor.created_at,
                ]).map_err(|e| format!("Insert anchor failed: {e}"))?;
            }
            // stmt borrows tx; its scope ends here so commit() can consume tx.
        }
        tx.commit().map_err(|e| format!("Commit anchors failed: {e}"))?;
        Ok(())
    }
    /// Load all anchors for a project, ordered by turn_index ascending.
    pub fn load_session_anchors(&self, project_id: &str) -> Result<Vec<SessionAnchorRecord>, String> {
        let conn = self.conn.lock().unwrap();
        let mut stmt = conn.prepare(
            "SELECT id, project_id, message_id, anchor_type, content, estimated_tokens, turn_index, created_at FROM session_anchors WHERE project_id = ?1 ORDER BY turn_index ASC"
        ).map_err(|e| format!("Query anchors failed: {e}"))?;
        let anchors = stmt.query_map(params![project_id], |row| {
            Ok(SessionAnchorRecord {
                id: row.get(0)?,
                project_id: row.get(1)?,
                message_id: row.get(2)?,
                anchor_type: row.get(3)?,
                content: row.get(4)?,
                estimated_tokens: row.get(5)?,
                turn_index: row.get(6)?,
                created_at: row.get(7)?,
            })
        }).map_err(|e| format!("Query anchors failed: {e}"))?
        .collect::<Result<Vec<_>, _>>()
        .map_err(|e| format!("Read anchor row failed: {e}"))?;
        Ok(anchors)
    }
    /// Delete a single anchor by id (no error when it is already absent).
    pub fn delete_session_anchor(&self, id: &str) -> Result<(), String> {
        let conn = self.conn.lock().unwrap();
        conn.execute("DELETE FROM session_anchors WHERE id = ?1", params![id])
            .map_err(|e| format!("Delete anchor failed: {e}"))?;
        Ok(())
    }
    /// Delete every anchor belonging to a project.
    pub fn delete_project_anchors(&self, project_id: &str) -> Result<(), String> {
        let conn = self.conn.lock().unwrap();
        conn.execute("DELETE FROM session_anchors WHERE project_id = ?1", params![project_id])
            .map_err(|e| format!("Delete project anchors failed: {e}"))?;
        Ok(())
    }
    /// Change the stored anchor_type for one anchor.
    pub fn update_anchor_type(&self, id: &str, anchor_type: &str) -> Result<(), String> {
        let conn = self.conn.lock().unwrap();
        conn.execute(
            "UPDATE session_anchors SET anchor_type = ?2 WHERE id = ?1",
            params![id, anchor_type],
        ).map_err(|e| format!("Update anchor type failed: {e}"))?;
        Ok(())
    }
}

View file

@ -0,0 +1,90 @@
// Layout state persistence (layout_state table)
use rusqlite::params;
use serde::{Deserialize, Serialize};
use super::SessionDb;
/// UI layout snapshot persisted in the singleton layout_state row (id = 1).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LayoutState {
    /// Layout preset name; the migration seeds "1-col" as the default.
    pub preset: String,
    /// Ordered pane identifiers; stored as a JSON array string.
    pub pane_ids: Vec<String>,
}
impl SessionDb {
    /// Persist the layout into the singleton layout_state row (id = 1).
    pub fn save_layout(&self, layout: &LayoutState) -> Result<(), String> {
        // Serialize before taking the connection lock.
        let panes_json = serde_json::to_string(&layout.pane_ids)
            .map_err(|e| format!("Serialize pane_ids failed: {e}"))?;
        let conn = self.conn.lock().unwrap();
        conn.execute(
            "UPDATE layout_state SET preset = ?1, pane_ids = ?2 WHERE id = 1",
            params![layout.preset, panes_json],
        )
        .map_err(|e| format!("Layout save failed: {e}"))?;
        Ok(())
    }
    /// Read the layout back from the singleton row. Malformed pane-id JSON
    /// degrades to an empty pane list instead of failing the whole load.
    pub fn load_layout(&self) -> Result<LayoutState, String> {
        let conn = self.conn.lock().unwrap();
        let mut stmt = conn
            .prepare("SELECT preset, pane_ids FROM layout_state WHERE id = 1")
            .map_err(|e| format!("Layout query failed: {e}"))?;
        stmt.query_row([], |row| {
            let preset: String = row.get(0)?;
            let raw_panes: String = row.get(1)?;
            let pane_ids: Vec<String> = serde_json::from_str(&raw_panes).unwrap_or_default();
            Ok(LayoutState { preset, pane_ids })
        })
        .map_err(|e| format!("Layout read failed: {e}"))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Fresh SessionDb in a temp dir; migrations seed the layout_state row id=1.
    //
    // The TempDir guard is returned alongside the db: the previous helper
    // dropped it on return, deleting the directory out from under the live
    // SQLite handle (only "worked" via POSIX unlink semantics; breaks on
    // Windows and risks WAL-file confusion).
    fn make_db() -> (SessionDb, tempfile::TempDir) {
        let dir = tempfile::tempdir().unwrap();
        let db = SessionDb::open(&dir.path().to_path_buf()).unwrap();
        (db, dir)
    }
    #[test]
    fn test_load_default_layout() {
        let (db, _dir) = make_db();
        let layout = db.load_layout().unwrap();
        // Defaults come from the INSERT OR IGNORE seed in the migration.
        assert_eq!(layout.preset, "1-col");
        assert!(layout.pane_ids.is_empty());
    }
    #[test]
    fn test_save_and_load_layout() {
        let (db, _dir) = make_db();
        let layout = LayoutState {
            preset: "2-col".to_string(),
            pane_ids: vec!["p1".to_string(), "p2".to_string()],
        };
        db.save_layout(&layout).unwrap();
        let loaded = db.load_layout().unwrap();
        assert_eq!(loaded.preset, "2-col");
        assert_eq!(loaded.pane_ids, vec!["p1", "p2"]);
    }
    // Saving twice must leave only the most recent state in the singleton row.
    #[test]
    fn test_save_layout_overwrites() {
        let (db, _dir) = make_db();
        let layout1 = LayoutState {
            preset: "2-col".to_string(),
            pane_ids: vec!["p1".to_string()],
        };
        db.save_layout(&layout1).unwrap();
        let layout2 = LayoutState {
            preset: "3-col".to_string(),
            pane_ids: vec!["a".to_string(), "b".to_string(), "c".to_string()],
        };
        db.save_layout(&layout2).unwrap();
        let loaded = db.load_layout().unwrap();
        assert_eq!(loaded.preset, "3-col");
        assert_eq!(loaded.pane_ids.len(), 3);
    }
}

View file

@ -0,0 +1,80 @@
// Session metrics persistence (session_metrics table)
use rusqlite::params;
use serde::{Deserialize, Serialize};
use super::SessionDb;
/// One completed-session metrics row (session_metrics table).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionMetric {
    /// Rowid assigned by SQLite; serde default (0) before the row is stored.
    #[serde(default)]
    pub id: i64,
    pub project_id: String,
    pub session_id: String,
    pub start_time: i64,
    pub end_time: i64,
    /// Peak token count for the session (computed by the caller).
    pub peak_tokens: i64,
    pub turn_count: i64,
    pub tool_call_count: i64,
    /// Session cost in USD.
    pub cost_usd: f64,
    /// Model identifier, when known.
    pub model: Option<String>,
    pub status: String,
    /// Populated when the session ended in error.
    pub error_message: Option<String>,
}
impl SessionDb {
    /// Persist one finished-session metric row, then prune so at most the 100
    /// most recent rows (by end_time) are kept per project.
    pub fn save_session_metric(&self, metric: &SessionMetric) -> Result<(), String> {
        let conn = self.conn.lock().unwrap();
        // `metric.id` is not written — presumably assigned by the table's
        // rowid/AUTOINCREMENT; schema not visible here, confirm in migrations.
        conn.execute(
            "INSERT INTO session_metrics (project_id, session_id, start_time, end_time, peak_tokens, turn_count, tool_call_count, cost_usd, model, status, error_message) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)",
            params![
                metric.project_id,
                metric.session_id,
                metric.start_time,
                metric.end_time,
                metric.peak_tokens,
                metric.turn_count,
                metric.tool_call_count,
                metric.cost_usd,
                metric.model,
                metric.status,
                metric.error_message,
            ],
        ).map_err(|e| format!("Save session metric failed: {e}"))?;
        // Enforce retention: keep last 100 per project
        conn.execute(
            "DELETE FROM session_metrics WHERE project_id = ?1 AND id NOT IN (SELECT id FROM session_metrics WHERE project_id = ?1 ORDER BY end_time DESC LIMIT 100)",
            params![metric.project_id],
        ).map_err(|e| format!("Prune session metrics failed: {e}"))?;
        Ok(())
    }
    /// Load up to `limit` metric rows for `project_id`, newest first by end_time.
    pub fn load_session_metrics(&self, project_id: &str, limit: i64) -> Result<Vec<SessionMetric>, String> {
        let conn = self.conn.lock().unwrap();
        let mut stmt = conn.prepare(
            "SELECT id, project_id, session_id, start_time, end_time, peak_tokens, turn_count, tool_call_count, cost_usd, model, status, error_message FROM session_metrics WHERE project_id = ?1 ORDER BY end_time DESC LIMIT ?2"
        ).map_err(|e| format!("Query prepare failed: {e}"))?;
        let metrics = stmt.query_map(params![project_id, limit], |row| {
            Ok(SessionMetric {
                id: row.get(0)?,
                project_id: row.get(1)?,
                session_id: row.get(2)?,
                start_time: row.get(3)?,
                end_time: row.get(4)?,
                peak_tokens: row.get(5)?,
                turn_count: row.get(6)?,
                tool_call_count: row.get(7)?,
                cost_usd: row.get(8)?,
                model: row.get(9)?,
                status: row.get(10)?,
                error_message: row.get(11)?,
            })
        }).map_err(|e| format!("Query failed: {e}"))?
        .collect::<Result<Vec<_>, _>>()
        .map_err(|e| format!("Row read failed: {e}"))?;
        Ok(metrics)
    }
}

View file

@ -0,0 +1,174 @@
// Session persistence via rusqlite
// SessionDb owns the connection; table-specific operations live in sub-modules
mod sessions;
mod layout;
mod settings;
mod ssh;
mod agents;
mod metrics;
mod anchors;
pub use sessions::Session;
pub use layout::LayoutState;
pub use ssh::SshSession;
pub use agents::{AgentMessageRecord, ProjectAgentState};
pub use metrics::SessionMetric;
pub use anchors::SessionAnchorRecord;
use rusqlite::Connection;
use std::path::PathBuf;
use std::sync::Mutex;
/// Owner of the sessions.db connection. Table-specific operations live in the
/// sibling sub-modules, each adding methods through its own `impl SessionDb`.
pub struct SessionDb {
    // Mutex so the handle can be shared; visibility scoped to the session
    // module tree so sub-modules can lock it directly.
    pub(in crate::session) conn: Mutex<Connection>,
}
impl SessionDb {
/// Open (or create) `sessions.db` inside `data_dir` and run migrations.
pub fn open(data_dir: &PathBuf) -> Result<Self, String> {
    std::fs::create_dir_all(data_dir)
        .map_err(|e| format!("Failed to create data dir: {e}"))?;
    let conn = Connection::open(data_dir.join("sessions.db"))
        .map_err(|e| format!("Failed to open database: {e}"))?;
    // WAL improves concurrent read performance. journal_mode returns a
    // result row, so query_row is required instead of pragma_update.
    conn.query_row("PRAGMA journal_mode=WAL", [], |_| Ok(()))
        .map_err(|e| format!("Failed to set journal_mode: {e}"))?;
    conn.pragma_update(None, "foreign_keys", "ON")
        .map_err(|e| format!("Failed to set foreign_keys: {e}"))?;
    let db = Self { conn: Mutex::new(conn) };
    db.migrate()?;
    Ok(db)
}
fn migrate(&self) -> Result<(), String> {
let conn = self.conn.lock().unwrap();
conn.execute_batch(
"CREATE TABLE IF NOT EXISTS sessions (
id TEXT PRIMARY KEY,
type TEXT NOT NULL,
title TEXT NOT NULL,
shell TEXT,
cwd TEXT,
args TEXT,
created_at INTEGER NOT NULL,
last_used_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS layout_state (
id INTEGER PRIMARY KEY CHECK (id = 1),
preset TEXT NOT NULL DEFAULT '1-col',
pane_ids TEXT NOT NULL DEFAULT '[]'
);
INSERT OR IGNORE INTO layout_state (id, preset, pane_ids) VALUES (1, '1-col', '[]');
CREATE TABLE IF NOT EXISTS settings (
key TEXT PRIMARY KEY,
value TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS ssh_sessions (
id TEXT PRIMARY KEY,
name TEXT NOT NULL,
host TEXT NOT NULL,
port INTEGER NOT NULL DEFAULT 22,
username TEXT NOT NULL,
key_file TEXT DEFAULT '',
folder TEXT DEFAULT '',
color TEXT DEFAULT '#89b4fa',
created_at INTEGER NOT NULL,
last_used_at INTEGER NOT NULL
);
"
).map_err(|e| format!("Migration failed: {e}"))?;
// Add group_name column if missing (v2 migration)
let has_group: i64 = conn.query_row(
"SELECT COUNT(*) FROM pragma_table_info('sessions') WHERE name='group_name'",
[],
|row| row.get(0),
).unwrap_or(0);
if has_group == 0 {
conn.execute("ALTER TABLE sessions ADD COLUMN group_name TEXT DEFAULT ''", [])
.map_err(|e| format!("Migration (group_name) failed: {e}"))?;
}
// v3 migration: project_id column on sessions
let has_project_id: i64 = conn.query_row(
"SELECT COUNT(*) FROM pragma_table_info('sessions') WHERE name='project_id'",
[],
|row| row.get(0),
).unwrap_or(0);
if has_project_id == 0 {
conn.execute("ALTER TABLE sessions ADD COLUMN project_id TEXT DEFAULT ''", [])
.map_err(|e| format!("Migration (project_id) failed: {e}"))?;
}
// v3: agent message history for session continuity
conn.execute_batch(
"CREATE TABLE IF NOT EXISTS agent_messages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
session_id TEXT NOT NULL,
project_id TEXT NOT NULL,
sdk_session_id TEXT,
message_type TEXT NOT NULL,
content TEXT NOT NULL,
parent_id TEXT,
created_at INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_agent_messages_session
ON agent_messages(session_id);
CREATE INDEX IF NOT EXISTS idx_agent_messages_project
ON agent_messages(project_id);
CREATE TABLE IF NOT EXISTS project_agent_state (
project_id TEXT PRIMARY KEY,
last_session_id TEXT NOT NULL,
sdk_session_id TEXT,
status TEXT NOT NULL,
cost_usd REAL DEFAULT 0,
input_tokens INTEGER DEFAULT 0,
output_tokens INTEGER DEFAULT 0,
last_prompt TEXT,
updated_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS session_metrics (
id INTEGER PRIMARY KEY AUTOINCREMENT,
project_id TEXT NOT NULL,
session_id TEXT NOT NULL,
start_time INTEGER NOT NULL,
end_time INTEGER NOT NULL,
peak_tokens INTEGER DEFAULT 0,
turn_count INTEGER DEFAULT 0,
tool_call_count INTEGER DEFAULT 0,
cost_usd REAL DEFAULT 0,
model TEXT,
status TEXT NOT NULL,
error_message TEXT
);
CREATE INDEX IF NOT EXISTS idx_session_metrics_project
ON session_metrics(project_id);
CREATE TABLE IF NOT EXISTS session_anchors (
id TEXT PRIMARY KEY,
project_id TEXT NOT NULL,
message_id TEXT NOT NULL,
anchor_type TEXT NOT NULL,
content TEXT NOT NULL,
estimated_tokens INTEGER NOT NULL,
turn_index INTEGER NOT NULL DEFAULT 0,
created_at INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_session_anchors_project
ON session_anchors(project_id);"
).map_err(|e| format!("Migration (v3 tables) failed: {e}"))?;
Ok(())
}
}

View file

@ -0,0 +1,256 @@
// Session CRUD operations (sessions table)
use rusqlite::params;
use serde::{Deserialize, Serialize};
use super::SessionDb;
/// A persisted terminal/agent session row (`sessions` table).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Session {
    pub id: String,
    // Serialized as "type" for the frontend; "type" is a Rust keyword.
    #[serde(rename = "type")]
    pub session_type: String,
    pub title: String,
    // Optional shell binary, e.g. "/bin/bash".
    pub shell: Option<String>,
    // Optional working directory the session starts in.
    pub cwd: Option<String>,
    // Extra launch arguments; stored in SQLite as a JSON array string.
    pub args: Option<Vec<String>>,
    // Added in the v2 migration; defaults to "" for old rows and payloads.
    #[serde(default)]
    pub group_name: String,
    // Unix timestamps (seconds).
    pub created_at: i64,
    pub last_used_at: i64,
}
impl SessionDb {
    /// Return all saved sessions, most recently used first.
    pub fn list_sessions(&self) -> Result<Vec<Session>, String> {
        let conn = self.conn.lock().unwrap();
        let mut stmt = conn
            .prepare("SELECT id, type, title, shell, cwd, args, group_name, created_at, last_used_at FROM sessions ORDER BY last_used_at DESC")
            .map_err(|e| format!("Query prepare failed: {e}"))?;
        let sessions = stmt
            .query_map([], |row| {
                // args is stored as a JSON array string; unparseable or NULL
                // values degrade to None rather than failing the whole query.
                let args_json: Option<String> = row.get(5)?;
                let args: Option<Vec<String>> = args_json.and_then(|j| serde_json::from_str(&j).ok());
                Ok(Session {
                    id: row.get(0)?,
                    session_type: row.get(1)?,
                    title: row.get(2)?,
                    shell: row.get(3)?,
                    cwd: row.get(4)?,
                    args,
                    // group_name was added by a later migration; pre-migration
                    // rows can hold NULL, which maps to the empty string.
                    group_name: row.get::<_, Option<String>>(6)?.unwrap_or_default(),
                    created_at: row.get(7)?,
                    last_used_at: row.get(8)?,
                })
            })
            .map_err(|e| format!("Query failed: {e}"))?
            .collect::<Result<Vec<_>, _>>()
            .map_err(|e| format!("Row read failed: {e}"))?;
        Ok(sessions)
    }
    /// Insert a session, or replace the existing row with the same id (upsert).
    pub fn save_session(&self, session: &Session) -> Result<(), String> {
        let conn = self.conn.lock().unwrap();
        // Serializing Vec<String> cannot realistically fail; unwrap_or_default
        // keeps the write going instead of panicking.
        let args_json = session.args.as_ref().map(|a| serde_json::to_string(a).unwrap_or_default());
        conn.execute(
            "INSERT OR REPLACE INTO sessions (id, type, title, shell, cwd, args, group_name, created_at, last_used_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
            params![
                session.id,
                session.session_type,
                session.title,
                session.shell,
                session.cwd,
                args_json,
                session.group_name,
                session.created_at,
                session.last_used_at,
            ],
        ).map_err(|e| format!("Insert failed: {e}"))?;
        Ok(())
    }
    /// Delete a session by id. Deleting an unknown id is not an error.
    pub fn delete_session(&self, id: &str) -> Result<(), String> {
        let conn = self.conn.lock().unwrap();
        conn.execute("DELETE FROM sessions WHERE id = ?1", params![id])
            .map_err(|e| format!("Delete failed: {e}"))?;
        Ok(())
    }
    /// Rename a session.
    pub fn update_title(&self, id: &str, title: &str) -> Result<(), String> {
        let conn = self.conn.lock().unwrap();
        conn.execute(
            "UPDATE sessions SET title = ?1 WHERE id = ?2",
            params![title, id],
        ).map_err(|e| format!("Update failed: {e}"))?;
        Ok(())
    }
    /// Move a session into a group (empty string = ungrouped).
    pub fn update_group(&self, id: &str, group_name: &str) -> Result<(), String> {
        let conn = self.conn.lock().unwrap();
        conn.execute(
            "UPDATE sessions SET group_name = ?1 WHERE id = ?2",
            params![group_name, id],
        ).map_err(|e| format!("Update group failed: {e}"))?;
        Ok(())
    }
    /// Bump `last_used_at` to the current Unix time.
    pub fn touch_session(&self, id: &str) -> Result<(), String> {
        let conn = self.conn.lock().unwrap();
        // A system clock before the Unix epoch is effectively impossible, but
        // the previous unwrap() made it a panic path; fall back to 0 instead.
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_secs() as i64)
            .unwrap_or(0);
        conn.execute(
            "UPDATE sessions SET last_used_at = ?1 WHERE id = ?2",
            params![now, id],
        ).map_err(|e| format!("Touch failed: {e}"))?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Fresh on-disk database for each test.
    // NOTE(review): the TempDir guard is dropped when this fn returns, which
    // unlinks the directory; SQLite keeps working through the already-open fd
    // on Linux, but consider returning the guard alongside the db to keep the
    // directory alive — confirm intent.
    fn make_db() -> SessionDb {
        let dir = tempfile::tempdir().unwrap();
        SessionDb::open(&dir.path().to_path_buf()).unwrap()
    }
    // Fully-populated fixture; individual tests override fields as needed.
    fn make_session(id: &str, title: &str) -> Session {
        Session {
            id: id.to_string(),
            session_type: "terminal".to_string(),
            title: title.to_string(),
            shell: Some("/bin/bash".to_string()),
            cwd: Some("/home/user".to_string()),
            args: Some(vec!["--login".to_string()]),
            group_name: String::new(),
            created_at: 1000,
            last_used_at: 2000,
        }
    }
    #[test]
    fn test_list_sessions_empty() {
        let db = make_db();
        let sessions = db.list_sessions().unwrap();
        assert!(sessions.is_empty());
    }
    // Round-trip: every field written by save_session comes back unchanged.
    #[test]
    fn test_save_and_list_session() {
        let db = make_db();
        let s = make_session("s1", "My Terminal");
        db.save_session(&s).unwrap();
        let sessions = db.list_sessions().unwrap();
        assert_eq!(sessions.len(), 1);
        assert_eq!(sessions[0].id, "s1");
        assert_eq!(sessions[0].title, "My Terminal");
        assert_eq!(sessions[0].session_type, "terminal");
        assert_eq!(sessions[0].shell, Some("/bin/bash".to_string()));
        assert_eq!(sessions[0].cwd, Some("/home/user".to_string()));
        assert_eq!(sessions[0].args, Some(vec!["--login".to_string()]));
        assert_eq!(sessions[0].created_at, 1000);
        assert_eq!(sessions[0].last_used_at, 2000);
    }
    // INSERT OR REPLACE semantics: saving the same id twice keeps one row.
    #[test]
    fn test_save_session_upsert() {
        let db = make_db();
        let mut s = make_session("s1", "First");
        db.save_session(&s).unwrap();
        s.title = "Updated".to_string();
        db.save_session(&s).unwrap();
        let sessions = db.list_sessions().unwrap();
        assert_eq!(sessions.len(), 1);
        assert_eq!(sessions[0].title, "Updated");
    }
    #[test]
    fn test_delete_session() {
        let db = make_db();
        db.save_session(&make_session("s1", "A")).unwrap();
        db.save_session(&make_session("s2", "B")).unwrap();
        assert_eq!(db.list_sessions().unwrap().len(), 2);
        db.delete_session("s1").unwrap();
        let sessions = db.list_sessions().unwrap();
        assert_eq!(sessions.len(), 1);
        assert_eq!(sessions[0].id, "s2");
    }
    // DELETE affecting zero rows is still Ok(()).
    #[test]
    fn test_delete_nonexistent_session_no_error() {
        let db = make_db();
        db.delete_session("nonexistent").unwrap();
    }
    #[test]
    fn test_update_title() {
        let db = make_db();
        db.save_session(&make_session("s1", "Old")).unwrap();
        db.update_title("s1", "New Title").unwrap();
        let sessions = db.list_sessions().unwrap();
        assert_eq!(sessions[0].title, "New Title");
    }
    // Fixture's last_used_at (2000) is far in the past, so the wall-clock
    // timestamp written by touch_session must be strictly greater.
    #[test]
    fn test_touch_session() {
        let db = make_db();
        db.save_session(&make_session("s1", "A")).unwrap();
        let before = db.list_sessions().unwrap()[0].last_used_at;
        db.touch_session("s1").unwrap();
        let after = db.list_sessions().unwrap()[0].last_used_at;
        assert!(after > before);
    }
    // NULLs in shell/cwd/args must round-trip as None.
    #[test]
    fn test_session_with_no_optional_fields() {
        let db = make_db();
        let s = Session {
            id: "s1".to_string(),
            session_type: "agent".to_string(),
            title: "Agent".to_string(),
            shell: None,
            cwd: None,
            args: None,
            group_name: String::new(),
            created_at: 1000,
            last_used_at: 2000,
        };
        db.save_session(&s).unwrap();
        let sessions = db.list_sessions().unwrap();
        assert_eq!(sessions.len(), 1);
        assert!(sessions[0].shell.is_none());
        assert!(sessions[0].cwd.is_none());
        assert!(sessions[0].args.is_none());
    }
    // list_sessions orders by last_used_at DESC regardless of insert order.
    #[test]
    fn test_sessions_ordered_by_last_used_desc() {
        let db = make_db();
        let mut s1 = make_session("s1", "Older");
        s1.last_used_at = 1000;
        let mut s2 = make_session("s2", "Newer");
        s2.last_used_at = 3000;
        let mut s3 = make_session("s3", "Middle");
        s3.last_used_at = 2000;
        db.save_session(&s1).unwrap();
        db.save_session(&s2).unwrap();
        db.save_session(&s3).unwrap();
        let sessions = db.list_sessions().unwrap();
        assert_eq!(sessions[0].id, "s2");
        assert_eq!(sessions[1].id, "s3");
        assert_eq!(sessions[2].id, "s1");
    }
}

View file

@ -0,0 +1,93 @@
// Settings persistence (settings table)
use rusqlite::params;
use super::SessionDb;
impl SessionDb {
    /// Look up a single setting; returns `Ok(None)` when the key is absent.
    pub fn get_setting(&self, key: &str) -> Result<Option<String>, String> {
        let conn = self.conn.lock().unwrap();
        let mut stmt = conn
            .prepare("SELECT value FROM settings WHERE key = ?1")
            .map_err(|e| format!("Settings query failed: {e}"))?;
        // A missing row is a normal outcome, not an error.
        match stmt.query_row(params![key], |row| row.get(0)) {
            Ok(value) => Ok(Some(value)),
            Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
            Err(e) => Err(format!("Settings read failed: {e}")),
        }
    }
    /// Write a key/value pair, overwriting any existing value for the key.
    pub fn set_setting(&self, key: &str, value: &str) -> Result<(), String> {
        let conn = self.conn.lock().unwrap();
        let write = conn.execute(
            "INSERT OR REPLACE INTO settings (key, value) VALUES (?1, ?2)",
            params![key, value],
        );
        match write {
            Ok(_) => Ok(()),
            Err(e) => Err(format!("Settings write failed: {e}")),
        }
    }
    /// Return every (key, value) pair, sorted by key.
    pub fn get_all_settings(&self) -> Result<Vec<(String, String)>, String> {
        let conn = self.conn.lock().unwrap();
        let mut stmt = conn
            .prepare("SELECT key, value FROM settings ORDER BY key")
            .map_err(|e| format!("Settings query failed: {e}"))?;
        let rows = stmt
            .query_map([], |row| Ok((row.get(0)?, row.get(1)?)))
            .map_err(|e| format!("Settings query failed: {e}"))?;
        // Accumulate manually; the first bad row aborts with an error, just
        // like collecting into Result would.
        let mut settings = Vec::new();
        for row in rows {
            settings.push(row.map_err(|e| format!("Settings read failed: {e}"))?);
        }
        Ok(settings)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Fresh on-disk database per test (temp dir guard dropped on return;
    // SQLite keeps working via the open fd on Linux).
    fn make_db() -> SessionDb {
        let dir = tempfile::tempdir().unwrap();
        SessionDb::open(&dir.path().to_path_buf()).unwrap()
    }
    #[test]
    fn test_get_setting_missing_returns_none() {
        let db = make_db();
        let val = db.get_setting("nonexistent").unwrap();
        assert!(val.is_none());
    }
    #[test]
    fn test_set_and_get_setting() {
        let db = make_db();
        db.set_setting("theme", "mocha").unwrap();
        let val = db.get_setting("theme").unwrap();
        assert_eq!(val, Some("mocha".to_string()));
    }
    // INSERT OR REPLACE: second write wins.
    #[test]
    fn test_set_setting_overwrites() {
        let db = make_db();
        db.set_setting("font_size", "12").unwrap();
        db.set_setting("font_size", "14").unwrap();
        assert_eq!(db.get_setting("font_size").unwrap(), Some("14".to_string()));
    }
    // Results come back sorted by key, not by insertion order.
    #[test]
    fn test_get_all_settings() {
        let db = make_db();
        db.set_setting("b_key", "val_b").unwrap();
        db.set_setting("a_key", "val_a").unwrap();
        let all = db.get_all_settings().unwrap();
        assert_eq!(all.len(), 2);
        assert_eq!(all[0].0, "a_key");
        assert_eq!(all[1].0, "b_key");
    }
    #[test]
    fn test_get_all_settings_empty() {
        let db = make_db();
        let all = db.get_all_settings().unwrap();
        assert!(all.is_empty());
    }
}

View file

@ -0,0 +1,149 @@
// SSH session persistence (ssh_sessions table)
use rusqlite::params;
use serde::{Deserialize, Serialize};
use super::SessionDb;
/// A saved SSH host entry (`ssh_sessions` table).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SshSession {
    pub id: String,
    // Display name shown in the session list.
    pub name: String,
    pub host: String,
    // Schema default is 22.
    pub port: i32,
    pub username: String,
    // Path to a private key file; empty string when unset (schema default '').
    pub key_file: String,
    // Organizational folder label; empty string when unset.
    pub folder: String,
    // UI accent color; schema default '#89b4fa'.
    pub color: String,
    // Unix timestamps (seconds).
    pub created_at: i64,
    pub last_used_at: i64,
}
impl SessionDb {
    /// Return all saved SSH hosts, most recently used first.
    pub fn list_ssh_sessions(&self) -> Result<Vec<SshSession>, String> {
        let conn = self.conn.lock().unwrap();
        let mut stmt = conn
            .prepare("SELECT id, name, host, port, username, key_file, folder, color, created_at, last_used_at FROM ssh_sessions ORDER BY last_used_at DESC")
            .map_err(|e| format!("SSH query prepare failed: {e}"))?;
        let rows = stmt
            .query_map([], |r| {
                Ok(SshSession {
                    id: r.get(0)?,
                    name: r.get(1)?,
                    host: r.get(2)?,
                    port: r.get(3)?,
                    username: r.get(4)?,
                    key_file: r.get(5)?,
                    folder: r.get(6)?,
                    color: r.get(7)?,
                    created_at: r.get(8)?,
                    last_used_at: r.get(9)?,
                })
            })
            .map_err(|e| format!("SSH query failed: {e}"))?;
        // Accumulate row by row; the first failing row aborts with an error.
        let mut sessions = Vec::new();
        for row in rows {
            sessions.push(row.map_err(|e| format!("SSH row read failed: {e}"))?);
        }
        Ok(sessions)
    }
    /// Insert an SSH host, or replace the existing row with the same id.
    pub fn save_ssh_session(&self, session: &SshSession) -> Result<(), String> {
        let conn = self.conn.lock().unwrap();
        let write = conn.execute(
            "INSERT OR REPLACE INTO ssh_sessions (id, name, host, port, username, key_file, folder, color, created_at, last_used_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)",
            params![
                session.id,
                session.name,
                session.host,
                session.port,
                session.username,
                session.key_file,
                session.folder,
                session.color,
                session.created_at,
                session.last_used_at,
            ],
        );
        match write {
            Ok(_) => Ok(()),
            Err(e) => Err(format!("SSH insert failed: {e}")),
        }
    }
    /// Delete an SSH host by id; unknown ids are a silent no-op.
    pub fn delete_ssh_session(&self, id: &str) -> Result<(), String> {
        let conn = self.conn.lock().unwrap();
        match conn.execute("DELETE FROM ssh_sessions WHERE id = ?1", params![id]) {
            Ok(_) => Ok(()),
            Err(e) => Err(format!("SSH delete failed: {e}")),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Fresh on-disk database per test (temp dir guard dropped on return;
    // SQLite keeps working via the open fd on Linux).
    fn make_db() -> SessionDb {
        let dir = tempfile::tempdir().unwrap();
        SessionDb::open(&dir.path().to_path_buf()).unwrap()
    }
    // Fully-populated fixture host entry.
    fn make_ssh_session(id: &str, name: &str) -> SshSession {
        SshSession {
            id: id.to_string(),
            name: name.to_string(),
            host: "example.com".to_string(),
            port: 22,
            username: "admin".to_string(),
            key_file: "/home/user/.ssh/id_rsa".to_string(),
            folder: "/srv".to_string(),
            color: "#89b4fa".to_string(),
            created_at: 1000,
            last_used_at: 2000,
        }
    }
    #[test]
    fn test_list_ssh_sessions_empty() {
        let db = make_db();
        let sessions = db.list_ssh_sessions().unwrap();
        assert!(sessions.is_empty());
    }
    // Round-trip of the main fields through save + list.
    #[test]
    fn test_save_and_list_ssh_session() {
        let db = make_db();
        let s = make_ssh_session("ssh1", "Prod Server");
        db.save_ssh_session(&s).unwrap();
        let sessions = db.list_ssh_sessions().unwrap();
        assert_eq!(sessions.len(), 1);
        assert_eq!(sessions[0].id, "ssh1");
        assert_eq!(sessions[0].name, "Prod Server");
        assert_eq!(sessions[0].host, "example.com");
        assert_eq!(sessions[0].port, 22);
        assert_eq!(sessions[0].username, "admin");
    }
    #[test]
    fn test_delete_ssh_session() {
        let db = make_db();
        db.save_ssh_session(&make_ssh_session("ssh1", "A")).unwrap();
        db.save_ssh_session(&make_ssh_session("ssh2", "B")).unwrap();
        db.delete_ssh_session("ssh1").unwrap();
        let sessions = db.list_ssh_sessions().unwrap();
        assert_eq!(sessions.len(), 1);
        assert_eq!(sessions[0].id, "ssh2");
    }
    // INSERT OR REPLACE semantics: same id saved twice keeps one row.
    #[test]
    fn test_ssh_session_upsert() {
        let db = make_db();
        let mut s = make_ssh_session("ssh1", "First");
        db.save_ssh_session(&s).unwrap();
        s.name = "Second".to_string();
        db.save_ssh_session(&s).unwrap();
        let sessions = db.list_ssh_sessions().unwrap();
        assert_eq!(sessions.len(), 1);
        assert_eq!(sessions[0].name, "Second");
    }
}

4
src-tauri/src/sidecar.rs Normal file
View file

@ -0,0 +1,4 @@
// Thin wrapper — re-exports bterminal_core::sidecar types.
// SidecarManager is now in bterminal-core; this module only re-exports for lib.rs.
pub use bterminal_core::sidecar::{AgentQueryOptions, SidecarConfig, SidecarManager};

104
src-tauri/src/telemetry.rs Normal file
View file

@ -0,0 +1,104 @@
// OpenTelemetry telemetry — tracing spans + OTLP export to Tempo/Grafana
//
// Controlled by BTERMINAL_OTLP_ENDPOINT env var:
// - Set (e.g. "http://localhost:4318") → export traces via OTLP/HTTP + console
// - Absent → console-only (no network calls)
use opentelemetry::trace::TracerProvider;
use opentelemetry_sdk::trace::SdkTracerProvider;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};
/// Holds the tracer provider and shuts it down on drop.
/// Store this in Tauri's managed state so it lives for the app lifetime;
/// dropping it early would stop OTLP span export.
pub struct TelemetryGuard {
    // None when running console-only (no OTLP endpoint, test mode, or
    // OTLP setup failure) — see `init`.
    provider: Option<SdkTracerProvider>,
}
impl Drop for TelemetryGuard {
    /// Flush and shut down the exporter exactly once; console-only guards
    /// (provider == None) have nothing to do.
    fn drop(&mut self) {
        // take() moves the provider out, so a second drop path can't re-run shutdown.
        let Some(provider) = self.provider.take() else { return };
        if let Err(e) = provider.shutdown() {
            eprintln!("OTEL shutdown error: {e}");
        }
    }
}
/// Initialize tracing with optional OTLP export.
/// Call once at app startup, before any tracing macros fire.
///
/// Behavior:
/// - `BTERMINAL_OTLP_ENDPOINT` set, non-empty, and not in test mode
///   (`BTERMINAL_TEST=1`) → console layer + OTLP layer; falls back to
///   console-only if OTLP setup fails.
/// - otherwise → console-only.
///
/// Returns a [`TelemetryGuard`] the caller must keep alive so spans are
/// flushed on shutdown. NOTE: `.init()` installs the global default
/// subscriber and will panic if invoked twice — this function must run
/// exactly once per process.
pub fn init() -> TelemetryGuard {
    // RUST_LOG wins when set; otherwise default to info for our crates only.
    let filter = EnvFilter::try_from_default_env()
        .unwrap_or_else(|_| EnvFilter::new("agent_orchestrator=info,agent_orchestrator_lib=info,bterminal_core=info"));
    let fmt_layer = tracing_subscriber::fmt::layer()
        .with_target(true)
        .compact();
    // In test mode, never export telemetry (avoid contaminating production data)
    let is_test = std::env::var("BTERMINAL_TEST").map_or(false, |v| v == "1");
    match std::env::var("BTERMINAL_OTLP_ENDPOINT") {
        Ok(endpoint) if !endpoint.is_empty() && !is_test => {
            match build_otlp_provider(&endpoint) {
                Ok(provider) => {
                    // Bridge tracing spans into OpenTelemetry via the
                    // tracing-opentelemetry layer.
                    let otel_layer = tracing_opentelemetry::layer()
                        .with_tracer(provider.tracer("agent-orchestrator"));
                    tracing_subscriber::registry()
                        .with(filter)
                        .with(fmt_layer)
                        .with(otel_layer)
                        .init();
                    log::info!("Telemetry: OTLP export enabled → {endpoint}");
                    // Guard keeps the provider alive for batch export.
                    TelemetryGuard { provider: Some(provider) }
                }
                Err(e) => {
                    // Fall back to console-only if OTLP setup fails
                    tracing_subscriber::registry()
                        .with(filter)
                        .with(fmt_layer)
                        .init();
                    log::warn!("Telemetry: OTLP setup failed ({e}), console-only fallback");
                    TelemetryGuard { provider: None }
                }
            }
        }
        _ => {
            tracing_subscriber::registry()
                .with(filter)
                .with(fmt_layer)
                .init();
            log::info!("Telemetry: console-only (BTERMINAL_OTLP_ENDPOINT not set)");
            TelemetryGuard { provider: None }
        }
    }
}
fn build_otlp_provider(endpoint: &str) -> Result<SdkTracerProvider, Box<dyn std::error::Error>> {
use opentelemetry_otlp::{SpanExporter, WithExportConfig};
use opentelemetry_sdk::trace::SdkTracerProvider;
use opentelemetry_sdk::Resource;
use opentelemetry::KeyValue;
let exporter = SpanExporter::builder()
.with_http()
.with_endpoint(endpoint)
.build()?;
let resource = Resource::builder()
.with_attributes([
KeyValue::new("service.name", "agent-orchestrator"),
KeyValue::new("service.version", env!("CARGO_PKG_VERSION")),
])
.build();
let provider = SdkTracerProvider::builder()
.with_batch_exporter(exporter)
.with_resource(resource)
.build();
Ok(provider)
}

105
src-tauri/src/watcher.rs Normal file
View file

@ -0,0 +1,105 @@
// File watcher for markdown viewer
// Uses notify crate to watch files and emit Tauri events on change
use notify::{Config, Event, RecommendedWatcher, RecursiveMode, Watcher};
use serde::Serialize;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Mutex;
use tauri::Emitter;
// Payload emitted to the frontend on the "file-changed" event.
#[derive(Clone, Serialize)]
struct FileChangedPayload {
    pane_id: String, // which pane requested the watch
    path: String,    // watched file path (lossy UTF-8 of the PathBuf)
    content: String, // full new contents of the file
}
// Keeps the watcher alive for the lifetime of the entry: dropping a
// RecommendedWatcher stops its event delivery, so the fields are held
// purely for ownership (hence the underscore names).
struct WatchEntry {
    _watcher: RecommendedWatcher,
    _path: PathBuf,
}
/// Tracks one active file watch per pane id for the markdown viewer.
pub struct FileWatcherManager {
    // pane_id → watch entry; inserting over an existing key drops the old
    // watcher, which stops its event stream.
    watchers: Mutex<HashMap<String, WatchEntry>>,
}
impl FileWatcherManager {
    /// Create a manager with no active watches.
    pub fn new() -> Self {
        Self {
            watchers: Mutex::new(HashMap::new()),
        }
    }
    /// Start watching `path` on behalf of `pane_id` and return the file's
    /// current contents.
    ///
    /// On subsequent modifications a `"file-changed"` event with a
    /// [`FileChangedPayload`] is emitted through `app`. The watch replaces
    /// any previous watch registered under the same `pane_id`.
    ///
    /// # Errors
    /// Returns a message when the file is missing/unreadable or the watcher
    /// cannot be created or attached.
    pub fn watch(
        &self,
        app: &tauri::AppHandle,
        pane_id: &str,
        path: &str,
    ) -> Result<String, String> {
        // In test mode, skip file watching to avoid inotify noise and flaky events
        if std::env::var("BTERMINAL_TEST").map_or(false, |v| v == "1") {
            return std::fs::read_to_string(path)
                .map_err(|e| format!("Failed to read file: {e}"));
        }
        let file_path = PathBuf::from(path);
        if !file_path.exists() {
            return Err(format!("File not found: {path}"));
        }
        // Read initial content
        let content = std::fs::read_to_string(&file_path)
            .map_err(|e| format!("Failed to read file: {e}"))?;
        // Set up watcher; the closure runs on notify's own thread, so it
        // captures owned clones of the handle, pane id, and path.
        let app_handle = app.clone();
        let pane_id_owned = pane_id.to_string();
        let watch_path = file_path.clone();
        let mut watcher = RecommendedWatcher::new(
            move |res: Result<Event, notify::Error>| {
                if let Ok(event) = res {
                    if event.kind.is_modify() {
                        // Re-read the whole file; unreadable states (e.g.
                        // mid-write) are silently skipped until the next event.
                        if let Ok(new_content) = std::fs::read_to_string(&watch_path) {
                            let _ = app_handle.emit(
                                "file-changed",
                                FileChangedPayload {
                                    pane_id: pane_id_owned.clone(),
                                    path: watch_path.to_string_lossy().to_string(),
                                    content: new_content,
                                },
                            );
                        }
                    }
                }
            },
            Config::default(),
        )
        .map_err(|e| format!("Failed to create watcher: {e}"))?;
        // Watch the parent directory non-recursively: editors often replace
        // files atomically, and watching the directory survives that.
        let watch_dir = file_path.parent()
            .ok_or_else(|| format!("Cannot watch root-level path: {path}"))?;
        watcher
            .watch(watch_dir, RecursiveMode::NonRecursive)
            .map_err(|e| format!("Failed to watch path: {e}"))?;
        let mut watchers = self.watchers.lock().unwrap();
        watchers.insert(pane_id.to_string(), WatchEntry {
            _watcher: watcher,
            _path: file_path,
        });
        Ok(content)
    }
    /// Stop the watch registered for `pane_id`; dropping the entry stops
    /// event delivery. Unknown pane ids are a no-op.
    pub fn unwatch(&self, pane_id: &str) {
        let mut watchers = self.watchers.lock().unwrap();
        watchers.remove(pane_id);
    }
    /// One-shot read of a file without installing a watch.
    pub fn read_file(&self, path: &str) -> Result<String, String> {
        std::fs::read_to_string(path)
            .map_err(|e| format!("Failed to read file: {e}"))
    }
}
/// `new()` takes no arguments, so provide the conventional `Default` as well
/// (clippy `new_without_default`). Purely additive; callers are unaffected.
impl Default for FileWatcherManager {
    fn default() -> Self {
        Self::new()
    }
}

71
src-tauri/tauri.conf.json Normal file
View file

@ -0,0 +1,71 @@
{
"$schema": "https://schema.tauri.app/config/2",
"productName": "agent-orchestrator",
"version": "0.1.0",
"identifier": "com.dexterfromlab.agent-orchestrator",
"build": {
"frontendDist": "../dist",
"devUrl": "http://localhost:9700",
"beforeDevCommand": "npm run dev",
"beforeBuildCommand": "npm run build"
},
"app": {
"windows": [
{
"title": "Agent Orchestrator",
"width": 1920,
"height": 1080,
"resizable": true,
"fullscreen": false,
"decorations": true,
"backgroundColor": [30, 30, 46, 255]
}
],
"security": {
"csp": null
}
},
"plugins": {
"updater": {
"endpoints": [
"https://github.com/DexterFromLab/agent-orchestrator/releases/latest/download/latest.json"
],
"dialog": true,
"pubkey": "dW50cnVzdGVkIGNvbW1lbnQ6IG1pbmlzaWduIHB1YmxpYyBrZXk6IEJCRkZEMERDMTUwMzY5MjIKUldRaWFRTVYzTkQvdTYwRDh6YStaSE9rWUZYYkRGd3UvVUcydE1IQVdTM29uNTRPTlpjQmFqVFEK"
}
},
"bundle": {
"active": true,
"targets": ["deb", "appimage"],
"icon": [
"icons/32x32.png",
"icons/128x128.png",
"icons/128x128@2x.png",
"icons/icon.icns",
"icons/icon.ico"
],
"resources": [
"../sidecar/dist/claude-runner.mjs",
"../sidecar/dist/aider-runner.mjs",
"../btmsg",
"../bttask"
],
"category": "DeveloperTool",
"shortDescription": "Multi-session Claude agent dashboard",
"longDescription": "Agent Orchestrator is a multi-project agent dashboard with integrated Claude AI sessions, SSH management, and multi-agent orchestration. Built with Tauri, Svelte 5, and xterm.js.",
"linux": {
"deb": {
"depends": [
"libwebkit2gtk-4.1-0",
"libgtk-3-0",
"libayatana-appindicator3-1"
],
"section": "devel",
"priority": "optional"
},
"appimage": {
"bundleMediaFramework": false
}
}
}
}