refactor!: rebrand bterminal to agor (agents-orchestrator)
Rename Cargo crates (bterminal-core→agor-core, bterminal-relay→agor-relay), env vars (BTERMINAL_*→AGOR_*), config paths (~/.config/agor), CSS custom properties, plugin API object, package names, and all documentation. BREAKING CHANGE: config/data paths changed from bterminal to agor.
This commit is contained in:
parent
ef3548a569
commit
a63e6711ac
52 changed files with 3889 additions and 169 deletions
12
.github/workflows/e2e.yml
vendored
12
.github/workflows/e2e.yml
vendored
|
|
@ -6,7 +6,7 @@ on:
|
|||
paths:
|
||||
- 'v2/src/**'
|
||||
- 'v2/src-tauri/**'
|
||||
- 'v2/bterminal-core/**'
|
||||
- 'v2/agor-core/**'
|
||||
- 'v2/tests/e2e/**'
|
||||
- '.github/workflows/e2e.yml'
|
||||
pull_request:
|
||||
|
|
@ -14,7 +14,7 @@ on:
|
|||
paths:
|
||||
- 'v2/src/**'
|
||||
- 'v2/src-tauri/**'
|
||||
- 'v2/bterminal-core/**'
|
||||
- 'v2/agor-core/**'
|
||||
- 'v2/tests/e2e/**'
|
||||
workflow_dispatch:
|
||||
|
||||
|
|
@ -134,19 +134,19 @@ jobs:
|
|||
- name: Run E2E tests (Phase A — deterministic)
|
||||
working-directory: v2
|
||||
env:
|
||||
BTERMINAL_TEST: '1'
|
||||
AGOR_TEST: '1'
|
||||
SKIP_BUILD: '1'
|
||||
run: |
|
||||
xvfb-run --auto-servernum --server-args="-screen 0 1920x1080x24" \
|
||||
npx wdio tests/e2e/wdio.conf.js \
|
||||
--spec tests/e2e/specs/bterminal.test.ts \
|
||||
--spec tests/e2e/specs/agor.test.ts \
|
||||
--spec tests/e2e/specs/agent-scenarios.test.ts
|
||||
|
||||
- name: Run E2E tests (Phase B — multi-project)
|
||||
if: success()
|
||||
working-directory: v2
|
||||
env:
|
||||
BTERMINAL_TEST: '1'
|
||||
AGOR_TEST: '1'
|
||||
SKIP_BUILD: '1'
|
||||
run: |
|
||||
xvfb-run --auto-servernum --server-args="-screen 0 1920x1080x24" \
|
||||
|
|
@ -158,7 +158,7 @@ jobs:
|
|||
if: success() && env.ANTHROPIC_API_KEY != ''
|
||||
working-directory: v2
|
||||
env:
|
||||
BTERMINAL_TEST: '1'
|
||||
AGOR_TEST: '1'
|
||||
SKIP_BUILD: '1'
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
run: |
|
||||
|
|
|
|||
10
.github/workflows/release.yml
vendored
10
.github/workflows/release.yml
vendored
|
|
@ -85,7 +85,7 @@ jobs:
|
|||
"platforms": {
|
||||
"linux-x86_64": {
|
||||
"signature": "${SIG}",
|
||||
"url": "https://github.com/DexterFromLab/BTerminal/releases/download/${GITHUB_REF_NAME}/${APPIMAGE_NAME}"
|
||||
"url": "https://github.com/agents-orchestrator/agents-orchestrator/releases/download/${GITHUB_REF_NAME}/${APPIMAGE_NAME}"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -94,13 +94,13 @@ jobs:
|
|||
- name: Upload .deb
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bterminal-deb
|
||||
name: agor-deb
|
||||
path: v2/src-tauri/target/release/bundle/deb/*.deb
|
||||
|
||||
- name: Upload AppImage
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: bterminal-appimage
|
||||
name: agor-appimage
|
||||
path: v2/src-tauri/target/release/bundle/appimage/*.AppImage
|
||||
|
||||
- name: Upload latest.json
|
||||
|
|
@ -118,13 +118,13 @@ jobs:
|
|||
- name: Download .deb
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bterminal-deb
|
||||
name: agor-deb
|
||||
path: artifacts/
|
||||
|
||||
- name: Download AppImage
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: bterminal-appimage
|
||||
name: agor-appimage
|
||||
path: artifacts/
|
||||
|
||||
- name: Download latest.json
|
||||
|
|
|
|||
4
.vscode/launch.json
vendored
4
.vscode/launch.json
vendored
|
|
@ -2,10 +2,10 @@
|
|||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Launch BTerminal (v1)",
|
||||
"name": "Launch Agents Orchestrator (v1)",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"program": "${workspaceFolder}/bterminal.py"
|
||||
"program": "${workspaceFolder}/# v1 removed"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
|||
2
.vscode/tasks.json
vendored
2
.vscode/tasks.json
vendored
|
|
@ -4,7 +4,7 @@
|
|||
{
|
||||
"label": "run",
|
||||
"type": "shell",
|
||||
"command": "python3 ${workspaceFolder}/bterminal.py",
|
||||
"command": "python3 ${workspaceFolder}/# v1 removed",
|
||||
"group": {
|
||||
"kind": "build",
|
||||
"isDefault": true
|
||||
|
|
|
|||
76
Cargo.lock
generated
76
Cargo.lock
generated
|
|
@ -12,7 +12,8 @@ checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
|
|||
name = "agent-orchestrator"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"bterminal-core",
|
||||
"agor-core",
|
||||
"agor-pro",
|
||||
"dirs 5.0.1",
|
||||
"futures-util",
|
||||
"hex",
|
||||
|
|
@ -43,6 +44,48 @@ dependencies = [
|
|||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "agor-core"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"dirs 5.0.1",
|
||||
"landlock",
|
||||
"log",
|
||||
"portable-pty",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "agor-pro"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"agor-core",
|
||||
"log",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tauri",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "agor-relay"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"agor-core",
|
||||
"clap",
|
||||
"env_logger",
|
||||
"futures-util",
|
||||
"log",
|
||||
"native-tls",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tokio",
|
||||
"tokio-native-tls",
|
||||
"tokio-tungstenite",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ahash"
|
||||
version = "0.8.12"
|
||||
|
|
@ -398,37 +441,6 @@ dependencies = [
|
|||
"alloc-stdlib",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bterminal-core"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"dirs 5.0.1",
|
||||
"landlock",
|
||||
"log",
|
||||
"portable-pty",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bterminal-relay"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"bterminal-core",
|
||||
"clap",
|
||||
"env_logger",
|
||||
"futures-util",
|
||||
"log",
|
||||
"native-tls",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tokio",
|
||||
"tokio-native-tls",
|
||||
"tokio-tungstenite",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bumpalo"
|
||||
version = "3.20.2"
|
||||
|
|
|
|||
|
|
@ -1,3 +1,3 @@
|
|||
[workspace]
|
||||
members = ["src-tauri", "bterminal-core", "bterminal-relay"]
|
||||
members = ["src-tauri", "agor-core", "agor-relay", "agor-pro"]
|
||||
resolver = "2"
|
||||
|
|
|
|||
15
agor-core/Cargo.toml
Normal file
15
agor-core/Cargo.toml
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
[package]
|
||||
name = "agor-core"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Shared PTY and sidecar management for Agents Orchestrator"
|
||||
license = "MIT"
|
||||
|
||||
[dependencies]
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
log = "0.4"
|
||||
portable-pty = "0.8"
|
||||
uuid = { version = "1", features = ["v4"] }
|
||||
dirs = "5"
|
||||
landlock = "0.4"
|
||||
209
agor-core/src/config.rs
Normal file
209
agor-core/src/config.rs
Normal file
|
|
@ -0,0 +1,209 @@
|
|||
// AppConfig — centralized path resolution for all Agents Orchestrator subsystems.
|
||||
// In production, paths resolve via dirs:: crate defaults.
|
||||
// In test mode (AGOR_TEST=1), paths resolve from env var overrides:
|
||||
// AGOR_TEST_DATA_DIR → replaces dirs::data_dir()/agor
|
||||
// AGOR_TEST_CONFIG_DIR → replaces dirs::config_dir()/agor
|
||||
// AGOR_TEST_CTX_DIR → replaces ~/.claude-context
|
||||
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AppConfig {
|
||||
/// Data directory for btmsg.db, sessions.db (default: ~/.local/share/agor)
|
||||
pub data_dir: PathBuf,
|
||||
/// Config directory for groups.json (default: ~/.config/agor)
|
||||
pub config_dir: PathBuf,
|
||||
/// ctx database path (default: ~/.claude-context/context.db)
|
||||
pub ctx_db_path: PathBuf,
|
||||
/// Memora database path (default: ~/.local/share/memora/memories.db)
|
||||
pub memora_db_path: PathBuf,
|
||||
/// Whether we are in test mode
|
||||
pub test_mode: bool,
|
||||
}
|
||||
|
||||
impl AppConfig {
|
||||
/// Build config from environment. In test mode, uses AGOR_TEST_*_DIR env vars.
|
||||
pub fn from_env() -> Self {
|
||||
let test_mode = std::env::var("AGOR_TEST").map_or(false, |v| v == "1");
|
||||
|
||||
let data_dir = std::env::var("AGOR_TEST_DATA_DIR")
|
||||
.ok()
|
||||
.filter(|_| test_mode)
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(|| {
|
||||
dirs::data_dir()
|
||||
.unwrap_or_else(|| PathBuf::from("."))
|
||||
.join("agor")
|
||||
});
|
||||
|
||||
let config_dir = std::env::var("AGOR_TEST_CONFIG_DIR")
|
||||
.ok()
|
||||
.filter(|_| test_mode)
|
||||
.map(PathBuf::from)
|
||||
.unwrap_or_else(|| {
|
||||
dirs::config_dir()
|
||||
.unwrap_or_else(|| PathBuf::from("."))
|
||||
.join("agor")
|
||||
});
|
||||
|
||||
let ctx_db_path = std::env::var("AGOR_TEST_CTX_DIR")
|
||||
.ok()
|
||||
.filter(|_| test_mode)
|
||||
.map(|d| PathBuf::from(d).join("context.db"))
|
||||
.unwrap_or_else(|| {
|
||||
dirs::home_dir()
|
||||
.unwrap_or_default()
|
||||
.join(".claude-context")
|
||||
.join("context.db")
|
||||
});
|
||||
|
||||
let memora_db_path = if test_mode {
|
||||
// In test mode, memora is optional — use data_dir/memora/memories.db
|
||||
data_dir.join("memora").join("memories.db")
|
||||
} else {
|
||||
dirs::data_dir()
|
||||
.unwrap_or_else(|| {
|
||||
dirs::home_dir()
|
||||
.unwrap_or_default()
|
||||
.join(".local/share")
|
||||
})
|
||||
.join("memora")
|
||||
.join("memories.db")
|
||||
};
|
||||
|
||||
Self {
|
||||
data_dir,
|
||||
config_dir,
|
||||
ctx_db_path,
|
||||
memora_db_path,
|
||||
test_mode,
|
||||
}
|
||||
}
|
||||
|
||||
/// Path to btmsg.db (shared between btmsg and bttask)
|
||||
pub fn btmsg_db_path(&self) -> PathBuf {
|
||||
self.data_dir.join("btmsg.db")
|
||||
}
|
||||
|
||||
/// Path to sessions.db
|
||||
pub fn sessions_db_dir(&self) -> &PathBuf {
|
||||
&self.data_dir
|
||||
}
|
||||
|
||||
/// Path to groups.json
|
||||
pub fn groups_json_path(&self) -> PathBuf {
|
||||
self.config_dir.join("groups.json")
|
||||
}
|
||||
|
||||
/// Path to plugins directory
|
||||
pub fn plugins_dir(&self) -> PathBuf {
|
||||
self.config_dir.join("plugins")
|
||||
}
|
||||
|
||||
/// Whether running in test mode (AGOR_TEST=1)
|
||||
pub fn is_test_mode(&self) -> bool {
|
||||
self.test_mode
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::sync::Mutex;
|
||||
|
||||
// Serialize all tests that mutate env vars to prevent race conditions.
|
||||
// Rust runs tests in parallel; set_var/remove_var are process-global.
|
||||
static ENV_LOCK: Mutex<()> = Mutex::new(());
|
||||
|
||||
#[test]
|
||||
fn test_production_paths_use_dirs() {
|
||||
let _lock = ENV_LOCK.lock().unwrap();
|
||||
// Without AGOR_TEST=1, paths should use dirs:: defaults
|
||||
std::env::remove_var("AGOR_TEST");
|
||||
std::env::remove_var("AGOR_TEST_DATA_DIR");
|
||||
std::env::remove_var("AGOR_TEST_CONFIG_DIR");
|
||||
std::env::remove_var("AGOR_TEST_CTX_DIR");
|
||||
|
||||
let config = AppConfig::from_env();
|
||||
assert!(!config.is_test_mode());
|
||||
// Should end with "agor" for data and config
|
||||
assert!(config.data_dir.ends_with("agor"));
|
||||
assert!(config.config_dir.ends_with("agor"));
|
||||
assert!(config.ctx_db_path.ends_with("context.db"));
|
||||
assert!(config.memora_db_path.ends_with("memories.db"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_btmsg_db_path() {
|
||||
let _lock = ENV_LOCK.lock().unwrap();
|
||||
std::env::remove_var("AGOR_TEST");
|
||||
let config = AppConfig::from_env();
|
||||
let path = config.btmsg_db_path();
|
||||
assert!(path.ends_with("btmsg.db"));
|
||||
assert!(path.parent().unwrap().ends_with("agor"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_groups_json_path() {
|
||||
let _lock = ENV_LOCK.lock().unwrap();
|
||||
std::env::remove_var("AGOR_TEST");
|
||||
let config = AppConfig::from_env();
|
||||
let path = config.groups_json_path();
|
||||
assert!(path.ends_with("groups.json"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_test_mode_uses_overrides() {
|
||||
let _lock = ENV_LOCK.lock().unwrap();
|
||||
std::env::set_var("AGOR_TEST", "1");
|
||||
std::env::set_var("AGOR_TEST_DATA_DIR", "/tmp/agor-test-data");
|
||||
std::env::set_var("AGOR_TEST_CONFIG_DIR", "/tmp/agor-test-config");
|
||||
std::env::set_var("AGOR_TEST_CTX_DIR", "/tmp/agor-test-ctx");
|
||||
|
||||
let config = AppConfig::from_env();
|
||||
assert!(config.is_test_mode());
|
||||
assert_eq!(config.data_dir, PathBuf::from("/tmp/agor-test-data"));
|
||||
assert_eq!(config.config_dir, PathBuf::from("/tmp/agor-test-config"));
|
||||
assert_eq!(config.ctx_db_path, PathBuf::from("/tmp/agor-test-ctx/context.db"));
|
||||
assert_eq!(config.btmsg_db_path(), PathBuf::from("/tmp/agor-test-data/btmsg.db"));
|
||||
assert_eq!(config.groups_json_path(), PathBuf::from("/tmp/agor-test-config/groups.json"));
|
||||
|
||||
// Cleanup
|
||||
std::env::remove_var("AGOR_TEST");
|
||||
std::env::remove_var("AGOR_TEST_DATA_DIR");
|
||||
std::env::remove_var("AGOR_TEST_CONFIG_DIR");
|
||||
std::env::remove_var("AGOR_TEST_CTX_DIR");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_test_mode_without_overrides_uses_defaults() {
|
||||
let _lock = ENV_LOCK.lock().unwrap();
|
||||
std::env::set_var("AGOR_TEST", "1");
|
||||
std::env::remove_var("AGOR_TEST_DATA_DIR");
|
||||
std::env::remove_var("AGOR_TEST_CONFIG_DIR");
|
||||
std::env::remove_var("AGOR_TEST_CTX_DIR");
|
||||
|
||||
let config = AppConfig::from_env();
|
||||
assert!(config.is_test_mode());
|
||||
// Without override vars, falls back to dirs:: defaults
|
||||
assert!(config.data_dir.ends_with("agor"));
|
||||
|
||||
std::env::remove_var("AGOR_TEST");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_test_mode_memora_in_data_dir() {
|
||||
let _lock = ENV_LOCK.lock().unwrap();
|
||||
std::env::set_var("AGOR_TEST", "1");
|
||||
std::env::set_var("AGOR_TEST_DATA_DIR", "/tmp/agor-test-data");
|
||||
|
||||
let config = AppConfig::from_env();
|
||||
assert_eq!(
|
||||
config.memora_db_path,
|
||||
PathBuf::from("/tmp/agor-test-data/memora/memories.db")
|
||||
);
|
||||
|
||||
std::env::remove_var("AGOR_TEST");
|
||||
std::env::remove_var("AGOR_TEST_DATA_DIR");
|
||||
}
|
||||
}
|
||||
5
agor-core/src/event.rs
Normal file
5
agor-core/src/event.rs
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
/// Trait for emitting events from PTY and sidecar managers.
|
||||
/// Implemented by Tauri's AppHandle (controller) and WebSocket sender (relay).
|
||||
pub trait EventSink: Send + Sync {
|
||||
fn emit(&self, event: &str, payload: serde_json::Value);
|
||||
}
|
||||
6
agor-core/src/lib.rs
Normal file
6
agor-core/src/lib.rs
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
pub mod config;
|
||||
pub mod event;
|
||||
pub mod pty;
|
||||
pub mod sandbox;
|
||||
pub mod sidecar;
|
||||
pub mod supervisor;
|
||||
173
agor-core/src/pty.rs
Normal file
173
agor-core/src/pty.rs
Normal file
|
|
@ -0,0 +1,173 @@
|
|||
use portable_pty::{native_pty_system, CommandBuilder, MasterPty, PtySize};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::io::{BufReader, Write};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::event::EventSink;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct PtyOptions {
|
||||
pub shell: Option<String>,
|
||||
pub cwd: Option<String>,
|
||||
pub args: Option<Vec<String>>,
|
||||
pub cols: Option<u16>,
|
||||
pub rows: Option<u16>,
|
||||
}
|
||||
|
||||
struct PtyInstance {
|
||||
master: Box<dyn MasterPty + Send>,
|
||||
writer: Box<dyn Write + Send>,
|
||||
}
|
||||
|
||||
pub struct PtyManager {
|
||||
instances: Arc<Mutex<HashMap<String, PtyInstance>>>,
|
||||
sink: Arc<dyn EventSink>,
|
||||
}
|
||||
|
||||
impl PtyManager {
|
||||
pub fn new(sink: Arc<dyn EventSink>) -> Self {
|
||||
Self {
|
||||
instances: Arc::new(Mutex::new(HashMap::new())),
|
||||
sink,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn spawn(&self, options: PtyOptions) -> Result<String, String> {
|
||||
let pty_system = native_pty_system();
|
||||
let cols = options.cols.unwrap_or(80);
|
||||
let rows = options.rows.unwrap_or(24);
|
||||
|
||||
let pair = pty_system
|
||||
.openpty(PtySize {
|
||||
rows,
|
||||
cols,
|
||||
pixel_width: 0,
|
||||
pixel_height: 0,
|
||||
})
|
||||
.map_err(|e| format!("Failed to open PTY: {e}"))?;
|
||||
|
||||
let shell = options.shell.unwrap_or_else(|| {
|
||||
std::env::var("SHELL").unwrap_or_else(|_| "/bin/bash".to_string())
|
||||
});
|
||||
|
||||
let mut cmd = CommandBuilder::new(&shell);
|
||||
if let Some(args) = &options.args {
|
||||
for arg in args {
|
||||
cmd.arg(arg);
|
||||
}
|
||||
}
|
||||
if let Some(cwd) = &options.cwd {
|
||||
cmd.cwd(cwd);
|
||||
}
|
||||
|
||||
let _child = pair
|
||||
.slave
|
||||
.spawn_command(cmd)
|
||||
.map_err(|e| format!("Failed to spawn command: {e}"))?;
|
||||
|
||||
drop(pair.slave);
|
||||
|
||||
let id = Uuid::new_v4().to_string();
|
||||
let reader = pair
|
||||
.master
|
||||
.try_clone_reader()
|
||||
.map_err(|e| format!("Failed to clone PTY reader: {e}"))?;
|
||||
let writer = pair
|
||||
.master
|
||||
.take_writer()
|
||||
.map_err(|e| format!("Failed to take PTY writer: {e}"))?;
|
||||
|
||||
let event_id = id.clone();
|
||||
let sink = self.sink.clone();
|
||||
thread::spawn(move || {
|
||||
let mut buf_reader = BufReader::with_capacity(4096, reader);
|
||||
let mut buf = vec![0u8; 4096];
|
||||
loop {
|
||||
match std::io::Read::read(&mut buf_reader, &mut buf) {
|
||||
Ok(0) => {
|
||||
sink.emit(
|
||||
&format!("pty-exit-{event_id}"),
|
||||
serde_json::Value::Null,
|
||||
);
|
||||
break;
|
||||
}
|
||||
Ok(n) => {
|
||||
let data = String::from_utf8_lossy(&buf[..n]).to_string();
|
||||
sink.emit(
|
||||
&format!("pty-data-{event_id}"),
|
||||
serde_json::Value::String(data),
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
log::error!("PTY read error for {event_id}: {e}");
|
||||
sink.emit(
|
||||
&format!("pty-exit-{event_id}"),
|
||||
serde_json::Value::Null,
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let instance = PtyInstance {
|
||||
master: pair.master,
|
||||
writer,
|
||||
};
|
||||
self.instances.lock().unwrap().insert(id.clone(), instance);
|
||||
|
||||
log::info!("Spawned PTY {id} ({shell})");
|
||||
Ok(id)
|
||||
}
|
||||
|
||||
pub fn write(&self, id: &str, data: &str) -> Result<(), String> {
|
||||
let mut instances = self.instances.lock().unwrap();
|
||||
let instance = instances
|
||||
.get_mut(id)
|
||||
.ok_or_else(|| format!("PTY {id} not found"))?;
|
||||
instance
|
||||
.writer
|
||||
.write_all(data.as_bytes())
|
||||
.map_err(|e| format!("PTY write error: {e}"))?;
|
||||
instance
|
||||
.writer
|
||||
.flush()
|
||||
.map_err(|e| format!("PTY flush error: {e}"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn resize(&self, id: &str, cols: u16, rows: u16) -> Result<(), String> {
|
||||
let instances = self.instances.lock().unwrap();
|
||||
let instance = instances
|
||||
.get(id)
|
||||
.ok_or_else(|| format!("PTY {id} not found"))?;
|
||||
instance
|
||||
.master
|
||||
.resize(PtySize {
|
||||
rows,
|
||||
cols,
|
||||
pixel_width: 0,
|
||||
pixel_height: 0,
|
||||
})
|
||||
.map_err(|e| format!("PTY resize error: {e}"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn kill(&self, id: &str) -> Result<(), String> {
|
||||
let mut instances = self.instances.lock().unwrap();
|
||||
if instances.remove(id).is_some() {
|
||||
log::info!("Killed PTY {id}");
|
||||
Ok(())
|
||||
} else {
|
||||
Err(format!("PTY {id} not found"))
|
||||
}
|
||||
}
|
||||
|
||||
/// List active PTY session IDs.
|
||||
pub fn list_sessions(&self) -> Vec<String> {
|
||||
self.instances.lock().unwrap().keys().cloned().collect()
|
||||
}
|
||||
}
|
||||
361
agor-core/src/sandbox.rs
Normal file
361
agor-core/src/sandbox.rs
Normal file
|
|
@ -0,0 +1,361 @@
|
|||
// Landlock-based filesystem sandboxing for sidecar processes.
|
||||
//
|
||||
// Landlock is a Linux Security Module (LSM) available since kernel 5.13.
|
||||
// It restricts filesystem access for the calling process and all its children.
|
||||
// Applied via pre_exec() on the sidecar child process before exec.
|
||||
//
|
||||
// Restrictions can only be tightened after application — never relaxed.
|
||||
// The sidecar is long-lived and handles queries for multiple projects,
|
||||
// so we apply the union of all project paths at sidecar start time.
|
||||
|
||||
use std::path::PathBuf;
|
||||
|
||||
use landlock::{
|
||||
Access, AccessFs, PathBeneath, PathFd, Ruleset, RulesetAttr, RulesetCreatedAttr,
|
||||
RulesetStatus, ABI,
|
||||
};
|
||||
|
||||
/// Target Landlock ABI version. V3 requires kernel 6.2+ (we run 6.12+).
|
||||
/// Falls back gracefully on older kernels via best-effort mode.
|
||||
const TARGET_ABI: ABI = ABI::V3;
|
||||
|
||||
/// Configuration for Landlock filesystem sandboxing.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SandboxConfig {
|
||||
/// Directories with full read+write+execute access (project CWDs, worktrees, tmp)
|
||||
pub rw_paths: Vec<PathBuf>,
|
||||
/// Directories with read-only access (system libs, runtimes, config)
|
||||
pub ro_paths: Vec<PathBuf>,
|
||||
/// Whether sandboxing is enabled
|
||||
pub enabled: bool,
|
||||
}
|
||||
|
||||
impl Default for SandboxConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
rw_paths: Vec::new(),
|
||||
ro_paths: Vec::new(),
|
||||
enabled: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SandboxConfig {
|
||||
/// Build a sandbox config for a set of project directories.
|
||||
///
|
||||
/// `project_cwds` — directories that need read+write access (one per project).
|
||||
/// `worktree_roots` — optional worktree directories (one per project that uses worktrees).
|
||||
///
|
||||
/// System paths (runtimes, libraries, /etc) are added as read-only automatically.
|
||||
pub fn for_projects(project_cwds: &[&str], worktree_roots: &[&str]) -> Self {
|
||||
let mut rw = Vec::new();
|
||||
|
||||
for cwd in project_cwds {
|
||||
rw.push(PathBuf::from(cwd));
|
||||
}
|
||||
for wt in worktree_roots {
|
||||
rw.push(PathBuf::from(wt));
|
||||
}
|
||||
|
||||
// Temp dir for sidecar scratch files
|
||||
rw.push(std::env::temp_dir());
|
||||
|
||||
let home = dirs::home_dir().unwrap_or_else(|| PathBuf::from("/root"));
|
||||
|
||||
let ro = vec![
|
||||
PathBuf::from("/usr"), // system binaries + libraries
|
||||
PathBuf::from("/lib"), // shared libraries
|
||||
PathBuf::from("/lib64"), // 64-bit shared libraries
|
||||
PathBuf::from("/etc"), // system configuration (read only)
|
||||
PathBuf::from("/proc"), // process info (Landlock V3+ handles this)
|
||||
PathBuf::from("/dev"), // device nodes (stdin/stdout/stderr, /dev/null, urandom)
|
||||
PathBuf::from("/bin"), // essential binaries (symlink to /usr/bin on most distros)
|
||||
PathBuf::from("/sbin"), // essential system binaries
|
||||
home.join(".local"), // ~/.local/bin (claude CLI, user-installed tools)
|
||||
home.join(".deno"), // Deno runtime cache
|
||||
home.join(".nvm"), // Node.js version manager
|
||||
home.join(".config"), // XDG config (claude profiles, agor config)
|
||||
home.join(".claude"), // Claude CLI data (worktrees, skills, settings)
|
||||
];
|
||||
|
||||
Self {
|
||||
rw_paths: rw,
|
||||
ro_paths: ro,
|
||||
enabled: true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a restricted sandbox config for Aider agent sessions.
|
||||
/// More restrictive than `for_projects`: only project worktree + read-only system paths.
|
||||
/// Does NOT allow write access to ~/.config, ~/.claude, etc.
|
||||
pub fn for_aider_restricted(project_cwd: &str, worktree: Option<&str>) -> Self {
|
||||
let mut rw = vec![PathBuf::from(project_cwd)];
|
||||
if let Some(wt) = worktree {
|
||||
rw.push(PathBuf::from(wt));
|
||||
}
|
||||
rw.push(std::env::temp_dir());
|
||||
let home = dirs::home_dir().unwrap_or_else(|| PathBuf::from("/root"));
|
||||
rw.push(home.join(".aider"));
|
||||
|
||||
let ro = vec![
|
||||
PathBuf::from("/usr"),
|
||||
PathBuf::from("/lib"),
|
||||
PathBuf::from("/lib64"),
|
||||
PathBuf::from("/etc"),
|
||||
PathBuf::from("/proc"),
|
||||
PathBuf::from("/dev"),
|
||||
PathBuf::from("/bin"),
|
||||
PathBuf::from("/sbin"),
|
||||
home.join(".local"),
|
||||
home.join(".deno"),
|
||||
home.join(".nvm"),
|
||||
];
|
||||
|
||||
Self {
|
||||
rw_paths: rw,
|
||||
ro_paths: ro,
|
||||
enabled: true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a sandbox config for a single project directory.
|
||||
pub fn for_project(cwd: &str, worktree: Option<&str>) -> Self {
|
||||
let worktrees: Vec<&str> = worktree.into_iter().collect();
|
||||
Self::for_projects(&[cwd], &worktrees)
|
||||
}
|
||||
|
||||
/// Apply Landlock restrictions to the current process.
|
||||
///
|
||||
/// This must be called in the child process (e.g., via `pre_exec`) BEFORE exec.
|
||||
/// Once applied, restrictions are inherited by all child processes and cannot be relaxed.
|
||||
///
|
||||
/// Returns:
|
||||
/// - `Ok(true)` if Landlock was applied and enforced
|
||||
/// - `Ok(false)` if the kernel does not support Landlock (graceful degradation)
|
||||
/// - `Err(msg)` on configuration or syscall errors
|
||||
pub fn apply(&self) -> Result<bool, String> {
|
||||
if !self.enabled {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
let access_all = AccessFs::from_all(TARGET_ABI);
|
||||
let access_read = AccessFs::from_read(TARGET_ABI);
|
||||
|
||||
// Create ruleset handling all filesystem access types
|
||||
let mut ruleset = Ruleset::default()
|
||||
.handle_access(access_all)
|
||||
.map_err(|e| format!("Landlock: failed to handle access: {e}"))?
|
||||
.create()
|
||||
.map_err(|e| format!("Landlock: failed to create ruleset: {e}"))?;
|
||||
|
||||
// Add read+write rules for project directories and tmp
|
||||
for path in &self.rw_paths {
|
||||
if path.exists() {
|
||||
let fd = PathFd::new(path)
|
||||
.map_err(|e| format!("Landlock: PathFd failed for {}: {e}", path.display()))?;
|
||||
ruleset = ruleset
|
||||
.add_rule(PathBeneath::new(fd, access_all))
|
||||
.map_err(|e| {
|
||||
format!("Landlock: add_rule (rw) failed for {}: {e}", path.display())
|
||||
})?;
|
||||
} else {
|
||||
log::warn!(
|
||||
"Landlock: skipping non-existent rw path: {}",
|
||||
path.display()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Add read-only rules for system paths
|
||||
for path in &self.ro_paths {
|
||||
if path.exists() {
|
||||
let fd = PathFd::new(path)
|
||||
.map_err(|e| format!("Landlock: PathFd failed for {}: {e}", path.display()))?;
|
||||
ruleset = ruleset
|
||||
.add_rule(PathBeneath::new(fd, access_read))
|
||||
.map_err(|e| {
|
||||
format!("Landlock: add_rule (ro) failed for {}: {e}", path.display())
|
||||
})?;
|
||||
}
|
||||
// Silently skip non-existent read-only paths (e.g., /lib64 on some systems)
|
||||
}
|
||||
|
||||
// Enforce the ruleset on this thread (and inherited by children)
|
||||
let status = ruleset
|
||||
.restrict_self()
|
||||
.map_err(|e| format!("Landlock: restrict_self failed: {e}"))?;
|
||||
|
||||
// Landlock enforcement states:
|
||||
// - Enforced: kernel 6.2+ with ABI V3 (full filesystem restriction)
|
||||
// - NotEnforced: kernel 5.13–6.1 (Landlock exists but ABI too old for V3)
|
||||
// - Error (caught above): kernel <5.13 (no Landlock LSM available)
|
||||
let enforced = status.ruleset != RulesetStatus::NotEnforced;
|
||||
if enforced {
|
||||
log::info!("Landlock sandbox applied ({} rw, {} ro paths)", self.rw_paths.len(), self.ro_paths.len());
|
||||
} else {
|
||||
log::warn!(
|
||||
"Landlock not enforced — sidecar runs without filesystem restrictions. \
|
||||
Kernel 6.2+ required for enforcement."
|
||||
);
|
||||
}
|
||||
|
||||
Ok(enforced)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_default_is_disabled() {
|
||||
let config = SandboxConfig::default();
|
||||
assert!(!config.enabled);
|
||||
assert!(config.rw_paths.is_empty());
|
||||
assert!(config.ro_paths.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_for_project_single_cwd() {
|
||||
let config = SandboxConfig::for_project("/home/user/myproject", None);
|
||||
assert!(config.enabled);
|
||||
assert!(config.rw_paths.contains(&PathBuf::from("/home/user/myproject")));
|
||||
assert!(config.rw_paths.contains(&std::env::temp_dir()));
|
||||
// No worktree path added
|
||||
assert!(!config
|
||||
.rw_paths
|
||||
.iter()
|
||||
.any(|p| p.to_string_lossy().contains("worktree")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_for_project_with_worktree() {
|
||||
let config = SandboxConfig::for_project(
|
||||
"/home/user/myproject",
|
||||
Some("/home/user/myproject/.claude/worktrees/abc123"),
|
||||
);
|
||||
assert!(config.enabled);
|
||||
assert!(config.rw_paths.contains(&PathBuf::from("/home/user/myproject")));
|
||||
assert!(config.rw_paths.contains(&PathBuf::from(
|
||||
"/home/user/myproject/.claude/worktrees/abc123"
|
||||
)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_for_projects_multiple_cwds() {
|
||||
let config = SandboxConfig::for_projects(
|
||||
&["/home/user/project-a", "/home/user/project-b"],
|
||||
&["/home/user/project-a/.claude/worktrees/s1"],
|
||||
);
|
||||
assert!(config.enabled);
|
||||
assert!(config.rw_paths.contains(&PathBuf::from("/home/user/project-a")));
|
||||
assert!(config.rw_paths.contains(&PathBuf::from("/home/user/project-b")));
|
||||
assert!(config.rw_paths.contains(&PathBuf::from(
|
||||
"/home/user/project-a/.claude/worktrees/s1"
|
||||
)));
|
||||
// tmp always present
|
||||
assert!(config.rw_paths.contains(&std::env::temp_dir()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ro_paths_include_system_dirs() {
|
||||
let config = SandboxConfig::for_project("/tmp/test", None);
|
||||
let ro_strs: Vec<String> = config.ro_paths.iter().map(|p| p.display().to_string()).collect();
|
||||
|
||||
assert!(ro_strs.iter().any(|p| p == "/usr"), "missing /usr");
|
||||
assert!(ro_strs.iter().any(|p| p == "/lib"), "missing /lib");
|
||||
assert!(ro_strs.iter().any(|p| p == "/etc"), "missing /etc");
|
||||
assert!(ro_strs.iter().any(|p| p == "/proc"), "missing /proc");
|
||||
assert!(ro_strs.iter().any(|p| p == "/dev"), "missing /dev");
|
||||
assert!(ro_strs.iter().any(|p| p == "/bin"), "missing /bin");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ro_paths_include_runtime_dirs() {
|
||||
let config = SandboxConfig::for_project("/tmp/test", None);
|
||||
let home = dirs::home_dir().unwrap();
|
||||
|
||||
assert!(config.ro_paths.contains(&home.join(".local")));
|
||||
assert!(config.ro_paths.contains(&home.join(".deno")));
|
||||
assert!(config.ro_paths.contains(&home.join(".nvm")));
|
||||
assert!(config.ro_paths.contains(&home.join(".config")));
|
||||
assert!(config.ro_paths.contains(&home.join(".claude")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_disabled_apply_returns_false() {
|
||||
let config = SandboxConfig::default();
|
||||
assert_eq!(config.apply().unwrap(), false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rw_paths_count() {
|
||||
// Single project: cwd + tmp = 2
|
||||
let config = SandboxConfig::for_project("/tmp/test", None);
|
||||
assert_eq!(config.rw_paths.len(), 2);
|
||||
|
||||
// With worktree: cwd + worktree + tmp = 3
|
||||
let config = SandboxConfig::for_project("/tmp/test", Some("/tmp/wt"));
|
||||
assert_eq!(config.rw_paths.len(), 3);
|
||||
}
|
||||
|
||||
#[test]
fn test_for_aider_restricted_single_cwd() {
    let home = dirs::home_dir().unwrap();
    let config = SandboxConfig::for_aider_restricted("/home/user/myproject", None);

    assert!(config.enabled);
    // Writable set: project cwd, temp dir, and aider's own state dir.
    for expected in [
        PathBuf::from("/home/user/myproject"),
        std::env::temp_dir(),
        home.join(".aider"),
    ] {
        assert!(config.rw_paths.contains(&expected));
    }

    // No worktree path added
    let mentions_worktree = config
        .rw_paths
        .iter()
        .any(|p| p.to_string_lossy().contains("worktree"));
    assert!(!mentions_worktree);
}
|
||||
|
||||
#[test]
fn test_for_aider_restricted_with_worktree() {
    let wt = "/home/user/myproject/.claude/worktrees/abc123";
    let config = SandboxConfig::for_aider_restricted("/home/user/myproject", Some(wt));

    assert!(config.enabled);
    // Both the project root and the worktree itself must be writable.
    assert!(config.rw_paths.contains(&PathBuf::from("/home/user/myproject")));
    assert!(config.rw_paths.contains(&PathBuf::from(wt)));
}
|
||||
|
||||
#[test]
fn test_for_aider_restricted_no_config_write() {
    let config = SandboxConfig::for_aider_restricted("/tmp/test", None);
    let home = dirs::home_dir().unwrap();

    // Aider restricted must NOT expose ~/.config or ~/.claude at all:
    // neither writable nor readable (stricter than for_projects).
    for dir in [home.join(".config"), home.join(".claude")] {
        assert!(!config.rw_paths.contains(&dir));
        assert!(!config.ro_paths.contains(&dir));
    }
}
|
||||
|
||||
#[test]
fn test_for_aider_restricted_rw_count() {
    // Without worktree: cwd + tmp + .aider = 3
    let without = SandboxConfig::for_aider_restricted("/tmp/test", None);
    assert_eq!(without.rw_paths.len(), 3);

    // With worktree: cwd + worktree + tmp + .aider = 4
    let with_wt = SandboxConfig::for_aider_restricted("/tmp/test", Some("/tmp/wt"));
    assert_eq!(with_wt.rw_paths.len(), 4);
}
|
||||
|
||||
#[test]
fn test_for_projects_empty() {
    let config = SandboxConfig::for_projects(&[], &[]);
    assert!(config.enabled);
    // With no projects, only the system temp dir remains writable.
    assert_eq!(config.rw_paths, vec![std::env::temp_dir()]);
}
|
||||
}
|
||||
980
agor-core/src/sidecar.rs
Normal file
980
agor-core/src/sidecar.rs
Normal file
|
|
@ -0,0 +1,980 @@
|
|||
// Sidecar lifecycle management (Deno-first, Node.js fallback)
|
||||
// Spawns per-provider runner scripts (e.g. claude-runner.mjs, aider-runner.mjs)
|
||||
// via deno or node, communicates via stdio NDJSON.
|
||||
// Each provider gets its own process, started lazily on first query.
|
||||
//
|
||||
// Uses a std::sync::mpsc actor pattern: the actor thread owns all mutable state
|
||||
// (providers HashMap, session_providers HashMap) exclusively. External callers
|
||||
// send requests via a channel, eliminating the TOCTOU race in ensure_provider().
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::io::{BufRead, BufReader, Write};
|
||||
#[cfg(unix)]
|
||||
use std::os::unix::process::CommandExt;
|
||||
use std::path::PathBuf;
|
||||
use std::process::{Child, Command, Stdio};
|
||||
use std::sync::mpsc as std_mpsc;
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
|
||||
use crate::event::EventSink;
|
||||
use crate::sandbox::SandboxConfig;
|
||||
|
||||
/// Options for a single agent query. Deserialized from callers and forwarded
/// to the provider sidecar as an NDJSON "query" message (see `build_query_msg`
/// for the camelCase wire field names).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AgentQueryOptions {
    /// Provider name selecting the sidecar runner; defaults to "claude".
    #[serde(default = "default_provider")]
    pub provider: String,
    /// Caller-assigned session identifier; also keys session→provider routing.
    pub session_id: String,
    /// The prompt text sent to the agent.
    pub prompt: String,
    /// Working directory for the agent, if any.
    pub cwd: Option<String>,
    /// Upper bound on agent turns, if any.
    pub max_turns: Option<u32>,
    /// Upper bound on spend in USD, if any.
    pub max_budget_usd: Option<f64>,
    /// Previous session id to resume, if any.
    pub resume_session_id: Option<String>,
    /// Permission mode string (e.g. "bypassPermissions"); forwarded verbatim to the runner.
    pub permission_mode: Option<String>,
    /// Settings sources the runner should load, if restricted; forwarded verbatim.
    pub setting_sources: Option<Vec<String>>,
    /// System prompt override, if any.
    pub system_prompt: Option<String>,
    /// Model identifier override, if any.
    pub model: Option<String>,
    /// Override for the Claude config directory, if any.
    pub claude_config_dir: Option<String>,
    /// Additional directories the agent may access, if any; forwarded verbatim.
    pub additional_directories: Option<Vec<String>>,
    /// When set, agent runs in a git worktree for isolation (passed as --worktree <name> CLI flag)
    pub worktree_name: Option<String>,
    /// Provider-specific configuration blob (passed through to sidecar as-is)
    #[serde(default)]
    pub provider_config: serde_json::Value,
    /// Extra environment variables injected into the agent process (e.g. BTMSG_AGENT_ID)
    #[serde(default)]
    pub extra_env: std::collections::HashMap<String, String>,
}
|
||||
|
||||
/// Serde default for `AgentQueryOptions::provider`: the "claude" provider.
fn default_provider() -> String {
    String::from("claude")
}
|
||||
|
||||
/// Directories to search for sidecar scripts.
#[derive(Debug, Clone)]
pub struct SidecarConfig {
    /// Base directories probed for `dist/<provider>-runner.mjs`.
    pub search_paths: Vec<PathBuf>,
    /// Extra env vars forwarded to sidecar processes (e.g. AGOR_TEST=1 for test isolation)
    pub env_overrides: std::collections::HashMap<String, String>,
    /// Landlock filesystem sandbox configuration (Linux 5.13+, applied via pre_exec)
    pub sandbox: SandboxConfig,
}
|
||||
|
||||
/// A resolved command line used to launch a sidecar runner.
struct SidecarCommand {
    /// Executable to invoke ("deno" or "node").
    program: String,
    /// Arguments, ending with the runner script path.
    args: Vec<String>,
}
|
||||
|
||||
/// Per-provider sidecar process state.
struct ProviderProcess {
    /// Handle to the spawned deno/node child process.
    child: Child,
    /// Pipe to the child's stdin; NDJSON messages are written here.
    stdin_writer: Box<dyn Write + Send>,
    /// Actor-local cached view of readiness (synced from `ready_flag`).
    ready: bool,
    /// Atomic flag set by the stdout reader thread when "ready" message arrives.
    /// The actor polls this to detect readiness without needing a separate channel.
    ready_flag: Arc<std::sync::atomic::AtomicBool>,
}
|
||||
|
||||
/// Requests sent from public API methods to the actor thread.
/// Each variant carries a `reply` sender so the caller can block for the result.
enum ProviderRequest {
    /// Start the default ("claude") sidecar.
    Start {
        reply: std_mpsc::Sender<Result<(), String>>,
    },
    /// Lazily start the provider named in `options` (if needed) and send the query.
    EnsureAndQuery {
        options: AgentQueryOptions,
        reply: std_mpsc::Sender<Result<(), String>>,
    },
    /// Send a "stop" message for the given session to its owning provider.
    StopSession {
        session_id: String,
        reply: std_mpsc::Sender<Result<(), String>>,
    },
    /// Legacy raw message path — routed to the default ("claude") provider.
    SendMessage {
        msg: serde_json::Value,
        reply: std_mpsc::Sender<Result<(), String>>,
    },
    /// Kill all sidecars, then start the default ("claude") one again.
    Restart {
        reply: std_mpsc::Sender<Result<(), String>>,
    },
    /// Kill all sidecars and clear session mappings.
    Shutdown {
        reply: std_mpsc::Sender<Result<(), String>>,
    },
    /// Report whether the default ("claude") sidecar is ready.
    IsReady {
        reply: std_mpsc::Sender<bool>,
    },
    /// Replace the sandbox config used for future sidecar starts.
    SetSandbox {
        sandbox: SandboxConfig,
        reply: std_mpsc::Sender<()>,
    },
}
|
||||
|
||||
/// Public handle to the sidecar actor thread. All operations are forwarded
/// over an mpsc channel and answered via per-call reply channels.
pub struct SidecarManager {
    /// Request channel into the actor thread.
    tx: std_mpsc::Sender<ProviderRequest>,
    // Keep a handle so the thread lives as long as the manager.
    // Not joined on drop — we send Shutdown instead.
    _actor_thread: Option<thread::JoinHandle<()>>,
}
|
||||
|
||||
/// Actor function that owns all mutable state exclusively.
/// Receives requests via `req_rx`. Ready signaling from stdout reader threads
/// uses per-provider AtomicBool flags (polled during ensure_provider_impl).
fn run_actor(
    req_rx: std_mpsc::Receiver<ProviderRequest>,
    sink: Arc<dyn EventSink>,
    initial_config: SidecarConfig,
) {
    // All mutable state lives on this thread only; requests mutate it serially.
    let mut providers: HashMap<String, ProviderProcess> = HashMap::new();
    let mut session_providers: HashMap<String, String> = HashMap::new();
    let mut config = initial_config;

    loop {
        // Block waiting for next request (with timeout so actor stays responsive)
        match req_rx.recv_timeout(std::time::Duration::from_millis(50)) {
            Ok(req) => {
                match req {
                    // Explicit startup of the default ("claude") sidecar.
                    ProviderRequest::Start { reply } => {
                        let result = start_provider_impl(
                            &mut providers,
                            &config,
                            &sink,
                            "claude",
                        );
                        let _ = reply.send(result);
                    }
                    ProviderRequest::EnsureAndQuery { options, reply } => {
                        let provider = options.provider.clone();

                        // Ensure provider is ready — atomic, no TOCTOU
                        if let Err(e) = ensure_provider_impl(
                            &mut providers,
                            &config,
                            &sink,
                            &provider,
                        ) {
                            let _ = reply.send(Err(e));
                            continue;
                        }

                        // Track session -> provider mapping
                        session_providers.insert(options.session_id.clone(), provider.clone());

                        // Build and send query message
                        let msg = build_query_msg(&options);
                        let result = send_to_provider_impl(&mut providers, &provider, &msg);
                        let _ = reply.send(result);
                    }
                    ProviderRequest::StopSession { session_id, reply } => {
                        // Route the stop to whichever provider owns the session,
                        // defaulting to "claude" for unknown sessions.
                        let provider = session_providers
                            .get(&session_id)
                            .cloned()
                            .unwrap_or_else(|| "claude".to_string());
                        let msg = serde_json::json!({
                            "type": "stop",
                            "sessionId": session_id,
                        });
                        let result = send_to_provider_impl(&mut providers, &provider, &msg);
                        let _ = reply.send(result);
                    }
                    // Legacy path: raw messages always go to the "claude" sidecar.
                    ProviderRequest::SendMessage { msg, reply } => {
                        let result = send_to_provider_impl(&mut providers, "claude", &msg);
                        let _ = reply.send(result);
                    }
                    ProviderRequest::Restart { reply } => {
                        log::info!("Restarting all sidecars");
                        shutdown_all(&mut providers, &mut session_providers);
                        let result = start_provider_impl(
                            &mut providers,
                            &config,
                            &sink,
                            "claude",
                        );
                        let _ = reply.send(result);
                    }
                    ProviderRequest::Shutdown { reply } => {
                        shutdown_all(&mut providers, &mut session_providers);
                        let _ = reply.send(Ok(()));
                    }
                    ProviderRequest::IsReady { reply } => {
                        // Sync ready state from atomic flags
                        sync_ready_flags(&mut providers);
                        let ready = providers
                            .get("claude")
                            .map(|p| p.ready)
                            .unwrap_or(false);
                        let _ = reply.send(ready);
                    }
                    // Takes effect on the next sidecar (re)start; already-running
                    // children keep the sandbox they were spawned with.
                    ProviderRequest::SetSandbox { sandbox, reply } => {
                        config.sandbox = sandbox;
                        let _ = reply.send(());
                    }
                }
            }
            Err(std_mpsc::RecvTimeoutError::Timeout) => {
                // Loop back -- keeps actor responsive to shutdown
                continue;
            }
            Err(std_mpsc::RecvTimeoutError::Disconnected) => {
                // All senders dropped — shut down
                break;
            }
        }
    }

    // Channel closed — clean up remaining providers
    shutdown_all(&mut providers, &mut session_providers);
}
|
||||
|
||||
/// Sync ready state from AtomicBool flags set by stdout reader threads.
|
||||
fn sync_ready_flags(providers: &mut HashMap<String, ProviderProcess>) {
|
||||
for p in providers.values_mut() {
|
||||
if !p.ready && p.ready_flag.load(std::sync::atomic::Ordering::Acquire) {
|
||||
p.ready = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Shut down all provider processes and clear session mappings.
|
||||
fn shutdown_all(
|
||||
providers: &mut HashMap<String, ProviderProcess>,
|
||||
session_providers: &mut HashMap<String, String>,
|
||||
) {
|
||||
for (name, mut proc) in providers.drain() {
|
||||
log::info!("Shutting down {} sidecar", name);
|
||||
let _ = proc.child.kill();
|
||||
let _ = proc.child.wait();
|
||||
}
|
||||
session_providers.clear();
|
||||
}
|
||||
|
||||
/// Start a specific provider's sidecar process. Called from the actor thread
/// which owns the providers HashMap exclusively — no lock contention possible.
///
/// Errors if the provider is already running, if no runner script / JS runtime
/// can be found, or if spawning fails. On success the process is registered in
/// `providers` with `ready = false`; readiness is reported asynchronously by
/// the stdout reader thread via `ready_flag`.
fn start_provider_impl(
    providers: &mut HashMap<String, ProviderProcess>,
    config: &SidecarConfig,
    sink: &Arc<dyn EventSink>,
    provider: &str,
) -> Result<(), String> {
    if providers.contains_key(provider) {
        return Err(format!("Sidecar for '{}' already running", provider));
    }

    let cmd = SidecarManager::resolve_sidecar_for_provider_with_config(config, provider)?;

    log::info!(
        "Starting {} sidecar: {} {}",
        provider,
        cmd.program,
        cmd.args.join(" ")
    );

    // Build a clean environment stripping provider-specific vars to prevent
    // SDKs from detecting nesting when Agents Orchestrator is launched from a provider terminal.
    let clean_env: Vec<(String, String)> = std::env::vars()
        .filter(|(k, _)| strip_provider_env_var(k))
        .collect();

    // env_clear + filtered re-add, then config overrides win on conflicts
    // (envs applies later entries over earlier ones).
    let mut command = Command::new(&cmd.program);
    command
        .args(&cmd.args)
        .env_clear()
        .envs(clean_env)
        .envs(
            config
                .env_overrides
                .iter()
                .map(|(k, v)| (k.as_str(), v.as_str())),
        )
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(Stdio::piped());

    // Apply Landlock sandbox in child process before exec (Linux only).
    #[cfg(unix)]
    if config.sandbox.enabled {
        let sandbox = config.sandbox.clone();
        // SAFETY: the pre_exec closure runs between fork and exec; it only
        // calls sandbox.apply() and logging on the captured clone.
        unsafe {
            command.pre_exec(move || {
                sandbox
                    .apply()
                    .map(|enforced| {
                        if !enforced {
                            log::warn!("Landlock sandbox not enforced in sidecar child");
                        }
                    })
                    .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
            });
        }
    }

    let mut child = command
        .spawn()
        .map_err(|e| format!("Failed to start {} sidecar: {e}", provider))?;

    // All three pipes were requested above, so take() should always succeed.
    let child_stdin = child
        .stdin
        .take()
        .ok_or("Failed to capture sidecar stdin")?;
    let child_stdout = child
        .stdout
        .take()
        .ok_or("Failed to capture sidecar stdout")?;
    let child_stderr = child
        .stderr
        .take()
        .ok_or("Failed to capture sidecar stderr")?;

    // Per-provider AtomicBool for ready signaling from stdout reader thread to actor.
    let ready_flag = Arc::new(std::sync::atomic::AtomicBool::new(false));
    let ready_flag_writer = ready_flag.clone();

    // Stdout reader thread — forwards NDJSON to event sink
    let sink_clone = sink.clone();
    let provider_name = provider.to_string();
    thread::spawn(move || {
        let reader = BufReader::new(child_stdout);
        for line in reader.lines() {
            match line {
                Ok(line) => {
                    // Skip blank lines between NDJSON records.
                    if line.trim().is_empty() {
                        continue;
                    }
                    match serde_json::from_str::<serde_json::Value>(&line) {
                        Ok(msg) => {
                            // A {"type":"ready"} message flips the readiness
                            // flag that the actor polls.
                            if msg.get("type").and_then(|t| t.as_str()) == Some("ready") {
                                ready_flag_writer
                                    .store(true, std::sync::atomic::Ordering::Release);
                                log::info!("{} sidecar ready", provider_name);
                            }
                            sink_clone.emit("sidecar-message", msg);
                        }
                        Err(e) => {
                            // Non-JSON output is logged but never forwarded.
                            log::warn!(
                                "Invalid JSON from {} sidecar: {e}: {line}",
                                provider_name
                            );
                        }
                    }
                }
                Err(e) => {
                    log::error!("{} sidecar stdout read error: {e}", provider_name);
                    break;
                }
            }
        }
        // Pipe closed — the child exited or the stream broke; notify listeners.
        log::info!("{} sidecar stdout reader exited", provider_name);
        sink_clone.emit(
            "sidecar-exited",
            serde_json::json!({ "provider": provider_name }),
        );
    });

    // Stderr reader thread — logs only
    let provider_name2 = provider.to_string();
    thread::spawn(move || {
        let reader = BufReader::new(child_stderr);
        for line in reader.lines() {
            match line {
                Ok(line) => log::info!("[{} sidecar stderr] {line}", provider_name2),
                Err(e) => {
                    log::error!("{} sidecar stderr read error: {e}", provider_name2);
                    break;
                }
            }
        }
    });

    providers.insert(
        provider.to_string(),
        ProviderProcess {
            child,
            stdin_writer: Box::new(child_stdin),
            ready: false,
            ready_flag,
        },
    );

    Ok(())
}
|
||||
|
||||
/// Ensure a provider's sidecar is running and ready, starting it lazily if needed.
/// Called exclusively from the actor thread — no lock contention, no TOCTOU race.
///
/// NOTE(review): the wait loop below sleeps on the actor thread, so other
/// queued requests stall for up to ~10s while a sidecar boots — confirm this
/// is acceptable for the UI before relying on concurrent provider startup.
fn ensure_provider_impl(
    providers: &mut HashMap<String, ProviderProcess>,
    config: &SidecarConfig,
    sink: &Arc<dyn EventSink>,
    provider: &str,
) -> Result<(), String> {
    // Sync ready state from atomic flag (set by stdout reader thread)
    if let Some(p) = providers.get_mut(provider) {
        if !p.ready && p.ready_flag.load(std::sync::atomic::Ordering::Acquire) {
            p.ready = true;
        }
        if p.ready {
            return Ok(());
        }
        // Started but not ready yet -- fall through to wait loop
    } else {
        // Not started -- start it now. No TOCTOU: we own the HashMap exclusively.
        start_provider_impl(providers, config, sink, provider)?;
    }

    // Wait for ready (up to 10 seconds: 100 polls x 100ms)
    for _ in 0..100 {
        std::thread::sleep(std::time::Duration::from_millis(100));

        if let Some(p) = providers.get_mut(provider) {
            if !p.ready && p.ready_flag.load(std::sync::atomic::Ordering::Acquire) {
                p.ready = true;
            }
            if p.ready {
                return Ok(());
            }
        } else {
            return Err(format!("{} sidecar process exited before ready", provider));
        }
    }
    Err(format!(
        "{} sidecar did not become ready within timeout",
        provider
    ))
}
|
||||
|
||||
/// Send a JSON message to a provider's stdin.
|
||||
fn send_to_provider_impl(
|
||||
providers: &mut HashMap<String, ProviderProcess>,
|
||||
provider: &str,
|
||||
msg: &serde_json::Value,
|
||||
) -> Result<(), String> {
|
||||
let proc = providers
|
||||
.get_mut(provider)
|
||||
.ok_or_else(|| format!("{} sidecar not running", provider))?;
|
||||
|
||||
let line =
|
||||
serde_json::to_string(msg).map_err(|e| format!("JSON serialize error: {e}"))?;
|
||||
|
||||
proc.stdin_writer
|
||||
.write_all(line.as_bytes())
|
||||
.map_err(|e| format!("Sidecar write error: {e}"))?;
|
||||
proc.stdin_writer
|
||||
.write_all(b"\n")
|
||||
.map_err(|e| format!("Sidecar write error: {e}"))?;
|
||||
proc.stdin_writer
|
||||
.flush()
|
||||
.map_err(|e| format!("Sidecar flush error: {e}"))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Build the NDJSON query message from AgentQueryOptions.
/// Keys are camelCase to match the JS runner's wire protocol;
/// `None` fields serialize as JSON null.
fn build_query_msg(options: &AgentQueryOptions) -> serde_json::Value {
    serde_json::json!({
        "type": "query",
        "provider": options.provider,
        "sessionId": options.session_id,
        "prompt": options.prompt,
        "cwd": options.cwd,
        "maxTurns": options.max_turns,
        "maxBudgetUsd": options.max_budget_usd,
        "resumeSessionId": options.resume_session_id,
        "permissionMode": options.permission_mode,
        "settingSources": options.setting_sources,
        "systemPrompt": options.system_prompt,
        "model": options.model,
        "claudeConfigDir": options.claude_config_dir,
        "additionalDirectories": options.additional_directories,
        "worktreeName": options.worktree_name,
        "providerConfig": options.provider_config,
        "extraEnv": options.extra_env,
    })
}
|
||||
|
||||
impl SidecarManager {
    /// Spawn the actor thread and return a handle to it. The actor owns all
    /// provider/session state; this struct only holds the request channel.
    pub fn new(sink: Arc<dyn EventSink>, config: SidecarConfig) -> Self {
        let (req_tx, req_rx) = std_mpsc::channel();

        let handle = thread::spawn(move || {
            run_actor(req_rx, sink, config);
        });

        Self {
            tx: req_tx,
            _actor_thread: Some(handle),
        }
    }

    /// Update the sandbox configuration. Takes effect on next sidecar (re)start.
    pub fn set_sandbox(&self, sandbox: SandboxConfig) {
        let (reply_tx, reply_rx) = std_mpsc::channel();
        // If the actor is gone the send fails and we silently skip the wait.
        if self
            .tx
            .send(ProviderRequest::SetSandbox {
                sandbox,
                reply: reply_tx,
            })
            .is_ok()
        {
            let _ = reply_rx.recv();
        }
    }

    /// Start the default (claude) provider sidecar. Called on app startup.
    pub fn start(&self) -> Result<(), String> {
        let (reply_tx, reply_rx) = std_mpsc::channel();
        self.tx
            .send(ProviderRequest::Start { reply: reply_tx })
            .map_err(|_| "Sidecar actor stopped".to_string())?;
        reply_rx
            .recv()
            .map_err(|_| "Sidecar actor stopped".to_string())?
    }

    /// Send a query, lazily starting the named provider's sidecar if needed.
    /// Blocks until the actor has written the query to the sidecar's stdin —
    /// NOT until the agent responds; responses arrive via the event sink.
    pub fn query(&self, options: &AgentQueryOptions) -> Result<(), String> {
        let (reply_tx, reply_rx) = std_mpsc::channel();
        self.tx
            .send(ProviderRequest::EnsureAndQuery {
                options: options.clone(),
                reply: reply_tx,
            })
            .map_err(|_| "Sidecar actor stopped".to_string())?;
        reply_rx
            .recv()
            .map_err(|_| "Sidecar actor stopped".to_string())?
    }

    /// Ask the provider owning `session_id` to stop that session.
    pub fn stop_session(&self, session_id: &str) -> Result<(), String> {
        let (reply_tx, reply_rx) = std_mpsc::channel();
        self.tx
            .send(ProviderRequest::StopSession {
                session_id: session_id.to_string(),
                reply: reply_tx,
            })
            .map_err(|_| "Sidecar actor stopped".to_string())?;
        reply_rx
            .recv()
            .map_err(|_| "Sidecar actor stopped".to_string())?
    }

    /// Kill all sidecars and restart the default (claude) one.
    pub fn restart(&self) -> Result<(), String> {
        let (reply_tx, reply_rx) = std_mpsc::channel();
        self.tx
            .send(ProviderRequest::Restart { reply: reply_tx })
            .map_err(|_| "Sidecar actor stopped".to_string())?;
        reply_rx
            .recv()
            .map_err(|_| "Sidecar actor stopped".to_string())?
    }

    /// Kill all sidecars. Always returns Ok — a dead actor means nothing to stop.
    pub fn shutdown(&self) -> Result<(), String> {
        let (reply_tx, reply_rx) = std_mpsc::channel();
        if self
            .tx
            .send(ProviderRequest::Shutdown { reply: reply_tx })
            .is_ok()
        {
            let _ = reply_rx.recv();
        }
        Ok(())
    }

    /// Returns true if the default (claude) provider sidecar is ready.
    pub fn is_ready(&self) -> bool {
        let (reply_tx, reply_rx) = std_mpsc::channel();
        if self
            .tx
            .send(ProviderRequest::IsReady { reply: reply_tx })
            .is_ok()
        {
            reply_rx.recv().unwrap_or(false)
        } else {
            false
        }
    }

    /// Legacy send_message — routes to the default (claude) provider.
    pub fn send_message(&self, msg: &serde_json::Value) -> Result<(), String> {
        let (reply_tx, reply_rx) = std_mpsc::channel();
        self.tx
            .send(ProviderRequest::SendMessage {
                msg: msg.clone(),
                reply: reply_tx,
            })
            .map_err(|_| "Sidecar actor stopped".to_string())?;
        reply_rx
            .recv()
            .map_err(|_| "Sidecar actor stopped".to_string())?
    }

    /// Resolve a sidecar command for a specific provider's runner file.
    /// Probes each search path for `dist/<provider>-runner.mjs`, preferring
    /// deno (with explicit permission flags) over node when both are on PATH.
    fn resolve_sidecar_for_provider_with_config(
        config: &SidecarConfig,
        provider: &str,
    ) -> Result<SidecarCommand, String> {
        let runner_name = format!("{}-runner.mjs", provider);

        // Try Deno first (faster startup, better perf), fall back to Node.js.
        // Probing via `--version` with output discarded.
        let has_deno = Command::new("deno")
            .arg("--version")
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .status()
            .is_ok();
        let has_node = Command::new("node")
            .arg("--version")
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .status()
            .is_ok();

        // Paths probed but not used, reported in the error message below.
        let mut checked = Vec::new();

        for base in &config.search_paths {
            let mjs_path = base.join("dist").join(&runner_name);
            if mjs_path.exists() {
                if has_deno {
                    return Ok(SidecarCommand {
                        program: "deno".to_string(),
                        args: vec![
                            "run".to_string(),
                            "--allow-run".to_string(),
                            "--allow-env".to_string(),
                            "--allow-read".to_string(),
                            "--allow-write".to_string(),
                            "--allow-net".to_string(),
                            mjs_path.to_string_lossy().to_string(),
                        ],
                    });
                }
                if has_node {
                    return Ok(SidecarCommand {
                        program: "node".to_string(),
                        args: vec![mjs_path.to_string_lossy().to_string()],
                    });
                }
            }
            checked.push(mjs_path);
        }

        let paths: Vec<_> = checked.iter().map(|p| p.display().to_string()).collect();
        let runtime_note = if !has_deno && !has_node {
            ". Neither deno nor node found in PATH"
        } else {
            ""
        };
        Err(format!(
            "Sidecar not found for provider '{}'. Checked: {}{}",
            provider,
            paths.join(", "),
            runtime_note,
        ))
    }
}
|
||||
|
||||
/// Returns true if the env var should be KEPT (not stripped).
/// First line of defense: strips provider-specific prefixes to prevent nesting
/// detection and credential leakage. JS runners apply a second layer of
/// provider-specific stripping.
///
/// Stripped prefixes: CLAUDE*, CODEX*, OLLAMA*, AIDER*, ANTHROPIC_*
/// Whitelisted: CLAUDE_CODE_EXPERIMENTAL_* (feature flags like agent teams)
///
/// Note: OPENAI_* and OPENROUTER_* are NOT stripped here because runners need
/// these keys from the environment or extraEnv injection.
fn strip_provider_env_var(key: &str) -> bool {
    // The experimental-flag whitelist wins over the blocklist below.
    if key.starts_with("CLAUDE_CODE_EXPERIMENTAL_") {
        return true;
    }
    const STRIPPED_PREFIXES: [&str; 5] = ["CLAUDE", "CODEX", "OLLAMA", "AIDER", "ANTHROPIC_"];
    !STRIPPED_PREFIXES
        .iter()
        .any(|prefix| key.starts_with(prefix))
}
|
||||
|
||||
impl Drop for SidecarManager {
|
||||
fn drop(&mut self) {
|
||||
// Send shutdown request to the actor. If the channel is already closed
|
||||
// (actor thread exited), this is a no-op.
|
||||
let (reply_tx, reply_rx) = std_mpsc::channel();
|
||||
if self
|
||||
.tx
|
||||
.send(ProviderRequest::Shutdown { reply: reply_tx })
|
||||
.is_ok()
|
||||
{
|
||||
// Wait briefly for the actor to clean up (with timeout to avoid hanging)
|
||||
let _ = reply_rx.recv_timeout(std::time::Duration::from_secs(5));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::sync::Mutex;
|
||||
|
||||
// ---- strip_provider_env_var unit tests ----
|
||||
|
||||
#[test]
fn test_keeps_normal_env_vars() {
    // Ordinary shell/session vars must survive the filter.
    for key in ["HOME", "PATH", "USER", "SHELL", "TERM", "XDG_DATA_HOME", "RUST_LOG"] {
        assert!(strip_provider_env_var(key));
    }
}
|
||||
|
||||
#[test]
fn test_strips_claude_vars() {
    for key in ["CLAUDE_CONFIG_DIR", "CLAUDE_SESSION_ID", "CLAUDECODE", "CLAUDE_API_KEY"] {
        assert!(!strip_provider_env_var(key));
    }
}
|
||||
|
||||
#[test]
fn test_whitelists_claude_code_experimental() {
    // The experimental-flag whitelist overrides the CLAUDE* blocklist.
    for key in [
        "CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS",
        "CLAUDE_CODE_EXPERIMENTAL_TOOLS",
        "CLAUDE_CODE_EXPERIMENTAL_SOMETHING_NEW",
    ] {
        assert!(strip_provider_env_var(key));
    }
}
|
||||
|
||||
#[test]
fn test_strips_codex_vars() {
    for key in ["CODEX_API_KEY", "CODEX_SESSION", "CODEX_CONFIG"] {
        assert!(!strip_provider_env_var(key));
    }
}
|
||||
|
||||
#[test]
fn test_strips_ollama_vars() {
    for key in ["OLLAMA_HOST", "OLLAMA_MODELS", "OLLAMA_NUM_PARALLEL"] {
        assert!(!strip_provider_env_var(key));
    }
}
|
||||
|
||||
#[test]
fn test_strips_anthropic_vars() {
    // ANTHROPIC_* vars stripped at Rust layer (defense in depth)
    // Claude CLI has its own auth via credentials file
    for key in ["ANTHROPIC_API_KEY", "ANTHROPIC_BASE_URL", "ANTHROPIC_LOG"] {
        assert!(!strip_provider_env_var(key));
    }
}
|
||||
|
||||
#[test]
fn test_keeps_openai_vars() {
    // OPENAI_* vars are NOT stripped by the Rust layer
    // (they're stripped in the JS codex-runner layer instead)
    for key in ["OPENAI_API_KEY", "OPENAI_BASE_URL"] {
        assert!(strip_provider_env_var(key));
    }
}
|
||||
|
||||
#[test]
fn test_env_filtering_integration() {
    // Mixed environment: some survivors, some provider vars to strip.
    let test_env = [
        ("HOME", "/home/user"),
        ("PATH", "/usr/bin"),
        ("CLAUDE_CONFIG_DIR", "/tmp/claude"),
        ("CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS", "1"),
        ("CODEX_API_KEY", "sk-test"),
        ("OLLAMA_HOST", "localhost"),
        ("ANTHROPIC_API_KEY", "sk-ant-xxx"),
        ("OPENAI_API_KEY", "sk-openai-xxx"),
        ("RUST_LOG", "debug"),
        ("BTMSG_AGENT_ID", "a1"),
    ];

    let kept: Vec<&str> = test_env
        .iter()
        .filter(|(k, _)| strip_provider_env_var(k))
        .map(|&(k, _)| k)
        .collect();

    // Survivors — OPENAI_* passes through Rust layer (Codex runner needs it).
    for key in [
        "HOME",
        "PATH",
        "CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS",
        "RUST_LOG",
        "BTMSG_AGENT_ID",
        "OPENAI_API_KEY",
    ] {
        assert!(kept.contains(&key));
    }

    // These are stripped:
    for key in ["CLAUDE_CONFIG_DIR", "CODEX_API_KEY", "OLLAMA_HOST", "ANTHROPIC_API_KEY"] {
        assert!(!kept.contains(&key));
    }
}
|
||||
|
||||
// ---- Actor pattern tests ----
|
||||
|
||||
/// Mock EventSink that records emitted events.
struct MockSink {
    /// (event name, payload) pairs in emission order; Mutex for thread-safe access.
    events: Mutex<Vec<(String, serde_json::Value)>>,
}
|
||||
|
||||
impl MockSink {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
events: Mutex::new(Vec::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl EventSink for MockSink {
|
||||
fn emit(&self, event: &str, payload: serde_json::Value) {
|
||||
self.events
|
||||
.lock()
|
||||
.unwrap()
|
||||
.push((event.to_string(), payload));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
fn test_actor_new_and_drop() {
    // SidecarManager should create and drop cleanly without panicking
    let sink: Arc<dyn EventSink> = Arc::new(MockSink::new());
    let manager = SidecarManager::new(
        sink,
        SidecarConfig {
            search_paths: Vec::new(),
            env_overrides: Default::default(),
            sandbox: Default::default(),
        },
    );
    // is_ready should return false since no provider started
    assert!(!manager.is_ready());
    // Drop should send shutdown cleanly
    drop(manager);
}
|
||||
|
||||
#[test]
fn test_actor_shutdown_idempotent() {
    let sink: Arc<dyn EventSink> = Arc::new(MockSink::new());
    let manager = SidecarManager::new(
        sink,
        SidecarConfig {
            search_paths: Vec::new(),
            env_overrides: Default::default(),
            sandbox: Default::default(),
        },
    );
    // Multiple shutdowns should not panic
    for _ in 0..2 {
        assert!(manager.shutdown().is_ok());
    }
}
|
||||
|
||||
#[test]
fn test_actor_set_sandbox() {
    let sink: Arc<dyn EventSink> = Arc::new(MockSink::new());
    let manager = SidecarManager::new(
        sink,
        SidecarConfig {
            search_paths: Vec::new(),
            env_overrides: Default::default(),
            sandbox: Default::default(),
        },
    );
    // set_sandbox should complete without error
    let sandbox = SandboxConfig {
        rw_paths: vec![PathBuf::from("/tmp")],
        ro_paths: Vec::new(),
        enabled: true,
    };
    manager.set_sandbox(sandbox);
}
|
||||
|
||||
#[test]
fn test_build_query_msg_fields() {
    let options = AgentQueryOptions {
        provider: "claude".into(),
        session_id: "s1".into(),
        prompt: "hello".into(),
        cwd: Some("/tmp".into()),
        max_turns: Some(5),
        max_budget_usd: None,
        resume_session_id: None,
        permission_mode: Some("bypassPermissions".into()),
        setting_sources: None,
        system_prompt: None,
        model: Some("claude-4-opus".into()),
        claude_config_dir: None,
        additional_directories: None,
        worktree_name: None,
        provider_config: serde_json::Value::Null,
        extra_env: Default::default(),
    };

    let msg = build_query_msg(&options);
    // Verify the camelCase wire keys carry the expected values.
    for (key, want) in [
        ("type", serde_json::json!("query")),
        ("provider", serde_json::json!("claude")),
        ("sessionId", serde_json::json!("s1")),
        ("prompt", serde_json::json!("hello")),
        ("cwd", serde_json::json!("/tmp")),
        ("maxTurns", serde_json::json!(5)),
        ("model", serde_json::json!("claude-4-opus")),
    ] {
        assert_eq!(msg[key], want);
    }
}
|
||||
|
||||
#[test]
fn test_concurrent_queries_no_race() {
    // This test verifies that concurrent query() calls from multiple threads
    // are serialized by the actor and don't cause a TOCTOU race on ensure_provider.
    // Since we can't actually start a sidecar in tests (no runner scripts),
    // we verify that the actor handles multiple concurrent requests gracefully
    // (all get errors, none panic or deadlock).

    let sink: Arc<dyn EventSink> = Arc::new(MockSink::new());
    let config = SidecarConfig {
        search_paths: vec![], // No search paths → start_provider will fail
        env_overrides: Default::default(),
        sandbox: Default::default(),
    };
    let manager = Arc::new(SidecarManager::new(sink, config));

    let mut handles = vec![];
    // Shared accumulator for the error each worker thread receives.
    let errors = Arc::new(Mutex::new(Vec::new()));

    // Spawn 10 concurrent query() calls
    for i in 0..10 {
        let mgr = manager.clone();
        let errs = errors.clone();
        handles.push(thread::spawn(move || {
            // Each thread uses a distinct session ID but otherwise minimal options.
            let options = AgentQueryOptions {
                provider: "test-provider".to_string(),
                session_id: format!("session-{}", i),
                prompt: "hello".to_string(),
                cwd: None,
                max_turns: None,
                max_budget_usd: None,
                resume_session_id: None,
                permission_mode: None,
                setting_sources: None,
                system_prompt: None,
                model: None,
                claude_config_dir: None,
                additional_directories: None,
                worktree_name: None,
                provider_config: serde_json::Value::Null,
                extra_env: Default::default(),
            };
            let result = mgr.query(&options);
            if let Err(e) = result {
                errs.lock().unwrap().push(e);
            }
        }));
    }

    // join() propagates any panic from a worker, failing the test.
    for h in handles {
        h.join().expect("Thread should not panic");
    }

    // All 10 should have failed (no sidecar scripts available), but none panicked
    let errs = errors.lock().unwrap();
    assert_eq!(errs.len(), 10, "All 10 concurrent queries should get errors");

    // The key invariant: no "Sidecar for 'X' already running" error.
    // Because the actor serializes requests, the second caller sees the first's
    // start_provider result (either success or failure), not a conflicting start.
    // With no search paths, all errors should be "Sidecar not found" style.
    for err in errs.iter() {
        assert!(
            !err.contains("already running"),
            "Should not get 'already running' error from serialized actor. Got: {err}"
        );
    }
}
|
||||
}
|
||||
684
agor-core/src/supervisor.rs
Normal file
684
agor-core/src/supervisor.rs
Normal file
|
|
@ -0,0 +1,684 @@
|
|||
// Sidecar crash recovery and supervision.
|
||||
// Wraps a SidecarManager with automatic restart, exponential backoff,
|
||||
// and health status tracking. Emits `sidecar-health-changed` events.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use crate::event::EventSink;
|
||||
use crate::sidecar::{AgentQueryOptions, SidecarConfig, SidecarManager};
|
||||
|
||||
/// Health status of the supervised sidecar process.
///
/// Serialized as an internally-tagged JSON object: the variant name becomes
/// the `status` field (lower/camelCase), with variant fields inlined, e.g.
/// `{"status":"degraded","restart_count":2}`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(tag = "status", rename_all = "camelCase")]
pub enum SidecarHealth {
    /// Running normally (also the initial state before any crash).
    Healthy,
    /// The process crashed and a restart has been scheduled/performed.
    Degraded {
        /// Crashes observed since the last stability-window reset.
        restart_count: u32,
    },
    /// The retry budget was exhausted (or a restart failed); no further
    /// automatic restarts are attempted until `reset()`.
    Failed {
        /// Human-readable reason; defaults to "" if absent when deserializing.
        #[serde(default)]
        last_error: String,
    },
}
|
||||
|
||||
/// Configuration for supervisor restart behavior.
///
/// Consumed by `compute_backoff` and the crash handler in `SupervisorSink`.
#[derive(Debug, Clone)]
pub struct SupervisorConfig {
    /// Maximum restart attempts before entering Failed state (default: 5)
    pub max_retries: u32,
    /// Base backoff in milliseconds, doubled each retry (default: 1000, cap: 30000)
    pub backoff_base_ms: u64,
    /// Maximum backoff in milliseconds (default: 30000)
    pub backoff_cap_ms: u64,
    /// Stable operation duration before restart_count resets (default: 5 minutes)
    pub stability_window: Duration,
}
|
||||
|
||||
impl Default for SupervisorConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_retries: 5,
|
||||
backoff_base_ms: 1000,
|
||||
backoff_cap_ms: 30_000,
|
||||
stability_window: Duration::from_secs(300),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Internal state shared between the supervisor and its event interceptor.
struct SupervisorState {
    // Last health value reported via `sidecar-health-changed`.
    health: SidecarHealth,
    // Crashes observed since the last stability-window reset.
    restart_count: u32,
    // When the most recent `sidecar-exited` event was handled.
    last_crash_time: Option<Instant>,
    // When the sidecar process last started successfully.
    last_start_time: Option<Instant>,
}
|
||||
|
||||
impl SupervisorState {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
health: SidecarHealth::Healthy,
|
||||
restart_count: 0,
|
||||
last_crash_time: None,
|
||||
last_start_time: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute exponential backoff: base_ms * 2^attempt, capped at cap_ms.
///
/// Shift amounts of 64 or more (and any multiplication overflow) saturate,
/// so arbitrarily large `attempt` values simply yield the cap.
fn compute_backoff(base_ms: u64, attempt: u32, cap_ms: u64) -> Duration {
    // 2^attempt, saturating to u64::MAX once the shift would overflow.
    let factor = match 1u64.checked_shl(attempt) {
        Some(f) => f,
        None => u64::MAX,
    };
    let millis = base_ms.saturating_mul(factor).min(cap_ms);
    Duration::from_millis(millis)
}
|
||||
|
||||
/// EventSink wrapper that intercepts `sidecar-exited` events and triggers
/// supervisor restart logic, while forwarding all other events unchanged.
struct SupervisorSink {
    // Real sink (e.g. the frontend bridge) that receives forwarded events.
    outer_sink: Arc<dyn EventSink>,
    // Shared with the owning `SidecarSupervisor`.
    state: Arc<Mutex<SupervisorState>>,
    // Retry/backoff policy applied on each crash.
    config: SupervisorConfig,
    // Needed to construct a replacement `SidecarManager` on restart.
    sidecar_config: SidecarConfig,
}
|
||||
|
||||
impl EventSink for SupervisorSink {
|
||||
fn emit(&self, event: &str, payload: serde_json::Value) {
|
||||
if event == "sidecar-exited" {
|
||||
self.handle_exit();
|
||||
} else {
|
||||
self.outer_sink.emit(event, payload);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SupervisorSink {
    /// Crash-recovery entry point, invoked for every intercepted
    /// `sidecar-exited` event.
    ///
    /// Phase 1 (under the state lock): reset the restart counter if the
    /// process ran longer than `stability_window`, record the crash, and
    /// either give up (Failed, forward the exit event, return) when the
    /// retry budget is exhausted, or mark Degraded and compute the backoff.
    ///
    /// Phase 2 (lock released): sleep for the backoff on a background
    /// thread and start a replacement `SidecarManager` wired to a fresh
    /// `SupervisorSink`, so the next crash is also intercepted.
    fn handle_exit(&self) {
        let (should_restart, backoff, restart_count) = {
            let mut state = self.state.lock().unwrap();

            // Check if stable operation has elapsed since last start — reset counter
            if let Some(start_time) = state.last_start_time {
                if start_time.elapsed() >= self.config.stability_window {
                    log::info!(
                        "Sidecar ran stable for {:?}, resetting restart count",
                        start_time.elapsed()
                    );
                    state.restart_count = 0;
                }
            }

            state.restart_count += 1;
            state.last_crash_time = Some(Instant::now());
            let count = state.restart_count;

            if count > self.config.max_retries {
                let error = format!("Exceeded max retries ({})", self.config.max_retries);
                log::error!("Sidecar supervisor: {}", error);
                state.health = SidecarHealth::Failed {
                    last_error: error.clone(),
                };
                self.emit_health(&state.health);
                // Forward the original exited event so frontend knows
                self.outer_sink
                    .emit("sidecar-exited", serde_json::Value::Null);
                return;
            }

            state.health = SidecarHealth::Degraded {
                restart_count: count,
            };
            self.emit_health(&state.health);

            // attempt is 0-based here, so the first retry waits base_ms.
            let backoff = compute_backoff(
                self.config.backoff_base_ms,
                count - 1,
                self.config.backoff_cap_ms,
            );

            // NOTE(review): should_restart is always `true` — the give-up
            // path exits via the early return above, so the guard below is
            // currently dead code.
            (true, backoff, count)
        };

        if !should_restart {
            return;
        }

        log::warn!(
            "Sidecar crashed (attempt {}/{}), restarting in {:?}",
            restart_count,
            self.config.max_retries,
            backoff
        );

        // Restart on a background thread to avoid blocking the stdout reader
        let outer_sink = self.outer_sink.clone();
        let state = self.state.clone();
        let sidecar_config = self.sidecar_config.clone();
        let supervisor_state = self.state.clone();
        let stability_window = self.config.stability_window;
        let max_retries = self.config.max_retries;
        let backoff_base_ms = self.config.backoff_base_ms;
        let backoff_cap_ms = self.config.backoff_cap_ms;

        std::thread::spawn(move || {
            std::thread::sleep(backoff);

            // Create a new SidecarManager that shares our supervisor sink.
            // We need a new interceptor sink to capture the next exit event.
            let new_state = state.clone();
            let new_outer = outer_sink.clone();
            let new_sidecar_config = sidecar_config.clone();

            let interceptor: Arc<dyn EventSink> = Arc::new(SupervisorSink {
                outer_sink: new_outer.clone(),
                state: new_state.clone(),
                config: SupervisorConfig {
                    max_retries,
                    backoff_base_ms,
                    backoff_cap_ms,
                    stability_window,
                },
                sidecar_config: new_sidecar_config.clone(),
            });

            let new_manager = SidecarManager::new(interceptor, new_sidecar_config);
            match new_manager.start() {
                Ok(()) => {
                    let mut s = supervisor_state.lock().unwrap();
                    s.last_start_time = Some(Instant::now());
                    log::info!("Sidecar restarted successfully (attempt {})", restart_count);
                    // Note: we cannot replace the manager reference in the outer
                    // SidecarSupervisor from here. The restart creates a new manager
                    // that handles its own lifecycle. The outer manager reference
                    // becomes stale. This is acceptable because:
                    // 1. The new manager's stdout reader will emit through our sink chain
                    // 2. The old manager's child process is already dead
                    // For a more sophisticated approach, the supervisor would need
                    // interior mutability on the manager reference. We do that below.
                    // NOTE(review): `new_manager` is a thread-local dropped when this
                    // closure returns; if `SidecarManager`'s Drop shuts the child
                    // down (the sidecar tests suggest Drop "sends shutdown"), the
                    // restarted process may be torn down immediately — confirm.
                }
                Err(e) => {
                    log::error!("Sidecar restart failed: {}", e);
                    let mut s = supervisor_state.lock().unwrap();
                    s.health = SidecarHealth::Failed {
                        last_error: e.clone(),
                    };
                    // Emit health change + forward exited
                    // (lock released first so the sink never runs under it).
                    drop(s);
                    let health = SidecarHealth::Failed { last_error: e };
                    emit_health_event(&new_outer, &health);
                    new_outer
                        .emit("sidecar-exited", serde_json::Value::Null);
                }
            }
        });
    }

    /// Emit the current health through the wrapped (outer) sink.
    fn emit_health(&self, health: &SidecarHealth) {
        emit_health_event(&self.outer_sink, health);
    }
}
|
||||
|
||||
fn emit_health_event(sink: &Arc<dyn EventSink>, health: &SidecarHealth) {
|
||||
let payload = serde_json::to_value(health).unwrap_or(serde_json::Value::Null);
|
||||
sink.emit("sidecar-health-changed", payload);
|
||||
}
|
||||
|
||||
/// Supervised sidecar process with automatic crash recovery.
///
/// Wraps a `SidecarManager` and intercepts exit events to perform automatic
/// restarts with exponential backoff. Tracks health status and emits
/// `sidecar-health-changed` events.
pub struct SidecarSupervisor {
    // The wrapped manager. NOTE(review): after a crash the restart thread
    // builds a *new* manager and this reference is not replaced — see the
    // comments inside `SupervisorSink::handle_exit`.
    manager: Arc<Mutex<SidecarManager>>,
    // Health/restart bookkeeping, shared with the `SupervisorSink` interceptor.
    state: Arc<Mutex<SupervisorState>>,
    // Destination for health events (the sink the interceptor wraps).
    outer_sink: Arc<dyn EventSink>,
    // Retained for future use; not read after construction.
    #[allow(dead_code)]
    supervisor_config: SupervisorConfig,
    #[allow(dead_code)]
    sidecar_config: SidecarConfig,
}
|
||||
|
||||
impl SidecarSupervisor {
|
||||
pub fn new(
|
||||
sink: Arc<dyn EventSink>,
|
||||
sidecar_config: SidecarConfig,
|
||||
supervisor_config: SupervisorConfig,
|
||||
) -> Self {
|
||||
let state = Arc::new(Mutex::new(SupervisorState::new()));
|
||||
|
||||
let interceptor: Arc<dyn EventSink> = Arc::new(SupervisorSink {
|
||||
outer_sink: sink.clone(),
|
||||
state: state.clone(),
|
||||
config: supervisor_config.clone(),
|
||||
sidecar_config: sidecar_config.clone(),
|
||||
});
|
||||
|
||||
let manager = SidecarManager::new(interceptor, sidecar_config.clone());
|
||||
|
||||
Self {
|
||||
manager: Arc::new(Mutex::new(manager)),
|
||||
state,
|
||||
outer_sink: sink,
|
||||
supervisor_config,
|
||||
sidecar_config,
|
||||
}
|
||||
}
|
||||
|
||||
/// Start the supervised sidecar process.
|
||||
pub fn start(&self) -> Result<(), String> {
|
||||
let manager = self.manager.lock().unwrap();
|
||||
let result = manager.start();
|
||||
if result.is_ok() {
|
||||
let mut state = self.state.lock().unwrap();
|
||||
state.last_start_time = Some(Instant::now());
|
||||
state.health = SidecarHealth::Healthy;
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
/// Send a raw JSON message to the sidecar.
|
||||
pub fn send_message(&self, msg: &serde_json::Value) -> Result<(), String> {
|
||||
self.manager.lock().unwrap().send_message(msg)
|
||||
}
|
||||
|
||||
/// Send an agent query to the sidecar.
|
||||
pub fn query(&self, options: &AgentQueryOptions) -> Result<(), String> {
|
||||
self.manager.lock().unwrap().query(options)
|
||||
}
|
||||
|
||||
/// Stop a specific agent session.
|
||||
pub fn stop_session(&self, session_id: &str) -> Result<(), String> {
|
||||
self.manager.lock().unwrap().stop_session(session_id)
|
||||
}
|
||||
|
||||
/// Check if the sidecar is ready to accept queries.
|
||||
pub fn is_ready(&self) -> bool {
|
||||
self.manager.lock().unwrap().is_ready()
|
||||
}
|
||||
|
||||
/// Shut down the sidecar process.
|
||||
pub fn shutdown(&self) -> Result<(), String> {
|
||||
let mut state = self.state.lock().unwrap();
|
||||
state.health = SidecarHealth::Healthy;
|
||||
state.restart_count = 0;
|
||||
drop(state);
|
||||
self.manager.lock().unwrap().shutdown()
|
||||
}
|
||||
|
||||
/// Get the current health status.
|
||||
pub fn health(&self) -> SidecarHealth {
|
||||
self.state.lock().unwrap().health.clone()
|
||||
}
|
||||
|
||||
/// Get the current restart count.
|
||||
pub fn restart_count(&self) -> u32 {
|
||||
self.state.lock().unwrap().restart_count
|
||||
}
|
||||
|
||||
/// Manually reset the supervisor state (e.g., after user intervention).
|
||||
pub fn reset(&self) {
|
||||
let mut state = self.state.lock().unwrap();
|
||||
state.health = SidecarHealth::Healthy;
|
||||
state.restart_count = 0;
|
||||
state.last_crash_time = None;
|
||||
emit_health_event(&self.outer_sink, &state.health);
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for SidecarSupervisor {
|
||||
fn drop(&mut self) {
|
||||
let _ = self.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::atomic::{AtomicU32, Ordering};

    // ---- compute_backoff tests ----

    #[test]
    fn test_backoff_base_case() {
        let d = compute_backoff(1000, 0, 30_000);
        assert_eq!(d, Duration::from_millis(1000));
    }

    #[test]
    fn test_backoff_exponential() {
        assert_eq!(compute_backoff(1000, 1, 30_000), Duration::from_millis(2000));
        assert_eq!(compute_backoff(1000, 2, 30_000), Duration::from_millis(4000));
        assert_eq!(compute_backoff(1000, 3, 30_000), Duration::from_millis(8000));
        assert_eq!(compute_backoff(1000, 4, 30_000), Duration::from_millis(16000));
    }

    #[test]
    fn test_backoff_capped() {
        assert_eq!(compute_backoff(1000, 5, 30_000), Duration::from_millis(30_000));
        assert_eq!(compute_backoff(1000, 10, 30_000), Duration::from_millis(30_000));
    }

    #[test]
    fn test_backoff_overflow_safe() {
        // Very large attempt should not panic, just cap
        assert_eq!(compute_backoff(1000, 63, 30_000), Duration::from_millis(30_000));
        assert_eq!(compute_backoff(1000, 100, 30_000), Duration::from_millis(30_000));
    }

    #[test]
    fn test_backoff_custom_base() {
        assert_eq!(compute_backoff(500, 0, 10_000), Duration::from_millis(500));
        assert_eq!(compute_backoff(500, 1, 10_000), Duration::from_millis(1000));
        assert_eq!(compute_backoff(500, 5, 10_000), Duration::from_millis(10_000));
    }

    // ---- SidecarHealth serialization tests ----

    #[test]
    fn test_health_serialize_healthy() {
        let h = SidecarHealth::Healthy;
        let json = serde_json::to_value(&h).unwrap();
        assert_eq!(json["status"], "healthy");
    }

    #[test]
    fn test_health_serialize_degraded() {
        let h = SidecarHealth::Degraded { restart_count: 3 };
        let json = serde_json::to_value(&h).unwrap();
        assert_eq!(json["status"], "degraded");
        // Variant fields are not renamed by `rename_all` (which applies to
        // variant names only), so the key stays snake_case.
        assert_eq!(json["restart_count"], 3);
    }

    #[test]
    fn test_health_serialize_failed() {
        let h = SidecarHealth::Failed {
            last_error: "process killed".to_string(),
        };
        let json = serde_json::to_value(&h).unwrap();
        assert_eq!(json["status"], "failed");
        assert_eq!(json["last_error"], "process killed");
    }

    #[test]
    fn test_health_deserialize_roundtrip() {
        let cases = vec![
            SidecarHealth::Healthy,
            SidecarHealth::Degraded { restart_count: 2 },
            SidecarHealth::Failed {
                last_error: "OOM".to_string(),
            },
        ];
        for h in cases {
            let json = serde_json::to_string(&h).unwrap();
            let back: SidecarHealth = serde_json::from_str(&json).unwrap();
            assert_eq!(h, back);
        }
    }

    // ---- SupervisorConfig defaults ----

    #[test]
    fn test_supervisor_config_defaults() {
        let cfg = SupervisorConfig::default();
        assert_eq!(cfg.max_retries, 5);
        assert_eq!(cfg.backoff_base_ms, 1000);
        assert_eq!(cfg.backoff_cap_ms, 30_000);
        assert_eq!(cfg.stability_window, Duration::from_secs(300));
    }

    // ---- SupervisorState tests ----

    #[test]
    fn test_initial_state() {
        let state = SupervisorState::new();
        assert_eq!(state.health, SidecarHealth::Healthy);
        assert_eq!(state.restart_count, 0);
        assert!(state.last_crash_time.is_none());
        assert!(state.last_start_time.is_none());
    }

    // ---- Event interception tests (using mock sink) ----

    /// Mock EventSink that records emitted events.
    struct MockSink {
        // Every (event name, payload) pair emitted, in order.
        events: Mutex<Vec<(String, serde_json::Value)>>,
        // Count of "sidecar-exited" emissions (kept separately for cheap checks).
        exit_count: AtomicU32,
    }

    impl MockSink {
        fn new() -> Self {
            Self {
                events: Mutex::new(Vec::new()),
                exit_count: AtomicU32::new(0),
            }
        }

        // Snapshot of all recorded events.
        fn events(&self) -> Vec<(String, serde_json::Value)> {
            self.events.lock().unwrap().clone()
        }

        // Decoded payloads of all "sidecar-health-changed" events, in order.
        fn health_events(&self) -> Vec<SidecarHealth> {
            self.events
                .lock()
                .unwrap()
                .iter()
                .filter(|(name, _)| name == "sidecar-health-changed")
                .filter_map(|(_, payload)| serde_json::from_value(payload.clone()).ok())
                .collect()
        }
    }

    impl EventSink for MockSink {
        fn emit(&self, event: &str, payload: serde_json::Value) {
            if event == "sidecar-exited" {
                self.exit_count.fetch_add(1, Ordering::SeqCst);
            }
            self.events
                .lock()
                .unwrap()
                .push((event.to_string(), payload));
        }
    }

    #[test]
    fn test_non_exit_events_forwarded() {
        let outer = Arc::new(MockSink::new());
        let state = Arc::new(Mutex::new(SupervisorState::new()));
        let sink = SupervisorSink {
            outer_sink: outer.clone(),
            state,
            config: SupervisorConfig::default(),
            sidecar_config: SidecarConfig {
                search_paths: vec![],
                env_overrides: Default::default(),
                sandbox: Default::default(),
            },
        };

        let payload = serde_json::json!({"type": "ready"});
        sink.emit("sidecar-message", payload.clone());

        // Forwarded verbatim: same name, same payload.
        let events = outer.events();
        assert_eq!(events.len(), 1);
        assert_eq!(events[0].0, "sidecar-message");
        assert_eq!(events[0].1, payload);
    }

    #[test]
    fn test_exit_triggers_degraded_health() {
        let outer = Arc::new(MockSink::new());
        let state = Arc::new(Mutex::new(SupervisorState::new()));
        let sink = SupervisorSink {
            outer_sink: outer.clone(),
            state: state.clone(),
            config: SupervisorConfig {
                max_retries: 5,
                backoff_base_ms: 100,
                backoff_cap_ms: 1000,
                stability_window: Duration::from_secs(300),
            },
            sidecar_config: SidecarConfig {
                search_paths: vec![],
                env_overrides: Default::default(),
                sandbox: Default::default(),
            },
        };

        // Simulate exit
        sink.emit("sidecar-exited", serde_json::Value::Null);

        let s = state.lock().unwrap();
        assert_eq!(s.restart_count, 1);
        assert!(s.last_crash_time.is_some());
        match &s.health {
            SidecarHealth::Degraded { restart_count } => assert_eq!(*restart_count, 1),
            other => panic!("Expected Degraded, got {:?}", other),
        }

        // Should have emitted health-changed event
        // NOTE(review): handle_exit also spawns a delayed restart thread
        // (backoff 100 ms) that will emit further events; this assertion
        // races with it if the test runs slowly — consider a prefix check.
        let health_events = outer.health_events();
        assert_eq!(health_events.len(), 1);
        assert_eq!(
            health_events[0],
            SidecarHealth::Degraded { restart_count: 1 }
        );
    }

    #[test]
    fn test_exit_exceeding_max_retries_fails() {
        let outer = Arc::new(MockSink::new());
        let state = Arc::new(Mutex::new(SupervisorState {
            health: SidecarHealth::Degraded { restart_count: 5 },
            restart_count: 5,
            last_crash_time: Some(Instant::now()),
            last_start_time: Some(Instant::now()),
        }));

        let sink = SupervisorSink {
            outer_sink: outer.clone(),
            state: state.clone(),
            config: SupervisorConfig {
                max_retries: 5,
                ..SupervisorConfig::default()
            },
            sidecar_config: SidecarConfig {
                search_paths: vec![],
                env_overrides: Default::default(),
                sandbox: Default::default(),
            },
        };

        // This is attempt 6, which exceeds max_retries=5
        sink.emit("sidecar-exited", serde_json::Value::Null);

        let s = state.lock().unwrap();
        assert_eq!(s.restart_count, 6);
        match &s.health {
            SidecarHealth::Failed { last_error } => {
                assert!(last_error.contains("Exceeded max retries"));
            }
            other => panic!("Expected Failed, got {:?}", other),
        }

        // Should have emitted health-changed with Failed + forwarded sidecar-exited
        let events = outer.events();
        let health_changed = events
            .iter()
            .filter(|(name, _)| name == "sidecar-health-changed")
            .count();
        let exited = events
            .iter()
            .filter(|(name, _)| name == "sidecar-exited")
            .count();
        assert_eq!(health_changed, 1);
        assert_eq!(exited, 1); // Forwarded after max retries
    }

    #[test]
    fn test_stability_window_resets_count() {
        let outer = Arc::new(MockSink::new());
        // Simulate: started 6 minutes ago, ran stable
        let state = Arc::new(Mutex::new(SupervisorState {
            health: SidecarHealth::Degraded { restart_count: 3 },
            restart_count: 3,
            last_crash_time: Some(Instant::now() - Duration::from_secs(400)),
            last_start_time: Some(Instant::now() - Duration::from_secs(360)),
        }));

        let sink = SupervisorSink {
            outer_sink: outer.clone(),
            state: state.clone(),
            config: SupervisorConfig {
                max_retries: 5,
                stability_window: Duration::from_secs(300), // 5 min
                backoff_base_ms: 100,
                backoff_cap_ms: 1000,
            },
            sidecar_config: SidecarConfig {
                search_paths: vec![],
                env_overrides: Default::default(),
                sandbox: Default::default(),
            },
        };

        sink.emit("sidecar-exited", serde_json::Value::Null);

        let s = state.lock().unwrap();
        // Count was reset to 0 then incremented to 1
        assert_eq!(s.restart_count, 1);
        match &s.health {
            SidecarHealth::Degraded { restart_count } => assert_eq!(*restart_count, 1),
            other => panic!("Expected Degraded(1), got {:?}", other),
        }
    }

    #[test]
    fn test_multiple_crashes_increment_count() {
        let outer = Arc::new(MockSink::new());
        let state = Arc::new(Mutex::new(SupervisorState::new()));

        let sink = SupervisorSink {
            outer_sink: outer.clone(),
            state: state.clone(),
            config: SupervisorConfig {
                max_retries: 10,
                backoff_base_ms: 100,
                backoff_cap_ms: 1000,
                stability_window: Duration::from_secs(300),
            },
            sidecar_config: SidecarConfig {
                search_paths: vec![],
                env_overrides: Default::default(),
                sandbox: Default::default(),
            },
        };

        for i in 1..=3 {
            sink.emit("sidecar-exited", serde_json::Value::Null);
            let s = state.lock().unwrap();
            assert_eq!(s.restart_count, i);
        }

        // NOTE(review): the background restart threads spawned above emit
        // additional events after their backoff elapses; the exact-count
        // assertion below can race with them on a slow machine.
        let health_events = outer.health_events();
        assert_eq!(health_events.len(), 3);
        assert_eq!(
            health_events[2],
            SidecarHealth::Degraded { restart_count: 3 }
        );
    }

    #[test]
    fn test_health_equality() {
        assert_eq!(SidecarHealth::Healthy, SidecarHealth::Healthy);
        assert_eq!(
            SidecarHealth::Degraded { restart_count: 2 },
            SidecarHealth::Degraded { restart_count: 2 }
        );
        assert_ne!(
            SidecarHealth::Degraded { restart_count: 1 },
            SidecarHealth::Degraded { restart_count: 2 }
        );
        assert_ne!(SidecarHealth::Healthy, SidecarHealth::Failed {
            last_error: String::new(),
        });
    }
}
|
||||
24
agor-relay/Cargo.toml
Normal file
24
agor-relay/Cargo.toml
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
[package]
|
||||
name = "agor-relay"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "Remote relay server for Agents Orchestrator multi-machine support"
|
||||
license = "MIT"
|
||||
|
||||
[[bin]]
|
||||
name = "agor-relay"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
agor-core = { path = "../agor-core" }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
log = "0.4"
|
||||
env_logger = "0.11"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-tungstenite = { version = "0.21", features = ["native-tls"] }
|
||||
tokio-native-tls = "0.3"
|
||||
native-tls = "0.2"
|
||||
futures-util = "0.3"
|
||||
clap = { version = "4", features = ["derive"] }
|
||||
uuid = { version = "1", features = ["v4"] }
|
||||
441
agor-relay/src/main.rs
Normal file
441
agor-relay/src/main.rs
Normal file
|
|
@ -0,0 +1,441 @@
|
|||
// agor-relay — WebSocket relay server for remote PTY and agent management
|
||||
|
||||
use agor_core::event::EventSink;
|
||||
use agor_core::pty::{PtyManager, PtyOptions};
|
||||
use agor_core::sidecar::{AgentQueryOptions, SidecarConfig, SidecarManager};
|
||||
use clap::Parser;
|
||||
use futures_util::{SinkExt, StreamExt};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_tungstenite::tungstenite::Message;
|
||||
use tokio_tungstenite::tungstenite::http;
|
||||
|
||||
// Command-line interface for the relay binary.
//
// NOTE: the `///` doc comments on the fields below double as clap help
// text — editing them changes the program's --help output.
#[derive(Parser)]
#[command(name = "agor-relay", about = "Agents Orchestrator remote relay server")]
struct Cli {
    /// Port to listen on
    #[arg(short, long, default_value = "9750")]
    port: u16,

    /// Authentication token (required)
    #[arg(short, long)]
    token: String,

    /// Allow insecure ws:// connections (dev mode only)
    #[arg(long, default_value = "false")]
    insecure: bool,

    /// TLS certificate file (PEM format). Enables wss:// when provided with --tls-key.
    #[arg(long)]
    tls_cert: Option<String>,

    /// TLS private key file (PEM format). Required when --tls-cert is provided.
    #[arg(long)]
    tls_key: Option<String>,

    /// Additional sidecar search paths
    #[arg(long)]
    sidecar_path: Vec<String>,
}
|
||||
|
||||
/// Inbound request envelope from a WebSocket client.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct RelayCommand {
    // Request identifier. NOTE(review): presumably echoed back so the
    // client can correlate responses — the handler is not visible here.
    id: String,
    // Command discriminator; serialized/deserialized as the JSON key "type".
    #[serde(rename = "type")]
    type_: String,
    // Command-specific arguments, left as raw JSON.
    payload: serde_json::Value,
}
|
||||
|
||||
/// Outbound event envelope sent to a WebSocket client.
///
/// Produced by `WsEventSink::emit`: `type_` is the core event name with
/// dashes converted to underscores, `session_id` is split out of
/// per-session PTY event names ("pty-data-{id}" / "pty-exit-{id}"), and
/// `payload` is omitted from the JSON when absent.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct RelayEvent {
    #[serde(rename = "type")]
    type_: String,
    #[serde(rename = "sessionId", skip_serializing_if = "Option::is_none")]
    session_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    payload: Option<serde_json::Value>,
}
|
||||
|
||||
/// EventSink that sends events as JSON over an mpsc channel (forwarded to WebSocket).
struct WsEventSink {
    // Unbounded queue of events; presumably drained by the socket writer
    // task — the forwarding loop is not visible in this chunk.
    tx: mpsc::UnboundedSender<RelayEvent>,
}
|
||||
|
||||
impl EventSink for WsEventSink {
|
||||
fn emit(&self, event: &str, payload: serde_json::Value) {
|
||||
// Parse event name to extract session ID for PTY events like "pty-data-{id}"
|
||||
let (type_, session_id) = if let Some(id) = event.strip_prefix("pty-data-") {
|
||||
("pty_data".to_string(), Some(id.to_string()))
|
||||
} else if let Some(id) = event.strip_prefix("pty-exit-") {
|
||||
("pty_exit".to_string(), Some(id.to_string()))
|
||||
} else {
|
||||
(event.replace('-', "_"), None)
|
||||
};
|
||||
|
||||
let _ = self.tx.send(RelayEvent {
|
||||
type_,
|
||||
session_id,
|
||||
payload: if payload.is_null() { None } else { Some(payload) },
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a native-tls TLS acceptor from PEM cert and key files.
|
||||
fn build_tls_acceptor(cert_path: &str, key_path: &str) -> Result<tokio_native_tls::TlsAcceptor, String> {
|
||||
let cert_pem = std::fs::read(cert_path)
|
||||
.map_err(|e| format!("Failed to read TLS cert '{}': {}", cert_path, e))?;
|
||||
let key_pem = std::fs::read(key_path)
|
||||
.map_err(|e| format!("Failed to read TLS key '{}': {}", key_path, e))?;
|
||||
|
||||
let identity = native_tls::Identity::from_pkcs8(&cert_pem, &key_pem)
|
||||
.map_err(|e| format!("Failed to parse TLS identity (cert+key): {e}"))?;
|
||||
|
||||
let tls_acceptor = native_tls::TlsAcceptor::builder(identity)
|
||||
.min_protocol_version(Some(native_tls::Protocol::Tlsv12))
|
||||
.build()
|
||||
.map_err(|e| format!("Failed to build TLS acceptor: {e}"))?;
|
||||
|
||||
Ok(tokio_native_tls::TlsAcceptor::from(tls_acceptor))
|
||||
}
|
||||
|
||||
#[tokio::main]
async fn main() {
    env_logger::init();
    let cli = Cli::parse();

    // Validate TLS args
    // TLS is all-or-nothing: either both --tls-cert and --tls-key are given,
    // or neither. A bad cert/key aborts startup immediately via expect().
    let tls_acceptor = match (&cli.tls_cert, &cli.tls_key) {
        (Some(cert), Some(key)) => {
            let acceptor = build_tls_acceptor(cert, key).expect("TLS setup failed");
            log::info!("TLS enabled (cert: {cert}, key: {key})");
            Some(Arc::new(acceptor))
        }
        (Some(_), None) | (None, Some(_)) => {
            eprintln!("Error: --tls-cert and --tls-key must both be provided");
            std::process::exit(1);
        }
        (None, None) => {
            // Plaintext is allowed but loudly discouraged unless the
            // operator opted in with --insecure.
            if !cli.insecure {
                log::warn!("Running without TLS. Use --tls-cert/--tls-key for encrypted connections, or --insecure to suppress this warning.");
            }
            None
        }
    };

    // Bind on all interfaces at the configured port.
    let addr = SocketAddr::from(([0, 0, 0, 0], cli.port));
    let listener = TcpListener::bind(&addr).await.expect("Failed to bind");
    let protocol = if tls_acceptor.is_some() { "wss" } else { "ws" };
    log::info!("agor-relay listening on {protocol}://{addr}");

    // Build sidecar config
    // CLI-provided paths take priority; defaults are appended after them.
    let mut search_paths: Vec<std::path::PathBuf> = cli
        .sidecar_path
        .iter()
        .map(std::path::PathBuf::from)
        .collect();
    // Default: look in current dir and next to binary
    if let Ok(exe_dir) = std::env::current_exe().map(|p| p.parent().unwrap().to_path_buf()) {
        search_paths.push(exe_dir.join("sidecar"));
    }
    search_paths.push(std::path::PathBuf::from("sidecar"));

    let sidecar_config = SidecarConfig {
        search_paths,
        env_overrides: std::collections::HashMap::new(),
        sandbox: Default::default(),
    };
    // Shared token, cloned cheaply into each connection task.
    let token = Arc::new(cli.token);

    // Rate limiting state for auth failures
    // Maps peer address -> (failure count, time of last failure).
    let auth_failures: Arc<tokio::sync::Mutex<std::collections::HashMap<SocketAddr, (u32, std::time::Instant)>>> =
        Arc::new(tokio::sync::Mutex::new(std::collections::HashMap::new()));

    // Accept loop: one spawned task per incoming TCP connection.
    while let Ok((stream, peer)) = listener.accept().await {
        let token = token.clone();
        let sidecar_config = sidecar_config.clone();
        let auth_failures = auth_failures.clone();
        let tls = tls_acceptor.clone();

        tokio::spawn(async move {
            // Check rate limit
            // Peers with >= 10 auth failures are refused for 300s; the
            // counter is cleared once the cooldown has elapsed.
            {
                let mut failures = auth_failures.lock().await;
                if let Some((count, last)) = failures.get(&peer) {
                    if *count >= 10 && last.elapsed() < std::time::Duration::from_secs(300) {
                        log::warn!("Rate limited: {peer}");
                        return;
                    }
                    // Reset after cooldown
                    if last.elapsed() >= std::time::Duration::from_secs(300) {
                        failures.remove(&peer);
                    }
                }
            }

            if let Some(tls_acceptor) = tls {
                // TLS path: wrap TCP stream with TLS, then upgrade to WebSocket
                match tls_acceptor.accept(stream).await {
                    Ok(tls_stream) => {
                        if let Err(e) = handle_tls_connection(tls_stream, peer, &token, &sidecar_config, &auth_failures).await {
                            log::error!("TLS connection error from {peer}: {e}");
                        }
                    }
                    Err(e) => {
                        log::error!("TLS handshake failed from {peer}: {e}");
                    }
                }
            } else {
                // Plain WebSocket path
                if let Err(e) = handle_connection(stream, peer, &token, &sidecar_config, &auth_failures).await {
                    log::error!("Connection error from {peer}: {e}");
                }
            }
        });
    }
}
|
||||
|
||||
async fn handle_connection(
|
||||
stream: TcpStream,
|
||||
peer: SocketAddr,
|
||||
expected_token: &str,
|
||||
sidecar_config: &SidecarConfig,
|
||||
auth_failures: &tokio::sync::Mutex<std::collections::HashMap<SocketAddr, (u32, std::time::Instant)>>,
|
||||
) -> Result<(), String> {
|
||||
let ws_stream = accept_ws_with_auth(stream, expected_token, peer, auth_failures).await?;
|
||||
run_ws_session(ws_stream, peer, sidecar_config).await
|
||||
}
|
||||
|
||||
async fn handle_tls_connection(
|
||||
stream: tokio_native_tls::TlsStream<TcpStream>,
|
||||
peer: SocketAddr,
|
||||
expected_token: &str,
|
||||
sidecar_config: &SidecarConfig,
|
||||
auth_failures: &tokio::sync::Mutex<std::collections::HashMap<SocketAddr, (u32, std::time::Instant)>>,
|
||||
) -> Result<(), String> {
|
||||
let ws_stream = accept_ws_with_auth(stream, expected_token, peer, auth_failures).await?;
|
||||
run_ws_session(ws_stream, peer, sidecar_config).await
|
||||
}
|
||||
|
||||
/// Accept a WebSocket connection with Bearer token auth validation.
|
||||
async fn accept_ws_with_auth<S>(
|
||||
stream: S,
|
||||
expected_token: &str,
|
||||
peer: SocketAddr,
|
||||
auth_failures: &tokio::sync::Mutex<std::collections::HashMap<SocketAddr, (u32, std::time::Instant)>>,
|
||||
) -> Result<tokio_tungstenite::WebSocketStream<S>, String>
|
||||
where
|
||||
S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin,
|
||||
{
|
||||
let expected = format!("Bearer {expected_token}");
|
||||
tokio_tungstenite::accept_hdr_async(stream, |req: &http::Request<()>, response: http::Response<()>| {
|
||||
let auth = req.headers().get("authorization").and_then(|v| v.to_str().ok());
|
||||
match auth {
|
||||
Some(value) if value == expected => Ok(response),
|
||||
_ => {
|
||||
Err(http::Response::builder()
|
||||
.status(http::StatusCode::UNAUTHORIZED)
|
||||
.body(Some("Invalid token".to_string()))
|
||||
.unwrap())
|
||||
}
|
||||
}
|
||||
})
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let _ = auth_failures.try_lock().map(|mut f| {
|
||||
let entry = f.entry(peer).or_insert((0, std::time::Instant::now()));
|
||||
entry.0 += 1;
|
||||
entry.1 = std::time::Instant::now();
|
||||
});
|
||||
format!("WebSocket handshake failed: {e}")
|
||||
})
|
||||
}
|
||||
|
||||
/// Run the WebSocket session (managers, event forwarding, command processing).
|
||||
async fn run_ws_session<S>(
|
||||
ws_stream: tokio_tungstenite::WebSocketStream<S>,
|
||||
peer: SocketAddr,
|
||||
sidecar_config: &SidecarConfig,
|
||||
) -> Result<(), String>
|
||||
where
|
||||
S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
log::info!("Client connected: {peer}");
|
||||
|
||||
// Set up event channel — shared between EventSink and command response sender
|
||||
let (event_tx, mut event_rx) = mpsc::unbounded_channel::<RelayEvent>();
|
||||
let sink_tx = event_tx.clone();
|
||||
let sink: Arc<dyn EventSink> = Arc::new(WsEventSink { tx: event_tx });
|
||||
|
||||
// Create managers for this connection
|
||||
let pty_manager = Arc::new(PtyManager::new(sink.clone()));
|
||||
let sidecar_manager = Arc::new(SidecarManager::new(sink, sidecar_config.clone()));
|
||||
|
||||
// Start sidecar
|
||||
if let Err(e) = sidecar_manager.start() {
|
||||
log::warn!("Sidecar startup failed for {peer}: {e}");
|
||||
}
|
||||
|
||||
let (mut ws_tx, mut ws_rx) = ws_stream.split();
|
||||
|
||||
// Send ready signal
|
||||
let ready_event = RelayEvent {
|
||||
type_: "ready".to_string(),
|
||||
session_id: None,
|
||||
payload: None,
|
||||
};
|
||||
let _ = ws_tx
|
||||
.send(Message::Text(serde_json::to_string(&ready_event).unwrap()))
|
||||
.await;
|
||||
|
||||
// Forward events to WebSocket
|
||||
let event_writer = tokio::spawn(async move {
|
||||
while let Some(event) = event_rx.recv().await {
|
||||
if let Ok(json) = serde_json::to_string(&event) {
|
||||
if ws_tx.send(Message::Text(json)).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Process incoming commands
|
||||
let pty_mgr = pty_manager.clone();
|
||||
let sidecar_mgr = sidecar_manager.clone();
|
||||
let response_tx = sink_tx;
|
||||
let command_reader = tokio::spawn(async move {
|
||||
while let Some(msg) = ws_rx.next().await {
|
||||
match msg {
|
||||
Ok(Message::Text(text)) => {
|
||||
if let Ok(cmd) = serde_json::from_str::<RelayCommand>(&text) {
|
||||
handle_relay_command(&pty_mgr, &sidecar_mgr, &response_tx, cmd).await;
|
||||
}
|
||||
}
|
||||
Ok(Message::Close(_)) => break,
|
||||
Err(e) => {
|
||||
log::error!("WebSocket read error from {peer}: {e}");
|
||||
break;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Wait for either task to finish
|
||||
tokio::select! {
|
||||
_ = event_writer => {}
|
||||
_ = command_reader => {}
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
let _ = sidecar_manager.shutdown();
|
||||
log::info!("Client disconnected: {peer}");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Dispatch one client command to the appropriate manager.
///
/// Responses and errors are reported asynchronously through `response_tx`
/// (the same channel that carries manager events to the client); commands
/// with missing/malformed payload fields are silently ignored except where
/// an explicit error is sent via `send_error`.
async fn handle_relay_command(
    pty: &PtyManager,
    sidecar: &SidecarManager,
    response_tx: &mpsc::UnboundedSender<RelayEvent>,
    cmd: RelayCommand,
) {
    match cmd.type_.as_str() {
        // Liveness check: reply immediately with "pong".
        "ping" => {
            let _ = response_tx.send(RelayEvent {
                type_: "pong".to_string(),
                session_id: None,
                payload: None,
            });
        }
        // Spawn a new PTY; on success the "pty_created" event carries the
        // new PTY id plus the originating command id so the client can
        // correlate the response.
        "pty_create" => {
            let options: PtyOptions = match serde_json::from_value(cmd.payload) {
                Ok(opts) => opts,
                Err(e) => {
                    send_error(response_tx, &cmd.id, &format!("Invalid pty_create payload: {e}"));
                    return;
                }
            };
            match pty.spawn(options) {
                Ok(pty_id) => {
                    log::info!("Spawned remote PTY: {pty_id}");
                    let _ = response_tx.send(RelayEvent {
                        type_: "pty_created".to_string(),
                        session_id: Some(pty_id),
                        payload: Some(serde_json::json!({ "commandId": cmd.id })),
                    });
                }
                Err(e) => send_error(response_tx, &cmd.id, &format!("Failed to spawn PTY: {e}")),
            }
        }
        // Write raw input to an existing PTY. Missing fields are ignored.
        "pty_write" => {
            if let (Some(id), Some(data)) = (
                cmd.payload.get("id").and_then(|v| v.as_str()),
                cmd.payload.get("data").and_then(|v| v.as_str()),
            ) {
                if let Err(e) = pty.write(id, data) {
                    send_error(response_tx, &cmd.id, &format!("PTY write error: {e}"));
                }
            }
        }
        // Resize an existing PTY; cols/rows arrive as u64 and are
        // narrowed to u16 for the PTY API.
        "pty_resize" => {
            if let (Some(id), Some(cols), Some(rows)) = (
                cmd.payload.get("id").and_then(|v| v.as_str()),
                cmd.payload.get("cols").and_then(|v| v.as_u64()),
                cmd.payload.get("rows").and_then(|v| v.as_u64()),
            ) {
                if let Err(e) = pty.resize(id, cols as u16, rows as u16) {
                    send_error(response_tx, &cmd.id, &format!("PTY resize error: {e}"));
                }
            }
        }
        // Kill a PTY by id.
        "pty_close" => {
            if let Some(id) = cmd.payload.get("id").and_then(|v| v.as_str()) {
                if let Err(e) = pty.kill(id) {
                    send_error(response_tx, &cmd.id, &format!("PTY kill error: {e}"));
                }
            }
        }
        // Forward an agent query to the sidecar; its results flow back
        // through the shared event channel, not as a direct reply here.
        "agent_query" => {
            let options: AgentQueryOptions = match serde_json::from_value(cmd.payload) {
                Ok(opts) => opts,
                Err(e) => {
                    send_error(response_tx, &cmd.id, &format!("Invalid agent_query payload: {e}"));
                    return;
                }
            };
            if let Err(e) = sidecar.query(&options) {
                send_error(response_tx, &cmd.id, &format!("Agent query error: {e}"));
            }
        }
        // Stop a running agent session by its sidecar session id.
        "agent_stop" => {
            if let Some(session_id) = cmd.payload.get("sessionId").and_then(|v| v.as_str()) {
                if let Err(e) = sidecar.stop_session(session_id) {
                    send_error(response_tx, &cmd.id, &format!("Agent stop error: {e}"));
                }
            }
        }
        // Restart the sidecar process for this connection.
        "sidecar_restart" => {
            if let Err(e) = sidecar.restart() {
                send_error(response_tx, &cmd.id, &format!("Sidecar restart error: {e}"));
            }
        }
        // Unknown command types are logged and dropped, keeping the
        // protocol forward-compatible.
        other => {
            log::warn!("Unknown relay command: {other}");
        }
    }
}
|
||||
|
||||
fn send_error(tx: &mpsc::UnboundedSender<RelayEvent>, cmd_id: &str, message: &str) {
|
||||
log::error!("{message}");
|
||||
let _ = tx.send(RelayEvent {
|
||||
type_: "error".to_string(),
|
||||
session_id: None,
|
||||
payload: Some(serde_json::json!({
|
||||
"commandId": cmd_id,
|
||||
"message": message,
|
||||
})),
|
||||
});
|
||||
}
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
{
|
||||
"name": "bterminal-v2",
|
||||
"name": "agents-orchestrator",
|
||||
"private": true,
|
||||
"version": "0.1.0",
|
||||
"type": "module",
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ if $RUN_E2E; then
|
|||
step "E2E tests (WebDriverIO + tauri-driver)"
|
||||
|
||||
# Check for built binary
|
||||
BINARY=$(find "$V2_DIR/src-tauri/target" -name "bterminal*" -type f -executable -path "*/release/*" 2>/dev/null | head -1)
|
||||
BINARY=$(find "$V2_DIR/src-tauri/target" -name "agor*" -type f -executable -path "*/release/*" 2>/dev/null | head -1)
|
||||
if [ -z "$BINARY" ]; then
|
||||
echo -e "${YELLOW}⚠ No release binary found. Run 'npm run tauri build' first.${RESET}"
|
||||
fail "E2E (no binary)"
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
{
|
||||
"name": "bterminal-sidecar",
|
||||
"name": "agor-sidecar",
|
||||
"private": true,
|
||||
"version": "0.1.0",
|
||||
"type": "module",
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ crate-type = ["staticlib", "cdylib", "rlib"]
|
|||
tauri-build = { version = "2.5.6", features = [] }
|
||||
|
||||
[dependencies]
|
||||
bterminal-core = { path = "../bterminal-core" }
|
||||
agor-core = { path = "../agor-core" }
|
||||
serde_json = "1.0"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
log = "0.4"
|
||||
|
|
@ -44,6 +44,11 @@ native-tls = "0.2"
|
|||
tokio-native-tls = "0.3"
|
||||
sha2 = "0.10"
|
||||
hex = "0.4"
|
||||
agor-pro = { path = "../agor-pro", optional = true }
|
||||
|
||||
[features]
|
||||
default = []
|
||||
pro = ["dep:agor-pro"]
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3"
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
// btmsg — Access to btmsg SQLite database
|
||||
// Database at ~/.local/share/bterminal/btmsg.db (created by btmsg CLI)
|
||||
// Database at ~/.local/share/agor/btmsg.db (created by btmsg CLI)
|
||||
// Path configurable via init() for test isolation.
|
||||
|
||||
use rusqlite::{params, Connection, OpenFlags};
|
||||
|
|
@ -19,7 +19,7 @@ fn db_path() -> PathBuf {
|
|||
DB_PATH.get().cloned().unwrap_or_else(|| {
|
||||
dirs::data_dir()
|
||||
.unwrap_or_else(|| PathBuf::from("."))
|
||||
.join("bterminal")
|
||||
.join("agor")
|
||||
.join("btmsg.db")
|
||||
})
|
||||
}
|
||||
|
|
@ -1897,7 +1897,7 @@ mod tests {
|
|||
|
||||
#[test]
|
||||
fn test_checkpoint_wal_nonexistent_db_is_ok() {
|
||||
let result = crate::checkpoint_wal(std::path::Path::new("/tmp/nonexistent_bterminal_test.db"));
|
||||
let result = crate::checkpoint_wal(std::path::Path::new("/tmp/nonexistent_agor_test.db"));
|
||||
assert!(result.is_ok(), "checkpoint_wal should return Ok for missing DB");
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ fn db_path() -> PathBuf {
|
|||
DB_PATH.get().cloned().unwrap_or_else(|| {
|
||||
dirs::data_dir()
|
||||
.unwrap_or_else(|| PathBuf::from("."))
|
||||
.join("bterminal")
|
||||
.join("agor")
|
||||
.join("btmsg.db")
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
use tauri::State;
|
||||
use crate::AppState;
|
||||
use crate::sidecar::AgentQueryOptions;
|
||||
use bterminal_core::sandbox::SandboxConfig;
|
||||
use agor_core::sandbox::SandboxConfig;
|
||||
|
||||
#[tauri::command]
|
||||
#[tracing::instrument(skip(state, options), fields(session_id = %options.session_id))]
|
||||
|
|
|
|||
|
|
@ -31,7 +31,7 @@ pub fn open_url(url: String) -> Result<(), String> {
|
|||
|
||||
#[tauri::command]
|
||||
pub fn is_test_mode() -> bool {
|
||||
std::env::var("BTERMINAL_TEST").map_or(false, |v| v == "1")
|
||||
std::env::var("AGOR_TEST").map_or(false, |v| v == "1")
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
use bterminal_core::event::EventSink;
|
||||
use agor_core::event::EventSink;
|
||||
use tauri::{AppHandle, Emitter};
|
||||
|
||||
/// Bridges bterminal-core's EventSink trait to Tauri's event system.
|
||||
/// Bridges agor-core's EventSink trait to Tauri's event system.
|
||||
pub struct TauriEventSink(pub AppHandle);
|
||||
|
||||
impl EventSink for TauriEventSink {
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ impl ProjectFsWatcher {
|
|||
cwd: &str,
|
||||
) -> Result<(), String> {
|
||||
// In test mode, skip inotify watchers to avoid resource contention and flaky events
|
||||
if std::env::var("BTERMINAL_TEST").map_or(false, |v| v == "1") {
|
||||
if std::env::var("AGOR_TEST").map_or(false, |v| v == "1") {
|
||||
log::info!("Test mode: skipping fs watcher for project {project_id}");
|
||||
return Ok(());
|
||||
}
|
||||
|
|
@ -333,7 +333,7 @@ mod tests {
|
|||
|
||||
#[test]
|
||||
fn test_count_watched_dirs_tempdir() {
|
||||
let tmp = std::env::temp_dir().join("bterminal_test_count_dirs");
|
||||
let tmp = std::env::temp_dir().join("agor_test_count_dirs");
|
||||
let _ = std::fs::remove_dir_all(&tmp);
|
||||
std::fs::create_dir_all(tmp.join("src/lib")).unwrap();
|
||||
std::fs::create_dir_all(tmp.join("node_modules/pkg")).unwrap(); // should be skipped
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
// Project group configuration
|
||||
// Reads/writes ~/.config/bterminal/groups.json
|
||||
// Reads/writes ~/.config/agor/groups.json
|
||||
// Path configurable via init() for test isolation.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
|
@ -99,7 +99,7 @@ fn config_path() -> PathBuf {
|
|||
CONFIG_PATH.get().cloned().unwrap_or_else(|| {
|
||||
dirs::config_dir()
|
||||
.unwrap_or_else(|| PathBuf::from("."))
|
||||
.join("bterminal")
|
||||
.join("agor")
|
||||
.join("groups.json")
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ mod session;
|
|||
mod telemetry;
|
||||
mod watcher;
|
||||
|
||||
use bterminal_core::config::AppConfig;
|
||||
use agor_core::config::AppConfig;
|
||||
use event_sink::TauriEventSink;
|
||||
use pty::PtyManager;
|
||||
use remote::RemoteManager;
|
||||
|
|
@ -145,7 +145,7 @@ pub fn run() {
|
|||
// Force dark GTK theme for native dialogs (file chooser, etc.)
|
||||
std::env::set_var("GTK_THEME", "Adwaita:dark");
|
||||
|
||||
// Resolve all paths via AppConfig (respects BTERMINAL_TEST_* env vars)
|
||||
// Resolve all paths via AppConfig (respects AGOR_TEST_* env vars)
|
||||
let app_config = AppConfig::from_env();
|
||||
if app_config.is_test_mode() {
|
||||
log::info!(
|
||||
|
|
@ -319,6 +319,8 @@ pub fn run() {
|
|||
.plugin(tauri_plugin_updater::Builder::new().build())
|
||||
.plugin(tauri_plugin_dialog::init())
|
||||
.setup(move |app| {
|
||||
#[cfg(feature = "pro")]
|
||||
app.handle().plugin(agor_pro::init())?;
|
||||
// Note: tauri-plugin-log is NOT initialized here because telemetry::init()
|
||||
// already sets up tracing-subscriber (which bridges the `log` crate via
|
||||
// tracing's compatibility layer). Adding plugin-log would panic with
|
||||
|
|
@ -327,7 +329,7 @@ pub fn run() {
|
|||
let config = app_config_arc.clone();
|
||||
|
||||
// Create TauriEventSink for core managers
|
||||
let sink: Arc<dyn bterminal_core::event::EventSink> =
|
||||
let sink: Arc<dyn agor_core::event::EventSink> =
|
||||
Arc::new(TauriEventSink(app.handle().clone()));
|
||||
|
||||
// Build sidecar config from Tauri paths
|
||||
|
|
@ -351,12 +353,12 @@ pub fn run() {
|
|||
// Forward test mode env vars to sidecar processes
|
||||
let mut env_overrides = std::collections::HashMap::new();
|
||||
if config.is_test_mode() {
|
||||
env_overrides.insert("BTERMINAL_TEST".into(), "1".into());
|
||||
if let Ok(v) = std::env::var("BTERMINAL_TEST_DATA_DIR") {
|
||||
env_overrides.insert("BTERMINAL_TEST_DATA_DIR".into(), v);
|
||||
env_overrides.insert("AGOR_TEST".into(), "1".into());
|
||||
if let Ok(v) = std::env::var("AGOR_TEST_DATA_DIR") {
|
||||
env_overrides.insert("AGOR_TEST_DATA_DIR".into(), v);
|
||||
}
|
||||
if let Ok(v) = std::env::var("BTERMINAL_TEST_CONFIG_DIR") {
|
||||
env_overrides.insert("BTERMINAL_TEST_CONFIG_DIR".into(), v);
|
||||
if let Ok(v) = std::env::var("AGOR_TEST_CONFIG_DIR") {
|
||||
env_overrides.insert("AGOR_TEST_CONFIG_DIR".into(), v);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -366,7 +368,7 @@ pub fn run() {
|
|||
dev_root.join("sidecar"),
|
||||
],
|
||||
env_overrides,
|
||||
sandbox: bterminal_core::sandbox::SandboxConfig::default(),
|
||||
sandbox: agor_core::sandbox::SandboxConfig::default(),
|
||||
};
|
||||
|
||||
let pty_manager = Arc::new(PtyManager::new(sink.clone()));
|
||||
|
|
@ -384,7 +386,7 @@ pub fn run() {
|
|||
let remote_manager = Arc::new(RemoteManager::new());
|
||||
|
||||
// Initialize FTS5 search database
|
||||
let search_db_path = config.data_dir.join("bterminal").join("search.db");
|
||||
let search_db_path = config.data_dir.join("agor").join("search.db");
|
||||
let search_db = Arc::new(
|
||||
search::SearchDb::open(&search_db_path).expect("Failed to open search database"),
|
||||
);
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ pub fn send_desktop_notification(
|
|||
match Notification::new()
|
||||
.summary(title)
|
||||
.body(body)
|
||||
.appname("BTerminal")
|
||||
.appname("Agents Orchestrator")
|
||||
.urgency(urgency_level)
|
||||
.show()
|
||||
{
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
// Plugin discovery and file reading.
|
||||
// Scans ~/.config/bterminal/plugins/ for plugin.json manifest files.
|
||||
// Scans ~/.config/agor/plugins/ for plugin.json manifest files.
|
||||
// Each plugin lives in its own subdirectory with a plugin.json manifest.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
// Thin wrapper — re-exports bterminal_core::pty types.
|
||||
// PtyManager is now in bterminal-core; this module only re-exports for lib.rs.
|
||||
// Thin wrapper — re-exports agor_core::pty types.
|
||||
// PtyManager is now in agor-core; this module only re-exports for lib.rs.
|
||||
|
||||
pub use bterminal_core::pty::{PtyManager, PtyOptions};
|
||||
pub use agor_core::pty::{PtyManager, PtyOptions};
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
// Remote machine management — WebSocket client connections to bterminal-relay instances
|
||||
// Remote machine management — WebSocket client connections to agor-relay instances
|
||||
|
||||
use bterminal_core::pty::PtyOptions;
|
||||
use bterminal_core::sidecar::AgentQueryOptions;
|
||||
use agor_core::pty::PtyOptions;
|
||||
use agor_core::sidecar::AgentQueryOptions;
|
||||
use futures_util::{SinkExt, StreamExt};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Sha256, Digest};
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@
|
|||
|
||||
use keyring::Entry;
|
||||
|
||||
const SERVICE: &str = "bterminal";
|
||||
const SERVICE: &str = "agor";
|
||||
const KEYS_META: &str = "__bterminal_keys__";
|
||||
|
||||
/// Known secret key identifiers.
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
// Thin wrapper — re-exports bterminal_core::sidecar types.
|
||||
// SidecarManager is now in bterminal-core; this module only re-exports for lib.rs.
|
||||
// Thin wrapper — re-exports agor_core::sidecar types.
|
||||
// SidecarManager is now in agor-core; this module only re-exports for lib.rs.
|
||||
|
||||
pub use bterminal_core::sidecar::{AgentQueryOptions, SidecarConfig, SidecarManager};
|
||||
pub use agor_core::sidecar::{AgentQueryOptions, SidecarConfig, SidecarManager};
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
// OpenTelemetry telemetry — tracing spans + OTLP export to Tempo/Grafana
|
||||
//
|
||||
// Controlled by BTERMINAL_OTLP_ENDPOINT env var:
|
||||
// Controlled by AGOR_OTLP_ENDPOINT env var:
|
||||
// - Set (e.g. "http://localhost:4318") → export traces via OTLP/HTTP + console
|
||||
// - Absent → console-only (no network calls)
|
||||
|
||||
|
|
@ -28,16 +28,16 @@ impl Drop for TelemetryGuard {
|
|||
/// Call once at app startup, before any tracing macros fire.
|
||||
pub fn init() -> TelemetryGuard {
|
||||
let filter = EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| EnvFilter::new("agent_orchestrator=info,agent_orchestrator_lib=info,bterminal_core=info"));
|
||||
.unwrap_or_else(|_| EnvFilter::new("agent_orchestrator=info,agent_orchestrator_lib=info,agor_core=info"));
|
||||
|
||||
let fmt_layer = tracing_subscriber::fmt::layer()
|
||||
.with_target(true)
|
||||
.compact();
|
||||
|
||||
// In test mode, never export telemetry (avoid contaminating production data)
|
||||
let is_test = std::env::var("BTERMINAL_TEST").map_or(false, |v| v == "1");
|
||||
let is_test = std::env::var("AGOR_TEST").map_or(false, |v| v == "1");
|
||||
|
||||
match std::env::var("BTERMINAL_OTLP_ENDPOINT") {
|
||||
match std::env::var("AGOR_OTLP_ENDPOINT") {
|
||||
Ok(endpoint) if !endpoint.is_empty() && !is_test => {
|
||||
match build_otlp_provider(&endpoint) {
|
||||
Ok(provider) => {
|
||||
|
|
@ -71,7 +71,7 @@ pub fn init() -> TelemetryGuard {
|
|||
.with(fmt_layer)
|
||||
.init();
|
||||
|
||||
log::info!("Telemetry: console-only (BTERMINAL_OTLP_ENDPOINT not set)");
|
||||
log::info!("Telemetry: console-only (AGOR_OTLP_ENDPOINT not set)");
|
||||
TelemetryGuard { provider: None }
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ impl FileWatcherManager {
|
|||
path: &str,
|
||||
) -> Result<String, String> {
|
||||
// In test mode, skip file watching to avoid inotify noise and flaky events
|
||||
if std::env::var("BTERMINAL_TEST").map_or(false, |v| v == "1") {
|
||||
if std::env::var("AGOR_TEST").map_or(false, |v| v == "1") {
|
||||
return std::fs::read_to_string(path)
|
||||
.map_err(|e| format!("Failed to read file: {e}"));
|
||||
}
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ export interface PluginMeta {
|
|||
permissions: string[];
|
||||
}
|
||||
|
||||
/** Discover all plugins in ~/.config/bterminal/plugins/ */
|
||||
/** Discover all plugins in ~/.config/agor/plugins/ */
|
||||
export async function discoverPlugins(): Promise<PluginMeta[]> {
|
||||
return invoke<PluginMeta[]>('plugins_discover');
|
||||
}
|
||||
|
|
|
|||
|
|
@ -748,7 +748,7 @@
|
|||
flex: 1;
|
||||
overflow-y: auto;
|
||||
container-type: inline-size;
|
||||
padding: 0.5rem var(--bterminal-pane-padding-inline, 0.75rem);
|
||||
padding: 0.5rem var(--agor-pane-padding-inline, 0.75rem);
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 0.125rem;
|
||||
|
|
@ -1270,7 +1270,7 @@
|
|||
|
||||
/* === Status strip === */
|
||||
.status-strip {
|
||||
padding: 0.25rem var(--bterminal-pane-padding-inline, 0.75rem);
|
||||
padding: 0.25rem var(--agor-pane-padding-inline, 0.75rem);
|
||||
border-top: 1px solid var(--ctp-surface1);
|
||||
flex-shrink: 0;
|
||||
font-size: 0.8125rem;
|
||||
|
|
@ -1408,7 +1408,7 @@
|
|||
.session-controls {
|
||||
display: flex;
|
||||
gap: 0.5rem;
|
||||
padding: 0.375rem var(--bterminal-pane-padding-inline, 0.75rem);
|
||||
padding: 0.375rem var(--agor-pane-padding-inline, 0.75rem);
|
||||
justify-content: center;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
|
@ -1451,7 +1451,7 @@
|
|||
|
||||
/* === Prompt container === */
|
||||
.prompt-container {
|
||||
padding: 0.5rem var(--bterminal-pane-padding-inline, 0.75rem);
|
||||
padding: 0.5rem var(--agor-pane-padding-inline, 0.75rem);
|
||||
flex-shrink: 0;
|
||||
border-top: 1px solid var(--ctp-surface0);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -150,7 +150,7 @@
|
|||
}
|
||||
|
||||
.markdown-body {
|
||||
padding: 1.5rem var(--bterminal-pane-padding-inline, 2rem);
|
||||
padding: 1.5rem var(--agor-pane-padding-inline, 2rem);
|
||||
font-family: 'Inter', system-ui, -apple-system, 'Segoe UI', sans-serif;
|
||||
font-size: 0.9rem;
|
||||
line-height: 1.7;
|
||||
|
|
|
|||
|
|
@ -885,7 +885,7 @@
|
|||
<section class="settings-section">
|
||||
<h2>Plugins</h2>
|
||||
{#if pluginEntries.length === 0}
|
||||
<p class="empty-notice">No plugins found in ~/.config/bterminal/plugins/</p>
|
||||
<p class="empty-notice">No plugins found in ~/.config/agor/plugins/</p>
|
||||
{:else}
|
||||
<div class="plugin-list">
|
||||
{#each pluginEntries as entry (entry.meta.id)}
|
||||
|
|
|
|||
|
|
@ -73,15 +73,15 @@ class MockWorker {
|
|||
const permissions = (data.permissions as string[]) || [];
|
||||
const meta = data.meta as Record<string, unknown>;
|
||||
|
||||
// Build a mock bterminal API that mimics worker-side behavior
|
||||
// Build a mock agor API that mimics worker-side behavior
|
||||
// by sending messages back to the main thread (this.sendToMain)
|
||||
const bterminal: Record<string, unknown> = {
|
||||
const agor: Record<string, unknown> = {
|
||||
meta: Object.freeze({ ...meta }),
|
||||
};
|
||||
|
||||
if (permissions.includes('palette')) {
|
||||
let cbId = 0;
|
||||
bterminal.palette = {
|
||||
agor.palette = {
|
||||
registerCommand: (label: string, callback: () => void) => {
|
||||
if (typeof label !== 'string' || !label.trim()) {
|
||||
throw new Error('Command label must be a non-empty string');
|
||||
|
|
@ -96,14 +96,14 @@ class MockWorker {
|
|||
}
|
||||
|
||||
if (permissions.includes('bttask:read')) {
|
||||
bterminal.tasks = {
|
||||
agor.tasks = {
|
||||
list: () => this.rpc('tasks.list', {}),
|
||||
comments: (taskId: string) => this.rpc('tasks.comments', { taskId }),
|
||||
};
|
||||
}
|
||||
|
||||
if (permissions.includes('btmsg:read')) {
|
||||
bterminal.messages = {
|
||||
agor.messages = {
|
||||
inbox: () => this.rpc('messages.inbox', {}),
|
||||
channels: () => this.rpc('messages.channels', {}),
|
||||
};
|
||||
|
|
@ -111,7 +111,7 @@ class MockWorker {
|
|||
|
||||
if (permissions.includes('events')) {
|
||||
let cbId = 0;
|
||||
bterminal.events = {
|
||||
agor.events = {
|
||||
on: (event: string, callback: (data: unknown) => void) => {
|
||||
if (typeof event !== 'string' || typeof callback !== 'function') {
|
||||
throw new Error('event.on requires (string, function)');
|
||||
|
|
@ -125,12 +125,12 @@ class MockWorker {
|
|||
};
|
||||
}
|
||||
|
||||
Object.freeze(bterminal);
|
||||
Object.freeze(agor);
|
||||
|
||||
// Execute the plugin code
|
||||
try {
|
||||
const fn = new Function('bterminal', `"use strict"; ${code}`);
|
||||
fn(bterminal);
|
||||
const fn = new Function('agor', `"use strict"; ${code}`);
|
||||
fn(agor);
|
||||
this.sendToMain({ type: 'loaded' });
|
||||
} catch (err) {
|
||||
this.sendToMain({ type: 'error', message: String(err) });
|
||||
|
|
@ -242,7 +242,7 @@ describe('plugin-host Worker isolation', () => {
|
|||
const meta = makeMeta({ id: 'freeze-test', permissions: [] });
|
||||
mockPluginCode(`
|
||||
try {
|
||||
bterminal.hacked = true;
|
||||
agor.hacked = true;
|
||||
throw new Error('FREEZE FAILED: could add property');
|
||||
} catch (e) {
|
||||
if (e.message === 'FREEZE FAILED: could add property') throw e;
|
||||
|
|
@ -255,7 +255,7 @@ describe('plugin-host Worker isolation', () => {
|
|||
const meta = makeMeta({ id: 'freeze-delete-test', permissions: [] });
|
||||
mockPluginCode(`
|
||||
try {
|
||||
delete bterminal.meta;
|
||||
delete agor.meta;
|
||||
throw new Error('FREEZE FAILED: could delete property');
|
||||
} catch (e) {
|
||||
if (e.message === 'FREEZE FAILED: could delete property') throw e;
|
||||
|
|
@ -267,14 +267,14 @@ describe('plugin-host Worker isolation', () => {
|
|||
it('meta is accessible and frozen', async () => {
|
||||
const meta = makeMeta({ id: 'meta-access', permissions: [] });
|
||||
mockPluginCode(`
|
||||
if (bterminal.meta.id !== 'meta-access') {
|
||||
if (agor.meta.id !== 'meta-access') {
|
||||
throw new Error('meta.id mismatch');
|
||||
}
|
||||
if (bterminal.meta.name !== 'Test Plugin') {
|
||||
if (agor.meta.name !== 'Test Plugin') {
|
||||
throw new Error('meta.name mismatch');
|
||||
}
|
||||
try {
|
||||
bterminal.meta.id = 'hacked';
|
||||
agor.meta.id = 'hacked';
|
||||
throw new Error('META FREEZE FAILED');
|
||||
} catch (e) {
|
||||
if (e.message === 'META FREEZE FAILED') throw e;
|
||||
|
|
@ -291,7 +291,7 @@ describe('plugin-host permissions', () => {
|
|||
it('plugin with palette permission can register commands', async () => {
|
||||
const meta = makeMeta({ id: 'palette-plugin', permissions: ['palette'] });
|
||||
mockPluginCode(`
|
||||
bterminal.palette.registerCommand('Test Command', function() {});
|
||||
agor.palette.registerCommand('Test Command', function() {});
|
||||
`);
|
||||
|
||||
await loadPlugin(meta, GROUP_ID, AGENT_ID);
|
||||
|
|
@ -306,7 +306,7 @@ describe('plugin-host permissions', () => {
|
|||
it('plugin without palette permission has no palette API', async () => {
|
||||
const meta = makeMeta({ id: 'no-palette-plugin', permissions: [] });
|
||||
mockPluginCode(`
|
||||
if (bterminal.palette !== undefined) {
|
||||
if (agor.palette !== undefined) {
|
||||
throw new Error('palette API should not be available');
|
||||
}
|
||||
`);
|
||||
|
|
@ -316,7 +316,7 @@ describe('plugin-host permissions', () => {
|
|||
it('palette.registerCommand rejects non-string label', async () => {
|
||||
const meta = makeMeta({ id: 'bad-label-plugin', permissions: ['palette'] });
|
||||
mockPluginCode(`
|
||||
bterminal.palette.registerCommand(123, function() {});
|
||||
agor.palette.registerCommand(123, function() {});
|
||||
`);
|
||||
await expect(loadPlugin(meta, GROUP_ID, AGENT_ID)).rejects.toThrow(
|
||||
'execution failed',
|
||||
|
|
@ -326,7 +326,7 @@ describe('plugin-host permissions', () => {
|
|||
it('palette.registerCommand rejects non-function callback', async () => {
|
||||
const meta = makeMeta({ id: 'bad-cb-plugin', permissions: ['palette'] });
|
||||
mockPluginCode(`
|
||||
bterminal.palette.registerCommand('Test', 'not-a-function');
|
||||
agor.palette.registerCommand('Test', 'not-a-function');
|
||||
`);
|
||||
await expect(loadPlugin(meta, GROUP_ID, AGENT_ID)).rejects.toThrow(
|
||||
'execution failed',
|
||||
|
|
@ -336,7 +336,7 @@ describe('plugin-host permissions', () => {
|
|||
it('palette.registerCommand rejects empty label', async () => {
|
||||
const meta = makeMeta({ id: 'empty-label-plugin', permissions: ['palette'] });
|
||||
mockPluginCode(`
|
||||
bterminal.palette.registerCommand(' ', function() {});
|
||||
agor.palette.registerCommand(' ', function() {});
|
||||
`);
|
||||
await expect(loadPlugin(meta, GROUP_ID, AGENT_ID)).rejects.toThrow(
|
||||
'execution failed',
|
||||
|
|
@ -348,7 +348,7 @@ describe('plugin-host permissions', () => {
|
|||
it('plugin with bttask:read can call tasks.list', async () => {
|
||||
const meta = makeMeta({ id: 'task-plugin', permissions: ['bttask:read'] });
|
||||
mockPluginCode(`
|
||||
bterminal.tasks.list();
|
||||
agor.tasks.list();
|
||||
`);
|
||||
await expect(loadPlugin(meta, GROUP_ID, AGENT_ID)).resolves.toBeUndefined();
|
||||
});
|
||||
|
|
@ -356,7 +356,7 @@ describe('plugin-host permissions', () => {
|
|||
it('plugin without bttask:read has no tasks API', async () => {
|
||||
const meta = makeMeta({ id: 'no-task-plugin', permissions: [] });
|
||||
mockPluginCode(`
|
||||
if (bterminal.tasks !== undefined) {
|
||||
if (agor.tasks !== undefined) {
|
||||
throw new Error('tasks API should not be available');
|
||||
}
|
||||
`);
|
||||
|
|
@ -368,7 +368,7 @@ describe('plugin-host permissions', () => {
|
|||
it('plugin with btmsg:read can call messages.inbox', async () => {
|
||||
const meta = makeMeta({ id: 'msg-plugin', permissions: ['btmsg:read'] });
|
||||
mockPluginCode(`
|
||||
bterminal.messages.inbox();
|
||||
agor.messages.inbox();
|
||||
`);
|
||||
await expect(loadPlugin(meta, GROUP_ID, AGENT_ID)).resolves.toBeUndefined();
|
||||
});
|
||||
|
|
@ -376,7 +376,7 @@ describe('plugin-host permissions', () => {
|
|||
it('plugin without btmsg:read has no messages API', async () => {
|
||||
const meta = makeMeta({ id: 'no-msg-plugin', permissions: [] });
|
||||
mockPluginCode(`
|
||||
if (bterminal.messages !== undefined) {
|
||||
if (agor.messages !== undefined) {
|
||||
throw new Error('messages API should not be available');
|
||||
}
|
||||
`);
|
||||
|
|
@ -388,7 +388,7 @@ describe('plugin-host permissions', () => {
|
|||
it('plugin with events permission can subscribe', async () => {
|
||||
const meta = makeMeta({ id: 'events-plugin', permissions: ['events'] });
|
||||
mockPluginCode(`
|
||||
bterminal.events.on('test-event', function(data) {});
|
||||
agor.events.on('test-event', function(data) {});
|
||||
`);
|
||||
await loadPlugin(meta, GROUP_ID, AGENT_ID);
|
||||
expect(pluginEventBus.on).toHaveBeenCalledWith('test-event', expect.any(Function));
|
||||
|
|
@ -397,7 +397,7 @@ describe('plugin-host permissions', () => {
|
|||
it('plugin without events permission has no events API', async () => {
|
||||
const meta = makeMeta({ id: 'no-events-plugin', permissions: [] });
|
||||
mockPluginCode(`
|
||||
if (bterminal.events !== undefined) {
|
||||
if (agor.events !== undefined) {
|
||||
throw new Error('events API should not be available');
|
||||
}
|
||||
`);
|
||||
|
|
@ -437,7 +437,7 @@ describe('plugin-host lifecycle', () => {
|
|||
it('unloadPlugin removes the plugin and cleans up commands', async () => {
|
||||
const meta = makeMeta({ id: 'lifecycle-unload', permissions: ['palette'] });
|
||||
mockPluginCode(`
|
||||
bterminal.palette.registerCommand('Cmd1', function() {});
|
||||
agor.palette.registerCommand('Cmd1', function() {});
|
||||
`);
|
||||
|
||||
await loadPlugin(meta, GROUP_ID, AGENT_ID);
|
||||
|
|
@ -491,7 +491,7 @@ describe('plugin-host lifecycle', () => {
|
|||
it('unloadPlugin cleans up event subscriptions', async () => {
|
||||
const meta = makeMeta({ id: 'events-cleanup', permissions: ['events'] });
|
||||
mockPluginCode(`
|
||||
bterminal.events.on('my-event', function() {});
|
||||
agor.events.on('my-event', function() {});
|
||||
`);
|
||||
|
||||
await loadPlugin(meta, GROUP_ID, AGENT_ID);
|
||||
|
|
@ -507,11 +507,11 @@ describe('plugin-host lifecycle', () => {
|
|||
describe('plugin-host RPC routing', () => {
|
||||
it('tasks.list RPC is routed to main thread', async () => {
|
||||
const meta = makeMeta({ id: 'rpc-tasks', permissions: ['bttask:read'] });
|
||||
mockPluginCode(`bterminal.tasks.list();`);
|
||||
mockPluginCode(`agor.tasks.list();`);
|
||||
|
||||
// Mock the bttask bridge
|
||||
mockInvoke.mockImplementation((cmd: string) => {
|
||||
if (cmd === 'plugin_read_file') return Promise.resolve('bterminal.tasks.list();');
|
||||
if (cmd === 'plugin_read_file') return Promise.resolve('agor.tasks.list();');
|
||||
if (cmd === 'bttask_list') return Promise.resolve([]);
|
||||
return Promise.reject(new Error(`Unexpected: ${cmd}`));
|
||||
});
|
||||
|
|
@ -521,10 +521,10 @@ describe('plugin-host RPC routing', () => {
|
|||
|
||||
it('messages.inbox RPC is routed to main thread', async () => {
|
||||
const meta = makeMeta({ id: 'rpc-messages', permissions: ['btmsg:read'] });
|
||||
mockPluginCode(`bterminal.messages.inbox();`);
|
||||
mockPluginCode(`agor.messages.inbox();`);
|
||||
|
||||
mockInvoke.mockImplementation((cmd: string) => {
|
||||
if (cmd === 'plugin_read_file') return Promise.resolve('bterminal.messages.inbox();');
|
||||
if (cmd === 'plugin_read_file') return Promise.resolve('agor.messages.inbox();');
|
||||
if (cmd === 'btmsg_get_unread') return Promise.resolve([]);
|
||||
return Promise.reject(new Error(`Unexpected: ${cmd}`));
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/**
|
||||
* Plugin Host — Web Worker sandbox for BTerminal plugins.
|
||||
* Plugin Host — Web Worker sandbox for Agents Orchestrator plugins.
|
||||
*
|
||||
* Each plugin runs in a dedicated Web Worker, providing true process-level
|
||||
* isolation from the main thread. The Worker has no access to the DOM,
|
||||
|
|
@ -38,7 +38,7 @@ const loadedPlugins = new Map<string, LoadedPlugin>();
|
|||
|
||||
/**
|
||||
* Build the Worker script as an inline blob.
|
||||
* The Worker receives plugin code + permissions and builds a sandboxed bterminal API
|
||||
* The Worker receives plugin code + permissions and builds a sandboxed agor API
|
||||
* that proxies all calls to the main thread via postMessage.
|
||||
*/
|
||||
function buildWorkerScript(): string {
|
||||
|
|
@ -73,7 +73,7 @@ self.onmessage = function(e) {
|
|||
const permissions = msg.permissions || [];
|
||||
const meta = msg.meta;
|
||||
|
||||
// Build the bterminal API based on permissions
|
||||
// Build the agor API based on permissions
|
||||
const api = { meta: Object.freeze(meta) };
|
||||
|
||||
if (permissions.includes('palette')) {
|
||||
|
|
@ -128,7 +128,7 @@ self.onmessage = function(e) {
|
|||
// Execute the plugin code
|
||||
try {
|
||||
const fn = (0, eval)(
|
||||
'(function(bterminal) { "use strict"; ' + msg.code + '\\n})'
|
||||
'(function(agor) { "use strict"; ' + msg.code + '\\n})'
|
||||
);
|
||||
fn(api);
|
||||
self.postMessage({ type: 'loaded' });
|
||||
|
|
|
|||
|
|
@ -57,5 +57,5 @@
|
|||
--border-radius: 4px;
|
||||
|
||||
/* Pane content padding — shared between AgentPane and MarkdownPane */
|
||||
--bterminal-pane-padding-inline: clamp(0.75rem, 3.5cqi, 2rem);
|
||||
--agor-pane-padding-inline: clamp(0.75rem, 3.5cqi, 2rem);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -257,7 +257,7 @@ function buildEnvironmentSection(group: GroupConfig): string {
|
|||
|
||||
return `## Environment
|
||||
|
||||
**Platform:** BTerminal Mission Control — multi-agent orchestration system
|
||||
**Platform:** Agents Orchestrator Mission Control — multi-agent orchestration system
|
||||
**Group:** ${group.name}
|
||||
**Your working directory:** Same as the monorepo root (shared across Tier 1 agents)
|
||||
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ export async function detachPane(pane: Pane): Promise<void> {
|
|||
|
||||
const webview = new WebviewWindow(label, {
|
||||
url: `index.html?${params.toString()}`,
|
||||
title: `BTerminal — ${pane.title}`,
|
||||
title: `Agents Orchestrator — ${pane.title}`,
|
||||
width: 800,
|
||||
height: 600,
|
||||
decorations: true,
|
||||
|
|
|
|||
|
|
@ -21,21 +21,21 @@ npm run test:e2e
|
|||
SKIP_BUILD=1 npm run test:e2e
|
||||
|
||||
# With test isolation (custom data/config dirs)
|
||||
BTERMINAL_TEST_DATA_DIR=/tmp/bt-test/data BTERMINAL_TEST_CONFIG_DIR=/tmp/bt-test/config npm run test:e2e
|
||||
AGOR_TEST_DATA_DIR=/tmp/bt-test/data AGOR_TEST_CONFIG_DIR=/tmp/bt-test/config npm run test:e2e
|
||||
```
|
||||
|
||||
The `wdio.conf.js` handles:
|
||||
1. Building the debug binary (`cargo tauri build --debug --no-bundle`) in `onPrepare`
|
||||
2. Spawning `tauri-driver` before each session (TCP readiness probe, 10s deadline)
|
||||
3. Killing `tauri-driver` after each session
|
||||
4. Passing `BTERMINAL_TEST=1` env var to the app for test mode isolation
|
||||
4. Passing `AGOR_TEST=1` env var to the app for test mode isolation
|
||||
|
||||
## Test Mode (`BTERMINAL_TEST=1`)
|
||||
## Test Mode (`AGOR_TEST=1`)
|
||||
|
||||
When `BTERMINAL_TEST=1` is set:
|
||||
When `AGOR_TEST=1` is set:
|
||||
- File watchers (watcher.rs, fs_watcher.rs) are disabled to avoid inotify noise
|
||||
- Wake scheduler is disabled (no auto-wake timers)
|
||||
- Data/config directories can be overridden via `BTERMINAL_TEST_DATA_DIR` / `BTERMINAL_TEST_CONFIG_DIR`
|
||||
- Data/config directories can be overridden via `AGOR_TEST_DATA_DIR` / `AGOR_TEST_CONFIG_DIR`
|
||||
|
||||
## CI setup (headless)
|
||||
|
||||
|
|
@ -132,7 +132,7 @@ tests/e2e/
|
|||
├── fixtures.ts # Test fixture generator (isolated environments)
|
||||
├── results-db.ts # JSON test results store
|
||||
└── specs/
|
||||
├── bterminal.test.ts # Smoke tests (CSS class selectors, 50+ tests)
|
||||
├── agor.test.ts # Smoke tests (CSS class selectors, 50+ tests)
|
||||
└── agent-scenarios.test.ts # Phase A scenarios (data-testid selectors, 22 tests)
|
||||
```
|
||||
|
||||
|
|
|
|||
|
|
@ -9,9 +9,9 @@ import { tmpdir } from 'node:os';
|
|||
export interface TestFixture {
|
||||
/** Root temp directory for this test run */
|
||||
rootDir: string;
|
||||
/** BTERMINAL_TEST_DATA_DIR — isolated data dir */
|
||||
/** AGOR_TEST_DATA_DIR — isolated data dir */
|
||||
dataDir: string;
|
||||
/** BTERMINAL_TEST_CONFIG_DIR — isolated config dir */
|
||||
/** AGOR_TEST_CONFIG_DIR — isolated config dir */
|
||||
configDir: string;
|
||||
/** Path to a minimal git repo for agent testing */
|
||||
projectDir: string;
|
||||
|
|
@ -25,7 +25,7 @@ export interface TestFixture {
|
|||
* - Temp config dir with a minimal groups.json
|
||||
* - A simple git repo with one file for agent testing
|
||||
*/
|
||||
export function createTestFixture(name = 'bterminal-e2e'): TestFixture {
|
||||
export function createTestFixture(name = 'agor-e2e'): TestFixture {
|
||||
const rootDir = join(tmpdir(), `${name}-${Date.now()}`);
|
||||
const dataDir = join(rootDir, 'data');
|
||||
const configDir = join(rootDir, 'config');
|
||||
|
|
@ -38,9 +38,9 @@ export function createTestFixture(name = 'bterminal-e2e'): TestFixture {
|
|||
|
||||
// Create a minimal git repo for agent testing
|
||||
execSync('git init', { cwd: projectDir, stdio: 'ignore' });
|
||||
execSync('git config user.email "test@bterminal.dev"', { cwd: projectDir, stdio: 'ignore' });
|
||||
execSync('git config user.name "BTerminal Test"', { cwd: projectDir, stdio: 'ignore' });
|
||||
writeFileSync(join(projectDir, 'README.md'), '# Test Project\n\nA simple test project for BTerminal E2E tests.\n');
|
||||
execSync('git config user.email "test@agor.dev"', { cwd: projectDir, stdio: 'ignore' });
|
||||
execSync('git config user.name "Agor Test"', { cwd: projectDir, stdio: 'ignore' });
|
||||
writeFileSync(join(projectDir, 'README.md'), '# Test Project\n\nA simple test project for Agor E2E tests.\n');
|
||||
writeFileSync(join(projectDir, 'hello.py'), 'def greet(name: str) -> str:\n return f"Hello, {name}!"\n');
|
||||
execSync('git add -A && git commit -m "initial commit"', { cwd: projectDir, stdio: 'ignore' });
|
||||
|
||||
|
|
@ -75,9 +75,9 @@ export function createTestFixture(name = 'bterminal-e2e'): TestFixture {
|
|||
);
|
||||
|
||||
const env: Record<string, string> = {
|
||||
BTERMINAL_TEST: '1',
|
||||
BTERMINAL_TEST_DATA_DIR: dataDir,
|
||||
BTERMINAL_TEST_CONFIG_DIR: configDir,
|
||||
AGOR_TEST: '1',
|
||||
AGOR_TEST_DATA_DIR: dataDir,
|
||||
AGOR_TEST_CONFIG_DIR: configDir,
|
||||
};
|
||||
|
||||
return { rootDir, dataDir, configDir, projectDir, env };
|
||||
|
|
@ -96,15 +96,15 @@ export function destroyTestFixture(fixture: TestFixture): void {
|
|||
* Create a groups.json with multiple projects for multi-project testing.
|
||||
*/
|
||||
export function createMultiProjectFixture(projectCount = 3): TestFixture {
|
||||
const fixture = createTestFixture('bterminal-multi');
|
||||
const fixture = createTestFixture('agor-multi');
|
||||
|
||||
const projects = [];
|
||||
for (let i = 0; i < projectCount; i++) {
|
||||
const projDir = join(fixture.rootDir, `project-${i}`);
|
||||
mkdirSync(projDir, { recursive: true });
|
||||
execSync('git init', { cwd: projDir, stdio: 'ignore' });
|
||||
execSync('git config user.email "test@bterminal.dev"', { cwd: projDir, stdio: 'ignore' });
|
||||
execSync('git config user.name "BTerminal Test"', { cwd: projDir, stdio: 'ignore' });
|
||||
execSync('git config user.email "test@agor.dev"', { cwd: projDir, stdio: 'ignore' });
|
||||
execSync('git config user.name "Agor Test"', { cwd: projDir, stdio: 'ignore' });
|
||||
writeFileSync(join(projDir, 'README.md'), `# Project ${i}\n`);
|
||||
execSync('git add -A && git commit -m "init"', { cwd: projDir, stdio: 'ignore' });
|
||||
|
||||
|
|
|
|||
|
|
@ -386,7 +386,7 @@ describe('Scenario 7 — Agent Prompt Submission', () => {
|
|||
|
||||
it('should show stop button during agent execution (if Claude available)', async function () {
|
||||
// Send a minimal prompt
|
||||
await sendAgentPrompt('Reply with exactly: BTERMINAL_TEST_OK');
|
||||
await sendAgentPrompt('Reply with exactly: AGOR_TEST_OK');
|
||||
|
||||
// Wait for running status (generous timeout for sidecar spin-up)
|
||||
try {
|
||||
|
|
|
|||
799
tests/e2e/specs/agor.test.ts
Normal file
799
tests/e2e/specs/agor.test.ts
Normal file
|
|
@ -0,0 +1,799 @@
|
|||
import { browser, expect } from '@wdio/globals';
|
||||
|
||||
// All E2E tests run in a single spec file because Tauri launches one app
|
||||
// instance per session, and tauri-driver doesn't support re-creating sessions.
|
||||
|
||||
describe('BTerminal — Smoke Tests', () => {
|
||||
it('should render the application window', async () => {
|
||||
// Wait for the app to fully load before any tests
|
||||
await browser.waitUntil(
|
||||
async () => (await browser.getTitle()) === 'BTerminal',
|
||||
{ timeout: 10_000, timeoutMsg: 'App did not load within 10s' },
|
||||
);
|
||||
const title = await browser.getTitle();
|
||||
expect(title).toBe('BTerminal');
|
||||
});
|
||||
|
||||
it('should display the status bar', async () => {
|
||||
const statusBar = await browser.$('.status-bar');
|
||||
await expect(statusBar).toBeDisplayed();
|
||||
});
|
||||
|
||||
it('should show version text in status bar', async () => {
|
||||
const version = await browser.$('.status-bar .version');
|
||||
await expect(version).toBeDisplayed();
|
||||
const text = await version.getText();
|
||||
expect(text).toContain('BTerminal');
|
||||
});
|
||||
|
||||
it('should display the sidebar rail', async () => {
|
||||
const sidebarRail = await browser.$('.sidebar-rail');
|
||||
await expect(sidebarRail).toBeDisplayed();
|
||||
});
|
||||
|
||||
it('should display the workspace area', async () => {
|
||||
const workspace = await browser.$('.workspace');
|
||||
await expect(workspace).toBeDisplayed();
|
||||
});
|
||||
|
||||
it('should toggle sidebar with settings button', async () => {
|
||||
const settingsBtn = await browser.$('.rail-btn');
|
||||
await settingsBtn.click();
|
||||
|
||||
const sidebarPanel = await browser.$('.sidebar-panel');
|
||||
await expect(sidebarPanel).toBeDisplayed();
|
||||
|
||||
// Click again to close
|
||||
await settingsBtn.click();
|
||||
await expect(sidebarPanel).not.toBeDisplayed();
|
||||
});
|
||||
});
|
||||
|
||||
describe('BTerminal — Workspace & Projects', () => {
|
||||
it('should display the project grid', async () => {
|
||||
const grid = await browser.$('.project-grid');
|
||||
await expect(grid).toBeDisplayed();
|
||||
});
|
||||
|
||||
it('should render at least one project box', async () => {
|
||||
const boxes = await browser.$$('.project-box');
|
||||
expect(boxes.length).toBeGreaterThanOrEqual(1);
|
||||
});
|
||||
|
||||
it('should show project header with name', async () => {
|
||||
const header = await browser.$('.project-header');
|
||||
await expect(header).toBeDisplayed();
|
||||
|
||||
const name = await browser.$('.project-name');
|
||||
const text = await name.getText();
|
||||
expect(text.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should show project-level tabs (Model, Docs, Context, Files, SSH, Memory, ...)', async () => {
|
||||
const box = await browser.$('.project-box');
|
||||
const tabs = await box.$$('.ptab');
|
||||
// v3 has 6+ tabs: Model, Docs, Context, Files, SSH, Memory (+ role-specific)
|
||||
expect(tabs.length).toBeGreaterThanOrEqual(6);
|
||||
});
|
||||
|
||||
it('should highlight active project on click', async () => {
|
||||
const header = await browser.$('.project-header');
|
||||
await header.click();
|
||||
|
||||
const activeBox = await browser.$('.project-box.active');
|
||||
await expect(activeBox).toBeDisplayed();
|
||||
});
|
||||
|
||||
it('should switch project tabs', async () => {
|
||||
// Use JS click — WebDriver clicks don't always trigger Svelte onclick
|
||||
// on buttons inside complex components via WebKit2GTK/tauri-driver
|
||||
const switched = await browser.execute(() => {
|
||||
const box = document.querySelector('.project-box');
|
||||
if (!box) return false;
|
||||
const tabs = box.querySelectorAll('.ptab');
|
||||
if (tabs.length < 2) return false;
|
||||
(tabs[1] as HTMLElement).click();
|
||||
return true;
|
||||
});
|
||||
expect(switched).toBe(true);
|
||||
await browser.pause(500);
|
||||
|
||||
const box = await browser.$('.project-box');
|
||||
const activeTab = await box.$('.ptab.active');
|
||||
const text = await activeTab.getText();
|
||||
// Tab[1] is "Docs" in v3 tab bar (Model, Docs, Context, Files, ...)
|
||||
expect(text.toLowerCase()).toContain('docs');
|
||||
|
||||
// Switch back to Model tab
|
||||
await browser.execute(() => {
|
||||
const tab = document.querySelector('.project-box .ptab');
|
||||
if (tab) (tab as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(300);
|
||||
});
|
||||
|
||||
it('should display the status bar with project count', async () => {
|
||||
const statusBar = await browser.$('.status-bar .left');
|
||||
const text = await statusBar.getText();
|
||||
expect(text).toContain('projects');
|
||||
});
|
||||
|
||||
it('should display project and agent info in status bar', async () => {
|
||||
const statusBar = await browser.$('.status-bar .left');
|
||||
const text = await statusBar.getText();
|
||||
// Status bar always shows project count; agent counts only when > 0
|
||||
// (shows "X running", "X idle", "X stalled" — not the word "agents")
|
||||
expect(text).toContain('projects');
|
||||
});
|
||||
});
|
||||
|
||||
/** Open the settings panel, waiting for content to render. */
|
||||
async function openSettings(): Promise<void> {
|
||||
const panel = await browser.$('.sidebar-panel');
|
||||
const isOpen = await panel.isDisplayed().catch(() => false);
|
||||
if (!isOpen) {
|
||||
// Use data-testid for unambiguous selection
|
||||
await browser.execute(() => {
|
||||
const btn = document.querySelector('[data-testid="settings-btn"]');
|
||||
if (btn) (btn as HTMLElement).click();
|
||||
});
|
||||
await panel.waitForDisplayed({ timeout: 5000 });
|
||||
}
|
||||
// Wait for settings content to mount
|
||||
await browser.waitUntil(
|
||||
async () => {
|
||||
const count = await browser.execute(() =>
|
||||
document.querySelectorAll('.settings-tab .settings-section').length,
|
||||
);
|
||||
return (count as number) >= 1;
|
||||
},
|
||||
{ timeout: 5000, timeoutMsg: 'Settings sections did not render within 5s' },
|
||||
);
|
||||
await browser.pause(200);
|
||||
}
|
||||
|
||||
/** Close the settings panel if open. */
|
||||
async function closeSettings(): Promise<void> {
|
||||
const panel = await browser.$('.sidebar-panel');
|
||||
if (await panel.isDisplayed().catch(() => false)) {
|
||||
await browser.execute(() => {
|
||||
const btn = document.querySelector('.panel-close');
|
||||
if (btn) (btn as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(500);
|
||||
}
|
||||
}
|
||||
|
||||
describe('BTerminal — Settings Panel', () => {
|
||||
before(async () => {
|
||||
await openSettings();
|
||||
});
|
||||
|
||||
after(async () => {
|
||||
await closeSettings();
|
||||
});
|
||||
|
||||
it('should display the settings tab container', async () => {
|
||||
const settingsTab = await browser.$('.settings-tab');
|
||||
await expect(settingsTab).toBeDisplayed();
|
||||
});
|
||||
|
||||
it('should show settings sections', async () => {
|
||||
const sections = await browser.$$('.settings-section');
|
||||
expect(sections.length).toBeGreaterThanOrEqual(1);
|
||||
});
|
||||
|
||||
it('should display theme dropdown', async () => {
|
||||
const dropdown = await browser.$('.custom-dropdown .dropdown-trigger');
|
||||
await expect(dropdown).toBeDisplayed();
|
||||
});
|
||||
|
||||
it('should open theme dropdown and show options', async () => {
|
||||
// Use JS click — WebDriver clicks don't reliably trigger Svelte onclick
|
||||
// on buttons inside scrollable panels via WebKit2GTK/tauri-driver
|
||||
await browser.execute(() => {
|
||||
const trigger = document.querySelector('.custom-dropdown .dropdown-trigger');
|
||||
if (trigger) (trigger as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(500);
|
||||
|
||||
const menu = await browser.$('.dropdown-menu');
|
||||
await menu.waitForExist({ timeout: 3000 });
|
||||
|
||||
const options = await browser.$$('.dropdown-option');
|
||||
expect(options.length).toBeGreaterThan(0);
|
||||
|
||||
// Close dropdown by clicking trigger again
|
||||
await browser.execute(() => {
|
||||
const trigger = document.querySelector('.custom-dropdown .dropdown-trigger');
|
||||
if (trigger) (trigger as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(300);
|
||||
});
|
||||
|
||||
it('should display group list', async () => {
|
||||
// Groups section is below Appearance/Defaults/Providers — scroll into view
|
||||
await browser.execute(() => {
|
||||
const el = document.querySelector('.group-list');
|
||||
if (el) el.scrollIntoView({ behavior: 'instant', block: 'center' });
|
||||
});
|
||||
await browser.pause(300);
|
||||
const groupList = await browser.$('.group-list');
|
||||
await expect(groupList).toBeDisplayed();
|
||||
});
|
||||
|
||||
it('should close settings panel with close button', async () => {
|
||||
// Ensure settings is open
|
||||
await openSettings();
|
||||
|
||||
// Use JS click for reliability
|
||||
await browser.execute(() => {
|
||||
const btn = document.querySelector('.panel-close');
|
||||
if (btn) (btn as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(500);
|
||||
|
||||
const panel = await browser.$('.sidebar-panel');
|
||||
await expect(panel).not.toBeDisplayed();
|
||||
});
|
||||
});
|
||||
|
||||
/** Open command palette — idempotent (won't toggle-close if already open). */
|
||||
async function openCommandPalette(): Promise<void> {
|
||||
// Ensure sidebar is closed first (it can intercept keyboard events)
|
||||
await closeSettings();
|
||||
|
||||
// Check if already open
|
||||
const alreadyOpen = await browser.execute(() => {
|
||||
const p = document.querySelector('.palette');
|
||||
return p !== null && getComputedStyle(p).display !== 'none';
|
||||
});
|
||||
if (alreadyOpen) return;
|
||||
|
||||
// Dispatch Ctrl+K via JS for reliability with WebKit2GTK/tauri-driver
|
||||
await browser.execute(() => {
|
||||
document.dispatchEvent(new KeyboardEvent('keydown', {
|
||||
key: 'k', code: 'KeyK', ctrlKey: true, bubbles: true, cancelable: true,
|
||||
}));
|
||||
});
|
||||
await browser.pause(300);
|
||||
|
||||
const palette = await browser.$('.palette');
|
||||
await palette.waitForDisplayed({ timeout: 5000 });
|
||||
}
|
||||
|
||||
/** Close command palette if open — uses backdrop click (more reliable than Escape). */
|
||||
async function closeCommandPalette(): Promise<void> {
|
||||
const isOpen = await browser.execute(() => {
|
||||
const p = document.querySelector('.palette');
|
||||
return p !== null && getComputedStyle(p).display !== 'none';
|
||||
});
|
||||
if (!isOpen) return;
|
||||
|
||||
// Click backdrop to close (more reliable than dispatching Escape)
|
||||
await browser.execute(() => {
|
||||
const backdrop = document.querySelector('.palette-backdrop');
|
||||
if (backdrop) (backdrop as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(500);
|
||||
}
|
||||
|
||||
describe('BTerminal — Command Palette', () => {
|
||||
beforeEach(async () => {
|
||||
await closeCommandPalette();
|
||||
});
|
||||
|
||||
it('should show palette input', async () => {
|
||||
await openCommandPalette();
|
||||
|
||||
const input = await browser.$('.palette-input');
|
||||
await expect(input).toBeDisplayed();
|
||||
|
||||
// Verify input accepts text (functional focus test, not activeElement check
|
||||
// which is unreliable in WebKit2GTK/tauri-driver)
|
||||
const canType = await browser.execute(() => {
|
||||
const el = document.querySelector('.palette-input') as HTMLInputElement | null;
|
||||
if (!el) return false;
|
||||
el.focus();
|
||||
return el === document.activeElement;
|
||||
});
|
||||
expect(canType).toBe(true);
|
||||
|
||||
await closeCommandPalette();
|
||||
});
|
||||
|
||||
it('should show palette items with command labels and categories', async () => {
|
||||
await openCommandPalette();
|
||||
|
||||
const items = await browser.$$('.palette-item');
|
||||
expect(items.length).toBeGreaterThanOrEqual(1);
|
||||
|
||||
// Each command item should have a label
|
||||
const cmdLabel = await browser.$('.palette-item .cmd-label');
|
||||
await expect(cmdLabel).toBeDisplayed();
|
||||
const labelText = await cmdLabel.getText();
|
||||
expect(labelText.length).toBeGreaterThan(0);
|
||||
|
||||
// Commands should be grouped under category headers
|
||||
const categories = await browser.$$('.palette-category');
|
||||
expect(categories.length).toBeGreaterThanOrEqual(1);
|
||||
|
||||
await closeCommandPalette();
|
||||
});
|
||||
|
||||
it('should highlight selected item in palette', async () => {
|
||||
await openCommandPalette();
|
||||
|
||||
// First item should be selected by default
|
||||
const selectedItem = await browser.$('.palette-item.selected');
|
||||
await expect(selectedItem).toBeExisting();
|
||||
|
||||
await closeCommandPalette();
|
||||
});
|
||||
|
||||
it('should filter palette items by typing', async () => {
|
||||
await openCommandPalette();
|
||||
|
||||
const itemsBefore = await browser.$$('.palette-item');
|
||||
const countBefore = itemsBefore.length;
|
||||
|
||||
// Type a nonsense string that won't match any group name
|
||||
const input = await browser.$('.palette-input');
|
||||
await input.setValue('zzz_nonexistent_group_xyz');
|
||||
await browser.pause(300);
|
||||
|
||||
// Should show no results or fewer items
|
||||
const noResults = await browser.$('.no-results');
|
||||
const itemsAfter = await browser.$$('.palette-item');
|
||||
// Either no-results message appears OR item count decreased
|
||||
const filtered = (await noResults.isExisting()) || itemsAfter.length < countBefore;
|
||||
expect(filtered).toBe(true);
|
||||
|
||||
await closeCommandPalette();
|
||||
});
|
||||
|
||||
it('should close palette by clicking backdrop', async () => {
|
||||
await openCommandPalette();
|
||||
const palette = await browser.$('.palette');
|
||||
|
||||
// Click the backdrop (outside the palette)
|
||||
await browser.execute(() => {
|
||||
const backdrop = document.querySelector('.palette-backdrop');
|
||||
if (backdrop) (backdrop as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(500);
|
||||
|
||||
await expect(palette).not.toBeDisplayed();
|
||||
});
|
||||
});
|
||||
|
||||
describe('BTerminal — Terminal Tabs', () => {
|
||||
before(async () => {
|
||||
// Ensure Claude tab is active so terminal section is visible
|
||||
await browser.execute(() => {
|
||||
const tab = document.querySelector('.project-box .ptab');
|
||||
if (tab) (tab as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(300);
|
||||
});
|
||||
|
||||
it('should show terminal toggle on Claude tab', async () => {
|
||||
const toggle = await browser.$('.terminal-toggle');
|
||||
await expect(toggle).toBeDisplayed();
|
||||
|
||||
const label = await browser.$('.toggle-label');
|
||||
const text = await label.getText();
|
||||
expect(text.toLowerCase()).toContain('terminal');
|
||||
});
|
||||
|
||||
it('should expand terminal area on toggle click', async () => {
|
||||
// Click terminal toggle via JS
|
||||
await browser.execute(() => {
|
||||
const toggle = document.querySelector('.terminal-toggle');
|
||||
if (toggle) (toggle as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(500);
|
||||
|
||||
const termArea = await browser.$('.project-terminal-area');
|
||||
await expect(termArea).toBeDisplayed();
|
||||
|
||||
// Chevron should have expanded class
|
||||
const chevron = await browser.$('.toggle-chevron.expanded');
|
||||
await expect(chevron).toBeExisting();
|
||||
});
|
||||
|
||||
it('should show add tab button when terminal expanded', async () => {
|
||||
const addBtn = await browser.$('.tab-add');
|
||||
await expect(addBtn).toBeDisplayed();
|
||||
});
|
||||
|
||||
it('should add a shell tab', async () => {
|
||||
// Click add tab button via JS (Svelte onclick)
|
||||
await browser.execute(() => {
|
||||
const btn = document.querySelector('.tab-bar .tab-add');
|
||||
if (btn) (btn as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(500);
|
||||
|
||||
// Verify tab title via JS to avoid stale element issues
|
||||
const title = await browser.execute(() => {
|
||||
const el = document.querySelector('.tab-bar .tab-title');
|
||||
return el ? el.textContent : '';
|
||||
});
|
||||
expect((title as string).toLowerCase()).toContain('shell');
|
||||
});
|
||||
|
||||
it('should show active tab styling', async () => {
|
||||
const activeTab = await browser.$('.tab.active');
|
||||
await expect(activeTab).toBeExisting();
|
||||
});
|
||||
|
||||
it('should add a second shell tab and switch between them', async () => {
|
||||
// Add second tab via JS
|
||||
await browser.execute(() => {
|
||||
const btn = document.querySelector('.tab-bar .tab-add');
|
||||
if (btn) (btn as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(500);
|
||||
|
||||
const tabCount = await browser.execute(() => {
|
||||
return document.querySelectorAll('.tab-bar .tab').length;
|
||||
});
|
||||
expect(tabCount as number).toBeGreaterThanOrEqual(2);
|
||||
|
||||
// Click first tab and verify it becomes active with Shell title
|
||||
await browser.execute(() => {
|
||||
const tabs = document.querySelectorAll('.tab-bar .tab');
|
||||
if (tabs[0]) (tabs[0] as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(300);
|
||||
|
||||
const activeTitle = await browser.execute(() => {
|
||||
const active = document.querySelector('.tab-bar .tab.active .tab-title');
|
||||
return active ? active.textContent : '';
|
||||
});
|
||||
expect(activeTitle as string).toContain('Shell');
|
||||
});
|
||||
|
||||
it('should close a tab', async () => {
|
||||
const tabsBefore = await browser.$$('.tab');
|
||||
const countBefore = tabsBefore.length;
|
||||
|
||||
// Close the last tab
|
||||
await browser.execute(() => {
|
||||
const closeBtns = document.querySelectorAll('.tab-close');
|
||||
if (closeBtns.length > 0) {
|
||||
(closeBtns[closeBtns.length - 1] as HTMLElement).click();
|
||||
}
|
||||
});
|
||||
await browser.pause(500);
|
||||
|
||||
const tabsAfter = await browser.$$('.tab');
|
||||
expect(tabsAfter.length).toBe(Number(countBefore) - 1);
|
||||
});
|
||||
|
||||
after(async () => {
|
||||
// Clean up: close remaining tabs and collapse terminal
|
||||
await browser.execute(() => {
|
||||
// Close all tabs
|
||||
const closeBtns = document.querySelectorAll('.tab-close');
|
||||
closeBtns.forEach(btn => (btn as HTMLElement).click());
|
||||
});
|
||||
await browser.pause(300);
|
||||
|
||||
// Collapse terminal
|
||||
await browser.execute(() => {
|
||||
const toggle = document.querySelector('.terminal-toggle');
|
||||
if (toggle) {
|
||||
const chevron = toggle.querySelector('.toggle-chevron.expanded');
|
||||
if (chevron) (toggle as HTMLElement).click();
|
||||
}
|
||||
});
|
||||
await browser.pause(300);
|
||||
});
|
||||
});
|
||||
|
||||
describe('BTerminal — Theme Switching', () => {
|
||||
before(async () => {
|
||||
await openSettings();
|
||||
// Scroll to top for theme dropdown
|
||||
await browser.execute(() => {
|
||||
const content = document.querySelector('.panel-content') || document.querySelector('.sidebar-panel');
|
||||
if (content) content.scrollTop = 0;
|
||||
});
|
||||
await browser.pause(300);
|
||||
});
|
||||
|
||||
after(async () => {
|
||||
await closeSettings();
|
||||
});
|
||||
|
||||
it('should show theme dropdown with group labels', async () => {
|
||||
// Close any open dropdowns first
|
||||
await browser.execute(() => {
|
||||
const openMenu = document.querySelector('.dropdown-menu');
|
||||
if (openMenu) {
|
||||
const trigger = openMenu.closest('.custom-dropdown')?.querySelector('.dropdown-trigger');
|
||||
if (trigger) (trigger as HTMLElement).click();
|
||||
}
|
||||
});
|
||||
await browser.pause(200);
|
||||
|
||||
// Click the first dropdown trigger (theme dropdown)
|
||||
await browser.execute(() => {
|
||||
const trigger = document.querySelector('.settings-tab .custom-dropdown .dropdown-trigger');
|
||||
if (trigger) (trigger as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(500);
|
||||
|
||||
const menu = await browser.$('.dropdown-menu');
|
||||
await menu.waitForExist({ timeout: 5000 });
|
||||
|
||||
// Should have group labels (Catppuccin, Editor, Deep Dark)
|
||||
const groupLabels = await browser.$$('.dropdown-group-label');
|
||||
expect(groupLabels.length).toBeGreaterThanOrEqual(2);
|
||||
|
||||
// Close dropdown
|
||||
await browser.execute(() => {
|
||||
const trigger = document.querySelector('.settings-tab .custom-dropdown .dropdown-trigger');
|
||||
if (trigger) (trigger as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(300);
|
||||
});
|
||||
|
||||
it('should switch theme and update CSS variables', async () => {
|
||||
// Get current base color
|
||||
const baseBefore = await browser.execute(() => {
|
||||
return getComputedStyle(document.documentElement).getPropertyValue('--ctp-base').trim();
|
||||
});
|
||||
|
||||
// Open theme dropdown (first custom-dropdown in settings)
|
||||
await browser.execute(() => {
|
||||
const trigger = document.querySelector('.settings-tab .custom-dropdown .dropdown-trigger');
|
||||
if (trigger) (trigger as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(500);
|
||||
|
||||
// Wait for dropdown menu
|
||||
const menu = await browser.$('.dropdown-menu');
|
||||
await menu.waitForExist({ timeout: 5000 });
|
||||
|
||||
// Click the first non-active theme option
|
||||
const changed = await browser.execute(() => {
|
||||
const options = document.querySelectorAll('.dropdown-menu .dropdown-option:not(.active)');
|
||||
if (options.length > 0) {
|
||||
(options[0] as HTMLElement).click();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
});
|
||||
expect(changed).toBe(true);
|
||||
await browser.pause(500);
|
||||
|
||||
// Verify CSS variable changed
|
||||
const baseAfter = await browser.execute(() => {
|
||||
return getComputedStyle(document.documentElement).getPropertyValue('--ctp-base').trim();
|
||||
});
|
||||
expect(baseAfter).not.toBe(baseBefore);
|
||||
|
||||
// Switch back to Catppuccin Mocha (first option) to restore state
|
||||
await browser.execute(() => {
|
||||
const trigger = document.querySelector('.settings-tab .custom-dropdown .dropdown-trigger');
|
||||
if (trigger) (trigger as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(500);
|
||||
await browser.execute(() => {
|
||||
const options = document.querySelectorAll('.dropdown-menu .dropdown-option');
|
||||
if (options.length > 0) (options[0] as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(300);
|
||||
});
|
||||
|
||||
it('should show active theme option', async () => {
|
||||
await browser.execute(() => {
|
||||
const trigger = document.querySelector('.settings-tab .custom-dropdown .dropdown-trigger');
|
||||
if (trigger) (trigger as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(500);
|
||||
|
||||
const menu = await browser.$('.dropdown-menu');
|
||||
await menu.waitForExist({ timeout: 5000 });
|
||||
|
||||
const activeOption = await browser.$('.dropdown-option.active');
|
||||
await expect(activeOption).toBeExisting();
|
||||
|
||||
await browser.execute(() => {
|
||||
const trigger = document.querySelector('.settings-tab .custom-dropdown .dropdown-trigger');
|
||||
if (trigger) (trigger as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(300);
|
||||
});
|
||||
});
|
||||
|
||||
describe('BTerminal — Settings Interaction', () => {
|
||||
before(async () => {
|
||||
await openSettings();
|
||||
// Scroll to top for font controls
|
||||
await browser.execute(() => {
|
||||
const content = document.querySelector('.panel-content') || document.querySelector('.sidebar-panel');
|
||||
if (content) content.scrollTop = 0;
|
||||
});
|
||||
await browser.pause(300);
|
||||
});
|
||||
|
||||
after(async () => {
|
||||
await closeSettings();
|
||||
});
|
||||
|
||||
it('should show font size controls with increment/decrement', async () => {
|
||||
const sizeControls = await browser.$$('.size-control');
|
||||
expect(sizeControls.length).toBeGreaterThanOrEqual(1);
|
||||
|
||||
const sizeBtns = await browser.$$('.size-btn');
|
||||
expect(sizeBtns.length).toBeGreaterThanOrEqual(2); // at least - and + for one control
|
||||
|
||||
const sizeInput = await browser.$('.size-input');
|
||||
await expect(sizeInput).toBeExisting();
|
||||
});
|
||||
|
||||
it('should increment font size', async () => {
|
||||
const sizeInput = await browser.$('.size-input');
|
||||
const valueBefore = await sizeInput.getValue();
|
||||
|
||||
// Click the + button (second .size-btn in first .size-control)
|
||||
await browser.execute(() => {
|
||||
const btns = document.querySelectorAll('.size-control .size-btn');
|
||||
// Second button is + (first is -)
|
||||
if (btns.length >= 2) (btns[1] as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(300);
|
||||
|
||||
const afterEl = await browser.$('.size-input');
|
||||
const valueAfter = await afterEl.getValue();
|
||||
expect(parseInt(valueAfter as string)).toBe(parseInt(valueBefore as string) + 1);
|
||||
});
|
||||
|
||||
it('should decrement font size back', async () => {
|
||||
const sizeInput = await browser.$('.size-input');
|
||||
const valueBefore = await sizeInput.getValue();
|
||||
|
||||
// Click the - button (first .size-btn)
|
||||
await browser.execute(() => {
|
||||
const btns = document.querySelectorAll('.size-control .size-btn');
|
||||
if (btns.length >= 1) (btns[0] as HTMLElement).click();
|
||||
});
|
||||
await browser.pause(300);
|
||||
|
||||
const afterEl = await browser.$('.size-input');
|
||||
const valueAfter = await afterEl.getValue();
|
||||
expect(parseInt(valueAfter as string)).toBe(parseInt(valueBefore as string) - 1);
|
||||
});
|
||||
|
||||
it('should display group rows with active indicator', async () => {
|
||||
// Scroll to Groups section (below Appearance, Defaults, Providers)
|
||||
await browser.execute(() => {
|
||||
const el = document.querySelector('.group-list');
|
||||
if (el) el.scrollIntoView({ behavior: 'instant', block: 'center' });
|
||||
});
|
||||
await browser.pause(300);
|
||||
|
||||
const groupRows = await browser.$$('.group-row');
|
||||
expect(groupRows.length).toBeGreaterThanOrEqual(1);
|
||||
|
||||
const activeGroup = await browser.$('.group-row.active');
|
||||
await expect(activeGroup).toBeExisting();
|
||||
});
|
||||
|
||||
it('should show project cards', async () => {
|
||||
// Scroll to Projects section
|
||||
await browser.execute(() => {
|
||||
const el = document.querySelector('.project-cards');
|
||||
if (el) el.scrollIntoView({ behavior: 'instant', block: 'center' });
|
||||
});
|
||||
await browser.pause(300);
|
||||
|
||||
const cards = await browser.$$('.project-card');
|
||||
expect(cards.length).toBeGreaterThanOrEqual(1);
|
||||
});
|
||||
|
||||
it('should display project card with name and path', async () => {
|
||||
const nameInput = await browser.$('.card-name-input');
|
||||
await expect(nameInput).toBeExisting();
|
||||
const name = await nameInput.getValue() as string;
|
||||
expect(name.length).toBeGreaterThan(0);
|
||||
|
||||
const cwdInput = await browser.$('.cwd-input');
|
||||
await expect(cwdInput).toBeExisting();
|
||||
const cwd = await cwdInput.getValue() as string;
|
||||
expect(cwd.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it('should show project toggle switch', async () => {
|
||||
const toggle = await browser.$('.card-toggle');
|
||||
await expect(toggle).toBeExisting();
|
||||
|
||||
const track = await browser.$('.toggle-track');
|
||||
await expect(track).toBeDisplayed();
|
||||
});
|
||||
|
||||
it('should show add project form', async () => {
|
||||
// Scroll to add project form (at bottom of Projects section)
|
||||
await browser.execute(() => {
|
||||
const el = document.querySelector('.add-project-form');
|
||||
if (el) el.scrollIntoView({ behavior: 'instant', block: 'center' });
|
||||
});
|
||||
await browser.pause(300);
|
||||
|
||||
const addForm = await browser.$('.add-project-form');
|
||||
await expect(addForm).toBeDisplayed();
|
||||
|
||||
const addBtn = await browser.$('.add-project-form .btn-primary');
|
||||
await expect(addBtn).toBeExisting();
|
||||
});
|
||||
});
|
||||
|
||||
describe('BTerminal — Keyboard Shortcuts', () => {
|
||||
before(async () => {
|
||||
await closeSettings();
|
||||
await closeCommandPalette();
|
||||
});
|
||||
|
||||
it('should open command palette with Ctrl+K', async () => {
|
||||
await openCommandPalette();
|
||||
|
||||
const input = await browser.$('.palette-input');
|
||||
await expect(input).toBeDisplayed();
|
||||
|
||||
// Close with Escape
|
||||
await closeCommandPalette();
|
||||
const palette = await browser.$('.palette');
|
||||
const isGone = !(await palette.isDisplayed().catch(() => false));
|
||||
expect(isGone).toBe(true);
|
||||
});
|
||||
|
||||
it('should toggle settings with Ctrl+,', async () => {
|
||||
await browser.keys(['Control', ',']);
|
||||
|
||||
const panel = await browser.$('.sidebar-panel');
|
||||
await panel.waitForDisplayed({ timeout: 3000 });
|
||||
|
||||
// Close with Ctrl+,
|
||||
await browser.keys(['Control', ',']);
|
||||
await panel.waitForDisplayed({ timeout: 3000, reverse: true });
|
||||
});
|
||||
|
||||
it('should toggle sidebar with Ctrl+B', async () => {
|
||||
// Open sidebar first
|
||||
await browser.keys(['Control', ',']);
|
||||
const panel = await browser.$('.sidebar-panel');
|
||||
await panel.waitForDisplayed({ timeout: 3000 });
|
||||
|
||||
// Toggle off with Ctrl+B
|
||||
await browser.keys(['Control', 'b']);
|
||||
await panel.waitForDisplayed({ timeout: 3000, reverse: true });
|
||||
});
|
||||
|
||||
it('should close sidebar with Escape', async () => {
|
||||
// Open sidebar
|
||||
await browser.keys(['Control', ',']);
|
||||
const panel = await browser.$('.sidebar-panel');
|
||||
await panel.waitForDisplayed({ timeout: 3000 });
|
||||
|
||||
// Close with Escape
|
||||
await browser.keys('Escape');
|
||||
await panel.waitForDisplayed({ timeout: 3000, reverse: true });
|
||||
});
|
||||
|
||||
it('should show command palette with categorized commands', async () => {
|
||||
await openCommandPalette();
|
||||
|
||||
const items = await browser.$$('.palette-item');
|
||||
expect(items.length).toBeGreaterThanOrEqual(1);
|
||||
|
||||
// Commands should have labels
|
||||
const cmdLabel = await browser.$('.palette-item .cmd-label');
|
||||
await expect(cmdLabel).toBeDisplayed();
|
||||
|
||||
await closeCommandPalette();
|
||||
});
|
||||
});
|
||||
|
|
@ -7,7 +7,7 @@ import { isJudgeAvailable, assertWithJudge } from '../llm-judge';
|
|||
//
|
||||
// Prerequisites:
|
||||
// - Built debug binary (or SKIP_BUILD=1)
|
||||
// - groups.json with 2+ projects (use BTERMINAL_TEST_CONFIG_DIR or default)
|
||||
// - groups.json with 2+ projects (use AGOR_TEST_CONFIG_DIR or default)
|
||||
// - ANTHROPIC_API_KEY env var for LLM-judged tests (skipped if absent)
|
||||
|
||||
// ─── Helpers ──────────────────────────────────────────────────────────
|
||||
|
|
|
|||
|
|
@ -10,12 +10,12 @@ const projectRoot = resolve(__dirname, '../..');
|
|||
|
||||
// Debug binary path (built with `cargo tauri build --debug --no-bundle`)
|
||||
// Cargo workspace target dir is at v2/target/, not v2/src-tauri/target/
|
||||
const tauriBinary = resolve(projectRoot, 'target/debug/bterminal');
|
||||
const tauriBinary = resolve(projectRoot, 'target/debug/agent-orchestrator');
|
||||
|
||||
let tauriDriver;
|
||||
|
||||
// ── Test Fixture (created eagerly so env vars are available for capabilities) ──
|
||||
const fixtureRoot = join(tmpdir(), `bterminal-e2e-${Date.now()}`);
|
||||
const fixtureRoot = join(tmpdir(), `agor-e2e-${Date.now()}`);
|
||||
const fixtureDataDir = join(fixtureRoot, 'data');
|
||||
const fixtureConfigDir = join(fixtureRoot, 'config');
|
||||
const fixtureProjectDir = join(fixtureRoot, 'test-project');
|
||||
|
|
@ -26,9 +26,9 @@ mkdirSync(fixtureProjectDir, { recursive: true });
|
|||
|
||||
// Create a minimal git repo for agent testing
|
||||
execSync('git init', { cwd: fixtureProjectDir, stdio: 'ignore' });
|
||||
execSync('git config user.email "test@bterminal.dev"', { cwd: fixtureProjectDir, stdio: 'ignore' });
|
||||
execSync('git config user.name "BTerminal Test"', { cwd: fixtureProjectDir, stdio: 'ignore' });
|
||||
writeFileSync(join(fixtureProjectDir, 'README.md'), '# Test Project\n\nA simple test project for BTerminal E2E tests.\n');
|
||||
execSync('git config user.email "test@agor.dev"', { cwd: fixtureProjectDir, stdio: 'ignore' });
|
||||
execSync('git config user.name "Agor Test"', { cwd: fixtureProjectDir, stdio: 'ignore' });
|
||||
writeFileSync(join(fixtureProjectDir, 'README.md'), '# Test Project\n\nA simple test project for Agor E2E tests.\n');
|
||||
writeFileSync(join(fixtureProjectDir, 'hello.py'), 'def greet(name: str) -> str:\n return f"Hello, {name}!"\n');
|
||||
execSync('git add -A && git commit -m "initial commit"', { cwd: fixtureProjectDir, stdio: 'ignore' });
|
||||
|
||||
|
|
@ -58,9 +58,9 @@ writeFileSync(
|
|||
|
||||
// Inject env vars into process.env so tauri-driver inherits them
|
||||
// (tauri:options.env may not reliably set process-level env vars)
|
||||
process.env.BTERMINAL_TEST = '1';
|
||||
process.env.BTERMINAL_TEST_DATA_DIR = fixtureDataDir;
|
||||
process.env.BTERMINAL_TEST_CONFIG_DIR = fixtureConfigDir;
|
||||
process.env.AGOR_TEST = '1';
|
||||
process.env.AGOR_TEST_DATA_DIR = fixtureDataDir;
|
||||
process.env.AGOR_TEST_CONFIG_DIR = fixtureConfigDir;
|
||||
|
||||
console.log(`Test fixture created at ${fixtureRoot}`);
|
||||
|
||||
|
|
@ -78,7 +78,7 @@ export const config = {
|
|||
// Single spec file — Tauri launches one app instance per session,
|
||||
// and tauri-driver can't re-create sessions between spec files.
|
||||
specs: [
|
||||
resolve(__dirname, 'specs/bterminal.test.ts'),
|
||||
resolve(__dirname, 'specs/agor.test.ts'),
|
||||
resolve(__dirname, 'specs/agent-scenarios.test.ts'),
|
||||
resolve(__dirname, 'specs/phase-b.test.ts'),
|
||||
resolve(__dirname, 'specs/phase-c.test.ts'),
|
||||
|
|
@ -92,9 +92,9 @@ export const config = {
|
|||
application: tauriBinary,
|
||||
// Test isolation: fixture-created data/config dirs, disable watchers/telemetry
|
||||
env: {
|
||||
BTERMINAL_TEST: '1',
|
||||
BTERMINAL_TEST_DATA_DIR: fixtureDataDir,
|
||||
BTERMINAL_TEST_CONFIG_DIR: fixtureConfigDir,
|
||||
AGOR_TEST: '1',
|
||||
AGOR_TEST_DATA_DIR: fixtureDataDir,
|
||||
AGOR_TEST_CONFIG_DIR: fixtureConfigDir,
|
||||
},
|
||||
},
|
||||
}],
|
||||
|
|
|
|||
|
|
@ -9,6 +9,10 @@ export default defineConfig({
|
|||
},
|
||||
clearScreen: false,
|
||||
test: {
|
||||
include: ['src/**/*.test.ts', 'sidecar/**/*.test.ts'],
|
||||
include: [
|
||||
'src/**/*.test.ts',
|
||||
'sidecar/**/*.test.ts',
|
||||
...(process.env.AGOR_EDITION === 'pro' ? ['tests/commercial/**/*.test.ts'] : []),
|
||||
],
|
||||
},
|
||||
})
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue