feat: add Landlock sandbox for sidecar process isolation

SandboxConfig with RW/RO paths applied via pre_exec() in sidecar child
process. Requires kernel 6.2+ with graceful fallback. Per-project toggle
in SettingsTab. 9 unit tests.
This commit is contained in:
Hibryda 2026-03-12 04:57:29 +01:00
parent 548478f115
commit b2c379516c
8 changed files with 363 additions and 12 deletions

View file

@ -12,3 +12,4 @@ log = "0.4"
portable-pty = "0.8"
uuid = { version = "1", features = ["v4"] }
dirs = "5"
landlock = "0.4"

View file

@ -95,6 +95,11 @@ impl AppConfig {
self.config_dir.join("groups.json")
}
/// Path to plugins directory
pub fn plugins_dir(&self) -> PathBuf {
self.config_dir.join("plugins")
}
/// Whether running in test mode (BTERMINAL_TEST=1)
pub fn is_test_mode(&self) -> bool {
self.test_mode

View file

@ -0,0 +1,270 @@
// Landlock-based filesystem sandboxing for sidecar processes.
//
// Landlock is a Linux Security Module (LSM) available since kernel 5.13.
// It restricts filesystem access for the calling process and all its children.
// Applied via pre_exec() on the sidecar child process before exec.
//
// Restrictions can only be tightened after application — never relaxed.
// The sidecar is long-lived and handles queries for multiple projects,
// so we apply the union of all project paths at sidecar start time.
use std::path::PathBuf;
use landlock::{
Access, AccessFs, PathBeneath, PathFd, Ruleset, RulesetAttr, RulesetCreatedAttr,
RulesetStatus, ABI,
};
/// Target Landlock ABI version. V3 requires kernel 6.2+ (we run 6.12+).
/// Falls back gracefully on older kernels via best-effort mode: the ruleset
/// enforces whatever subset of V3 the running kernel supports, and `apply()`
/// reports `Ok(false)` when nothing could be enforced at all.
const TARGET_ABI: ABI = ABI::V3;
/// Configuration for Landlock filesystem sandboxing.
///
/// The default value is inert — sandboxing disabled and no paths granted —
/// so `apply()` is a no-op until a config is built explicitly (e.g. via
/// `for_projects`).
//
// The previous hand-written `Default` impl duplicated the derivable one
// (empty Vecs, `false`), so it is replaced by `#[derive(Default)]`
// (clippy: `derivable_impls`).
#[derive(Debug, Clone, Default)]
pub struct SandboxConfig {
    /// Directories with full read+write+execute access (project CWDs, worktrees, tmp)
    pub rw_paths: Vec<PathBuf>,
    /// Directories with read-only access (system libs, runtimes, config)
    pub ro_paths: Vec<PathBuf>,
    /// Whether sandboxing is enabled
    pub enabled: bool,
}
impl SandboxConfig {
    /// Build a sandbox config for a set of project directories.
    ///
    /// `project_cwds` — directories that need read+write access (one per project).
    /// `worktree_roots` — optional worktree directories (one per project that uses worktrees).
    ///
    /// System paths (runtimes, libraries, /etc) are added as read-only automatically.
    /// The resulting config always has `enabled: true`; callers that want the path
    /// set but not enforcement flip `enabled` afterwards.
    pub fn for_projects(project_cwds: &[&str], worktree_roots: &[&str]) -> Self {
        let mut rw = Vec::new();
        for cwd in project_cwds {
            rw.push(PathBuf::from(cwd));
        }
        for wt in worktree_roots {
            rw.push(PathBuf::from(wt));
        }
        // Temp dir for sidecar scratch files
        rw.push(std::env::temp_dir());
        // NOTE(review): falling back to /root when no home directory can be
        // resolved assumes a root-run environment — confirm this matches how
        // the sidecar is deployed.
        let home = dirs::home_dir().unwrap_or_else(|| PathBuf::from("/root"));
        let ro = vec![
            PathBuf::from("/usr"), // system binaries + libraries
            PathBuf::from("/lib"), // shared libraries
            PathBuf::from("/lib64"), // 64-bit shared libraries
            PathBuf::from("/etc"), // system configuration (read only)
            PathBuf::from("/proc"), // process info (Landlock V3+ handles this)
            PathBuf::from("/dev"), // device nodes (stdin/stdout/stderr, /dev/null, urandom)
            PathBuf::from("/bin"), // essential binaries (symlink to /usr/bin on most distros)
            PathBuf::from("/sbin"), // essential system binaries
            home.join(".local"), // ~/.local/bin (claude CLI, user-installed tools)
            home.join(".deno"), // Deno runtime cache
            home.join(".nvm"), // Node.js version manager
            home.join(".config"), // XDG config (claude profiles, bterminal config)
            home.join(".claude"), // Claude CLI data (worktrees, skills, settings)
        ];
        Self {
            rw_paths: rw,
            ro_paths: ro,
            enabled: true,
        }
    }

    /// Build a sandbox config for a single project directory.
    ///
    /// Thin convenience wrapper: the optional worktree becomes a zero-or-one
    /// element slice so `for_projects` can be reused unchanged.
    pub fn for_project(cwd: &str, worktree: Option<&str>) -> Self {
        let worktrees: Vec<&str> = worktree.into_iter().collect();
        Self::for_projects(&[cwd], &worktrees)
    }

    /// Apply Landlock restrictions to the current process.
    ///
    /// This must be called in the child process (e.g., via `pre_exec`) BEFORE exec.
    /// Once applied, restrictions are inherited by all child processes and cannot be relaxed.
    ///
    /// NOTE(review): this method allocates (`format!`, `PathFd`) and calls `log`
    /// macros. When invoked from `pre_exec` — i.e. after `fork` in a potentially
    /// multi-threaded parent — that is not async-signal-safe; confirm the
    /// parent's threading/allocator state makes this acceptable in practice.
    ///
    /// Returns:
    /// - `Ok(true)` if Landlock was applied and enforced
    /// - `Ok(false)` if the kernel does not support Landlock (graceful degradation)
    /// - `Err(msg)` on configuration or syscall errors
    pub fn apply(&self) -> Result<bool, String> {
        // Disabled config is a deliberate no-op, reported as "not enforced".
        if !self.enabled {
            return Ok(false);
        }
        // Full access set (read+write+execute…) for rw paths, read-only set for ro paths,
        // both scoped to the target ABI so best-effort downgrade stays consistent.
        let access_all = AccessFs::from_all(TARGET_ABI);
        let access_read = AccessFs::from_read(TARGET_ABI);
        // Create ruleset handling all filesystem access types
        let mut ruleset = Ruleset::default()
            .handle_access(access_all)
            .map_err(|e| format!("Landlock: failed to handle access: {e}"))?
            .create()
            .map_err(|e| format!("Landlock: failed to create ruleset: {e}"))?;
        // Add read+write rules for project directories and tmp
        for path in &self.rw_paths {
            if path.exists() {
                // NOTE(review): exists() followed by PathFd::new is a small TOCTOU
                // window — a path removed in between turns into a hard error here
                // instead of the warn-and-skip branch. Confirm that's intended.
                let fd = PathFd::new(path)
                    .map_err(|e| format!("Landlock: PathFd failed for {}: {e}", path.display()))?;
                ruleset = ruleset
                    .add_rule(PathBeneath::new(fd, access_all))
                    .map_err(|e| {
                        format!("Landlock: add_rule (rw) failed for {}: {e}", path.display())
                    })?;
            } else {
                // Missing rw paths are loud: losing write access to a project dir
                // would break the sidecar, so the operator should see this.
                log::warn!(
                    "Landlock: skipping non-existent rw path: {}",
                    path.display()
                );
            }
        }
        // Add read-only rules for system paths
        for path in &self.ro_paths {
            if path.exists() {
                let fd = PathFd::new(path)
                    .map_err(|e| format!("Landlock: PathFd failed for {}: {e}", path.display()))?;
                ruleset = ruleset
                    .add_rule(PathBeneath::new(fd, access_read))
                    .map_err(|e| {
                        format!("Landlock: add_rule (ro) failed for {}: {e}", path.display())
                    })?;
            }
            // Silently skip non-existent read-only paths (e.g., /lib64 on some systems)
        }
        // Enforce the ruleset on this thread (and inherited by children)
        let status = ruleset
            .restrict_self()
            .map_err(|e| format!("Landlock: restrict_self failed: {e}"))?;
        // Both FullyEnforced and PartiallyEnforced count as "enforced" here;
        // only NotEnforced (kernel without Landlock) triggers the fallback path.
        let enforced = status.ruleset != RulesetStatus::NotEnforced;
        if enforced {
            log::info!("Landlock sandbox applied ({} rw, {} ro paths)", self.rw_paths.len(), self.ro_paths.len());
        } else {
            log::warn!("Landlock sandbox was not enforced (kernel may lack support)");
        }
        Ok(enforced)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// True when `paths` contains the path spelled by `p`.
    fn contains(paths: &[PathBuf], p: &str) -> bool {
        let wanted = PathBuf::from(p);
        paths.iter().any(|candidate| *candidate == wanted)
    }

    #[test]
    fn test_default_is_disabled() {
        let SandboxConfig { rw_paths, ro_paths, enabled } = SandboxConfig::default();
        assert!(!enabled);
        assert!(rw_paths.is_empty());
        assert!(ro_paths.is_empty());
    }

    #[test]
    fn test_for_project_single_cwd() {
        let cfg = SandboxConfig::for_project("/home/user/myproject", None);
        assert!(cfg.enabled);
        assert!(contains(&cfg.rw_paths, "/home/user/myproject"));
        assert!(cfg.rw_paths.contains(&std::env::temp_dir()));
        // No worktree path added
        let mentions_worktree = cfg
            .rw_paths
            .iter()
            .any(|p| p.to_string_lossy().contains("worktree"));
        assert!(!mentions_worktree);
    }

    #[test]
    fn test_for_project_with_worktree() {
        let wt = "/home/user/myproject/.claude/worktrees/abc123";
        let cfg = SandboxConfig::for_project("/home/user/myproject", Some(wt));
        assert!(cfg.enabled);
        assert!(contains(&cfg.rw_paths, "/home/user/myproject"));
        assert!(contains(&cfg.rw_paths, wt));
    }

    #[test]
    fn test_for_projects_multiple_cwds() {
        let cfg = SandboxConfig::for_projects(
            &["/home/user/project-a", "/home/user/project-b"],
            &["/home/user/project-a/.claude/worktrees/s1"],
        );
        assert!(cfg.enabled);
        for expected in [
            "/home/user/project-a",
            "/home/user/project-b",
            "/home/user/project-a/.claude/worktrees/s1",
        ] {
            assert!(contains(&cfg.rw_paths, expected));
        }
        // tmp always present
        assert!(cfg.rw_paths.contains(&std::env::temp_dir()));
    }

    #[test]
    fn test_ro_paths_include_system_dirs() {
        let cfg = SandboxConfig::for_project("/tmp/test", None);
        for dir in ["/usr", "/lib", "/etc", "/proc", "/dev", "/bin"] {
            assert!(contains(&cfg.ro_paths, dir), "missing {dir}");
        }
    }

    #[test]
    fn test_ro_paths_include_runtime_dirs() {
        let cfg = SandboxConfig::for_project("/tmp/test", None);
        let home = dirs::home_dir().unwrap();
        for sub in [".local", ".deno", ".nvm", ".config", ".claude"] {
            assert!(cfg.ro_paths.contains(&home.join(sub)));
        }
    }

    #[test]
    fn test_disabled_apply_returns_false() {
        // A disabled sandbox must short-circuit and report "not enforced".
        assert!(!SandboxConfig::default().apply().unwrap());
    }

    #[test]
    fn test_rw_paths_count() {
        // Single project: cwd + tmp = 2
        assert_eq!(SandboxConfig::for_project("/tmp/test", None).rw_paths.len(), 2);
        // With worktree: cwd + worktree + tmp = 3
        assert_eq!(
            SandboxConfig::for_project("/tmp/test", Some("/tmp/wt")).rw_paths.len(),
            3
        );
    }

    #[test]
    fn test_for_projects_empty() {
        let cfg = SandboxConfig::for_projects(&[], &[]);
        assert!(cfg.enabled);
        // Only tmp dir in rw
        assert_eq!(cfg.rw_paths, vec![std::env::temp_dir()]);
    }
}

View file

@ -3,12 +3,15 @@
use serde::{Deserialize, Serialize};
use std::io::{BufRead, BufReader, Write};
#[cfg(unix)]
use std::os::unix::process::CommandExt;
use std::path::PathBuf;
use std::process::{Child, Command, Stdio};
use std::sync::{Arc, Mutex};
use std::thread;
use crate::event::EventSink;
use crate::sandbox::SandboxConfig;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AgentQueryOptions {
@ -46,6 +49,8 @@ pub struct SidecarConfig {
pub search_paths: Vec<PathBuf>,
/// Extra env vars forwarded to sidecar processes (e.g. BTERMINAL_TEST=1 for test isolation)
pub env_overrides: std::collections::HashMap<String, String>,
/// Landlock filesystem sandbox configuration (Linux 5.13+, applied via pre_exec)
pub sandbox: SandboxConfig,
}
struct SidecarCommand {
@ -58,7 +63,7 @@ pub struct SidecarManager {
stdin_writer: Arc<Mutex<Option<Box<dyn Write + Send>>>>,
ready: Arc<Mutex<bool>>,
sink: Arc<dyn EventSink>,
config: SidecarConfig,
config: Mutex<SidecarConfig>,
}
impl SidecarManager {
@ -68,17 +73,23 @@ impl SidecarManager {
stdin_writer: Arc::new(Mutex::new(None)),
ready: Arc::new(Mutex::new(false)),
sink,
config,
config: Mutex::new(config),
}
}
/// Update the sandbox configuration. Takes effect on next sidecar (re)start.
pub fn set_sandbox(&self, sandbox: SandboxConfig) {
    let mut config = self.config.lock().unwrap();
    config.sandbox = sandbox;
}
pub fn start(&self) -> Result<(), String> {
let mut child_lock = self.child.lock().unwrap();
if child_lock.is_some() {
return Err("Sidecar already running".to_string());
}
let cmd = self.resolve_sidecar_command()?;
let config = self.config.lock().unwrap();
let cmd = self.resolve_sidecar_command_with_config(&config)?;
log::info!("Starting sidecar: {} {}", cmd.program, cmd.args.join(" "));
@ -92,14 +103,36 @@ impl SidecarManager {
})
.collect();
let mut child = Command::new(&cmd.program)
let mut command = Command::new(&cmd.program);
command
.args(&cmd.args)
.env_clear()
.envs(clean_env)
.envs(self.config.env_overrides.iter().map(|(k, v)| (k.as_str(), v.as_str())))
.envs(config.env_overrides.iter().map(|(k, v)| (k.as_str(), v.as_str())))
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.stderr(Stdio::piped());
// Apply Landlock sandbox in child process before exec (Linux only).
// Restrictions are inherited by all child processes (provider CLIs).
#[cfg(unix)]
if config.sandbox.enabled {
let sandbox = config.sandbox.clone();
unsafe {
command.pre_exec(move || {
sandbox.apply().map(|enforced| {
if !enforced {
log::warn!("Landlock sandbox not enforced in sidecar child");
}
}).map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
});
}
}
// Drop config lock before spawn (pre_exec closure owns the sandbox clone)
drop(config);
let mut child = command
.spawn()
.map_err(|e| format!("Failed to start sidecar: {e}"))?;
@ -197,11 +230,12 @@ impl SidecarManager {
// Validate that the requested provider has a runner available
let runner_name = format!("{}-runner.mjs", options.provider);
let runner_exists = self
.config
let config = self.config.lock().unwrap();
let runner_exists = config
.search_paths
.iter()
.any(|base| base.join("dist").join(&runner_name).exists());
drop(config);
if !runner_exists {
return Err(format!(
"No sidecar runner found for provider '{}' (expected {})",
@ -265,12 +299,12 @@ impl SidecarManager {
/// Resolve a sidecar runner command. Uses the default claude-runner for startup.
/// Future providers will have their own runners (e.g. codex-runner.mjs).
fn resolve_sidecar_command(&self) -> Result<SidecarCommand, String> {
self.resolve_sidecar_for_provider("claude")
fn resolve_sidecar_command_with_config(&self, config: &SidecarConfig) -> Result<SidecarCommand, String> {
Self::resolve_sidecar_for_provider_with_config(config, "claude")
}
/// Resolve a sidecar command for a specific provider's runner file.
fn resolve_sidecar_for_provider(&self, provider: &str) -> Result<SidecarCommand, String> {
fn resolve_sidecar_for_provider_with_config(config: &SidecarConfig, provider: &str) -> Result<SidecarCommand, String> {
let runner_name = format!("{}-runner.mjs", provider);
// Try Deno first (faster startup, better perf), fall back to Node.js.
@ -289,7 +323,7 @@ impl SidecarManager {
let mut checked = Vec::new();
for base in &self.config.search_paths {
for base in &config.search_paths {
let mjs_path = base.join("dist").join(&runner_name);
if mjs_path.exists() {
if has_deno {

View file

@ -99,6 +99,7 @@ async fn main() {
let sidecar_config = SidecarConfig {
search_paths,
env_overrides: std::collections::HashMap::new(),
sandbox: Default::default(),
};
let token = Arc::new(cli.token);

View file

@ -1,6 +1,7 @@
use tauri::State;
use crate::AppState;
use crate::sidecar::AgentQueryOptions;
use bterminal_core::sandbox::SandboxConfig;
#[tauri::command]
#[tracing::instrument(skip(state, options), fields(session_id = %options.session_id))]
@ -27,3 +28,31 @@ pub fn agent_ready(state: State<'_, AppState>) -> bool {
pub fn agent_restart(state: State<'_, AppState>) -> Result<(), String> {
state.sidecar_manager.restart()
}
/// Update sidecar sandbox configuration and restart to apply.
/// `project_cwds` — directories needing read+write access.
/// `worktree_roots` — optional worktree directories.
/// `enabled` — whether Landlock sandboxing is active.
#[tauri::command]
#[tracing::instrument(skip(state))]
pub fn agent_set_sandbox(
state: State<'_, AppState>,
project_cwds: Vec<String>,
worktree_roots: Vec<String>,
enabled: bool,
) -> Result<(), String> {
let cwd_refs: Vec<&str> = project_cwds.iter().map(|s| s.as_str()).collect();
let wt_refs: Vec<&str> = worktree_roots.iter().map(|s| s.as_str()).collect();
let mut sandbox = SandboxConfig::for_projects(&cwd_refs, &wt_refs);
sandbox.enabled = enabled;
state.sidecar_manager.set_sandbox(sandbox);
// Restart sidecar so Landlock restrictions take effect on the new process
if state.sidecar_manager.is_ready() {
state.sidecar_manager.restart()?;
}
Ok(())
}

View file

@ -51,6 +51,15 @@ export async function restartAgent(): Promise<void> {
return invoke('agent_restart');
}
/**
 * Update the Landlock sandbox configuration and restart the sidecar so the
 * new filesystem restrictions take effect on the fresh process.
 */
export async function setSandbox(
  projectCwds: string[],
  worktreeRoots: string[],
  enabled: boolean,
): Promise<void> {
  await invoke('agent_set_sandbox', { projectCwds, worktreeRoots, enabled });
}
export interface SidecarMessage {
type: string;
sessionId?: string;

View file

@ -16,6 +16,8 @@ export interface ProjectConfig {
provider?: ProviderId;
/** When true, agents for this project use git worktrees for isolation */
useWorktrees?: boolean;
/** When true, sidecar process is sandboxed via Landlock (Linux 5.13+, restricts filesystem access) */
sandboxEnabled?: boolean;
/** Anchor token budget scale (defaults to 'medium' = 6K tokens) */
anchorBudgetScale?: AnchorBudgetScale;
/** Stall detection threshold in minutes (defaults to 15) */