fix(security): audit fixes — path traversal, race conditions, memory leaks, transaction safety

- lib.rs: claude_read_skill path traversal prevention (canonicalize + starts_with)
- agent-dispatcher.ts: re-entrancy guard on exit handler, clear maps in stop
- machines.svelte.ts: track UnlistenFn array + destroyMachineListeners()
- agent-runner.ts: controller.signal.aborted, async handleMessage + .catch()
- remote.rs: try_lock → async lock, abort tasks on remove
- session.rs: unchecked_transaction for save_agent_messages
- agent-bridge.ts: safe msg.event check (implicit in dispatcher changes)
This commit is contained in:
Hibryda 2026-03-08 20:03:50 +01:00
parent 73ca780b54
commit 4bdb74721d
6 changed files with 102 additions and 57 deletions

View file

@ -26,7 +26,9 @@ function log(message: string) {
rl.on('line', (line: string) => {
try {
const msg = JSON.parse(line);
handleMessage(msg);
handleMessage(msg).catch((err: unknown) => {
log(`Unhandled error in message handler: ${err}`);
});
} catch {
log(`Invalid JSON: ${line}`);
}
@ -53,13 +55,13 @@ interface StopMessage {
sessionId: string;
}
function handleMessage(msg: Record<string, unknown>) {
async function handleMessage(msg: Record<string, unknown>) {
switch (msg.type) {
case 'ping':
send({ type: 'pong' });
break;
case 'query':
handleQuery(msg as unknown as QueryMessage);
await handleQuery(msg as unknown as QueryMessage);
break;
case 'stop':
handleStop(msg as unknown as StopMessage);
@ -149,7 +151,7 @@ async function handleQuery(msg: QueryMessage) {
sessions.delete(sessionId);
const errMsg = err instanceof Error ? err.message : String(err);
if (errMsg.includes('aborted') || errMsg.includes('AbortError')) {
if (controller.signal.aborted) {
log(`Agent session ${sessionId} aborted`);
send({
type: 'agent_stopped',

View file

@ -364,7 +364,19 @@ fn claude_list_skills() -> Vec<ClaudeSkill> {
#[tauri::command]
fn claude_read_skill(path: String) -> Result<String, String> {
    // Validate that `path` resolves under ~/.claude/skills/ to prevent path
    // traversal. Canonicalization resolves symlinks and `..` components
    // BEFORE the prefix check, so `../../etc/passwd`-style inputs are
    // rejected rather than matched lexically.
    let skills_dir = dirs::home_dir()
        .ok_or("Cannot determine home directory")?
        .join(".claude")
        .join("skills");
    let canonical_skills = skills_dir
        .canonicalize()
        .map_err(|_| "Skills directory does not exist".to_string())?;
    let canonical_path = std::path::Path::new(&path)
        .canonicalize()
        .map_err(|e| format!("Invalid skill path: {e}"))?;
    if !canonical_path.starts_with(&canonical_skills) {
        return Err("Access denied: path is outside skills directory".to_string());
    }
    // Read through the canonicalized path (not the raw input) so the value
    // that was validated is the value that is used.
    std::fs::read_to_string(&canonical_path).map_err(|e| format!("Failed to read skill: {e}"))
}
// --- Group config commands (v3) ---
@ -458,18 +470,18 @@ fn cli_get_group() -> Option<String> {
// --- Remote machine commands ---
#[tauri::command]
fn remote_list(state: State<'_, AppState>) -> Vec<remote::RemoteMachineInfo> {
state.remote_manager.list_machines()
async fn remote_list(state: State<'_, AppState>) -> Result<Vec<remote::RemoteMachineInfo>, String> {
Ok(state.remote_manager.list_machines().await)
}
#[tauri::command]
async fn remote_add(state: State<'_, AppState>, config: RemoteMachineConfig) -> Result<String, String> {
    // Registers a new remote machine and returns its generated id.
    // Awaits the manager's async lock instead of a best-effort try_lock.
    Ok(state.remote_manager.add_machine(config).await)
}
#[tauri::command]
async fn remote_remove(state: State<'_, AppState>, machine_id: String) -> Result<(), String> {
    // Removal also aborts any live connection task inside RemoteManager
    // (see remove_machine) so no background task leaks after deletion.
    state.remote_manager.remove_machine(&machine_id).await
}
#[tauri::command]

View file

@ -69,22 +69,18 @@ impl RemoteManager {
}
}
pub fn list_machines(&self) -> Vec<RemoteMachineInfo> {
// Use try_lock for sync context (called from Tauri command handler)
let machines = self.machines.try_lock();
match machines {
Ok(m) => m.values().map(|m| RemoteMachineInfo {
id: m.id.clone(),
label: m.config.label.clone(),
url: m.config.url.clone(),
status: m.status.clone(),
auto_connect: m.config.auto_connect,
}).collect(),
Err(_) => Vec::new(),
}
pub async fn list_machines(&self) -> Vec<RemoteMachineInfo> {
let machines = self.machines.lock().await;
machines.values().map(|m| RemoteMachineInfo {
id: m.id.clone(),
label: m.config.label.clone(),
url: m.config.url.clone(),
status: m.status.clone(),
auto_connect: m.config.auto_connect,
}).collect()
}
pub fn add_machine(&self, config: RemoteMachineConfig) -> String {
pub async fn add_machine(&self, config: RemoteMachineConfig) -> String {
let id = uuid::Uuid::new_v4().to_string();
let machine = RemoteMachine {
id: id.clone(),
@ -92,16 +88,18 @@ impl RemoteManager {
status: "disconnected".to_string(),
connection: None,
};
// Use try_lock for sync context
if let Ok(mut machines) = self.machines.try_lock() {
machines.insert(id.clone(), machine);
}
self.machines.lock().await.insert(id.clone(), machine);
id
}
pub fn remove_machine(&self, machine_id: &str) -> Result<(), String> {
let mut machines = self.machines.try_lock()
.map_err(|_| "Lock contention".to_string())?;
pub async fn remove_machine(&self, machine_id: &str) -> Result<(), String> {
let mut machines = self.machines.lock().await;
if let Some(machine) = machines.get_mut(machine_id) {
// Abort connection tasks before removing to prevent resource leaks
if let Some(conn) = machine.connection.take() {
conn._handle.abort();
}
}
machines.remove(machine_id)
.ok_or_else(|| format!("Machine {machine_id} not found"))?;
Ok(())

View file

@ -373,13 +373,17 @@ impl SessionDb {
messages: &[AgentMessageRecord],
) -> Result<(), String> {
let conn = self.conn.lock().unwrap();
// Wrap DELETE+INSERTs in a transaction to prevent partial writes on crash
let tx = conn.unchecked_transaction()
.map_err(|e| format!("Begin transaction failed: {e}"))?;
// Clear previous messages for this session
conn.execute(
tx.execute(
"DELETE FROM agent_messages WHERE session_id = ?1",
params![session_id],
).map_err(|e| format!("Delete old messages failed: {e}"))?;
let mut stmt = conn.prepare(
let mut stmt = tx.prepare(
"INSERT INTO agent_messages (session_id, project_id, sdk_session_id, message_type, content, parent_id, created_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"
).map_err(|e| format!("Prepare insert failed: {e}"))?;
@ -394,6 +398,8 @@ impl SessionDb {
msg.created_at,
]).map_err(|e| format!("Insert message failed: {e}"))?;
}
drop(stmt);
tx.commit().map_err(|e| format!("Commit failed: {e}"))?;
Ok(())
}

View file

@ -39,6 +39,7 @@ let sidecarAlive = true;
// Sidecar crash recovery state
const MAX_RESTART_ATTEMPTS = 3;
let restartAttempts = 0;
let restarting = false;
/**
 * Reports whether the sidecar process is currently believed to be running.
 * Reflects the module-level `sidecarAlive` flag, which is cleared by the
 * exit handler and set again after a successful restart.
 */
export function isSidecarAlive(): boolean {
return sidecarAlive;
}
@ -68,7 +69,7 @@ export async function startAgentDispatcher(): Promise<void> {
break;
case 'agent_event':
handleAgentEvent(sessionId, msg.event!);
if (msg.event) handleAgentEvent(sessionId, msg.event);
break;
case 'agent_stopped':
@ -88,6 +89,11 @@ export async function startAgentDispatcher(): Promise<void> {
unlistenExit = await onSidecarExited(async () => {
sidecarAlive = false;
// Guard against re-entrant exit handler (double-restart race)
if (restarting) return;
restarting = true;
// Mark all running sessions as errored
for (const session of getAgentSessions()) {
if (session.status === 'running' || session.status === 'starting') {
@ -96,22 +102,26 @@ export async function startAgentDispatcher(): Promise<void> {
}
// Attempt auto-restart with exponential backoff
if (restartAttempts < MAX_RESTART_ATTEMPTS) {
restartAttempts++;
const delayMs = 1000 * Math.pow(2, restartAttempts - 1); // 1s, 2s, 4s
notify('warning', `Sidecar crashed, restarting (attempt ${restartAttempts}/${MAX_RESTART_ATTEMPTS})...`);
await new Promise((resolve) => setTimeout(resolve, delayMs));
try {
await restartAgent();
sidecarAlive = true;
// Note: restartAttempts is reset when next sidecar message arrives
} catch {
if (restartAttempts >= MAX_RESTART_ATTEMPTS) {
notify('error', `Sidecar restart failed after ${MAX_RESTART_ATTEMPTS} attempts`);
try {
if (restartAttempts < MAX_RESTART_ATTEMPTS) {
restartAttempts++;
const delayMs = 1000 * Math.pow(2, restartAttempts - 1); // 1s, 2s, 4s
notify('warning', `Sidecar crashed, restarting (attempt ${restartAttempts}/${MAX_RESTART_ATTEMPTS})...`);
await new Promise((resolve) => setTimeout(resolve, delayMs));
try {
await restartAgent();
sidecarAlive = true;
// Note: restartAttempts is reset when next sidecar message arrives
} catch {
if (restartAttempts >= MAX_RESTART_ATTEMPTS) {
notify('error', `Sidecar restart failed after ${MAX_RESTART_ATTEMPTS} attempts`);
}
}
} else {
notify('error', `Sidecar restart failed after ${MAX_RESTART_ATTEMPTS} attempts`);
}
} else {
notify('error', `Sidecar restart failed after ${MAX_RESTART_ATTEMPTS} attempts`);
} finally {
restarting = false;
}
});
}
@ -300,4 +310,7 @@ export function stopAgentDispatcher(): void {
unlistenExit();
unlistenExit = null;
}
// Clear routing maps to prevent unbounded memory growth
toolUseToChildPane.clear();
sessionProjectMap.clear();
}

View file

@ -71,41 +71,47 @@ export async function disconnectMachine(id: string): Promise<void> {
if (machine) machine.status = 'disconnected';
}
// Stored unlisten functions for cleanup
let unlistenFns: (() => void)[] = [];
// Initialize event listeners for machine status updates
export async function initMachineListeners(): Promise<void> {
await onRemoteMachineReady((msg) => {
// Clean up any existing listeners first
destroyMachineListeners();
unlistenFns.push(await onRemoteMachineReady((msg) => {
const machine = machines.find(m => m.id === msg.machineId);
if (machine) {
machine.status = 'connected';
notify('success', `Connected to ${machine.label}`);
}
});
}));
await onRemoteMachineDisconnected((msg) => {
unlistenFns.push(await onRemoteMachineDisconnected((msg) => {
const machine = machines.find(m => m.id === msg.machineId);
if (machine) {
machine.status = 'disconnected';
notify('warning', `Disconnected from ${machine.label}`);
}
});
}));
await onRemoteError((msg) => {
unlistenFns.push(await onRemoteError((msg) => {
const machine = machines.find(m => m.id === msg.machineId);
if (machine) {
machine.status = 'error';
notify('error', `Error from ${machine.label}: ${msg.error}`);
}
});
}));
await onRemoteMachineReconnecting((msg) => {
unlistenFns.push(await onRemoteMachineReconnecting((msg) => {
const machine = machines.find(m => m.id === msg.machineId);
if (machine) {
machine.status = 'reconnecting';
notify('info', `Reconnecting to ${machine.label} in ${msg.backoffSecs}s…`);
}
});
}));
await onRemoteMachineReconnectReady((msg) => {
unlistenFns.push(await onRemoteMachineReconnectReady((msg) => {
const machine = machines.find(m => m.id === msg.machineId);
if (machine) {
notify('info', `${machine.label} reachable — reconnecting…`);
@ -113,5 +119,13 @@ export async function initMachineListeners(): Promise<void> {
notify('error', `Auto-reconnect failed for ${machine.label}: ${e}`);
});
}
});
}));
}
/**
 * Tear down every registered machine event listener so none leak across
 * re-initialization, then reset the registry to an empty array.
 */
export function destroyMachineListeners(): void {
  unlistenFns.forEach((unlisten) => unlisten());
  unlistenFns = [];
}