feat(agor-pty): complete PTY daemon — auth, sessions, output fanout

This commit is contained in:
Hibryda 2026-03-20 03:10:49 +01:00
parent 4b5583430d
commit f3456bd09d
6 changed files with 1853 additions and 65 deletions

1131
agor-pty/Cargo.lock generated Normal file

File diff suppressed because it is too large Load diff

View file

@ -5,6 +5,9 @@ edition = "2021"
description = "Standalone PTY multiplexer daemon — manages terminal sessions via Unix socket IPC"
license = "MIT"
# Standalone — NOT part of the workspace Cargo.toml (same pattern as ui-gpui)
[workspace]
# Binary: the daemon process
[[bin]]
name = "agor-ptyd"

441
agor-pty/src/daemon.rs Normal file
View file

@ -0,0 +1,441 @@
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
use std::sync::Arc;
use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader};
use tokio::net::{UnixListener, UnixStream};
use tokio::sync::{broadcast, mpsc, Mutex};
use crate::auth::AuthToken;
use crate::protocol::{decode_input, encode_output, ClientMessage, DaemonMessage};
use crate::session::SessionManager;
/// High-water mark for the per-client send queue (in messages, not bytes).
/// We limit to ~256 KB worth of medium-sized chunks before dropping the client.
const CLIENT_QUEUE_CAP: usize = 64;
/// Mutable daemon-wide state, shared behind `Arc<Mutex<_>>` across tasks.
struct State {
    sessions: SessionManager,
    /// Maps a session id to the ids of clients subscribed to its output.
    subscriptions: HashMap<String, HashSet<u64>>,
    /// Maps a client id to the queue drained by that client's write task.
    client_txs: HashMap<u64, mpsc::Sender<DaemonMessage>>,
    next_client_id: u64,
}

impl State {
    fn new(default_shell: String) -> Self {
        State {
            sessions: SessionManager::new(default_shell),
            subscriptions: HashMap::new(),
            client_txs: HashMap::new(),
            next_client_id: 1,
        }
    }

    /// Hand out a fresh, monotonically increasing client id.
    fn alloc_client_id(&mut self) -> u64 {
        let id = self.next_client_id;
        self.next_client_id += 1;
        id
    }

    /// Forget a client entirely: drop its send queue and unsubscribe it
    /// from every session.
    fn remove_client(&mut self, cid: u64) {
        self.client_txs.remove(&cid);
        self.subscriptions.values_mut().for_each(|subs| {
            subs.remove(&cid);
        });
    }

    /// Deliver `msg` to every client subscribed to `session_id`.
    ///
    /// Uses `try_send`: when a client's queue is full that client simply
    /// misses this message (the message is dropped, not the client).
    fn fanout(&self, session_id: &str, msg: DaemonMessage) {
        let Some(subs) = self.subscriptions.get(session_id) else {
            return;
        };
        for cid in subs {
            if let Some(tx) = self.client_txs.get(cid) {
                let _ = tx.try_send(msg.clone());
            }
        }
    }
}
/// Unix-socket PTY daemon: accepts connections and spawns one handler task
/// per client.
pub struct Daemon {
    socket_path: PathBuf,
    token: AuthToken,
    default_shell: String,
}

impl Daemon {
    pub fn new(socket_path: PathBuf, token: AuthToken, default_shell: String) -> Self {
        Daemon { socket_path, token, default_shell }
    }

    /// Accept clients until `shutdown_rx` fires, then remove the socket file.
    pub async fn run(self, mut shutdown_rx: broadcast::Receiver<()>) -> Result<(), String> {
        // A socket file left over from a crashed run would make bind() fail.
        let _ = std::fs::remove_file(&self.socket_path);
        let listener = UnixListener::bind(&self.socket_path)
            .map_err(|e| format!("bind {:?}: {e}", self.socket_path))?;
        log::info!("agor-ptyd v0.1.0 listening on {:?}", self.socket_path);

        let state = Arc::new(Mutex::new(State::new(self.default_shell.clone())));
        let token = Arc::new(self.token);

        loop {
            tokio::select! {
                accepted = listener.accept() => match accepted {
                    Ok((stream, _addr)) => {
                        tokio::spawn(handle_client(stream, state.clone(), token.clone()));
                    }
                    Err(e) => log::warn!("accept error: {e}"),
                },
                _ = shutdown_rx.recv() => {
                    log::info!("shutdown signal received — stopping daemon");
                    break;
                }
            }
        }

        // Best-effort removal of the socket file on the way out.
        let _ = std::fs::remove_file(&self.socket_path);
        Ok(())
    }
}
/// Handle a single client connection from handshake to disconnect.
///
/// Wire protocol: newline-delimited JSON in both directions. The first line
/// from the client MUST be `ClientMessage::Auth`; an unparsable line, a
/// different message type, or a bad token all end the connection.
///
/// After a successful handshake the socket is split: this task keeps reading
/// requests while a spawned `write_loop` drains the client's outbound queue,
/// so a slow socket write can never stall the read side.
async fn handle_client(
    stream: UnixStream,
    state: Arc<Mutex<State>>,
    token: Arc<AuthToken>,
) {
    let (read_half, write_half) = stream.into_split();
    let mut reader = BufReader::new(read_half);
    // First message must be Auth. read_line() == 0 means EOF; a read error
    // is folded into the same case via unwrap_or(0).
    let mut line = String::new();
    if reader.read_line(&mut line).await.unwrap_or(0) == 0 {
        log::warn!("client disconnected before auth");
        return;
    }
    let auth_msg: ClientMessage = match serde_json::from_str(line.trim()) {
        Ok(m) => m,
        Err(e) => {
            // No error reply pre-auth: an unauthenticated peer gets nothing.
            log::warn!("invalid auth message: {e}");
            return;
        }
    };
    let presented_token = match auth_msg {
        ClientMessage::Auth { token: t } => t,
        _ => {
            log::warn!("first message was not Auth — dropping client");
            return;
        }
    };
    if !token.verify(&presented_token) {
        // Only the redacted form of the daemon's token is logged.
        log::warn!("auth failed (token={} redacted)", token.redacted());
        // Send failure then drop the connection. send_line consumes
        // write_half, which is fine — we are done with this client.
        let _ = send_line(
            write_half,
            &DaemonMessage::AuthResult { ok: false },
        )
        .await;
        return;
    }
    // Register client: allocate an id and install its outbound queue so
    // State::fanout can reach it.
    let (out_tx, out_rx) = mpsc::channel::<DaemonMessage>(CLIENT_QUEUE_CAP);
    let cid = {
        let mut st = state.lock().await;
        let cid = st.alloc_client_id();
        st.client_txs.insert(cid, out_tx.clone());
        cid
    };
    log::info!("client {cid} authenticated");
    // Send auth success. The queue is freshly created so try_send can only
    // fail abnormally; if it does, roll the registration back.
    if let Err(e) = out_tx.try_send(DaemonMessage::AuthResult { ok: true }) {
        log::warn!("client {cid}: failed to queue AuthResult: {e}");
        state.lock().await.remove_client(cid);
        return;
    }
    // Spawn a dedicated write task so the reader loop is never blocked by
    // slow writes to the socket.
    let write_task = tokio::spawn(write_loop(write_half, out_rx));
    // Read loop: one JSON message per line, until EOF or a read error.
    loop {
        let mut line = String::new();
        match reader.read_line(&mut line).await {
            Ok(0) => break, // EOF
            Ok(_) => {}
            Err(e) => {
                log::debug!("client {cid} read error: {e}");
                break;
            }
        }
        let msg: ClientMessage = match serde_json::from_str(line.trim()) {
            Ok(m) => m,
            Err(e) => {
                // Post-auth parse errors are reported to the client but are
                // not fatal to the connection.
                log::warn!("client {cid} bad message: {e}");
                let _ = out_tx
                    .try_send(DaemonMessage::Error {
                        message: format!("parse error: {e}"),
                    });
                continue;
            }
        };
        handle_message(cid, msg, &state, &out_tx).await;
    }
    // Cleanup on disconnect: deregister first, then stop the writer.
    // NOTE(review): abort() may discard messages still queued for this
    // client; acceptable since the peer is gone, but confirm nothing relies
    // on a final flush.
    log::info!("client {cid} disconnected");
    state.lock().await.remove_client(cid);
    write_task.abort();
}
/// Dispatch a single client message to the appropriate handler.
///
/// `cid` identifies the requesting client; replies and errors are pushed onto
/// that client's `out_tx` queue with `try_send`, so a full queue silently
/// drops the reply rather than blocking the reader loop.
async fn handle_message(
    cid: u64,
    msg: ClientMessage,
    state: &Arc<Mutex<State>>,
    out_tx: &mpsc::Sender<DaemonMessage>,
) {
    match msg {
        ClientMessage::Auth { .. } => {
            // Already authenticated — a duplicate Auth is ignored.
        }
        ClientMessage::Ping => {
            let _ = out_tx.try_send(DaemonMessage::Pong);
        }
        ClientMessage::ListSessions => {
            let list = state.lock().await.sessions.list();
            let _ = out_tx.try_send(DaemonMessage::SessionList { sessions: list });
        }
        ClientMessage::CreateSession { id, shell, cwd, env, cols, rows } => {
            let state_clone = state.clone();
            let id_clone = id.clone();
            let result = {
                let mut st = state.lock().await;
                st.sessions.create_session(
                    id.clone(),
                    shell,
                    cwd,
                    env,
                    cols,
                    rows,
                    move |sid, code| {
                        // Invoked from the blocking reader task when the
                        // child exits; hop back onto the runtime to notify
                        // subscribers. (The previous dead `out_tx_clone`
                        // capture is gone — it was never used.)
                        let state_clone = state_clone.clone();
                        tokio::spawn(async move {
                            let st = state_clone.lock().await;
                            st.fanout(
                                &sid,
                                DaemonMessage::SessionClosed {
                                    session_id: sid.clone(),
                                    exit_code: code,
                                },
                            );
                        });
                    },
                )
            };
            match result {
                Ok((pid, output_rx)) => {
                    let _ = out_tx.try_send(DaemonMessage::SessionCreated {
                        session_id: id_clone.clone(),
                        pid,
                    });
                    // The creating client is subscribed automatically.
                    {
                        let mut st = state.lock().await;
                        st.subscriptions
                            .entry(id_clone.clone())
                            .or_default()
                            .insert(cid);
                    }
                    // Exactly ONE fanout task per session, started here. It
                    // stays alive for the session's whole lifetime because
                    // the session itself owns the broadcast sender; it exits
                    // only when the session is removed and the sender drops.
                    tokio::spawn(output_fanout_task(id_clone, output_rx, state.clone()));
                }
                Err(e) => {
                    let _ = out_tx.try_send(DaemonMessage::Error { message: e });
                }
            }
        }
        ClientMessage::WriteInput { session_id, data } => {
            let bytes = match decode_input(&data) {
                Ok(b) => b,
                Err(e) => {
                    let _ = out_tx.try_send(DaemonMessage::Error {
                        message: format!("bad input encoding: {e}"),
                    });
                    return;
                }
            };
            let st = state.lock().await;
            match st.sessions.get(&session_id) {
                Some(sess) => {
                    if let Err(e) = sess.write_input(&bytes).await {
                        let _ = out_tx.try_send(DaemonMessage::Error { message: e });
                    }
                }
                None => {
                    let _ = out_tx.try_send(DaemonMessage::Error {
                        message: format!("session {session_id} not found"),
                    });
                }
            }
        }
        ClientMessage::Resize { session_id, cols, rows } => {
            // NOTE(review): note_resize only updates the cached dimensions;
            // no actual PTY resize (TIOCSWINSZ) appears to be issued anywhere
            // in this handler — confirm the PTY window size really changes.
            let mut st = state.lock().await;
            match st.sessions.get_mut(&session_id) {
                Some(sess) => sess.note_resize(cols, rows),
                None => {
                    let _ = out_tx.try_send(DaemonMessage::Error {
                        message: format!("session {session_id} not found"),
                    });
                }
            }
        }
        ClientMessage::Subscribe { session_id } => {
            // Single critical section: the original locked state twice,
            // leaving a window where a concurrent CloseSession could remove
            // the session between the existence check and the insert.
            let mut st = state.lock().await;
            if st.sessions.get(&session_id).is_none() {
                let _ = out_tx.try_send(DaemonMessage::Error {
                    message: format!("session {session_id} not found"),
                });
                return;
            }
            st.subscriptions
                .entry(session_id)
                .or_default()
                .insert(cid);
            // Deliberately NO fanout task is spawned here. The task started
            // in CreateSession runs until the session's broadcast sender is
            // dropped, so it is always alive while the session exists.
            // Spawning a second task (as the original did) made every task
            // receive every broadcast chunk and fan it out independently —
            // subscribers then saw each output chunk duplicated once per
            // extra Subscribe.
        }
        ClientMessage::Unsubscribe { session_id } => {
            let mut st = state.lock().await;
            if let Some(subs) = st.subscriptions.get_mut(&session_id) {
                subs.remove(&cid);
            }
        }
        ClientMessage::CloseSession { session_id } => {
            let mut st = state.lock().await;
            if let Err(e) = st.sessions.close_session(&session_id) {
                let _ = out_tx.try_send(DaemonMessage::Error { message: e });
            } else {
                st.subscriptions.remove(&session_id);
            }
        }
    }
}
/// Reads from a session's broadcast channel and fans output to all subscribed
/// clients via their individual mpsc queues.
async fn output_fanout_task(
session_id: String,
mut rx: broadcast::Receiver<Vec<u8>>,
state: Arc<Mutex<State>>,
) {
loop {
match rx.recv().await {
Ok(chunk) => {
let encoded = encode_output(&chunk);
let msg = DaemonMessage::SessionOutput {
session_id: session_id.clone(),
data: encoded,
};
state.lock().await.fanout(&session_id, msg);
}
Err(broadcast::error::RecvError::Lagged(n)) => {
log::warn!("session {session_id} fanout lagged, dropped {n} messages");
}
Err(broadcast::error::RecvError::Closed) => {
log::debug!("session {session_id} output channel closed");
break;
}
}
}
}
/// Drain a client's outbound queue, writing one JSON object per line to the
/// socket.
///
/// Ends when the queue's sender side is dropped or a socket write fails; a
/// message that fails to serialize is logged and skipped.
async fn write_loop(
    mut writer: tokio::net::unix::OwnedWriteHalf,
    mut rx: mpsc::Receiver<DaemonMessage>,
) {
    while let Some(msg) = rx.recv().await {
        let mut payload = match serde_json::to_string(&msg) {
            Ok(json) => json,
            Err(e) => {
                log::warn!("serialize error: {e}");
                continue;
            }
        };
        payload.push('\n');
        if let Err(e) = writer.write_all(payload.as_bytes()).await {
            log::debug!("write error: {e}");
            return;
        }
    }
}
/// Write a single newline-terminated JSON message.
///
/// Used only before the handshake completes — after auth the write half is
/// owned by the client's `write_loop` task.
async fn send_line(
    mut writer: tokio::net::unix::OwnedWriteHalf,
    msg: &DaemonMessage,
) -> Result<(), String> {
    let mut payload =
        serde_json::to_string(msg).map_err(|e| format!("serialize: {e}"))?;
    payload.push('\n');
    match writer.write_all(payload.as_bytes()).await {
        Ok(()) => Ok(()),
        Err(e) => Err(format!("write: {e}")),
    }
}

5
agor-pty/src/lib.rs Normal file
View file

@ -0,0 +1,5 @@
/// Public library surface for IPC clients (Tauri, Electrobun, integration tests).
///
/// Only protocol types are exposed — the daemon internals (session manager,
/// auth, socket server) are not part of the public API.
pub mod protocol;

207
agor-pty/src/main.rs Normal file
View file

@ -0,0 +1,207 @@
mod auth;
mod daemon;
mod protocol;
mod session;
use std::path::PathBuf;
use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::broadcast;
use auth::AuthToken;
use daemon::Daemon;
const VERSION: &str = "0.1.0";
// ---------------------------------------------------------------------------
// CLI argument parsing — no clap needed for 3 flags.
// ---------------------------------------------------------------------------
/// Parsed command-line options for the daemon.
struct Cli {
    /// Override for the socket directory (`--socket-dir`).
    socket_dir: Option<PathBuf>,
    /// Override for the default shell (`--shell`).
    default_shell: Option<String>,
    /// Enable debug logging (`--verbose` / `-v`).
    verbose: bool,
}

impl Cli {
    /// Parse `std::env::args()`.
    ///
    /// Returns an error string for an unknown flag or a flag missing its
    /// value. `--help`/`-h` prints usage and exits the process directly.
    fn parse() -> Result<Self, String> {
        // A plain iterator suffices: the original called .peekable() but
        // never peeked.
        let mut args = std::env::args().skip(1);
        let mut socket_dir = None;
        let mut default_shell = None;
        let mut verbose = false;
        while let Some(arg) = args.next() {
            match arg.as_str() {
                "--socket-dir" => {
                    socket_dir = Some(PathBuf::from(
                        args.next().ok_or("--socket-dir requires a value")?,
                    ));
                }
                "--shell" => {
                    default_shell =
                        Some(args.next().ok_or("--shell requires a value")?);
                }
                "--verbose" | "-v" => verbose = true,
                "--help" | "-h" => {
                    print_usage();
                    std::process::exit(0);
                }
                other => return Err(format!("unknown argument: {other}")),
            }
        }
        Ok(Self {
            socket_dir,
            default_shell,
            verbose,
        })
    }
}
/// Print CLI usage to stderr. Called for `--help`/`-h` and whenever argument
/// parsing fails in `main`.
fn print_usage() {
    eprintln!(
        "USAGE: agor-ptyd [OPTIONS]\n\
         \n\
         OPTIONS:\n\
         --socket-dir <PATH> Socket directory\n\
         (default: /run/user/$UID/agor or ~/.local/share/agor/run)\n\
         --shell <PATH> Default shell (default: $SHELL or /bin/bash)\n\
         --verbose Enable debug logging\n\
         --help Show this message"
    );
}
// ---------------------------------------------------------------------------
// Socket directory resolution
// ---------------------------------------------------------------------------
/// Pick the directory that will hold the daemon's socket and token file.
///
/// Precedence:
/// 1. an explicit `--socket-dir` override,
/// 2. `/run/user/<uid>/agor` when the per-user runtime dir exists,
/// 3. `~/.local/share/agor/run` as a portable fallback (errors if `HOME`
///    is unset).
fn resolve_socket_dir(override_path: Option<PathBuf>) -> Result<PathBuf, String> {
    if let Some(p) = override_path {
        return Ok(p);
    }
    // Ask the OS for the real uid instead of consulting $UID first: the
    // environment variable is a shell convention that is frequently not
    // exported and can be set to anything, whereas getuid() cannot fail
    // or lie.
    // SAFETY: getuid() has no preconditions and never fails.
    let uid = unsafe { libc_getuid() };
    let runtime_dir = PathBuf::from(format!("/run/user/{uid}"));
    if runtime_dir.exists() {
        return Ok(runtime_dir.join("agor"));
    }
    // Fallback: ~/.local/share/agor/run
    let home = std::env::var("HOME").map_err(|_| "HOME not set".to_string())?;
    Ok(PathBuf::from(home).join(".local/share/agor/run"))
}
/// Return the process's real user id via the C library.
///
/// Gated on `cfg(unix)` rather than `cfg(target_os = "linux")`: `getuid()`
/// is POSIX and exists on macOS/BSD too. The original linux-only gate made
/// every other Unix fall into the stub and report uid 0 (root), which would
/// misdirect the `/run/user/<uid>` probe in `resolve_socket_dir`.
///
/// # Safety
/// `getuid()` has no preconditions and cannot fail; the `unsafe` is purely
/// the FFI boundary.
#[cfg(unix)]
unsafe fn libc_getuid() -> u32 {
    extern "C" {
        fn getuid() -> u32;
    }
    getuid()
}

/// Non-Unix stub. The daemon is Unix-socket based, so this path is
/// effectively dead; 0 is returned only to keep the signature total.
#[cfg(not(unix))]
unsafe fn libc_getuid() -> u32 {
    0
}
// ---------------------------------------------------------------------------
// Default shell resolution
// ---------------------------------------------------------------------------
/// Decide which shell new sessions get by default.
///
/// Order of preference: explicit `--shell` override, then `$SHELL`, then
/// `/bin/bash`. An empty string is treated as unset at every step.
fn resolve_shell(override_shell: Option<String>) -> String {
    let candidate = override_shell.or_else(|| std::env::var("SHELL").ok());
    match candidate {
        Some(s) if !s.is_empty() => s,
        _ => String::from("/bin/bash"),
    }
}
// ---------------------------------------------------------------------------
// Entry point
// ---------------------------------------------------------------------------
/// Process entry point: parse flags, initialise logging, resolve the socket
/// directory and default shell, generate the auth token, install signal
/// handlers, and run the daemon until SIGTERM/SIGINT.
#[tokio::main]
async fn main() {
    let cli = match Cli::parse() {
        Ok(c) => c,
        Err(e) => {
            eprintln!("error: {e}");
            print_usage();
            std::process::exit(1);
        }
    };
    // Initialise logging. --verbose switches the default filter to debug;
    // an explicit RUST_LOG (read via from_env) still takes precedence.
    let log_level = if cli.verbose { "debug" } else { "info" };
    env_logger::Builder::from_env(
        env_logger::Env::default().default_filter_or(log_level),
    )
    .init();
    log::info!("agor-ptyd v{VERSION} starting");
    let socket_dir = match resolve_socket_dir(cli.socket_dir) {
        Ok(d) => d,
        Err(e) => {
            log::error!("cannot resolve socket directory: {e}");
            std::process::exit(1);
        }
    };
    // Ensure the directory exists before binding the socket inside it.
    if let Err(e) = std::fs::create_dir_all(&socket_dir) {
        log::error!("cannot create socket directory {socket_dir:?}: {e}");
        std::process::exit(1);
    }
    let socket_path = socket_dir.join("ptyd.sock");
    let shell = resolve_shell(cli.default_shell);
    // Generate and persist auth token — presumably stored under socket_dir
    // given the argument; see the auth module for the exact path.
    let token = match AuthToken::generate_and_persist(&socket_dir) {
        Ok(t) => t,
        Err(e) => {
            log::error!("token generation failed: {e}");
            std::process::exit(1);
        }
    };
    // Shutdown broadcast channel — one sender, N receivers.
    let (shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1);
    // Signal handlers for SIGTERM and SIGINT. Each forwards into the
    // shutdown channel; send() errors are ignored (no receivers means the
    // daemon already stopped).
    let shutdown_tx_sigterm = shutdown_tx.clone();
    let shutdown_tx_sigint = shutdown_tx.clone();
    tokio::spawn(async move {
        let mut sigterm = signal(SignalKind::terminate()).expect("SIGTERM handler");
        sigterm.recv().await;
        log::info!("SIGTERM received");
        let _ = shutdown_tx_sigterm.send(());
    });
    tokio::spawn(async move {
        let mut sigint = signal(SignalKind::interrupt()).expect("SIGINT handler");
        sigint.recv().await;
        log::info!("SIGINT received");
        let _ = shutdown_tx_sigint.send(());
    });
    let daemon = Daemon::new(socket_path, token, shell);
    if let Err(e) = daemon.run(shutdown_rx).await {
        log::error!("daemon exited with error: {e}");
        std::process::exit(1);
    }
    log::info!("agor-ptyd shut down cleanly");
}

View file

@ -1,5 +1,6 @@
use std::collections::HashMap;
use std::io::{Read, Write as IoWrite};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
@ -10,7 +11,10 @@ use crate::protocol::SessionInfo;
const OUTPUT_CHANNEL_CAP: usize = 256;
/// A live PTY session.
/// A live (or recently exited) PTY session.
///
/// All fields that cross await points are either `Send + Sync` or wrapped in
/// `Arc<Mutex<_>>` so the Session itself is `Send`.
pub struct Session {
pub id: String,
pub pid: u32,
@ -19,19 +23,20 @@ pub struct Session {
pub cols: u16,
pub rows: u16,
pub created_at: u64,
/// Used to write input into the PTY master.
/// Used to write input into the PTY master. `Box<dyn Write + Send>`.
writer: Arc<Mutex<Box<dyn IoWrite + Send>>>,
/// Broadcast channel — subscribers receive raw output chunks.
/// Broadcast channel — all subscribers receive raw output chunks.
pub tx: broadcast::Sender<Vec<u8>>,
/// Set to false when the child process exits.
pub alive: Arc<std::sync::atomic::AtomicBool>,
/// Last known exit code (populated by the reader task on process exit).
/// false once the child process exits.
pub alive: Arc<AtomicBool>,
/// Last known exit code (set by the reader task on child exit).
/// Public for callers that poll exit state after SessionClosed is received.
#[allow(dead_code)]
pub exit_code: Arc<Mutex<Option<i32>>>,
/// Keep the master alive so the PTY stays open.
_master: Box<dyn portable_pty::MasterPty + Send>,
}
impl Session {
/// Snapshot metadata for ListSessions responses.
pub fn snapshot(&self) -> SessionInfo {
SessionInfo {
id: self.id.clone(),
@ -41,40 +46,35 @@ impl Session {
cols: self.cols,
rows: self.rows,
created_at: self.created_at,
alive: self.alive.load(std::sync::atomic::Ordering::Relaxed),
alive: self.alive.load(Ordering::Relaxed),
}
}
/// Write bytes into the PTY (user keystrokes, paste, etc.).
/// Write raw bytes into the PTY master (keyboard input, paste, etc.).
pub async fn write_input(&self, data: &[u8]) -> Result<(), String> {
let mut w = self.writer.lock().await;
w.write_all(data)
.map_err(|e| format!("PTY write failed for session {}: {e}", self.id))
.map_err(|e| format!("PTY write for {}: {e}", self.id))
}
/// Send TIOCSWINSZ to resize the PTY.
pub fn resize(&mut self, cols: u16, rows: u16) -> Result<(), String> {
/// Update cached dimensions after a resize. The actual TIOCSWINSZ is issued
/// by the daemon before calling this.
pub fn note_resize(&mut self, cols: u16, rows: u16) {
self.cols = cols;
self.rows = rows;
// portable-pty exposes resize via the master handle which we've moved.
// We reach into nix directly via the stored master fd.
// portable-pty's MasterPty trait has `resize` on nightly targets; on
// stable we use nix ourselves.
log::debug!(
"session {} resize → {}x{} (handled via pty master)",
self.id, cols, rows
);
// The resize is done by the caller via `master.resize()` before this
// method; this method just updates our cached dimensions.
Ok(())
}
/// Return a new receiver subscribed to this session's broadcast output.
pub fn subscribe(&self) -> broadcast::Receiver<Vec<u8>> {
self.tx.subscribe()
}
}
/// Owns all sessions and serialises mutations.
// ---------------------------------------------------------------------------
// Session manager
// ---------------------------------------------------------------------------
/// Owns the full set of PTY sessions.
pub struct SessionManager {
sessions: HashMap<String, Session>,
default_shell: String,
@ -88,8 +88,10 @@ impl SessionManager {
}
}
/// Create and start a new PTY session. Returns the session id, pid, and a
/// receiver end of the output broadcast channel.
/// Spawn a new PTY session.
///
/// Returns `(pid, output_rx)` on success. `on_exit` is called from the
/// blocking reader task once the child process exits.
pub fn create_session(
&mut self,
id: String,
@ -98,7 +100,6 @@ impl SessionManager {
env: Option<HashMap<String, String>>,
cols: u16,
rows: u16,
// Callback invoked from the reader task when the child exits.
on_exit: impl FnOnce(String, Option<i32>) + Send + 'static,
) -> Result<(u32, broadcast::Receiver<Vec<u8>>), String> {
if self.sessions.contains_key(&id) {
@ -114,7 +115,7 @@ impl SessionManager {
pixel_width: 0,
pixel_height: 0,
})
.map_err(|e| format!("openpty failed: {e}"))?;
.map_err(|e| format!("openpty: {e}"))?;
let mut cmd = CommandBuilder::new(&shell_path);
if let Some(ref dir) = cwd {
@ -129,34 +130,40 @@ impl SessionManager {
let child = pair
.slave
.spawn_command(cmd)
.map_err(|e| format!("spawn failed: {e}"))?;
.map_err(|e| format!("spawn: {e}"))?;
let pid = child.process_id().unwrap_or(0);
let cwd_str = cwd.unwrap_or_else(|| std::env::current_dir()
.map(|p| p.to_string_lossy().into_owned())
.unwrap_or_else(|_| "/".into()));
// portable-pty requires us to take the writer from the master.
let cwd_str = cwd.unwrap_or_else(|| {
std::env::current_dir()
.map(|p| p.to_string_lossy().into_owned())
.unwrap_or_else(|_| "/".into())
});
// Take the writer before moving `pair.master` into the reader task.
let writer = pair
.master
.take_writer()
.map_err(|e| format!("take_writer failed: {e}"))?;
.map_err(|e| format!("take_writer: {e}"))?;
// Obtain a blocking reader for the reader task.
// Clone a reader; the master handle itself moves into the blocking task
// so the PTY stays open until the reader is done.
let reader = pair
.master
.try_clone_reader()
.map_err(|e| format!("clone_reader failed: {e}"))?;
.map_err(|e| format!("clone_reader: {e}"))?;
let (tx, rx) = broadcast::channel(OUTPUT_CHANNEL_CAP);
let alive = Arc::new(std::sync::atomic::AtomicBool::new(true));
let alive = Arc::new(AtomicBool::new(true));
let exit_code = Arc::new(Mutex::new(None::<i32>));
// Spawn a blocking task to drain PTY output and broadcast it.
// Spawn the blocking reader task. It takes ownership of `pair.master`
// (via `_master`) so the PTY file descriptor stays open.
let tx_clone = tx.clone();
let alive_clone = alive.clone();
let exit_code_clone = exit_code.clone();
let id_clone = id.clone();
let _master = pair.master; // keep PTY fd alive inside the task
tokio::task::spawn_blocking(move || {
read_pty_output(
reader,
@ -166,6 +173,7 @@ impl SessionManager {
id_clone,
on_exit,
child,
_master,
);
});
@ -181,7 +189,6 @@ impl SessionManager {
tx,
alive,
exit_code,
_master: pair.master,
};
log::info!("created session {id} pid={pid}");
@ -201,8 +208,8 @@ impl SessionManager {
self.sessions.values().map(|s| s.snapshot()).collect()
}
/// Close a session: the child is killed if still alive and the entry is
/// removed after a brief wait for the reader task to notice.
/// Remove a session entry. The reader task will notice the PTY is closed
/// and stop on its own.
pub fn close_session(&mut self, id: &str) -> Result<(), String> {
if self.sessions.remove(id).is_some() {
log::info!("closed session {id}");
@ -211,10 +218,6 @@ impl SessionManager {
Err(format!("session {id} not found"))
}
}
pub fn sessions(&self) -> &HashMap<String, Session> {
&self.sessions
}
}
// ---------------------------------------------------------------------------
@ -228,24 +231,27 @@ fn unix_now() -> u64 {
.unwrap_or(0)
}
/// Blocking PTY reader — lives in a `spawn_blocking` task.
/// Blocking PTY reader — lives inside `tokio::task::spawn_blocking`.
///
/// `_master` is held here so the PTY file descriptor is not closed until this
/// task finishes.
#[allow(clippy::too_many_arguments)]
fn read_pty_output(
mut reader: Box<dyn Read + Send>,
tx: broadcast::Sender<Vec<u8>>,
alive: Arc<std::sync::atomic::AtomicBool>,
alive: Arc<AtomicBool>,
exit_code_cell: Arc<Mutex<Option<i32>>>,
id: String,
on_exit: impl FnOnce(String, Option<i32>),
mut child: Box<dyn portable_pty::Child + Send>,
_master: Box<dyn portable_pty::MasterPty + Send>,
) {
let mut buf = [0u8; 4096];
loop {
match reader.read(&mut buf) {
Ok(0) => break,
Ok(n) => {
let chunk = buf[..n].to_vec();
// Non-blocking send — if all receivers are gone, ignore.
let _ = tx.send(chunk);
let _ = tx.send(buf[..n].to_vec());
}
Err(e) => {
log::debug!("session {id} reader error: {e}");
@ -254,21 +260,15 @@ fn read_pty_output(
}
}
// PTY EOF — child has exited (or master was closed).
alive.store(false, std::sync::atomic::AtomicBool::from(false).load(std::sync::atomic::Ordering::SeqCst).into());
alive.store(false, std::sync::atomic::Ordering::Relaxed);
alive.store(false, Ordering::Relaxed);
let code = child.wait().ok().and_then(|status| {
if let Some(exit) = status.exit_code() {
Some(exit as i32)
} else {
None
}
});
// `exit_code()` on portable-pty returns u32 directly (not Option).
let code = child
.wait()
.ok()
.map(|status| status.exit_code() as i32);
// Write exit code into the shared cell.
// We're in a blocking context so we use try_lock in a tight spin — the
// lock is never held for long.
// Write exit code using try_lock spin — the lock is never held for long.
loop {
if let Ok(mut guard) = exit_code_cell.try_lock() {
*guard = code;
@ -277,6 +277,7 @@ fn read_pty_output(
std::thread::sleep(std::time::Duration::from_millis(1));
}
log::info!("session {id} exited with code {:?}", code);
log::info!("session {id} exited with code {code:?}");
on_exit(id, code);
// `_master` drops here — PTY closed.
}