feat: add SidecarManager actor pattern, SPKI pinning, btmsg seen_messages, Aider autonomous mode

Tribunal priorities 1-4: SidecarManager refactored to mpsc actor thread
(eliminates TOCTOU race), SPKI TOFU certificate pinning for relay TLS,
per-message btmsg acknowledgment via seen_messages table, Aider
autonomous mode toggle gating shell execution.
This commit is contained in:
Hibryda 2026-03-14 04:39:40 +01:00
parent 949d90887d
commit 23b4d0cf26
22 changed files with 1273 additions and 297 deletions

View file

@ -40,6 +40,10 @@ opentelemetry-otlp = { version = "0.28", features = ["http-proto", "reqwest-clie
tracing-opentelemetry = "0.29"
keyring = { version = "3", features = ["linux-native"] }
notify-rust = "4"
native-tls = "0.2"
tokio-native-tls = "0.3"
sha2 = "0.10"
hex = "0.4"
[dev-dependencies]
tempfile = "3"

View file

@ -35,6 +35,25 @@ fn open_db() -> Result<Connection, String> {
.map_err(|e| format!("Failed to set WAL mode: {e}"))?;
conn.query_row("PRAGMA busy_timeout = 5000", [], |_| Ok(()))
.map_err(|e| format!("Failed to set busy_timeout: {e}"))?;
conn.execute_batch("PRAGMA foreign_keys = ON")
.map_err(|e| format!("Failed to enable foreign keys: {e}"))?;
// Migration: add seen_messages table if not present
conn.execute_batch(
"CREATE TABLE IF NOT EXISTS seen_messages (
session_id TEXT NOT NULL,
message_id TEXT NOT NULL,
seen_at INTEGER NOT NULL DEFAULT (unixepoch()),
PRIMARY KEY (session_id, message_id),
FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_seen_messages_session ON seen_messages(session_id);"
).map_err(|e| format!("Migration error (seen_messages): {e}"))?;
// Migration: add sender_group_id column to messages if not present
// SQLite's ALTER TABLE ADD COLUMN actually FAILS with "duplicate column name" when the
// column already exists; the error is deliberately discarded below (`let _ =`) so that
// re-running this migration on an already-migrated database is harmless
let _ = conn.execute_batch("ALTER TABLE messages ADD COLUMN sender_group_id TEXT");
Ok(conn)
}
@ -161,6 +180,79 @@ pub fn unread_messages(agent_id: &str) -> Result<Vec<BtmsgMessage>, String> {
msgs.collect::<Result<Vec<_>, _>>().map_err(|e| format!("Row error: {e}"))
}
/// Fetch all messages addressed to `agent_id` that `session_id` has not yet
/// acknowledged.
///
/// Unlike `unread_messages` (which relies on the global `read` flag), this
/// tracks acknowledgment per session via the `seen_messages` table, so each
/// session maintains its own independent view of what is new.
/// Results are ordered oldest-first by `created_at`.
pub fn unseen_messages(agent_id: &str, session_id: &str) -> Result<Vec<BtmsgMessage>, String> {
    let conn = open_db()?;
    // Sender name/role come from a LEFT JOIN so messages from unknown/deleted
    // agents still surface (with NULL sender fields).
    let sql = "SELECT m.id, m.from_agent, m.to_agent, m.content, m.read, m.reply_to, m.created_at, \
        a.name AS sender_name, a.role AS sender_role \
        FROM messages m \
        LEFT JOIN agents a ON a.id = m.from_agent \
        WHERE m.to_agent = ?1 \
        AND m.id NOT IN (SELECT message_id FROM seen_messages WHERE session_id = ?2) \
        ORDER BY m.created_at ASC";
    let mut statement = conn
        .prepare(sql)
        .map_err(|e| format!("Prepare unseen query: {e}"))?;
    let mapped = statement
        .query_map(params![agent_id, session_id], |row| {
            // SQLite stores the read flag as an integer; convert to bool here.
            let read_flag: i32 = row.get("read")?;
            Ok(BtmsgMessage {
                id: row.get("id")?,
                from_agent: row.get("from_agent")?,
                to_agent: row.get("to_agent")?,
                content: row.get("content")?,
                read: read_flag != 0,
                reply_to: row.get("reply_to")?,
                created_at: row.get("created_at")?,
                sender_name: row.get("sender_name")?,
                sender_role: row.get("sender_role")?,
            })
        })
        .map_err(|e| format!("Query unseen: {e}"))?;
    let mut messages = Vec::new();
    for item in mapped {
        messages.push(item.map_err(|e| format!("Row error: {e}"))?);
    }
    Ok(messages)
}
/// Mark specific message IDs as seen by this session.
///
/// All inserts run inside a single transaction, so the batch is atomic
/// (a mid-batch failure leaves no partial acknowledgment state) and only
/// one WAL commit/fsync is paid instead of one per message.
/// Already-seen IDs are silently skipped via `INSERT OR IGNORE`.
pub fn mark_messages_seen(session_id: &str, message_ids: &[String]) -> Result<(), String> {
    if message_ids.is_empty() {
        return Ok(());
    }
    let mut db = open_db()?;
    let tx = db
        .transaction()
        .map_err(|e| format!("Begin mark_seen tx: {e}"))?;
    {
        // Statement is prepared once and reused for every ID in the batch.
        let mut stmt = tx.prepare(
            "INSERT OR IGNORE INTO seen_messages (session_id, message_id) VALUES (?1, ?2)"
        ).map_err(|e| format!("Prepare mark_seen: {e}"))?;
        for id in message_ids {
            stmt.execute(params![session_id, id])
                .map_err(|e| format!("Insert seen: {e}"))?;
        }
    } // stmt must drop before the transaction can be committed
    tx.commit().map_err(|e| format!("Commit mark_seen: {e}"))
}
/// Delete `seen_messages` rows older than `max_age_secs`.
///
/// If the table currently holds more than `emergency_threshold` rows, the
/// retention window is clamped down to 3 days (the shorter of the two wins)
/// so the table cannot grow without bound. Returns the number of rows removed.
pub fn prune_seen_messages(max_age_secs: i64, emergency_threshold: i64) -> Result<u64, String> {
    // Aggressive retention window used when the table is oversized.
    const EMERGENCY_MAX_AGE_SECS: i64 = 3 * 24 * 3600;

    let conn = open_db()?;
    let total: i64 = conn.query_row(
        "SELECT COUNT(*) FROM seen_messages", [], |row| row.get(0)
    ).map_err(|e| format!("Count seen: {e}"))?;
    // Pick the effective retention: clamp to 3 days only in emergency mode.
    let effective_age = if total > emergency_threshold {
        max_age_secs.min(EMERGENCY_MAX_AGE_SECS)
    } else {
        max_age_secs
    };
    let removed = conn.execute(
        "DELETE FROM seen_messages WHERE seen_at < unixepoch() - ?1",
        params![effective_age],
    ).map_err(|e| format!("Prune seen: {e}"))?;
    // usize -> u64 is lossless on all supported targets.
    Ok(removed as u64)
}
pub fn history(agent_id: &str, other_id: &str, limit: i32) -> Result<Vec<BtmsgMessage>, String> {
let db = open_db()?;
let mut stmt = db.prepare(
@ -254,7 +346,8 @@ pub fn send_message(from_agent: &str, to_agent: &str, content: &str) -> Result<S
let msg_id = uuid::Uuid::new_v4().to_string();
db.execute(
"INSERT INTO messages (id, from_agent, to_agent, content, group_id) VALUES (?1, ?2, ?3, ?4, ?5)",
"INSERT INTO messages (id, from_agent, to_agent, content, group_id, sender_group_id) \
VALUES (?1, ?2, ?3, ?4, ?5, (SELECT group_id FROM agents WHERE id = ?2))",
params![msg_id, from_agent, to_agent, content, group_id],
).map_err(|e| format!("Insert error: {e}"))?;
@ -518,6 +611,7 @@ fn open_db_or_create() -> Result<Connection, String> {
read INTEGER DEFAULT 0,
reply_to TEXT,
group_id TEXT NOT NULL,
sender_group_id TEXT,
created_at TEXT DEFAULT (datetime('now')),
FOREIGN KEY (from_agent) REFERENCES agents(id),
FOREIGN KEY (to_agent) REFERENCES agents(id)
@ -619,14 +713,28 @@ fn open_db_or_create() -> Result<Connection, String> {
);
CREATE INDEX IF NOT EXISTS idx_audit_log_agent ON audit_log(agent_id);
CREATE INDEX IF NOT EXISTS idx_audit_log_type ON audit_log(event_type);"
CREATE INDEX IF NOT EXISTS idx_audit_log_type ON audit_log(event_type);
CREATE TABLE IF NOT EXISTS seen_messages (
session_id TEXT NOT NULL,
message_id TEXT NOT NULL,
seen_at INTEGER NOT NULL DEFAULT (unixepoch()),
PRIMARY KEY (session_id, message_id),
FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_seen_messages_session ON seen_messages(session_id);"
).map_err(|e| format!("Schema creation error: {e}"))?;
// Enable foreign keys for ON DELETE CASCADE support
conn.execute_batch("PRAGMA foreign_keys = ON")
.map_err(|e| format!("Failed to enable foreign keys: {e}"))?;
Ok(conn)
}
// ---- Heartbeat monitoring ----
#[allow(dead_code)] // Constructed in get_agent_heartbeats, called via Tauri IPC
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AgentHeartbeat {
@ -651,6 +759,7 @@ pub fn record_heartbeat(agent_id: &str) -> Result<(), String> {
Ok(())
}
#[allow(dead_code)] // Called via Tauri IPC command btmsg_get_agent_heartbeats
pub fn get_agent_heartbeats(group_id: &str) -> Result<Vec<AgentHeartbeat>, String> {
let db = open_db()?;
let mut stmt = db
@ -713,21 +822,6 @@ pub struct DeadLetter {
pub created_at: String,
}
/// Persist a message that could not be delivered into the dead_letter_queue
/// table, together with the delivery failure reason.
///
/// * `from_agent` / `to_agent` — agent IDs of the sender and intended recipient
/// * `content` — the original, undelivered message body
/// * `error` — human-readable description of why delivery failed
pub fn queue_dead_letter(
    from_agent: &str,
    to_agent: &str,
    content: &str,
    error: &str,
) -> Result<(), String> {
    let db = open_db()?;
    db.execute(
        "INSERT INTO dead_letter_queue (from_agent, to_agent, content, error) VALUES (?1, ?2, ?3, ?4)",
        params![from_agent, to_agent, content, error],
    )
    .map_err(|e| format!("Dead letter insert error: {e}"))?;
    Ok(())
}
pub fn get_dead_letters(group_id: &str, limit: i32) -> Result<Vec<DeadLetter>, String> {
let db = open_db()?;
let mut stmt = db
@ -757,6 +851,22 @@ pub fn get_dead_letters(group_id: &str, limit: i32) -> Result<Vec<DeadLetter>, S
.map_err(|e| format!("Row error: {e}"))
}
#[allow(dead_code)] // Called via Tauri IPC command btmsg_queue_dead_letter
/// Persist a message that could not be delivered into the dead_letter_queue
/// table, together with the delivery failure reason.
///
/// * `from_agent` / `to_agent` — agent IDs of the sender and intended recipient
/// * `content` — the original, undelivered message body
/// * `error` — human-readable description of why delivery failed
pub fn queue_dead_letter(
    from_agent: &str,
    to_agent: &str,
    content: &str,
    error: &str,
) -> Result<(), String> {
    let db = open_db()?;
    db.execute(
        "INSERT INTO dead_letter_queue (from_agent, to_agent, content, error) VALUES (?1, ?2, ?3, ?4)",
        params![from_agent, to_agent, content, error],
    )
    .map_err(|e| format!("Dead letter insert error: {e}"))?;
    Ok(())
}
pub fn clear_dead_letters(group_id: &str) -> Result<(), String> {
let db = open_db()?;
db.execute(

View file

@ -78,6 +78,23 @@ pub fn btmsg_register_agents(config: groups::GroupsFile) -> Result<(), String> {
btmsg::register_agents_from_groups(&config)
}
// ---- Per-message acknowledgment (seen_messages) ----
/// IPC command: return all messages addressed to `agent_id` that `session_id`
/// has not yet acknowledged. Acknowledgment is tracked per session in the
/// seen_messages table (unlike the global `read` flag).
/// Thin wrapper over btmsg::unseen_messages.
#[tauri::command]
pub fn btmsg_unseen_messages(agent_id: String, session_id: String) -> Result<Vec<btmsg::BtmsgMessage>, String> {
    btmsg::unseen_messages(&agent_id, &session_id)
}
/// IPC command: record that `session_id` has acknowledged each message in
/// `message_ids`. An empty list is a no-op; already-acknowledged IDs are
/// ignored by the underlying INSERT OR IGNORE.
/// Thin wrapper over btmsg::mark_messages_seen.
#[tauri::command]
pub fn btmsg_mark_seen(session_id: String, message_ids: Vec<String>) -> Result<(), String> {
    btmsg::mark_messages_seen(&session_id, &message_ids)
}
/// IPC command: prune aged rows from the seen_messages acknowledgment table.
///
/// Rows older than the 7-day retention window are deleted; once the table
/// exceeds the emergency row threshold, the underlying pruner clamps the
/// window to 3 days (see btmsg::prune_seen_messages).
/// Returns the number of rows deleted.
#[tauri::command]
pub fn btmsg_prune_seen() -> Result<u64, String> {
    // Named constants instead of inline magic numbers.
    const MAX_AGE_SECS: i64 = 7 * 24 * 3600; // default 7-day retention
    const EMERGENCY_ROW_THRESHOLD: i64 = 200_000; // above this, prune harder
    btmsg::prune_seen_messages(MAX_AGE_SECS, EMERGENCY_ROW_THRESHOLD)
}
// ---- Heartbeat monitoring ----
#[tauri::command]
@ -90,6 +107,11 @@ pub fn btmsg_get_stale_agents(group_id: String, threshold_secs: i64) -> Result<V
btmsg::get_stale_agents(&group_id, threshold_secs)
}
/// IPC command: fetch heartbeat records for every agent in `group_id`.
/// Thin wrapper over btmsg::get_agent_heartbeats.
#[tauri::command]
pub fn btmsg_get_agent_heartbeats(group_id: String) -> Result<Vec<btmsg::AgentHeartbeat>, String> {
    btmsg::get_agent_heartbeats(&group_id)
}
// ---- Dead letter queue ----
#[tauri::command]
@ -102,6 +124,16 @@ pub fn btmsg_clear_dead_letters(group_id: String) -> Result<(), String> {
btmsg::clear_dead_letters(&group_id)
}
/// IPC command: record a message that could not be delivered, together with
/// the delivery error, in the dead-letter queue.
/// Thin wrapper over btmsg::queue_dead_letter.
#[tauri::command]
pub fn btmsg_queue_dead_letter(
    from_agent: String,
    to_agent: String,
    content: String,
    error: String,
) -> Result<(), String> {
    btmsg::queue_dead_letter(&from_agent, &to_agent, &content, &error)
}
// ---- Audit log ----
#[tauri::command]

View file

@ -1,6 +1,6 @@
use tauri::State;
use crate::AppState;
use crate::remote::{RemoteMachineConfig, RemoteMachineInfo};
use crate::remote::{self, RemoteMachineConfig, RemoteMachineInfo};
use crate::pty::PtyOptions;
use crate::sidecar::AgentQueryOptions;
@ -63,3 +63,23 @@ pub async fn remote_pty_resize(state: State<'_, AppState>, machine_id: String, i
/// IPC command: terminate PTY session `id` on remote machine `machine_id`.
/// Delegates to RemoteManager::pty_kill.
pub async fn remote_pty_kill(state: State<'_, AppState>, machine_id: String, id: String) -> Result<(), String> {
    state.remote_manager.pty_kill(&machine_id, &id).await
}
// --- SPKI certificate pinning ---
/// IPC command: connect to `url` and return the hex-encoded SHA-256 hash of
/// the TLS certificate the server presents (the FULL DER certificate — see
/// remote::probe_spki_hash). Only meaningful for wss:// URLs.
#[tauri::command]
#[tracing::instrument]
pub async fn remote_probe_spki(url: String) -> Result<String, String> {
    remote::probe_spki_hash(&url).await
}
/// IPC command: add `pin` (hex-encoded SHA-256 certificate hash) to the
/// machine's trusted pin list. Delegates to RemoteManager::add_spki_pin,
/// which skips duplicates.
#[tauri::command]
#[tracing::instrument(skip(state))]
pub async fn remote_add_pin(state: State<'_, AppState>, machine_id: String, pin: String) -> Result<(), String> {
    state.remote_manager.add_spki_pin(&machine_id, pin).await
}
/// IPC command: remove `pin` from the machine's trusted pin list.
/// Delegates to RemoteManager::remove_spki_pin.
#[tauri::command]
#[tracing::instrument(skip(state))]
pub async fn remote_remove_pin(state: State<'_, AppState>, machine_id: String, pin: String) -> Result<(), String> {
    state.remote_manager.remove_spki_pin(&machine_id, &pin).await
}

View file

@ -33,3 +33,27 @@ pub fn search_index_message(
) -> Result<(), String> {
state.search_db.index_message(&session_id, &role, &content)
}
/// IPC command: index a task into the full-text search database.
/// Thin wrapper over SearchDb::index_task (writes the search_tasks FTS5 table).
#[tauri::command]
pub fn search_index_task(
    state: State<'_, AppState>,
    task_id: String,
    title: String,
    description: String,
    status: String,
    assigned_to: String,
) -> Result<(), String> {
    state.search_db.index_task(&task_id, &title, &description, &status, &assigned_to)
}
/// IPC command: index a btmsg message into the full-text search database.
/// Thin wrapper over SearchDb::index_btmsg (writes the search_btmsg FTS5 table).
#[tauri::command]
pub fn search_index_btmsg(
    state: State<'_, AppState>,
    msg_id: String,
    from_agent: String,
    to_agent: String,
    content: String,
    channel: String,
) -> Result<(), String> {
    state.search_db.index_btmsg(&msg_id, &from_agent, &to_agent, &content, &channel)
}

View file

@ -28,6 +28,7 @@ pub struct CtxDb {
}
impl CtxDb {
#[cfg(test)]
fn default_db_path() -> PathBuf {
dirs::home_dir()
.unwrap_or_default()
@ -35,6 +36,7 @@ impl CtxDb {
.join("context.db")
}
#[cfg(test)]
pub fn new() -> Self {
Self::new_with_path(Self::default_db_path())
}

View file

@ -226,6 +226,15 @@ mod tests {
cwd: "/tmp/test".to_string(),
profile: "default".to_string(),
enabled: true,
provider: None,
model: None,
use_worktrees: None,
sandbox_enabled: None,
anchor_budget_scale: None,
stall_threshold_min: None,
is_agent: None,
agent_role: None,
system_prompt: None,
}],
agents: vec![],
}],

View file

@ -248,6 +248,9 @@ pub fn run() {
commands::remote::remote_pty_write,
commands::remote::remote_pty_resize,
commands::remote::remote_pty_kill,
commands::remote::remote_probe_spki,
commands::remote::remote_add_pin,
commands::remote::remote_remove_pin,
// btmsg (agent messenger)
commands::btmsg::btmsg_get_agents,
commands::btmsg::btmsg_unread_count,
@ -264,11 +267,17 @@ pub fn run() {
commands::btmsg::btmsg_create_channel,
commands::btmsg::btmsg_add_channel_member,
commands::btmsg::btmsg_register_agents,
// btmsg per-message acknowledgment
commands::btmsg::btmsg_unseen_messages,
commands::btmsg::btmsg_mark_seen,
commands::btmsg::btmsg_prune_seen,
// btmsg health monitoring
commands::btmsg::btmsg_record_heartbeat,
commands::btmsg::btmsg_get_stale_agents,
commands::btmsg::btmsg_get_agent_heartbeats,
commands::btmsg::btmsg_get_dead_letters,
commands::btmsg::btmsg_clear_dead_letters,
commands::btmsg::btmsg_queue_dead_letter,
// Audit log
commands::btmsg::audit_log_event,
commands::btmsg::audit_log_list,
@ -286,6 +295,8 @@ pub fn run() {
commands::search::search_query,
commands::search::search_rebuild,
commands::search::search_index_message,
commands::search::search_index_task,
commands::search::search_index_btmsg,
// Notifications
commands::notifications::notify_desktop,
// Secrets (system keyring)

View file

@ -26,6 +26,7 @@ pub struct MemoraDb {
}
impl MemoraDb {
#[cfg(test)]
fn default_db_path() -> std::path::PathBuf {
dirs::data_dir()
.unwrap_or_else(|| dirs::home_dir().unwrap_or_default().join(".local/share"))
@ -33,6 +34,7 @@ impl MemoraDb {
.join("memories.db")
}
#[cfg(test)]
pub fn new() -> Self {
Self::new_with_path(Self::default_db_path())
}

View file

@ -3,7 +3,7 @@
// Each plugin lives in its own subdirectory with a plugin.json manifest.
use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};
use std::path::Path;
/// Plugin manifest — parsed from plugin.json
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -137,11 +137,6 @@ pub fn read_plugin_file(
.map_err(|e| format!("Failed to read plugin file: {e}"))
}
/// Return the plugins directory for the given configuration directory,
/// i.e. `<config_dir>/plugins`.
pub fn plugins_dir(config_dir: &Path) -> PathBuf {
    let mut dir = config_dir.to_path_buf();
    dir.push("plugins");
    dir
}
#[cfg(test)]
mod tests {
use super::*;
@ -257,9 +252,4 @@ mod tests {
assert!(result.is_err());
}
#[test]
fn test_plugins_dir_path() {
let config = Path::new("/home/user/.config/bterminal");
assert_eq!(plugins_dir(config), PathBuf::from("/home/user/.config/bterminal/plugins"));
}
}

View file

@ -4,6 +4,7 @@ use bterminal_core::pty::PtyOptions;
use bterminal_core::sidecar::AgentQueryOptions;
use futures_util::{SinkExt, StreamExt};
use serde::{Deserialize, Serialize};
use sha2::{Sha256, Digest};
use std::collections::HashMap;
use std::sync::Arc;
use tauri::{AppHandle, Emitter};
@ -16,6 +17,9 @@ pub struct RemoteMachineConfig {
pub url: String,
pub token: String,
pub auto_connect: bool,
/// SPKI SHA-256 pin(s) for certificate verification. Empty = TOFU on first connect.
#[serde(default)]
pub spki_pins: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -25,6 +29,8 @@ pub struct RemoteMachineInfo {
pub url: String,
pub status: String,
pub auto_connect: bool,
/// Currently stored SPKI pin hashes (hex-encoded SHA-256)
pub spki_pins: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -79,6 +85,7 @@ impl RemoteManager {
url: m.config.url.clone(),
status: m.status.clone(),
auto_connect: m.config.auto_connect,
spki_pins: m.config.spki_pins.clone(),
}).collect()
}
@ -110,8 +117,28 @@ impl RemoteManager {
Ok(())
}
/// Add an SPKI pin hash to a machine's trusted pin set.
/// Adding a pin that is already stored is a no-op (no duplicates).
pub async fn add_spki_pin(&self, machine_id: &str, pin: String) -> Result<(), String> {
    let mut machines = self.machines.lock().await;
    match machines.get_mut(machine_id) {
        None => Err(format!("Machine {machine_id} not found")),
        Some(machine) => {
            let pins = &mut machine.config.spki_pins;
            if !pins.iter().any(|stored| stored == &pin) {
                pins.push(pin);
            }
            Ok(())
        }
    }
}
/// Remove an SPKI pin hash from a machine's trusted pin set.
/// Removing a pin that is not stored is a no-op.
pub async fn remove_spki_pin(&self, machine_id: &str, pin: &str) -> Result<(), String> {
    let mut machines = self.machines.lock().await;
    if let Some(machine) = machines.get_mut(machine_id) {
        machine.config.spki_pins.retain(|stored| stored != pin);
        Ok(())
    } else {
        Err(format!("Machine {machine_id} not found"))
    }
}
pub async fn connect(&self, app: &AppHandle, machine_id: &str) -> Result<(), String> {
let (url, token) = {
let (url, token, spki_pins) = {
let mut machines = self.machines.lock().await;
let machine = machines.get_mut(machine_id)
.ok_or_else(|| format!("Machine {machine_id} not found"))?;
@ -121,9 +148,60 @@ impl RemoteManager {
machine.status = "connecting".to_string();
// Reset cancellation flag for new connection
machine.cancelled.store(false, std::sync::atomic::Ordering::Relaxed);
(machine.config.url.clone(), machine.config.token.clone())
(machine.config.url.clone(), machine.config.token.clone(), machine.config.spki_pins.clone())
};
// SPKI certificate pin verification for wss:// connections
if url.starts_with("wss://") {
if !spki_pins.is_empty() {
// Verify server certificate against stored pins
let server_hash = probe_spki_hash(&url).await.map_err(|e| {
// Reset status on probe failure
let machines = self.machines.clone();
let mid = machine_id.to_string();
tauri::async_runtime::spawn(async move {
let mut machines = machines.lock().await;
if let Some(machine) = machines.get_mut(&mid) {
machine.status = "disconnected".to_string();
}
});
format!("SPKI probe failed: {e}")
})?;
if !spki_pins.contains(&server_hash) {
// Pin mismatch — possible MITM or certificate rotation
let mut machines = self.machines.lock().await;
if let Some(machine) = machines.get_mut(machine_id) {
machine.status = "disconnected".to_string();
}
return Err(format!(
"SPKI pin mismatch! Server certificate hash '{server_hash}' does not match \
any trusted pin. This may indicate a MITM attack or certificate rotation. \
Update the pin in Settings if this is expected."
));
}
log::info!("SPKI pin verified for machine {machine_id}");
} else {
// TOFU: no pins stored — probe and auto-store on first wss:// connect
match probe_spki_hash(&url).await {
Ok(hash) => {
log::info!("TOFU: storing SPKI pin for machine {machine_id}: {hash}");
let mut machines = self.machines.lock().await;
if let Some(machine) = machines.get_mut(machine_id) {
machine.config.spki_pins.push(hash.clone());
}
let _ = app.emit("remote-spki-tofu", &serde_json::json!({
"machineId": machine_id,
"hash": hash,
}));
}
Err(e) => {
log::warn!("TOFU: failed to probe SPKI hash for {machine_id}: {e}");
// Continue without pinning — non-blocking
}
}
}
}
// Build WebSocket request with auth header
let request = tokio_tungstenite::tungstenite::http::Request::builder()
.uri(&url)
@ -430,6 +508,57 @@ impl RemoteManager {
}
}
/// Probe a relay server's TLS certificate and return the SHA-256 hash of the
/// FULL DER-encoded certificate, hex-encoded (lowercase).
///
/// NOTE(review): despite the "spki" name, this hashes the entire certificate,
/// not the SubjectPublicKeyInfo. Unlike true SPKI pinning (RFC 7469), stored
/// pins therefore break on every certificate renewal even when the key pair
/// is unchanged — confirm whether that is intended before relying on
/// rotation-friendliness.
///
/// The TLS connector deliberately accepts invalid certificates: the goal here
/// is only to READ the presented certificate; the trust decision is made
/// against stored pins by the caller. Only meaningful for wss:// URLs.
///
/// Errors on: an unparseable URL, TCP connect failure or 5 s timeout, a failed
/// TLS handshake, a server presenting no certificate, or DER encoding failure.
pub async fn probe_spki_hash(url: &str) -> Result<String, String> {
    let host = extract_host(url).ok_or_else(|| "Invalid URL".to_string())?;
    // Strip any ":port" suffix; the bare hostname is used for SNI/verification.
    // NOTE(review): this splits on the first ':', so IPv6 literal hosts would
    // be truncated — verify callers never pass them.
    let hostname = host.split(':').next().unwrap_or(&host).to_string();
    // Default to port 9750 (the relay's default) when none is given.
    let addr = if host.contains(':') {
        host.clone()
    } else {
        format!("{host}:9750")
    };
    // Build a permissive TLS connector to get the certificate regardless of CA trust.
    let connector = native_tls::TlsConnector::builder()
        .danger_accept_invalid_certs(true)
        .build()
        .map_err(|e| format!("TLS connector error: {e}"))?;
    let connector = tokio_native_tls::TlsConnector::from(connector);
    // TCP connect with a hard 5-second cap so a dead relay cannot hang the UI.
    let tcp = tokio::time::timeout(
        std::time::Duration::from_secs(5),
        tokio::net::TcpStream::connect(&addr),
    )
    .await
    .map_err(|_| "Connection timeout".to_string())?
    .map_err(|e| format!("TCP connect failed: {e}"))?;
    let tls_stream = connector
        .connect(&hostname, tcp)
        .await
        .map_err(|e| format!("TLS handshake failed: {e}"))?;
    // Extract the peer certificate's DER bytes from the completed handshake.
    let cert = tls_stream
        .get_ref()
        .peer_certificate()
        .map_err(|e| format!("Failed to get peer certificate: {e}"))?
        .ok_or_else(|| "No peer certificate presented".to_string())?;
    let cert_der = cert
        .to_der()
        .map_err(|e| format!("Failed to encode certificate DER: {e}"))?;
    // SHA-256 over the full DER-encoded certificate (see NOTE above).
    let mut hasher = Sha256::new();
    hasher.update(&cert_der);
    let hash = hasher.finalize();
    Ok(hex::encode(hash))
}
/// Probe whether a relay is reachable via TCP connect only (no WS upgrade).
/// This avoids allocating per-connection resources (PtyManager, SidecarManager) on the relay.
async fn attempt_tcp_probe(url: &str) -> Result<(), String> {

View file

@ -89,6 +89,7 @@ impl SearchDb {
}
/// Index a task into the search_tasks FTS5 table.
#[allow(dead_code)] // Called via Tauri IPC command search_index_task
pub fn index_task(
&self,
task_id: &str,
@ -108,6 +109,7 @@ impl SearchDb {
}
/// Index a btmsg message into the search_btmsg FTS5 table.
#[allow(dead_code)] // Called via Tauri IPC command search_index_btmsg
pub fn index_btmsg(
&self,
msg_id: &str,
@ -264,7 +266,6 @@ fn chrono_now() -> String {
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
fn temp_search_db() -> (SearchDb, tempfile::TempDir) {
let dir = tempfile::tempdir().unwrap();