Infrastructure:
- adapters/: base, tauri (port 9750), electrobun (port 9761 + PTY daemon)
- helpers/: 120+ centralized selectors, reusable actions, custom assertions
- wdio.shared.conf.js + stack-specific configs

18 unified specs (205 tests): splash(6) smoke(15) settings(19) terminal(14) agent(15) search(12) files(15) comms(10) tasks(10) theme(12) groups(12) keyboard(8) notifications(10) diagnostics(8) status-bar(12) context(9) worktree(8) llm-judged(10)

Daemon: --stack tauri|electrobun|both flag
Scripts: test:e2e:tauri, test:e2e:electrobun, test:e2e:both
// WDIO programmatic runner — launches specs and streams results to a callback
// Uses @wdio/cli Launcher for test execution, reads results-db for smart caching.
// Supports --stack flag: tauri (default), electrobun, or both.
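//
// Example (illustrative sketch, not project documentation): driving the runner from
// another script. The import path and log formatting are assumptions; the options and
// result shape come from the RunOptions / TestResult types defined below.
//
//   import { runSpecs } from './runner.ts';
//
//   const results = await runSpecs({
//     pattern: 'smoke',   // substring match against spec filenames
//     stack: 'both',      // run against Tauri, then Electrobun
//     full: true,         // bypass the smart cache
//     onResult: (r) => console.log(`[${r.status}] ${r.name}`),
//   });
//   const passed = results.filter((r) => r.status === 'passed').length;
//   console.log(`${passed}/${results.length} specs passed`);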
import { resolve, dirname, basename } from 'node:path';
import { fileURLToPath } from 'node:url';
import { existsSync, readdirSync, writeFileSync } from 'node:fs';
import { execSync } from 'node:child_process';
import type { TestStatus } from './dashboard.ts';
import { ResultsDb } from '../infra/results-db.ts';

const __dirname = dirname(fileURLToPath(import.meta.url));
const PROJECT_ROOT = resolve(__dirname, '../../..');
const SPECS_DIR = resolve(PROJECT_ROOT, 'tests/e2e/specs');
const RESULTS_PATH = resolve(PROJECT_ROOT, 'test-results/results.json');

export type StackTarget = 'tauri' | 'electrobun' | 'both';

/** Resolve the WDIO config file for a given stack */
function getWdioConf(stack: StackTarget): string {
  switch (stack) {
    case 'tauri':
      return resolve(PROJECT_ROOT, 'tests/e2e/wdio.tauri.conf.js');
    case 'electrobun':
      return resolve(PROJECT_ROOT, 'tests/e2e/wdio.electrobun.conf.js');
    default:
      return resolve(PROJECT_ROOT, 'tests/e2e/wdio.tauri.conf.js');
  }
}

// Legacy fallback — original config
const WDIO_CONF_LEGACY = resolve(PROJECT_ROOT, 'tests/e2e/wdio.conf.js');

export interface TestResult {
  name: string;
  specFile: string;
  status: TestStatus;
  durationMs?: number;
  error?: string;
  stack?: string;
}

export type ResultCallback = (result: TestResult) => void;

export interface RunOptions {
  pattern?: string;
  full?: boolean;
  onResult?: ResultCallback;
  stack?: StackTarget;
}

// ── Spec discovery ──

export function discoverSpecs(pattern?: string): string[] {
  const files = readdirSync(SPECS_DIR)
    .filter((f) => f.endsWith('.test.ts'))
    .sort();

  if (pattern) {
    const lp = pattern.toLowerCase();
    return files.filter((f) => f.toLowerCase().includes(lp));
  }
  return files;
}

export function specDisplayName(specFile: string): string {
  return basename(specFile, '.test.ts');
}

// ── Smart cache ──
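// A spec is treated as cached-passing when a scenario named exactly after its display
// name passed in one of the last five completed (passed/failed) runs in results-db.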
function getPassedSpecs(db: ResultsDb): Set<string> {
  const passed = new Set<string>();
  for (const run of db.getRecentRuns(5)) {
    if (run.status !== 'passed' && run.status !== 'failed') continue;
    for (const step of db.getStepsForRun(run.run_id)) {
      if (step.status === 'passed') passed.add(step.scenario_name);
    }
  }
  return passed;
}

function filterByCache(specs: string[], db: ResultsDb): { run: string[]; skipped: string[] } {
  const cached = getPassedSpecs(db);
  const run: string[] = [];
  const skipped: string[] = [];
  for (const spec of specs) {
    (cached.has(specDisplayName(spec)) ? skipped : run).push(spec);
  }
  return { run, skipped };
}

// ── Git info ──

function getGitInfo(): { branch: string | null; sha: string | null } {
  try {
    const branch = execSync('git rev-parse --abbrev-ref HEAD', {
      cwd: PROJECT_ROOT,
      encoding: 'utf-8',
    }).trim();
    const sha = execSync('git rev-parse --short HEAD', {
      cwd: PROJECT_ROOT,
      encoding: 'utf-8',
    }).trim();
    return { branch, sha };
  } catch {
    return { branch: null, sha: null };
  }
}

// ── Single-stack runner ──

async function runSingleStack(
  stack: StackTarget,
  opts: RunOptions,
  specsToRun: string[],
  db: ResultsDb,
  runId: string,
): Promise<TestResult[]> {
  const results: TestResult[] = [];
  const confPath = getWdioConf(stack);

  // Fall back to legacy config if new one doesn't exist
  const wdioConf = existsSync(confPath) ? confPath : WDIO_CONF_LEGACY;
  const stackLabel = stack === 'both' ? 'tauri' : stack;

  // Mark specs as running
  for (const spec of specsToRun) {
    opts.onResult?.({ name: `[${stackLabel}] ${specDisplayName(spec)}`, specFile: spec, status: 'running', stack: stackLabel });
  }

  const specPaths = specsToRun.map((s) => resolve(SPECS_DIR, s));
  const startTime = Date.now();
  let exitCode = 1;
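  // Temporarily wrap process.stdout.write so the WDIO output can be scanned later
  // for per-spec PASSED/FAILED markers; the original write is restored in `finally`.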
  const capturedLines: string[] = [];
  const origWrite = process.stdout.write.bind(process.stdout);
  process.stdout.write = function (chunk: any, ...args: any[]) {
    const str = typeof chunk === 'string' ? chunk : chunk.toString();
    capturedLines.push(str);
    return origWrite(chunk, ...args);
  } as typeof process.stdout.write;

  try {
    const { Launcher } = await import('@wdio/cli');
    const launcher = new Launcher(wdioConf, { specs: specPaths });
    exitCode = await launcher.run();
  } catch (err: unknown) {
    const msg = err instanceof Error ? err.message : String(err);
    for (const spec of specsToRun) {
      const name = specDisplayName(spec);
      const result: TestResult = {
        name: `[${stackLabel}] ${name}`,
        specFile: spec,
        status: 'failed',
        error: `Launcher error: ${msg}`,
        stack: stackLabel,
      };
      results.push(result);
      opts.onResult?.(result);
      db.recordStep({
        run_id: runId, scenario_name: `[${stackLabel}] ${name}`, step_name: 'launcher',
        status: 'error', duration_ms: null, error_message: msg,
        screenshot_path: null, agent_cost_usd: null,
      });
    }
    return results;
  } finally {
    process.stdout.write = origWrite;
  }

  const totalDuration = Date.now() - startTime;
  const perSpecDuration = Math.round(totalDuration / specsToRun.length);
  const passedSet = new Set<string>();
  const failedSet = new Set<string>();
  const output = capturedLines.join('');
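  // Coarse pass/fail attribution: a spec is counted as passed when the captured output
  // contains both 'PASSED' and its filename anywhere, as failed when it contains both
  // 'FAILED' and its filename, and otherwise the overall launcher exit code decides.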
  for (const spec of specsToRun) {
    if (output.includes('PASSED') && output.includes(spec)) {
      passedSet.add(spec);
    } else if (output.includes('FAILED') && output.includes(spec)) {
      failedSet.add(spec);
    } else if (exitCode === 0) {
      passedSet.add(spec);
    } else {
      failedSet.add(spec);
    }
  }

  for (const spec of specsToRun) {
    const name = specDisplayName(spec);
    const passed = passedSet.has(spec);
    const status: TestStatus = passed ? 'passed' : 'failed';
    const errMsg = passed ? null : 'Spec run had failures (check WDIO output above)';
    const result: TestResult = {
      name: `[${stackLabel}] ${name}`,
      specFile: spec,
      status,
      durationMs: perSpecDuration,
      error: errMsg ?? undefined,
      stack: stackLabel,
    };
    results.push(result);
    opts.onResult?.(result);
    db.recordStep({
      run_id: runId, scenario_name: `[${stackLabel}] ${name}`, step_name: 'spec',
      status, duration_ms: perSpecDuration, error_message: errMsg,
      screenshot_path: null, agent_cost_usd: null,
    });
  }

  return results;
}

// ── Main runner ──
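/**
 * Discover specs (optionally filtered by pattern), skip ones the smart cache already
 * saw pass unless opts.full is set, run the rest against the selected stack(s), and
 * record the run in results-db.
 */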
export async function runSpecs(opts: RunOptions = {}): Promise<TestResult[]> {
  const db = new ResultsDb();
  const allSpecs = discoverSpecs(opts.pattern);
  const results: TestResult[] = [];
  const stack = opts.stack ?? 'tauri';

  let specsToRun: string[];
  let skippedSpecs: string[] = [];

  if (opts.full) {
    specsToRun = allSpecs;
  } else {
    const filtered = filterByCache(allSpecs, db);
    specsToRun = filtered.run;
    skippedSpecs = filtered.skipped;
  }

  // Emit skipped specs immediately
  for (const spec of skippedSpecs) {
    const result: TestResult = { name: specDisplayName(spec), specFile: spec, status: 'skipped' };
    results.push(result);
    opts.onResult?.(result);
  }

  if (specsToRun.length === 0) {
    return results;
  }

  const git = getGitInfo();
  const runId = `daemon-${stack}-${Date.now()}`;
  db.startRun(runId, git.branch ?? undefined, git.sha ?? undefined);

  if (stack === 'both') {
    // Run against Tauri first, then Electrobun
    console.log('\n=== Running specs against TAURI stack ===\n');
    const tauriResults = await runSingleStack('tauri', opts, specsToRun, db, runId);
    results.push(...tauriResults);

    console.log('\n=== Running specs against ELECTROBUN stack ===\n');
    const ebunResults = await runSingleStack('electrobun', opts, specsToRun, db, runId);
    results.push(...ebunResults);

    const allPassed = [...tauriResults, ...ebunResults].every(r => r.status === 'passed');
    const totalDuration = results.reduce((sum, r) => sum + (r.durationMs ?? 0), 0);
    db.finishRun(runId, allPassed ? 'passed' : 'failed', totalDuration);
  } else {
    const startTime = Date.now();
    const stackResults = await runSingleStack(stack, opts, specsToRun, db, runId);
    results.push(...stackResults);

    const allPassed = stackResults.every(r => r.status === 'passed');
    db.finishRun(runId, allPassed ? 'passed' : 'failed', Date.now() - startTime);
  }

  return results;
}
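/** Clear cached results by resetting test-results/results.json to an empty runs/steps structure. */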
export function clearCache(): void {
  if (existsSync(RESULTS_PATH)) {
    writeFileSync(RESULTS_PATH, JSON.stringify({ runs: [], steps: [] }, null, 2));
  }
}