// @vitest-environment node
import { describe, expect, it, vi } from 'vitest';

vi.mock('@electron/service/logger', () => {
  const logger = {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  };

  return {
    default: logger,
    logManager: logger,
  };
});

import {
  classifyGatewayStderrMessage,
  recordGatewayStartupStderrLine,
} from '../electron/gateway/startup-stderr';
import { resolveGatewayLaunchStrategy } from '../electron/gateway/launch-strategy';
import { getGatewayStartupRecoveryAction } from '../electron/gateway/startup-recovery';
import { runGatewayStartupSequence } from '../electron/gateway/startup-orchestrator';
import {
  DEFAULT_RECONNECT_CONFIG,
  getDeferredRestartAction,
  getReconnectScheduleDecision,
  getReconnectSkipReason,
  shouldDeferRestart,
} from '../electron/gateway/process-policy';
import { GatewayLifecycleController, LifecycleSupersededError } from '../electron/gateway/lifecycle-controller';
import { GatewayConnectionMonitor } from '../electron/gateway/connection-monitor';
import { GatewayRestartController } from '../electron/gateway/restart-controller';
import { GatewayRestartGovernor } from '../electron/gateway/restart-governor';
import {
  DEFAULT_GATEWAY_RELOAD_POLICY,
  parseGatewayReloadPolicy,
} from '../electron/gateway/reload-policy';

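// Classification of gateway startup stderr (debug vs. warn) and the capped history buffer.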
describe('startup-stderr helpers', () => {
  it('downgrades known config warnings to debug', () => {
    expect(
      classifyGatewayStderrMessage(
        '2026-04-23T09:14:55.599+08:00 Config warnings: stale plugin ignored',
      ),
    ).toEqual({
      level: 'debug',
      normalized: '2026-04-23T09:14:55.599+08:00 Config warnings: stale plugin ignored',
    });
  });

  it('keeps actionable stderr as warn', () => {
    expect(classifyGatewayStderrMessage('fatal: failed to start gateway')).toEqual({
      level: 'warn',
      normalized: 'fatal: failed to start gateway',
    });
  });

  it('caps startup stderr history', () => {
    const lines: string[] = [];
    for (let i = 0; i < 130; i += 1) {
      recordGatewayStartupStderrLine(lines, `line-${i}`);
    }

    expect(lines).toHaveLength(120);
    expect(lines[0]).toBe('line-10');
    expect(lines.at(-1)).toBe('line-129');
  });
});

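// Recovery decisions after a failed startup attempt: retry, repair the config, or fail.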
describe('startup-recovery helpers', () => {
  it('retries transient startup errors before max attempts', () => {
    expect(getGatewayStartupRecoveryAction({
      startupError: new Error('OpenClaw Gateway exited before becoming ready (code=-1)'),
      startupStderrLines: [],
      configRepairAttempted: false,
      attempt: 1,
      maxAttempts: 3,
    })).toBe('retry');
  });

  it('prefers repair for invalid config signals', () => {
    expect(getGatewayStartupRecoveryAction({
      startupError: new Error('startup failed'),
      startupStderrLines: ['invalid config: unrecognized key browser.foo'],
      configRepairAttempted: false,
      attempt: 1,
      maxAttempts: 3,
    })).toBe('repair');
  });

  it('fails once retries are exhausted', () => {
    expect(getGatewayStartupRecoveryAction({
      startupError: new Error('OpenClaw Gateway failed to become ready on port 18789'),
      startupStderrLines: [],
      configRepairAttempted: false,
      attempt: 3,
      maxAttempts: 3,
    })).toBe('fail');
  });
});

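// Launch strategy selection by platform and build mode, plus forced overrides.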
describe('launch-strategy helpers', () => {
  it('uses node-runtime on Windows dev by default', () => {
    expect(resolveGatewayLaunchStrategy({
      platform: 'win32',
      mode: 'dev',
    })).toBe('node-runtime');
  });

  it('keeps utility-process for packaged Windows builds', () => {
    expect(resolveGatewayLaunchStrategy({
      platform: 'win32',
      mode: 'packaged',
    })).toBe('utility-process');
  });

  it('honors forced strategy overrides', () => {
    expect(resolveGatewayLaunchStrategy({
      platform: 'darwin',
      mode: 'dev',
      forced: 'utility',
    })).toBe('utility-process');

    expect(resolveGatewayLaunchStrategy({
      platform: 'linux',
      mode: 'packaged',
      forced: 'node',
    })).toBe('node-runtime');
  });
});

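// Startup orchestration: reuse an existing gateway when one is found, otherwise wait for the
// port, start a managed process, await readiness, and connect. Transient failures retry,
// invalid-config failures go through doctor repair, lifecycle-superseded errors bubble up.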
describe('runGatewayStartupSequence', () => {
  it('connects to an existing gateway without starting a new process', async () => {
    const connect = vi.fn(async () => {});
    const startProcess = vi.fn(async () => {});
    const onConnectedToExistingGateway = vi.fn();

    await runGatewayStartupSequence({
      port: 18789,
      shouldWaitForPortFree: false,
      resetStartupStderrLines: vi.fn(),
      getStartupStderrLines: () => [],
      findExistingGateway: async () => ({ port: 18789 }),
      connect,
      onConnectedToExistingGateway,
      waitForPortFree: vi.fn(async () => {}),
      startProcess,
      waitForReady: vi.fn(async () => {}),
      onConnectedToManagedGateway: vi.fn(),
      delay: vi.fn(async () => {}),
    });

    expect(connect).toHaveBeenCalledWith(18789, undefined);
    expect(onConnectedToExistingGateway).toHaveBeenCalledTimes(1);
    expect(startProcess).not.toHaveBeenCalled();
  });

  it('starts and connects a managed gateway when none exists', async () => {
    const events: string[] = [];

    await runGatewayStartupSequence({
      port: 19001,
      shouldWaitForPortFree: true,
      resetStartupStderrLines: vi.fn(() => {
        events.push('reset');
      }),
      getStartupStderrLines: () => [],
      findExistingGateway: async () => null,
      connect: async () => {
        events.push('connect');
      },
      onConnectedToExistingGateway: vi.fn(),
      waitForPortFree: async () => {
        events.push('wait-port');
      },
      startProcess: async () => {
        events.push('start');
      },
      waitForReady: async () => {
        events.push('ready');
      },
      onConnectedToManagedGateway: () => {
        events.push('managed-connected');
      },
      delay: vi.fn(async () => {}),
    });

    expect(events).toEqual(['reset', 'wait-port', 'start', 'ready', 'connect', 'managed-connected']);
  });

  it('retries once after a transient startup error', async () => {
    let attempts = 0;
    const delay = vi.fn(async () => {});
    const startProcess = vi.fn(async () => {
      attempts += 1;
      if (attempts === 1) {
        throw new Error('OpenClaw Gateway exited before becoming ready (code=-1)');
      }
    });
    const connect = vi.fn(async () => {});

    await runGatewayStartupSequence({
      port: 19001,
      shouldWaitForPortFree: false,
      maxStartAttempts: 3,
      resetStartupStderrLines: vi.fn(),
      getStartupStderrLines: () => [],
      findExistingGateway: async () => null,
      connect,
      onConnectedToExistingGateway: vi.fn(),
      waitForPortFree: vi.fn(async () => {}),
      startProcess,
      waitForReady: vi.fn(async () => {}),
      onConnectedToManagedGateway: vi.fn(),
      delay,
    });

    expect(startProcess).toHaveBeenCalledTimes(2);
    expect(delay).toHaveBeenCalledTimes(1);
    expect(connect).toHaveBeenCalledTimes(1);
  });

  it('runs doctor repair once for invalid config signals before retrying startup', async () => {
    let attempts = 0;
    const events: string[] = [];
    const runDoctorRepair = vi.fn(async () => true);

    await runGatewayStartupSequence({
      port: 19002,
      shouldWaitForPortFree: false,
      maxStartAttempts: 3,
      resetStartupStderrLines: vi.fn(() => {
        events.push('reset');
      }),
      getStartupStderrLines: () => (attempts === 1 ? ['invalid config: unrecognized key browser.foo'] : []),
      findExistingGateway: async () => null,
      connect: async () => {
        events.push('connect');
      },
      onConnectedToExistingGateway: vi.fn(),
      waitForPortFree: vi.fn(async () => {}),
      startProcess: async () => {
        attempts += 1;
        events.push(`start-${attempts}`);
        if (attempts === 1) {
          throw new Error('startup failed');
        }
      },
      waitForReady: async () => {
        events.push('ready');
      },
      onConnectedToManagedGateway: () => {
        events.push('managed-connected');
      },
      runDoctorRepair,
      onDoctorRepairSuccess: () => {
        events.push('repair-success');
      },
      delay: vi.fn(async () => {}),
    });

    expect(runDoctorRepair).toHaveBeenCalledTimes(1);
    expect(events).toEqual([
      'reset',
      'start-1',
      'repair-success',
      'reset',
      'start-2',
      'ready',
      'connect',
      'managed-connected',
    ]);
  });

  it('bubbles lifecycle superseded errors without retrying', async () => {
    await expect(runGatewayStartupSequence({
      port: 19003,
      shouldWaitForPortFree: false,
      resetStartupStderrLines: vi.fn(),
      getStartupStderrLines: () => [],
      findExistingGateway: async () => {
        throw new LifecycleSupersededError('stale start');
      },
      connect: vi.fn(async () => {}),
      onConnectedToExistingGateway: vi.fn(),
      waitForPortFree: vi.fn(async () => {}),
      startProcess: vi.fn(async () => {}),
      waitForReady: vi.fn(async () => {}),
      onConnectedToManagedGateway: vi.fn(),
      delay: vi.fn(async () => {}),
    })).rejects.toThrow('stale start');
  });
});

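// Reconnect scheduling with exponential backoff, skip reasons for stale or disabled
// callbacks, and deferred-restart classification.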
describe('process-policy helpers', () => {
  it('schedules reconnect attempts with exponential backoff', () => {
    expect(getReconnectScheduleDecision({
      shouldReconnect: true,
      hasReconnectTimer: false,
      reconnectAttempts: 2,
      maxAttempts: DEFAULT_RECONNECT_CONFIG.maxAttempts,
      baseDelay: DEFAULT_RECONNECT_CONFIG.baseDelay,
      maxDelay: DEFAULT_RECONNECT_CONFIG.maxDelay,
    })).toEqual({
      action: 'schedule',
      nextAttempt: 3,
      maxAttempts: 10,
      delay: 4000,
    });
  });

  it('returns skip reasons for reconnect callbacks', () => {
    expect(getReconnectSkipReason({
      scheduledEpoch: 1,
      currentEpoch: 1,
      shouldReconnect: false,
    })).toBe('auto-reconnect disabled');

    expect(getReconnectSkipReason({
      scheduledEpoch: 1,
      currentEpoch: 2,
      shouldReconnect: true,
    })).toContain('stale reconnect callback');
  });

  it('classifies deferred restart actions', () => {
    expect(shouldDeferRestart({ state: 'starting', startLock: false })).toBe(true);
    expect(getDeferredRestartAction({
      hasPendingRestart: true,
      state: 'running',
      startLock: false,
      shouldReconnect: true,
    })).toBe('execute');
    expect(getDeferredRestartAction({
      hasPendingRestart: true,
      state: 'running',
      startLock: false,
      shouldReconnect: false,
    })).toBe('drop');
  });
});

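// Lifecycle epochs: a later bump supersedes earlier epochs, and asserting a stale epoch throws.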
describe('lifecycle-controller helpers', () => {
  it('tracks lifecycle epochs and rejects stale phases', () => {
    const lifecycle = new GatewayLifecycleController();
    const firstEpoch = lifecycle.bump('start');
    expect(firstEpoch).toBe(1);

    lifecycle.bump('restart');
    expect(() => lifecycle.assert(firstEpoch, 'connect')).toThrow(LifecycleSupersededError);
  });
});

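// Heartbeat monitoring: consecutive missed pings trigger the timeout callback; an alive
// signal resets the miss counter.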
describe('connection-monitor helpers', () => {
  it('triggers heartbeat timeout after consecutive misses', () => {
    vi.useFakeTimers();
    try {
      const sendPing = vi.fn();
      const onHeartbeatTimeout = vi.fn();
      const monitor = new GatewayConnectionMonitor();

      monitor.startPing({
        sendPing,
        onHeartbeatTimeout,
        intervalMs: 1000,
        timeoutMs: 500,
        maxConsecutiveMisses: 2,
      });

      vi.advanceTimersByTime(1000);
      vi.advanceTimersByTime(1000);
      vi.advanceTimersByTime(1000);

      expect(sendPing).toHaveBeenCalledTimes(2);
      expect(onHeartbeatTimeout).toHaveBeenCalledWith({
        consecutiveMisses: 2,
        timeoutMs: 500,
      });
    } finally {
      vi.useRealTimers();
    }
  });

  it('resets heartbeat misses when alive messages arrive', () => {
    vi.useFakeTimers();
    try {
      const monitor = new GatewayConnectionMonitor();
      monitor.startPing({
        sendPing: vi.fn(),
        onHeartbeatTimeout: vi.fn(),
        intervalMs: 1000,
        timeoutMs: 500,
        maxConsecutiveMisses: 3,
      });

      vi.advanceTimersByTime(1000);
      vi.advanceTimersByTime(1000);
      expect(monitor.getConsecutiveMisses()).toBe(1);

      monitor.markAlive('message');
      expect(monitor.getConsecutiveMisses()).toBe(0);
    } finally {
      vi.useRealTimers();
    }
  });
});

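// Deferred restarts: flushed once the lifecycle settles, dropped when a later restart has
// already completed, and debounced when requested repeatedly.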
describe('restart-controller helpers', () => {
  it('flushes deferred restarts once lifecycle settles', () => {
    const controller = new GatewayRestartController();
    const executeRestart = vi.fn();

    controller.markDeferredRestart('restart', {
      state: 'starting',
      startLock: true,
    });

    controller.flushDeferredRestart('status:starting->running', {
      state: 'running',
      startLock: false,
      shouldReconnect: true,
    }, executeRestart);

    expect(executeRestart).toHaveBeenCalledTimes(1);
  });

  it('drops deferred restarts if another restart already completed later', () => {
    vi.useFakeTimers();
    try {
      vi.setSystemTime(new Date('2026-04-23T12:00:00Z'));
      const controller = new GatewayRestartController();
      const executeRestart = vi.fn();

      controller.markDeferredRestart('restart', {
        state: 'starting',
        startLock: true,
      });

      vi.setSystemTime(new Date('2026-04-23T12:00:01Z'));
      controller.recordRestartCompleted();
      controller.flushDeferredRestart('start:finally', {
        state: 'running',
        startLock: false,
        shouldReconnect: true,
      }, executeRestart);

      expect(executeRestart).not.toHaveBeenCalled();
    } finally {
      vi.useRealTimers();
    }
  });

  it('debounces repeated restart requests', () => {
    vi.useFakeTimers();
    try {
      const controller = new GatewayRestartController();
      const executeRestart = vi.fn();

      controller.debouncedRestart(1000, executeRestart);
      controller.debouncedRestart(1000, executeRestart);
      vi.advanceTimersByTime(999);
      expect(executeRestart).not.toHaveBeenCalled();

      vi.advanceTimersByTime(1);
      expect(executeRestart).toHaveBeenCalledTimes(1);
    } finally {
      vi.useRealTimers();
    }
  });
});

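// Cooldown-based restart suppression with executed/suppressed counters.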
describe('restart-governor helpers', () => {
  it('suppresses restart attempts during cooldown', () => {
    const governor = new GatewayRestartGovernor({ cooldownMs: 5000 });

    expect(governor.decide(1000)).toEqual({ allow: true });
    governor.recordExecuted(1000);
    expect(governor.decide(3000)).toEqual({
      allow: false,
      reason: 'cooldown_active',
      retryAfterMs: 3000,
    });
    expect(governor.getCounters()).toEqual({
      executedTotal: 1,
      suppressedTotal: 1,
    });
  });
});

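// Reload policy parsing: oversized debounce values are clamped and invalid input falls back to defaults.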
describe('reload-policy helpers', () => {
  it('parses valid gateway reload config and clamps debounce', () => {
    expect(parseGatewayReloadPolicy({
      gateway: {
        reload: {
          mode: 'restart',
          debounceMs: 120000,
        },
      },
    })).toEqual({
      mode: 'restart',
      debounceMs: 60000,
    });
  });

  it('falls back to defaults for invalid values', () => {
    expect(parseGatewayReloadPolicy({
      gateway: {
        reload: {
          mode: 'invalid',
          debounceMs: 'oops',
        },
      },
    })).toEqual(DEFAULT_GATEWAY_RELOAD_POLICY);
  });
});