/**
 * macOS Native E2E Performance Test (Appium mac2).
 *
 * Drives the real Tauri stack to validate audio ingestion through
 * Rust -> gRPC -> DB, and captures latency metrics.
 *
 * Requires a VITE_E2E_MODE build so __NOTEFLOW_TEST_API__ is available.
 */

import { execFileSync } from 'node:child_process';
import { randomUUID } from 'node:crypto';
import { existsSync } from 'node:fs';
import { tmpdir } from 'node:os';
import path from 'node:path';
import { fileURLToPath } from 'node:url';

import { waitForAppReady } from './fixtures';

const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
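
// Backend address used by the explicit connect call inside the test body.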
const ServerConfig = {
  host: '127.0.0.1',
  port: '50052',
} as const;

const FALLBACK_WAV_PATH = path.resolve(__dirname, 'fixtures', 'test-tones-2s.wav');
const SPEECH_TEXT = 'NoteFlow performance test. Hello world. This is a sample sentence.';
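
// Synthesize a speech WAV with the macOS `say` command (little-endian float32 at
// 16 kHz, per the data-format flag) so the engine receives real speech; fall back
// to the bundled tone fixture on other platforms or if synthesis fails.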
function ensureSpeechWav(): string {
  if (process.platform !== 'darwin') {
    return FALLBACK_WAV_PATH;
  }
  const outputPath = path.join(tmpdir(), `noteflow-e2e-speech-${randomUUID()}.wav`);
  try {
    execFileSync(
      'say',
      ['-o', outputPath, '--file-format=WAVE', '--data-format=LEF32@16000', SPEECH_TEXT],
      { stdio: 'ignore' }
    );
  } catch {
    return FALLBACK_WAV_PATH;
  }
  return existsSync(outputPath) ? outputPath : FALLBACK_WAV_PATH;
}

const WAV_PATH = ensureSpeechWav();

const Timeouts = {
  TRANSCRIPT_WAIT_MS: 60000,
} as const;
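
// Result payload marshalled back from the browser context to the test runner.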
type PerfResult = {
  connectionMode: string | null;
  meetingId?: string;
  segmentCount?: number;
  audioDiagnostics?: {
    supported: boolean;
    samples?: Array<{
      label: string;
      atMs: number;
      spoolSamples: number;
      droppedChunks: number;
      sampleRate: number;
    }>;
    throughputSamplesPerSec?: number | null;
    throughputSecondsPerSec?: number | null;
    spoolSamplesDelta?: number | null;
    droppedChunksDelta?: number | null;
  };
  timings?: {
    injectMs: number;
    firstPartialMs: number | null;
    firstFinalMs: number | null;
    fetchMs: number;
    totalMs: number;
  };
  error?: string;
};

describe('audio pipeline performance', () => {
  before(async () => {
    await waitForAppReady();
  });

  it('runs audio ingestion end-to-end via test injection', async function () {
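    // Probe script execution support first: if the driver rejects execute
    // (e.g. reports 'unknown method'), skip the test instead of failing it.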
    try {
      await browser.execute(() => 1);
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      if (message.includes('execute/sync') || message.includes('unknown method')) {
        this.skip();
      }
      throw error;
    }

    const result = await browser.execute(
      async (payload) => {
        const api = window.__NOTEFLOW_API__;
        const testApi = window.__NOTEFLOW_TEST_API__;
        const connection = window.__NOTEFLOW_CONNECTION__?.getConnectionState?.();

        if (!api) {
          return { error: 'Missing __NOTEFLOW_API__ (E2E mode not enabled?)' };
        }

        // Ensure server preference points at the intended backend.
        testApi?.updatePreferences?.({
          server_address_customized: true,
          server_host: payload.host,
          server_port: payload.port,
          simulate_transcription: false,
        });
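
        // Connect up front so an unreachable backend fails with a descriptive error.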
        try {
          await api.connect?.(`http://${payload.host}:${payload.port}`);
        } catch (error) {
          return {
            error: `Failed to connect to backend at ${payload.host}:${payload.port}: ${
              error instanceof Error ? error.message : String(error)
            }`,
            connectionMode: connection?.mode ?? null,
          };
        }
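
        // Create a meeting and open a live transcription stream; all timings
        // below are measured relative to startTotal.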
        const startTotal = performance.now();
        const meeting = await api.createMeeting({ title: `E2E Audio Perf ${Date.now()}` });
        const stream = await api.startTranscription(meeting.id);
        const diagnosticsSupported = typeof api.getAudioPipelineDiagnostics === 'function';
        const diagSamples: Array<{
          label: string;
          atMs: number;
          spoolSamples: number;
          droppedChunks: number;
          sampleRate: number;
        }> = [];
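
        // Snapshot audio pipeline counters (spooled samples, dropped chunks,
        // sample rate) so deltas between snapshots can be turned into throughput.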
        const sampleDiagnostics = async (label: string) => {
          if (!diagnosticsSupported) {
            return;
          }
          const diag = await api.getAudioPipelineDiagnostics();
          diagSamples.push({
            label,
            atMs: performance.now(),
            spoolSamples: diag.sessionAudioSpoolSamples ?? 0,
            droppedChunks: diag.droppedChunkCount ?? 0,
            sampleRate: diag.audioConfig?.sampleRate ?? 0,
          });
        };

        let firstPartialAt: number | null = null;
        let firstFinalAt: number | null = null;
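
        // Record when the first partial and final transcript updates arrive;
        // resolve on the first final, reject if nothing arrives in time.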
        const updatePromise = new Promise<void>((resolve, reject) => {
          const timeoutId = window.setTimeout(() => {
            reject(new Error('Timed out waiting for transcript updates'));
          }, payload.timeoutMs);

          void stream.onUpdate((update) => {
            const now = performance.now();
            if (update.type === 'partial' && firstPartialAt === null) {
              firstPartialAt = now;
            }
            if (update.type === 'final' && firstFinalAt === null) {
              firstFinalAt = now;
              window.clearTimeout(timeoutId);
              resolve();
            }
          });
        });
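
        // Stream the WAV fixture into the pipeline at 2x speed in 100 ms chunks,
        // preferring the main API surface and falling back to the test-only API.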
        const injectStart = performance.now();
        await sampleDiagnostics('before_inject');
        if (typeof api.injectTestAudio === 'function') {
          await api.injectTestAudio(meeting.id, {
            wavPath: payload.wavPath,
            speed: 2.0,
            chunkMs: 100,
          });
        } else if (typeof testApi?.injectTestAudio === 'function') {
          await testApi.injectTestAudio(meeting.id, {
            wavPath: payload.wavPath,
            speed: 2.0,
            chunkMs: 100,
          });
        } else {
          return { error: 'Test audio injection API not available in this build' };
        }
        await sampleDiagnostics('after_inject');

        try {
          await updatePromise;
        } catch (error) {
          await stream.close?.().catch(() => {});
          return {
            error: error instanceof Error ? error.message : String(error),
          };
        }

        await stream.close?.();
        await sampleDiagnostics('after_final');
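
        // Derive ingestion throughput from the first and last snapshots: spooled
        // samples per wall-clock second and, divided by the sample rate, seconds
        // of audio ingested per wall-clock second.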
        let throughputSamplesPerSec: number | null = null;
        let throughputSecondsPerSec: number | null = null;
        let spoolSamplesDelta: number | null = null;
        let droppedChunksDelta: number | null = null;
        if (diagSamples.length >= 2) {
          const first = diagSamples[0];
          const last = diagSamples[diagSamples.length - 1];
          const deltaMs = last.atMs - first.atMs;
          spoolSamplesDelta = last.spoolSamples - first.spoolSamples;
          droppedChunksDelta = last.droppedChunks - first.droppedChunks;
          if (deltaMs > 0) {
            throughputSamplesPerSec = (spoolSamplesDelta / deltaMs) * 1000;
          }
          if (throughputSamplesPerSec && first.sampleRate > 0) {
            throughputSecondsPerSec = throughputSamplesPerSec / first.sampleRate;
          }
        }
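
        // Fetch the meeting with its segments to confirm the transcript was
        // persisted, timing the round trip.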
        const fetchStart = performance.now();
        const meetingWithSegments = await api.getMeeting({
          meeting_id: meeting.id,
          include_segments: true,
        });
        const fetchMs = performance.now() - fetchStart;

        const segmentCount = meetingWithSegments.segments?.length ?? 0;

        return {
          connectionMode: window.__NOTEFLOW_CONNECTION__?.getConnectionState?.().mode ?? null,
          meetingId: meeting.id,
          segmentCount,
          audioDiagnostics: {
            supported: diagnosticsSupported,
            samples: diagSamples.length > 0 ? diagSamples : undefined,
            throughputSamplesPerSec,
            throughputSecondsPerSec,
            spoolSamplesDelta,
            droppedChunksDelta,
          },
          timings: {
            injectMs: performance.now() - injectStart,
            firstPartialMs: firstPartialAt ? firstPartialAt - injectStart : null,
            firstFinalMs: firstFinalAt ? firstFinalAt - injectStart : null,
            fetchMs,
            totalMs: performance.now() - startTotal,
          },
        };
      },
      {
        host: ServerConfig.host,
        port: ServerConfig.port,
        wavPath: WAV_PATH,
        timeoutMs: Timeouts.TRANSCRIPT_WAIT_MS,
      }
    );

    const perf = result as PerfResult;
    if (perf.error) {
      throw new Error(perf.error);
    }
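
    // The run must have reached the real backend, persisted at least one segment,
    // seen a final transcript, and moved samples through the audio spool.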
    expect(perf.connectionMode).toBe('connected');
    expect(perf.segmentCount).toBeGreaterThan(0);
    expect(perf.timings?.firstFinalMs ?? 0).toBeGreaterThan(0);
    expect(perf.audioDiagnostics?.supported).toBe(true);
    expect(perf.audioDiagnostics?.spoolSamplesDelta ?? 0).toBeGreaterThan(0);
  });
});