Improve Cupcake policy denial messages with actionable remediation guidance; add coverage Make targets, @dnd-kit dependencies, a tasks e2e spec, and quality-check helper scripts

This commit is contained in:
2026-01-22 15:34:56 +00:00
parent 073b70cc39
commit 19e39bed5a
213 changed files with 24507 additions and 10397 deletions

BIN
.coverage

Binary file not shown.

View File

@@ -120,7 +120,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-LOG-001",
"reason": "Stdlib logging usage is prohibited. Use the project logging utilities instead.",
"reason": "Stdlib logging is blocked. Use `from noteflow.infrastructure.logging import get_logger` and `logger = get_logger(__name__)`.",
"severity": "HIGH"
}
}
@@ -139,7 +139,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-LOG-001",
"reason": "Stdlib logging usage is prohibited. Use the project logging utilities instead.",
"reason": "Stdlib logging is blocked. Use `from noteflow.infrastructure.logging import get_logger` and `logger = get_logger(__name__)`.",
"severity": "HIGH"
}
}
@@ -157,7 +157,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-LOG-001",
"reason": "Stdlib logging usage is prohibited. Use the project logging utilities instead.",
"reason": "Stdlib logging is blocked. Use `from noteflow.infrastructure.logging import get_logger` and `logger = get_logger(__name__)`.",
"severity": "HIGH"
}
}

View File

@@ -132,7 +132,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-ASSERT-001",
"reason": "Multiple bare asserts detected. Use one assert per test or add assertion messages.",
"reason": "Multiple bare asserts are blocked. Use one assert per test or add messages (e.g., `assert cond, 'why'`).",
"severity": "HIGH"
}
}
@@ -151,7 +151,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-ASSERT-001",
"reason": "Multiple bare asserts detected. Use one assert per test or add assertion messages.",
"reason": "Multiple bare asserts are blocked. Use one assert per test or add messages (e.g., `assert cond, 'why'`).",
"severity": "HIGH"
}
}
@@ -169,7 +169,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-ASSERT-001",
"reason": "Multiple bare asserts detected. Use one assert per test or add assertion messages.",
"reason": "Multiple bare asserts are blocked. Use one assert per test or add messages (e.g., `assert cond, 'why'`).",
"severity": "HIGH"
}
}

View File

@@ -132,7 +132,7 @@ deny contains decision if {
decision := {
"rule_id": "TS-LINT-002",
"reason": "Ignore directives for Biome/TypeScript/ESLint are prohibited.",
"reason": "Biome/ESLint/TS ignore directives are blocked. Fix the lint issue or refactor; use `make lint-fix`.",
"severity": "HIGH"
}
}
@@ -151,7 +151,7 @@ deny contains decision if {
decision := {
"rule_id": "TS-LINT-002",
"reason": "Ignore directives for Biome/TypeScript/ESLint are prohibited.",
"reason": "Biome/ESLint/TS ignore directives are blocked. Fix the lint issue or refactor; use `make lint-fix`.",
"severity": "HIGH"
}
}
@@ -169,7 +169,7 @@ deny contains decision if {
decision := {
"rule_id": "TS-LINT-002",
"reason": "Ignore directives for Biome/TypeScript/ESLint are prohibited.",
"reason": "Biome/ESLint/TS ignore directives are blocked. Fix the lint issue or refactor; use `make lint-fix`.",
"severity": "HIGH"
}
}

View File

@@ -20,7 +20,7 @@ deny contains decision if {
decision := {
"rule_id": "TS-LINT-001",
"reason": "Ignore directives for Biome/TypeScript/ESLint are prohibited.",
"reason": "Biome/ESLint/TS ignore directives are blocked. Fix the lint issue or refactor; use `make lint-fix`.",
"severity": "HIGH"
}
}

View File

@@ -107,7 +107,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-EXC-001",
"reason": "Broad Exception handlers that only log are prohibited.",
"reason": "Broad `except Exception` with only logging is blocked. Catch specific exceptions or re-raise after logging.",
"severity": "HIGH"
}
}
@@ -123,7 +123,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-EXC-001",
"reason": "Broad Exception handlers that only log are prohibited.",
"reason": "Broad `except Exception` with only logging is blocked. Catch specific exceptions or re-raise after logging.",
"severity": "HIGH"
}
}
@@ -140,7 +140,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-EXC-001",
"reason": "Broad Exception handlers that only log are prohibited.",
"reason": "Broad `except Exception` with only logging is blocked. Catch specific exceptions or re-raise after logging.",
"severity": "HIGH"
}
}

View File

@@ -20,7 +20,7 @@ deny contains decision if {
decision := {
"rule_id": "TS-QUALITY-001",
"reason": "Direct edits to src/test/code-quality.test.ts are prohibited.",
"reason": "Direct edits to `client/src/test/code-quality.test.ts` are blocked. Fix code issues instead.",
"severity": "HIGH"
}
}

View File

@@ -106,7 +106,7 @@ deny contains decision if {
decision := {
"rule_id": "TS-QUALITY-002",
"reason": "Direct edits to src/test/code-quality.test.ts are prohibited.",
"reason": "Direct edits to `client/src/test/code-quality.test.ts` are blocked. Fix code issues instead.",
"severity": "HIGH"
}
}
@@ -121,7 +121,7 @@ deny contains decision if {
decision := {
"rule_id": "TS-QUALITY-002",
"reason": "Direct edits to src/test/code-quality.test.ts are prohibited.",
"reason": "Direct edits to `client/src/test/code-quality.test.ts` are blocked. Fix code issues instead.",
"severity": "HIGH"
}
}

View File

@@ -120,7 +120,7 @@ deny contains decision if {
decision := {
"rule_id": "TS-QUALITY-003",
"reason": "Direct edits to src/test/code-quality.test.ts are prohibited.",
"reason": "Direct edits to `client/src/test/code-quality.test.ts` are blocked. Fix code issues instead.",
"severity": "HIGH"
}
}

View File

@@ -120,7 +120,7 @@ deny contains decision if {
decision := {
"rule_id": "TS-QUALITY-004",
"reason": "Direct edits to src/test/code-quality.test.ts are prohibited.",
"reason": "Direct edits to `client/src/test/code-quality.test.ts` are blocked. Fix code issues instead.",
"severity": "HIGH"
}
}

View File

@@ -107,7 +107,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-DT-001",
"reason": "Returning datetime.now() as a fallback is prohibited. Use a caller-provided timestamp.",
"reason": "Fallback to `datetime.now()` is blocked. Use `utc_now()` or require a caller-provided timestamp.",
"severity": "HIGH"
}
}
@@ -123,7 +123,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-DT-001",
"reason": "Returning datetime.now() as a fallback is prohibited. Use a caller-provided timestamp.",
"reason": "Fallback to `datetime.now()` is blocked. Use `utc_now()` or require a caller-provided timestamp.",
"severity": "HIGH"
}
}
@@ -140,7 +140,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-DT-001",
"reason": "Returning datetime.now() as a fallback is prohibited. Use a caller-provided timestamp.",
"reason": "Fallback to `datetime.now()` is blocked. Use `utc_now()` or require a caller-provided timestamp.",
"severity": "HIGH"
}
}

View File

@@ -107,7 +107,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-EXC-002",
"reason": "Swallowing exceptions and returning defaults is prohibited.",
"reason": "Swallowing exceptions and returning defaults is blocked. Catch specific errors and re-raise or return a typed error/result.",
"severity": "HIGH"
}
}
@@ -123,7 +123,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-EXC-002",
"reason": "Swallowing exceptions and returning defaults is prohibited.",
"reason": "Swallowing exceptions and returning defaults is blocked. Catch specific errors and re-raise or return a typed error/result.",
"severity": "HIGH"
}
}
@@ -140,7 +140,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-EXC-002",
"reason": "Swallowing exceptions and returning defaults is prohibited.",
"reason": "Swallowing exceptions and returning defaults is blocked. Catch specific errors and re-raise or return a typed error/result.",
"severity": "HIGH"
}
}

View File

@@ -134,7 +134,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-FIX-001",
"reason": "Duplicate global fixtures are prohibited. Use tests/conftest.py fixtures instead.",
"reason": "Duplicate global fixtures are blocked. Reuse fixtures in `tests/conftest.py`, or add new ones there.",
"severity": "HIGH"
}
}
@@ -154,7 +154,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-FIX-001",
"reason": "Duplicate global fixtures are prohibited. Use tests/conftest.py fixtures instead.",
"reason": "Duplicate global fixtures are blocked. Reuse fixtures in `tests/conftest.py`, or add new ones there.",
"severity": "HIGH"
}
}
@@ -173,7 +173,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-FIX-001",
"reason": "Duplicate global fixtures are prohibited. Use tests/conftest.py fixtures instead.",
"reason": "Duplicate global fixtures are blocked. Reuse fixtures in `tests/conftest.py`, or add new ones there.",
"severity": "HIGH"
}
}

View File

@@ -107,7 +107,7 @@ deny contains decision if {
decision := {
"rule_id": "TS-CONFIG-002",
"reason": "Frontend linter/config file edits are prohibited.",
"reason": "Frontend lint/type config edits are blocked. Fix code or use `make lint-fix` / `make type-check`.",
"severity": "HIGH"
}
}
@@ -123,7 +123,7 @@ deny contains decision if {
decision := {
"rule_id": "TS-CONFIG-002",
"reason": "Frontend linter/config file edits are prohibited.",
"reason": "Frontend lint/type config edits are blocked. Fix code or use `make lint-fix` / `make type-check`.",
"severity": "HIGH"
}
}

View File

@@ -20,7 +20,7 @@ deny contains decision if {
decision := {
"rule_id": "TS-CONFIG-001",
"reason": "Frontend linter/config file edits are prohibited.",
"reason": "Frontend lint/type config edits are blocked. Fix code or use `make lint-fix` / `make type-check`.",
"severity": "HIGH"
}
}

View File

@@ -107,7 +107,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-CONFIG-002",
"reason": "Python linter/config file edits are prohibited.",
"reason": "Python lint/type config edits are blocked. Fix code or use `make lint-fix-py` / `make type-check-py`.",
"severity": "HIGH"
}
}
@@ -123,7 +123,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-CONFIG-002",
"reason": "Python linter/config file edits are prohibited.",
"reason": "Python lint/type config edits are blocked. Fix code or use `make lint-fix-py` / `make type-check-py`.",
"severity": "HIGH"
}
}

View File

@@ -20,7 +20,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-CONFIG-001",
"reason": "Python linter/config file edits are prohibited.",
"reason": "Python lint/type config edits are blocked. Fix code or use `make lint-fix-py` / `make type-check-py`.",
"severity": "HIGH"
}
}

View File

@@ -133,7 +133,7 @@ deny contains decision if {
decision := {
"rule_id": "STYLE-001",
"reason": "Magic numbers are prohibited. Use named constants.",
"reason": "Magic numbers are blocked. Define a named `typing.Final` constant (e.g., in domain/constants) and use it.",
"severity": "HIGH"
}
}
@@ -153,7 +153,7 @@ deny contains decision if {
decision := {
"rule_id": "STYLE-001",
"reason": "Magic numbers are prohibited. Use named constants.",
"reason": "Magic numbers are blocked. Define a named `typing.Final` constant (e.g., in domain/constants) and use it.",
"severity": "HIGH"
}
}
@@ -172,7 +172,7 @@ deny contains decision if {
decision := {
"rule_id": "STYLE-001",
"reason": "Magic numbers are prohibited. Use named constants.",
"reason": "Magic numbers are blocked. Define a named `typing.Final` constant (e.g., in domain/constants) and use it.",
"severity": "HIGH"
}
}

View File

@@ -20,7 +20,7 @@ deny contains decision if {
decision := {
"rule_id": "BUILD-001",
"reason": "Makefile edits are prohibited.",
"reason": "Makefile edits are blocked. Use existing `make` targets or request explicit permission.",
"severity": "HIGH"
}
}

View File

@@ -106,7 +106,7 @@ deny contains decision if {
decision := {
"rule_id": "BUILD-002",
"reason": "Makefile edits are prohibited.",
"reason": "Makefile edits are blocked. Use existing `make` targets or request explicit permission.",
"severity": "HIGH"
}
}
@@ -121,7 +121,7 @@ deny contains decision if {
decision := {
"rule_id": "BUILD-002",
"reason": "Makefile edits are prohibited.",
"reason": "Makefile edits are blocked. Use existing `make` targets or request explicit permission.",
"severity": "HIGH"
}
}

View File

@@ -20,7 +20,7 @@ deny contains decision if {
decision := {
"rule_id": "GIT-001",
"reason": "Git commit --no-verify is prohibited.",
"reason": "`git commit --no-verify` is blocked. Run required checks (e.g., `make quality`) instead.",
"severity": "HIGH"
}
}

View File

@@ -107,7 +107,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-EXC-003",
"reason": "Silent exception handlers returning empty values are prohibited.",
"reason": "Silent exception handlers returning empty values are blocked. Propagate the error or return a typed Result; log with context if needed.",
"severity": "HIGH"
}
}
@@ -123,7 +123,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-EXC-003",
"reason": "Silent exception handlers returning empty values are prohibited.",
"reason": "Silent exception handlers returning empty values are blocked. Propagate the error or return a typed Result; log with context if needed.",
"severity": "HIGH"
}
}
@@ -140,7 +140,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-EXC-003",
"reason": "Silent exception handlers returning empty values are prohibited.",
"reason": "Silent exception handlers returning empty values are blocked. Propagate the error or return a typed Result; log with context if needed.",
"severity": "HIGH"
}
}

View File

@@ -132,7 +132,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-STRUCT-001",
"reason": "Loops or conditionals inside tests are prohibited. Use parametrization.",
"reason": "Loops/conditionals inside tests are blocked. Use `@pytest.mark.parametrize` or split into separate tests.",
"severity": "HIGH"
}
}
@@ -151,7 +151,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-STRUCT-001",
"reason": "Loops or conditionals inside tests are prohibited. Use parametrization.",
"reason": "Loops/conditionals inside tests are blocked. Use `@pytest.mark.parametrize` or split into separate tests.",
"severity": "HIGH"
}
}
@@ -169,7 +169,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-STRUCT-001",
"reason": "Loops or conditionals inside tests are prohibited. Use parametrization.",
"reason": "Loops/conditionals inside tests are blocked. Use `@pytest.mark.parametrize` or split into separate tests.",
"severity": "HIGH"
}
}

View File

@@ -108,7 +108,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-QUALITY-002",
"reason": "Direct edits to tests/quality are prohibited (except baselines.json).",
"reason": "Direct edits to `tests/quality` are blocked. Fix the underlying code; only `tests/quality/baselines.json` may be edited with explicit approval.",
"severity": "HIGH"
}
}
@@ -124,7 +124,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-QUALITY-002",
"reason": "Direct edits to tests/quality are prohibited (except baselines.json).",
"reason": "Direct edits to `tests/quality` are blocked. Fix the underlying code; only `tests/quality/baselines.json` may be edited with explicit approval.",
"severity": "HIGH"
}
}

View File

@@ -21,7 +21,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-QUALITY-001",
"reason": "Direct edits to tests/quality are prohibited (except baselines.json).",
"reason": "Direct edits to `tests/quality` are blocked. Fix the underlying code; only `tests/quality/baselines.json` may be edited with explicit approval.",
"severity": "HIGH"
}
}

View File

@@ -15,7 +15,7 @@ import rego.v1
deny contains decision if {
input.tool_input.command == "CUPCAKE_EXAMPLE_RULE_THAT_NEVER_FIRES_12345"
decision := {
"reason": "This will never happen",
"reason": "Example policy: replace this reason with actionable remediation guidance.",
"severity": "LOW",
"rule_id": "EXAMPLE-001"
}

View File

@@ -137,7 +137,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-TYPE-001",
"reason": "Use of Any is prohibited in Python type annotations/imports. Replace with Protocol, TypeVar, TypedDict, or a concrete type.",
"reason": "`Any` is blocked in Python types. Prefer Protocol/TypeVar/TypedDict or concrete types; use `cast()` with a justification comment only as a last resort.",
"severity": "HIGH"
}
}
@@ -154,7 +154,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-TYPE-001",
"reason": "Use of Any is prohibited in Python type annotations/imports. Replace with Protocol, TypeVar, TypedDict, or a concrete type.",
"reason": "`Any` is blocked in Python types. Prefer Protocol/TypeVar/TypedDict or concrete types; use `cast()` with a justification comment only as a last resort.",
"severity": "HIGH"
}
}
@@ -175,7 +175,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-TYPE-001",
"reason": "Use of Any is prohibited in Python type annotations/imports. Replace with Protocol, TypeVar, TypedDict, or a concrete type.",
"reason": "`Any` is blocked in Python types. Prefer Protocol/TypeVar/TypedDict or concrete types; use `cast()` with a justification comment only as a last resort.",
"severity": "HIGH"
}
}

View File

@@ -132,7 +132,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-TYPE-002",
"reason": "Type suppression directives are prohibited in Python code. Fix the underlying type/lint issues instead.",
"reason": "Type suppression directives are blocked. Fix the type issue using Protocol/TypeVar/TypedDict or `cast()` with a justification comment.",
"severity": "HIGH"
}
}
@@ -149,7 +149,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-TYPE-002",
"reason": "Type suppression directives are prohibited in Python code. Fix the underlying type/lint issues instead.",
"reason": "Type suppression directives are blocked. Fix the type issue using Protocol/TypeVar/TypedDict or `cast()` with a justification comment.",
"severity": "HIGH"
}
}
@@ -170,7 +170,7 @@ deny contains decision if {
decision := {
"rule_id": "PY-TYPE-002",
"reason": "Type suppression directives are prohibited in Python code. Fix the underlying type/lint issues instead.",
"reason": "Type suppression directives are blocked. Fix the type issue using Protocol/TypeVar/TypedDict or `cast()` with a justification comment.",
"severity": "HIGH"
}
}

View File

@@ -106,7 +106,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-QUALITY-004",
"reason": "Warning: editing tests/quality/baselines.json should be avoided unless explicitly required.",
"reason": "Warning: avoid editing `tests/quality/baselines.json`. Prefer fixing code; if approved, keep baseline changes minimal and documented.",
"severity": "LOW"
}
}
@@ -121,7 +121,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-QUALITY-004",
"reason": "Warning: editing tests/quality/baselines.json should be avoided unless explicitly required.",
"reason": "Warning: avoid editing `tests/quality/baselines.json`. Prefer fixing code; if approved, keep baseline changes minimal and documented.",
"severity": "LOW"
}
}

View File

@@ -20,7 +20,7 @@ deny contains decision if {
decision := {
"rule_id": "TEST-QUALITY-003",
"reason": "Warning: editing tests/quality/baselines.json should be avoided unless explicitly required.",
"reason": "Warning: avoid editing `tests/quality/baselines.json`. Prefer fixing code; if approved, keep baseline changes minimal and documented.",
"severity": "LOW"
}
}

View File

@@ -132,7 +132,7 @@ deny contains decision if {
decision := {
"rule_id": "STYLE-002",
"reason": "Warning: file content exceeds 500 lines. Consider refactoring.",
"reason": "Warning: file exceeds 500 lines. Consider extracting helpers or splitting modules/classes.",
"severity": "LOW"
}
}
@@ -151,7 +151,7 @@ deny contains decision if {
decision := {
"rule_id": "STYLE-002",
"reason": "Warning: file content exceeds 500 lines. Consider refactoring.",
"reason": "Warning: file exceeds 500 lines. Consider extracting helpers or splitting modules/classes.",
"severity": "LOW"
}
}
@@ -169,7 +169,7 @@ deny contains decision if {
decision := {
"rule_id": "STYLE-002",
"reason": "Warning: file content exceeds 500 lines. Consider refactoring.",
"reason": "Warning: file exceeds 500 lines. Consider extracting helpers or splitting modules/classes.",
"severity": "LOW"
}
}

4
.run_quality_check.sh Normal file
View File

@@ -0,0 +1,4 @@
#!/bin/bash
# Run the code-smell quality tests and show the last 100 lines of output.
#
# set -e: abort immediately if cd or the venv activation fails, instead of
#         silently running pytest in the wrong directory/environment.
# set -u: treat unset variables as errors.
# set -o pipefail: make the script's exit status reflect pytest's result
#                  rather than tail's (which always succeeds).
set -euo pipefail

cd /home/trav/repos/noteflow
# shellcheck disable=SC1091  # activate script is generated by venv, not in repo
source .venv/bin/activate
pytest tests/quality/test_code_smells.py -v --tb=short 2>&1 | tail -100

View File

@@ -1,7 +1,7 @@
# NoteFlow Quality Checks
# Runs TypeScript, Rust, and Python quality checks
.PHONY: all quality quality-ts quality-rs quality-py lint type-check test-quality \
.PHONY: all quality quality-ts quality-rs quality-py lint type-check test-quality coverage coverage-ts \
lint-rs clippy fmt fmt-rs fmt-check check help e2e e2e-ui e2e-grpc \
ensure-py ensure-ts ensure-rs ensure-hygiene install-hooks uninstall-hooks
@@ -100,6 +100,15 @@ test-quality: ensure-ts
@echo "=== TypeScript Quality Tests ==="
cd client && npm run test:quality
## Run Vitest with coverage
coverage-ts: ensure-ts
@echo "=== TypeScript Coverage (Vitest) ==="
cd client && npm run test -- --coverage
## Run all coverage checks
coverage: coverage-ts
@echo "✓ Coverage checks passed"
#-------------------------------------------------------------------------------
# Rust Quality Checks
#-------------------------------------------------------------------------------
@@ -257,6 +266,8 @@ help:
@echo " check Run Biome check (lint + format)"
@echo " check-fix Auto-fix all Biome issues"
@echo " test-quality Run Vitest quality tests"
@echo " coverage Run coverage checks"
@echo " coverage-ts Run Vitest test coverage"
@echo ""
@echo "Rust:"
@echo " clippy Run Clippy linter"

61
check_violations.py Normal file
View File

@@ -0,0 +1,61 @@
#!/usr/bin/env python3
"""Quick script to check long parameter list violations."""
from __future__ import annotations
import importlib
import sys
from collections.abc import Mapping
from typing import Protocol, cast
sys.path.insert(0, "/home/trav/repos/noteflow/tests")
sys.path.insert(0, "/home/trav/repos/noteflow/src")
class Violation(Protocol):
stable_id: str
relative_path: str
identifier: str
detail: str
class CollectLongParameterLists(Protocol):
def __call__(self, *, parse_errors: list[str]) -> list[Violation]: ...
def load_collectors() -> tuple[CollectLongParameterLists, Mapping[str, set[str]]]:
code_smells = importlib.import_module("quality._detectors.code_smells")
baseline_module = importlib.import_module("quality._baseline")
collect = cast(CollectLongParameterLists, getattr(code_smells, "collect_long_parameter_lists"))
baseline = cast(Mapping[str, set[str]], getattr(baseline_module, "load_baseline")())
return collect, baseline
collect_long_parameter_lists, baseline = load_collectors()
parse_errors: list[str] = []
violations = collect_long_parameter_lists(parse_errors=parse_errors)
allowed_ids = baseline.get("long_parameter_list", set())
current_ids = {v.stable_id for v in violations}
new_ids = current_ids - allowed_ids
new_violations = [v for v in violations if v.stable_id in new_ids]
print(f"Total violations found: {len(violations)}")
print(f"Baseline allows: {len(allowed_ids)}")
print(f"NEW violations (not in baseline): {len(new_violations)}")
print()
if new_violations:
print("NEW VIOLATIONS:")
for v in sorted(new_violations, key=lambda x: x.stable_id):
print(f" {v.relative_path}:{v.identifier} ({v.detail})")
else:
print("No new violations - test would PASS")
if parse_errors:
print(f"\nParse errors: {len(parse_errors)}")
for e in parse_errors[:5]:
print(f" {e}")

View File

@@ -6,7 +6,7 @@
*/
import { invoke } from '@tauri-apps/api/core';
import { TauriCommands } from '../src/api/tauri-constants';
import { TauriCommands } from '../src/api/adapters/tauri/constants';
/**
* Test environment information returned by check_test_environment.
@@ -52,7 +52,7 @@ export interface TestAudioResult {
* Check if the test environment is properly configured for audio tests.
*/
export async function checkTestEnvironment(): Promise<TestEnvironmentInfo> {
return invoke(TauriCommands.CHECK_TEST_ENVIRONMENT);
return invoke<TestEnvironmentInfo>(TauriCommands.CHECK_TEST_ENVIRONMENT);
}
/**
@@ -66,7 +66,7 @@ export async function injectTestAudio(
meetingId: string,
config: TestAudioConfig
): Promise<TestAudioResult> {
return invoke(TauriCommands.INJECT_TEST_AUDIO, {
return invoke<TestAudioResult>(TauriCommands.INJECT_TEST_AUDIO, {
meeting_id: meetingId,
config,
});
@@ -86,7 +86,7 @@ export async function injectTestTone(
durationSeconds: number,
sampleRate?: number
): Promise<TestAudioResult> {
return invoke(TauriCommands.INJECT_TEST_TONE, {
return invoke<TestAudioResult>(TauriCommands.INJECT_TEST_TONE, {
meeting_id: meetingId,
frequency_hz: frequencyHz,
duration_seconds: durationSeconds,

86
client/e2e/tasks.spec.ts Normal file
View File

@@ -0,0 +1,86 @@
import { expect, test } from '@playwright/test';
import { callAPI, navigateTo, waitForAPI, waitForLoadingComplete } from './fixtures';

// E2E suites are opt-in: they need a running backend, so gate on an env flag.
const shouldRun = process.env.NOTEFLOW_E2E === '1';
// Well-known workspace seeded for end-to-end runs.
const TEST_WORKSPACE_ID = '00000000-0000-0000-0000-000000000001';

test.describe('tasks api integration', () => {
  test.skip(!shouldRun, 'Set NOTEFLOW_E2E=1 to enable end-to-end tests.');

  test.beforeEach(async ({ page }) => {
    await navigateTo(page, '/');
    await waitForAPI(page);
  });

  test('listTasks returns array of tasks', async ({ page }) => {
    const response = await callAPI<{ tasks: unknown[]; total: number }>(page, 'listTasks', {
      workspace_id: TEST_WORKSPACE_ID,
    });
    // Shape checks: the endpoint must expose a task array and a numeric total.
    expect(response).toHaveProperty('tasks');
    expect(response).toHaveProperty('total');
    expect(Array.isArray(response.tasks)).toBe(true);
    expect(typeof response.total).toBe('number');
  });

  test('listTasks supports status filtering', async ({ page }) => {
    const filtered = await callAPI<{ tasks: { status: string }[]; total: number }>(
      page,
      'listTasks',
      {
        workspace_id: TEST_WORKSPACE_ID,
        statuses: ['open'],
      }
    );
    // Every returned task must match the requested status filter.
    for (const task of filtered.tasks) {
      expect(task.status).toBe('open');
    }
  });

  test('listTasks supports pagination', async ({ page }) => {
    const pageSize = 5;
    const firstPage = await callAPI<{ tasks: { id: string }[]; total: number }>(page, 'listTasks', {
      workspace_id: TEST_WORKSPACE_ID,
      limit: pageSize,
      offset: 0,
    });
    expect(firstPage.tasks.length).toBeLessThanOrEqual(pageSize);
    // Only verify disjointness when a second page actually exists.
    if (firstPage.total > pageSize) {
      const secondPage = await callAPI<{ tasks: { id: string }[] }>(page, 'listTasks', {
        workspace_id: TEST_WORKSPACE_ID,
        limit: pageSize,
        offset: pageSize,
      });
      const firstIds = new Set(firstPage.tasks.map((t) => t.id));
      const duplicated = secondPage.tasks.filter((t) => firstIds.has(t.id));
      expect(duplicated.length).toBe(0);
    }
  });
});

test.describe('tasks page ui', () => {
  test.skip(!shouldRun, 'Set NOTEFLOW_E2E=1 to enable end-to-end tests.');

  test('tasks page loads successfully', async ({ page }) => {
    await navigateTo(page, '/tasks');
    await waitForLoadingComplete(page);
    await expect(page.locator('main')).toBeVisible();
  });

  test('tasks page displays task list or empty state', async ({ page }) => {
    await navigateTo(page, '/tasks');
    await waitForLoadingComplete(page);
    // Either a task list / explicit empty state is rendered, or at minimum
    // the main region is visible (the page did not crash while loading).
    const taskList = page.locator('[data-testid="task-list"], [data-testid="empty-state"]');
    const hasContent = (await taskList.count()) > 0 || (await page.locator('main').isVisible());
    expect(hasContent).toBe(true);
  });
});

View File

@@ -8,6 +8,9 @@
"name": "noteflow-client",
"version": "0.1.0",
"dependencies": {
"@dnd-kit/core": "^6.1.0",
"@dnd-kit/sortable": "^8.0.0",
"@dnd-kit/utilities": "^3.2.2",
"@hookform/resolvers": "^3.10.0",
"@radix-ui/react-accordion": "^1.2.11",
"@radix-ui/react-alert-dialog": "^1.1.14",
@@ -533,6 +536,59 @@
"node": ">=18"
}
},
"node_modules/@dnd-kit/accessibility": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/@dnd-kit/accessibility/-/accessibility-3.1.1.tgz",
"integrity": "sha512-2P+YgaXF+gRsIihwwY1gCsQSYnu9Zyj2py8kY5fFvUM1qm2WA2u639R6YNVfU4GWr+ZM5mqEsfHZZLoRONbemw==",
"license": "MIT",
"dependencies": {
"tslib": "^2.0.0"
},
"peerDependencies": {
"react": ">=16.8.0"
}
},
"node_modules/@dnd-kit/core": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/@dnd-kit/core/-/core-6.3.1.tgz",
"integrity": "sha512-xkGBRQQab4RLwgXxoqETICr6S5JlogafbhNsidmrkVv2YRs5MLwpjoF2qpiGjQt8S9AoxtIV603s0GIUpY5eYQ==",
"license": "MIT",
"dependencies": {
"@dnd-kit/accessibility": "^3.1.1",
"@dnd-kit/utilities": "^3.2.2",
"tslib": "^2.0.0"
},
"peerDependencies": {
"react": ">=16.8.0",
"react-dom": ">=16.8.0"
}
},
"node_modules/@dnd-kit/sortable": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/@dnd-kit/sortable/-/sortable-8.0.0.tgz",
"integrity": "sha512-U3jk5ebVXe1Lr7c2wU7SBZjcWdQP+j7peHJfCspnA81enlu88Mgd7CC8Q+pub9ubP7eKVETzJW+IBAhsqbSu/g==",
"license": "MIT",
"dependencies": {
"@dnd-kit/utilities": "^3.2.2",
"tslib": "^2.0.0"
},
"peerDependencies": {
"@dnd-kit/core": "^6.1.0",
"react": ">=16.8.0"
}
},
"node_modules/@dnd-kit/utilities": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/@dnd-kit/utilities/-/utilities-3.2.2.tgz",
"integrity": "sha512-+MKAJEOfaBe5SmV6t34p80MMKhjvUz0vRrvVJbPT0WElzaOJ/1xs+D+KDv+tD/NE5ujfrChEcshd4fLn0wpiqg==",
"license": "MIT",
"dependencies": {
"tslib": "^2.0.0"
},
"peerDependencies": {
"react": ">=16.8.0"
}
},
"node_modules/@esbuild/aix-ppc64": {
"version": "0.25.12",
"resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz",

View File

@@ -61,6 +61,9 @@
"@radix-ui/react-toggle": "^1.1.9",
"@radix-ui/react-toggle-group": "^1.1.10",
"@radix-ui/react-tooltip": "^1.2.7",
"@dnd-kit/core": "^6.1.0",
"@dnd-kit/sortable": "^8.0.0",
"@dnd-kit/utilities": "^3.2.2",
"@tanstack/react-query": "^5.83.0",
"@tanstack/react-virtual": "^3.13.13",
"@tauri-apps/api": "^2.9.1",

View File

@@ -0,0 +1,437 @@
Finished `test` profile [unoptimized + debuginfo] target(s) in 0.57s
Running unittests src/lib.rs (target/debug/deps/noteflow_lib-38d0670d84e04361)
running 323 tests
test audio::capture::tests::calculate_rms_i16_empty_returns_zero ... ok
test audio::capture::tests::rms_to_db_negative_returns_floor ... ok
test audio::capture::tests::calculate_rms_u16_empty_returns_zero ... ok
test audio::capture::tests::rms_to_db_silence_returns_floor ... ok
test audio::capture::tests::rms_to_db_unit_signal_is_zero ... ok
test audio::capture::tests::calculate_rms_empty_returns_zero ... ok
test audio::capture::tests::normalize_for_asr_empty_returns_unity_gain ... ok
test audio::capture::tests::calculate_rms_i16_min_value_within_bounds ... ok
test audio::capture::tests::calculate_rms_silence_returns_zero ... ok
test audio::capture::tests::calculate_rms_u16_midpoint_near_zero ... ok
test audio::capture::tests::normalize_for_asr_respects_max_gain ... ok
test audio::capture::tests::calculate_rms_unit_signal ... ok
test audio::capture::tests::soft_clip_linear_region ... ok
test audio::capture::tests::soft_clip_negative_saturation ... ok
test audio::capture::tests::normalize_for_asr_silence_returns_unity_gain ... ok
test audio::capture::tests::normalize_for_asr_boosts_quiet_audio ... ok
test audio::capture::tests::soft_clip_saturation_region ... ok
test audio::drift_compensation::detector::tests::test_drift_detector_positive_drift ... ok
test audio::drift_compensation::detector::tests::test_drift_detector_new ... ok
test audio::drift_compensation::detector::tests::test_drift_detector_negative_drift ... ok
test audio::drift_compensation::detector::tests::test_drift_detector_reset ... ok
test audio::drift_compensation::detector::tests::test_drift_detector_returns_ratio_above_threshold ... ok
test audio::drift_compensation::metrics::tests::test_record_overflow ... ok
test audio::drift_compensation::metrics::tests::test_update_values ... ok
test audio::drift_compensation::detector::tests::test_drift_detector_no_drift ... ok
test audio::drift_compensation::metrics::tests::test_drift_metrics_new ... ok
test audio::drift_compensation::metrics::tests::test_record_adjustment ... ok
test audio::drift_compensation::metrics::tests::test_record_error ... ok
test audio::drift_compensation::metrics::tests::test_snapshot ... ok
test audio::drift_compensation::resampler::tests::test_adaptive_resampler_reset ... ok
test audio::drift_compensation::resampler::tests::test_interleave_stereo ... ok
test audio::drift_compensation::resampler::tests::test_deinterleave_stereo ... ok
test audio::drift_compensation::resampler::tests::test_deinterleave_mono ... ok
test audio::drift_compensation::metrics::tests::test_reset ... ok
test audio::drift_compensation::metrics::tests::test_set_enabled ... ok
test audio::drift_compensation::resampler::tests::test_adaptive_resampler_new ... ok
test audio::drift_compensation::resampler::tests::test_empty_input ... ok
test audio::drift_compensation::resampler::tests::test_adaptive_resampler_slew_limiting ... ok
test audio::drift_compensation::resampler::tests::test_adaptive_resampler_passthrough ... ok
test audio::drift_compensation::resampler::tests::test_adaptive_resampler_set_ratio ... ok
test audio::loader::tests::test_parse_samples ... ok
test audio::loader::tests::test_samples_to_chunks ... ok
test audio::mixer::tests::test_invalid_channels_fallback ... ok
test audio::mixer::tests::test_drift_compensation_enabled_by_default ... ok
test audio::mixer::tests::test_invalid_sample_rate_fallback ... ok
test audio::playback::tests::playback_command_debug ... ok
test audio::mixer::tests::test_mixer_clipping ... ok
test audio::mixer::tests::test_mixer_with_drift_compensation_disabled ... ok
test audio::mixer::tests::test_mixer_clear_resets_drift_state ... ok
test audio::playback::tests::playback_started_debug ... ok
test audio::mixer::tests::test_mixer_with_gains ... ok
test audio::mixer::tests::test_drift_compensation_toggle ... ok
test audio::mixer::tests::test_drift_metrics_snapshot ... ok
test audio::mixer::tests::test_mixer_basic ... ok
test audio::mixer::tests::test_mixer_single_source ... ok
test cache::tests::cache_stats_hit_rate ... ok
test commands::audio::helpers::tests::legacy_device_id_returns_none ... ok
test cache::tests::cache_key_formatting ... ok
test commands::audio::helpers::tests::device_id_kind_mismatch ... ok
test commands::audio_testing::tests::output_test_flag_prevents_concurrent_runs ... ok
test commands::audio_testing::tests::stop_input_test_clears_running_flag ... ok
test commands::audio_testing::tests::stop_input_test_is_idempotent ... ok
test commands::audio_testing::tests::stop_output_test_clears_running_flag ... ok
test commands::audio::helpers::tests::device_id_round_trip ... ok
test commands::audio::helpers::tests::device_id_with_index_is_parsed ... ok
test commands::audio_testing::tests::stop_output_test_is_idempotent ... ok
test commands::identity_tests::tests::complete_auth_login_result_failure ... ok
test commands::identity_tests::tests::complete_auth_login_result_success_with_user ... ok
test commands::identity_tests::tests::complete_auth_login_result_success_without_user ... ok
test commands::identity_tests::tests::get_current_user_result_construction ... ok
test commands::identity_tests::tests::get_current_user_result_local_defaults ... ok
test commands::audio_testing::tests::input_test_flag_prevents_concurrent_runs ... ok
test commands::identity_tests::tests::get_current_user_result_serialization ... ok
test commands::identity_tests::tests::complete_auth_login_result_serialization ... ok
test commands::identity_tests::tests::initiate_auth_login_result_construction ... ok
test commands::identity_tests::tests::initiate_auth_login_result_serialization ... ok
test commands::identity_tests::tests::list_workspaces_result_construction ... ok
test commands::identity_tests::tests::list_workspaces_result_empty ... ok
test commands::identity_tests::tests::logout_provider_list_all ... ok
test commands::identity_tests::tests::logout_provider_list_single ... ok
test commands::identity_tests::tests::logout_result_full_success ... ok
test commands::identity_tests::tests::logout_result_serialization ... ok
test commands::identity_tests::tests::logout_result_with_multiple_revocation_errors ... ok
test commands::identity_tests::tests::logout_result_partial_success ... ok
test commands::identity_tests::tests::logout_result_no_providers ... ok
test commands::identity_tests::tests::switch_workspace_matching_logic ... ok
test commands::identity_tests::tests::switch_workspace_result_failure ... ok
test commands::identity_tests::tests::switch_workspace_result_success ... ok
test cache::memory::tests::set_and_get ... ok
test cache::memory::tests::delete_removes_entry ... ok
test cache::memory::tests::exists_checks_presence ... ok
test commands::identity_tests::tests::workspace_info_construction ... ok
test cache::memory::tests::get_nonexistent_returns_none ... ok
test cache::memory::tests::stats_tracking ... ok
test cache::tests::noop_cache_returns_none ... ok
test commands::identity_tests::tests::workspace_info_serialization ... ok
test commands::identity_tests::tests::workspace_role_serialization ... ok
test commands::playback::audio::tests::trim_audio_buffer_returns_empty_if_start_after_end ... ok
test commands::playback::audio::tests::trim_audio_buffer_returns_original_for_zero_start ... ok
test commands::playback_tests::tests::playback_info_without_highlight ... ok
test commands::playback_tests::tests::playback_info_construction ... ok
test commands::playback_tests::tests::playback_state_default ... ok
test commands::playback_tests::tests::seek_position_with_zero_duration ... ok
test commands::playback_tests::tests::playback_state_serialization ... ok
test commands::playback_tests::tests::seek_position_clamping ... ok
test commands::playback_tests::tests::validate_finite_position ... ok
test commands::playback::audio::tests::trim_audio_buffer_skips_before_start_time ... ok
test commands::recording::tests::bytemuck_f32_to_bytes_handles_empty ... ok
test commands::recording::tests::bytemuck_f32_to_bytes_matches_manual_conversion ... ok
test commands::recording::tests::bytemuck_f32_to_bytes_size_is_correct ... ok
test commands::recording::tests::calculate_rms_handles_silence ... ok
test commands::recording::tests::calculate_rms_unit_signal ... ok
test commands::recording::tests::decode_input_device_id_accepts_input_ids ... ok
test commands::recording::tests::decode_input_device_id_rejects_output_ids ... ok
test commands::recording::tests::downmix_to_mono_averages_channels ... ok
test commands::recording_tests::tests::elapsed_seconds_formatting ... ok
test commands::recording_tests::tests::recording_state_transitions ... ok
test commands::recording::tests::downmix_to_mono_handles_partial_frames ... ok
test config::tests::cache_backend_parsing ... ok
test constants::tests::crypto_sizes_are_correct ... ok
test constants::tests::grpc_timeouts_are_reasonable ... ok
test commands::recording_tests::tests::now_timestamp_is_positive ... ok
test commands::recording::audio::tests::append_spool_chunks_writes_header_and_samples ... ok
test config::tests::default_config_is_valid ... ok
test audio::mixer::tests::test_mixer_buffer_overflow_recorded ... ok
test crypto::tests::crypto_manager_debug_format ... ok
test crypto::tests::crypto_manager_default_matches_new ... ok
test crypto::tests::crypto_manager_new_does_not_initialize ... ok
test audio::loader::tests::load_audio_file_rejects_truncated_payload ... ok
test commands::recording::tests::audio_file_roundtrip_matches_samples ... ok
test audio::loader::tests::load_audio_file_rejects_short_payload ... ok
test crypto::tests::test_different_nonces ... ok
test crypto::tests::test_encrypt_decrypt_with_key ... ok
test crypto::tests::test_hex_encode_decode ... ok
test error::tests::classify_already_connected_as_client ... ok
test error::tests::classify_annotation_not_found ... ok
test error::tests::classify_audio_capture_as_client ... ok
test error::tests::classify_audio_playback_as_client ... ok
test error::tests::classify_connection_error_as_network ... ok
test error::tests::classify_device_not_found_as_client ... ok
test error::tests::classify_encryption_as_client ... ok
test error::tests::classify_integration_not_found ... ok
test error::tests::classify_invalid_input ... ok
test error::tests::classify_grpc_unknown_as_retryable ... ok
test error::tests::classify_grpc_deadline_exceeded ... ok
test error::tests::classify_grpc_resource_exhausted ... ok
test error::tests::classify_grpc_internal ... ok
test error::tests::classify_grpc_unavailable_as_network ... ok
test error::tests::classify_grpc_not_found ... ok
test error::tests::classify_grpc_permission_denied ... ok
test error::tests::classify_grpc_unauthenticated ... ok
test error::tests::classify_grpc_invalid_argument ... ok
test error::tests::classify_invalid_operation ... ok
test error::tests::classify_meeting_not_found ... ok
test error::tests::classify_not_connected_as_client ... ok
test error::tests::classify_stream_as_client ... ok
test error::tests::classify_timeout_error ... ok
test error::tests::error_classification_serialization ... ok
test error::tests::error_classification_without_grpc_status ... ok
test events::tests::grpc_error_classification_is_preserved_in_event_payload ... ok
test grpc::client_tests::tests::annotation_construction ... ok
test grpc::client_tests::tests::annotation_type_from_i32 ... ok
test grpc::client_tests::tests::annotation_type_from_str ... ok
test grpc::client_tests::tests::audio_device_info_construction ... ok
test grpc::client_tests::tests::connection_state_reconnecting_tracks_attempts ... ok
test grpc::client_tests::tests::connection_state_transitions ... ok
test grpc::client_tests::tests::export_format_from_i32 ... ok
test grpc::client_tests::tests::export_format_from_str ... ok
test grpc::client_tests::tests::export_result_empty ... ok
test grpc::client_tests::tests::grpc_client_empty_endpoint ... ok
test grpc::client_tests::tests::grpc_client_endpoint_normalization ... ok
test grpc::client_tests::tests::grpc_client_initial_state ... ok
test grpc::client_tests::tests::job_status_from_i32 ... ok
test grpc::client_tests::tests::meeting_info_new ... ok
test grpc::client_tests::tests::meeting_info_stopped ... ok
test grpc::client_tests::tests::meeting_new ... ok
test grpc::client_tests::tests::meeting_state_from_i32 ... ok
test grpc::client_tests::tests::meeting_state_serialization ... ok
test grpc::client_tests::tests::priority_from_i32 ... ok
test grpc::client_tests::tests::meeting_to_info ... ok
test grpc::client_tests::tests::segment_construction ... ok
test grpc::client_tests::tests::server_info_default ... ok
test grpc::client_tests::tests::meeting_stopped ... ok
test grpc::client_tests::tests::timestamped_audio_construction ... ok
test grpc::client_tests::tests::update_type_from_i32 ... ok
test grpc::client_tests::tests::word_timing_construction ... ok
test grpc::proto_compliance_tests::tests::all_types_instantiable ... ok
test grpc::proto_compliance_tests::tests::annotation_type_count ... ok
test grpc::proto_compliance_tests::tests::annotation_type_ordinals ... ok
test grpc::proto_compliance_tests::tests::annotation_type_roundtrip ... ok
test grpc::proto_compliance_tests::tests::annotation_type_serialization ... ok
test grpc::proto_compliance_tests::tests::annotation_fields_match_proto ... ok
test grpc::proto_compliance_tests::tests::audio_chunk_fields_match_proto ... ok
test grpc::proto_compliance_tests::tests::action_item_fields_match_proto ... ok
test grpc::proto_compliance_tests::tests::diarization_job_status_fields_match_proto ... ok
test grpc::proto_compliance_tests::tests::export_format_count ... ok
test grpc::proto_compliance_tests::tests::export_format_ordinals ... ok
test grpc::proto_compliance_tests::tests::export_format_roundtrip ... ok
test grpc::proto_compliance_tests::tests::export_format_serialization ... ok
test grpc::proto_compliance_tests::tests::export_result_fields_match_proto ... ok
test grpc::proto_compliance_tests::tests::job_status_count ... ok
test grpc::proto_compliance_tests::tests::job_status_ordinals ... ok
test grpc::proto_compliance_tests::tests::job_status_roundtrip ... ok
test grpc::proto_compliance_tests::tests::job_status_serialization ... ok
test grpc::proto_compliance_tests::tests::key_point_fields_match_proto ... ok
test grpc::proto_compliance_tests::tests::list_meetings_response_fields_match_proto ... ok
test grpc::proto_compliance_tests::tests::meeting_state_count ... ok
test grpc::proto_compliance_tests::tests::meeting_state_roundtrip ... ok
test grpc::proto_compliance_tests::tests::meeting_state_serialization ... ok
test grpc::proto_compliance_tests::tests::priority_count ... ok
test grpc::proto_compliance_tests::tests::priority_ordinals ... ok
test grpc::proto_compliance_tests::tests::meeting_state_ordinals ... ok
test grpc::proto_compliance_tests::tests::priority_roundtrip ... ok
test grpc::proto_compliance_tests::tests::priority_serialization ... ok
test grpc::proto_compliance_tests::tests::rename_speaker_result_fields_match_proto ... ok
test grpc::proto_compliance_tests::tests::summary_fields_match_proto ... ok
test grpc::proto_compliance_tests::tests::transcript_update_fields_match_proto ... ok
test grpc::proto_compliance_tests::tests::segment_fields_match_proto ... ok
test grpc::proto_compliance_tests::tests::server_info_fields_match_proto ... ok
test grpc::proto_compliance_tests::tests::update_type_serialization ... ok
test grpc::proto_compliance_tests::tests::update_type_roundtrip ... ok
test grpc::proto_compliance_tests::tests::update_type_ordinals ... ok
test grpc::streaming::activity::tests::test_activity_atomic_updates_are_visible ... ok
test grpc::streaming::activity::tests::test_activity_resumption::case_1_resume_after_brief_inactivity ... ok
test grpc::proto_compliance_tests::tests::update_type_count ... ok
test grpc::streaming::activity::tests::test_activity_resumption::case_3_partial_resume ... ok
test grpc::streaming::activity::tests::test_activity_timestamp_monotonic ... ok
test grpc::streaming::activity::tests::test_configurable_inactivity_timeout::case_1_short_timeout_active ... ok
test grpc::streaming::activity::tests::test_configurable_inactivity_timeout::case_2_short_timeout_inactive ... ok
test grpc::proto_compliance_tests::tests::word_timing_fields_match_proto ... ok
test grpc::streaming::activity::tests::test_activity_resumption::case_2_resume_after_long_inactivity ... ok
test commands::recording_tests::tests::now_timestamp_is_increasing ... ok
test grpc::streaming::activity::tests::test_configurable_inactivity_timeout::case_5_long_timeout_active ... ok
test grpc::streaming::activity::tests::test_configurable_inactivity_timeout::case_3_medium_timeout_active ... ok
test grpc::streaming::activity::tests::test_configurable_inactivity_timeout::case_4_medium_timeout_inactive ... ok
test grpc::streaming::activity::tests::test_edge_cases::case_2_zero_inactivity ... ok
test grpc::streaming::activity::tests::test_edge_cases::case_3_exact_boundary ... ok
test grpc::streaming::activity::tests::test_edge_cases::case_1_zero_timeout ... ok
test grpc::streaming::activity::tests::test_inactivity_threshold_behavior::case_1_well_before_threshold ... ok
test grpc::streaming::activity::tests::test_edge_cases::case_4_one_ms_before ... ok
test grpc::streaming::activity::tests::test_configurable_inactivity_timeout::case_6_long_timeout_inactive ... ok
test grpc::streaming::activity::tests::test_inactivity_threshold_behavior::case_3_four_minutes_inactive ... ok
test grpc::streaming::activity::tests::test_inactivity_threshold_behavior::case_2_one_minute_inactive ... ok
test grpc::streaming::activity::tests::test_inactivity_threshold_behavior::case_5_exactly_at_threshold ... ok
test grpc::streaming::activity::tests::test_edge_cases::case_5_one_ms_after ... ok
test grpc::streaming::activity::tests::test_inactivity_threshold_behavior::case_4_just_before_threshold ... ok
test grpc::streaming::activity::tests::test_inactivity_threshold_behavior::case_7_well_past_threshold ... ok
test grpc::streaming::activity::tests::test_max_duration_threshold::case_1_just_started ... ok
test grpc::streaming::activity::tests::test_max_duration_threshold::case_3_one_hour ... ok
test grpc::streaming::activity::tests::test_max_duration_threshold::case_4_two_hours ... ok
test grpc::streaming::activity::tests::test_max_duration_threshold::case_5_three_hours ... ok
test grpc::streaming::activity::tests::test_long_meeting_with_breaks ... ok
test grpc::streaming::activity::tests::test_max_duration_threshold::case_2_fifteen_minutes ... ok
test grpc::streaming::activity::tests::test_max_duration_threshold::case_6_just_before_max ... ok
test grpc::streaming::activity::tests::test_max_duration_threshold::case_7_exactly_at_max ... ok
test grpc::streaming::activity::tests::test_max_duration_threshold::case_8_just_after_max ... ok
test grpc::streaming::activity::tests::test_max_duration_threshold::case_9_five_hours ... ok
test grpc::streaming::activity::tests::test_saturating_sub_handles_clock_skew ... ok
test grpc::streaming::activity::tests::test_inactivity_threshold_behavior::case_6_just_after_threshold ... ok
test grpc::streaming::activity::tests::test_typical_meeting_durations::case_1_standup ... ok
test grpc::streaming::activity::tests::test_typical_meeting_durations::case_2_sync ... ok
test grpc::streaming::activity::tests::test_typical_meeting_durations::case_3_planning ... ok
test grpc::streaming::activity::tests::test_typical_meeting_durations::case_4_workshop ... ok
test grpc::streaming::activity::tests::test_typical_meeting_durations::case_5_half_day ... ok
test grpc::streaming::activity::tests::test_typical_meeting_durations::case_6_long_workshop ... ok
test grpc::streaming::activity::tests::test_typical_meeting_durations::case_7_at_limit ... ok
test grpc::streaming::converters::tests::convert_segment_preserves_structure ... ok
test grpc::streaming::converters::tests::convert_word_preserves_fields ... ok
test grpc::streaming::manager::tests::test_stream_state_info_variants::case_1_idle ... ok
test grpc::streaming::manager::tests::test_stream_state_info_variants::case_3_active ... ok
test grpc::streaming::manager::tests::test_stream_state_info_variants::case_2_starting ... ok
test grpc::streaming::manager::tests::test_stream_state_info_variants::case_4_stopping ... ok
test helpers::tests::format_duration_long ... ok
test helpers::tests::clamp_works_correctly ... ok
test helpers::tests::format_duration_short ... ok
test helpers::tests::new_id_is_valid_uuid ... ok
test helpers::tests::new_id_is_unique ... ok
test helpers::tests::normalize_db_level_handles_zero_range ... ok
test helpers::tests::normalize_db_level_works ... ok
test helpers::tests::now_timestamp_is_positive ... ok
test helpers::tests::sanitize_filename_removes_invalid_chars ... ok
test identity::tests::identity_store_new_does_not_load ... ok
test identity::tests::stored_identity_default_is_local ... ok
test identity::tests::stored_identity_from_auth_is_not_local ... ok
test oauth_loopback::tests::test_parse_callback_error ... ok
test oauth_loopback::tests::test_parse_callback_missing_code ... ok
test oauth_loopback::tests::test_parse_callback_success ... ok
test oauth_loopback::tests::test_parse_callback_with_encoded_values ... ok
test oauth_loopback::tests::test_urlencoding_decode ... ok
test oauth_loopback::tests::test_urlencoding_decode_multibyte_utf8 ... ok
test state::shutdown::tests::double_shutdown_is_idempotent ... ok
test state::shutdown::tests::drop_triggers_shutdown ... ok
test state::shutdown::tests::shutdown_manager_signals_cancellation ... ok
test state::state_tests::audio_config_default ... ok
test state::state_tests::playback_info_without_highlight ... ok
test state::state_tests::playback_state_copy ... ok
test state::state_tests::playback_state_default ... ok
test state::state_tests::audio_config_uses_saved_device_ids ... ok
test state::state_tests::playback_info_construction ... ok
test state::state_tests::playback_state_equality ... ok
test state::state_tests::playback_state_serialization ... ok
test state::state_tests::trigger_action_serialization ... ok
test state::state_tests::trigger_decision_auto_start ... ok
test state::state_tests::trigger_signal_construction ... ok
test state::state_tests::trigger_decision_construction ... ok
test state::state_tests::trigger_source_serialization ... ok
test state::state_tests::trigger_state_add_dismissed_enforces_bounds ... ok
test state::state_tests::trigger_decision_serialization ... ok
test state::state_tests::trigger_signal_without_app_name ... ok
test state::state_tests::trigger_state_add_dismissed_prevents_duplicates ... ok
test triggers::tests::test_is_meeting_app ... ok
test triggers::tests::test_snooze ... ok
test triggers::tests::test_parse_linux_exec_command ... ok
test triggers::tests::test_snooze_invalid_duration ... ok
test triggers::tests::test_index_desktop_file_tracks_exec_and_wm_class ... ok
test commands::recording::session::processing::tests::smoke_long_recording_caps_buffer_samples ... ok
test grpc::client_tests::tests::identity_interceptor_omits_auth_when_not_authenticated ... ok
test grpc::client_tests::tests::identity_interceptor_injects_required_headers ... ok
test grpc::client_tests::tests::identity_interceptor_debug_output ... ok
test grpc::client_tests::tests::identity_interceptor_uses_local_defaults ... ok
test grpc::client_tests::tests::identity_interceptor_generates_unique_request_ids ... ok
test identity::tests::identity_manager_provides_defaults ... ok
test commands::recording_tests::tests::elapsed_seconds_calculation ... ok
test result: ok. 323 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.11s
Running unittests src/main.rs (target/debug/deps/noteflow_tauri-8a72594d19768762)
running 0 tests
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s
Running tests/async_robustness.rs (target/debug/deps/async_robustness-987a75be37133bf8)
running 11 tests
test cancellation_token_child_inherits_cancellation ... ok
test receiver_handles_sender_closure ... ok
test async_writes_are_serialized ... ok
test concurrent_async_reads_dont_block ... ok
test spawned_tasks_can_be_awaited ... ok
test graceful_shutdown_sequence ... ok
test atomic_state_transition_prevents_double_operation ... ok
test multiple_tasks_share_cancellation_token ... ok
test background_task_respects_cancellation ... ok
test dropped_handle_task_continues ... ok
test atomic_shutdown_flag_works ... ok
test result: ok. 11 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.12s
Running tests/device_integration.rs (target/debug/deps/device_integration-9a9fb58e27a1b7f0)
running 4 tests
test input_device_available ... ignored, requires physical audio devices
test input_device_supports_requested_format ... ignored, requires physical audio devices
test output_device_available ... ignored, requires physical audio devices
test output_device_supports_requested_format ... ignored, requires physical audio devices
test result: ok. 0 passed; 0 failed; 4 ignored; 0 measured; 0 filtered out; finished in 0.01s
Running tests/grpc_integration.rs (target/debug/deps/grpc_integration-f7f0512454ddbfdc)
running 20 tests
test integration::calendar_operations ... ignored, integration test; requires running server
test integration::cloud_consent_operations ... ignored, integration test; requires running server
test integration::cloud_summary_reconnect_with_jitter ... ignored, integration test; requires running server
test integration::connect_fails_gracefully_with_invalid_server ... ignored, integration test; requires running server
test integration::connect_with_none_uses_cached_endpoint ... ignored, integration test; requires running server
test integration::create_and_delete_meeting_roundtrip ... ignored, integration test; requires running server
test integration::diarization_operations ... ignored, integration test; requires running server
test integration::diarization_refinement_smoke ... ignored, integration test; requires running server
test integration::full_meeting_lifecycle ... ignored, integration test; requires running server
test integration::get_server_info_returns_valid_response ... ignored, integration test; requires running server
test integration::list_meetings_returns_valid_response ... ignored, integration test; requires running server
test integration::observability_operations ... ignored, integration test; requires running server
test integration::preferences_roundtrip ... ignored, integration test; requires running server
test integration::project_operations ... ignored, integration test; requires running server
test integration::real_audio_streaming_e2e ... ignored, integration test; requires running server
test integration::real_audio_streaming_extreme_duration ... ignored, integration test; requires running server
test integration::server_is_reachable ... ignored, integration test; requires running server
test integration::streaming_rejects_format_change_mid_stream ... ignored, integration test; requires running server
test integration::user_integrations_operations ... ignored, integration test; requires running server
test integration::webhook_crud_operations ... ignored, integration test; requires running server
test result: ok. 0 passed; 0 failed; 20 ignored; 0 measured; 0 filtered out; finished in 0.00s
Running tests/harness.rs (target/debug/deps/harness-c82ab06386f14f2a)
running 0 tests
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s
Running tests/robustness.rs (target/debug/deps/robustness-5daa78ffbb362da8)
running 16 tests
test annotation_time_validation ... ok
test crypto_constants_match_aes_gcm_requirements ... ok
test event_names_follow_convention ... ok
test format_duration_handles_edge_cases ... ok
test normalize_db_level_clamps_correctly ... ok
test pagination_validation_clamps_values ... ok
test playback_state_wrapper_default ... ok
test sanitize_filename_removes_dangerous_chars ... ok
test playback_state_wrapper_reset_clears_state ... ok
test sort_order_validation ... ok
test timeout_constants_are_reasonable ... ok
test trigger_source_serializes_correctly ... ok
test trigger_state_dismissed_triggers_deduplicates ... ok
test trigger_state_dismissed_triggers_bounded ... ok
test atomic_counter_is_thread_safe ... ok
test trigger_state_snooze_works ... ok
test result: ok. 16 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 1.10s
Running tests/setup_spawn_tests.rs (target/debug/deps/setup_spawn_tests-da5dcd9ab0b2f89d)
running 7 tests
test spawn_pattern_works_without_existing_runtime ... ok
test rapid_spawn_shutdown_is_stable ... ok
test local_runtime_in_spawned_thread_works ... ok
test broadcast_channel_works_with_local_runtime ... ok
test simulated_event_emitter_pattern ... ok
test tokio_select_works_in_local_runtime ... ok
test multiple_threads_with_local_runtimes ... ok
test result: ok. 7 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.11s
Doc-tests noteflow_lib
running 1 test
test src/crypto/mod.rs - crypto::CryptoManager (line 227) ... ignored
test result: ok. 0 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out; finished in 0.00s

View File

@@ -75,7 +75,9 @@ impl AudioCapture {
// Send complete buffers
while buffer.len() >= buffer_size {
let chunk: Vec<f32> = buffer.drain(..buffer_size).collect();
let _ = audio_tx.blocking_send(chunk);
if let Err(e) = audio_tx.blocking_send(chunk) {
tracing::warn!("Audio buffer full or channel closed: {}", e);
}
}
},
move |err| {
@@ -116,15 +118,20 @@ pub fn calculate_rms(samples: &[f32]) -> f32 {
}
/// Calculate RMS for i16 audio samples, normalized to f32 range [-1.0, 1.0].
///
/// Uses 32768.0 for normalization to handle the asymmetric i16 range (-32768 to 32767)
/// symmetrically, ensuring values stay within [-1.0, 1.0].
pub fn calculate_rms_i16(samples: &[i16]) -> f32 {
if samples.is_empty() {
return 0.0;
}
const I16_NORMALIZATION_FACTOR: f32 = 32768.0;
let sum_squares: f32 = samples
.iter()
.map(|&s| {
let normalized = s as f32 / i16::MAX as f32;
let normalized = s as f32 / I16_NORMALIZATION_FACTOR;
normalized * normalized
})
.sum();
@@ -158,6 +165,130 @@ pub fn rms_to_db(rms: f32) -> f32 {
// Note: Use helpers::normalize_db_level() for normalizing dB to 0.0-1.0 range
#[cfg(test)]
mod tests {
    use super::*;

    // --- Shared fixtures ---------------------------------------------------
    // Number of samples used in each generated test signal.
    const SAMPLE_COUNT: usize = 100;
    const SILENCE_SAMPLE: f32 = 0.0;
    const UNIT_SAMPLE: f32 = 1.0;
    const QUIET_SAMPLE: f32 = 0.001;
    const LOW_SAMPLE: f32 = 0.01;
    const BOOST_SAMPLE: f32 = 0.02;
    // Midpoint of the u16 range; presumably mapped to ~zero amplitude by
    // calculate_rms_u16 — confirm against that function's implementation.
    const MIDPOINT_U16: u16 = 32768;
    const NEAR_ZERO_THRESHOLD: f32 = 0.001;

    // --- calculate_rms (f32 input) -----------------------------------------
    #[test]
    fn calculate_rms_empty_returns_zero() {
        assert_eq!(calculate_rms(&[]), 0.0);
    }

    #[test]
    fn calculate_rms_silence_returns_zero() {
        let samples = vec![SILENCE_SAMPLE; SAMPLE_COUNT];
        assert_eq!(calculate_rms(&samples), 0.0);
    }

    #[test]
    fn calculate_rms_unit_signal() {
        // RMS of a constant 1.0 signal is exactly 1.0.
        let samples = vec![UNIT_SAMPLE; SAMPLE_COUNT];
        assert!((calculate_rms(&samples) - UNIT_SAMPLE).abs() < f32::EPSILON);
    }

    // --- calculate_rms_i16 -------------------------------------------------
    #[test]
    fn calculate_rms_i16_empty_returns_zero() {
        assert_eq!(calculate_rms_i16(&[]), 0.0);
    }

    #[test]
    fn calculate_rms_i16_min_value_within_bounds() {
        // i16::MIN (-32768) is the asymmetric extreme; normalization by
        // 32768.0 should keep the RMS within [-1.0, 1.0].
        let samples = vec![i16::MIN; SAMPLE_COUNT];
        let rms = calculate_rms_i16(&samples);
        assert!(rms <= 1.0, "RMS {} should be <= 1.0", rms);
    }

    // --- calculate_rms_u16 -------------------------------------------------
    #[test]
    fn calculate_rms_u16_empty_returns_zero() {
        assert_eq!(calculate_rms_u16(&[]), 0.0);
    }

    #[test]
    fn calculate_rms_u16_midpoint_near_zero() {
        let samples = vec![MIDPOINT_U16; SAMPLE_COUNT];
        let rms = calculate_rms_u16(&samples);
        assert!(rms < NEAR_ZERO_THRESHOLD, "Midpoint RMS {} should be near 0", rms);
    }

    // --- rms_to_db ----------------------------------------------------------
    #[test]
    fn rms_to_db_silence_returns_floor() {
        // Zero RMS clamps to the configured dB floor rather than -inf.
        assert_eq!(rms_to_db(0.0), audio_config::MIN_DB_LEVEL);
    }

    #[test]
    fn rms_to_db_negative_returns_floor() {
        // Negative RMS is invalid input; it also clamps to the floor.
        assert_eq!(rms_to_db(-0.5), audio_config::MIN_DB_LEVEL);
    }

    #[test]
    fn rms_to_db_unit_signal_is_zero() {
        // Full-scale signal corresponds to 0 dBFS.
        assert!((rms_to_db(1.0) - 0.0).abs() < f32::EPSILON);
    }

    // --- soft_clip ----------------------------------------------------------
    #[test]
    fn soft_clip_linear_region() {
        // Small amplitudes pass through unchanged (linear region).
        assert!((soft_clip(0.3) - 0.3).abs() < f32::EPSILON);
        assert!((soft_clip(-0.3) - (-0.3)).abs() < f32::EPSILON);
    }

    #[test]
    fn soft_clip_saturation_region() {
        // Over-range input is compressed below full scale but not crushed.
        let result = soft_clip(2.0);
        assert!(result < 1.0, "Clipped value {} should be < 1.0", result);
        assert!(result > 0.5, "Clipped value {} should be > 0.5", result);
    }

    #[test]
    fn soft_clip_negative_saturation() {
        // Symmetric behavior for negative over-range input.
        let result = soft_clip(-2.0);
        assert!(result > -1.0, "Clipped value {} should be > -1.0", result);
        assert!(result < -0.5, "Clipped value {} should be < -0.5", result);
    }

    // --- normalize_for_asr --------------------------------------------------
    #[test]
    fn normalize_for_asr_empty_returns_unity_gain() {
        let mut samples: Vec<f32> = vec![];
        let gain = normalize_for_asr(&mut samples);
        assert_eq!(gain, 1.0);
    }

    #[test]
    fn normalize_for_asr_silence_returns_unity_gain() {
        // Near-silence is presumably below the normalization threshold and
        // left untouched — confirm against normalize_for_asr.
        let mut samples = vec![QUIET_SAMPLE; SAMPLE_COUNT];
        let gain = normalize_for_asr(&mut samples);
        assert_eq!(gain, 1.0);
    }

    #[test]
    fn normalize_for_asr_boosts_quiet_audio() {
        let mut samples = vec![LOW_SAMPLE; SAMPLE_COUNT];
        let gain = normalize_for_asr(&mut samples);
        assert!(gain > 1.0, "Gain {} should be > 1.0 for quiet audio", gain);
    }

    #[test]
    fn normalize_for_asr_respects_max_gain() {
        // One louder sample caps the achievable boost at MAX_GAIN.
        let mut samples = vec![QUIET_SAMPLE; SAMPLE_COUNT];
        samples[0] = BOOST_SAMPLE;
        let gain = normalize_for_asr(&mut samples);
        assert!(
            gain <= MAX_GAIN,
            "Gain {} should be <= MAX_GAIN {}",
            gain,
            MAX_GAIN
        );
    }
}
// ============================================================================
// Audio Normalization for ASR
// Boosts quiet audio (e.g., system loopback) to improve speech recognition.

View File

@@ -3,7 +3,7 @@ use std::sync::Arc;
use tauri::State;
use crate::error::Result;
use crate::grpc::types::analytics::{AnalyticsOverview, ListSpeakerStatsResult};
use crate::grpc::types::analytics::{AnalyticsOverview, EntityAnalytics, ListSpeakerStatsResult};
use crate::state::AppState;
#[tauri::command(rename_all = "snake_case")]
@@ -33,3 +33,18 @@ pub async fn list_speaker_stats(
.list_speaker_stats(start_time, end_time, project_id, project_ids)
.await
}
/// Tauri command: fetch entity analytics for a time range, optionally
/// filtered by one or many projects. Thin pass-through to the gRPC client.
#[tauri::command(rename_all = "snake_case")]
pub async fn get_entity_analytics(
    state: State<'_, Arc<AppState>>,
    start_time: f64,
    end_time: f64,
    project_id: Option<String>,
    project_ids: Option<Vec<String>>,
    top_limit: Option<i32>,
) -> Result<EntityAnalytics> {
    // Delegate directly; all defaulting happens in the client layer.
    let client = &state.grpc_client;
    client
        .get_entity_analytics(start_time, end_time, project_id, project_ids, top_limit)
        .await
}

View File

@@ -25,6 +25,7 @@ pub async fn create_meeting(
/// List meetings with optional filters.
#[tauri::command(rename_all = "snake_case")]
#[allow(clippy::too_many_arguments)]
pub async fn list_meetings(
state: State<'_, Arc<AppState>>,
states: Option<Vec<i32>>,
@@ -33,16 +34,15 @@ pub async fn list_meetings(
sort_order: Option<i32>,
project_id: Option<String>,
project_ids: Option<Vec<String>>,
include_segments: Option<bool>,
) -> Result<ListMeetingsResponse> {
// Validate and clamp pagination parameters to non-negative values
let validated_limit = limit
.unwrap_or(pagination::DEFAULT_LIMIT)
.clamp(pagination::MIN_LIMIT, pagination::MAX_MEETINGS_LIMIT);
let validated_offset = offset.unwrap_or(0).max(0);
// Sort order: 1 = newest first (desc), -1 = oldest first (asc)
let validated_sort = match sort_order.unwrap_or(1) {
-1 => -1,
_ => 1, // Default to newest first for any invalid value
_ => 1,
};
state
@@ -54,6 +54,7 @@ pub async fn list_meetings(
validated_sort,
project_id,
project_ids.unwrap_or_default(),
include_segments.unwrap_or(false),
)
.await
}

View File

@@ -1,7 +1,8 @@
use crate::error::Result;
use crate::grpc::noteflow as pb;
use crate::grpc::types::analytics::{
AnalyticsOverview, DailyMeetingStats, ListSpeakerStatsResult, SpeakerStat,
AnalyticsOverview, DailyMeetingStats, EntityAnalytics, EntityCategoryStat,
ListSpeakerStatsResult, SpeakerStat, TopEntity,
};
use super::core::GrpcClient;
@@ -57,6 +58,42 @@ impl GrpcClient {
speakers: response.speakers.into_iter().map(map_speaker_stat).collect(),
})
}
/// Fetch entity analytics over [start_time, end_time] and map the wire
/// response into domain types. `top_limit` defaults to 20; an absent
/// `project_ids` is sent as an empty list (no filter).
pub async fn get_entity_analytics(
    &self,
    start_time: f64,
    end_time: f64,
    project_id: Option<String>,
    project_ids: Option<Vec<String>>,
    top_limit: Option<i32>,
) -> Result<EntityAnalytics> {
    // Assemble the request up front so the RPC call reads cleanly.
    let request = pb::GetEntityAnalyticsRequest {
        start_time,
        end_time,
        project_id,
        project_ids: project_ids.unwrap_or_default(),
        top_limit: top_limit.unwrap_or(20),
    };
    let mut client = self.get_client()?;
    let pb::GetEntityAnalyticsResponse {
        by_category,
        top_entities,
        total_entities,
        total_mentions,
    } = client.get_entity_analytics(request).await?.into_inner();
    // Convert wire stats into domain structs field by field.
    Ok(EntityAnalytics {
        by_category: by_category.into_iter().map(map_entity_category_stat).collect(),
        top_entities: top_entities.into_iter().map(map_top_entity).collect(),
        total_entities,
        total_mentions,
    })
}
}
fn map_daily_stats(proto: pb::DailyMeetingStatsProto) -> DailyMeetingStats {
@@ -78,3 +115,20 @@ fn map_speaker_stat(proto: pb::SpeakerStatProto) -> SpeakerStat {
avg_confidence: proto.avg_confidence,
}
}
/// Field-for-field conversion from the wire type to the domain type.
fn map_entity_category_stat(proto: pb::EntityCategoryStatProto) -> EntityCategoryStat {
    let pb::EntityCategoryStatProto { category, count, total_mentions } = proto;
    EntityCategoryStat { category, count, total_mentions }
}
/// Field-for-field conversion from the wire type to the domain type.
fn map_top_entity(proto: pb::TopEntityProto) -> TopEntity {
    let pb::TopEntityProto { text, category, mention_count, meeting_count } = proto;
    TopEntity { text, category, mention_count, meeting_count }
}

View File

@@ -69,6 +69,7 @@ impl GrpcClient {
/// List meetings with filters.
#[instrument(skip(self))]
#[allow(clippy::too_many_arguments)]
pub async fn list_meetings(
&self,
states: Vec<i32>,
@@ -77,6 +78,7 @@ impl GrpcClient {
sort_order: i32,
project_id: Option<String>,
project_ids: Vec<String>,
include_segments: bool,
) -> Result<ListMeetingsResponse> {
let mut client = self.get_client()?;
let response = client
@@ -87,6 +89,7 @@ impl GrpcClient {
sort_order,
project_id,
project_ids,
include_segments,
})
.await?
.into_inner();

View File

@@ -185,6 +185,9 @@ pub struct ListMeetingsRequest {
/// Optional project filter for multiple projects (overrides project_id when provided)
#[prost(string, repeated, tag = "6")]
pub project_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// Whether to include full transcript segments (default: false)
#[prost(bool, tag = "7")]
pub include_segments: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListMeetingsResponse {
@@ -2524,6 +2527,50 @@ pub struct ListSpeakerStatsResponse {
#[prost(message, repeated, tag = "1")]
pub speakers: ::prost::alloc::vec::Vec<SpeakerStatProto>,
}
/// Wire type: per-category entity rollup returned by GetEntityAnalytics.
/// NOTE(review): prost-generated message — field tags must stay stable.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EntityCategoryStatProto {
    /// Entity category label.
    #[prost(string, tag = "1")]
    pub category: ::prost::alloc::string::String,
    /// Count for this category — presumably distinct entities; confirm
    /// against the server implementation.
    #[prost(int32, tag = "2")]
    pub count: i32,
    /// Total mentions across entities in this category.
    #[prost(int32, tag = "3")]
    pub total_mentions: i32,
}
/// Wire type: a single high-frequency entity returned by GetEntityAnalytics.
/// NOTE(review): prost-generated message — field tags must stay stable.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TopEntityProto {
    /// Surface text of the entity.
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
    /// Category label for the entity.
    #[prost(string, tag = "2")]
    pub category: ::prost::alloc::string::String,
    /// How many times this entity was mentioned.
    #[prost(int32, tag = "3")]
    pub mention_count: i32,
    /// How many meetings this entity appeared in.
    #[prost(int32, tag = "4")]
    pub meeting_count: i32,
}
/// Wire type: request for the GetEntityAnalytics RPC.
/// NOTE(review): prost-generated message — field tags must stay stable.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetEntityAnalyticsRequest {
    /// Range start — presumably a Unix timestamp in seconds; confirm
    /// against the proto definition.
    #[prost(double, tag = "1")]
    pub start_time: f64,
    /// Range end (same unit as start_time).
    #[prost(double, tag = "2")]
    pub end_time: f64,
    /// Optional single-project filter.
    #[prost(string, optional, tag = "3")]
    pub project_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional multi-project filter; empty means no filter.
    #[prost(string, repeated, tag = "4")]
    pub project_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// Maximum number of top entities to return.
    #[prost(int32, tag = "5")]
    pub top_limit: i32,
}
/// Wire type: response for the GetEntityAnalytics RPC.
/// NOTE(review): prost-generated message — field tags must stay stable.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetEntityAnalyticsResponse {
    /// Per-category rollups.
    #[prost(message, repeated, tag = "1")]
    pub by_category: ::prost::alloc::vec::Vec<EntityCategoryStatProto>,
    /// Highest-frequency entities, capped by the request's top_limit.
    #[prost(message, repeated, tag = "2")]
    pub top_entities: ::prost::alloc::vec::Vec<TopEntityProto>,
    /// Total distinct entities in range.
    #[prost(int32, tag = "3")]
    pub total_entities: i32,
    /// Total mentions in range.
    #[prost(int32, tag = "4")]
    pub total_mentions: i32,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum UpdateType {
@@ -5190,6 +5237,32 @@ pub mod note_flow_service_client {
.insert(GrpcMethod::new("noteflow.NoteFlowService", "ListSpeakerStats"));
self.inner.unary(req, path, codec).await
}
/// Unary call to `/noteflow.NoteFlowService/GetEntityAnalytics`.
/// NOTE(review): tonic-generated client stub — do not hand-edit logic.
pub async fn get_entity_analytics(
    &mut self,
    request: impl tonic::IntoRequest<super::GetEntityAnalyticsRequest>,
) -> std::result::Result<
    tonic::Response<super::GetEntityAnalyticsResponse>,
    tonic::Status,
> {
    // Wait for the underlying channel to be ready before dispatching.
    self.inner
        .ready()
        .await
        .map_err(|e| {
            tonic::Status::unknown(
                format!("Service was not ready: {}", e.into()),
            )
        })?;
    let codec = tonic::codec::ProstCodec::default();
    let path = http::uri::PathAndQuery::from_static(
        "/noteflow.NoteFlowService/GetEntityAnalytics",
    );
    let mut req = request.into_request();
    // Tag the request with the gRPC method for interceptors/telemetry.
    req.extensions_mut()
        .insert(
            GrpcMethod::new("noteflow.NoteFlowService", "GetEntityAnalytics"),
        );
    self.inner.unary(req, path, codec).await
}
/// Project membership management (Sprint 18)
pub async fn add_project_member(
&mut self,

View File

@@ -32,3 +32,26 @@ pub struct SpeakerStat {
pub struct ListSpeakerStatsResult {
pub speakers: Vec<SpeakerStat>,
}
/// Domain type: per-category entity rollup (serialized to the frontend).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntityCategoryStat {
    // Category label for the grouped entities.
    pub category: String,
    // Count for this category — presumably distinct entities; confirm
    // against the server implementation.
    pub count: i32,
    // Total mentions across entities in this category.
    pub total_mentions: i32,
}
/// Domain type: one high-frequency entity (serialized to the frontend).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TopEntity {
    // Surface text of the entity.
    pub text: String,
    // Category label.
    pub category: String,
    // Number of mentions across all meetings in range.
    pub mention_count: i32,
    // Number of meetings the entity appeared in.
    pub meeting_count: i32,
}
/// Domain type: aggregate entity analytics for a time range.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntityAnalytics {
    // Per-category rollups.
    pub by_category: Vec<EntityCategoryStat>,
    // Highest-frequency entities, capped by the request's top_limit.
    pub top_entities: Vec<TopEntity>,
    // Total distinct entities in range.
    pub total_entities: i32,
    // Total mentions in range.
    pub total_mentions: i32,
}

View File

@@ -199,9 +199,10 @@ macro_rules! app_invoke_handler {
// Tasks (2 commands) - Bugfinder Sprint
commands::list_tasks,
commands::update_task,
// Analytics (2 commands) - Bugfinder Sprint
// Analytics (3 commands) - Bugfinder Sprint
commands::get_analytics_overview,
commands::list_speaker_stats,
commands::get_entity_analytics,
]
};
}

View File

@@ -412,8 +412,8 @@ mod integration {
.await
.expect("Failed to connect");
// list_meetings(states, limit, offset, sort_order, project_id, project_ids)
let result = client.list_meetings(vec![], 10, 0, 0, None, vec![]).await;
// list_meetings(states, limit, offset, sort_order, project_id, project_ids, include_segments)
let result = client.list_meetings(vec![], 10, 0, 0, None, vec![], false).await;
assert!(
result.is_ok(),
"Failed to list meetings: {:?}",

View File

@@ -0,0 +1,22 @@
import { describe, expect, it } from 'vitest';
import { cachedAppsAPI } from './apps';
// Tests for the cached (offline, read-only) apps API facade.
describe('cachedAppsAPI', () => {
  it('returns empty installed apps with paging defaults', async () => {
    const response = await cachedAppsAPI.listInstalledApps();
    expect(response.apps).toEqual([]);
    expect(response.total).toBe(0);
    expect(response.page).toBe(0);
    expect(response.page_size).toBe(50);
    expect(response.has_more).toBe(false);
    // Explicit paging options are echoed back even though the list is empty.
    const responseWithOptions = await cachedAppsAPI.listInstalledApps({ page: 2, pageSize: 10 });
    expect(responseWithOptions.page).toBe(2);
    expect(responseWithOptions.page_size).toBe(10);
  });
  it('rejects cache invalidation in cached mode', async () => {
    // Mutations are blocked in cached mode; the rejection message is the contract.
    await expect(cachedAppsAPI.invalidateAppCache()).rejects.toThrow('Cached read-only mode');
  });
});

View File

@@ -0,0 +1,34 @@
import { describe, expect, it } from 'vitest';
import { cachedAudioAPI } from './audio';
// Tests for the cached (offline, read-only) audio API facade.
describe('cachedAudioAPI', () => {
  it('returns empty devices and default environment info', async () => {
    await expect(cachedAudioAPI.listAudioDevices()).resolves.toEqual([]);
    await expect(cachedAudioAPI.getDefaultAudioDevice(true)).resolves.toBeNull();
    // Offline environment reports no devices and no server connectivity.
    await expect(cachedAudioAPI.checkTestEnvironment()).resolves.toEqual({
      hasInputDevices: false,
      hasVirtualDevice: false,
      inputDevices: [],
      isServerConnected: false,
      canRunAudioTests: false,
    });
  });
  it('rejects mutating audio operations', async () => {
    // All write/side-effecting calls reject with the read-only sentinel message.
    await expect(cachedAudioAPI.selectAudioDevice('device', true)).rejects.toThrow(
      'Cached read-only mode'
    );
    await expect(
      cachedAudioAPI.injectTestAudio('meeting', {
        isInput: true,
        volumeDb: -12,
        durationSeconds: 1,
      })
    ).rejects.toThrow('Cached read-only mode');
    await expect(cachedAudioAPI.injectTestTone('meeting', 440, 1, 16000)).rejects.toThrow(
      'Cached read-only mode'
    );
  });
});

View File

@@ -0,0 +1,89 @@
import { describe, expect, it, vi } from 'vitest';
import { cachedProjectsAPI } from './projects';
import { offlineProjects } from './defaults';
// Tests for the cached (offline, read-only) projects API facade.
// Reads come from the offlineProjects fixture; all writes must reject.
describe('cachedProjectsAPI', () => {
  it('returns projects from offline cache and handles missing data', async () => {
    // The fixture may be empty, so lookups by id/slug are guarded.
    const project = offlineProjects.projects[0];
    const workspaceId = project?.workspace_id ?? 'workspace-1';
    const list = await cachedProjectsAPI.listProjects({ workspace_id: workspaceId });
    expect(list.total_count).toBe(list.projects.length);
    if (project) {
      const fetched = await cachedProjectsAPI.getProject({ project_id: project.id });
      expect(fetched.id).toBe(project.id);
      const bySlug = await cachedProjectsAPI.getProjectBySlug({
        workspace_id: project.workspace_id,
        slug: project.slug,
      });
      expect(bySlug.id).toBe(project.id);
    }
    // Unknown ids reject with the cache-miss message, not a silent null.
    await expect(
      cachedProjectsAPI.getProject({ project_id: 'missing' })
    ).rejects.toThrow('Project not available in offline cache.');
    await expect(
      cachedProjectsAPI.getProjectBySlug({
        workspace_id: 'missing',
        slug: 'missing',
      })
    ).rejects.toThrow('Project not available in offline cache.');
  });
  it('returns active project or throws when none available', async () => {
    const workspaceId = offlineProjects.projects[0]?.workspace_id ?? 'workspace-1';
    const active = await cachedProjectsAPI.getActiveProject({ workspace_id: workspaceId });
    expect(active.project_id).toBeDefined();
    expect(active.project).toBeDefined();
    // Re-import the module with an empty fixture to exercise the no-project path.
    vi.resetModules();
    vi.doMock('./defaults', () => ({
      offlineProjects: { projects: [], total_count: 0 },
    }));
    const { cachedProjectsAPI: emptyAPI } = await import('./projects');
    await expect(emptyAPI.getActiveProject({ workspace_id: workspaceId })).rejects.toThrow(
      'No project available in offline cache.'
    );
  });
  it('rejects write operations and provides empty members', async () => {
    await expect(
      cachedProjectsAPI.createProject({ workspace_id: 'w1', name: 'x' })
    ).rejects.toThrow(
      'Cached read-only mode'
    );
    await expect(cachedProjectsAPI.updateProject({ project_id: 'p1', name: 'y' })).rejects.toThrow(
      'Cached read-only mode'
    );
    await expect(cachedProjectsAPI.archiveProject('p1')).rejects.toThrow('Cached read-only mode');
    await expect(cachedProjectsAPI.restoreProject('p1')).rejects.toThrow('Cached read-only mode');
    await expect(cachedProjectsAPI.deleteProject('p1')).rejects.toThrow('Cached read-only mode');
    await expect(
      cachedProjectsAPI.addProjectMember({ project_id: 'p1', user_id: 'u1', role: 'viewer' })
    ).rejects.toThrow('Cached read-only mode');
    await expect(
      cachedProjectsAPI.updateProjectMemberRole({
        project_id: 'p1',
        user_id: 'u1',
        role: 'editor',
      })
    ).rejects.toThrow('Cached read-only mode');
    await expect(
      cachedProjectsAPI.removeProjectMember({ project_id: 'p1', user_id: 'u1' })
    ).rejects.toThrow('Cached read-only mode');
    // Member listing is a read and resolves to an empty page.
    await expect(
      cachedProjectsAPI.listProjectMembers({ project_id: 'p1', limit: 20, offset: 0 })
    ).resolves.toEqual({ members: [], total_count: 0 });
    // setActiveProject is treated as a no-op in cached mode, not an error.
    await expect(
      cachedProjectsAPI.setActiveProject({ workspace_id: 'w1', project_id: 'p1' })
    ).resolves.toBeUndefined();
  });
});

View File

@@ -0,0 +1,39 @@
import { describe, expect, it } from 'vitest';
import { cachedTemplatesAPI } from './templates';
// Tests for the cached (offline, read-only) summarization templates API facade.
describe('cachedTemplatesAPI', () => {
  it('returns empty lists and consent status', async () => {
    const templates = await cachedTemplatesAPI.listSummarizationTemplates({ workspace_id: 'w1' });
    expect(templates.total_count).toBe(0);
    const versions = await cachedTemplatesAPI.listSummarizationTemplateVersions({
      template_id: 't1',
    });
    expect(versions.versions).toEqual([]);
    // Cloud consent defaults to not granted when offline.
    const consent = await cachedTemplatesAPI.getCloudConsentStatus();
    expect(consent.consentGranted).toBe(false);
  });
  it('rejects template mutations and throws for get', async () => {
    // Single-template fetch has its own offline message, distinct from writes.
    await expect(
      cachedTemplatesAPI.getSummarizationTemplate({ template_id: 't1' })
    ).rejects.toThrow('Summarization templates are unavailable in offline mode.');
    await expect(
      cachedTemplatesAPI.createSummarizationTemplate({ workspace_id: 'w1', name: 'x' })
    ).rejects.toThrow('Cached read-only mode');
    await expect(
      cachedTemplatesAPI.updateSummarizationTemplate({ template_id: 't1', name: 'x' })
    ).rejects.toThrow('Cached read-only mode');
    await expect(
      cachedTemplatesAPI.archiveSummarizationTemplate({ template_id: 't1' })
    ).rejects.toThrow('Cached read-only mode');
    await expect(
      cachedTemplatesAPI.restoreSummarizationTemplateVersion({ template_version_id: 'v1' })
    ).rejects.toThrow('Cached read-only mode');
    await expect(cachedTemplatesAPI.grantCloudConsent()).rejects.toThrow('Cached read-only mode');
    await expect(cachedTemplatesAPI.revokeCloudConsent()).rejects.toThrow('Cached read-only mode');
  });
});

View File

@@ -0,0 +1,18 @@
import { describe, expect, it } from 'vitest';
import { cachedTriggersAPI } from './triggers';
// Tests for the cached (offline, read-only) triggers API facade.
describe('cachedTriggersAPI', () => {
  it('returns trigger status from offline cache', async () => {
    // Offline default: triggers disabled and not snoozed.
    const status = await cachedTriggersAPI.getTriggerStatus();
    expect(status).toEqual({ enabled: false, is_snoozed: false });
  });
  it('rejects trigger mutations in cached mode', async () => {
    await expect(cachedTriggersAPI.setTriggerEnabled(true)).rejects.toThrow('Cached read-only mode');
    await expect(cachedTriggersAPI.snoozeTriggers(10)).rejects.toThrow('Cached read-only mode');
    await expect(cachedTriggersAPI.resetSnooze()).rejects.toThrow('Cached read-only mode');
    await expect(cachedTriggersAPI.dismissTrigger()).rejects.toThrow('Cached read-only mode');
    await expect(cachedTriggersAPI.acceptTrigger('Title')).rejects.toThrow('Cached read-only mode');
  });
});

View File

@@ -125,6 +125,12 @@ import type {
SetHuggingFaceTokenResult,
HuggingFaceTokenStatus,
ValidateHuggingFaceTokenResult,
ListTasksResponse,
UpdateTaskRequest,
Task,
AnalyticsOverview,
ListSpeakerStatsResponse,
EntityAnalytics,
} from '../../types';
// In-memory store
@@ -2004,4 +2010,51 @@ export const mockAPI: NoteFlowAPI = {
errorMessage: 'No token configured',
};
},
// Mock implementations for the tasks/analytics surface of NoteFlowAPI.
// Each call awaits delay(100) — presumably a latency shim shared with the
// other mock endpoints; confirm against delay's definition.
async listTasks(): Promise<ListTasksResponse> {
  await delay(100);
  return { tasks: [], total_count: 0 };
},
async updateTask(request: UpdateTaskRequest): Promise<Task> {
  await delay(100);
  // Echo the request back as the "updated" task, with defaults for
  // fields the request omits.
  return {
    id: request.task_id,
    meeting_id: null,
    action_item_id: null,
    text: request.text ?? '',
    status: request.status ?? 'open',
    assignee_person_id: request.assignee_person_id ?? null,
    due_date: request.due_date ?? null,
    priority: request.priority ?? 0,
    completed_at: null,
  };
},
async getAnalyticsOverview(): Promise<AnalyticsOverview> {
  await delay(100);
  // Empty analytics: no meetings recorded in the mock store.
  return {
    daily: [],
    total_meetings: 0,
    total_duration: 0,
    total_words: 0,
    total_segments: 0,
    speaker_count: 0,
  };
},
async listSpeakerStats(): Promise<ListSpeakerStatsResponse> {
  await delay(100);
  return { speakers: [] };
},
async getEntityAnalytics(): Promise<EntityAnalytics> {
  await delay(100);
  // Empty entity analytics matching the zeroed overview above.
  return {
    by_category: [],
    top_entities: [],
    total_entities: 0,
    total_mentions: 0,
  };
},
};

View File

@@ -26,6 +26,7 @@ describe('tauri-adapter mapping (core)', () => {
sort_order: 1,
project_id: undefined,
project_ids: [],
include_segments: false,
});
});

View File

@@ -194,9 +194,11 @@ describe('tauri-adapter mapping (transcription)', () => {
expect(onError).toHaveBeenCalledWith(
expect.objectContaining({
code: 'stream_close_failed',
message: expect.stringContaining('Failed to stop recording'),
})
);
const firstCall = onError.mock.calls[0]?.[0] as { message?: unknown } | undefined;
const message = typeof firstCall?.message === 'string' ? firstCall.message : String(firstCall?.message ?? '');
expect(message).toContain('Failed to stop recording');
expect(unlisten).toHaveBeenCalledTimes(1);
});

View File

@@ -148,6 +148,7 @@ export const TauriCommands = {
// Analytics (Bugfinder Sprint)
GET_ANALYTICS_OVERVIEW: 'get_analytics_overview',
LIST_SPEAKER_STATS: 'list_speaker_stats',
GET_ENTITY_ANALYTICS: 'get_entity_analytics',
} as const;
/**

View File

@@ -1,6 +1,8 @@
import type {
AnalyticsOverview,
AnalyticsOverviewRequest,
EntityAnalytics,
EntityAnalyticsRequest,
ListSpeakerStatsRequest,
ListSpeakerStatsResponse,
} from '../../../types';
@@ -10,7 +12,7 @@ import type { TauriInvoke } from '../types';
export function createAnalyticsApi(
invoke: TauriInvoke
): Pick<NoteFlowAPI, 'getAnalyticsOverview' | 'listSpeakerStats'> {
): Pick<NoteFlowAPI, 'getAnalyticsOverview' | 'listSpeakerStats' | 'getEntityAnalytics'> {
return {
async getAnalyticsOverview(request: AnalyticsOverviewRequest): Promise<AnalyticsOverview> {
return invoke<AnalyticsOverview>(TauriCommands.GET_ANALYTICS_OVERVIEW, {
@@ -28,5 +30,14 @@ export function createAnalyticsApi(
project_ids: request.project_ids,
});
},
async getEntityAnalytics(request: EntityAnalyticsRequest): Promise<EntityAnalytics> {
return invoke<EntityAnalytics>(TauriCommands.GET_ENTITY_ANALYTICS, {
start_time: request.start_time,
end_time: request.end_time,
project_id: request.project_id,
project_ids: request.project_ids,
top_limit: request.top_limit,
});
},
};
}

View File

@@ -64,6 +64,7 @@ export function createMeetingApi(
sort_order: sortOrderToGrpcEnum(request.sort_order),
project_id: normalizeProjectId(request.project_id),
project_ids: normalizeProjectIds(request.project_ids ?? []),
include_segments: request.include_segments ?? false,
});
if (response.meetings?.length) {
meetingCache.cacheMeetings(response.meetings);

View File

@@ -0,0 +1,350 @@
import { describe, expect, it, vi } from 'vitest';
import { TauriCommands } from '../constants';
import { createAnalyticsApi } from './analytics';
import { createAppsApi } from './apps';
import { createAsrApi } from './asr';
import { createAudioApi } from './audio';
import { createOidcApi } from './oidc';
import { createProjectApi } from './projects';
import { createTaskApi } from './tasks';
describe('tauri section APIs', () => {
// Verifies the analytics adapter forwards each request field to the
// matching Tauri command with snake_case keys.
it('maps analytics commands', async () => {
  const invoke = vi.fn();
  invoke.mockResolvedValueOnce({} as unknown);
  invoke.mockResolvedValueOnce({} as unknown);
  invoke.mockResolvedValueOnce({} as unknown);
  const api = createAnalyticsApi(invoke);
  await api.getAnalyticsOverview({ start_time: 1, end_time: 2, project_id: 'p1' });
  await api.listSpeakerStats({ start_time: 1, end_time: 2, project_ids: ['p1'] });
  await api.getEntityAnalytics({ start_time: 1, end_time: 2, top_limit: 5 });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.GET_ANALYTICS_OVERVIEW, {
    start_time: 1,
    end_time: 2,
    project_id: 'p1',
    project_ids: undefined,
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.LIST_SPEAKER_STATS, {
    start_time: 1,
    end_time: 2,
    project_id: undefined,
    project_ids: ['p1'],
  });
  // Omitted filters are passed through as explicit undefined.
  expect(invoke).toHaveBeenCalledWith(TauriCommands.GET_ENTITY_ANALYTICS, {
    start_time: 1,
    end_time: 2,
    project_id: undefined,
    project_ids: undefined,
    top_limit: 5,
  });
});
// Verifies camelCase options are converted to snake_case command args.
it('maps apps commands', async () => {
  const invoke = vi.fn();
  invoke.mockResolvedValueOnce({ apps: [], total: 0, page: 0, page_size: 50, has_more: false });
  const api = createAppsApi(invoke);
  await api.listInstalledApps({ commonOnly: true, page: 1, pageSize: 10, forceRefresh: true });
  await api.invalidateAppCache();
  expect(invoke).toHaveBeenCalledWith(TauriCommands.LIST_INSTALLED_APPS, {
    common_only: true,
    page: 1,
    page_size: 10,
    force_refresh: true,
  });
  // invalidateAppCache takes no payload.
  expect(invoke).toHaveBeenCalledWith(TauriCommands.INVALIDATE_APP_CACHE);
});
// Verifies task list/update mapping; updateTask unwraps the { task } envelope.
it('maps task commands', async () => {
  const invoke = vi.fn();
  invoke.mockResolvedValueOnce({ tasks: [], total_count: 0 });
  invoke.mockResolvedValueOnce({ task: { id: 't1' } });
  const api = createTaskApi(invoke);
  await api.listTasks({ statuses: ['open'], limit: 10, offset: 0 });
  const updated = await api.updateTask({ task_id: 't1', text: 'hi', status: 'open' });
  // The adapter returns the inner task, not the envelope.
  expect(updated).toEqual({ id: 't1' });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.LIST_TASKS, {
    statuses: ['open'],
    limit: 10,
    offset: 0,
    project_id: undefined,
    project_ids: undefined,
    meeting_id: undefined,
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.UPDATE_TASK, {
    task_id: 't1',
    text: 'hi',
    status: 'open',
    assignee_person_id: undefined,
    due_date: undefined,
    priority: undefined,
  });
});
// Verifies ASR config/token commands; some commands wrap their payload in
// a { request } envelope while others pass bare snake_case args.
it('maps asr commands', async () => {
  const invoke = vi.fn();
  // One queued resolution per API call below, in order.
  invoke.mockResolvedValueOnce({} as unknown);
  invoke.mockResolvedValueOnce({} as unknown);
  invoke.mockResolvedValueOnce({} as unknown);
  invoke.mockResolvedValueOnce({} as unknown);
  invoke.mockResolvedValueOnce({} as unknown);
  invoke.mockResolvedValueOnce({} as unknown);
  invoke.mockResolvedValueOnce({} as unknown);
  invoke.mockResolvedValueOnce(true);
  invoke.mockResolvedValueOnce({} as unknown);
  const api = createAsrApi(invoke);
  await api.getAsrConfiguration();
  await api.updateAsrConfiguration({ modelSize: 'base' });
  await api.getAsrJobStatus('job1');
  await api.getStreamingConfiguration();
  await api.updateStreamingConfiguration({ partialCadenceSeconds: 2 });
  await api.setHuggingFaceToken({ token: 'hf', validate: false });
  await api.getHuggingFaceTokenStatus();
  await api.deleteHuggingFaceToken();
  await api.validateHuggingFaceToken();
  expect(invoke).toHaveBeenCalledWith(TauriCommands.GET_ASR_CONFIGURATION);
  expect(invoke).toHaveBeenCalledWith(TauriCommands.UPDATE_ASR_CONFIGURATION, {
    request: { modelSize: 'base' },
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.GET_ASR_JOB_STATUS, { job_id: 'job1' });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.GET_STREAMING_CONFIGURATION);
  expect(invoke).toHaveBeenCalledWith(TauriCommands.UPDATE_STREAMING_CONFIGURATION, {
    request: { partialCadenceSeconds: 2 },
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.SET_HUGGINGFACE_TOKEN, {
    request: { token: 'hf', validate: false },
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.GET_HUGGINGFACE_TOKEN_STATUS);
  expect(invoke).toHaveBeenCalledWith(TauriCommands.DELETE_HUGGINGFACE_TOKEN);
  expect(invoke).toHaveBeenCalledWith(TauriCommands.VALIDATE_HUGGINGFACE_TOKEN);
});
// Verifies audio device/test commands, including snake_case conversion of
// injectTestAudio's config and the camelCase mapping of its response.
it('maps audio commands', async () => {
  const invoke = vi.fn();
  // One queued resolution per API call below, in order.
  invoke
    .mockResolvedValueOnce([])
    .mockResolvedValueOnce(null)
    .mockResolvedValueOnce(undefined)
    .mockResolvedValueOnce([])
    .mockResolvedValueOnce(undefined)
    .mockResolvedValueOnce(undefined)
    .mockResolvedValueOnce(undefined)
    .mockResolvedValueOnce({ enabled: true })
    .mockResolvedValueOnce({
      has_input_devices: true,
      has_virtual_device: false,
      input_devices: ['mic'],
      is_server_connected: true,
      can_run_audio_tests: true,
    })
    .mockResolvedValueOnce({
      chunks_sent: 1,
      duration_seconds: 2,
      sample_rate: 16000,
    })
    .mockResolvedValueOnce({
      chunks_sent: 3,
      duration_seconds: 4,
      sample_rate: 8000,
    });
  const api = createAudioApi(invoke);
  await api.listAudioDevices();
  await api.getDefaultAudioDevice(true);
  await api.selectAudioDevice('mic', true);
  await api.listLoopbackDevices();
  await api.setSystemAudioDevice('loopback');
  await api.setDualCaptureEnabled(true);
  await api.setAudioMixLevels(0.5, 0.25);
  await api.getDualCaptureConfig();
  const env = await api.checkTestEnvironment();
  const audio = await api.injectTestAudio('m1', { wavPath: '/tmp.wav', speed: 1.5, chunkMs: 250 });
  const tone = await api.injectTestTone('m1', 440, 2, 16000);
  // Snake_case responses are surfaced as camelCase by the adapter.
  expect(env.canRunAudioTests).toBe(true);
  expect(audio.sampleRate).toBe(16000);
  expect(tone.chunksSent).toBe(3);
  expect(invoke).toHaveBeenCalledWith(TauriCommands.LIST_AUDIO_DEVICES);
  expect(invoke).toHaveBeenCalledWith(TauriCommands.GET_DEFAULT_AUDIO_DEVICE, { is_input: true });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.SELECT_AUDIO_DEVICE, {
    device_id: 'mic',
    is_input: true,
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.LIST_LOOPBACK_DEVICES);
  expect(invoke).toHaveBeenCalledWith(TauriCommands.SET_SYSTEM_AUDIO_DEVICE, {
    device_id: 'loopback',
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.SET_DUAL_CAPTURE_ENABLED, { enabled: true });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.SET_AUDIO_MIX_LEVELS, {
    mic_gain: 0.5,
    system_gain: 0.25,
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.GET_DUAL_CAPTURE_CONFIG);
  expect(invoke).toHaveBeenCalledWith(TauriCommands.CHECK_TEST_ENVIRONMENT);
  expect(invoke).toHaveBeenCalledWith(TauriCommands.INJECT_TEST_AUDIO, {
    meeting_id: 'm1',
    config: { wav_path: '/tmp.wav', speed: 1.5, chunk_ms: 250 },
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.INJECT_TEST_TONE, {
    meeting_id: 'm1',
    frequency_hz: 440,
    duration_seconds: 2,
    sample_rate: 16000,
  });
});
// Verifies OIDC provider CRUD/discovery commands; register/update use a
// { request } envelope, the rest pass bare snake_case args.
it('maps oidc commands', async () => {
  const invoke = vi.fn();
  // One queued resolution per API call below, in order.
  invoke.mockResolvedValueOnce({ id: 'oidc' });
  invoke.mockResolvedValueOnce({ providers: [] });
  invoke.mockResolvedValueOnce({ id: 'oidc' });
  invoke.mockResolvedValueOnce({ id: 'oidc' });
  invoke.mockResolvedValueOnce({ success: true });
  invoke.mockResolvedValueOnce({ success: true });
  invoke.mockResolvedValueOnce({ success: true });
  invoke.mockResolvedValueOnce({ presets: [] });
  const api = createOidcApi(invoke);
  await api.registerOidcProvider({
    workspace_id: 'w1',
    name: 'oidc',
    issuer_url: 'https://issuer',
    client_id: 'client',
    preset: 'custom',
    scopes: ['openid'],
    allowed_groups: [],
    auto_discover: true,
  });
  await api.listOidcProviders('w1', true);
  await api.getOidcProvider('oidc');
  await api.updateOidcProvider({
    provider_id: 'oidc',
    name: 'oidc2',
    scopes: ['openid'],
    allowed_groups: [],
  });
  await api.deleteOidcProvider('oidc');
  await api.refreshOidcDiscovery('oidc', 'w1');
  await api.testOidcConnection('oidc');
  await api.listOidcPresets();
  expect(invoke).toHaveBeenCalledWith(TauriCommands.REGISTER_OIDC_PROVIDER, {
    request: {
      workspace_id: 'w1',
      name: 'oidc',
      issuer_url: 'https://issuer',
      client_id: 'client',
      preset: 'custom',
      scopes: ['openid'],
      allowed_groups: [],
      auto_discover: true,
    },
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.LIST_OIDC_PROVIDERS, {
    workspace_id: 'w1',
    enabled_only: true,
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.GET_OIDC_PROVIDER, { provider_id: 'oidc' });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.UPDATE_OIDC_PROVIDER, {
    request: {
      provider_id: 'oidc',
      name: 'oidc2',
      scopes: ['openid'],
      allowed_groups: [],
    },
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.DELETE_OIDC_PROVIDER, {
    provider_id: 'oidc',
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.REFRESH_OIDC_DISCOVERY, {
    provider_id: 'oidc',
    workspace_id: 'w1',
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.TEST_OIDC_CONNECTION, {
    provider_id: 'oidc',
  });
  expect(invoke).toHaveBeenCalledWith(TauriCommands.LIST_OIDC_PRESETS);
});
it('maps project commands', async () => {
  const invoke = vi.fn();
  // Queue one resolved payload per API call below, in invocation order.
  const queuedResponses: unknown[] = [
    { id: 'p1' }, // createProject
    { id: 'p1' }, // getProject
    { id: 'p1' }, // getProjectBySlug
    { projects: [], total_count: 0 }, // listProjects
    { id: 'p1' }, // updateProject
    { id: 'p1' }, // archiveProject
    { id: 'p1' }, // restoreProject
    { success: true }, // deleteProject
    undefined, // setActiveProject
    { project_id: 'p1', project: { id: 'p1' } }, // getActiveProject
    { id: 'member' }, // addProjectMember
    { id: 'member' }, // updateProjectMemberRole
    { success: true }, // removeProjectMember
    { members: [], total_count: 0 }, // listProjectMembers
  ];
  for (const response of queuedResponses) {
    invoke.mockResolvedValueOnce(response);
  }
  const api = createProjectApi(invoke);
  // Shorthand: assert the Tauri command and payload passed through the bridge.
  const expectInvoked = (...args: unknown[]) => expect(invoke).toHaveBeenCalledWith(...args);

  await api.createProject({ workspace_id: 'w1', name: 'Project' });
  await api.getProject({ project_id: 'p1' });
  await api.getProjectBySlug({ workspace_id: 'w1', slug: 'proj' });
  await api.listProjects({ workspace_id: 'w1' });
  await api.updateProject({ project_id: 'p1', name: 'Project 2' });
  await api.archiveProject('p1');
  await api.restoreProject('p1');
  await api.deleteProject('p1');
  await api.setActiveProject({ workspace_id: 'w1', project_id: 'p1' });
  await api.getActiveProject({ workspace_id: 'w1' });
  await api.addProjectMember({ project_id: 'p1', user_id: 'u1', role: 'viewer' });
  await api.updateProjectMemberRole({ project_id: 'p1', user_id: 'u1', role: 'editor' });
  await api.removeProjectMember({ project_id: 'p1', user_id: 'u1' });
  await api.listProjectMembers({ project_id: 'p1', limit: 10, offset: 0 });

  expectInvoked(TauriCommands.CREATE_PROJECT, {
    request: { workspace_id: 'w1', name: 'Project' },
  });
  expectInvoked(TauriCommands.GET_PROJECT, { project_id: 'p1' });
  expectInvoked(TauriCommands.GET_PROJECT_BY_SLUG, {
    workspace_id: 'w1',
    slug: 'proj',
  });
  expectInvoked(TauriCommands.LIST_PROJECTS, {
    workspace_id: 'w1',
    include_archived: false,
    limit: undefined,
    offset: undefined,
  });
  expectInvoked(TauriCommands.UPDATE_PROJECT, {
    request: { project_id: 'p1', name: 'Project 2' },
  });
  expectInvoked(TauriCommands.ARCHIVE_PROJECT, { project_id: 'p1' });
  expectInvoked(TauriCommands.RESTORE_PROJECT, { project_id: 'p1' });
  expectInvoked(TauriCommands.DELETE_PROJECT, { project_id: 'p1' });
  expectInvoked(TauriCommands.SET_ACTIVE_PROJECT, {
    workspace_id: 'w1',
    project_id: 'p1',
  });
  expectInvoked(TauriCommands.GET_ACTIVE_PROJECT, { workspace_id: 'w1' });
  expectInvoked(TauriCommands.ADD_PROJECT_MEMBER, {
    request: { project_id: 'p1', user_id: 'u1', role: 'viewer' },
  });
  expectInvoked(TauriCommands.UPDATE_PROJECT_MEMBER_ROLE, {
    request: { project_id: 'p1', user_id: 'u1', role: 'editor' },
  });
  expectInvoked(TauriCommands.REMOVE_PROJECT_MEMBER, {
    request: { project_id: 'p1', user_id: 'u1' },
  });
  expectInvoked(TauriCommands.LIST_PROJECT_MEMBERS, {
    project_id: 'p1',
    limit: 10,
    offset: 0,
  });
});
});

View File

@@ -0,0 +1,90 @@
import { describe, expect, it, vi } from 'vitest';
import { createSummarizationApi } from './summarization';
import { TauriCommands } from '../constants';
// Stub the client observability module so tests don't emit real logs.
vi.mock('@/lib/observability/client', () => ({
addClientLog: vi.fn(),
}));
describe('createSummarizationApi', () => {
it('maps summarization template operations', async () => {
const invoke = vi.fn().mockResolvedValue({});
const api = createSummarizationApi(invoke);
await api.listSummarizationTemplates({ workspace_id: 'w1' });
expect(invoke).toHaveBeenCalledWith(TauriCommands.LIST_SUMMARIZATION_TEMPLATES, {
workspace_id: 'w1',
include_system: true,
include_archived: false,
limit: undefined,
offset: undefined,
});
await api.getSummarizationTemplate({ template_id: 't1' });
expect(invoke).toHaveBeenCalledWith(TauriCommands.GET_SUMMARIZATION_TEMPLATE, {
template_id: 't1',
include_current_version: true,
});
await api.createSummarizationTemplate({
workspace_id: 'w1',
name: 'Name',
description: 'Desc',
content: 'Content',
change_note: 'Note',
});
expect(invoke).toHaveBeenCalledWith(TauriCommands.CREATE_SUMMARIZATION_TEMPLATE, {
workspace_id: 'w1',
name: 'Name',
description: 'Desc',
content: 'Content',
change_note: 'Note',
});
await api.updateSummarizationTemplate({
template_id: 't1',
name: 'New',
description: 'Desc',
content: 'Content',
change_note: 'Note',
});
expect(invoke).toHaveBeenCalledWith(TauriCommands.UPDATE_SUMMARIZATION_TEMPLATE, {
template_id: 't1',
name: 'New',
description: 'Desc',
content: 'Content',
change_note: 'Note',
});
await api.archiveSummarizationTemplate({ template_id: 't1' });
expect(invoke).toHaveBeenCalledWith(TauriCommands.ARCHIVE_SUMMARIZATION_TEMPLATE, {
template_id: 't1',
});
await api.listSummarizationTemplateVersions({ template_id: 't1', limit: 5, offset: 0 });
expect(invoke).toHaveBeenCalledWith(TauriCommands.LIST_SUMMARIZATION_TEMPLATE_VERSIONS, {
template_id: 't1',
limit: 5,
offset: 0,
});
await api.restoreSummarizationTemplateVersion({ template_id: 't1', version_id: 'v1' });
expect(invoke).toHaveBeenCalledWith(TauriCommands.RESTORE_SUMMARIZATION_TEMPLATE_VERSION, {
template_id: 't1',
version_id: 'v1',
});
});
it('tracks cloud consent actions and status', async () => {
const invoke = vi.fn().mockResolvedValue({ consent_granted: true });
const api = createSummarizationApi(invoke);
await api.grantCloudConsent();
expect(invoke).toHaveBeenCalledWith(TauriCommands.GRANT_CLOUD_CONSENT);
await api.revokeCloudConsent();
expect(invoke).toHaveBeenCalledWith(TauriCommands.REVOKE_CLOUD_CONSENT);
const status = await api.getCloudConsentStatus();
expect(status.consentGranted).toBe(true);
});
});

View File

@@ -0,0 +1,32 @@
import { describe, expect, it } from 'vitest';
import { IdentityDefaults } from '@/api/core/constants';
import { normalizeProjectId, normalizeProjectIds, recordingBlockedDetails, RECORDING_BLOCKED_PREFIX } from '@/api/adapters/tauri/utils';
describe('tauri utils', () => {
  it('normalizes project ids', () => {
    // Missing, blank, and default-sentinel ids collapse to undefined;
    // everything else is trimmed.
    expect(normalizeProjectId()).toBeUndefined();
    expect(normalizeProjectId(' ')).toBeUndefined();
    expect(normalizeProjectId(IdentityDefaults.DEFAULT_PROJECT_ID)).toBeUndefined();
    expect(normalizeProjectId(' custom ')).toBe('custom');

    const normalized = normalizeProjectIds([
      ' one ',
      IdentityDefaults.DEFAULT_PROJECT_ID,
      '',
      'two',
    ]);
    expect(normalized).toEqual(['one', 'two']);
  });

  it('extracts recording blocked details', () => {
    const blockedMessage = `${RECORDING_BLOCKED_PREFIX}: rule_id=abc, rule_label=Focus, app_name=Zoom`;

    // Parses both plain strings and Error instances; unrelated text yields null.
    expect(recordingBlockedDetails(blockedMessage)).toEqual({
      ruleId: 'abc',
      ruleLabel: 'Focus',
      appName: 'Zoom',
    });
    expect(recordingBlockedDetails(new Error(blockedMessage))?.ruleId).toBe('abc');
    expect(recordingBlockedDetails('nope')).toBeNull();
  });
});

View File

@@ -16,6 +16,8 @@ import type {
AddProjectMemberRequest,
AnalyticsOverview,
AnalyticsOverviewRequest,
EntityAnalytics,
EntityAnalyticsRequest,
ArchiveSummarizationTemplateRequest,
Annotation,
ASRConfiguration,
@@ -958,6 +960,8 @@ export interface NoteFlowAPI {
getAnalyticsOverview(request: AnalyticsOverviewRequest): Promise<AnalyticsOverview>;
listSpeakerStats(request: ListSpeakerStatsRequest): Promise<ListSpeakerStatsResponse>;
getEntityAnalytics(request: EntityAnalyticsRequest): Promise<EntityAnalytics>;
}
// --- API Instance Management ---

View File

@@ -47,3 +47,31 @@ export interface ListSpeakerStatsRequest {
/** Response payload for the speaker-stats listing call. */
export interface ListSpeakerStatsResponse {
speakers: SpeakerStat[];
}
/** Aggregate counts for a single entity category. */
export interface EntityCategoryStat {
category: string;
/** Number of distinct entities in this category. */
count: number;
/** Total mentions across all entities in this category. */
total_mentions: number;
}
/** One of the most frequently mentioned entities. */
export interface TopEntity {
/** Entity surface text as extracted. */
text: string;
category: string;
/** Total times this entity was mentioned. */
mention_count: number;
/** Number of meetings the entity appears in. */
meeting_count: number;
}
/** Entity-extraction analytics payload rendered by the analytics screens. */
export interface EntityAnalytics {
by_category: EntityCategoryStat[];
top_entities: TopEntity[];
/** Count of unique entities across all categories. */
total_entities: number;
/** Count of all mentions across all entities. */
total_mentions: number;
}
/** Query parameters for fetching entity analytics. */
export interface EntityAnalyticsRequest {
/** Range start (epoch timestamp; unit not shown here — confirm ms vs s). */
start_time: number;
/** Range end (epoch timestamp; unit not shown here — confirm ms vs s). */
end_time: number;
/** Optional single-project scope. */
project_id?: string;
/** Optional multi-project scope; presumably overrides project_id as in ListMeetingsRequest — confirm. */
project_ids?: string[];
/** Max number of top entities to return. */
top_limit?: number;
}

View File

@@ -47,8 +47,6 @@ export function getSyncErrorMessage(errorCode: SyncErrorCode | undefined): strin
return 'External service error - please try again later';
case 'internal_error':
return 'Internal error - please contact support';
case 'unknown':
case 'unspecified':
default:
return 'Sync failed';
}

View File

@@ -31,6 +31,8 @@ export interface ListMeetingsRequest {
project_id?: string;
/** Optional project scope for multiple projects (overrides project_id when provided) */
project_ids?: string[];
/** Include full transcript segments (default: false) */
include_segments?: boolean;
}
/**

View File

@@ -55,4 +55,5 @@ export interface UserPreferences {
meetings_project_ids: string[];
tasks_project_scope: ProjectScope;
tasks_project_ids: string[];
tasks_view_mode: 'list' | 'board';
}

View File

@@ -0,0 +1,17 @@
import { render, screen } from '@testing-library/react';
import { describe, expect, it } from 'vitest';
import { AnnotationTypeBadge } from '@/components/common/badges/annotation-type-badge';
describe('AnnotationTypeBadge', () => {
  it('renders label and icon by default', () => {
    // Badge shows the short label plus an inline SVG icon unless disabled.
    const view = render(<AnnotationTypeBadge type="action_item" />);
    expect(screen.getByText('Action')).toBeInTheDocument();
    expect(view.container.querySelector('svg')).not.toBeNull();
  });

  it('renders without icon when disabled', () => {
    render(<AnnotationTypeBadge type="note" showIcon={false} />);
    expect(screen.getByText('Note')).toBeInTheDocument();
  });
});

View File

@@ -0,0 +1,55 @@
import { fireEvent, render, screen } from '@testing-library/react';
import { describe, expect, it, vi } from 'vitest';
import { ConfirmationDialog } from '@/components/common/dialogs/confirmation-dialog';
// Shared fixture strings for the dialog under test.
const TITLE = 'Confirm Action';
const DESCRIPTION = 'Are you sure?';
const CONFIRM = 'Confirm';
/**
 * Renders a ConfirmationDialog with spy callbacks and fixture text.
 * Extra props override the defaults; returns the spies for assertions.
 */
function renderDialog(props?: Partial<Parameters<typeof ConfirmationDialog>[0]>) {
  const handlers = {
    onOpenChange: vi.fn(),
    onConfirm: vi.fn(),
    onCancel: vi.fn(),
  };
  render(
    <ConfirmationDialog
      open
      {...handlers}
      title={TITLE}
      description={DESCRIPTION}
      confirmContent={CONFIRM}
      {...props}
    />
  );
  return handlers;
}
describe('ConfirmationDialog', () => {
  it('renders content and handles confirm', async () => {
    const { onOpenChange, onConfirm } = renderDialog();

    expect(screen.getByText(TITLE)).toBeInTheDocument();
    expect(screen.getByText(DESCRIPTION)).toBeInTheDocument();

    const confirmButton = screen.getByRole('button', { name: CONFIRM });
    fireEvent.click(confirmButton);

    // Confirming fires the callback and closes the dialog.
    expect(onConfirm).toHaveBeenCalled();
    expect(onOpenChange).toHaveBeenCalledWith(false);
  });

  it('handles cancel and hides cancel button when disabled', async () => {
    const { onOpenChange, onCancel } = renderDialog({ cancelContent: 'Nope' });

    fireEvent.click(screen.getByRole('button', { name: 'Nope' }));
    expect(onCancel).toHaveBeenCalled();
    expect(onOpenChange).toHaveBeenCalledWith(false);

    // Second render suppresses the cancel button entirely.
    renderDialog({ showCancel: false, cancelContent: 'Nope' });
    expect(screen.queryByRole('button', { name: 'Nope' })).toBeNull();
  });
});

View File

@@ -0,0 +1,45 @@
import { fireEvent, render, screen } from '@testing-library/react';
import { afterEach, describe, expect, it, vi } from 'vitest';
import { ErrorBoundary } from '@/components/common/error-boundary';
// Component that deliberately throws during render to trigger the boundary.
function Thrower(): never {
  throw new Error('Kaboom');
}
describe('ErrorBoundary', () => {
  afterEach(() => {
    vi.unstubAllGlobals();
    vi.restoreAllMocks();
  });

  it('renders children when no error', () => {
    render(
      <ErrorBoundary>
        <div>All good</div>
      </ErrorBoundary>
    );
    expect(screen.getByText('All good')).toBeInTheDocument();
  });

  it('renders fallback UI on error and reloads', async () => {
    // Stub location.reload so the reload button can be observed without navigating.
    const reloadMock = vi.fn();
    vi.stubGlobal('location', { ...window.location, reload: reloadMock });
    // Silence the error React logs for a render-phase throw.
    const errorLogSpy = vi.spyOn(console, 'error').mockImplementation(() => undefined);

    render(
      <ErrorBoundary>
        <Thrower />
      </ErrorBoundary>
    );

    expect(screen.getByText('Something went wrong')).toBeInTheDocument();
    expect(screen.getByText('Kaboom')).toBeInTheDocument();

    fireEvent.click(screen.getByRole('button', { name: /reload app/i }));
    expect(reloadMock).toHaveBeenCalled();
    errorLogSpy.mockRestore();
  });
});

View File

@@ -0,0 +1,30 @@
import { render, screen } from '@testing-library/react';
import { describe, expect, it } from 'vitest';
import { MemoryRouter } from 'react-router-dom';
import { NavLink } from './nav-link';
describe('NavLink', () => {
  // Renders the link targeting /active inside a router at the given path.
  const renderAt = (path: string) =>
    render(
      <MemoryRouter initialEntries={[path]}>
        <NavLink to="/active" activeClassName="is-active">
          Active
        </NavLink>
      </MemoryRouter>
    );

  it('applies active class when route matches', () => {
    renderAt('/active');
    expect(screen.getByRole('link', { name: 'Active' })).toHaveClass('is-active');
  });

  it('does not apply active class when route does not match', () => {
    renderAt('/inactive');
    expect(screen.getByRole('link', { name: 'Active' })).not.toHaveClass('is-active');
  });
});

View File

@@ -0,0 +1,40 @@
import { render, screen } from '@testing-library/react';
import { describe, expect, it } from 'vitest';
import { CenteredStatsCard, MiniStatsCard, StatsCard } from './stats-card';
// Minimal stand-in icon; data-testid lets tests assert icon wiring.
function Icon({ className }: { className?: string }) {
  return <svg data-testid="icon" className={className} />;
}
describe('StatsCard', () => {
  it('renders title, value, description, and variant styles', () => {
    render(
      <StatsCard
        icon={Icon}
        title="Sessions"
        value={12}
        description="Monthly"
        variant="success"
      />
    );
    expect(screen.getByText('Sessions')).toBeInTheDocument();
    // The variant drives the value's text color class.
    expect(screen.getByText('12')).toHaveClass('text-success');
    expect(screen.getByText('Monthly')).toBeInTheDocument();
  });

  it('renders mini stats card with custom icon background', () => {
    render(<MiniStatsCard icon={Icon} value="5" label="Errors" iconBgClass="bg-red-500" />);
    expect(screen.getByText('5')).toBeInTheDocument();
    expect(screen.getByText('Errors')).toBeInTheDocument();
    // The custom background class lands on the icon's wrapper element.
    const iconWrapper = screen.getByTestId('icon').parentElement;
    expect(iconWrapper).toHaveClass('bg-red-500');
  });

  it('renders centered stats card', () => {
    render(<CenteredStatsCard value={42} label="Total" variant="destructive" />);
    expect(screen.getByText('42')).toHaveClass('text-destructive');
    expect(screen.getByText('Total')).toBeInTheDocument();
  });
});

View File

@@ -24,7 +24,7 @@ const profileSamples: ProfileSample[] = [];
const profileListeners = new Set<ProfileListener>();
/**
 * Whether the app is running in a dev build.
 *
 * The text here contained two return statements (diff residue left both the
 * old and new line in place), making the second unreachable. Keep only the
 * new form: optional chaining guards runtimes where import.meta.env is
 * absent, and Boolean() normalizes the DEV flag to a strict boolean.
 */
function isDevMode(): boolean {
  return Boolean(import.meta?.env?.DEV);
}
function readProfilerEnabled(): boolean {

View File

@@ -0,0 +1,38 @@
import { describe, expect, it } from 'vitest';
import { mapSpeakerStats, speakerLabel, wordCountTickLabel } from '@/components/features/analytics/analytics-utils';
describe('analytics-utils', () => {
  it('formats speaker labels safely', () => {
    // Non-object and incomplete inputs degrade to an empty string.
    expect(speakerLabel(null)).toBe('');
    expect(speakerLabel({})).toBe('');
    expect(speakerLabel({ speakerId: 'spk', percentage: 12.345 })).toBe('spk: 12.3%');
  });

  it('formats word count ticks', () => {
    expect(wordCountTickLabel('abc')).toBe('');
    expect(wordCountTickLabel(999)).toBe('999');
    expect(wordCountTickLabel(1200)).toBe('1.2k');
  });

  it('maps speaker stats with percentages', () => {
    // Two speakers with equal time split the total 50/50.
    const makeStat = (speaker_id: string, display_name: string, segment_count: number) => ({
      speaker_id,
      display_name,
      total_time: 50,
      segment_count,
      meeting_count: 1,
    });
    const stats = mapSpeakerStats([makeStat('a', 'A', 2), makeStat('b', 'B', 1)]);
    expect(stats[0]?.percentage).toBe(50);
    expect(stats[1]?.percentage).toBe(50);
  });
});

View File

@@ -1,3 +1,5 @@
import type { SpeakerStat } from '@/api/types';
export const SPEAKER_COLORS = [
'hsl(var(--chart-1))',
'hsl(var(--chart-2))',
@@ -14,6 +16,15 @@ export const SPEAKER_COLOR_CLASSES = [
'bg-[hsl(var(--chart-5))]',
];
/**
 * Camel-cased speaker talk-time stats enriched with a share-of-total
 * percentage (produced by mapSpeakerStats).
 */
export interface SpeakerStats {
speakerId: string;
displayName: string;
/** Total speaking time (unit as provided by SpeakerStat.total_time — confirm). */
totalTime: number;
/** Share of all speakers' combined time, 0-100; 0 when the total is 0. */
percentage: number;
segmentCount: number;
meetingCount: number;
}
export function speakerLabel(entry: unknown): string {
if (!entry || typeof entry !== 'object') {
return '';
@@ -34,3 +45,15 @@ export function wordCountTickLabel(value: unknown): string {
}
return numeric >= 1000 ? `${(numeric / 1000).toFixed(1)}k` : `${numeric}`;
}
/**
 * Converts raw snake_case speaker stats into camelCase view models,
 * adding each speaker's percentage of the combined speaking time.
 * When the combined time is zero, every percentage is 0.
 */
export function mapSpeakerStats(speakers: SpeakerStat[]): SpeakerStats[] {
  // Denominator for the share-of-voice percentage.
  const overallTime = speakers.reduce((acc, stat) => acc + stat.total_time, 0);
  return speakers.map((stat) => {
    const share = overallTime > 0 ? (stat.total_time / overallTime) * 100 : 0;
    return {
      speakerId: stat.speaker_id,
      displayName: stat.display_name,
      totalTime: stat.total_time,
      percentage: share,
      segmentCount: stat.segment_count,
      meetingCount: stat.meeting_count,
    };
  });
}

View File

@@ -0,0 +1,216 @@
import { Bar, BarChart, CartesianGrid, Cell, Legend, Pie, PieChart, ResponsiveContainer, Tooltip, XAxis, YAxis } from 'recharts';
import { FileText, Hash, Tag, TrendingUp } from 'lucide-react';
import type { EntityAnalytics } from '@/api/types';
import { StatsCard } from '@/components/common';
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/card';
import { ChartContainer, ChartTooltip, ChartTooltipContent } from '@/components/ui/chart';
import { chartAxis, chartStrokes, flexLayout, overflow } from '@/lib/ui/styles';
// Icon-plus-text row styling shared by the card titles below.
const titleRowClass = flexLayout.itemsGap2;
// Per-category series colors: the five theme chart colors plus three extra
// hues; indexes cycle with modulo when there are more categories.
const CATEGORY_COLORS = [
'hsl(var(--chart-1))',
'hsl(var(--chart-2))',
'hsl(var(--chart-3))',
'hsl(var(--chart-4))',
'hsl(var(--chart-5))',
'hsl(221, 83%, 53%)',
'hsl(262, 83%, 58%)',
'hsl(330, 81%, 60%)',
];
interface EntitiesTabProps {
/** Pre-aggregated entity analytics to visualize. */
entityAnalytics: EntityAnalytics;
}
/**
 * Entities analytics tab: headline entity counts, category distribution
 * (pie + horizontal bar), and a table of the most-mentioned entities.
 *
 * Fix: the Top Entities table previously duplicated the capitalization
 * logic inline; it now reuses the local categoryLabel helper.
 */
export function EntitiesTab({ entityAnalytics }: EntitiesTabProps) {
  // One chart row per category; colors cycle through CATEGORY_COLORS by index.
  const categoryData = entityAnalytics.by_category.map((cat, idx) => ({
    category: cat.category,
    entity_count: cat.count,
    mention_count: cat.total_mentions,
    fill: CATEGORY_COLORS[idx % CATEGORY_COLORS.length],
  }));
  // Capitalizes a category name; tolerant of non-string values because chart
  // callbacks (tooltip formatter, axis ticks) pass loosely-typed labels.
  const categoryLabel = (label: unknown): string => {
    if (typeof label !== 'string') {
      return String(label ?? '');
    }
    if (!label) {
      return '';
    }
    return label.charAt(0).toUpperCase() + label.slice(1);
  };
  // ChartContainer config keyed by category: display label + series color.
  const chartConfig = entityAnalytics.by_category.reduce<
    Record<string, { label: string; color: string }>
  >((acc, cat, idx) => {
    acc[cat.category] = {
      label: categoryLabel(cat.category),
      color: CATEGORY_COLORS[idx % CATEGORY_COLORS.length],
    };
    return acc;
  }, {});
  return (
    <div className="space-y-6">
      {/* Headline stats row */}
      <div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-4 gap-4">
        <StatsCard
          icon={Hash}
          title="Total Entities"
          value={entityAnalytics.total_entities.toLocaleString()}
          description="Unique entities extracted"
        />
        <StatsCard
          icon={Tag}
          title="Total Mentions"
          value={entityAnalytics.total_mentions.toLocaleString()}
          description="Times entities were mentioned"
        />
        <StatsCard
          icon={FileText}
          title="Categories"
          value={entityAnalytics.by_category.length.toString()}
          description="Entity categories detected"
        />
        <StatsCard
          icon={TrendingUp}
          title="Avg Mentions"
          value={
            entityAnalytics.total_entities > 0
              ? (entityAnalytics.total_mentions / entityAnalytics.total_entities).toFixed(1)
              : '0'
          }
          description="Mentions per entity"
        />
      </div>
      {/* Category distribution charts */}
      <div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
        <Card>
          <CardHeader>
            <CardTitle className={titleRowClass}>
              <Tag className="h-5 w-5 text-primary" />
              Entities by Category
            </CardTitle>
            <CardDescription>Distribution of entity types</CardDescription>
          </CardHeader>
          <CardContent>
            <div className="h-[300px] flex items-center justify-center">
              {categoryData.length > 0 ? (
                <ResponsiveContainer width="100%" height="100%">
                  <PieChart>
                    <Pie
                      data={categoryData}
                      dataKey="entity_count"
                      nameKey="category"
                      cx="50%"
                      cy="50%"
                      outerRadius={100}
                      innerRadius={60}
                      label={({ category, percent }) =>
                        `${category} (${(percent * 100).toFixed(0)}%)`
                      }
                      labelLine={false}
                    >
                      {categoryData.map((entry) => (
                        <Cell key={`cell-${entry.category}`} fill={entry.fill} />
                      ))}
                    </Pie>
                    <Tooltip
                      formatter={(value: number | string, name: unknown) => [
                        `${Number(value)} entities`,
                        categoryLabel(name),
                      ]}
                    />
                    <Legend />
                  </PieChart>
                </ResponsiveContainer>
              ) : (
                <p className="text-muted-foreground">No entity data available</p>
              )}
            </div>
          </CardContent>
        </Card>
        <Card>
          <CardHeader>
            <CardTitle className={titleRowClass}>
              <Hash className="h-5 w-5 text-primary" />
              Mentions by Category
            </CardTitle>
            <CardDescription>How often each category is mentioned</CardDescription>
          </CardHeader>
          <CardContent>
            <div className={overflow.xAuto}>
              <ChartContainer config={chartConfig} className="h-[300px] min-w-[360px]">
                <BarChart data={categoryData} layout="vertical">
                  <CartesianGrid strokeDasharray="3 3" className={chartStrokes.muted} />
                  <XAxis type="number" className={chartAxis.xAxis} tick={{ fontSize: 12 }} />
                  <YAxis
                    dataKey="category"
                    type="category"
                    className="text-xs fill-muted-foreground"
                    tick={{ fontSize: 12 }}
                    width={80}
                    tickFormatter={(v) => categoryLabel(v)}
                  />
                  <ChartTooltip
                    content={
                      <ChartTooltipContent
                        formatter={(value) => `${Number(value).toLocaleString()} mentions`}
                      />
                    }
                  />
                  <Bar dataKey="mention_count" name="Mentions" radius={[0, 4, 4, 0]}>
                    {categoryData.map((entry) => (
                      <Cell key={`bar-${entry.category}`} fill={entry.fill} />
                    ))}
                  </Bar>
                </BarChart>
              </ChartContainer>
            </div>
          </CardContent>
        </Card>
      </div>
      {/* Top entities table, shown only when there is data */}
      {entityAnalytics.top_entities.length > 0 && (
        <Card>
          <CardHeader>
            <CardTitle className={titleRowClass}>
              <TrendingUp className="h-5 w-5 text-primary" />
              Top Entities
            </CardTitle>
            <CardDescription>Most frequently mentioned entities across your meetings</CardDescription>
          </CardHeader>
          <CardContent>
            <div className="overflow-x-auto">
              <table className="w-full">
                <thead>
                  <tr className="border-b">
                    <th className="text-left py-3 px-4 font-medium text-muted-foreground">Entity</th>
                    <th className="text-left py-3 px-4 font-medium text-muted-foreground">Category</th>
                    <th className="text-right py-3 px-4 font-medium text-muted-foreground">Mentions</th>
                    <th className="text-right py-3 px-4 font-medium text-muted-foreground">Meetings</th>
                  </tr>
                </thead>
                <tbody>
                  {entityAnalytics.top_entities.map((entity) => (
                    <tr key={`${entity.text}-${entity.category}`} className="border-b last:border-0">
                      <td className="py-3 px-4 font-medium">{entity.text}</td>
                      <td className="py-3 px-4">
                        <span className="inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium bg-muted">
                          {categoryLabel(entity.category)}
                        </span>
                      </td>
                      <td className="py-3 px-4 text-right">{entity.mention_count.toLocaleString()}</td>
                      <td className="py-3 px-4 text-right">{entity.meeting_count.toLocaleString()}</td>
                    </tr>
                  ))}
                </tbody>
              </table>
            </div>
          </CardContent>
        </Card>
      )}
    </div>
  );
}

View File

@@ -0,0 +1,114 @@
import { render, screen } from '@testing-library/react';
import { describe, expect, it, vi } from 'vitest';
import { LogTimeline } from '@/components/features/analytics/log-timeline';
import { summarizeLogGroup } from '@/lib/observability/group-summarizer';
import type { LogGroup } from '@/lib/observability/groups';
import type { LogEntryData } from '@/components/features/analytics/log-entry';
import type { VirtualItem } from '@tanstack/react-virtual';
// Builds a log fixture: a minimal client-side info log whose fields can be
// overridden individually (overrides always win via the trailing spread).
function createLog(overrides: Partial<LogEntryData>): LogEntryData {
  return {
    id: 'log-1',
    timestamp: Date.now(),
    level: 'info',
    source: 'app',
    message: 'Hello',
    origin: 'client',
    ...overrides,
  };
}
// Builds a LogGroup fixture around the given logs. Summary and the time
// bounds default to values derived from the logs themselves.
function createGroup(group: Partial<LogGroup> & { id: string; logs: LogEntryData[] }): LogGroup {
  const times = group.logs.map((entry) => entry.timestamp);
  return {
    id: group.id,
    groupType: group.groupType ?? 'meeting',
    label: group.label ?? 'Group',
    logs: group.logs,
    summary: group.summary ?? summarizeLogGroup(group.logs),
    startTime: group.startTime ?? Math.min(...times),
    endTime: group.endTime ?? Math.max(...times),
    entityId: group.entityId,
    operationId: group.operationId,
  };
}
describe('LogTimeline', () => {
// With no groups the component renders nothing at all.
it('returns null when no groups', () => {
const { container } = render(
<LogTimeline
groups={[]}
viewMode="friendly"
expandedLogs={new Set()}
onToggleLog={vi.fn()}
/>
);
expect(container.firstChild).toBeNull();
});
it('renders groups with badges, gaps, and hidden count', () => {
const now = Date.now();
// Two near-simultaneous logs form the first group; the warning log sits
// 200 000 ms (~3 minutes) earlier, which the "minute" assertion below
// presumably exercises as a time-gap indicator between groups.
const errorLog = createLog({ id: 'err', level: 'error', timestamp: now });
const infoLog = createLog({ id: 'info', level: 'info', timestamp: now - 1 });
const warningLog = createLog({ id: 'warn', level: 'warning', timestamp: now - 200000 });
const groups: LogGroup[] = [
createGroup({ id: 'g1', label: 'Errors', logs: [errorLog, infoLog], groupType: 'time' }),
createGroup({ id: 'g2', label: 'Warnings', logs: [warningLog], groupType: 'operation' }),
];
render(
<LogTimeline
groups={groups}
viewMode="technical"
expandedLogs={new Set(['err'])}
onToggleLog={vi.fn()}
maxLogsPerGroup={1}
/>
);
// Group labels and per-level/log-count badges.
expect(screen.getByText('Errors')).toBeInTheDocument();
expect(screen.getByText('Warnings')).toBeInTheDocument();
expect(screen.getAllByText('1 error').length).toBeGreaterThan(0);
expect(screen.getAllByText('1 warning').length).toBeGreaterThan(0);
expect(screen.getAllByText('2 logs').length).toBeGreaterThan(0);
expect(screen.getByText(/minute/)).toBeInTheDocument();
// maxLogsPerGroup=1 hides the second log behind a "more log" button.
expect(screen.getByRole('button', { name: /more log/i })).toBeInTheDocument();
});
it('renders virtualized groups when provided', () => {
const log = createLog({ id: 'v1', level: 'info' });
const groups = [createGroup({ id: 'vg', label: 'Virtual', logs: [log] })];
// A single pre-measured virtual row covering the only group.
const virtualItem: VirtualItem = {
key: '0',
index: 0,
start: 0,
end: 10,
size: 10,
lane: 0,
};
render(
<LogTimeline
groups={groups}
viewMode="friendly"
expandedLogs={new Set()}
onToggleLog={vi.fn()}
virtualItems={[virtualItem]}
virtualTotalSize={10}
measureElement={vi.fn()}
/>
);
expect(screen.getByText('Virtual')).toBeInTheDocument();
});
});

View File

@@ -0,0 +1,131 @@
import { createRef } from 'react';
import { render, screen } from '@testing-library/react';
import { describe, expect, it, vi } from 'vitest';
import { LogsTabList } from '@/components/features/analytics/logs-tab-list';
import type { LogEntryData } from '@/components/features/analytics/log-entry';
import type { LogGroup } from '@/lib/observability/groups';
import { summarizeLogGroup } from '@/lib/observability/group-summarizer';
import type { SummarizedLog } from '@/lib/observability/summarizer';
// Stub the child components so this suite can assert which branch rendered
// (entry list vs timeline) without pulling in their real internals.
vi.mock('@/components/features/analytics/log-entry', () => ({
LogEntry: ({ summarized }: { summarized: { log: { id: string } } }) => (
<div data-testid="log-entry">{summarized.log.id}</div>
),
}));
vi.mock('@/components/features/analytics/log-timeline', () => ({
LogTimeline: () => <div data-testid="log-timeline" />,
}));
// Default, fully-empty prop set for LogsTabList; individual tests spread
// this and override only the fields they exercise.
function baseProps() {
  const virtualization = {
    shouldVirtualizeLogs: false,
    shouldVirtualizeGroups: false,
    viewportRef: createRef<HTMLDivElement>(),
    virtualItems: [],
    virtualTotalSize: 0,
    measureElement: vi.fn(),
    groupVirtualItems: [],
    groupVirtualTotalSize: 0,
    groupMeasureElement: vi.fn(),
  };
  return {
    isLoading: false,
    filteredLogs: [],
    summarizedLogs: [],
    logGroups: [],
    groupMode: 'none' as const,
    viewMode: 'friendly' as const,
    expandedLogs: new Set<string>(),
    onToggleExpanded: vi.fn(),
    searchQuery: '',
    levelFilter: 'all' as const,
    sourceFilter: 'all' as const,
    originFilter: 'all' as const,
    ...virtualization,
  };
}
// Minimal client info log used as a fixture throughout this suite.
function buildLog(id: string): LogEntryData {
  return {
    id,
    timestamp: 123,
    level: 'info',
    source: 'app',
    message: 'Hello',
    origin: 'client',
  };
}
// Wraps a single log in a one-element meeting group whose time bounds are
// the log's own timestamp and whose summary is computed from the log.
function buildGroup(log: LogEntryData): LogGroup {
  return {
    id: 'group-1',
    groupType: 'meeting',
    label: 'Group',
    logs: [log],
    summary: summarizeLogGroup([log]),
    startTime: log.timestamp,
    endTime: log.timestamp,
  };
}
describe('LogsTabList', () => {
it('renders loading state', () => {
render(<LogsTabList {...baseProps()} isLoading />);
expect(screen.getByText('Loading logs...')).toBeInTheDocument();
});
it('renders empty state with and without filters', () => {
// No filters: generic empty-state hint.
render(<LogsTabList {...baseProps()} />);
expect(screen.getByText('Logs will appear here as events occur')).toBeInTheDocument();
// Active filters: the hint suggests adjusting them instead.
render(
<LogsTabList
{...baseProps()}
searchQuery="foo"
levelFilter="error"
/>
);
expect(screen.getByText('Try adjusting your filters')).toBeInTheDocument();
});
it('renders grouped logs via timeline', () => {
const log = buildLog('l1');
render(
<LogsTabList
{...baseProps()}
filteredLogs={[log]}
logGroups={[buildGroup(log)]}
groupMode="time"
/>
);
// LogTimeline is mocked at the top of this file; its test id appearing
// proves the grouped branch rendered.
expect(screen.getByTestId('log-timeline')).toBeInTheDocument();
});
it('renders virtualized and non-virtualized log lists', () => {
const log = buildLog('l1');
const summarizedLogs: SummarizedLog<LogEntryData>[] = [
{ log, count: 1, isGroup: false, groupedLogs: undefined },
];
// Virtualized path: one virtual row is enough to render the single entry.
const { unmount } = render(
<LogsTabList
{...baseProps()}
filteredLogs={[log]}
summarizedLogs={summarizedLogs}
shouldVirtualizeLogs
virtualItems={[{ index: 0, start: 0 }]}
virtualTotalSize={10}
/>
);
expect(screen.getByTestId('log-entry')).toHaveTextContent('l1');
unmount();
// Plain path renders the same single entry without virtualization.
render(
<LogsTabList
{...baseProps()}
filteredLogs={[log]}
summarizedLogs={summarizedLogs}
/>
);
expect(screen.getAllByTestId('log-entry')).toHaveLength(1);
});
});

View File

@@ -0,0 +1,324 @@
// Meetings analytics tab content
import { useId } from 'react';
import { format } from 'date-fns';
import {
Area,
AreaChart,
Bar,
BarChart,
CartesianGrid,
Cell,
Legend,
Pie,
PieChart,
ResponsiveContainer,
Tooltip,
XAxis,
YAxis,
} from 'recharts';
import { Calendar, Clock, FileText, Mic, TrendingUp, Users } from 'lucide-react';
import type { AnalyticsOverview } from '@/api/types';
import {
SPEAKER_COLORS,
SPEAKER_COLOR_CLASSES,
type SpeakerStats,
speakerLabel,
wordCountTickLabel,
} from './analytics-utils';
import { StatsCard } from '@/components/common';
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/card';
import { ChartContainer, ChartTooltip, ChartTooltipContent } from '@/components/ui/chart';
import { formatDuration } from '@/lib/utils/format';
import { chartAxis, chartHeight, chartStrokes, flexLayout, overflow, typography } from '@/lib/ui/styles';
import { cn } from '@/lib/utils';
// Icon-plus-text row styling for the card titles below.
const titleRowClass = flexLayout.itemsGap2;
// Number of days quoted in the duration-trend card description.
const ANALYTICS_DAYS = 14;
/** One day of meeting activity, shaped for the trend charts. */
interface DailyStats {
/** Date string from the API (parsed with new Date() in mapDailyStats). */
date: string;
/** Short axis label, e.g. "Jan 5" (date-fns 'MMM d'). */
dateLabel: string;
meetings: number;
/** Total meeting time for the day — presumably seconds, given the v/60 minute tick formatter; confirm. */
totalDuration: number;
wordCount: number;
}
// Converts the overview's snake_case daily rows into chart-ready points,
// adding a short human-readable axis label per day.
function mapDailyStats(overview: AnalyticsOverview): DailyStats[] {
  return overview.daily.map((day) => {
    const dateLabel = format(new Date(day.date), 'MMM d');
    return {
      date: day.date,
      dateLabel,
      meetings: day.meetings,
      totalDuration: day.total_duration,
      wordCount: day.word_count,
    };
  });
}
interface MeetingsTabProps {
/** Aggregate meeting analytics for the selected range. */
overview: AnalyticsOverview;
/** Per-speaker stats already mapped to camelCase with percentages. */
speakerStats: SpeakerStats[];
/** Config passed to ChartContainer: display label + color per series key. */
chartConfig: Record<string, { label: string; color: string }>;
}
export function MeetingsTab({ overview, speakerStats, chartConfig }: MeetingsTabProps) {
const chartId = useId();
const gridProps = { strokeDasharray: '3 3', className: chartStrokes.muted };
const durationTooltip = (
<ChartTooltipContent formatter={(value) => formatDuration(Number(value))} />
);
const defaultTooltip = <ChartTooltipContent />;
const wordsTooltip = (
<ChartTooltipContent formatter={(value) => `${Number(value).toLocaleString()} words`} />
);
const dailyTrends = mapDailyStats(overview);
const avgDuration =
overview.total_meetings > 0 ? overview.total_duration / overview.total_meetings : 0;
const avgWordsPerMeeting =
overview.total_meetings > 0 ? overview.total_words / overview.total_meetings : 0;
return (
<div className="space-y-6">
<div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-4 gap-4">
<StatsCard
icon={Calendar}
title="Total Meetings"
value={overview.total_meetings.toString()}
description="All recorded meetings"
/>
<StatsCard
icon={Clock}
title="Total Duration"
value={formatDuration(overview.total_duration)}
description={`Avg: ${formatDuration(avgDuration)}`}
/>
<StatsCard
icon={FileText}
title="Total Words"
value={overview.total_words.toLocaleString()}
description={`Avg: ${Math.round(avgWordsPerMeeting).toLocaleString()} per meeting`}
/>
<StatsCard
icon={Users}
title="Unique Speakers"
value={overview.speaker_count.toString()}
description={`${overview.total_segments} segments total`}
/>
</div>
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
<Card>
<CardHeader>
<CardTitle className={titleRowClass}>
<TrendingUp className="h-5 w-5 text-primary" />
Meeting Duration Trends
</CardTitle>
<CardDescription>Daily meeting duration over the last {ANALYTICS_DAYS} days</CardDescription>
</CardHeader>
<CardContent>
<div className={overflow.xAuto}>
<ChartContainer
config={chartConfig}
className={`${chartHeight.standard} min-w-[360px]`}
>
<AreaChart data={dailyTrends}>
<defs>
<linearGradient id={`${chartId}-durationGradient`} x1="0" y1="0" x2="0" y2="1">
<stop offset="5%" stopColor="hsl(var(--chart-2))" stopOpacity={0.3} />
<stop offset="95%" stopColor="hsl(var(--chart-2))" stopOpacity={0} />
</linearGradient>
</defs>
<CartesianGrid {...gridProps} />
<XAxis dataKey="dateLabel" className={chartAxis.xAxis} tick={{ fontSize: 12 }} />
<YAxis
className="text-xs fill-muted-foreground"
tick={{ fontSize: 12 }}
tickFormatter={(v) => `${Math.round(v / 60)}m`}
/>
<ChartTooltip content={durationTooltip} />
<Area
type="monotone"
dataKey="totalDuration"
name="Duration"
stroke="hsl(var(--chart-2))"
fill={`url(#${chartId}-durationGradient)`}
strokeWidth={2}
/>
</AreaChart>
</ChartContainer>
</div>
</CardContent>
</Card>
<Card>
<CardHeader>
<CardTitle className={titleRowClass}>
<Calendar className="h-5 w-5 text-primary" />
Meetings Per Day
</CardTitle>
<CardDescription>Number of meetings recorded each day</CardDescription>
</CardHeader>
<CardContent>
<div className={overflow.xAuto}>
<ChartContainer config={chartConfig} className="h-[300px] min-w-[360px]">
<BarChart data={dailyTrends}>
<CartesianGrid {...gridProps} />
<XAxis
dataKey="dateLabel"
className="text-xs fill-muted-foreground"
tick={{ fontSize: 12 }}
/>
<YAxis
className="text-xs fill-muted-foreground"
tick={{ fontSize: 12 }}
allowDecimals={false}
/>
<ChartTooltip content={defaultTooltip} />
<Bar
dataKey="meetings"
name="Meetings"
fill="hsl(var(--chart-1))"
radius={[4, 4, 0, 0]}
/>
</BarChart>
</ChartContainer>
</div>
</CardContent>
</Card>
</div>
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
<Card>
<CardHeader>
<CardTitle className={titleRowClass}>
<Mic className="h-5 w-5 text-primary" />
Speaker Participation
</CardTitle>
<CardDescription>Speaking time distribution across all meetings</CardDescription>
</CardHeader>
<CardContent>
<div className="h-[300px] flex items-center justify-center">
{speakerStats.length > 0 ? (
<ResponsiveContainer width="100%" height="100%">
<PieChart>
<Pie
data={speakerStats}
dataKey="percentage"
nameKey="displayName"
cx="50%"
cy="50%"
outerRadius={100}
innerRadius={60}
label={speakerLabel}
labelLine={false}
>
{speakerStats.map((stat, idx) => (
<Cell
key={`cell-${stat.speakerId}`}
fill={SPEAKER_COLORS[idx % SPEAKER_COLORS.length]}
/>
))}
</Pie>
<Tooltip
formatter={(value: number, name: string) => [`${value.toFixed(1)}%`, name]}
/>
<Legend />
</PieChart>
</ResponsiveContainer>
) : (
<p className="text-muted-foreground">No speaker data available</p>
)}
</div>
</CardContent>
</Card>
<Card>
<CardHeader>
<CardTitle className={titleRowClass}>
<FileText className="h-5 w-5 text-primary" />
Word Count Trends
</CardTitle>
<CardDescription>Words transcribed per day</CardDescription>
</CardHeader>
<CardContent>
<div className={overflow.xAuto}>
<ChartContainer config={chartConfig} className="h-[300px] min-w-[360px]">
<AreaChart data={dailyTrends}>
<defs>
<linearGradient id={`${chartId}-wordsGradient`} x1="0" y1="0" x2="0" y2="1">
<stop offset="5%" stopColor="hsl(var(--chart-3))" stopOpacity={0.3} />
<stop offset="95%" stopColor="hsl(var(--chart-3))" stopOpacity={0} />
</linearGradient>
</defs>
<CartesianGrid {...gridProps} />
<XAxis
dataKey="dateLabel"
className="text-xs fill-muted-foreground"
tick={{ fontSize: 12 }}
/>
<YAxis
className="text-xs fill-muted-foreground"
tick={{ fontSize: 12 }}
tickFormatter={wordCountTickLabel}
/>
<ChartTooltip content={wordsTooltip} />
<Area
type="monotone"
dataKey="wordCount"
name="Words"
stroke="hsl(var(--chart-3))"
fill={`url(#${chartId}-wordsGradient)`}
strokeWidth={2}
/>
</AreaChart>
</ChartContainer>
</div>
</CardContent>
</Card>
</div>
{speakerStats.length > 0 && (
<Card>
<CardHeader>
<CardTitle>Speaker Breakdown</CardTitle>
<CardDescription>Detailed speaking time by speaker</CardDescription>
</CardHeader>
<CardContent>
<div className="space-y-4">
{speakerStats.map((speaker, index) => {
const speakerColorClass =
SPEAKER_COLOR_CLASSES[index % SPEAKER_COLOR_CLASSES.length];
const barWidth = `${speaker.percentage}%`;
return (
<div key={speaker.speakerId} className="flex items-center gap-4">
<div className={cn('w-3 h-3 rounded-full shrink-0', speakerColorClass)} />
<div className="flex-1 min-w-0">
<div className="flex justify-between items-center mb-1">
<span className={cn(typography.fontMedium, 'truncate')}>
{speaker.displayName}
</span>
<span className="text-sm text-muted-foreground">
{formatDuration(speaker.totalTime)} ({speaker.percentage.toFixed(1)}%)
</span>
</div>
<div className="h-2 bg-muted rounded-full overflow-hidden">
<div
className={cn(
'h-full rounded-full transition-all duration-500',
speakerColorClass
)}
style={{ width: barWidth }}
/>
</div>
</div>
</div>
);
})}
</div>
</CardContent>
</Card>
)}
</div>
);
}

View File

@@ -0,0 +1,86 @@
import { act, fireEvent, render, screen, waitFor } from '@testing-library/react';
import { describe, expect, it, vi } from 'vitest';
import { CalendarConnectionPanel } from '@/components/features/calendar/calendar-connection-panel';
// Per-test control of the mocked useOAuthFlow hook's return value.
const useOAuthFlow = vi.fn();
vi.mock('@/hooks', () => ({
  useOAuthFlow: () => useOAuthFlow(),
}));
describe('CalendarConnectionPanel', () => {
  // An error status surfaces its message, and an empty provider list shows guidance.
  it('shows empty state and error alert', () => {
    useOAuthFlow.mockReturnValue({
      state: { status: 'error', error: 'Boom' },
      initiateAuth: vi.fn(),
      checkConnection: vi.fn(),
      disconnect: vi.fn(),
      reset: vi.fn(),
    });
    render(<CalendarConnectionPanel providers={[]} />);
    expect(screen.getByText('Boom')).toBeInTheDocument();
    expect(
      screen.getByText('No calendar providers available. Check your server configuration.')
    ).toBeInTheDocument();
  });
  // Authenticated provider gets a connection check + disconnect; the
  // unauthenticated one exposes a connect action.
  it('handles connect and disconnect actions', async () => {
    const initiateAuth = vi.fn();
    const checkConnection = vi.fn();
    const disconnect = vi.fn().mockResolvedValue(true);
    const onConnectionChange = vi.fn();
    useOAuthFlow.mockReturnValue({
      state: { status: 'idle', provider: null, connection: null },
      initiateAuth,
      checkConnection,
      disconnect,
      reset: vi.fn(),
    });
    render(
      <CalendarConnectionPanel
        providers={[
          { name: 'google', display_name: 'Google', is_authenticated: true },
          { name: 'outlook', display_name: 'Outlook', is_authenticated: false },
        ]}
        onConnectionChange={onConnectionChange}
      />
    );
    expect(checkConnection).toHaveBeenCalledWith('google');
    // disconnect() resolves a promise, so wrap in act and wait for the callback.
    await act(async () => {
      fireEvent.click(screen.getByRole('button', { name: /disconnect/i }));
    });
    await waitFor(() => {
      expect(onConnectionChange).toHaveBeenCalled();
    });
    fireEvent.click(screen.getByRole('button', { name: /^connect$/i }));
    expect(initiateAuth).toHaveBeenCalledWith('outlook');
  });
  // While awaiting the OAuth callback, a pending badge and cancel (reset) appear.
  it('renders status badges and cancel action', () => {
    const reset = vi.fn();
    useOAuthFlow.mockReturnValue({
      state: { status: 'awaiting_callback', provider: 'google', connection: { email: 'me@x.com' } },
      initiateAuth: vi.fn(),
      checkConnection: vi.fn(),
      disconnect: vi.fn(),
      reset,
    });
    render(
      <CalendarConnectionPanel
        providers={[{ name: 'google', display_name: 'Google', is_authenticated: false }]}
      />
    );
    expect(screen.getByText('Awaiting authorization...')).toBeInTheDocument();
    fireEvent.click(screen.getByRole('button', { name: /cancel/i }));
    expect(reset).toHaveBeenCalled();
  });
});

View File

@@ -0,0 +1,101 @@
import { render, screen } from '@testing-library/react';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { CalendarEventsPanel } from '@/components/features/calendar/calendar-events-panel';
import type { CalendarEvent } from '@/api/types';
import type { CalendarSyncState } from '@/hooks/sync/use-calendar-sync';
// Mutable mock state consumed by the vi.mock factory below; each test assigns
// mockState / mockIsAutoRefreshing before rendering.
const mockFetchEvents = vi.fn();
const mockStartAutoRefresh = vi.fn();
const mockStopAutoRefresh = vi.fn();
let mockState: CalendarSyncState;
let mockIsAutoRefreshing = false;
vi.mock('@/hooks', () => ({
  useCalendarSync: () => ({
    state: mockState,
    fetchEvents: mockFetchEvents,
    startAutoRefresh: mockStartAutoRefresh,
    stopAutoRefresh: mockStopAutoRefresh,
    isAutoRefreshing: mockIsAutoRefreshing,
  }),
}));
describe('CalendarEventsPanel', () => {
  beforeEach(() => {
    // Reset to a neutral idle state so tests don't leak into each other.
    mockState = {
      status: 'idle',
      events: [],
      providers: [],
      error: null,
      lastSync: null,
    };
    mockIsAutoRefreshing = false;
    mockFetchEvents.mockClear();
    mockStartAutoRefresh.mockClear();
    mockStopAutoRefresh.mockClear();
  });
  afterEach(() => {
    // NOTE(review): redundant with the beforeEach mockClear above — harmless.
    mockFetchEvents.mockClear();
  });
  it('renders empty state when no events', () => {
    render(<CalendarEventsPanel />);
    expect(screen.getByText('No upcoming events')).toBeInTheDocument();
    // The panel fetches on mount.
    expect(mockFetchEvents).toHaveBeenCalled();
  });
  it('renders error state', () => {
    mockState = {
      status: 'error',
      events: [],
      providers: [],
      error: 'Boom',
      lastSync: null,
    };
    render(<CalendarEventsPanel />);
    expect(screen.getByText('Boom')).toBeInTheDocument();
  });
  // A fully-populated event renders title, attendee count, meeting link,
  // location, and the recurring badge.
  it('renders events list with details', () => {
    const now = Math.floor(Date.now() / 1000);
    const event: CalendarEvent = {
      id: 'e1',
      title: 'Team Sync',
      start_time: now + 3600,
      end_time: now + 7200,
      attendees: ['a@example.com', 'b@example.com'],
      meeting_url: 'https://meet.example.com',
      location: 'Room 1',
      is_recurring: true,
    };
    mockState = {
      status: 'success',
      events: [event],
      providers: [],
      error: null,
      lastSync: null,
    };
    render(<CalendarEventsPanel />);
    expect(screen.getByText('Team Sync')).toBeInTheDocument();
    expect(screen.getByText(/attendee/)).toBeInTheDocument();
    expect(screen.getByText('Video call')).toBeInTheDocument();
    expect(screen.getByText('Room 1')).toBeInTheDocument();
    expect(screen.getByText('Recurring')).toBeInTheDocument();
  });
  it('shows auto-refresh status when enabled', () => {
    mockIsAutoRefreshing = true;
    render(<CalendarEventsPanel autoRefreshInterval={60000} />);
    expect(mockStartAutoRefresh).toHaveBeenCalled();
    expect(screen.getByText(/Auto-refreshing every 1 minutes/)).toBeInTheDocument();
  });
});

View File

@@ -0,0 +1,299 @@
import { fireEvent, render, screen } from '@testing-library/react';
import { MemoryRouter } from 'react-router-dom';
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { UpcomingMeetings } from './upcoming-meetings';
// Local structural copy of the calendar event shape used by this test's
// fixtures. NOTE(review): kept in sync by hand with the production type —
// start_time/end_time appear to be unix seconds; confirm against the API types.
type CalendarEvent = {
  id: string;
  title: string;
  start_time: number;
  end_time: number;
  location?: string;
  attendees?: string[];
  meeting_link?: string;
  calendar_name?: string;
  provider?: string;
};
// vi.hoisted guarantees these spies exist before the hoisted vi.mock factories
// below run.
const mocks = vi.hoisted(() => ({
  useCalendarSync: vi.fn(),
  useMeetingReminders: vi.fn(),
  preferences: {
    getIntegrations: vi.fn(),
  },
}));
vi.mock('@/hooks', () => ({
  useCalendarSync: mocks.useCalendarSync,
  useMeetingReminders: mocks.useMeetingReminders,
}));
vi.mock('@/lib/preferences', () => ({
  preferences: mocks.preferences,
}));
// UI primitives are replaced with minimal pass-through elements so the tests
// exercise UpcomingMeetings' logic rather than the design-system components.
vi.mock('@/components/ui/badge', () => ({
  Badge: ({ children }: { children: React.ReactNode }) => <span>{children}</span>,
}));
vi.mock('@/components/ui/button', () => ({
  Button: ({
    children,
    onClick,
    disabled,
  }: {
    children: React.ReactNode;
    onClick?: () => void;
    disabled?: boolean;
  }) => (
    <button type="button" onClick={onClick} disabled={disabled}>
      {children}
    </button>
  ),
}));
vi.mock('@/components/ui/card', () => ({
  Card: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
  CardHeader: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
  CardTitle: ({ children }: { children: React.ReactNode }) => <h3>{children}</h3>,
  CardDescription: ({ children }: { children: React.ReactNode }) => <p>{children}</p>,
  CardContent: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
}));
// Checkbox stub exposes a clickable button with a deterministic test id so
// individual reminder toggles can be targeted.
vi.mock('@/components/ui/checkbox', () => ({
  Checkbox: ({
    checked,
    onCheckedChange,
    id,
  }: {
    checked?: boolean;
    onCheckedChange?: (next: boolean) => void;
    id?: string;
  }) => (
    <button
      type="button"
      data-testid={`checkbox-${id ?? 'unknown'}`}
      onClick={() => onCheckedChange?.(!checked)}
    />
  ),
}));
vi.mock('@/components/ui/label', () => ({
  Label: ({ children }: { children: React.ReactNode }) => <span>{children}</span>,
}));
vi.mock('@/components/ui/popover', () => ({
  Popover: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
  PopoverTrigger: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
  PopoverContent: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
}));
vi.mock('@/components/ui/skeleton', () => ({
  Skeleton: ({ className }: { className?: string }) => (
    <div data-testid="skeleton" className={className} />
  ),
}));
vi.mock('@/components/ui/switch', () => ({
  Switch: ({
    checked,
    onCheckedChange,
    disabled,
  }: {
    checked?: boolean;
    onCheckedChange?: (next: boolean) => void;
    disabled?: boolean;
  }) => (
    <button
      type="button"
      data-testid="switch"
      disabled={disabled}
      onClick={() => onCheckedChange?.(!checked)}
    />
  ),
}));
vi.mock('@/components/ui/tooltip', () => ({
  Tooltip: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
  TooltipProvider: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
  TooltipTrigger: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
  TooltipContent: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
}));
/** Wrap a component in MemoryRouter so router hooks/links resolve in tests. */
const renderWithRouter = (element: React.ReactElement) => {
  return render(<MemoryRouter>{element}</MemoryRouter>);
};
/** Build a CalendarEvent fixture; later overrides win over the defaults. */
const createEvent = (id: string, overrides: Partial<CalendarEvent> = {}): CalendarEvent => {
  const base: CalendarEvent = {
    id,
    title: `Event ${id}`,
    start_time: 1_700_000_000,
    end_time: 1_700_000_600,
    attendees: ['Ada', 'Lin', 'Sam', 'Pat'],
    meeting_link: 'https://example.com',
    calendar_name: 'Team Calendar',
  };
  return { ...base, ...overrides };
};
describe('UpcomingMeetings', () => {
  beforeEach(() => {
    // Reset every spy, then install the common defaults: no integrations and
    // reminders enabled with a 30-minute lead. Individual tests override these.
    mocks.useCalendarSync.mockReset();
    mocks.useMeetingReminders.mockReset();
    mocks.preferences.getIntegrations.mockReset();
    mocks.preferences.getIntegrations.mockReturnValue([]);
    mocks.useMeetingReminders.mockReturnValue({
      permission: 'granted',
      settings: { enabled: true, reminderMinutes: [30] },
      toggleReminders: vi.fn(),
      setReminderMinutes: vi.fn(),
      requestPermission: vi.fn(),
      isSupported: true,
    });
  });
  it('renders skeleton when loading and calendars connected', () => {
    mocks.preferences.getIntegrations.mockReturnValue([
      { id: 'cal-1', type: 'calendar', status: 'connected', name: 'Work Calendar' },
    ]);
    mocks.useCalendarSync.mockReturnValue({
      state: { status: 'loading', events: [] },
      fetchEvents: vi.fn(),
    });
    renderWithRouter(<UpcomingMeetings />);
    expect(screen.getByTestId('upcoming-meetings-skeleton')).toBeInTheDocument();
  });
  it('renders error state and retries', () => {
    const fetchEvents = vi.fn();
    mocks.preferences.getIntegrations.mockReturnValue([
      { id: 'cal-1', type: 'calendar', status: 'connected', name: 'Work Calendar' },
    ]);
    mocks.useCalendarSync.mockReturnValue({
      state: { status: 'error', events: [] },
      fetchEvents,
    });
    renderWithRouter(<UpcomingMeetings />);
    expect(screen.getByText('Unable to load calendar events')).toBeInTheDocument();
    // "Try again" should re-trigger the fetch.
    fireEvent.click(screen.getByText('Try again'));
    expect(fetchEvents).toHaveBeenCalled();
  });
  it('prompts to connect a calendar when none are connected', () => {
    mocks.useCalendarSync.mockReturnValue({
      state: { status: 'success', events: [] },
      fetchEvents: vi.fn(),
    });
    renderWithRouter(<UpcomingMeetings />);
    expect(screen.getByText('No calendars connected')).toBeInTheDocument();
    expect(screen.getByText('Connect Calendar')).toBeInTheDocument();
  });
  it('renders empty state when there are no events', () => {
    mocks.preferences.getIntegrations.mockReturnValue([
      { id: 'cal-1', type: 'calendar', status: 'connected', name: 'Work Calendar' },
    ]);
    mocks.useCalendarSync.mockReturnValue({
      state: { status: 'success', events: [] },
      fetchEvents: vi.fn(),
    });
    renderWithRouter(<UpcomingMeetings />);
    expect(screen.getByText('No upcoming meetings scheduled')).toBeInTheDocument();
  });
  // Four events with a page size of three: the fourth only appears after
  // clicking the "next page" button.
  it('renders events with attendees and pagination', () => {
    mocks.preferences.getIntegrations.mockReturnValue([
      { id: 'cal-1', type: 'calendar', status: 'connected', name: 'Work Calendar' },
    ]);
    mocks.useMeetingReminders.mockReturnValue({
      permission: 'granted',
      settings: { enabled: false, reminderMinutes: [] },
      toggleReminders: vi.fn(),
      setReminderMinutes: vi.fn(),
      requestPermission: vi.fn(),
      isSupported: false,
    });
    const events = [
      createEvent('1', { title: 'First', meeting_link: undefined }),
      createEvent('2', { title: 'Second', meeting_link: undefined }),
      createEvent('3', { title: 'Third', meeting_link: undefined }),
      createEvent('4', { title: 'Fourth', meeting_link: undefined }),
    ];
    mocks.useCalendarSync.mockReturnValue({
      state: { status: 'success', events },
      fetchEvents: vi.fn(),
    });
    renderWithRouter(<UpcomingMeetings />);
    expect(screen.getByText('First')).toBeInTheDocument();
    expect(screen.getByText('Second')).toBeInTheDocument();
    expect(screen.getByText('Third')).toBeInTheDocument();
    expect(screen.queryByText('Fourth')).toBeNull();
    expect(screen.getByText('1-3 of 4')).toBeInTheDocument();
    // NOTE(review): brittle — assumes exactly prev/next buttons render and
    // index 1 is "next"; a labeled query would be sturdier.
    const buttons = screen.getAllByRole('button');
    expect(buttons).toHaveLength(2);
    fireEvent.click(buttons[1]);
    expect(screen.getByText('Fourth')).toBeInTheDocument();
  });
  it('handles reminder permissions when default', () => {
    const setReminderMinutes = vi.fn();
    const requestPermission = vi.fn();
    mocks.preferences.getIntegrations.mockReturnValue([
      { id: 'cal-1', type: 'calendar', status: 'connected', name: 'Work Calendar' },
    ]);
    mocks.useCalendarSync.mockReturnValue({
      state: { status: 'success', events: [createEvent('1')] },
      fetchEvents: vi.fn(),
    });
    mocks.useMeetingReminders.mockReturnValue({
      permission: 'default',
      settings: { enabled: false, reminderMinutes: [15] },
      toggleReminders: vi.fn(),
      setReminderMinutes,
      requestPermission,
      isSupported: true,
    });
    renderWithRouter(<UpcomingMeetings />);
    fireEvent.click(screen.getByText('Enable Notifications'));
    expect(requestPermission).toHaveBeenCalled();
  });
  // Toggling a reminder checkbox adds (30 prepended) or removes (15 cleared)
  // the corresponding lead time.
  it('updates reminder minutes when toggled', () => {
    const setReminderMinutes = vi.fn();
    mocks.preferences.getIntegrations.mockReturnValue([
      { id: 'cal-1', type: 'calendar', status: 'connected', name: 'Work Calendar' },
    ]);
    mocks.useCalendarSync.mockReturnValue({
      state: { status: 'success', events: [createEvent('1')] },
      fetchEvents: vi.fn(),
    });
    mocks.useMeetingReminders.mockReturnValue({
      permission: 'granted',
      settings: { enabled: true, reminderMinutes: [15] },
      toggleReminders: vi.fn(),
      setReminderMinutes,
      requestPermission: vi.fn(),
      isSupported: true,
    });
    renderWithRouter(<UpcomingMeetings />);
    fireEvent.click(screen.getByTestId('checkbox-reminder-30'));
    expect(setReminderMinutes).toHaveBeenCalledWith([30, 15]);
    fireEvent.click(screen.getByTestId('checkbox-reminder-15'));
    expect(setReminderMinutes).toHaveBeenCalledWith([]);
  });
});

View File

@@ -0,0 +1,56 @@
import { render, screen, waitFor } from '@testing-library/react';
import { describe, expect, it, vi } from 'vitest';
import { ConnectionStatus } from '@/components/features/connectivity/connection-status';
// Shared mutable connection mode read by the mocked context hook below.
// NOTE(review): neither mockStateMode nor mockGetServerInfo is reset in a
// beforeEach — tests rely on each one assigning mockStateMode first; confirm
// no leakage if tests are reordered.
let mockStateMode = 'connected';
const mockGetServerInfo = vi.fn();
vi.mock('@/contexts/connection-state', () => ({
  useConnectionState: () => ({
    state: { mode: mockStateMode },
  }),
}));
vi.mock('@/api/interface', () => ({
  getAPI: () => ({
    getServerInfo: mockGetServerInfo,
  }),
}));
describe('ConnectionStatus', () => {
  // When connected, the component fetches server info and shows the version.
  it('shows connected state with version', async () => {
    mockStateMode = 'connected';
    mockGetServerInfo.mockResolvedValueOnce({ version: '1.2.3' });
    render(<ConnectionStatus />);
    await waitFor(() => {
      expect(screen.getByText('Connected')).toBeInTheDocument();
    });
    expect(screen.getByText('v1.2.3')).toBeInTheDocument();
  });
  it('shows disconnected state', () => {
    mockStateMode = 'disconnected';
    render(<ConnectionStatus />);
    expect(screen.getByText('Disconnected')).toBeInTheDocument();
  });
  // Cached mode renders as "Offline"; a rerender picks up the new mode.
  it('shows cached and reconnecting states', () => {
    mockStateMode = 'cached';
    const { rerender } = render(<ConnectionStatus />);
    expect(screen.getByText('Offline')).toBeInTheDocument();
    mockStateMode = 'reconnecting';
    rerender(<ConnectionStatus />);
    expect(screen.getByText('Reconnecting...')).toBeInTheDocument();
  });
  it('shows mock state', () => {
    mockStateMode = 'mock';
    render(<ConnectionStatus />);
    expect(screen.getByText('Mock')).toBeInTheDocument();
  });
});

View File

@@ -0,0 +1,43 @@
import { fireEvent, render, screen } from '@testing-library/react';
import { describe, expect, it, vi } from 'vitest';
import { ServerSwitchConfirmationDialog } from '@/components/features/connectivity/server-switch-confirmation-dialog';
describe('ServerSwitchConfirmationDialog', () => {
  // Dialog shows both endpoints and fires onConfirm from the confirm button.
  it('renders server details and confirms', () => {
    const confirmSpy = vi.fn();
    const openChangeSpy = vi.fn();
    render(
      <ServerSwitchConfirmationDialog
        open
        onConfirm={confirmSpy}
        onOpenChange={openChangeSpy}
        currentServer="old.example"
        newServer="new.example"
      />
    );
    for (const expected of ['Switch Server?', 'old.example', 'new.example']) {
      expect(screen.getByText(expected)).toBeInTheDocument();
    }
    fireEvent.click(screen.getByRole('button', { name: /switch server/i }));
    expect(confirmSpy).toHaveBeenCalled();
  });
  // Empty server strings fall back to generic placeholder labels.
  it('falls back to default labels when servers are empty', () => {
    render(
      <ServerSwitchConfirmationDialog
        open
        onConfirm={vi.fn()}
        onOpenChange={vi.fn()}
        currentServer=""
        newServer=""
      />
    );
    expect(screen.getByText('current server')).toBeInTheDocument();
    expect(screen.getByText('new server')).toBeInTheDocument();
  });
});

View File

@@ -0,0 +1,79 @@
import { render, screen } from '@testing-library/react';
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { AnimatedTranscription } from '@/components/features/entities/animated-transcription';
// Spies backing the hoisted vi.mock factories below; reset per test.
const useAnimatedWords = vi.fn();
const findMatchingEntities = vi.fn();
vi.mock('@/hooks/ui/use-animated-words', () => ({
  useAnimatedWords: (text: string, options: { staggerDelay: number; blockId: string }) =>
    useAnimatedWords(text, options),
}));
vi.mock('@/lib/state/entities', () => ({
  findMatchingEntities: (text: string) => findMatchingEntities(text),
}));
// Replace the real highlight component with a plain span tagged for queries.
vi.mock('@/components/features/entities/entity-highlight', () => ({
  HighlightedTerm: ({ text }: { text: string }) => <span data-testid="highlight">{text}</span>,
}));
describe('AnimatedTranscription', () => {
  beforeEach(() => {
    useAnimatedWords.mockReset();
    findMatchingEntities.mockReset();
  });
  it('returns null when text is empty', () => {
    const { container } = render(
      <AnimatedTranscription
        text=""
        blockId="b1"
        pinnedEntities={new Set()}
        onTogglePin={vi.fn()}
      />
    );
    expect(container.firstChild).toBeNull();
  });
  // A matched entity renders via the mocked HighlightedTerm; showCursor adds
  // a pulsing cursor element.
  it('renders highlighted terms and cursor', () => {
    useAnimatedWords.mockReturnValue([
      { word: 'Hello', index: 0, shouldAnimate: false, delay: 0 },
      { word: 'world', index: 2, shouldAnimate: false, delay: 0 },
    ]);
    findMatchingEntities.mockReturnValue([
      { entity: { id: 'e1', name: 'Hello' }, startIndex: 0, endIndex: 5 },
    ]);
    render(
      <AnimatedTranscription
        text="Hello world"
        blockId="b1"
        pinnedEntities={new Set(['e1'])}
        onTogglePin={vi.fn()}
        showCursor
      />
    );
    expect(screen.getByTestId('highlight')).toHaveTextContent('Hello');
    expect(screen.getByText('world')).toBeInTheDocument();
    expect(document.querySelector('.animate-pulse')).toBeInTheDocument();
  });
  // With no animated-word state the raw text still renders.
  it('falls back when word state is missing', () => {
    useAnimatedWords.mockReturnValue([]);
    findMatchingEntities.mockReturnValue([]);
    render(
      <AnimatedTranscription
        text="Hello"
        blockId="b2"
        pinnedEntities={new Set()}
        onTogglePin={vi.fn()}
      />
    );
    expect(screen.getByText('Hello')).toBeInTheDocument();
  });
});

View File

@@ -5,7 +5,8 @@ import type { Editor } from '@tiptap/react';
import { Badge } from '@/components/ui/badge';
import { Button } from '@/components/ui/button';
import { Label } from '@/components/ui/label';
import { MarkdownEditor, getEditorMarkdown, useMarkdownEditor } from '@/components/ui/markdown-editor';
import { MarkdownEditor } from '@/components/ui/markdown-editor';
import { getEditorMarkdown, useMarkdownEditor } from '@/components/ui/markdown-editor-utils';
import { ScrollArea } from '@/components/ui/scroll-area';
import { Switch } from '@/components/ui/switch';
import { Textarea } from '@/components/ui/textarea';

View File

@@ -0,0 +1,89 @@
import { fireEvent, render, screen } from '@testing-library/react';
import { describe, expect, it, vi } from 'vitest';
import type { Project } from '@/api/types/projects';
import type { ProjectScope } from '@/api/types/requests';
import { ProjectScopeFilter } from './ProjectScopeFilter';
/** Minimal active (non-default, non-archived) Project fixture. */
const buildProject = (id: string, name: string): Project => ({
  id,
  workspace_id: 'workspace',
  name,
  created_at: 0,
  updated_at: 0,
  is_default: false,
  is_archived: false,
});
// Render helper: supplies sensible defaults (two active projects, 'selected'
// scope) and returns the change spies plus the fixtures for assertions.
const renderFilter = (props: Partial<Parameters<typeof ProjectScopeFilter>[0]> = {}) => {
  const projects = [buildProject('p1', 'Alpha'), buildProject('p2', 'Beta')];
  const onProjectScopeChange = vi.fn();
  const onSelectedProjectIdsChange = vi.fn();
  const defaultProps = {
    activeProjects: projects,
    projectScope: 'selected' as ProjectScope,
    selectedProjectIds: [],
    onProjectScopeChange,
    onSelectedProjectIdsChange,
    resolvedProjectId: 'p1',
    idPrefix: 'test',
  };
  return {
    ...render(<ProjectScopeFilter {...defaultProps} {...props} />),
    onProjectScopeChange,
    onSelectedProjectIdsChange,
    projects,
  };
};
describe('ProjectScopeFilter', () => {
  it('renders selection summary when all projects are selected', () => {
    const { projects } = renderFilter({ selectedProjectIds: ['p1', 'p2'] });
    expect(screen.getAllByText('All projects')).toHaveLength(2);
    expect(screen.getByText(String(projects.length))).toBeInTheDocument();
  });
  it('shows empty selection guidance inside the popover', () => {
    renderFilter({ selectedProjectIds: [] });
    fireEvent.click(screen.getByRole('button', { name: 'Multiple projects' }));
    expect(screen.getByText('Select at least one project.')).toBeInTheDocument();
  });
  // Disabled both while projects load and when no resolved project exists.
  it('disables current project button when loading or missing resolved project', () => {
    const first = renderFilter({ projectsLoading: true });
    expect(screen.getByRole('button', { name: 'Current project' })).toBeDisabled();
    first.unmount();
    renderFilter({ projectsLoading: false, resolvedProjectId: null });
    expect(screen.getByRole('button', { name: 'Current project' })).toBeDisabled();
  });
  it('selects all projects from the popover actions', () => {
    const { onProjectScopeChange, onSelectedProjectIdsChange, projects } = renderFilter();
    fireEvent.click(screen.getByRole('button', { name: 'Multiple projects' }));
    fireEvent.click(screen.getByRole('button', { name: 'Select all' }));
    expect(onProjectScopeChange).toHaveBeenCalledWith('selected');
    expect(onSelectedProjectIdsChange).toHaveBeenCalledWith(projects.map((project) => project.id));
  });
  // Toggling passes a functional updater, so only its type is asserted here.
  it('toggles individual project selection', () => {
    const { onProjectScopeChange, onSelectedProjectIdsChange } = renderFilter({
      selectedProjectIds: ['p1'],
    });
    fireEvent.click(screen.getByRole('button', { name: 'Multiple projects' }));
    fireEvent.click(screen.getByLabelText('Beta'));
    expect(onProjectScopeChange).toHaveBeenCalledWith('selected');
    const call = onSelectedProjectIdsChange.mock.calls[0]?.[0];
    expect(typeof call).toBe('function');
  });
});

View File

@@ -0,0 +1,20 @@
import { fireEvent, render, screen } from '@testing-library/react';
import { describe, expect, it, vi } from 'vitest';
import { InTranscriptSearch } from '@/components/features/recording/in-transcript-search';
describe('InTranscriptSearch', () => {
  // Typing emits the new value; the clear button emits an empty string.
  it('calls onChange when typing and clearing', () => {
    const handleChange = vi.fn();
    const view = render(<InTranscriptSearch value="" onChange={handleChange} />);
    const input = screen.getByPlaceholderText('Search transcript...');
    fireEvent.change(input, { target: { value: 'hello' } });
    expect(handleChange).toHaveBeenCalledWith('hello');
    view.rerender(<InTranscriptSearch value="hello" onChange={handleChange} />);
    fireEvent.click(screen.getByTitle('Clear search'));
    expect(handleChange).toHaveBeenCalledWith('');
  });
});

View File

@@ -0,0 +1,17 @@
import { fireEvent, render, screen } from '@testing-library/react';
import { describe, expect, it, vi } from 'vitest';
import { JumpToLiveIndicator } from '@/components/features/recording/jump-to-live-indicator';
describe('JumpToLiveIndicator', () => {
  // The indicator renders an accessible button that forwards clicks.
  it('renders and triggers onClick', () => {
    const clickSpy = vi.fn();
    render(<JumpToLiveIndicator onClick={clickSpy} />);
    fireEvent.click(screen.getByRole('button', { name: /jump to live/i }));
    expect(clickSpy).toHaveBeenCalled();
  });
});

View File

@@ -21,12 +21,14 @@ import { useStreamingConfig } from '@/hooks';
import { useHuggingFaceToken } from '@/hooks';
import { iconSize } from '@/lib/ui/styles';
import { cn } from '@/lib/utils';
import { ModelAuthSection, getModelAuthSummary } from './model-auth-section';
import { ModelAuthSection } from './model-auth-section';
import { TranscriptionEngineSection } from './transcription-engine-section';
import { StreamingConfigSection } from './streaming-config-section';
import {
TranscriptionEngineSection,
getModelAuthSummary,
getStreamingConfigSummary,
getTranscriptionEngineSummary,
} from './transcription-engine-section';
import { StreamingConfigSection, getStreamingConfigSummary } from './streaming-config-section';
} from './summaries';
interface AdvancedLocalAISettingsProps {
serverInfo?: ServerInfo | null;

View File

@@ -201,16 +201,3 @@ export function ModelAuthSection({
</div>
);
}
/**
 * Summary text for the accordion trigger: loading placeholder, token status
 * (validated vs. not), or a prompt when no token is configured.
 */
export function getModelAuthSummary(status: HuggingFaceTokenStatus | null): string {
  if (!status) {
    return 'Loading...';
  }
  if (!status.isConfigured) {
    return 'No HuggingFace token configured';
  }
  return status.isValidated
    ? `Token configured for ${status.username}`
    : 'Token configured (not validated)';
}

View File

@@ -331,10 +331,3 @@ export function StreamingConfigSection({
</div>
);
}
/** Accordion summary for streaming config: cadence + max segment length. */
export function getStreamingConfigSummary(config: StreamingConfiguration | null): string {
  if (!config) {
    return 'Unavailable';
  }
  const parts = [
    `Partials ${config.partialCadenceSeconds}s`,
    `Max segment ${config.maxSegmentDurationSeconds}s`,
  ];
  return parts.join(' · ');
}

View File

@@ -0,0 +1,29 @@
import type { ASRConfiguration, HuggingFaceTokenStatus, StreamingConfiguration } from '@/api/types';
/**
 * Accordion summary for HuggingFace token state: loading placeholder,
 * configured (validated vs. not), or a prompt when no token exists.
 */
export function getModelAuthSummary(status: HuggingFaceTokenStatus | null): string {
  if (!status) {
    return 'Loading...';
  }
  if (!status.isConfigured) {
    return 'No HuggingFace token configured';
  }
  return status.isValidated
    ? `Token configured for ${status.username}`
    : 'Token configured (not validated)';
}
/** Accordion summary for streaming config: partial-result cadence in seconds. */
export function getStreamingConfigSummary(config: StreamingConfiguration | null): string {
  return config
    ? `Partials every ${config.partialCadenceSeconds.toFixed(1)}s`
    : 'Streaming configuration unavailable';
}
/** Accordion summary for the ASR engine: "<model> on <DEVICE>" with fallbacks. */
export function getTranscriptionEngineSummary(config: ASRConfiguration | null): string {
  if (!config) {
    return 'Transcription configuration unavailable';
  }
  // null/undefined device falls back; empty string passes through uppercased
  // (matches the original ?. / ?? semantics).
  const device = config.device != null ? config.device.toUpperCase() : 'Unknown device';
  // `||` (not `??`) so an empty-string model name also falls back.
  const model = config.model || 'Unknown model';
  return `${model} on ${device}`;
}

View File

@@ -391,11 +391,3 @@ export function TranscriptionEngineSection({
</>
);
}
/** Summary text for the accordion trigger. */
export function getTranscriptionEngineSummary(config: ASRConfiguration | null): string {
if (!config) {
return 'Not configured';
}
return `Model: ${config.modelSize} • Device: ${DEVICE_LABELS[config.device]}`;
}

View File

@@ -1,8 +1,17 @@
import { fireEvent, render, screen } from '@testing-library/react';
import { fireEvent, render, screen, waitFor } from '@testing-library/react';
import type { ReactNode } from 'react';
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { ExportAISection } from './export-ai-section';
const mocks = vi.hoisted(() => ({
getTauriExportLocationCache: vi.fn(),
setTauriExportLocationCache: vi.fn(),
isTauriEnvironment: vi.fn(),
addClientLog: vi.fn(),
documentDir: vi.fn(),
join: vi.fn(),
}));
vi.mock('@/components/ui/select', () => ({
Select: ({ children }: { children: ReactNode }) => <div>{children}</div>,
SelectTrigger: ({ children }: { children: ReactNode }) => <div>{children}</div>,
@@ -11,6 +20,28 @@ vi.mock('@/components/ui/select', () => ({
SelectItem: ({ children }: { children: ReactNode }) => <div>{children}</div>,
}));
vi.mock('@/lib/preferences', () => ({
getTauriExportLocationCache: mocks.getTauriExportLocationCache,
setTauriExportLocationCache: mocks.setTauriExportLocationCache,
}));
vi.mock('@/api', async (importOriginal) => {
const actual = await importOriginal<typeof import('@/api')>();
return {
...actual,
isTauriEnvironment: mocks.isTauriEnvironment,
};
});
vi.mock('@tauri-apps/api/path', () => ({
documentDir: mocks.documentDir,
join: mocks.join,
}));
vi.mock('@/lib/observability/client', () => ({
addClientLog: mocks.addClientLog,
}));
// Mock framer-motion to avoid animation issues in tests
vi.mock('framer-motion', () => ({
motion: {
@@ -38,6 +69,10 @@ describe('ExportAISection', () => {
beforeEach(() => {
vi.clearAllMocks();
mocks.getTauriExportLocationCache.mockReturnValue(null);
mocks.isTauriEnvironment.mockReturnValue(false);
mocks.documentDir.mockResolvedValue('/Users/test/Documents');
mocks.join.mockResolvedValue('/Users/test/Documents/NoteFlow');
});
describe('Component Rendering', () => {
@@ -153,4 +188,86 @@ describe('ExportAISection', () => {
expect(screen.getByText('Verbosity')).toBeInTheDocument();
});
});
// Export-location hint/placeholder text should reflect the user's OS.
// A cached Tauri path (when present) determines the hint; otherwise the
// component falls back to browser platform detection.
// NOTE(review): the quadruple backslashes below are source-level escapes —
// the rendered strings contain double backslashes; confirm against the
// component's formatting before changing them.
describe('OS-specific hints', () => {
  it('uses cached Windows path for hint and placeholder', () => {
    mocks.getTauriExportLocationCache.mockReturnValue(
      'C:\\\\Users\\\\Me\\\\Documents\\\\NoteFlow'
    );
    render(<ExportAISection {...defaultProps} />);
    // The cached path is reused verbatim as the input placeholder…
    expect(
      screen.getByPlaceholderText('C:\\\\Users\\\\Me\\\\Documents\\\\NoteFlow')
    ).toBeInTheDocument();
    // …and embedded in the OS-prefixed hint text.
    expect(
      screen.getByText(
        'Windows: C:\\\\Users\\\\Me\\\\Documents\\\\NoteFlow (~/Documents/NoteFlow also works).'
      )
    ).toBeInTheDocument();
  });
  it('uses cached macOS path for hint', () => {
    // A /Users/… path is classified as macOS.
    mocks.getTauriExportLocationCache.mockReturnValue('/Users/me/Documents/NoteFlow');
    render(<ExportAISection {...defaultProps} />);
    expect(
      screen.getByText('macOS: /Users/me/Documents/NoteFlow.')
    ).toBeInTheDocument();
  });
  it('uses cached Linux path for hint', () => {
    // A /home/… path is classified as Linux.
    mocks.getTauriExportLocationCache.mockReturnValue('/home/me/Documents/NoteFlow');
    render(<ExportAISection {...defaultProps} />);
    expect(
      screen.getByText('Linux: /home/me/Documents/NoteFlow.')
    ).toBeInTheDocument();
  });
  it('falls back to browser detection when no cached path', () => {
    // Without a cached path, navigator userAgentData/platform drive the hint.
    Object.defineProperty(window.navigator, 'userAgentData', {
      value: { platform: 'Windows' },
      configurable: true,
    });
    Object.defineProperty(window.navigator, 'platform', {
      value: 'Win32',
      configurable: true,
    });
    render(<ExportAISection {...defaultProps} />);
    expect(screen.getByText((text) => text.startsWith('Windows:'))).toBeInTheDocument();
  });
});
// When running inside Tauri, the component resolves the export directory via
// the Tauri path APIs (documentDir + join) and caches the result.
describe('Tauri path hydration', () => {
  it('loads tauri export location when available', async () => {
    mocks.isTauriEnvironment.mockReturnValue(true);
    mocks.documentDir.mockResolvedValue('/Users/test/Documents');
    mocks.join.mockResolvedValue('/Users/test/Documents/NoteFlow');
    render(<ExportAISection {...defaultProps} />);
    // Resolution is async, so wait for the hint to reflect the joined path.
    await waitFor(() =>
      expect(
        screen.getByText('macOS: /Users/test/Documents/NoteFlow.')
      ).toBeInTheDocument()
    );
    // The resolved path is written back to the preference cache.
    expect(mocks.setTauriExportLocationCache).toHaveBeenCalledWith(
      '/Users/test/Documents/NoteFlow'
    );
  });
  it('logs when tauri path APIs are unavailable', async () => {
    mocks.isTauriEnvironment.mockReturnValue(true);
    // A rejected documentDir should degrade gracefully to browser detection…
    mocks.documentDir.mockRejectedValue(new Error('nope'));
    render(<ExportAISection {...defaultProps} />);
    // …and emit a client log so the fallback is observable.
    await waitFor(() =>
      expect(mocks.addClientLog).toHaveBeenCalledWith(
        expect.objectContaining({
          message: 'Tauri path APIs unavailable - using browser detection for export location',
        })
      )
    );
  });
});
});

View File

@@ -0,0 +1,40 @@
import { describe, expect, it } from 'vitest';
import type { Integration } from '@/api/types';
import { getCalendarProvider, groupIntegrationsByType } from './helpers';
// Test fixture factory: a baseline Integration with caller-specified overrides.
const buildIntegration = (overrides: Partial<Integration>): Integration => {
  const base: Integration = {
    id: 'id',
    name: 'Custom',
    type: 'custom',
    status: 'disconnected',
  };
  return { ...base, ...overrides };
};
describe('integration helpers', () => {
it('detects calendar providers by name', () => {
expect(getCalendarProvider(buildIntegration({ name: 'Google Calendar' }))).toBe('google');
expect(getCalendarProvider(buildIntegration({ name: 'Microsoft Outlook' }))).toBe('outlook');
expect(getCalendarProvider(buildIntegration({ name: 'Outlook Sync' }))).toBe('outlook');
expect(getCalendarProvider(buildIntegration({ name: 'Other' }))).toBeNull();
});
it('groups integrations by type', () => {
const integrations = [
buildIntegration({ id: 'auth', type: 'auth' }),
buildIntegration({ id: 'email', type: 'email' }),
buildIntegration({ id: 'calendar', type: 'calendar' }),
buildIntegration({ id: 'pkm', type: 'pkm' }),
buildIntegration({ id: 'oidc', type: 'oidc' }),
buildIntegration({ id: 'custom', type: 'custom' }),
];
const grouped = groupIntegrationsByType(integrations);
expect(grouped.auth).toHaveLength(1);
expect(grouped.email).toHaveLength(1);
expect(grouped.calendar).toHaveLength(1);
expect(grouped.pkm).toHaveLength(1);
expect(grouped.oidc).toHaveLength(1);
expect(grouped.custom).toHaveLength(1);
});
});

View File

@@ -13,7 +13,7 @@ import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs';
import { CustomIntegrationDialog, TestAllButton } from './custom-integration-dialog';
import { groupIntegrationsByType } from './helpers';
import { IntegrationSettingsProvider } from './integration-settings-context';
import { IntegrationSettingsProvider } from './integration-settings-provider';
import { IntegrationItem } from './integration-item';
import type { Integration } from '@/api/types';
import type { IntegrationsSectionProps } from './types';
@@ -46,7 +46,7 @@ export function IntegrationsSection({
const handleTestIntegrationWithState = useCallback(
(integration: Parameters<typeof handleTestIntegration>[0]) =>
handleTestIntegration(integration, setTestingIntegration),
[handleTestIntegration, setTestingIntegration]
[handleTestIntegration]
);
const contextValue = useMemo(

View File

@@ -1,5 +1,5 @@
import { createContext, useContext } from 'react';
import type { MutableRefObject, ReactNode } from 'react';
import type { MutableRefObject } from 'react';
import type { Integration } from '@/api/types';
import type { OAuthFlowState } from '@/hooks';
@@ -20,21 +20,8 @@ export interface IntegrationSettingsContextValue {
removeIntegration: (integrationId: string) => void;
}
const IntegrationSettingsContext = createContext<IntegrationSettingsContextValue | null>(null);
/** Supplies the integration-settings context value to the component subtree. */
export function IntegrationSettingsProvider(props: {
  value: IntegrationSettingsContextValue;
  children: ReactNode;
}) {
  const { value, children } = props;
  return (
    <IntegrationSettingsContext.Provider value={value}>
      {children}
    </IntegrationSettingsContext.Provider>
  );
}
export const IntegrationSettingsContext =
createContext<IntegrationSettingsContextValue | null>(null);
export function useIntegrationSettingsContext(): IntegrationSettingsContextValue {
const context = useContext(IntegrationSettingsContext);

View File

@@ -0,0 +1,19 @@
import type { ReactNode } from 'react';
import {
IntegrationSettingsContext,
type IntegrationSettingsContextValue,
} from './integration-settings-context';
export function IntegrationSettingsProvider({
value,
children,
}: {
value: IntegrationSettingsContextValue;
children: ReactNode;
}) {
return (
<IntegrationSettingsContext.Provider value={value}>
{children}
</IntegrationSettingsContext.Provider>
);
}

View File

@@ -4,12 +4,13 @@
import { useCallback, useEffect, useRef } from 'react';
import { getAPI } from '@/api';
import { getAPI, IdentityDefaults } from '@/api';
import type { Integration } from '@/api/types';
import { useWorkspace } from '@/contexts/workspace-state';
import { useOAuthFlow } from '@/hooks';
import { toast } from '@/hooks';
import { debug } from '@/lib/observability/debug';
import { toastError } from '@/lib/observability/errors';
import { preferences } from '@/lib/preferences';
import { getCalendarProvider } from './helpers';
@@ -20,7 +21,7 @@ interface UseCalendarIntegrationProps {
}
export function useCalendarIntegration({
integrations,
integrations: _integrations,
setIntegrations,
}: UseCalendarIntegrationProps) {
const log = debug('CalendarIntegration');
@@ -122,7 +123,7 @@ export function useCalendarIntegration({
return () => {
cancelled = true;
};
}, [setIntegrations, workspaceId]);
}, [log, setIntegrations, workspaceId]);
const syncCalendarOAuthConfig = useCallback(
async (integration: Integration) => {
@@ -228,4 +229,4 @@ export function useCalendarIntegration({
handleCalendarConnect,
handleCalendarDisconnect,
};
}
}

Some files were not shown because too many files have changed in this diff Show More