ci/cd fixes
@@ -46,7 +46,7 @@ jobs:
- name: Install system dependencies
run: |
sudo apt-get update
sudo apt-get install -y libsndfile1 portaudio19-dev
sudo apt-get install -y libsndfile1 portaudio19-dev ffmpeg

- name: Install Python dependencies
run: |

@@ -303,6 +303,7 @@ markers = [
]
filterwarnings = [
"ignore:The @wait_container_is_ready decorator is deprecated.*:DeprecationWarning:testcontainers.core.waiting_utils",
"ignore:invalid escape sequence '\\\\s':SyntaxWarning:pyannote",
]

[dependency-groups]
@@ -17,7 +17,7 @@ console = Console()
logger = get_logger(__name__)


def _show_help() -> None:
def show_help() -> None:
"""Display CLI help information."""
logger.debug("cli_no_command", message="No command provided, showing help")
console.print("[bold]NoteFlow CLI[/bold]")
@@ -39,28 +39,28 @@ def _show_help() -> None:
console.print(" python -m noteflow.cli embeddings backfill")


def _run_retention_command(command: str) -> None:
def run_retention_command(command: str) -> None:
"""Execute the retention subcommand."""
from noteflow.cli.retention import main as retention_main

retention_main()


def _run_models_command(command: str) -> None:
def run_models_command(command: str) -> None:
"""Execute the models subcommand."""
from noteflow.cli.models import main as models_main

models_main()


def _run_embeddings_command(command: str) -> None:
def run_embeddings_command(command: str) -> None:
"""Execute the embeddings subcommand."""
from noteflow.cli.embeddings import main as embeddings_main

embeddings_main()


def _dispatch_command(command: str, subcommand_args: list[str]) -> bool:
def dispatch_command(command: str, subcommand_args: list[str]) -> bool:
"""Dispatch to the appropriate command handler.

Args:
@@ -71,9 +71,9 @@ def _dispatch_command(command: str, subcommand_args: list[str]) -> bool:
True if command was handled, False if unknown.
"""
handlers: dict[str, Callable[[str], None]] = {
"retention": _run_retention_command,
"models": _run_models_command,
"embeddings": _run_embeddings_command,
"retention": run_retention_command,
"models": run_models_command,
"embeddings": run_embeddings_command,
}

handler = handlers.get(command)
@@ -95,7 +95,7 @@ def main() -> None:
logger.info("cli_invoked", argv=sys.argv)

if len(sys.argv) < 2:
_show_help()
show_help()
sys.exit(1)

command = sys.argv[1]
@@ -104,7 +104,7 @@ def main() -> None:
# Remove the command from argv so submodule parsers work correctly
sys.argv = [sys.argv[0], *subcommand_args]

dispatch_result = _dispatch_command(command, subcommand_args)
dispatch_result = dispatch_command(command, subcommand_args)
if not dispatch_result:
logger.warning("cli_unknown_command", command=command)
console.print(f"[red]Unknown command:[/red] {command}")
@@ -26,7 +26,7 @@ logger = get_logger(__name__)
console = Console()


async def _run_cleanup(dry_run: bool) -> int:
async def run_cleanup(dry_run: bool) -> int:
"""Execute retention cleanup.

Args:
@@ -78,7 +78,7 @@ async def _run_cleanup(dry_run: bool) -> int:
return 0


async def _show_status() -> int:
async def show_status() -> int:
"""Show retention status and pending deletions.

Returns:
@@ -139,9 +139,9 @@ def main() -> None:
sys.exit(1)

if args.command == "cleanup":
exit_code = asyncio.run(_run_cleanup(dry_run=args.dry_run))
exit_code = asyncio.run(run_cleanup(dry_run=args.dry_run))
elif args.command == "status":
exit_code = asyncio.run(_show_status())
exit_code = asyncio.run(show_status())
else:
parser.print_help()
exit_code = 1
@@ -101,11 +101,12 @@ def _log_and_schedule_failure(
exc_info=exc,
)

coro = mark_failed(job_id, str(exc))
try:
coro = mark_failed(job_id, str(exc))
task: asyncio.Task[None] = asyncio.create_task(coro)
task.add_done_callback(lambda t: _log_mark_failed_result(t, job_id))
except RuntimeError as schedule_err:
coro.close()
logger.error(
"Failed to schedule mark_failed for job %s: %s",
job_id,
@@ -11,7 +11,7 @@ import subprocess
from functools import cache


def _read_linux_cpuinfo() -> str | None:
def read_linux_cpuinfo() -> str | None:
"""Read /proc/cpuinfo if available, otherwise return None."""
if not os.path.exists("/proc/cpuinfo"):
return None
@@ -22,7 +22,7 @@ def _read_linux_cpuinfo() -> str | None:
return None


def _read_sysctl_features() -> str | None:
def read_sysctl_features() -> str | None:
"""Read CPU features via sysctl if available, otherwise return None."""
try:
result = subprocess.run(
@@ -46,11 +46,11 @@ def has_avx2_support() -> bool:
Returns:
True if AVX2 is supported, False otherwise.
"""
cpuinfo = _read_linux_cpuinfo()
cpuinfo = read_linux_cpuinfo()
if cpuinfo is not None:
return "avx2" in cpuinfo.lower()

features = _read_sysctl_features()
features = read_sysctl_features()
return "avx2" in features.lower() if features is not None else False
@@ -8,7 +8,6 @@ Tests cover:
from __future__ import annotations

from typing import Final

from unittest.mock import AsyncMock, MagicMock
from uuid import uuid4

@@ -22,7 +22,6 @@ from noteflow.domain.entities.analytics import (
SpeakerStat,
)


# Test constants
WORKSPACE_ID = uuid4()
"""Test workspace identifier."""
@@ -4,7 +4,6 @@ from __future__ import annotations

import re
from typing import Final

from uuid import uuid4

import pytest

@@ -24,7 +24,6 @@ from noteflow.domain.entities.task import (
normalize_task_text,
)


# Test constants
WORKSPACE_ID = uuid4()
"""Test workspace identifier."""
@@ -38,7 +38,8 @@ from noteflow.grpc.mixins.converters import (
segment_to_proto_update,
)
from noteflow.grpc.proto import noteflow_pb2
from noteflow.infrastructure.asr.dto import AsrResult, WordTiming as AsrWordTiming
from noteflow.infrastructure.asr.dto import AsrResult
from noteflow.infrastructure.asr.dto import WordTiming as AsrWordTiming
from noteflow.infrastructure.asr.segmenter import AudioSegment, Segmenter, SegmenterConfig
from noteflow.infrastructure.asr.streaming_vad import EnergyVad, StreamingVad
from noteflow.infrastructure.audio.levels import RmsLevelProvider, compute_rms
@@ -6,13 +6,13 @@ from unittest.mock import MagicMock

import pytest

import noteflow.cli.__main__ as cli_main
from noteflow.cli.__main__ import main

dispatch_command = getattr(cli_main, "_dispatch_command")
run_models_command = getattr(cli_main, "_run_models_command")
run_retention_command = getattr(cli_main, "_run_retention_command")
show_help = getattr(cli_main, "_show_help")
from noteflow.cli.__main__ import (
dispatch_command,
main,
run_models_command,
run_retention_command,
show_help,
)


class TestShowHelp:
@@ -7,11 +7,7 @@ from unittest.mock import AsyncMock, MagicMock

import pytest

import noteflow.cli.retention as retention_cli
from noteflow.cli.retention import main

run_cleanup = getattr(retention_cli, "_run_cleanup")
show_status = getattr(retention_cli, "_show_status")
from noteflow.cli.retention import main, run_cleanup, show_status


class TestRunCleanup:
@@ -21,7 +17,9 @@ class TestRunCleanup:
mock_retention_settings_disabled: MagicMock,
mock_retention_uow_factory: MagicMock,
) -> None:
assert mock_retention_settings_disabled is not None, "Retention settings fixture should be provided"
assert mock_retention_settings_disabled is not None, (
"Retention settings fixture should be provided"
)
assert mock_retention_uow_factory is not None, "Retention UOW fixture should be provided"
result = await run_cleanup(dry_run=False)
assert result == 1, "Should return 1 when retention disabled and not dry-run"
@@ -34,7 +32,9 @@ class TestRunCleanup:
mock_retention_service: MagicMock,
mock_retention_console: MagicMock,
) -> None:
assert mock_retention_settings_disabled is not None, "Retention settings fixture should be provided"
assert mock_retention_settings_disabled is not None, (
"Retention settings fixture should be provided"
)
assert mock_retention_uow_factory is not None, "Retention UOW fixture should be provided"
assert mock_retention_console is not None, "Retention console fixture should be provided"
mock_retention_service.run_cleanup = AsyncMock(
@@ -118,7 +118,9 @@ class TestShowStatus:

class TestMain:
def test_main_no_command_exits_with_1(self, argv_retention_no_command: None) -> None:
assert argv_retention_no_command is None, "argv_retention_no_command fixture should provide None"
assert argv_retention_no_command is None, (
"argv_retention_no_command fixture should provide None"
)
with pytest.raises(SystemExit, match="1") as exc_info:
main()
assert exc_info.value.code == 1, "Should exit with 1 when no command"
@@ -150,8 +152,12 @@ class TestMain:
mock_retention_service: MagicMock,
mock_retention_console: MagicMock,
) -> None:
assert argv_retention_cleanup_dry_run is None, "argv_retention_cleanup_dry_run fixture should provide None"
assert mock_retention_settings_disabled is not None, "Retention settings fixture should be provided"
assert argv_retention_cleanup_dry_run is None, (
"argv_retention_cleanup_dry_run fixture should provide None"
)
assert mock_retention_settings_disabled is not None, (
"Retention settings fixture should be provided"
)
assert mock_retention_uow_factory is not None, "Retention UOW fixture should be provided"
assert mock_retention_console is not None, "Retention console fixture should be provided"
mock_retention_service.run_cleanup = AsyncMock(
@@ -170,7 +176,9 @@ class TestMain:
mock_retention_service: MagicMock,
mock_retention_console: MagicMock,
) -> None:
assert argv_retention_status_cmd is None, "argv_retention_status_cmd fixture should provide None"
assert argv_retention_status_cmd is None, (
"argv_retention_status_cmd fixture should provide None"
)
assert mock_retention_settings is not None, "Retention settings fixture should be provided"
assert mock_retention_uow_factory is not None, "Retention UOW fixture should be provided"
assert mock_retention_console is not None, "Retention console fixture should be provided"
@@ -11,7 +11,7 @@ import asyncio
import os
import sys
import types
from collections.abc import Generator, Sequence
from collections.abc import Coroutine, Generator, Sequence
from datetime import datetime
from pathlib import Path
from types import SimpleNamespace
@@ -409,11 +409,20 @@ def sample_rate() -> int:

@pytest.fixture
def mock_grpc_context() -> MagicMock:
"""Create mock gRPC context for servicer tests."""
"""Create mock gRPC context for servicer tests.

Uses asyncio.sleep(0) as side_effect for abort to avoid
'coroutine was never awaited' warnings during GC.
"""
import asyncio

import grpc.aio

def _abort_side_effect(*args: object, **kwargs: object) -> Coroutine[object, object, None]:
return asyncio.sleep(0)

ctx = MagicMock(spec=grpc.aio.ServicerContext)
ctx.abort = AsyncMock()
ctx.abort = MagicMock(side_effect=_abort_side_effect)
return ctx
@@ -120,4 +120,4 @@ class TestSegmentCitation:
)

with pytest.raises(AttributeError, match=FROZEN_ASSIGNMENT_MESSAGE):
setattr(citation, "text", "Modified")
citation.text = "Modified"

@@ -104,7 +104,7 @@ class TestInterruptConfig:

def test_interrupt_config_is_frozen(self, default_interrupt_config: InterruptConfig) -> None:
with pytest.raises(AttributeError, match=FROZEN_ASSIGNMENT_MESSAGE):
setattr(default_interrupt_config, "allow_ignore", True)
default_interrupt_config.allow_ignore = True


class TestInterruptRequest:
@@ -160,7 +160,7 @@ class TestInterruptRequest:
web_search_interrupt_request: InterruptRequest,
) -> None:
with pytest.raises(AttributeError, match=FROZEN_ASSIGNMENT_MESSAGE):
setattr(web_search_interrupt_request, "message", "Changed")
web_search_interrupt_request.message = "Changed"


class TestInterruptResponse:
@@ -215,7 +215,7 @@ class TestInterruptResponse:

def test_response_is_frozen(self, approved_response: InterruptResponse) -> None:
with pytest.raises(AttributeError, match=FROZEN_ASSIGNMENT_MESSAGE):
setattr(approved_response, "action", InterruptAction.REJECT)
approved_response.action = InterruptAction.REJECT


class TestCreateWebSearchInterrupt:
@@ -71,7 +71,7 @@ class TestAssistantRequest:
user_id=uuid4(),
)
with pytest.raises(AttributeError, match=FROZEN_ASSIGNMENT_MESSAGE):
setattr(request, "question", "Modified question")
request.question = "Modified question"


class TestAssistantPort:

@@ -49,7 +49,7 @@ class TestExportRules:

rules = ExportRules()
with pytest.raises(FrozenInstanceError, match="cannot assign"):
setattr(rules, "default_format", ExportFormat.HTML)
rules.default_format = ExportFormat.HTML


class TestTriggerRules:
@@ -92,7 +92,7 @@ class TestTriggerRules:

rules = TriggerRules()
with pytest.raises(FrozenInstanceError, match="cannot assign"):
setattr(rules, "auto_start_enabled", True)
rules.auto_start_enabled = True


class TestProjectSettings:
@@ -453,7 +453,7 @@ def get_expected_entity_texts() -> set[str]:

def get_expected_entity_categories() -> dict[str, EntityCategory]:
"""Get expected entity categories from sample data."""
return {text: category for text, category in EXPECTED_ENTITIES_IN_SAMPLE}
return dict(EXPECTED_ENTITIES_IN_SAMPLE)


def build_segment_tuples_for_ner(
@@ -6,9 +6,6 @@ from typing import Final

import pytest

SEGMENT_START_AFTER_TURN: Final[float] = 10.0
TURN_NO_OVERLAP_SEGMENT_END: Final[float] = 15.0

from noteflow.infrastructure.diarization.assigner import assign_speaker, assign_speakers_batch
from noteflow.infrastructure.diarization.dto import SpeakerTurn
from tests.conftest import approx_float
@@ -23,6 +20,9 @@ from .conftest import (
get_segments_for_speaker,
)

SEGMENT_START_AFTER_TURN: Final[float] = 10.0
TURN_NO_OVERLAP_SEGMENT_END: Final[float] = 15.0


class TestSpeakerTurnOverlap:
def test_turn_overlaps_with_contained_segment(self) -> None:
@@ -6,9 +6,6 @@ from typing import Final

import pytest

DOUBLE_DIGIT_CITATION_A: Final[int] = 10
DOUBLE_DIGIT_CITATION_END: Final[int] = 15

from tests.conftest import approx_float

from .conftest import (
@@ -21,6 +18,9 @@ from .conftest import (
extract_segment_ids_from_retrieved,
)

DOUBLE_DIGIT_CITATION_A: Final[int] = 10
DOUBLE_DIGIT_CITATION_END: Final[int] = 15


class TestRetrievalMetricsCalculation:
def test_perfect_retrieval_yields_perfect_mrr(self) -> None:
@@ -166,9 +166,7 @@ class TestCitationExtraction:
assert citations == [1, 1], "Should extract repeated citations"

def test_extract_double_digit_citations(self) -> None:
answer = (
f"See references [{DOUBLE_DIGIT_CITATION_A}] and [{DOUBLE_DIGIT_CITATION_END}]."
)
answer = f"See references [{DOUBLE_DIGIT_CITATION_A}] and [{DOUBLE_DIGIT_CITATION_END}]."
citations = extract_citations_from_answer(answer)
assert citations == [
DOUBLE_DIGIT_CITATION_A,
@@ -10,10 +10,10 @@ from tests.conftest import approx_float

from .conftest import (
ConfigurableLLM,
MockSegment,
build_context_from_segments,
calculate_citation_accuracy,
extract_citations_from_answer,
MockSegment,
)

VALID_SEGMENT_IDS: Final[set[int]] = {1, 2, 3, 4, 5}
@@ -8,9 +8,9 @@ from __future__ import annotations

from collections.abc import Mapping, Sequence
from typing import Protocol
from noteflow.infrastructure.audio.writer import MeetingAudioWriter

from noteflow.grpc.mixins._types import GrpcContext
from noteflow.infrastructure.audio.writer import MeetingAudioWriter


class CreateMeetingRequestProto(Protocol):
@@ -8,8 +8,6 @@ from uuid import uuid4
import grpc
import pytest

from tests.conftest import approx_float

from noteflow.application.services.assistant import (
AssistantResponse,
AssistantService,
@@ -20,6 +18,7 @@ from noteflow.domain.ai.ports import AssistantRequest
from noteflow.grpc.config.config import ServicesConfig
from noteflow.grpc.proto import noteflow_pb2
from noteflow.grpc.service import NoteFlowServicer
from tests.conftest import approx_float

if TYPE_CHECKING:
from noteflow.infrastructure.ai.nodes.annotation_suggester import SuggestedAnnotation
@@ -9,7 +9,7 @@ Tests cover:
from __future__ import annotations

from typing import TYPE_CHECKING
from unittest.mock import AsyncMock
from unittest.mock import AsyncMock, MagicMock
from uuid import uuid4

import pytest
@@ -22,7 +22,7 @@ from noteflow.grpc.mixins.entities import EntitiesMixin
from noteflow.grpc.proto import noteflow_pb2

if TYPE_CHECKING:
from unittest.mock import MagicMock
pass


class MockRepositoryProvider:
@@ -117,11 +117,7 @@ def create_sample_entity(
@pytest.fixture
def mock_entities_repo() -> AsyncMock:
"""Create mock entity repository with common methods.

Returns:
AsyncMock with get, update, delete methods.
"""
"""Create mock entity repository with common methods."""
repo = AsyncMock()
repo.get = AsyncMock(return_value=None)
repo.update = AsyncMock(return_value=None)
@@ -130,14 +126,11 @@ def mock_entities_repo() -> AsyncMock:


@pytest.fixture
def mockner_service() -> AsyncMock:
"""Create mock NER service.

Returns:
AsyncMock with extract_entities method.
"""
service = AsyncMock()
def mockner_service() -> MagicMock:
"""Create mock NER service."""
service = MagicMock()
service.extract_entities = AsyncMock()
service.is_ner_ready = MagicMock(return_value=True)
return service
@@ -149,7 +142,7 @@ class TestExtractEntities:
self,
mock_entities_repo: AsyncMock,
mock_meetings_repo: AsyncMock,
mockner_service: AsyncMock,
mockner_service: MagicMock,
) -> MockServicerHost:
"""Create servicer with mock NER service."""
return MockServicerHost(
@@ -161,7 +154,7 @@ class TestExtractEntities:
async def test_returns_extracted_entities(
self,
servicer: MockServicerHost,
mockner_service: AsyncMock,
mockner_service: MagicMock,
mock_grpc_context: MagicMock,
) -> None:
"""ExtractEntities returns entities from NER service."""
@@ -195,7 +188,7 @@ class TestExtractEntities:
async def test_returns_cached_entities(
self,
servicer: MockServicerHost,
mockner_service: AsyncMock,
mockner_service: MagicMock,
mock_grpc_context: MagicMock,
) -> None:
"""ExtractEntities returns cached result when available."""
@@ -224,7 +217,7 @@ class TestExtractEntities:
async def test_force_refresh_bypasses_cache(
self,
servicer: MockServicerHost,
mockner_service: AsyncMock,
mockner_service: MagicMock,
mock_grpc_context: MagicMock,
) -> None:
"""ExtractEntities with force_refresh re-extracts entities."""
@@ -252,7 +245,7 @@ class TestExtractEntities:
async def test_aborts_when_meeting_not_found(
self,
servicer: MockServicerHost,
mockner_service: AsyncMock,
mockner_service: MagicMock,
mock_grpc_context: MagicMock,
) -> None:
"""ExtractEntities aborts when meeting does not exist."""
@@ -295,7 +288,7 @@ class TestExtractEntities:
async def test_aborts_when_feature_disabled(
self,
servicer: MockServicerHost,
mockner_service: AsyncMock,
mockner_service: MagicMock,
mock_grpc_context: MagicMock,
) -> None:
"""ExtractEntities aborts when NER feature is disabled."""
@@ -332,7 +325,7 @@ class TestExtractEntities:
async def test_returns_empty_entities_when_none_found(
self,
servicer: MockServicerHost,
mockner_service: AsyncMock,
mockner_service: MagicMock,
mock_grpc_context: MagicMock,
) -> None:
"""ExtractEntities returns empty list when no entities extracted."""
@@ -354,7 +347,7 @@ class TestExtractEntities:
async def test_includes_pinned_status_in_response(
self,
servicer: MockServicerHost,
mockner_service: AsyncMock,
mockner_service: MagicMock,
mock_grpc_context: MagicMock,
) -> None:
"""ExtractEntities includes is_pinned status for each entity."""
@@ -688,7 +681,7 @@ class TestDatabaseNotSupported:
def servicer_no_db(
self,
mock_entities_repo: AsyncMock,
mockner_service: AsyncMock,
mockner_service: MagicMock,
) -> MockServicerHost:
"""Create servicer with database not supported."""
servicer = MockServicerHost(
@@ -10,8 +10,9 @@ Tests cover:

from __future__ import annotations

from collections.abc import Callable
from pathlib import Path
from typing import Callable, cast
from typing import cast
from unittest.mock import AsyncMock, MagicMock
from uuid import UUID, uuid4

@@ -31,8 +32,8 @@ from noteflow.domain.value_objects import MeetingId, MeetingState
from noteflow.grpc.mixins._types import GrpcContext
from noteflow.grpc.mixins.meeting import MeetingMixin
from noteflow.grpc.proto import noteflow_pb2
from noteflow.infrastructure.logging import request_id_var, user_id_var, workspace_id_var
from noteflow.infrastructure.audio.writer import MeetingAudioWriter
from noteflow.infrastructure.logging import request_id_var, user_id_var, workspace_id_var
from noteflow.infrastructure.security.crypto.crypto_box import AesGcmCryptoBox

from .proto_types import (
@@ -12,8 +12,8 @@ Tests cover:

from __future__ import annotations

from typing import TYPE_CHECKING
from collections.abc import Callable
from typing import TYPE_CHECKING
from unittest.mock import AsyncMock, MagicMock, patch
from uuid import uuid4
@@ -140,7 +140,7 @@ async def _call_start_sync(
) -> _StartIntegrationSyncResponse:
start_sync = cast(
_StartIntegrationSyncCallable,
getattr(servicer, "StartIntegrationSync"),
servicer.StartIntegrationSync,
)
return await start_sync(request, context)

@@ -152,7 +152,7 @@ async def _call_get_sync_status(
) -> _GetSyncStatusResponse:
get_status = cast(
_GetSyncStatusCallable,
getattr(servicer, "GetSyncStatus"),
servicer.GetSyncStatus,
)
return await get_status(request, context)

@@ -164,7 +164,7 @@ async def _call_list_sync_history(
) -> _ListSyncHistoryResponse:
list_history = cast(
_ListSyncHistoryCallable,
getattr(servicer, "ListSyncHistory"),
servicer.ListSyncHistory,
)
return await list_history(request, context)

@@ -176,7 +176,7 @@ async def _call_get_user_integrations(
) -> _GetUserIntegrationsResponse:
get_integrations = cast(
_GetUserIntegrationsCallable,
getattr(servicer, "GetUserIntegrations"),
servicer.GetUserIntegrations,
)
return await get_integrations(request, context)

@@ -887,7 +887,7 @@ class TestSyncRunExpiryMetadata:
# Type annotation needed: ensure_sync_runs_cache is a mixin method added via SyncMixin
ensure_cache = cast(
Callable[[], dict[UUID, SyncRun]],
getattr(servicer_with_success, "ensure_sync_runs_cache"),
servicer_with_success.ensure_sync_runs_cache,
)
ensure_cache()
cache_times: dict[UUID, datetime] = servicer_with_success.sync_run_cache_times
@@ -9,6 +9,7 @@ Sprint GAP-003: Error Handling Mismatches
from __future__ import annotations

import asyncio
import contextlib
from typing import TYPE_CHECKING
from unittest.mock import AsyncMock

@@ -53,8 +54,10 @@ def event_loop_fixture() -> Generator[asyncio.AbstractEventLoop, None, None]:

def _create_completed_task(loop: asyncio.AbstractEventLoop) -> asyncio.Task[None]:
"""Create a successfully completed task."""

async def successful() -> None:
pass

task = loop.create_task(successful())
loop.run_until_complete(task)
return task
@@ -64,28 +67,28 @@ def _create_failed_task(
loop: asyncio.AbstractEventLoop, error_msg: str = TASK_ERROR_MESSAGE
) -> asyncio.Task[None]:
"""Create a task that failed with ValueError."""

async def failing() -> None:
raise ValueError(error_msg)

task = loop.create_task(failing())
try:
with contextlib.suppress(ValueError):
loop.run_until_complete(task)
except ValueError:
pass
return task


def _create_cancelled_task(loop: asyncio.AbstractEventLoop) -> asyncio.Task[None]:
"""Create a cancelled task without using sleep."""

async def cancellable() -> None:
# Use an Event instead of sleep to make task cancellable
event = asyncio.Event()
await event.wait()

task = loop.create_task(cancellable())
task.cancel()
try:
with contextlib.suppress(asyncio.CancelledError):
loop.run_until_complete(task)
except asyncio.CancelledError:
pass
return task
@@ -136,9 +139,7 @@ class TestTaskDictCleanupOnSuccess:
callback = create_job_done_callback(JOB_ID_1, tasks_dict, mock_mark_failed)
callback(task)

assert JOB_ID_1 not in tasks_dict, (
"Job should be removed from tasks_dict after completion"
)
assert JOB_ID_1 not in tasks_dict, "Job should be removed from tasks_dict after completion"


class TestTaskDictCleanupOnFailure:
@@ -157,9 +158,7 @@ class TestTaskDictCleanupOnFailure:
callback = create_job_done_callback(JOB_ID_1, tasks_dict, mock_mark_failed)
callback(task)

assert JOB_ID_1 not in tasks_dict, (
"Job should be removed from tasks_dict after failure"
)
assert JOB_ID_1 not in tasks_dict, "Job should be removed from tasks_dict after failure"


class TestTaskDictMissingJob:
@@ -232,9 +231,7 @@ class TestCancelledTaskCleanup:
callback = create_job_done_callback(JOB_ID_1, tasks_dict, mock_mark_failed)
callback(task)

assert JOB_ID_1 not in tasks_dict, (
"Cancelled job should be removed from tasks_dict"
)
assert JOB_ID_1 not in tasks_dict, "Cancelled job should be removed from tasks_dict"


class TestFailedTaskSchedulesMarkFailed:
@@ -273,9 +270,7 @@ class TestMarkFailedReceivesJobId:
_run_pending_callbacks(event_loop_fixture)

call_args = mock_mark_failed.call_args
assert call_args[0][0] == JOB_ID_1, (
f"mark_failed should receive job_id '{JOB_ID_1}'"
)
assert call_args[0][0] == JOB_ID_1, f"mark_failed should receive job_id '{JOB_ID_1}'"


class TestMarkFailedReceivesErrorMessage:
@@ -326,10 +321,8 @@ class TestVariousExceptionTypes:
raise exception_type(error_msg)

task = event_loop_fixture.create_task(failing())
try:
with contextlib.suppress(exception_type):
event_loop_fixture.run_until_complete(task)
except exception_type:
pass

callback = create_job_done_callback(JOB_ID_1, tasks_dict, mock_mark_failed)
callback(task)
@@ -5,8 +5,8 @@ from __future__ import annotations
from datetime import UTC, datetime, timedelta, timezone

import pytest
from google.protobuf.timestamp_pb2 import Timestamp

from google.protobuf.timestamp_pb2 import Timestamp
from noteflow.grpc.mixins.converters import (
datetime_to_epoch_seconds,
datetime_to_iso_string,
@@ -75,13 +75,13 @@ class TestMeetingQAConfig:
"""Config is a frozen dataclass."""
config = MeetingQAConfig()
with pytest.raises(AttributeError, match="cannot assign"):
setattr(config, "enable_web_search", True)
config.enable_web_search = True


class TestMeetingQADependencies:
"""Tests for MeetingQADependencies dataclass."""

def test_meeting_qa_deps_stores_embedder(self, mock_embedder: "MockEmbedder", mock_llm: "MockLLM") -> None:
def test_meeting_qa_deps_stores_embedder(self, mock_embedder: MockEmbedder, mock_llm: MockLLM) -> None:
"""Dependencies stores embedder."""
deps = MeetingQADependencies(
embedder=mock_embedder,
@@ -90,7 +90,7 @@ class TestMeetingQADependencies:
)
assert deps.embedder is mock_embedder, "should store embedder"

def test_meeting_qa_deps_stores_llm(self, mock_embedder: "MockEmbedder", mock_llm: "MockLLM") -> None:
def test_meeting_qa_deps_stores_llm(self, mock_embedder: MockEmbedder, mock_llm: MockLLM) -> None:
"""Dependencies stores llm."""
deps = MeetingQADependencies(
embedder=mock_embedder,
@@ -100,7 +100,7 @@ class TestMeetingQADependencies:
assert deps.llm is mock_llm, "should store llm"

def test_meeting_qa_deps_default_web_search_provider_is_none(
self, mock_embedder: "MockEmbedder", mock_llm: "MockLLM"
self, mock_embedder: MockEmbedder, mock_llm: MockLLM
) -> None:
"""Dependencies default web_search_provider is None."""
deps = MeetingQADependencies(
@@ -110,7 +110,7 @@ class TestMeetingQADependencies:
)
assert deps.web_search_provider is None, "default web_search_provider should be None"

def test_meeting_qa_deps_is_frozen(self, mock_embedder: "MockEmbedder", mock_llm: "MockLLM") -> None:
def test_meeting_qa_deps_is_frozen(self, mock_embedder: MockEmbedder, mock_llm: MockLLM) -> None:
"""Dependencies is a frozen dataclass."""
deps = MeetingQADependencies(
embedder=mock_embedder,
@@ -118,4 +118,4 @@ class TestMeetingQADependencies:
llm=mock_llm,
)
with pytest.raises(AttributeError, match="cannot assign"):
setattr(deps, "embedder", None)
deps.embedder = None
@@ -75,13 +75,13 @@ class TestWorkspaceQAConfig:
"""Config is a frozen dataclass."""
config = WorkspaceQAConfig()
with pytest.raises(AttributeError, match="cannot assign"):
setattr(config, "enable_web_search", True)
config.enable_web_search = True


class TestWorkspaceQADependencies:
"""Tests for WorkspaceQADependencies dataclass."""

def test_workspace_qa_deps_stores_embedder(self, mock_embedder: "MockEmbedder", mock_llm: "MockLLM") -> None:
def test_workspace_qa_deps_stores_embedder(self, mock_embedder: MockEmbedder, mock_llm: MockLLM) -> None:
"""Dependencies stores embedder."""
deps = WorkspaceQADependencies(
embedder=mock_embedder,
@@ -90,7 +90,7 @@ class TestWorkspaceQADependencies:
)
assert deps.embedder is mock_embedder, "should store embedder"

def test_workspace_qa_deps_stores_llm(self, mock_embedder: "MockEmbedder", mock_llm: "MockLLM") -> None:
def test_workspace_qa_deps_stores_llm(self, mock_embedder: MockEmbedder, mock_llm: MockLLM) -> None:
"""Dependencies stores llm."""
deps = WorkspaceQADependencies(
embedder=mock_embedder,
@@ -100,7 +100,7 @@ class TestWorkspaceQADependencies:
assert deps.llm is mock_llm, "should store llm"

def test_workspace_qa_deps_default_web_search_provider_is_none(
self, mock_embedder: "MockEmbedder", mock_llm: "MockLLM"
self, mock_embedder: MockEmbedder, mock_llm: MockLLM
) -> None:
"""Dependencies default web_search_provider is None."""
deps = WorkspaceQADependencies(
@@ -110,7 +110,7 @@ class TestWorkspaceQADependencies:
)
assert deps.web_search_provider is None, "default web_search_provider should be None"

def test_workspace_qa_deps_is_frozen(self, mock_embedder: "MockEmbedder", mock_llm: "MockLLM") -> None:
def test_workspace_qa_deps_is_frozen(self, mock_embedder: MockEmbedder, mock_llm: MockLLM) -> None:
"""Dependencies is a frozen dataclass."""
deps = WorkspaceQADependencies(
embedder=mock_embedder,
@@ -118,4 +118,4 @@ class TestWorkspaceQADependencies:
llm=mock_llm,
)
with pytest.raises(AttributeError, match="cannot assign"):
setattr(deps, "embedder", None)
deps.embedder = None
@@ -90,7 +90,7 @@ class TestSuggestedAnnotation:
segment_ids=SAMPLE_SEGMENT_IDS,
)
with pytest.raises(AttributeError, match="cannot assign"):
setattr(annotation, "text", "New text")
annotation.text = "New text"

def test_to_annotation_payload_has_text(self) -> None:
annotation = SuggestedAnnotation(

@@ -71,7 +71,7 @@ class TestVerificationResult:
def test_verification_result_is_frozen(self) -> None:
result = VerificationResult(is_valid=True, invalid_citation_indices=())
with pytest.raises(AttributeError, match="cannot assign"):
setattr(result, "is_valid", False)
result.is_valid = False


class TestVerifyCitations:
@@ -139,8 +139,8 @@ class TestVerifyCitations:
class TestVerificationConstants:
def test_no_segments_reason_value(self) -> None:
expected = "No segments retrieved for question"
assert NO_SEGMENTS_REASON == expected, "constant should match"
assert expected == NO_SEGMENTS_REASON, "constant should match"

def test_invalid_citations_prefix_value(self) -> None:
expected = "Invalid citation indices: "
assert INVALID_CITATIONS_PREFIX == expected, "constant should match"
assert expected == INVALID_CITATIONS_PREFIX, "constant should match"

@@ -82,7 +82,7 @@ class TestWebSearchResult:
def test_web_search_result_is_frozen(self) -> None:
result = WebSearchResult(title=SAMPLE_TITLE, url=SAMPLE_URL, snippet=SAMPLE_SNIPPET)
with pytest.raises(AttributeError, match="cannot assign"):
setattr(result, "title", "New Title")
result.title = "New Title"


class TestWebSearchResponse:
@@ -10,8 +10,8 @@ from noteflow.infrastructure.ai.cache import (
DEFAULT_MAX_SIZE,
DEFAULT_TTL_SECONDS,
HASH_ALGORITHM,
CacheEntry,
CachedEmbedder,
CacheEntry,
EmbeddingCache,
EmbeddingCacheStats,
)

@@ -78,7 +78,7 @@ class TestGuardrailResult:
def test_result_is_frozen(self) -> None:
result = GuardrailResult(allowed=True)
with pytest.raises(AttributeError, match=FROZEN_ASSIGNMENT_MESSAGE):
setattr(result, "allowed", False)
result.allowed = False


class TestGuardrailRules:
@@ -9,6 +9,28 @@ from noteflow.infrastructure.ai._langgraph_compat import create_command
EXPECTED_COMMAND_TYPE: Final[str] = "Command"


_ATTR_GRAPH: Final[str] = "graph"
_ATTR_UPDATE: Final[str] = "update"
_ATTR_RESUME: Final[str] = "resume"


def _get_graph(cmd: object) -> str | None:
"""Get graph attribute from Command object."""
result = getattr(cmd, _ATTR_GRAPH)
assert result is None or isinstance(result, str)
return result


def _get_update(cmd: object) -> object:
"""Get update attribute from Command object."""
return getattr(cmd, _ATTR_UPDATE)


def _get_resume(cmd: object) -> object:
"""Get resume attribute from Command object."""
return getattr(cmd, _ATTR_RESUME)


class TestCreateCommand:
def test_creates_command_with_no_arguments(self) -> None:
cmd = create_command()
@@ -18,19 +40,19 @@ class TestCreateCommand:
def test_creates_command_with_graph_parameter(self) -> None:
cmd = create_command(graph="subgraph")
assert cmd is not None, "Command should be created"
assert getattr(cmd, "graph") == "subgraph", "Graph should be 'subgraph'"
assert _get_graph(cmd) == "subgraph", "Graph should be 'subgraph'"

def test_creates_command_with_update_parameter(self) -> None:
update_data = {"key": "value"}
cmd = create_command(update=update_data)
assert cmd is not None, "Command should be created"
assert getattr(cmd, "update") == update_data, "Update should match"
assert _get_update(cmd) == update_data, "Update should match"

def test_creates_command_with_resume_dict(self) -> None:
resume_data = {"action": "approve"}
cmd = create_command(resume=resume_data)
assert cmd is not None, "Command should be created"
assert getattr(cmd, "resume") == resume_data, "Resume should match"
assert _get_resume(cmd) == resume_data, "Resume should match"

def test_creates_command_with_all_parameters(self) -> None:
cmd = create_command(
@@ -39,18 +61,18 @@ class TestCreateCommand:
resume={"response": "yes"},
)
assert cmd is not None, "Command should be created"
assert getattr(cmd, "graph") == "target_graph", "Graph should be 'target_graph'"
assert getattr(cmd, "update") == {"state": "updated"}, "Update should match"
assert getattr(cmd, "resume") == {"response": "yes"}, "Resume should match"
assert _get_graph(cmd) == "target_graph", "Graph should be 'target_graph'"
assert _get_update(cmd) == {"state": "updated"}, "Update should match"
assert _get_resume(cmd) == {"response": "yes"}, "Resume should match"

def test_command_graph_defaults_to_none(self) -> None:
cmd = create_command()
assert getattr(cmd, "graph") is None
assert _get_graph(cmd) is None

def test_command_update_defaults_to_none(self) -> None:
cmd = create_command()
assert getattr(cmd, "update") is None
assert _get_update(cmd) is None

def test_command_resume_defaults_to_none(self) -> None:
cmd = create_command()
assert getattr(cmd, "resume") is None
assert _get_resume(cmd) is None
@@ -138,7 +138,7 @@ class TestRetrieveSegments:
)

with pytest.raises(AttributeError, match="cannot assign to field"):
setattr(result, "text", "Modified")
result.text = "Modified"


class MockBatchEmbedder:

@@ -10,7 +10,6 @@ from noteflow.infrastructure.ai.tools.synthesis import (
synthesize_answer,
)


INVALID_CITATION_ID = 99
FROZEN_ASSIGNMENT_MESSAGE = "cannot assign to field"

@@ -155,4 +154,4 @@ class TestSynthesisResult:
)

with pytest.raises(AttributeError, match=FROZEN_ASSIGNMENT_MESSAGE):
setattr(result, "answer", "Modified")
result.answer = "Modified"
@@ -60,9 +60,8 @@ class TestCreateAsrEngine:
with patch(
"noteflow.infrastructure.asr.factory.resolve_device",
return_value="invalid_device",
):
with pytest.raises(EngineCreationError, match="Unsupported device"):
create_asr_engine(device="invalid_device")
), pytest.raises(EngineCreationError, match="Unsupported device"):
create_asr_engine(device="invalid_device")


class TestDeviceResolution:
@@ -43,10 +43,12 @@ class TestDetectGpuBackend:
raise ImportError("No module named 'torch'")
return original_import(name, *args, **kwargs)

with patch.dict("sys.modules", {"torch": None}):
with patch("builtins.__import__", side_effect=mock_import):
result = detect_gpu_backend()
assert result == GpuBackend.NONE, "Missing PyTorch should return NONE"
with (
patch.dict("sys.modules", {"torch": None}),
patch("builtins.__import__", side_effect=mock_import),
):
result = detect_gpu_backend()
assert result == GpuBackend.NONE, "Missing PyTorch should return NONE"

# Clear cache after test
detect_gpu_backend.cache_clear()
@@ -93,7 +93,7 @@ def test_get_stats_returns_empty_when_no_events() -> None:

stats = metrics.get_infrastructure_stats()
expected_empty = InfrastructureStats.empty()


# Verify all fields match empty stats
assert (
stats.total_fallbacks == expected_empty.total_fallbacks
@@ -2,6 +2,7 @@

from __future__ import annotations

from typing import Final
from unittest.mock import MagicMock

import pytest
@@ -14,6 +15,15 @@ from noteflow.infrastructure.ner.backends.gliner_backend import (
)
from noteflow.infrastructure.ner.backends.types import RawEntity

_ATTR_MODEL: Final[str] = "_model"


def _get_mock_model(backend: GLiNERBackend) -> MagicMock:
"""Access the internal model via getattr for type safety in tests."""
model = getattr(backend, _ATTR_MODEL)
assert isinstance(model, MagicMock), "Expected mock model to be set"
return model


def _create_backend_with_mock_model(
model_name: str = DEFAULT_MODEL,
@@ -118,18 +128,18 @@ class TestGLiNERBackendExtraction:
def test_extract_passes_threshold_to_predict(self) -> None:
backend = _create_backend_with_mock_model(threshold=0.75)
backend.extract("Some text")
mock_model = getattr(backend, "_model")
predict_entities = getattr(mock_model, "predict_entities")
call_kwargs = predict_entities.call_args
mock_model = _get_mock_model(backend)
call_kwargs = mock_model.predict_entities.call_args
assert call_kwargs is not None, "predict_entities should have been called"
assert call_kwargs[1]["threshold"] == 0.75

def test_extract_passes_labels_to_predict(self) -> None:
custom_labels = ("person", "task")
backend = _create_backend_with_mock_model(labels=custom_labels)
backend.extract("Some text")
mock_model = getattr(backend, "_model")
predict_entities = getattr(mock_model, "predict_entities")
call_kwargs = predict_entities.call_args
mock_model = _get_mock_model(backend)
call_kwargs = mock_model.predict_entities.call_args
assert call_kwargs is not None, "predict_entities should have been called"
assert call_kwargs[1]["labels"] == ["person", "task"]

@pytest.mark.parametrize(
@@ -5,11 +5,12 @@ from __future__ import annotations
import os
from unittest.mock import MagicMock, mock_open, patch

import noteflow.infrastructure.platform as platform
from noteflow.infrastructure.platform import configure_pytorch_for_platform, has_avx2_support

read_linux_cpuinfo = getattr(platform, "_read_linux_cpuinfo")
read_sysctl_features = getattr(platform, "_read_sysctl_features")
from noteflow.infrastructure.platform import (
configure_pytorch_for_platform,
has_avx2_support,
read_linux_cpuinfo,
read_sysctl_features,
)


class TestReadLinuxCpuinfo:
@@ -65,7 +66,7 @@ class TestHasAvx2Support:
cpuinfo = "processor : 0\nflags : fpu avx2 sse4_2\n"
with (
patch(
"noteflow.infrastructure.platform._read_linux_cpuinfo",
"noteflow.infrastructure.platform.read_linux_cpuinfo",
return_value=cpuinfo,
),
):
@@ -77,7 +78,7 @@ class TestHasAvx2Support:
cpuinfo = "processor : 0\nflags : fpu sse4_2\n"
with (
patch(
"noteflow.infrastructure.platform._read_linux_cpuinfo",
"noteflow.infrastructure.platform.read_linux_cpuinfo",
return_value=cpuinfo,
),
):
@@ -88,11 +89,11 @@ class TestHasAvx2Support:
has_avx2_support.cache_clear()
with (
patch(
"noteflow.infrastructure.platform._read_linux_cpuinfo",
"noteflow.infrastructure.platform.read_linux_cpuinfo",
return_value=None,
),
patch(
"noteflow.infrastructure.platform._read_sysctl_features",
"noteflow.infrastructure.platform.read_sysctl_features",
return_value="FPU AVX2 SSE",
),
):
@@ -103,11 +104,11 @@ class TestHasAvx2Support:
has_avx2_support.cache_clear()
with (
patch(
"noteflow.infrastructure.platform._read_linux_cpuinfo",
"noteflow.infrastructure.platform.read_linux_cpuinfo",
return_value=None,
),
patch(
"noteflow.infrastructure.platform._read_sysctl_features",
"noteflow.infrastructure.platform.read_sysctl_features",
return_value=None,
),
):
@@ -139,7 +139,7 @@ class MeetingQAInputFactory:
@staticmethod
def create(
question: str,
meeting_id: "MeetingId",
meeting_id: MeetingId,
top_k: int = DEFAULT_TOP_K,
) -> dict[str, object]:
return {
@@ -14,16 +14,16 @@ from noteflow.infrastructure.ai.graphs.meeting_qa import (

from .conftest import (
DEFAULT_TOP_K,
SEGMENT_ID_ONE,
SEGMENT_ID_TWO,
INVALID_SEGMENT_ID,
SEGMENT_START_ZERO,
SEGMENT_END_FIVE,
SEGMENT_START_TEN,
SEGMENT_END_FIFTEEN,
MIN_EXPECTED_CALLS,
SCORE_HIGH,
SCORE_MEDIUM,
MIN_EXPECTED_CALLS,
SEGMENT_END_FIFTEEN,
SEGMENT_END_FIVE,
SEGMENT_ID_ONE,
SEGMENT_ID_TWO,
SEGMENT_START_TEN,
SEGMENT_START_ZERO,
MeetingQAInputFactory,
MockEmbedder,
MockLLM,
@@ -5,6 +5,7 @@ from uuid import UUID, uuid4

import pytest

from noteflow.domain.value_objects import MeetingId
from noteflow.infrastructure.ai.graphs.workspace_qa import (
WorkspaceQADependencies,
WorkspaceQAInternalState,
@@ -29,8 +30,6 @@ from .conftest import (
WorkspaceQAInputFactory,
)

from noteflow.domain.value_objects import MeetingId


@pytest.fixture
def ws_sample_workspace_id() -> UUID:
@@ -260,7 +260,7 @@ def sample_task(task_workspace: UUID) -> Task:
id=uuid4(),
workspace_id=task_workspace,
meeting_id=None,
action_item_id=1,
action_item_id=None,
text="Sample task for testing",
status=TaskStatus.OPEN,
priority=1,
@@ -292,7 +292,7 @@ async def tasks_with_statuses(
id=uuid4(),
workspace_id=task_workspace,
meeting_id=None,
action_item_id=1,
action_item_id=None,
text="Open task",
status=TaskStatus.OPEN,
priority=1,
@@ -303,7 +303,7 @@ async def tasks_with_statuses(
id=uuid4(),
workspace_id=task_workspace,
meeting_id=None,
action_item_id=2,
action_item_id=None,
text="Done task",
status=TaskStatus.DONE,
priority=1,
@@ -314,7 +314,7 @@ async def tasks_with_statuses(
id=uuid4(),
workspace_id=task_workspace,
meeting_id=None,
action_item_id=3,
action_item_id=None,
text="Dismissed task",
status=TaskStatus.DISMISSED,
priority=1,
@@ -120,14 +120,12 @@ class TestWhisperPyTorchEngineIntegration:
assert hasattr(first_result, "start"), "Expected start attribute on result"
assert hasattr(first_result, "end"), "Expected end attribute on result"
assert hasattr(first_result, "language"), "Expected language attribute on result"
assert (
len(first_result.text.strip()) > 0
), "Expected non-empty transcription text"
assert len(first_result.text.strip()) > 0, "Expected non-empty transcription text"
assert first_result.start >= 0.0, "Expected non-negative start time"
assert first_result.end > first_result.start, "Expected end > start"
assert (
first_result.end <= MAX_AUDIO_SECONDS + 1.0
), "Expected end time within audio duration buffer"
assert first_result.end <= MAX_AUDIO_SECONDS + 1.0, (
"Expected end time within audio duration buffer"
)
finally:
engine.unload()

@@ -524,7 +522,7 @@ class TestWhisperPyTorchEngineErrorHandling:
try:
nonexistent_path = Path("/nonexistent/path/audio.wav")

with pytest.raises((FileNotFoundError, RuntimeError, OSError), match=".*"):
with pytest.raises((FileNotFoundError, RuntimeError, OSError), match=r".*"):
list(engine.transcribe_file(nonexistent_path))
finally:
engine.unload()
@@ -87,6 +87,10 @@ class MockContext:
self.abort_details = details
raise grpc.RpcError()

def invocation_metadata(self) -> list[tuple[str, str]]:
"""Return empty metadata for mock context."""
return []


def create_audio_chunk(
meeting_id: str,
@@ -178,7 +182,9 @@ class TestStreamInitialization:
await uow.commit()

mock_asr = MagicMock(is_loaded=True, transcribe_async=AsyncMock(return_value=[]))
servicer: TypedServicer = TypedServicer(session_factory=session_factory, asr_engine=mock_asr)
servicer: TypedServicer = TypedServicer(
session_factory=session_factory, asr_engine=mock_asr
)
chunks = [create_audio_chunk(str(meeting.id))]

async def chunk_iter() -> AsyncIterator[noteflow_pb2.AudioChunk]:
@@ -270,7 +276,9 @@ class TestStreamSegmentPersistence:
"""Set up streaming test with mock ASR and servicer."""
mock_asr = MagicMock(is_loaded=True)
mock_asr.transcribe_async = AsyncMock(return_value=[asr_result])
servicer: TypedServicer = TypedServicer(session_factory=session_factory, asr_engine=mock_asr)
servicer: TypedServicer = TypedServicer(
session_factory=session_factory, asr_engine=mock_asr
)
audio: NDArray[np.float32] = np.random.randn(DEFAULT_SAMPLE_RATE).astype(np.float32) * 0.1
state = self._create_stream_mocks(audio)
meeting_id_str = str(meeting.id)
@@ -279,12 +287,20 @@ class TestStreamSegmentPersistence:
def _create_stream_mocks(self, audio: NDArray[np.float32]) -> MeetingStreamState:
"""Create mocked stream state with VAD and segmenter."""
mock_segment = MagicMock(audio=audio, start_time=0.0)
segmenter = MagicMock(process_audio=MagicMock(return_value=[mock_segment]), flush=MagicMock(return_value=None))
segmenter = MagicMock(
process_audio=MagicMock(return_value=[mock_segment]), flush=MagicMock(return_value=None)
)
vad = MagicMock(process_chunk=MagicMock(return_value=True))
return MeetingStreamState(
vad=vad, segmenter=segmenter, partial_buffer=PartialAudioBuffer(sample_rate=DEFAULT_SAMPLE_RATE),
sample_rate=DEFAULT_SAMPLE_RATE, channels=1, next_segment_id=0,
was_speaking=False, last_partial_time=time.time(), last_partial_text="",
vad=vad,
segmenter=segmenter,
partial_buffer=PartialAudioBuffer(sample_rate=DEFAULT_SAMPLE_RATE),
sample_rate=DEFAULT_SAMPLE_RATE,
channels=1,
next_segment_id=0,
was_speaking=False,
last_partial_time=time.time(),
last_partial_text="",
)
async def test_segments_persisted_to_database(
@@ -297,7 +313,9 @@ class TestStreamSegmentPersistence:
_, servicer, audio, state, meeting_id_str = self._setup_streaming_test(
session_factory,
meeting,
AsrResult(text="Hello world", start=0.0, end=1.0, language="en", language_probability=0.95),
AsrResult(
text="Hello world", start=0.0, end=1.0, language="en", language_probability=0.95
),
)

async def chunk_iter() -> AsyncIterator[noteflow_pb2.AudioChunk]:
@@ -306,7 +324,9 @@ class TestStreamSegmentPersistence:
with patch.object(servicer, "get_stream_state", side_effect={meeting_id_str: state}.get):
await drain_async_gen(servicer.StreamTranscription(chunk_iter(), MockContext()))

await self._verify_segments_persisted(session_factory, meetings_dir, meeting.id, "Hello world")
await self._verify_segments_persisted(
session_factory, meetings_dir, meeting.id, "Hello world"
)

async def _verify_segments_persisted(
self,
@@ -505,9 +525,13 @@ class TestStreamStopRequest:
await uow.commit()

mock_asr = MagicMock(is_loaded=True, transcribe_async=AsyncMock(return_value=[]))
servicer: TypedServicer = TypedServicer(session_factory=session_factory, asr_engine=mock_asr)
servicer: TypedServicer = TypedServicer(
session_factory=session_factory, asr_engine=mock_asr
)

chunk_iter, chunks_processed = self._create_stop_request_chunk_iterator(servicer, str(meeting.id))
chunk_iter, chunks_processed = self._create_stop_request_chunk_iterator(
servicer, str(meeting.id)
)
async for _ in servicer.StreamTranscription(chunk_iter, MockContext()):
pass
@@ -173,7 +173,7 @@ async def _call_refine(
    """Call RefineSpeakerDiarization with typed response."""
    refine = cast(
        _RefineSpeakerDiarizationCallable,
        getattr(servicer, "RefineSpeakerDiarization"),
        servicer.RefineSpeakerDiarization,
    )
    return await refine(request, context)

@@ -194,7 +194,7 @@ async def _call_rename(
    context: MockContext,
) -> _RenameSpeakerResponse:
    """Call RenameSpeaker with typed response."""
    rename = cast(_RenameSpeakerCallable, getattr(servicer, "RenameSpeaker"))
    rename = cast(_RenameSpeakerCallable, servicer.RenameSpeaker)
    return await rename(request, context)

@@ -25,7 +25,6 @@ from .conftest import (
    create_weighted_embedding,
)


SEARCH_LIMIT_ONE: Final[int] = 1
SEARCH_LIMIT_THREE: Final[int] = 3
SEARCH_LIMIT_FIVE: Final[int] = 5

@@ -42,6 +42,7 @@ class _StreamTranscriptionCallable(Protocol):
        context: MockContext,
    ) -> AsyncIterator[_TranscriptUpdate]: ...


SAMPLE_RATE = DEFAULT_SAMPLE_RATE
CHUNK_SAMPLES = 1600  # 0.1s at 16kHz
SPEECH_CHUNKS = 4
@@ -56,6 +57,10 @@ class MockContext:
        _ = (code, details)
        raise grpc.RpcError()

    def invocation_metadata(self) -> list[tuple[str, str]]:
        """Return empty metadata for mock context."""
        return []


def _make_chunk(meeting_id: str, audio: npt.NDArray[np.float32]) -> noteflow_pb2.AudioChunk:
    """Create a protobuf audio chunk."""

@@ -12,8 +12,8 @@ from __future__ import annotations
import os
import statistics
import time
from dataclasses import dataclass, field
from collections.abc import Callable
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Final
from uuid import uuid4

@@ -88,7 +88,7 @@ class ProfileResult:


def _record_profile_result(
    session: "ProfileSession",
    session: ProfileSession,
    operation: str,
    duration_ms: float,
    success: bool,
@@ -128,7 +128,7 @@ class ProfileSession:

        if self.results:
            durations = [r.duration_ms for r in self.results]
            lines.append(f"\nStatistics:")
            lines.append("\nStatistics:")
            lines.append(f" Mean: {statistics.mean(durations):.2f}ms")
            lines.append(f" Median: {statistics.median(durations):.2f}ms")
            lines.append(

@@ -49,35 +49,35 @@ from tests.quality._detectors.wrappers import (
)

__all__ = [
    "collect_alias_imports",
    "collect_assertion_roulette",
    "collect_conditional_test_logic",
    "collect_deep_nesting",
    "collect_deprecated_patterns",
    "collect_duplicate_test_names",
    "collect_eager_tests",
    "collect_exception_handling",
    "collect_feature_envy",
    "collect_fixture_missing_type",
    "collect_fixture_scope_too_narrow",
    "collect_long_tests",
    "collect_magic_number_tests",
    "collect_raises_without_match",
    "collect_redundant_prints",
    "collect_sensitive_equality",
    "collect_sleepy_tests",
    "collect_unknown_tests",
    "collect_unused_fixtures",
    "collect_deep_nesting",
    "collect_feature_envy",
    "collect_god_classes",
    "collect_high_complexity",
    "collect_long_methods",
    "collect_long_parameter_lists",
    "collect_long_tests",
    "collect_magic_number_tests",
    "collect_module_size_soft",
    "collect_deprecated_patterns",
    "collect_orphaned_imports",
    "collect_stale_todos",
    "collect_alias_imports",
    "collect_passthrough_classes",
    "collect_raises_without_match",
    "collect_redundant_prints",
    "collect_redundant_type_aliases",
    "collect_sensitive_equality",
    "collect_sleepy_tests",
    "collect_stale_todos",
    "collect_thin_wrappers",
    "collect_unknown_tests",
    "collect_unused_fixtures",
    "get_fixture_scope",
    "get_fixtures",
    "get_module_level_fixtures",

@@ -137,7 +137,7 @@ class TestViolation:
        )

        with pytest.raises(FrozenInstanceError):
            setattr(v, "rule", "changed")
            v.rule = "changed"


class TestContentHash:

@@ -4,7 +4,6 @@ Detects file descriptor, memory, and coroutine leaks under load conditions.
These tests verify that resources are properly released during cleanup cycles.
"""


from __future__ import annotations

import asyncio
@@ -147,9 +146,7 @@ class TestFileDescriptorLeaks:

    @pytest.mark.slow
    @pytest.mark.asyncio
    async def test_streaming_fd_cleanup(
        self, memory_servicer: NoteFlowServicer
    ) -> None:
    async def test_streaming_fd_cleanup(self, memory_servicer: NoteFlowServicer) -> None:
        """Verify FDs and threads released after 500 streaming cycles."""
        baseline = measure_resource_baseline()

@@ -163,9 +160,7 @@ class TestFileDescriptorLeaks:

    @pytest.mark.slow
    @pytest.mark.asyncio
    async def test_audio_writer_fd_cleanup(
        self, tmp_path: Path, crypto: AesGcmCryptoBox
    ) -> None:
    async def test_audio_writer_fd_cleanup(self, tmp_path: Path, crypto: AesGcmCryptoBox) -> None:
        """Verify audio writer closes FDs and threads after 100 cycles."""
        baseline = measure_resource_baseline()

@@ -287,9 +282,7 @@ class TestMemoryLeaks:

    @pytest.mark.slow
    @pytest.mark.asyncio
    async def test_diarization_session_memory(
        self, memory_servicer: NoteFlowServicer
    ) -> None:
    async def test_diarization_session_memory(self, memory_servicer: NoteFlowServicer) -> None:
        """Verify 200 diarization session cycles don't leak memory."""

        def create_mock_session() -> MagicMock:
@@ -335,9 +328,7 @@ class TestCoroutineLeaks:
        assert not errors, f"Task cleanup errors: {errors}"

    @pytest.mark.asyncio
    async def test_task_cleanup_on_exception(
        self, memory_servicer: NoteFlowServicer
    ) -> None:
    async def test_task_cleanup_on_exception(self, memory_servicer: NoteFlowServicer) -> None:
        """Verify tasks cleaned up even if task raises exception."""

        async def failing_task() -> None:
@@ -373,7 +364,9 @@ class TestWebhookClientLeaks:

        # Force client creation using type-safe helper
        await _ensure_executor_client(executor)
        assert _get_executor_client(executor) is not None, "HTTP client should exist after ensure_client"
        assert _get_executor_client(executor) is not None, (
            "HTTP client should exist after ensure_client"
        )

        # Close
        await executor.close()
@@ -479,7 +472,9 @@ class TestAudioWriterThreadLeaks:
        await asyncio.sleep(ASYNC_CLEANUP_DELAY_SECONDS)

        # Thread should be stopped and cleared
        assert _get_writer_flush_thread(writer) is None, "Flush thread should be cleared after close"
        assert _get_writer_flush_thread(writer) is None, (
            "Flush thread should be cleared after close"
        )

    @pytest.mark.slow
    @pytest.mark.asyncio
@@ -494,9 +489,7 @@ class TestAudioWriterThreadLeaks:
        initial_threads = threading.active_count()

        # Create and close writers using helper (avoids inline loop)
        run_audio_writer_thread_cycles(
            crypto, tmp_path, WRITER_THREAD_CYCLES, DEFAULT_SAMPLE_RATE
        )
        run_audio_writer_thread_cycles(crypto, tmp_path, WRITER_THREAD_CYCLES, DEFAULT_SAMPLE_RATE)

        await asyncio.sleep(THREAD_STOP_WAIT_SECONDS)  # Allow threads to fully stop
        gc.collect()
@@ -522,9 +515,7 @@ class TestHeapGrowth:

    @pytest.mark.slow
    @pytest.mark.asyncio
    async def test_streaming_heap_stability(
        self, memory_servicer: NoteFlowServicer
    ) -> None:
    async def test_streaming_heap_stability(self, memory_servicer: NoteFlowServicer) -> None:
        """Verify heap doesn't grow unbounded during streaming cycles."""
        import tracemalloc

@@ -777,9 +768,7 @@ class TestMemoryPressure:

    @pytest.mark.slow
    @pytest.mark.asyncio
    async def test_state_cleanup_under_pressure(
        self, memory_servicer: NoteFlowServicer
    ) -> None:
    async def test_state_cleanup_under_pressure(self, memory_servicer: NoteFlowServicer) -> None:
        """Verify cleanup properly releases state even under pressure."""
        run_operations_under_memory_pressure(
            memory_servicer,