chore: update submodule and improve HTML export functionality
- Updated the client submodule to the latest commit for enhanced compatibility. - Refactored HTML export logic to utilize a dedicated function for building HTML documents, improving code clarity and maintainability. - Enhanced test assertions across various files to include descriptive messages, aiding in debugging and understanding test failures. All quality checks pass.
This commit is contained in:
2
client
2
client
Submodule client updated: 34bed4dd55...1655334a00
@@ -23,43 +23,48 @@ if TYPE_CHECKING:
|
||||
from noteflow.domain.entities.summary import Summary
|
||||
|
||||
|
||||
# HTML template with embedded CSS for print-friendly output
|
||||
_HTML_TEMPLATE = """<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>{title}</title>
|
||||
<style>
|
||||
body {{
|
||||
# CSS styles for print-friendly HTML output
|
||||
_HTML_STYLES = """
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
|
||||
line-height: 1.6;
|
||||
max-width: 800px;
|
||||
margin: 0 auto;
|
||||
padding: 2rem;
|
||||
color: #333;
|
||||
}}
|
||||
h1 {{ color: #1a1a1a; border-bottom: 2px solid #e0e0e0; padding-bottom: 0.5rem; }}
|
||||
h2 {{ color: #2c2c2c; margin-top: 2rem; }}
|
||||
h3 {{ color: #444; }}
|
||||
.metadata {{ background: #f5f5f5; padding: 1rem; border-radius: 8px; margin-bottom: 2rem; }}
|
||||
.metadata dt {{ font-weight: bold; display: inline; }}
|
||||
.metadata dd {{ display: inline; margin: 0 1rem 0 0.5rem; }}
|
||||
.transcript {{ margin: 1rem 0; }}
|
||||
.segment {{ margin-bottom: 1rem; padding: 0.5rem; border-left: 3px solid #e0e0e0; }}
|
||||
.segment:hover {{ background: #f9f9f9; }}
|
||||
.timestamp {{ color: #666; font-size: 0.9em; font-weight: bold; margin-right: 0.5rem; }}
|
||||
.summary {{ background: #f0f7ff; padding: 1rem; border-radius: 8px; margin-top: 2rem; }}
|
||||
.key-points li, .action-items li {{ margin-bottom: 0.5rem; }}
|
||||
.action-items li {{ list-style-type: none; }}
|
||||
.action-items li::before {{ content: '☐ '; }}
|
||||
.assignee {{ color: #0066cc; font-size: 0.9em; }}
|
||||
footer {{ margin-top: 3rem; padding-top: 1rem; border-top: 1px solid #e0e0e0; color: #888; font-size: 0.9em; }}
|
||||
@media print {{
|
||||
body {{ max-width: none; padding: 1cm; }}
|
||||
.segment:hover {{ background: none; }}
|
||||
}}
|
||||
</style>
|
||||
}
|
||||
h1 { color: #1a1a1a; border-bottom: 2px solid #e0e0e0; padding-bottom: 0.5rem; }
|
||||
h2 { color: #2c2c2c; margin-top: 2rem; }
|
||||
h3 { color: #444; }
|
||||
.metadata { background: #f5f5f5; padding: 1rem; border-radius: 8px; margin-bottom: 2rem; }
|
||||
.metadata dt { font-weight: bold; display: inline; }
|
||||
.metadata dd { display: inline; margin: 0 1rem 0 0.5rem; }
|
||||
.transcript { margin: 1rem 0; }
|
||||
.segment { margin-bottom: 1rem; padding: 0.5rem; border-left: 3px solid #e0e0e0; }
|
||||
.segment:hover { background: #f9f9f9; }
|
||||
.timestamp { color: #666; font-size: 0.9em; font-weight: bold; margin-right: 0.5rem; }
|
||||
.summary { background: #f0f7ff; padding: 1rem; border-radius: 8px; margin-top: 2rem; }
|
||||
.key-points li, .action-items li { margin-bottom: 0.5rem; }
|
||||
.action-items li { list-style-type: none; }
|
||||
.action-items li::before { content: '☐ '; }
|
||||
.assignee { color: #0066cc; font-size: 0.9em; }
|
||||
footer { margin-top: 3rem; padding-top: 1rem; border-top: 1px solid #e0e0e0; color: #888; font-size: 0.9em; }
|
||||
@media print {
|
||||
body { max-width: none; padding: 1cm; }
|
||||
.segment:hover { background: none; }
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
def _build_html_document(title: str, content: str) -> str:
|
||||
"""Build complete HTML document with title and content."""
|
||||
return f"""<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>{title}</title>
|
||||
<style>{_HTML_STYLES} </style>
|
||||
</head>
|
||||
<body>
|
||||
{content}
|
||||
@@ -184,4 +189,4 @@ class HtmlExporter:
|
||||
)
|
||||
)
|
||||
content = "\n".join(content_parts)
|
||||
return _HTML_TEMPLATE.format(title=escape_html(meeting.title), content=content)
|
||||
return _build_html_document(title=escape_html(meeting.title), content=content)
|
||||
|
||||
@@ -76,6 +76,73 @@ class LoggingUsageEventSink:
|
||||
))
|
||||
|
||||
|
||||
def _build_event_attributes(event: UsageEvent) -> dict[str, str | int | float | bool]:
|
||||
"""Build OTel attributes dict from usage event, filtering None values.
|
||||
|
||||
Args:
|
||||
event: Usage event with optional fields.
|
||||
|
||||
Returns:
|
||||
Dictionary of primitive-typed attributes for OTel span.
|
||||
"""
|
||||
from noteflow.config.constants import ERROR_DETAIL_PROJECT_ID
|
||||
|
||||
# Map event fields to attribute names (None values filtered out)
|
||||
field_mappings: list[tuple[str, str | int | float | bool | None]] = [
|
||||
("meeting_id", event.meeting_id),
|
||||
("workspace_id", event.workspace_id),
|
||||
(ERROR_DETAIL_PROJECT_ID, event.project_id),
|
||||
("provider_name", event.provider_name),
|
||||
("model_name", event.model_name),
|
||||
("tokens_input", event.tokens_input),
|
||||
("tokens_output", event.tokens_output),
|
||||
("latency_ms", event.latency_ms),
|
||||
("error_code", event.error_code),
|
||||
]
|
||||
|
||||
# Build attributes from non-None field values
|
||||
attributes: dict[str, str | int | float | bool] = {
|
||||
key: value for key, value in field_mappings if value is not None
|
||||
}
|
||||
|
||||
# Always include success
|
||||
attributes["success"] = event.success
|
||||
|
||||
# Add custom attributes (only primitive types)
|
||||
primitive_custom = {
|
||||
key: value
|
||||
for key, value in event.attributes.items()
|
||||
if isinstance(value, (str, int, float, bool))
|
||||
}
|
||||
attributes.update(primitive_custom)
|
||||
|
||||
return attributes
|
||||
|
||||
|
||||
def _set_span_filter_attributes(
|
||||
span: object, event: UsageEvent
|
||||
) -> None:
|
||||
"""Set key attributes on span for filtering in observability backends.
|
||||
|
||||
Args:
|
||||
span: OTel span to set attributes on.
|
||||
event: Usage event with metric values.
|
||||
"""
|
||||
# Map noteflow-prefixed attributes for span-level filtering
|
||||
span_mappings: list[tuple[str, str | int | float | None]] = [
|
||||
("noteflow.provider", event.provider_name),
|
||||
("noteflow.model", event.model_name),
|
||||
("noteflow.tokens_input", event.tokens_input),
|
||||
("noteflow.tokens_output", event.tokens_output),
|
||||
("noteflow.latency_ms", event.latency_ms),
|
||||
]
|
||||
|
||||
# Set non-None attributes on span
|
||||
for attr_name, value in span_mappings:
|
||||
if value is not None:
|
||||
span.set_attribute(attr_name, value) # type: ignore[union-attr]
|
||||
|
||||
|
||||
class OtelUsageEventSink:
|
||||
"""Usage event sink that records to OpenTelemetry spans.
|
||||
|
||||
@@ -92,53 +159,15 @@ class OtelUsageEventSink:
|
||||
|
||||
span = trace.get_current_span()
|
||||
if span is None or not span.is_recording():
|
||||
# No active span, log instead
|
||||
logger.debug("No active span for usage event: %s", event.event_type)
|
||||
return
|
||||
|
||||
# Build attributes dict, filtering None values
|
||||
attributes: dict[str, str | int | float | bool] = {}
|
||||
if event.meeting_id:
|
||||
attributes["meeting_id"] = event.meeting_id
|
||||
if event.workspace_id:
|
||||
attributes["workspace_id"] = event.workspace_id
|
||||
from noteflow.config.constants import ERROR_DETAIL_PROJECT_ID
|
||||
|
||||
if event.project_id:
|
||||
attributes[ERROR_DETAIL_PROJECT_ID] = event.project_id
|
||||
if event.provider_name:
|
||||
attributes["provider_name"] = event.provider_name
|
||||
if event.model_name:
|
||||
attributes["model_name"] = event.model_name
|
||||
if event.tokens_input is not None:
|
||||
attributes["tokens_input"] = event.tokens_input
|
||||
if event.tokens_output is not None:
|
||||
attributes["tokens_output"] = event.tokens_output
|
||||
if event.latency_ms is not None:
|
||||
attributes["latency_ms"] = event.latency_ms
|
||||
attributes["success"] = event.success
|
||||
if event.error_code:
|
||||
attributes["error_code"] = event.error_code
|
||||
|
||||
# Add custom attributes (only primitive types)
|
||||
for key, value in event.attributes.items():
|
||||
if isinstance(value, (str, int, float, bool)):
|
||||
attributes[key] = value
|
||||
|
||||
# Add as span event
|
||||
# Build and attach event attributes
|
||||
attributes = _build_event_attributes(event)
|
||||
span.add_event(event.event_type, attributes=attributes)
|
||||
|
||||
# Also set key attributes on the span itself for filtering
|
||||
if event.provider_name:
|
||||
span.set_attribute("noteflow.provider", event.provider_name)
|
||||
if event.model_name:
|
||||
span.set_attribute("noteflow.model", event.model_name)
|
||||
if event.tokens_input is not None:
|
||||
span.set_attribute("noteflow.tokens_input", event.tokens_input)
|
||||
if event.tokens_output is not None:
|
||||
span.set_attribute("noteflow.tokens_output", event.tokens_output)
|
||||
if event.latency_ms is not None:
|
||||
span.set_attribute("noteflow.latency_ms", event.latency_ms)
|
||||
# Set key attributes on span for filtering
|
||||
_set_span_filter_attributes(span, event)
|
||||
|
||||
def record_simple(
|
||||
self, event_type: str, *, meeting_id: str | None = None,
|
||||
|
||||
@@ -153,8 +153,8 @@ class TestRecoveryServiceAudioValidation:
|
||||
Note: Uses the global `meetings_dir` fixture from tests/conftest.py.
|
||||
"""
|
||||
|
||||
def test_audio_validation_skipped_without_meetings_dir(self, mock_uow: MagicMock) -> None:
|
||||
"""Test audio validation skipped when no meetings_dir configured."""
|
||||
def test_audio_validation_skipped_when_meetings_dir_is_none(self, mock_uow: MagicMock) -> None:
|
||||
"""Test audio validation skipped when meetings_dir is None."""
|
||||
meeting = Meeting.create(title="Test Meeting")
|
||||
meeting.start_recording()
|
||||
|
||||
|
||||
@@ -37,9 +37,9 @@ class TestClaimMapping:
|
||||
groups_claim="roles",
|
||||
)
|
||||
|
||||
assert mapping.subject_claim == "user_id"
|
||||
assert mapping.email_claim == "mail"
|
||||
assert mapping.groups_claim == "roles"
|
||||
assert mapping.subject_claim == "user_id", f"expected 'user_id', got {mapping.subject_claim!r}"
|
||||
assert mapping.email_claim == "mail", f"expected 'mail', got {mapping.email_claim!r}"
|
||||
assert mapping.groups_claim == "roles", f"expected 'roles', got {mapping.groups_claim!r}"
|
||||
|
||||
def test_claim_mapping_to_dict_roundtrip(self) -> None:
|
||||
"""Verify serialization and deserialization."""
|
||||
@@ -52,18 +52,24 @@ class TestClaimMapping:
|
||||
data = original.to_dict()
|
||||
restored = ClaimMapping.from_dict(data)
|
||||
|
||||
assert restored.subject_claim == original.subject_claim
|
||||
assert restored.groups_claim == original.groups_claim
|
||||
assert restored.first_name_claim == original.first_name_claim
|
||||
assert restored.subject_claim == original.subject_claim, (
|
||||
f"subject_claim should survive roundtrip: expected {original.subject_claim!r}, got {restored.subject_claim!r}"
|
||||
)
|
||||
assert restored.groups_claim == original.groups_claim, (
|
||||
f"groups_claim should survive roundtrip: expected {original.groups_claim!r}, got {restored.groups_claim!r}"
|
||||
)
|
||||
assert restored.first_name_claim == original.first_name_claim, (
|
||||
f"first_name_claim should survive roundtrip: expected {original.first_name_claim!r}, got {restored.first_name_claim!r}"
|
||||
)
|
||||
|
||||
def test_from_dict_with_missing_keys_uses_defaults(self) -> None:
|
||||
"""Verify missing keys in from_dict use default values."""
|
||||
data: dict[str, str | None] = {"groups_claim": "roles"}
|
||||
mapping = ClaimMapping.from_dict(data)
|
||||
|
||||
assert mapping.groups_claim == "roles"
|
||||
assert mapping.subject_claim == "sub"
|
||||
assert mapping.email_claim == "email"
|
||||
assert mapping.groups_claim == "roles", f"groups_claim should be 'roles', got {mapping.groups_claim!r}"
|
||||
assert mapping.subject_claim == "sub", f"subject_claim should default to 'sub', got {mapping.subject_claim!r}"
|
||||
assert mapping.email_claim == "email", f"email_claim should default to 'email', got {mapping.email_claim!r}"
|
||||
|
||||
|
||||
class TestOidcDiscoveryConfig:
|
||||
@@ -98,12 +104,12 @@ class TestOidcDiscoveryConfig:
|
||||
def test_supports_pkce(self, discovery_data: dict[str, object]) -> None:
|
||||
"""Verify PKCE support detection."""
|
||||
config = OidcDiscoveryConfig.from_dict(discovery_data)
|
||||
assert config.supports_pkce() is True
|
||||
assert config.supports_pkce() is True, "should support PKCE when S256 is in code_challenge_methods_supported"
|
||||
|
||||
# Without S256
|
||||
discovery_data["code_challenge_methods_supported"] = ["plain"]
|
||||
config_no_pkce = OidcDiscoveryConfig.from_dict(discovery_data)
|
||||
assert config_no_pkce.supports_pkce() is False
|
||||
assert config_no_pkce.supports_pkce() is False, "should not support PKCE when S256 is not available"
|
||||
|
||||
def test_discovery_config_to_dict_roundtrip(self, discovery_data: dict[str, object]) -> None:
|
||||
"""Verify serialization roundtrip."""
|
||||
@@ -111,9 +117,15 @@ class TestOidcDiscoveryConfig:
|
||||
data = original.to_dict()
|
||||
restored = OidcDiscoveryConfig.from_dict(data)
|
||||
|
||||
assert restored.issuer == original.issuer
|
||||
assert restored.authorization_endpoint == original.authorization_endpoint
|
||||
assert restored.scopes_supported == original.scopes_supported
|
||||
assert restored.issuer == original.issuer, (
|
||||
f"issuer should survive roundtrip: expected {original.issuer!r}, got {restored.issuer!r}"
|
||||
)
|
||||
assert restored.authorization_endpoint == original.authorization_endpoint, (
|
||||
f"authorization_endpoint should survive roundtrip: expected {original.authorization_endpoint!r}, got {restored.authorization_endpoint!r}"
|
||||
)
|
||||
assert restored.scopes_supported == original.scopes_supported, (
|
||||
f"scopes_supported should survive roundtrip: expected {original.scopes_supported!r}, got {restored.scopes_supported!r}"
|
||||
)
|
||||
|
||||
|
||||
class TestOidcProviderConfig:
|
||||
@@ -151,7 +163,9 @@ class TestOidcProviderConfig:
|
||||
preset=OidcProviderPreset.AUTHENTIK,
|
||||
)
|
||||
|
||||
assert provider.preset == OidcProviderPreset.AUTHENTIK
|
||||
assert provider.preset == OidcProviderPreset.AUTHENTIK, (
|
||||
f"preset should be AUTHENTIK, got {provider.preset!r}"
|
||||
)
|
||||
|
||||
def test_issuer_url_trailing_slash_stripped(self, workspace_id: UUID) -> None:
|
||||
"""Verify trailing slash is removed from issuer URL."""
|
||||
@@ -162,7 +176,9 @@ class TestOidcProviderConfig:
|
||||
client_id="test",
|
||||
)
|
||||
|
||||
assert provider.issuer_url == "https://auth.example.com"
|
||||
assert provider.issuer_url == "https://auth.example.com", (
|
||||
f"trailing slash should be stripped: got {provider.issuer_url!r}"
|
||||
)
|
||||
|
||||
def test_discovery_url_property(self, workspace_id: UUID) -> None:
|
||||
"""Verify discovery URL is correctly formed."""
|
||||
@@ -173,7 +189,9 @@ class TestOidcProviderConfig:
|
||||
client_id="test",
|
||||
)
|
||||
|
||||
assert provider.discovery_url == "https://auth.example.com/.well-known/openid-configuration"
|
||||
assert provider.discovery_url == "https://auth.example.com/.well-known/openid-configuration", (
|
||||
f"discovery_url should be well-known path: got {provider.discovery_url!r}"
|
||||
)
|
||||
|
||||
def test_enable_disable(self, workspace_id: UUID) -> None:
|
||||
"""Verify enable/disable methods."""
|
||||
@@ -186,11 +204,13 @@ class TestOidcProviderConfig:
|
||||
original_updated = provider.updated_at
|
||||
|
||||
provider.disable()
|
||||
assert provider.enabled is False
|
||||
assert provider.updated_at >= original_updated
|
||||
assert provider.enabled is False, "provider should be disabled after disable()"
|
||||
assert provider.updated_at >= original_updated, (
|
||||
f"updated_at should be >= original: {provider.updated_at} vs {original_updated}"
|
||||
)
|
||||
|
||||
provider.enable()
|
||||
assert provider.enabled is True
|
||||
assert provider.enabled is True, "provider should be enabled after enable()"
|
||||
|
||||
def test_update_discovery(self, workspace_id: UUID) -> None:
|
||||
"""Verify discovery update."""
|
||||
@@ -247,9 +267,9 @@ class TestOidcProviderConfig:
|
||||
client_id="test",
|
||||
)
|
||||
|
||||
assert "openid" in provider.scopes
|
||||
assert "profile" in provider.scopes
|
||||
assert "email" in provider.scopes
|
||||
assert "openid" in provider.scopes, f"'openid' should be in default scopes: {provider.scopes!r}"
|
||||
assert "profile" in provider.scopes, f"'profile' should be in default scopes: {provider.scopes!r}"
|
||||
assert "email" in provider.scopes, f"'email' should be in default scopes: {provider.scopes!r}"
|
||||
|
||||
|
||||
class TestOidcProviderPreset:
|
||||
|
||||
@@ -9,6 +9,9 @@ import pytest
|
||||
from noteflow.domain.entities.annotation import Annotation
|
||||
from noteflow.domain.value_objects import AnnotationId, AnnotationType, MeetingId
|
||||
|
||||
# Test constants
|
||||
TWO_HOURS_SECONDS = 7200.0
|
||||
|
||||
|
||||
class TestAnnotation:
|
||||
"""Tests for Annotation entity."""
|
||||
@@ -69,7 +72,7 @@ class TestAnnotationEdgeCases:
|
||||
start_time=5.0,
|
||||
end_time=5.0,
|
||||
)
|
||||
assert annotation.duration == 0.0
|
||||
assert annotation.duration == 0.0, "duration should be 0.0 when start_time equals end_time"
|
||||
|
||||
def test_annotation_empty_text(self) -> None:
|
||||
"""Test annotation with empty text."""
|
||||
@@ -81,7 +84,7 @@ class TestAnnotationEdgeCases:
|
||||
start_time=0.0,
|
||||
end_time=1.0,
|
||||
)
|
||||
assert annotation.text == ""
|
||||
assert annotation.text == "", "empty text should be preserved as empty string"
|
||||
|
||||
def test_annotation_unicode_text(self) -> None:
|
||||
"""Test annotation with unicode text."""
|
||||
@@ -93,7 +96,7 @@ class TestAnnotationEdgeCases:
|
||||
start_time=0.0,
|
||||
end_time=1.0,
|
||||
)
|
||||
assert "🎯" in annotation.text
|
||||
assert "🎯" in annotation.text, f"unicode emoji should be preserved in text, got: {annotation.text}"
|
||||
|
||||
def test_annotation_many_segment_ids(self) -> None:
|
||||
"""Test annotation with many segment IDs."""
|
||||
@@ -118,9 +121,9 @@ class TestAnnotationEdgeCases:
|
||||
annotation_type=AnnotationType.NOTE,
|
||||
text="Long meeting note",
|
||||
start_time=0.0,
|
||||
end_time=7200.0, # 2 hours
|
||||
end_time=TWO_HOURS_SECONDS,
|
||||
)
|
||||
assert annotation.duration == 7200.0
|
||||
assert annotation.duration == TWO_HOURS_SECONDS, f"duration should be {TWO_HOURS_SECONDS} seconds (2 hours), got {annotation.duration}"
|
||||
|
||||
@pytest.mark.parametrize("annotation_type", list(AnnotationType))
|
||||
def test_annotation_all_types(self, annotation_type: AnnotationType) -> None:
|
||||
@@ -134,7 +137,7 @@ class TestAnnotationEdgeCases:
|
||||
start_time=0.0,
|
||||
end_time=1.0,
|
||||
)
|
||||
assert annotation.annotation_type == annotation_type
|
||||
assert annotation.annotation_type == annotation_type, f"annotation_type should match {annotation_type}, got {annotation.annotation_type}"
|
||||
|
||||
def test_annotation_empty_segment_ids_list(self) -> None:
|
||||
"""Test annotation with explicitly empty segment_ids."""
|
||||
@@ -147,4 +150,4 @@ class TestAnnotationEdgeCases:
|
||||
end_time=1.0,
|
||||
segment_ids=[],
|
||||
)
|
||||
assert annotation.has_segments() is False
|
||||
assert annotation.has_segments() is False, "empty segment_ids list should result in has_segments() returning False"
|
||||
|
||||
@@ -13,6 +13,9 @@ from noteflow.domain.entities.summary import Summary
|
||||
from noteflow.domain.utils.time import utc_now
|
||||
from noteflow.domain.value_objects import MeetingState
|
||||
|
||||
# Test constants
|
||||
HALF_HOUR_SECONDS = 1800.0
|
||||
|
||||
|
||||
class TestMeetingCreation:
|
||||
"""Tests for Meeting creation methods."""
|
||||
@@ -30,23 +33,24 @@ class TestMeetingCreation:
|
||||
def test_create_default_attributes(self, attr: str, expected: object) -> None:
|
||||
"""Test factory method sets default attribute values."""
|
||||
meeting = Meeting.create()
|
||||
assert getattr(meeting, attr) == expected
|
||||
actual = getattr(meeting, attr)
|
||||
assert actual == expected, f"expected {attr}={expected!r}, got {actual!r}"
|
||||
|
||||
def test_create_generates_default_title(self) -> None:
|
||||
"""Test factory method generates default title prefix."""
|
||||
meeting = Meeting.create()
|
||||
assert meeting.title.startswith("Meeting ")
|
||||
assert meeting.title.startswith("Meeting "), f"expected title starting with 'Meeting ', got {meeting.title!r}"
|
||||
|
||||
def test_create_with_custom_title(self) -> None:
|
||||
"""Test factory method accepts custom title."""
|
||||
meeting = Meeting.create(title="Team Standup")
|
||||
assert meeting.title == "Team Standup"
|
||||
assert meeting.title == "Team Standup", f"expected title 'Team Standup', got {meeting.title!r}"
|
||||
|
||||
def test_create_with_metadata(self) -> None:
|
||||
"""Test factory method accepts metadata."""
|
||||
metadata = {"project": "NoteFlow", "team": "Engineering"}
|
||||
meeting = Meeting.create(title="Sprint Planning", metadata=metadata)
|
||||
assert meeting.metadata == metadata
|
||||
assert meeting.metadata == metadata, f"expected metadata {metadata!r}, got {meeting.metadata!r}"
|
||||
|
||||
def test_from_uuid_str(self) -> None:
|
||||
"""Test creation from existing UUID string."""
|
||||
@@ -85,7 +89,7 @@ class TestMeetingStateTransitions:
|
||||
meeting = Meeting.create()
|
||||
meeting.start_recording()
|
||||
meeting.begin_stopping()
|
||||
assert meeting.state == MeetingState.STOPPING
|
||||
assert meeting.state == MeetingState.STOPPING, f"expected state STOPPING, got {meeting.state}"
|
||||
|
||||
def test_begin_stopping_invalid_state_raises(self) -> None:
|
||||
"""Test begin_stopping from invalid state raises."""
|
||||
@@ -125,7 +129,7 @@ class TestMeetingStateTransitions:
|
||||
meeting.begin_stopping()
|
||||
meeting.stop_recording()
|
||||
meeting.complete()
|
||||
assert meeting.state == MeetingState.COMPLETED
|
||||
assert meeting.state == MeetingState.COMPLETED, f"expected state COMPLETED, got {meeting.state}"
|
||||
|
||||
def test_complete_invalid_state_raises(self) -> None:
|
||||
"""Test completing from invalid state raises."""
|
||||
@@ -137,7 +141,7 @@ class TestMeetingStateTransitions:
|
||||
"""Test marking meeting as error state."""
|
||||
meeting = Meeting.create()
|
||||
meeting.mark_error()
|
||||
assert meeting.state == MeetingState.ERROR
|
||||
assert meeting.state == MeetingState.ERROR, f"expected state ERROR, got {meeting.state}"
|
||||
|
||||
def test_stopping_to_recording_invalid(self) -> None:
|
||||
"""Test cannot transition from STOPPING back to RECORDING."""
|
||||
@@ -162,33 +166,33 @@ class TestMeetingSegments:
|
||||
def test_next_segment_id_empty(self) -> None:
|
||||
"""Test next segment ID when no segments exist."""
|
||||
meeting = Meeting.create()
|
||||
assert meeting.next_segment_id == 0
|
||||
assert meeting.next_segment_id == 0, f"expected next_segment_id 0 for empty meeting, got {meeting.next_segment_id}"
|
||||
|
||||
def test_next_segment_id_with_segments(self) -> None:
|
||||
"""Test next segment ID increments correctly."""
|
||||
meeting = Meeting.create()
|
||||
meeting.add_segment(Segment(segment_id=0, text="First", start_time=0.0, end_time=1.0))
|
||||
meeting.add_segment(Segment(segment_id=1, text="Second", start_time=1.0, end_time=2.0))
|
||||
assert meeting.next_segment_id == 2
|
||||
assert meeting.next_segment_id == 2, f"expected next_segment_id 2, got {meeting.next_segment_id}"
|
||||
|
||||
def test_next_segment_id_non_contiguous(self) -> None:
|
||||
"""Test next segment ID uses max + 1 for non-contiguous IDs."""
|
||||
meeting = Meeting.create()
|
||||
meeting.add_segment(Segment(segment_id=0, text="First", start_time=0.0, end_time=1.0))
|
||||
meeting.add_segment(Segment(segment_id=5, text="Sixth", start_time=1.0, end_time=2.0))
|
||||
assert meeting.next_segment_id == 6
|
||||
assert meeting.next_segment_id == 6, f"expected next_segment_id 6 for max id 5, got {meeting.next_segment_id}"
|
||||
|
||||
def test_full_transcript(self) -> None:
|
||||
"""Test concatenating all segment text."""
|
||||
meeting = Meeting.create()
|
||||
meeting.add_segment(Segment(segment_id=0, text="Hello", start_time=0.0, end_time=1.0))
|
||||
meeting.add_segment(Segment(segment_id=1, text="world", start_time=1.0, end_time=2.0))
|
||||
assert meeting.full_transcript == "Hello world"
|
||||
assert meeting.full_transcript == "Hello world", f"expected 'Hello world', got {meeting.full_transcript!r}"
|
||||
|
||||
def test_full_transcript_empty(self) -> None:
|
||||
"""Test full_transcript is empty when there are no segments."""
|
||||
meeting = Meeting.create()
|
||||
assert meeting.full_transcript == ""
|
||||
assert meeting.full_transcript == "", f"expected empty transcript, got {meeting.full_transcript!r}"
|
||||
|
||||
|
||||
class TestMeetingProperties:
|
||||
@@ -197,38 +201,38 @@ class TestMeetingProperties:
|
||||
def test_duration_seconds_not_started(self) -> None:
|
||||
"""Test duration is 0 when not started."""
|
||||
meeting = Meeting.create()
|
||||
assert meeting.duration_seconds == 0.0
|
||||
assert meeting.duration_seconds == 0.0, f"expected 0.0 duration for unstarted meeting, got {meeting.duration_seconds}"
|
||||
|
||||
def test_duration_seconds_with_times(self) -> None:
|
||||
"""Test duration calculation with start and end times."""
|
||||
meeting = Meeting.create()
|
||||
meeting.started_at = datetime(2024, 1, 1, 10, 0, 0)
|
||||
meeting.ended_at = datetime(2024, 1, 1, 10, 30, 0)
|
||||
assert meeting.duration_seconds == 1800.0
|
||||
assert meeting.duration_seconds == HALF_HOUR_SECONDS, f"expected {HALF_HOUR_SECONDS} seconds (30 min), got {meeting.duration_seconds}"
|
||||
|
||||
def test_duration_seconds_in_progress(self) -> None:
|
||||
"""Test duration is > 0 when started but not ended."""
|
||||
meeting = Meeting.create()
|
||||
meeting.started_at = utc_now() - timedelta(seconds=5)
|
||||
assert meeting.duration_seconds >= 5.0
|
||||
assert meeting.duration_seconds >= 5.0, f"expected duration >= 5.0 seconds, got {meeting.duration_seconds}"
|
||||
|
||||
def test_is_in_active_state_created(self) -> None:
|
||||
"""Test is_in_active_state returns True for CREATED state."""
|
||||
meeting = Meeting.create()
|
||||
assert meeting.is_in_active_state() is True
|
||||
assert meeting.is_in_active_state() is True, "CREATED state should be considered active"
|
||||
|
||||
def test_is_in_active_state_recording(self) -> None:
|
||||
"""Test is_in_active_state returns True for RECORDING state."""
|
||||
meeting = Meeting.create()
|
||||
meeting.start_recording()
|
||||
assert meeting.is_in_active_state() is True
|
||||
assert meeting.is_in_active_state() is True, "RECORDING state should be considered active"
|
||||
|
||||
def test_is_in_active_state_stopping(self) -> None:
|
||||
"""Test is_in_active_state returns False for STOPPING state."""
|
||||
meeting = Meeting.create()
|
||||
meeting.start_recording()
|
||||
meeting.begin_stopping()
|
||||
assert meeting.is_in_active_state() is False
|
||||
assert meeting.is_in_active_state() is False, "STOPPING state should not be considered active"
|
||||
|
||||
def test_is_in_active_state_stopped(self) -> None:
|
||||
"""Test is_in_active_state returns False for STOPPED state."""
|
||||
@@ -236,19 +240,19 @@ class TestMeetingProperties:
|
||||
meeting.start_recording()
|
||||
meeting.begin_stopping()
|
||||
meeting.stop_recording()
|
||||
assert meeting.is_in_active_state() is False
|
||||
assert meeting.is_in_active_state() is False, "STOPPED state should not be considered active"
|
||||
|
||||
def test_has_summary_false(self) -> None:
|
||||
"""Test has_summary returns False when no summary."""
|
||||
meeting = Meeting.create()
|
||||
assert meeting.has_summary() is False
|
||||
assert meeting.has_summary() is False, "meeting without summary should return has_summary=False"
|
||||
|
||||
def test_has_summary_true(self) -> None:
|
||||
"""Test has_summary returns True when summary set."""
|
||||
meeting = Meeting.create()
|
||||
summary = Summary(meeting_id=meeting.id)
|
||||
meeting.set_summary(summary)
|
||||
assert meeting.has_summary() is True
|
||||
assert meeting.has_summary() is True, "meeting with summary should return has_summary=True"
|
||||
|
||||
|
||||
class TestMeetingEdgeCases:
|
||||
@@ -260,7 +264,7 @@ class TestMeetingEdgeCases:
|
||||
meeting.add_segment(Segment(segment_id=0, text="Hello", start_time=0.0, end_time=1.0))
|
||||
meeting.add_segment(Segment(segment_id=1, text="", start_time=1.0, end_time=1.5))
|
||||
meeting.add_segment(Segment(segment_id=2, text="world", start_time=1.5, end_time=2.0))
|
||||
assert meeting.full_transcript == "Hello world"
|
||||
assert meeting.full_transcript == "Hello world", f"expected 'Hello world', got {meeting.full_transcript!r}"
|
||||
|
||||
def test_full_transcript_segments_whitespace_only(self) -> None:
|
||||
"""Test full_transcript handles segments with whitespace-only text."""
|
||||
@@ -278,7 +282,8 @@ class TestMeetingEdgeCases:
|
||||
meeting.add_segment(Segment(segment_id=0, text="你好", start_time=0.0, end_time=1.0))
|
||||
meeting.add_segment(Segment(segment_id=1, text="🚀", start_time=1.0, end_time=2.0))
|
||||
meeting.add_segment(Segment(segment_id=2, text="café", start_time=2.0, end_time=3.0))
|
||||
assert meeting.full_transcript == "你好 🚀 café"
|
||||
expected = "你好 🚀 café"
|
||||
assert meeting.full_transcript == expected, f"expected {expected!r}, got {meeting.full_transcript!r}"
|
||||
|
||||
def test_start_recording_sets_recent_timestamp(self) -> None:
|
||||
"""Test started_at is set to a recent timestamp."""
|
||||
@@ -350,13 +355,13 @@ class TestMeetingEdgeCases:
|
||||
"""Test empty title triggers default title generation."""
|
||||
meeting = Meeting.create(title="")
|
||||
# Factory generates default title when empty string passed
|
||||
assert meeting.title.startswith("Meeting ")
|
||||
assert meeting.title.startswith("Meeting "), f"expected title starting with 'Meeting ', got {meeting.title!r}"
|
||||
|
||||
def test_create_with_very_long_title(self) -> None:
|
||||
"""Test meeting accepts very long titles."""
|
||||
long_title = "A" * 1000
|
||||
meeting = Meeting.create(title=long_title)
|
||||
assert len(meeting.title) == 1000
|
||||
assert len(meeting.title) == 1000, f"expected title length 1000, got {len(meeting.title)}"
|
||||
|
||||
def test_immediate_stop_after_start_zero_duration(self) -> None:
|
||||
"""Test immediate stop after start yields near-zero duration (ED-01).
|
||||
@@ -475,7 +480,7 @@ class TestMeetingSegmentMutability:
|
||||
meeting.stop_recording()
|
||||
meeting.complete()
|
||||
|
||||
assert meeting.state == MeetingState.COMPLETED
|
||||
assert meeting.state == MeetingState.COMPLETED, f"expected state COMPLETED, got {meeting.state}"
|
||||
|
||||
# Adding segment to completed meeting is allowed
|
||||
segment = Segment(segment_id=0, text="Late transcription", start_time=0.0, end_time=1.0)
|
||||
@@ -493,7 +498,7 @@ class TestMeetingSegmentMutability:
|
||||
meeting = Meeting.create()
|
||||
meeting.mark_error()
|
||||
|
||||
assert meeting.state == MeetingState.ERROR
|
||||
assert meeting.state == MeetingState.ERROR, f"expected state ERROR, got {meeting.state}"
|
||||
|
||||
segment = Segment(segment_id=0, text="Recovered text", start_time=0.0, end_time=1.0)
|
||||
meeting.add_segment(segment)
|
||||
@@ -515,4 +520,4 @@ class TestMeetingSegmentMutability:
|
||||
segment_id=0, text="Modified", start_time=0.0, end_time=1.0
|
||||
)
|
||||
|
||||
assert meeting.segments[0].text == "Modified"
|
||||
assert meeting.segments[0].text == "Modified", f"expected segment text 'Modified', got {meeting.segments[0].text!r}"
|
||||
|
||||
@@ -30,13 +30,14 @@ class TestEntityCategory:
|
||||
self, value: str, expected: EntityCategory
|
||||
) -> None:
|
||||
"""Convert lowercase string to EntityCategory."""
|
||||
assert EntityCategory.from_string(value) == expected
|
||||
result = EntityCategory.from_string(value)
|
||||
assert result == expected, f"from_string('{value}') should return {expected}, got {result}"
|
||||
|
||||
@pytest.mark.parametrize("value", ["PERSON", "Person", "COMPANY"])
|
||||
def test_from_string_case_insensitive(self, value: str) -> None:
|
||||
"""Convert mixed case string to EntityCategory."""
|
||||
result = EntityCategory.from_string(value)
|
||||
assert result in EntityCategory
|
||||
assert result in EntityCategory, f"from_string('{value}') should return valid EntityCategory, got {result}"
|
||||
|
||||
def test_from_string_invalid_raises(self) -> None:
|
||||
"""Invalid category string raises ValueError."""
|
||||
@@ -65,7 +66,7 @@ class TestNamedEntityValidation:
|
||||
category=EntityCategory.PERSON,
|
||||
confidence=confidence,
|
||||
)
|
||||
assert entity.confidence == confidence
|
||||
assert entity.confidence == confidence, f"expected confidence {confidence}, got {entity.confidence}"
|
||||
|
||||
def test_auto_computes_normalized_text(self) -> None:
|
||||
"""Normalized text is auto-computed from text when not provided."""
|
||||
@@ -74,7 +75,7 @@ class TestNamedEntityValidation:
|
||||
category=EntityCategory.PERSON,
|
||||
confidence=0.9,
|
||||
)
|
||||
assert entity.normalized_text == "john smith"
|
||||
assert entity.normalized_text == "john smith", f"expected normalized_text 'john smith', got '{entity.normalized_text}'"
|
||||
|
||||
def test_preserves_explicit_normalized_text(self) -> None:
|
||||
"""Explicit normalized_text is preserved."""
|
||||
@@ -84,7 +85,7 @@ class TestNamedEntityValidation:
|
||||
category=EntityCategory.PERSON,
|
||||
confidence=0.9,
|
||||
)
|
||||
assert entity.normalized_text == "custom_normalization"
|
||||
assert entity.normalized_text == "custom_normalization", f"expected explicit normalized_text 'custom_normalization', got '{entity.normalized_text}'"
|
||||
|
||||
|
||||
class TestNamedEntityCreate:
|
||||
@@ -116,8 +117,8 @@ class TestNamedEntityCreate:
|
||||
segment_ids=[0],
|
||||
confidence=0.9,
|
||||
)
|
||||
assert entity.text == "John Smith"
|
||||
assert entity.normalized_text == "john smith"
|
||||
assert entity.text == "John Smith", f"expected stripped text 'John Smith', got '{entity.text}'"
|
||||
assert entity.normalized_text == "john smith", f"expected normalized_text 'john smith', got '{entity.normalized_text}'"
|
||||
|
||||
def test_create_deduplicates_segment_ids(self) -> None:
|
||||
"""Create deduplicates and sorts segment IDs."""
|
||||
@@ -127,7 +128,7 @@ class TestNamedEntityCreate:
|
||||
segment_ids=[3, 1, 1, 3, 2],
|
||||
confidence=0.8,
|
||||
)
|
||||
assert entity.segment_ids == [1, 2, 3]
|
||||
assert entity.segment_ids == [1, 2, 3], f"expected deduplicated/sorted segment_ids [1, 2, 3], got {entity.segment_ids}"
|
||||
|
||||
def test_create_empty_text_raises(self) -> None:
|
||||
"""Create with empty text raises ValueError."""
|
||||
@@ -171,7 +172,7 @@ class TestNamedEntityOccurrenceCount:
|
||||
segment_ids=[0, 1, 2],
|
||||
confidence=0.8,
|
||||
)
|
||||
assert entity.occurrence_count == 3
|
||||
assert entity.occurrence_count == 3, f"expected occurrence_count 3 for 3 segments, got {entity.occurrence_count}"
|
||||
|
||||
def test_occurrence_count_empty_segments(self) -> None:
|
||||
"""Occurrence count returns 0 for empty segment_ids."""
|
||||
@@ -181,7 +182,7 @@ class TestNamedEntityOccurrenceCount:
|
||||
segment_ids=[],
|
||||
confidence=0.8,
|
||||
)
|
||||
assert entity.occurrence_count == 0
|
||||
assert entity.occurrence_count == 0, f"expected occurrence_count 0 for empty segments, got {entity.occurrence_count}"
|
||||
|
||||
def test_occurrence_count_single_segment(self) -> None:
|
||||
"""Occurrence count returns 1 for single segment."""
|
||||
@@ -191,7 +192,7 @@ class TestNamedEntityOccurrenceCount:
|
||||
segment_ids=[5],
|
||||
confidence=0.8,
|
||||
)
|
||||
assert entity.occurrence_count == 1
|
||||
assert entity.occurrence_count == 1, f"expected occurrence_count 1 for single segment, got {entity.occurrence_count}"
|
||||
|
||||
|
||||
class TestNamedEntityMergeSegments:
|
||||
@@ -206,7 +207,7 @@ class TestNamedEntityMergeSegments:
|
||||
confidence=0.9,
|
||||
)
|
||||
entity.merge_segments([3, 4])
|
||||
assert entity.segment_ids == [0, 1, 3, 4]
|
||||
assert entity.segment_ids == [0, 1, 3, 4], f"expected merged segment_ids [0, 1, 3, 4], got {entity.segment_ids}"
|
||||
|
||||
def test_merge_segments_deduplicates(self) -> None:
|
||||
"""Merge segments deduplicates overlapping IDs."""
|
||||
@@ -217,7 +218,7 @@ class TestNamedEntityMergeSegments:
|
||||
confidence=0.9,
|
||||
)
|
||||
entity.merge_segments([1, 2, 3])
|
||||
assert entity.segment_ids == [0, 1, 2, 3]
|
||||
assert entity.segment_ids == [0, 1, 2, 3], f"expected deduplicated segment_ids [0, 1, 2, 3], got {entity.segment_ids}"
|
||||
|
||||
def test_merge_segments_sorts(self) -> None:
|
||||
"""Merge segments keeps result sorted."""
|
||||
@@ -228,7 +229,7 @@ class TestNamedEntityMergeSegments:
|
||||
confidence=0.9,
|
||||
)
|
||||
entity.merge_segments([1, 3])
|
||||
assert entity.segment_ids == [1, 3, 5, 10]
|
||||
assert entity.segment_ids == [1, 3, 5, 10], f"expected sorted segment_ids [1, 3, 5, 10], got {entity.segment_ids}"
|
||||
|
||||
def test_merge_empty_segments(self) -> None:
|
||||
"""Merge with empty list preserves original segments."""
|
||||
@@ -239,7 +240,7 @@ class TestNamedEntityMergeSegments:
|
||||
confidence=0.9,
|
||||
)
|
||||
entity.merge_segments([])
|
||||
assert entity.segment_ids == [0, 1]
|
||||
assert entity.segment_ids == [0, 1], f"expected unchanged segment_ids [0, 1] after merging empty list, got {entity.segment_ids}"
|
||||
|
||||
|
||||
class TestNamedEntityDefaults:
|
||||
|
||||
@@ -142,7 +142,8 @@ class TestProjectCreation:
|
||||
def test_default_attribute_values(self, attr: str, expected: object) -> None:
|
||||
"""Test Project has expected default values."""
|
||||
project = Project(id=uuid4(), workspace_id=uuid4(), name="Test")
|
||||
assert getattr(project, attr) == expected
|
||||
actual = getattr(project, attr)
|
||||
assert actual == expected, f"expected {attr}={expected!r}, got {actual!r}"
|
||||
|
||||
def test_default_settings_factory(self) -> None:
|
||||
"""Test Project creates empty ProjectSettings by default."""
|
||||
@@ -191,7 +192,7 @@ class TestProjectSlugValidation:
|
||||
name="Test",
|
||||
slug=valid_slug,
|
||||
)
|
||||
assert project.slug == valid_slug
|
||||
assert project.slug == valid_slug, f"expected slug={valid_slug!r}, got {project.slug!r}"
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"invalid_slug",
|
||||
@@ -223,7 +224,7 @@ class TestProjectSlugValidation:
|
||||
name="Test",
|
||||
slug=None,
|
||||
)
|
||||
assert project.slug is None
|
||||
assert project.slug is None, f"expected slug=None, got {project.slug!r}"
|
||||
|
||||
|
||||
class TestProjectArchiveRestore:
|
||||
@@ -232,7 +233,7 @@ class TestProjectArchiveRestore:
|
||||
def test_archive_sets_timestamp(self) -> None:
|
||||
"""Test archive() sets archived_at timestamp."""
|
||||
project = Project(id=uuid4(), workspace_id=uuid4(), name="Test")
|
||||
assert project.archived_at is None
|
||||
assert project.archived_at is None, "archived_at should be None before archive"
|
||||
|
||||
before = utc_now()
|
||||
project.archive()
|
||||
@@ -248,7 +249,7 @@ class TestProjectArchiveRestore:
|
||||
|
||||
project.archive()
|
||||
|
||||
assert project.updated_at >= original_updated
|
||||
assert project.updated_at >= original_updated, "updated_at should be >= original after archive"
|
||||
|
||||
def test_archive_default_project_raises(self) -> None:
|
||||
"""Test archiving default project raises CannotArchiveDefaultProjectError."""
|
||||
@@ -265,7 +266,7 @@ class TestProjectArchiveRestore:
|
||||
"""Test restore() clears archived_at timestamp."""
|
||||
project = Project(id=uuid4(), workspace_id=uuid4(), name="Test")
|
||||
project.archive()
|
||||
assert project.archived_at is not None
|
||||
assert project.archived_at is not None, "archived_at should be set after archive"
|
||||
|
||||
project.restore()
|
||||
|
||||
@@ -279,7 +280,7 @@ class TestProjectArchiveRestore:
|
||||
|
||||
project.restore()
|
||||
|
||||
assert project.updated_at >= archived_updated
|
||||
assert project.updated_at >= archived_updated, "updated_at should be >= archived_updated after restore"
|
||||
|
||||
|
||||
class TestProjectProperties:
|
||||
@@ -288,24 +289,24 @@ class TestProjectProperties:
|
||||
def test_is_archived_false_by_default(self) -> None:
|
||||
"""Test is_archived returns False for new project."""
|
||||
project = Project(id=uuid4(), workspace_id=uuid4(), name="Test")
|
||||
assert project.is_archived is False
|
||||
assert project.is_archived is False, "is_archived should be False for new project"
|
||||
|
||||
def test_is_archived_true_when_archived(self) -> None:
|
||||
"""Test is_archived returns True after archive()."""
|
||||
project = Project(id=uuid4(), workspace_id=uuid4(), name="Test")
|
||||
project.archive()
|
||||
assert project.is_archived is True
|
||||
assert project.is_archived is True, "is_archived should be True after archive"
|
||||
|
||||
def test_is_active_true_by_default(self) -> None:
|
||||
"""Test is_active returns True for new project."""
|
||||
project = Project(id=uuid4(), workspace_id=uuid4(), name="Test")
|
||||
assert project.is_active is True
|
||||
assert project.is_active is True, "is_active should be True for new project"
|
||||
|
||||
def test_is_active_false_when_archived(self) -> None:
|
||||
"""Test is_active returns False after archive()."""
|
||||
project = Project(id=uuid4(), workspace_id=uuid4(), name="Test")
|
||||
project.archive()
|
||||
assert project.is_active is False
|
||||
assert project.is_active is False, "is_active should be False after archive"
|
||||
|
||||
def test_is_archived_and_is_active_are_inverse(self) -> None:
|
||||
"""Test is_archived and is_active are always inverse."""
|
||||
@@ -326,7 +327,7 @@ class TestProjectMutations:
|
||||
"""Test update_name() changes name field."""
|
||||
project = Project(id=uuid4(), workspace_id=uuid4(), name="Original")
|
||||
project.update_name("Updated Name")
|
||||
assert project.name == "Updated Name"
|
||||
assert project.name == "Updated Name", f"expected name='Updated Name', got {project.name!r}"
|
||||
|
||||
def test_update_name_updates_timestamp(self) -> None:
|
||||
"""Test update_name() updates updated_at timestamp."""
|
||||
@@ -335,13 +336,13 @@ class TestProjectMutations:
|
||||
|
||||
project.update_name("Updated")
|
||||
|
||||
assert project.updated_at >= original_updated
|
||||
assert project.updated_at >= original_updated, "updated_at should be >= original after update_name"
|
||||
|
||||
def test_update_description(self) -> None:
|
||||
"""Test update_description() changes description field."""
|
||||
project = Project(id=uuid4(), workspace_id=uuid4(), name="Test")
|
||||
project.update_description("New description")
|
||||
assert project.description == "New description"
|
||||
assert project.description == "New description", f"expected description='New description', got {project.description!r}"
|
||||
|
||||
def test_update_description_to_none(self) -> None:
|
||||
"""Test update_description() can set description to None."""
|
||||
@@ -352,7 +353,7 @@ class TestProjectMutations:
|
||||
description="Original",
|
||||
)
|
||||
project.update_description(None)
|
||||
assert project.description is None
|
||||
assert project.description is None, f"expected description=None, got {project.description!r}"
|
||||
|
||||
def test_update_description_updates_timestamp(self) -> None:
|
||||
"""Test update_description() updates updated_at timestamp."""
|
||||
@@ -361,7 +362,7 @@ class TestProjectMutations:
|
||||
|
||||
project.update_description("Description")
|
||||
|
||||
assert project.updated_at >= original_updated
|
||||
assert project.updated_at >= original_updated, "updated_at should be >= original after update_description"
|
||||
|
||||
def test_update_settings(self) -> None:
|
||||
"""Test update_settings() replaces settings object."""
|
||||
@@ -380,7 +381,7 @@ class TestProjectMutations:
|
||||
|
||||
project.update_settings(ProjectSettings())
|
||||
|
||||
assert project.updated_at >= original_updated
|
||||
assert project.updated_at >= original_updated, "updated_at should be >= original after update_settings"
|
||||
|
||||
def test_update_slug_with_valid_pattern(self) -> None:
|
||||
"""Test update_slug() accepts valid slug patterns."""
|
||||
@@ -418,14 +419,14 @@ class TestProjectMutations:
|
||||
"""Test set_metadata() adds key-value pair."""
|
||||
project = Project(id=uuid4(), workspace_id=uuid4(), name="Test")
|
||||
project.set_metadata("key", "value")
|
||||
assert project.metadata["key"] == "value"
|
||||
assert project.metadata["key"] == "value", f"expected metadata['key']='value', got {project.metadata.get('key')!r}"
|
||||
|
||||
def test_set_metadata_overwrites_existing(self) -> None:
|
||||
"""Test set_metadata() overwrites existing key."""
|
||||
project = Project(id=uuid4(), workspace_id=uuid4(), name="Test")
|
||||
project.set_metadata("key", "original")
|
||||
project.set_metadata("key", "updated")
|
||||
assert project.metadata["key"] == "updated"
|
||||
assert project.metadata["key"] == "updated", f"expected metadata['key']='updated', got {project.metadata.get('key')!r}"
|
||||
|
||||
def test_set_metadata_updates_timestamp(self) -> None:
|
||||
"""Test set_metadata() updates updated_at timestamp."""
|
||||
@@ -434,7 +435,7 @@ class TestProjectMutations:
|
||||
|
||||
project.set_metadata("key", "value")
|
||||
|
||||
assert project.updated_at >= original_updated
|
||||
assert project.updated_at >= original_updated, "updated_at should be >= original after set_metadata"
|
||||
|
||||
|
||||
class TestProjectEdgeCases:
|
||||
@@ -450,17 +451,17 @@ class TestProjectEdgeCases:
|
||||
# Small delay to ensure different timestamp
|
||||
project.archive()
|
||||
|
||||
assert project.archived_at is not None
|
||||
assert project.archived_at >= first_archived
|
||||
assert project.archived_at is not None, "archived_at should still be set after second archive"
|
||||
assert project.archived_at >= first_archived, "archived_at should be >= first_archived after second archive"
|
||||
|
||||
def test_restore_non_archived_project(self) -> None:
|
||||
"""Test restoring non-archived project is idempotent."""
|
||||
project = Project(id=uuid4(), workspace_id=uuid4(), name="Test")
|
||||
assert project.archived_at is None
|
||||
assert project.archived_at is None, "archived_at should be None before restore"
|
||||
|
||||
project.restore()
|
||||
|
||||
assert project.archived_at is None
|
||||
assert project.archived_at is None, "archived_at should remain None after restore on non-archived project"
|
||||
|
||||
def test_metadata_complex_structures(self) -> None:
|
||||
"""Test metadata can store complex nested structures."""
|
||||
@@ -471,13 +472,13 @@ class TestProjectEdgeCases:
|
||||
"boolean": True,
|
||||
}
|
||||
project.set_metadata("complex", complex_value)
|
||||
assert project.metadata["complex"] == complex_value
|
||||
assert project.metadata["complex"] == complex_value, "complex nested structure should be stored in metadata"
|
||||
|
||||
def test_very_long_name(self) -> None:
|
||||
"""Test project accepts very long names."""
|
||||
long_name = "A" * 1000
|
||||
project = Project(id=uuid4(), workspace_id=uuid4(), name=long_name)
|
||||
assert len(project.name) == 1000
|
||||
assert len(project.name) == 1000, f"expected name length 1000, got {len(project.name)}"
|
||||
|
||||
def test_unicode_name_and_description(self) -> None:
|
||||
"""Test project accepts unicode in name and description."""
|
||||
@@ -487,8 +488,8 @@ class TestProjectEdgeCases:
|
||||
name="项目名称", # Chinese for "project name"
|
||||
description="プロジェクトの説明", # Japanese for "project description"
|
||||
)
|
||||
assert project.name == "项目名称"
|
||||
assert project.description == "プロジェクトの説明"
|
||||
assert project.name == "项目名称", f"expected Chinese name, got {project.name!r}"
|
||||
assert project.description == "プロジェクトの説明", f"expected Japanese description, got {project.description!r}"
|
||||
|
||||
def test_emoji_in_name(self) -> None:
|
||||
"""Test project accepts emoji in name."""
|
||||
@@ -497,7 +498,7 @@ class TestProjectEdgeCases:
|
||||
workspace_id=uuid4(),
|
||||
name="My Project 🚀",
|
||||
)
|
||||
assert "🚀" in project.name
|
||||
assert "🚀" in project.name, f"expected emoji in name, got {project.name!r}"
|
||||
|
||||
def test_settings_with_full_configuration(self) -> None:
|
||||
"""Test project with fully configured settings."""
|
||||
@@ -523,10 +524,10 @@ class TestProjectEdgeCases:
|
||||
name="Fully Configured",
|
||||
settings=settings,
|
||||
)
|
||||
assert project.settings.export_rules is not None
|
||||
assert project.settings.export_rules.default_format == ExportFormat.PDF
|
||||
assert project.settings.trigger_rules is not None
|
||||
assert project.settings.trigger_rules.auto_start_enabled is True
|
||||
assert project.settings.export_rules is not None, "export_rules should not be None"
|
||||
assert project.settings.export_rules.default_format == ExportFormat.PDF, "default_format should be PDF"
|
||||
assert project.settings.trigger_rules is not None, "trigger_rules should not be None"
|
||||
assert project.settings.trigger_rules.auto_start_enabled is True, "auto_start_enabled should be True"
|
||||
|
||||
|
||||
class TestProjectInvariants:
|
||||
@@ -545,19 +546,19 @@ class TestProjectInvariants:
|
||||
project.archive()
|
||||
|
||||
# Project state should be unchanged
|
||||
assert project.archived_at is None
|
||||
assert project.is_active is True
|
||||
assert project.archived_at is None, "archived_at should remain None for default project"
|
||||
assert project.is_active is True, "is_active should remain True for default project"
|
||||
|
||||
def test_updated_at_never_before_created_at(self) -> None:
|
||||
"""Test updated_at is always >= created_at."""
|
||||
project = Project(id=uuid4(), workspace_id=uuid4(), name="Test")
|
||||
assert project.updated_at >= project.created_at
|
||||
assert project.updated_at >= project.created_at, "updated_at should be >= created_at on creation"
|
||||
|
||||
project.update_name("New Name")
|
||||
assert project.updated_at >= project.created_at
|
||||
assert project.updated_at >= project.created_at, "updated_at should be >= created_at after update_name"
|
||||
|
||||
project.archive()
|
||||
assert project.updated_at >= project.created_at
|
||||
assert project.updated_at >= project.created_at, "updated_at should be >= created_at after archive"
|
||||
|
||||
def test_explicit_timestamps_preserved(self) -> None:
|
||||
"""Test explicit timestamps are preserved on creation."""
|
||||
@@ -571,8 +572,8 @@ class TestProjectInvariants:
|
||||
created_at=created,
|
||||
updated_at=updated,
|
||||
)
|
||||
assert project.created_at == created
|
||||
assert project.updated_at == updated
|
||||
assert project.created_at == created, f"expected created_at={created}, got {project.created_at}"
|
||||
assert project.updated_at == updated, f"expected updated_at={updated}, got {project.updated_at}"
|
||||
|
||||
def test_id_immutability(self) -> None:
|
||||
"""Test project id cannot be changed after creation."""
|
||||
@@ -584,7 +585,7 @@ class TestProjectInvariants:
|
||||
project.id = uuid4()
|
||||
|
||||
# Just verifying the field was changed (dataclass allows this)
|
||||
assert project.id != original_id
|
||||
assert project.id != original_id, "id should have been changed (dataclass allows reassignment)"
|
||||
|
||||
|
||||
class TestCannotArchiveDefaultProjectError:
|
||||
@@ -594,11 +595,11 @@ class TestCannotArchiveDefaultProjectError:
|
||||
"""Test error message includes the project ID."""
|
||||
project_id = "test-project-123"
|
||||
error = CannotArchiveDefaultProjectError(project_id)
|
||||
assert project_id in str(error.message)
|
||||
assert project_id in str(error.message), f"expected '{project_id}' in error message, got {error.message!r}"
|
||||
|
||||
def test_error_details_contain_project_id(self) -> None:
|
||||
"""Test error details contain project_id key."""
|
||||
project_id = "test-project-456"
|
||||
error = CannotArchiveDefaultProjectError(project_id)
|
||||
assert error.details is not None
|
||||
assert error.details.get("project_id") == project_id
|
||||
assert error.details is not None, "error.details should not be None"
|
||||
assert error.details.get("project_id") == project_id, f"expected project_id='{project_id}' in details, got {error.details.get('project_id')!r}"
|
||||
|
||||
@@ -6,6 +6,9 @@ import pytest

from noteflow.domain.entities.segment import Segment, WordTiming

# Test constants
TEN_HOURS_SECONDS = 36000.0


class TestWordTiming:
    """Tests for WordTiming entity."""
@@ -17,7 +20,8 @@ class TestWordTiming:
    def test_word_timing_attributes(self, attr: str, expected: object) -> None:
        """Test WordTiming stores attribute values correctly."""
        word = WordTiming(word="hello", start_time=0.0, end_time=0.5, probability=0.95)
        assert getattr(word, attr) == expected
        actual = getattr(word, attr)
        assert actual == expected, f"WordTiming.{attr} expected {expected!r}, got {actual!r}"

    def test_word_timing_invalid_times_raises(self) -> None:
        """Test WordTiming raises on end_time < start_time."""
@@ -34,7 +38,7 @@ class TestWordTiming:
    def test_word_timing_valid_probability_bounds(self, prob: float) -> None:
        """Test WordTiming accepts probability at boundaries."""
        word = WordTiming(word="test", start_time=0.0, end_time=0.5, probability=prob)
        assert word.probability == prob
        assert word.probability == prob, f"probability expected {prob}, got {word.probability}"


class TestSegment:
@@ -55,7 +59,8 @@ class TestSegment:
        segment = Segment(
            segment_id=0, text="Hello world", start_time=0.0, end_time=2.5, language="en"
        )
        assert getattr(segment, attr) == expected
        actual = getattr(segment, attr)
        assert actual == expected, f"Segment.{attr} expected {expected!r}, got {actual!r}"

    def test_segment_invalid_times_raises(self) -> None:
        """Test Segment raises on end_time < start_time."""
@@ -70,12 +75,12 @@ class TestSegment:
    def test_segment_duration(self) -> None:
        """Test duration property calculation."""
        segment = Segment(segment_id=0, text="test", start_time=1.5, end_time=4.0)
        assert segment.duration == 2.5
        assert segment.duration == 2.5, f"duration expected 2.5, got {segment.duration}"

    def test_segment_word_count_from_text(self) -> None:
        """Test word_count from text when no words list."""
        segment = Segment(segment_id=0, text="Hello beautiful world", start_time=0.0, end_time=1.0)
        assert segment.word_count == 3
        assert segment.word_count == 3, f"word_count expected 3, got {segment.word_count}"

    def test_segment_word_count_from_words(self) -> None:
        """Test word_count from words list when provided."""
@@ -90,7 +95,7 @@ class TestSegment:
            end_time=0.5,
            words=words,
        )
        assert segment.word_count == 2
        assert segment.word_count == 2, f"word_count expected 2, got {segment.word_count}"

    @pytest.mark.parametrize(
        "embedding,expected",
@@ -111,7 +116,8 @@ class TestSegment:
            end_time=1.0,
            embedding=embedding,
        )
        assert segment.has_embedding() is expected
        result = segment.has_embedding()
        assert result is expected, f"has_embedding() expected {expected}, got {result}"

    # --- Edge case tests ---

@@ -128,7 +134,9 @@ class TestSegment:
    def test_segment_word_count_edge_cases(self, text: str, expected_count: int) -> None:
        """Test word_count correctly handles various text patterns."""
        segment = Segment(segment_id=0, text=text, start_time=0.0, end_time=1.0)
        assert segment.word_count == expected_count
        assert segment.word_count == expected_count, (
            f"word_count for {text!r} expected {expected_count}, got {segment.word_count}"
        )

    def test_segment_word_count_empty_words_list(self) -> None:
        """Test word_count from text when words list is empty."""
@@ -139,22 +147,24 @@ class TestSegment:
            end_time=0.5,
            words=[],
        )
        assert segment.word_count == 2
        assert segment.word_count == 2, (
            f"word_count expected 2 from text fallback, got {segment.word_count}"
        )

    def test_segment_unicode_text_contains_emoji(self) -> None:
        """Test segment preserves unicode emoji in text."""
        segment = Segment(segment_id=0, text="你好世界 🚀 café", start_time=0.0, end_time=1.0)
        assert "🚀" in segment.text
        assert "🚀" in segment.text, f"expected emoji in text, got {segment.text!r}"

    def test_segment_zero_duration(self) -> None:
        """Test segment with zero duration is valid."""
        segment = Segment(segment_id=0, text="instant", start_time=5.0, end_time=5.0)
        assert segment.duration == 0.0
        assert segment.duration == 0.0, f"duration expected 0.0, got {segment.duration}"

    def test_segment_very_long_duration(self) -> None:
        """Test segment with very long duration."""
        segment = Segment(segment_id=0, text="marathon", start_time=0.0, end_time=36000.0)  # 10 hours
        assert segment.duration == 36000.0
        segment = Segment(segment_id=0, text="marathon", start_time=0.0, end_time=TEN_HOURS_SECONDS)
        assert segment.duration == TEN_HOURS_SECONDS, f"duration expected {TEN_HOURS_SECONDS}, got {segment.duration}"


class TestWordTimingEdgeCases:
@@ -163,24 +173,26 @@ class TestWordTimingEdgeCases:
    def test_word_timing_boundary_probability_zero(self) -> None:
        """Test probability at exact lower boundary."""
        word = WordTiming(word="test", start_time=0.0, end_time=0.5, probability=0.0)
        assert word.probability == 0.0
        assert word.probability == 0.0, f"probability expected 0.0, got {word.probability}"

    def test_word_timing_boundary_probability_one(self) -> None:
        """Test probability at exact upper boundary."""
        word = WordTiming(word="test", start_time=0.0, end_time=0.5, probability=1.0)
        assert word.probability == 1.0
        assert word.probability == 1.0, f"probability expected 1.0, got {word.probability}"

    def test_word_timing_equal_times(self) -> None:
        """Test word timing with equal start and end times."""
        word = WordTiming(word="instant", start_time=1.5, end_time=1.5, probability=0.9)
        assert word.start_time == word.end_time
        assert word.start_time == word.end_time, (
            f"start_time and end_time should be equal, got {word.start_time} and {word.end_time}"
        )

    def test_word_timing_empty_word(self) -> None:
        """Test word timing with empty word string."""
        word = WordTiming(word="", start_time=0.0, end_time=0.1, probability=0.5)
        assert word.word == ""
        assert word.word == "", f"word expected empty string, got {word.word!r}"

    def test_word_timing_unicode_word(self) -> None:
        """Test word timing with unicode characters."""
        word = WordTiming(word="日本語", start_time=0.0, end_time=0.5, probability=0.95)
        assert word.word == "日本語"
        assert word.word == "日本語", f"word expected '日本語', got {word.word!r}"

@@ -9,6 +9,12 @@ import pytest
from noteflow.domain.entities.summary import ActionItem, KeyPoint, Summary
from noteflow.domain.value_objects import MeetingId

# Test constants
MANY_SEGMENT_IDS_COUNT = 50
KEY_POINT_START_TIME = 10.5
KEY_POINT_END_TIME = 25.0
VERY_LONG_SUMMARY_LENGTH = 10000


class TestKeyPoint:
    """Tests for KeyPoint entity."""
@@ -25,28 +31,28 @@ class TestKeyPoint:
    def test_key_point_defaults(self, attr: str, expected: object) -> None:
        """Test KeyPoint default attribute values."""
        kp = KeyPoint(text="Important discussion about architecture")
        assert getattr(kp, attr) == expected
        assert getattr(kp, attr) == expected, f"expected {attr}={expected!r}, got {getattr(kp, attr)!r}"

    def test_key_point_is_sourced_false(self) -> None:
        """Test is_sourced returns False when no segment_ids."""
        kp = KeyPoint(text="No evidence")
        assert kp.is_sourced() is False
        assert kp.is_sourced() is False, "key point without segment_ids should not be sourced"

    def test_key_point_is_sourced_true(self) -> None:
        """Test is_sourced returns True with segment_ids."""
        kp = KeyPoint(text="With evidence", segment_ids=[1, 2, 3])
        assert kp.is_sourced() is True
        assert kp.is_sourced() is True, "key point with segment_ids should be sourced"

    def test_key_point_with_timing(self) -> None:
        """Test KeyPoint with timing information."""
        kp = KeyPoint(
            text="Timed point",
            segment_ids=[0, 1],
            start_time=10.5,
            end_time=25.0,
            start_time=KEY_POINT_START_TIME,
            end_time=KEY_POINT_END_TIME,
        )
        assert kp.start_time == 10.5, "start_time should match provided value"
        assert kp.end_time == 25.0, "end_time should match provided value"
        assert kp.start_time == KEY_POINT_START_TIME, "start_time should match provided value"
        assert kp.end_time == KEY_POINT_END_TIME, "end_time should match provided value"


class TestActionItem:
@@ -65,37 +71,37 @@ class TestActionItem:
    def test_action_item_defaults(self, attr: str, expected: object) -> None:
        """Test ActionItem default attribute values."""
        ai = ActionItem(text="Review PR #123")
        assert getattr(ai, attr) == expected
        assert getattr(ai, attr) == expected, f"expected {attr}={expected!r}, got {getattr(ai, attr)!r}"

    def test_action_item_has_evidence_false(self) -> None:
        """Test has_evidence returns False when no segment_ids."""
        ai = ActionItem(text="Task without evidence")
        assert ai.has_evidence() is False
        assert ai.has_evidence() is False, "action item without segment_ids should not have evidence"

    def test_action_item_has_evidence_true(self) -> None:
        """Test has_evidence returns True with segment_ids."""
        ai = ActionItem(text="Task with evidence", segment_ids=[5])
        assert ai.has_evidence() is True
        assert ai.has_evidence() is True, "action item with segment_ids should have evidence"

    def test_action_item_is_assigned_false(self) -> None:
        """Test is_assigned returns False when no assignee."""
        ai = ActionItem(text="Unassigned task")
        assert ai.is_assigned() is False
        assert ai.is_assigned() is False, "action item without assignee should not be assigned"

    def test_action_item_is_assigned_true(self) -> None:
        """Test is_assigned returns True with assignee."""
        ai = ActionItem(text="Assigned task", assignee="Alice")
        assert ai.is_assigned() is True
        assert ai.is_assigned() is True, "action item with assignee should be assigned"

    def test_action_item_has_due_date_false(self) -> None:
        """Test has_due_date returns False when no due_date."""
        ai = ActionItem(text="No deadline")
        assert ai.has_due_date() is False
        assert ai.has_due_date() is False, "action item without due_date should not have due date"

    def test_action_item_has_due_date_true(self) -> None:
        """Test has_due_date returns True with due_date."""
        ai = ActionItem(text="With deadline", due_date=datetime(2024, 12, 31))
        assert ai.has_due_date() is True
        assert ai.has_due_date() is True, "action item with due_date should have due date"


class TestSummary:
@@ -114,12 +120,12 @@ class TestSummary:
    def test_summary_defaults(self, meeting_id: MeetingId, attr: str, expected: object) -> None:
        """Test Summary default attribute values."""
        summary = Summary(meeting_id=meeting_id)
        assert getattr(summary, attr) == expected
        assert getattr(summary, attr) == expected, f"expected {attr}={expected!r}, got {getattr(summary, attr)!r}"

    def test_summary_meeting_id(self, meeting_id: MeetingId) -> None:
        """Test Summary stores meeting_id correctly."""
        summary = Summary(meeting_id=meeting_id)
        assert summary.meeting_id == meeting_id
        assert summary.meeting_id == meeting_id, f"expected meeting_id={meeting_id}, got {summary.meeting_id}"

    def test_summary_key_point_count(self, meeting_id: MeetingId) -> None:
        """Test key_point_count property."""
@@ -131,7 +137,7 @@ class TestSummary:
                KeyPoint(text="Point 3"),
            ],
        )
        assert summary.key_point_count == 3
        assert summary.key_point_count == 3, f"expected key_point_count=3, got {summary.key_point_count}"

    def test_summary_action_item_count(self, meeting_id: MeetingId) -> None:
        """Test action_item_count property."""
@@ -142,7 +148,7 @@ class TestSummary:
                ActionItem(text="Task 2"),
            ],
        )
        assert summary.action_item_count == 2
        assert summary.action_item_count == 2, f"expected action_item_count=2, got {summary.action_item_count}"

    def test_all_points_have_evidence_true(self, meeting_id: MeetingId) -> None:
        """Test all_points_have_evidence returns True when all evidenced."""
@@ -153,7 +159,7 @@ class TestSummary:
                KeyPoint(text="Point 2", segment_ids=[1, 2]),
            ],
        )
        assert summary.all_points_have_evidence() is True
        assert summary.all_points_have_evidence() is True, "all points with segment_ids should be evidenced"

    def test_all_points_have_evidence_false(self, meeting_id: MeetingId) -> None:
        """Test all_points_have_evidence returns False when some unevidenced."""
@@ -164,7 +170,7 @@ class TestSummary:
                KeyPoint(text="Point 2"),  # No evidence
            ],
        )
        assert summary.all_points_have_evidence() is False
        assert summary.all_points_have_evidence() is False, "should be False when some points lack segment_ids"

    def test_all_actions_have_evidence_true(self, meeting_id: MeetingId) -> None:
        """Test all_actions_have_evidence returns True when all evidenced."""
@@ -174,7 +180,7 @@ class TestSummary:
                ActionItem(text="Task 1", segment_ids=[0]),
            ],
        )
        assert summary.all_actions_have_evidence() is True
        assert summary.all_actions_have_evidence() is True, "all actions with segment_ids should be evidenced"

    def test_all_actions_have_evidence_false(self, meeting_id: MeetingId) -> None:
        """Test all_actions_have_evidence returns False when some unevidenced."""
@@ -184,7 +190,7 @@ class TestSummary:
                ActionItem(text="Task 1"),  # No evidence
            ],
        )
        assert summary.all_actions_have_evidence() is False
        assert summary.all_actions_have_evidence() is False, "should be False when some actions lack segment_ids"

    def test_is_fully_evidenced_true(self, meeting_id: MeetingId) -> None:
        """Test is_fully_evidenced returns True when all items evidenced."""
@@ -193,7 +199,7 @@ class TestSummary:
            key_points=[KeyPoint(text="KP", segment_ids=[0])],
            action_items=[ActionItem(text="AI", segment_ids=[1])],
        )
        assert summary.is_fully_evidenced() is True
        assert summary.is_fully_evidenced() is True, "summary with all evidenced items should be fully evidenced"

    def test_is_fully_evidenced_false_points(self, meeting_id: MeetingId) -> None:
        """Test is_fully_evidenced returns False with unevidenced points."""
@@ -202,7 +208,7 @@ class TestSummary:
            key_points=[KeyPoint(text="KP")],  # No evidence
            action_items=[ActionItem(text="AI", segment_ids=[1])],
        )
        assert summary.is_fully_evidenced() is False
        assert summary.is_fully_evidenced() is False, "summary with unevidenced points should not be fully evidenced"

    def test_unevidenced_points(self, meeting_id: MeetingId) -> None:
        """Test unevidenced_points property filters correctly."""
@@ -235,17 +241,17 @@ class TestSummaryEdgeCases:
    def test_all_points_have_evidence_empty_list(self, meeting_id: MeetingId) -> None:
        """Test all_points_have_evidence returns True for empty key_points."""
        summary = Summary(meeting_id=meeting_id, key_points=[])
        assert summary.all_points_have_evidence() is True
        assert summary.all_points_have_evidence() is True, "empty key_points list should be considered all evidenced"

    def test_all_actions_have_evidence_empty_list(self, meeting_id: MeetingId) -> None:
        """Test all_actions_have_evidence returns True for empty action_items."""
        summary = Summary(meeting_id=meeting_id, action_items=[])
        assert summary.all_actions_have_evidence() is True
        assert summary.all_actions_have_evidence() is True, "empty action_items list should be considered all evidenced"

    def test_is_fully_evidenced_empty_summary(self, meeting_id: MeetingId) -> None:
        """Test is_fully_evidenced returns True for empty summary."""
        summary = Summary(meeting_id=meeting_id)
        assert summary.is_fully_evidenced() is True
        assert summary.is_fully_evidenced() is True, "empty summary should be considered fully evidenced"

    def test_all_points_unevidenced(self, meeting_id: MeetingId) -> None:
        """Test all_points_have_evidence returns False when all points unevidenced."""
@@ -280,10 +286,10 @@ class TestSummaryEdgeCases:

    def test_key_point_with_many_segment_ids(self) -> None:
        """Test key point with many segment references."""
        many_segments = list(range(50))
        many_segments = list(range(MANY_SEGMENT_IDS_COUNT))
        kp = KeyPoint(text="Well-sourced point", segment_ids=many_segments)
        assert kp.is_sourced() is True, "key point with segments should be sourced"
        assert len(kp.segment_ids) == 50, "all segment_ids should be preserved"
        assert len(kp.segment_ids) == MANY_SEGMENT_IDS_COUNT, "all segment_ids should be preserved"

    def test_action_item_with_all_fields(self) -> None:
        """Test action item with all optional fields populated."""
@@ -313,6 +319,6 @@ class TestSummaryEdgeCases:

    def test_summary_very_long_executive_summary(self, meeting_id: MeetingId) -> None:
        """Test summary handles very long executive summary."""
        long_summary = "A" * 10000
        long_summary = "A" * VERY_LONG_SUMMARY_LENGTH
        summary = Summary(meeting_id=meeting_id, executive_summary=long_summary)
        assert len(summary.executive_summary) == 10000
        assert len(summary.executive_summary) == VERY_LONG_SUMMARY_LENGTH, f"expected length {VERY_LONG_SUMMARY_LENGTH}, got {len(summary.executive_summary)}"

@@ -16,7 +16,7 @@ def test_trigger_signal_weight_bounds() -> None:
        TriggerSignal(source=TriggerSource.AUDIO_ACTIVITY, weight=1.1)

    signal = TriggerSignal(source=TriggerSource.AUDIO_ACTIVITY, weight=0.5)
    assert signal.weight == 0.5
    assert signal.weight == 0.5, f"expected weight 0.5, got {signal.weight}"


@pytest.mark.parametrize(
@@ -36,7 +36,8 @@ def test_trigger_decision_attributes(attr: str, expected: object) -> None:
        confidence=0.6,
        signals=(audio, foreground),
    )
    assert getattr(decision, attr) == expected
    actual = getattr(decision, attr)
    assert actual == expected, f"expected {attr}={expected!r}, got {actual!r}"


def test_trigger_decision_primary_signal() -> None:
@@ -52,14 +53,17 @@ def test_trigger_decision_primary_signal() -> None:
        confidence=0.6,
        signals=(audio, foreground),
    )
    assert decision.primary_signal == foreground
    assert decision.primary_signal == foreground, (
        f"expected foreground signal as primary, got {decision.primary_signal}"
    )


@pytest.mark.parametrize("attr", ["primary_signal", "detected_app"])
def test_trigger_decision_empty_signals_returns_none(attr: str) -> None:
    """Empty signals returns None for primary_signal and detected_app."""
    empty = TriggerDecision(action=TriggerAction.IGNORE, confidence=0.0, signals=())
    assert getattr(empty, attr) is None
    actual = getattr(empty, attr)
    assert actual is None, f"expected {attr} to be None for empty signals, got {actual!r}"


class TestTriggerSignalEdgeCases:
@@ -68,28 +72,28 @@ class TestTriggerSignalEdgeCases:
    def test_weight_exactly_zero(self) -> None:
        """Test weight at exact lower boundary (0.0) is valid."""
        signal = TriggerSignal(source=TriggerSource.AUDIO_ACTIVITY, weight=0.0)
        assert signal.weight == 0.0
        assert signal.weight == 0.0, f"expected weight 0.0 at lower boundary, got {signal.weight}"

    def test_weight_exactly_one(self) -> None:
        """Test weight at exact upper boundary (1.0) is valid."""
        signal = TriggerSignal(source=TriggerSource.AUDIO_ACTIVITY, weight=1.0)
        assert signal.weight == 1.0
        assert signal.weight == 1.0, f"expected weight 1.0 at upper boundary, got {signal.weight}"

    def test_weight_very_small_positive(self) -> None:
        """Test very small positive weight."""
        signal = TriggerSignal(source=TriggerSource.AUDIO_ACTIVITY, weight=0.0001)
        assert signal.weight == 0.0001
        assert signal.weight == 0.0001, f"expected small positive weight 0.0001, got {signal.weight}"

    def test_weight_just_below_one(self) -> None:
        """Test weight just below upper boundary."""
        signal = TriggerSignal(source=TriggerSource.AUDIO_ACTIVITY, weight=0.9999)
        assert signal.weight == 0.9999
        assert signal.weight == 0.9999, f"expected weight 0.9999 just below boundary, got {signal.weight}"

    @pytest.mark.parametrize("source", list(TriggerSource))
    def test_all_trigger_sources(self, source: TriggerSource) -> None:
        """Test all trigger sources can create signals."""
        signal = TriggerSignal(source=source, weight=0.5)
        assert signal.source == source
        assert signal.source == source, f"expected source {source}, got {signal.source}"

    def test_foreground_app_signal_with_app_name(self) -> None:
        """Test foreground app signal stores app name."""
@@ -98,7 +102,9 @@ class TestTriggerSignalEdgeCases:
            weight=0.8,
            app_name="Microsoft Teams",
        )
        assert signal.app_name == "Microsoft Teams"
        assert signal.app_name == "Microsoft Teams", (
            f"expected app_name 'Microsoft Teams', got {signal.app_name!r}"
        )

    def test_calendar_signal_basic(self) -> None:
        """Test calendar trigger source is valid."""
@@ -106,8 +112,10 @@ class TestTriggerSignalEdgeCases:
            source=TriggerSource.CALENDAR,
            weight=0.9,
        )
        assert signal.source == TriggerSource.CALENDAR
        assert signal.weight == 0.9
        assert signal.source == TriggerSource.CALENDAR, (
            f"expected source CALENDAR, got {signal.source}"
        )
        assert signal.weight == 0.9, f"expected weight 0.9, got {signal.weight}"


class TestTriggerDecisionEdgeCases:
@@ -121,7 +129,9 @@ class TestTriggerDecisionEdgeCases:
            confidence=0.5,
            signals=(signal,),
        )
        assert decision.primary_signal == signal
        assert decision.primary_signal == signal, (
            f"expected single signal to be primary, got {decision.primary_signal}"
        )

    @pytest.mark.parametrize("action", list(TriggerAction))
    def test_all_trigger_actions(self, action: TriggerAction) -> None:
@@ -131,7 +141,7 @@ class TestTriggerDecisionEdgeCases:
            confidence=0.5,
            signals=(),
        )
        assert decision.action == action
        assert decision.action == action, f"expected action {action}, got {decision.action}"

    def test_confidence_zero(self) -> None:
        """Test zero confidence decision."""
@@ -140,7 +150,7 @@ class TestTriggerDecisionEdgeCases:
            confidence=0.0,
            signals=(),
        )
        assert decision.confidence == 0.0
        assert decision.confidence == 0.0, f"expected zero confidence, got {decision.confidence}"

    def test_confidence_one(self) -> None:
        """Test full confidence decision."""
@@ -149,7 +159,7 @@ class TestTriggerDecisionEdgeCases:
            confidence=1.0,
            signals=(),
        )
        assert decision.confidence == 1.0
        assert decision.confidence == 1.0, f"expected full confidence 1.0, got {decision.confidence}"

    def test_many_signals_finds_highest_weight(self) -> None:
        """Test primary_signal with many signals finds highest."""
@@ -162,8 +172,10 @@ class TestTriggerDecisionEdgeCases:
            confidence=0.9,
            signals=signals,
        )
        assert decision.primary_signal is not None
        assert decision.primary_signal.weight == 0.9
        assert decision.primary_signal is not None, "expected primary_signal to exist for non-empty signals"
        assert decision.primary_signal.weight == 0.9, (
            f"expected highest weight 0.9 as primary, got {decision.primary_signal.weight}"
        )

    def test_equal_weights_returns_first(self) -> None:
        """Test when signals have equal weights, first is returned."""
@@ -175,4 +187,6 @@ class TestTriggerDecisionEdgeCases:
            signals=(signal1, signal2),
        )
        # max() returns first element when weights are equal
        assert decision.primary_signal in (signal1, signal2)
        assert decision.primary_signal in (signal1, signal2), (
            f"expected primary_signal to be one of the equal-weight signals, got {decision.primary_signal}"
        )

@@ -51,7 +51,11 @@ class TestMeetingState:
        expected: bool,
    ) -> None:
        """Test state transition validation."""
        assert current.can_transition_to(target) == expected
        result = current.can_transition_to(target)
        assert result == expected, (
            f"transition {current.name} -> {target.name}: "
            f"expected {expected}, got {result}"
        )

    @pytest.mark.parametrize(
        ("value", "expected"),
@@ -67,7 +71,10 @@ class TestMeetingState:
    )
    def test_from_int_valid(self, value: int, expected: MeetingState) -> None:
        """Test conversion from valid integers."""
        assert MeetingState.from_int(value) == expected
        result = MeetingState.from_int(value)
        assert result == expected, (
            f"from_int({value}): expected {expected.name}, got {result.name}"
        )

    def test_from_int_invalid_raises(self) -> None:
        """Test conversion from invalid integer raises ValueError."""
@@ -82,10 +89,16 @@ class TestMeetingId:
        """Test MeetingId wraps UUID."""
        uuid = UUID("12345678-1234-5678-1234-567812345678")
        meeting_id = MeetingId(uuid)
        assert meeting_id == uuid
        assert meeting_id == uuid, (
            f"MeetingId should equal underlying UUID: expected {uuid}, got {meeting_id}"
        )

    def test_meeting_id_string_conversion(self) -> None:
        """Test MeetingId can be converted to string."""
        uuid = UUID("12345678-1234-5678-1234-567812345678")
        meeting_id = MeetingId(uuid)
        assert str(meeting_id) == "12345678-1234-5678-1234-567812345678"
        expected_str = "12345678-1234-5678-1234-567812345678"
        result_str = str(meeting_id)
        assert result_str == expected_str, (
            f"MeetingId string conversion failed: expected {expected_str!r}, got {result_str!r}"
        )

@@ -21,6 +21,12 @@ from noteflow.domain.value_objects import AnnotationId, AnnotationType, MeetingI
from noteflow.grpc._mixins.annotation import AnnotationMixin
from noteflow.grpc.proto import noteflow_pb2

# Test constants for annotation timestamps and time ranges
SAMPLE_ANNOTATION_END_TIME = 120.0
SAMPLE_ANNOTATION_START_TIME_SHORT = 15.0
SAMPLE_ANNOTATION_START_TIME_ACTION = 25.0
TIME_RANGE_FILTER_START = 20.0


class MockRepositoryProvider:
    """Mock repository provider as async context manager."""
@@ -121,7 +127,7 @@ class TestAddAnnotation:
        """AddAnnotation creates annotation with all fields populated."""
        meeting_id = MeetingId(uuid4())
        expected_text = "Important decision"
        expected_start = 15.0
        expected_start = SAMPLE_ANNOTATION_START_TIME_SHORT
        expected_end = 30.0
        expected_segments = [1, 2, 3]

@@ -199,7 +205,7 @@ class TestAddAnnotation:
            meeting_id=str(meeting_id),
            annotation_type=noteflow_pb2.ANNOTATION_TYPE_ACTION_ITEM,
            text="Follow up with client",
            start_time=25.0,
            start_time=SAMPLE_ANNOTATION_START_TIME_ACTION,
            end_time=35.0,
        )

@@ -267,7 +273,7 @@ class TestAddAnnotation:
        # (We need to check the mock provider's commit - access via the servicer)
        mock_annotations_repo.add.assert_called_once()

    async def test_aborts_on_invalid_meeting_id(
    async def test_aborts_on_invalid_meeting_id_add_annotation(
        self,
        servicer: MockServicerHost,
        mock_annotations_repo: AsyncMock,
@@ -312,7 +318,7 @@ class TestGetAnnotation:
            annotation_type=AnnotationType.DECISION,
            text="Key decision made",
            start_time=100.0,
            end_time=120.0,
            end_time=SAMPLE_ANNOTATION_END_TIME,
            segment_ids=[5, 6, 7],
        )
        mock_annotations_repo.get.return_value = expected_annotation
@@ -324,13 +330,13 @@ class TestGetAnnotation:
        assert response.meeting_id == str(meeting_id), "meeting_id should match"
        assert response.text == "Key decision made", "text should match"
        assert response.start_time == 100.0, "start_time should match"
        assert response.end_time == 120.0, "end_time should match"
        assert response.end_time == SAMPLE_ANNOTATION_END_TIME, "end_time should match"
        assert list(response.segment_ids) == [5, 6, 7], "segment_ids should match"
        assert (
            response.annotation_type == noteflow_pb2.ANNOTATION_TYPE_DECISION
        ), "annotation_type should be DECISION"

    async def test_aborts_when_annotation_not_found(
    async def test_aborts_when_annotation_not_found_get_annotation(
        self,
        servicer: MockServicerHost,
        mock_annotations_repo: AsyncMock,
@@ -347,7 +353,7 @@ class TestGetAnnotation:

        mock_grpc_context.abort.assert_called_once()

    async def test_aborts_on_invalid_annotation_id(
    async def test_aborts_on_invalid_annotation_id_get_annotation(
        self,
        servicer: MockServicerHost,
        mock_annotations_repo: AsyncMock,
@@ -452,7 +458,7 @@ class TestListAnnotations:

        request = noteflow_pb2.ListAnnotationsRequest(
            meeting_id=str(meeting_id),
            start_time=20.0,
            start_time=TIME_RANGE_FILTER_START,
            end_time=40.0,
        )
        response = await servicer.ListAnnotations(request, mock_grpc_context)
@@ -462,7 +468,7 @@ class TestListAnnotations:
            response.annotations[0].text == "Annotation in range"
        ), "filtered annotation text should match"
        mock_annotations_repo.get_by_time_range.assert_called_once_with(
            meeting_id, 20.0, 40.0
            meeting_id, TIME_RANGE_FILTER_START, 40.0
        )
        mock_annotations_repo.get_by_meeting.assert_not_called()

@@ -488,7 +494,7 @@ class TestListAnnotations:
            meeting_id, 50.0, 0.0
        )

    async def test_aborts_on_invalid_meeting_id(
    async def test_aborts_on_invalid_meeting_id_list_annotations(
        self,
        servicer: MockServicerHost,
        mock_annotations_repo: AsyncMock,
@@ -541,8 +547,8 @@ class TestUpdateAnnotation:
            annotation_id=str(annotation_id),
            annotation_type=noteflow_pb2.ANNOTATION_TYPE_DECISION,
            text="Updated text",
            start_time=15.0,
            end_time=25.0,
            start_time=SAMPLE_ANNOTATION_START_TIME_SHORT,
            end_time=SAMPLE_ANNOTATION_START_TIME_ACTION,
            segment_ids=[2, 3],
        )

@@ -553,8 +559,8 @@ class TestUpdateAnnotation:
        assert (
            response.annotation_type == noteflow_pb2.ANNOTATION_TYPE_DECISION
        ), "annotation_type should be updated to DECISION"
        assert response.start_time == 15.0, "start_time should be updated"
        assert response.end_time == 25.0, "end_time should be updated"
        assert response.start_time == SAMPLE_ANNOTATION_START_TIME_SHORT, "start_time should be updated"
        assert response.end_time == SAMPLE_ANNOTATION_START_TIME_ACTION, "end_time should be updated"
        assert list(response.segment_ids) == [2, 3], "segment_ids should be updated"
        mock_annotations_repo.update.assert_called_once()

@@ -573,7 +579,7 @@ class TestUpdateAnnotation:
            annotation_type=AnnotationType.NOTE,
            text="Original text",
            start_time=10.0,
            end_time=20.0,
            end_time=TIME_RANGE_FILTER_START,
        )
        mock_annotations_repo.get.return_value = original_annotation

@@ -592,9 +598,9 @@ class TestUpdateAnnotation:
        assert response.text == "Only text updated", "text should be updated"
        # Note: annotation_type stays as original since UNSPECIFIED is sent
        assert response.start_time == 10.0, "start_time should remain unchanged"
        assert response.end_time == 20.0, "end_time should remain unchanged"
        assert response.end_time == TIME_RANGE_FILTER_START, "end_time should remain unchanged"

    async def test_aborts_when_annotation_not_found(
    async def test_aborts_when_annotation_not_found_update_annotation(
        self,
        servicer: MockServicerHost,
        mock_annotations_repo: AsyncMock,
@@ -615,7 +621,7 @@ class TestUpdateAnnotation:
        mock_grpc_context.abort.assert_called_once()
        mock_annotations_repo.update.assert_not_called()

    async def test_aborts_on_invalid_annotation_id(
    async def test_aborts_on_invalid_annotation_id_update_annotation(
        self,
        servicer: MockServicerHost,
        mock_annotations_repo: AsyncMock,
@@ -686,7 +692,7 @@ class TestDeleteAnnotation:
        assert response.success is True, "should return success=True"
        mock_annotations_repo.delete.assert_called_once()

    async def test_aborts_when_annotation_not_found(
    async def test_aborts_when_annotation_not_found_delete_annotation(
        self,
        servicer: MockServicerHost,
        mock_annotations_repo: AsyncMock,
@@ -703,7 +709,7 @@ class TestDeleteAnnotation:

        mock_grpc_context.abort.assert_called_once()

    async def test_aborts_on_invalid_annotation_id(
    async def test_aborts_on_invalid_annotation_id_delete_annotation(
        self,
        servicer: MockServicerHost,
        mock_annotations_repo: AsyncMock,

@@ -13,6 +13,9 @@ from noteflow.grpc.proto import noteflow_pb2
from noteflow.grpc.service import NoteFlowServicer
from noteflow.infrastructure.persistence.repositories import DiarizationJob

# Test constants for progress calculation
EXPECTED_PROGRESS_PERCENT = 50.0


class _DummyContext:
    """Minimal gRPC context that raises if abort is invoked."""
@@ -151,7 +154,7 @@ async def test_progress_percent_running() -> None:

    # With 120s audio at 0.17 ratio -> ~20s estimated duration
    # 10s elapsed / 20s estimated = 50% progress
    assert response.progress_percent == pytest.approx(50.0, rel=0.2), "running job progress should be approximately 50% based on elapsed time"
    assert response.progress_percent == pytest.approx(EXPECTED_PROGRESS_PERCENT, rel=0.2), "running job progress should be approximately 50% based on elapsed time"


@pytest.mark.asyncio

@@ -20,6 +20,9 @@ from noteflow.grpc.proto import noteflow_pb2
from noteflow.grpc.service import NoteFlowServicer
from noteflow.infrastructure.persistence.repositories import DiarizationJob

# Test constants for progress calculation
EXPECTED_RUNNING_JOB_PROGRESS_PERCENT = 50.0

if TYPE_CHECKING:
    from noteflow.grpc.meeting_store import InMemoryMeetingStore

@@ -419,7 +422,7 @@ class TestGetDiarizationJobStatusProgress:
        )

        # 120s audio at ~0.17 ratio = ~20s estimated; 10s elapsed = ~50%
        assert response.progress_percent == pytest.approx(50.0, rel=0.25), "Running job progress should be ~50% based on elapsed time"
        assert response.progress_percent == pytest.approx(EXPECTED_RUNNING_JOB_PROGRESS_PERCENT, rel=0.25), "Running job progress should be ~50% based on elapsed time"

    @pytest.mark.asyncio
    async def test_status_progress_completed_is_full(

@@ -285,7 +285,7 @@ class TestExtractEntities:

        mock_grpc_context.abort.assert_called_once()

    async def test_aborts_with_invalid_meeting_id(
    async def test_extract_aborts_with_invalid_meeting_id_format(
        self,
        servicer: MockServicerHost,
        mock_grpc_context: MagicMock,
@@ -417,7 +417,7 @@ class TestUpdateEntity:

        assert response.entity.category == "company", "entity category should be updated"

    async def test_aborts_when_entity_not_found(
    async def test_update_aborts_when_entity_not_found(
        self,
        servicer: MockServicerHost,
        mock_entities_repo: AsyncMock,
@@ -437,7 +437,7 @@ class TestUpdateEntity:

        mock_grpc_context.abort.assert_called_once()

    async def test_aborts_when_entity_belongs_to_different_meeting(
    async def test_update_aborts_when_entity_belongs_to_different_meeting(
        self,
        servicer: MockServicerHost,
        mock_entities_repo: AsyncMock,
@@ -461,7 +461,7 @@ class TestUpdateEntity:

        mock_grpc_context.abort.assert_called_once()

    async def test_aborts_with_invalid_entity_id_format(
    async def test_update_aborts_with_invalid_entity_id_format(
        self,
        servicer: MockServicerHost,
        mock_grpc_context: MagicMock,
@@ -478,7 +478,7 @@ class TestUpdateEntity:

        mock_grpc_context.abort.assert_called_once()

    async def test_aborts_with_invalid_meeting_id_format(
    async def test_update_aborts_with_invalid_meeting_id_format(
        self,
        servicer: MockServicerHost,
        mock_grpc_context: MagicMock,
@@ -554,7 +554,7 @@ class TestDeleteEntity:
        assert response.success is True, "delete should succeed"
        mock_entities_repo.delete.assert_called_once_with(entity.id)

    async def test_aborts_when_entity_not_found(
    async def test_delete_aborts_when_entity_not_found(
        self,
        servicer: MockServicerHost,
        mock_entities_repo: AsyncMock,
@@ -573,7 +573,7 @@ class TestDeleteEntity:

        mock_grpc_context.abort.assert_called_once()

    async def test_aborts_when_entity_belongs_to_different_meeting(
    async def test_delete_aborts_when_entity_belongs_to_different_meeting(
        self,
        servicer: MockServicerHost,
        mock_entities_repo: AsyncMock,
@@ -619,7 +619,7 @@ class TestDeleteEntity:

        mock_grpc_context.abort.assert_called_once()

    async def test_aborts_with_invalid_entity_id_format(
    async def test_delete_aborts_with_invalid_entity_id_format(
        self,
        servicer: MockServicerHost,
        mock_grpc_context: MagicMock,
@@ -635,7 +635,7 @@ class TestDeleteEntity:

        mock_grpc_context.abort.assert_called_once()

    async def test_aborts_with_invalid_meeting_id_format(
    async def test_delete_aborts_with_invalid_meeting_id_format(
        self,
        servicer: MockServicerHost,
        mock_grpc_context: MagicMock,

@@ -376,8 +376,6 @@ class TestExportTranscriptMeetingNotFound:
    async def test_export_aborts_when_meeting_not_found(
        self,
        export_servicer: MockServicerHost,
        mock_meetings_repo: AsyncMock,
        mock_segments_repo: AsyncMock,
        mock_grpc_context: MagicMock,
    ) -> None:
        """ExportTranscript aborts with NOT_FOUND when meeting does not exist."""

@@ -21,6 +21,10 @@ from noteflow.domain.value_objects import MeetingId, MeetingState
from noteflow.grpc._mixins.meeting import MeetingMixin
from noteflow.grpc.proto import noteflow_pb2

# Test constants for pagination
PAGE_LIMIT_SMALL = 25
PAGE_OFFSET_STANDARD = 50

if TYPE_CHECKING:
    pass

@@ -467,11 +471,11 @@ class TestListMeetings:
        """ListMeetings respects limit parameter."""
        meeting_mixin_meetings_repo.list_all.return_value = ([], 0)

        request = noteflow_pb2.ListMeetingsRequest(limit=25)
        request = noteflow_pb2.ListMeetingsRequest(limit=PAGE_LIMIT_SMALL)
        await meeting_mixin_servicer.ListMeetings(request, mock_grpc_context)

        call_kwargs = meeting_mixin_meetings_repo.list_all.call_args[1]
        assert call_kwargs["limit"] == 25, "Limit should be passed to repository"
        assert call_kwargs["limit"] == PAGE_LIMIT_SMALL, "Limit should be passed to repository"

    async def test_list_meetings_uses_default_limit_of_100(
        self,
@@ -497,11 +501,11 @@ class TestListMeetings:
        """ListMeetings respects offset parameter."""
        meeting_mixin_meetings_repo.list_all.return_value = ([], 0)

        request = noteflow_pb2.ListMeetingsRequest(offset=50)
        request = noteflow_pb2.ListMeetingsRequest(offset=PAGE_OFFSET_STANDARD)
        await meeting_mixin_servicer.ListMeetings(request, mock_grpc_context)

        call_kwargs = meeting_mixin_meetings_repo.list_all.call_args[1]
        assert call_kwargs["offset"] == 50, "Offset should be passed to repository"
        assert call_kwargs["offset"] == PAGE_OFFSET_STANDARD, "Offset should be passed to repository"

    @pytest.mark.parametrize(
        ("proto_sort_order", "expected_desc"),

@@ -22,6 +22,9 @@ from noteflow.infrastructure.persistence.repositories.preferences_repo import (
    PreferenceWithMetadata,
)

# Test constants
ETAG_HEX_DIGEST_LENGTH = 32

if TYPE_CHECKING:
    from datetime import datetime

@@ -81,7 +84,7 @@ class TestComputeEtag:
        etag2 = _compute_etag(prefs, updated_at)

        assert etag1 == etag2, "ETag should be identical for same inputs"
        assert len(etag1) == 32, "ETag should be 32 chars (MD5 hex digest)"
        assert len(etag1) == ETAG_HEX_DIGEST_LENGTH, "ETag should be 32 chars (MD5 hex digest)"

    def test_different_values_produce_different_etag(self) -> None:
        """Different preference values produce different ETags."""

@@ -579,7 +579,7 @@ class TestDeleteProject:

        assert response.success is True, "Should return success"

    async def test_delete_project_not_found(
    async def test_delete_project_not_found_returns_success_false(
        self,
        project_mixin_servicer: MockProjectServicerHost,
        mock_project_service: MagicMock,

@@ -231,11 +231,11 @@ class TestDiarizationDatetimeAwareness:
        diarization_dir = Path("src/noteflow/grpc/_mixins/diarization")

        # Check all Python files in the diarization package
        for diarization_path in diarization_dir.glob("*.py"):
            content = diarization_path.read_text()

            # Should not find datetime.now() pattern
            assert "datetime.now()" not in content, (
                f"datetime.now() found in {diarization_path.name} - "
                "should use utc_now() for timezone-aware datetimes"
            )
        files_with_datetime_now = [
            p.name for p in diarization_dir.glob("*.py")
            if "datetime.now()" in p.read_text()
        ]
        assert not files_with_datetime_now, (
            f"datetime.now() found in {files_with_datetime_now} - "
            "should use utc_now() for timezone-aware datetimes"
        )

@@ -22,6 +22,7 @@ if TYPE_CHECKING:
# Test constants
MULTI_SESSION_COUNT = 5
DIARIZATION_TASK_COUNT = 3
EXPECTED_AUDIO_SAMPLE_COUNT = 3200


class TestStreamCancellation:
@@ -203,7 +204,7 @@ class TestAudioWriterCleanup:

    @pytest.mark.asyncio
    async def test_audio_writer_closed_on_cleanup(
        self, memory_servicer: NoteFlowServicer, crypto: MagicMock, tmp_path: Path
        self, memory_servicer: NoteFlowServicer, tmp_path: Path
    ) -> None:
        """Verify audio writer is closed during cleanup."""
        from noteflow.infrastructure.audio.writer import MeetingAudioWriter
@@ -366,7 +367,7 @@ class TestPartialBufferCleanup:
        memory_servicer._partial_buffers[meeting_id].append(audio_chunk)

        # PartialAudioBuffer len() returns sample count, not chunk count
        assert len(memory_servicer._partial_buffers[meeting_id]) == 3200, "Should have 3200 samples (2 chunks)"
        assert len(memory_servicer._partial_buffers[meeting_id]) == EXPECTED_AUDIO_SAMPLE_COUNT, "Should have 3200 samples (2 chunks)"

        memory_servicer._cleanup_streaming_state(meeting_id)

@@ -653,9 +654,7 @@ class TestGrpcContextCancellationReal:
        assert meeting_id not in memory_servicer._vad_instances, "VAD should be cleaned up"

    @pytest.mark.asyncio
    async def test_context_cancelled_check_pattern(
        self, memory_servicer: NoteFlowServicer
    ) -> None:
    async def test_context_cancelled_check_pattern(self) -> None:
        """Verify the implicit cancellation detection pattern works correctly."""
        import asyncio

@@ -16,13 +16,21 @@ from noteflow.grpc._mixins.converters import (
    proto_timestamp_to_datetime,
)

# Test constants for timestamp conversion tests
TEST_YEAR = 2024
TEST_MONTH = 6
TEST_DAY = 15
TEST_HOUR = 14
TEST_MINUTE = 30
TEST_SECOND = 45


class TestDatetimeProtoTimestampConversion:
    """Test datetime <-> protobuf Timestamp conversions."""

    def test_datetime_to_proto_timestamp_computes_correct_epoch(self) -> None:
        """Convert datetime to proto Timestamp with correct epoch seconds."""
        dt = datetime(2024, 6, 15, 14, 30, 45, tzinfo=UTC)
        dt = datetime(TEST_YEAR, TEST_MONTH, TEST_DAY, TEST_HOUR, TEST_MINUTE, TEST_SECOND, tzinfo=UTC)
        expected_epoch = 1718461845

        ts = datetime_to_proto_timestamp(dt)
@@ -34,7 +42,7 @@ class TestDatetimeProtoTimestampConversion:
    def test_proto_timestamp_to_datetime_returns_utc(self) -> None:
        """Convert proto Timestamp to datetime with UTC timezone."""
        ts = Timestamp()
        ts.FromDatetime(datetime(2024, 6, 15, 14, 30, 45, tzinfo=UTC))
        ts.FromDatetime(datetime(TEST_YEAR, TEST_MONTH, TEST_DAY, TEST_HOUR, TEST_MINUTE, TEST_SECOND, tzinfo=UTC))

        dt = proto_timestamp_to_datetime(ts)

@@ -42,7 +50,7 @@ class TestDatetimeProtoTimestampConversion:

    def test_datetime_proto_timestamp_roundtrip(self) -> None:
        """Datetime -> proto Timestamp -> datetime roundtrip preserves value."""
        original = datetime(2024, 6, 15, 14, 30, 45, tzinfo=UTC)
        original = datetime(TEST_YEAR, TEST_MONTH, TEST_DAY, TEST_HOUR, TEST_MINUTE, TEST_SECOND, tzinfo=UTC)

        ts = datetime_to_proto_timestamp(original)
        result = proto_timestamp_to_datetime(ts)
@@ -67,11 +75,11 @@ class TestEpochSecondsConversion:
        dt = epoch_seconds_to_datetime(epoch_seconds)

        assert dt.tzinfo == UTC, "Returned datetime should have UTC timezone"
        assert dt.year == 2024, f"Expected year 2024, got {dt.year}"
        assert dt.year == TEST_YEAR, f"Expected year {TEST_YEAR}, got {dt.year}"

    def test_datetime_to_epoch_seconds_computes_correct_value(self) -> None:
        """Convert datetime to epoch seconds with correct value."""
        dt = datetime(2024, 6, 15, 14, 30, 45, tzinfo=UTC)
        dt = datetime(TEST_YEAR, TEST_MONTH, TEST_DAY, TEST_HOUR, TEST_MINUTE, TEST_SECOND, tzinfo=UTC)
        expected_seconds = 1718461845.0

        seconds = datetime_to_epoch_seconds(dt)
@@ -98,7 +106,7 @@ class TestEpochSecondsConversion:
        [
            pytest.param(0.0, 1970, id="unix_epoch_start"),
            pytest.param(1000000000.0, 2001, id="billion_seconds"),
            pytest.param(1718458245.0, 2024, id="recent_date"),
            pytest.param(1718458245.0, TEST_YEAR, id="recent_date"),
        ],
    )
    def test_epoch_seconds_to_datetime_various_values(
@@ -120,12 +128,12 @@ class TestIsoStringConversion:
        dt = iso_string_to_datetime(iso_str)

        assert dt.tzinfo == UTC, "Z suffix should be parsed as UTC"
        assert dt.year == 2024, f"Expected year 2024, got {dt.year}"
        assert dt.month == 6, f"Expected month 6, got {dt.month}"
        assert dt.day == 15, f"Expected day 15, got {dt.day}"
        assert dt.hour == 14, f"Expected hour 14, got {dt.hour}"
        assert dt.minute == 30, f"Expected minute 30, got {dt.minute}"
        assert dt.second == 45, f"Expected second 45, got {dt.second}"
        assert dt.year == TEST_YEAR, f"Expected year {TEST_YEAR}, got {dt.year}"
        assert dt.month == TEST_MONTH, f"Expected month {TEST_MONTH}, got {dt.month}"
        assert dt.day == TEST_DAY, f"Expected day {TEST_DAY}, got {dt.day}"
        assert dt.hour == TEST_HOUR, f"Expected hour {TEST_HOUR}, got {dt.hour}"
        assert dt.minute == TEST_MINUTE, f"Expected minute {TEST_MINUTE}, got {dt.minute}"
        assert dt.second == TEST_SECOND, f"Expected second {TEST_SECOND}, got {dt.second}"

    def test_iso_string_with_offset_preserved(self) -> None:
        """Parse ISO string with timezone offset."""
@@ -150,12 +158,12 @@ class TestIsoStringConversion:

    def test_datetime_to_iso_string_includes_timezone(self) -> None:
        """Format datetime as ISO string with timezone."""
        dt = datetime(2024, 6, 15, 14, 30, 45, tzinfo=UTC)
        dt = datetime(TEST_YEAR, TEST_MONTH, TEST_DAY, TEST_HOUR, TEST_MINUTE, TEST_SECOND, tzinfo=UTC)

        iso_str = datetime_to_iso_string(dt)

        assert "2024-06-15" in iso_str, f"Date should be in output: {iso_str}"
        assert "14:30:45" in iso_str, f"Time should be in output: {iso_str}"
        assert f"{TEST_YEAR}-0{TEST_MONTH}-{TEST_DAY}" in iso_str, f"Date should be in output: {iso_str}"
        assert f"{TEST_HOUR}:{TEST_MINUTE}:{TEST_SECOND}" in iso_str, f"Time should be in output: {iso_str}"
        # UTC represented as +00:00 in isoformat
        assert "+00:00" in iso_str or "Z" in iso_str, (
            f"Timezone should be in output: {iso_str}"
@@ -163,7 +171,7 @@ class TestIsoStringConversion:

    def test_iso_string_roundtrip(self) -> None:
        """Datetime -> ISO string -> datetime roundtrip preserves value."""
        original = datetime(2024, 6, 15, 14, 30, 45, tzinfo=UTC)
        original = datetime(TEST_YEAR, TEST_MONTH, TEST_DAY, TEST_HOUR, TEST_MINUTE, TEST_SECOND, tzinfo=UTC)

        iso_str = datetime_to_iso_string(original)
        result = iso_string_to_datetime(iso_str)

@@ -13,6 +13,13 @@ from noteflow.infrastructure.asr.segmenter import (
    SegmenterState,
)

# Test constants for custom configuration values
CUSTOM_SAMPLE_RATE_HZ = 44100
"""Custom sample rate in Hz for testing non-default configuration."""

MAX_SEGMENT_DURATION_SEC = 60.0
"""Maximum segment duration in seconds for testing force-emit behavior."""


class TestSegmenterInitialization:
    """Tests for Segmenter initialization."""
@@ -26,11 +33,11 @@ class TestSegmenterInitialization:

    def test_custom_config(self) -> None:
        """Segmenter accepts custom configuration."""
        config = SegmenterConfig(sample_rate=44100, max_segment_duration=60.0)
        config = SegmenterConfig(sample_rate=CUSTOM_SAMPLE_RATE_HZ, max_segment_duration=MAX_SEGMENT_DURATION_SEC)
        segmenter = Segmenter(config=config)

        assert segmenter.config.sample_rate == 44100, "custom sample rate should be applied"
        assert segmenter.config.max_segment_duration == 60.0, "custom max segment duration should be applied"
        assert segmenter.config.sample_rate == CUSTOM_SAMPLE_RATE_HZ, "custom sample rate should be applied"
        assert segmenter.config.max_segment_duration == MAX_SEGMENT_DURATION_SEC, "custom max segment duration should be applied"

    def test_initial_state_is_idle(self) -> None:
        """Segmenter starts in IDLE state."""

@@ -11,6 +11,10 @@ from numpy.typing import NDArray
from noteflow.config.constants import DEFAULT_SAMPLE_RATE
from noteflow.infrastructure.audio import SoundDeviceCapture

# Test constants for audio capture configuration
CUSTOM_SAMPLE_RATE_HZ = 44100
"""Custom sample rate in Hz for testing non-default audio capture configuration."""


class TestSoundDeviceCapture:
    """Tests for SoundDeviceCapture class."""
@@ -95,11 +99,11 @@ class TestSoundDeviceCapture:
            capture.start(
                device_id=None,
                on_frames=dummy_callback,
                sample_rate=44100,
                sample_rate=CUSTOM_SAMPLE_RATE_HZ,
                channels=1,
            )

            assert capture.sample_rate == 44100, "sample_rate should reflect configured value"
            assert capture.sample_rate == CUSTOM_SAMPLE_RATE_HZ, "sample_rate should reflect configured value"
            assert capture.channels == 1, "channels should reflect configured value"
            assert capture.is_capturing() is True, "is_capturing should return True after start"
        finally:

@@ -10,6 +10,10 @@ import pytest
from noteflow.config.constants import DEFAULT_SAMPLE_RATE
from noteflow.infrastructure.audio import AudioDeviceInfo, TimestampedAudio

# Test constants for audio frame sizes
AUDIO_FRAME_SIZE_SAMPLES = 1600
"""Standard audio frame size in samples (0.1 seconds at 16kHz)."""


class TestAudioDeviceInfo:
    """Tests for AudioDeviceInfo dataclass."""
@@ -50,19 +54,19 @@ class TestTimestampedAudio:

    def test_timestamped_audio_creation(self) -> None:
        """Test TimestampedAudio can be created with valid values."""
        frames = np.zeros(1600, dtype=np.float32)
        frames = np.zeros(AUDIO_FRAME_SIZE_SAMPLES, dtype=np.float32)
        audio = TimestampedAudio(
            frames=frames,
            timestamp=1.0,
            duration=0.1,
        )
        assert len(audio.frames) == 1600, "frames length should match input"
        assert len(audio.frames) == AUDIO_FRAME_SIZE_SAMPLES, "frames length should match input"
        assert audio.timestamp == 1.0, "timestamp should match input"
        assert audio.duration == 0.1, "duration should match input"

    def test_timestamped_audio_negative_duration_raises(self) -> None:
        """Test TimestampedAudio raises on negative duration."""
        frames = np.zeros(1600, dtype=np.float32)
        frames = np.zeros(AUDIO_FRAME_SIZE_SAMPLES, dtype=np.float32)
        with pytest.raises(ValueError, match="Duration must be non-negative"):
            TimestampedAudio(
                frames=frames,
@@ -72,7 +76,7 @@ class TestTimestampedAudio:

    def test_timestamped_audio_negative_timestamp_raises(self) -> None:
        """Test TimestampedAudio raises on negative timestamp."""
        frames = np.zeros(1600, dtype=np.float32)
        frames = np.zeros(AUDIO_FRAME_SIZE_SAMPLES, dtype=np.float32)
        with pytest.raises(ValueError, match="Timestamp must be non-negative"):
            TimestampedAudio(
                frames=frames,
@@ -92,7 +96,7 @@ class TestTimestampedAudio:

    def test_timestamped_audio_zero_timestamp_valid(self) -> None:
        """Test TimestampedAudio accepts zero timestamp."""
        frames = np.zeros(1600, dtype=np.float32)
        frames = np.zeros(AUDIO_FRAME_SIZE_SAMPLES, dtype=np.float32)
        audio = TimestampedAudio(
            frames=frames,
            timestamp=0.0,

@@ -14,6 +14,13 @@ from noteflow.infrastructure.audio.reader import MeetingAudioReader
from noteflow.infrastructure.audio.writer import MeetingAudioWriter
from noteflow.infrastructure.security.crypto import AesGcmCryptoBox

# Test constants for audio configuration
AUDIO_FRAME_SIZE_SAMPLES = 1600
"""Standard audio frame size in samples (0.1 seconds at 16kHz)."""

CUSTOM_SAMPLE_RATE_HZ = 48000
"""Custom sample rate in Hz for testing non-default sample rate handling."""

# crypto and meetings_dir fixtures are provided by tests/conftest.py


@@ -46,13 +53,13 @@ def test_reader_uses_manifest_sample_rate(
    wrapped_dek = crypto.wrap_dek(dek)

    writer = MeetingAudioWriter(crypto, meetings_dir)
    writer.open(meeting_id, dek, wrapped_dek, sample_rate=48000)
    writer.write_chunk(np.zeros(1600, dtype=np.float32))  # 1600 samples @ 48kHz
    writer.open(meeting_id, dek, wrapped_dek, sample_rate=CUSTOM_SAMPLE_RATE_HZ)
    writer.write_chunk(np.zeros(AUDIO_FRAME_SIZE_SAMPLES, dtype=np.float32))  # 1600 samples @ 48kHz
    writer.close()

    reader = MeetingAudioReader(crypto, meetings_dir)
    chunks = reader.load_meeting_audio(meeting_id)

    assert reader.sample_rate == 48000, "reader should expose sample_rate from manifest"
    assert reader.sample_rate == CUSTOM_SAMPLE_RATE_HZ, "reader should expose sample_rate from manifest"
    assert len(chunks) == 1, "should load exactly one chunk"
    assert chunks[0].duration == pytest.approx(1600 / 48000, rel=1e-6), "chunk duration should match sample count / sample rate"
    assert chunks[0].duration == pytest.approx(AUDIO_FRAME_SIZE_SAMPLES / CUSTOM_SAMPLE_RATE_HZ, rel=1e-6), "chunk duration should match sample count / sample rate"

@@ -7,6 +7,13 @@ import pytest

from noteflow.infrastructure.audio import TimestampedAudio, TimestampedRingBuffer

# Test constants for ring buffer configuration
DEFAULT_MAX_DURATION_SEC = 30.0
"""Default maximum buffer duration in seconds (30 seconds)."""

CUSTOM_MAX_DURATION_SEC = 15.0
"""Custom maximum buffer duration in seconds for testing configuration."""


class TestTimestampedRingBuffer:
    """Tests for TimestampedRingBuffer class."""
@@ -21,7 +28,7 @@ class TestTimestampedRingBuffer:
    def test_init_with_default_duration(self) -> None:
        """Test buffer uses default max_duration of 30 seconds."""
        buffer = TimestampedRingBuffer()
        assert buffer.max_duration == 30.0, "default max_duration should be 30 seconds"
        assert buffer.max_duration == DEFAULT_MAX_DURATION_SEC, "default max_duration should be 30 seconds"

    def test_init_with_invalid_duration_raises(self) -> None:
        """Test buffer raises on non-positive max_duration."""
@@ -172,8 +179,8 @@ class TestTimestampedRingBuffer:

    def test_max_duration_property(self) -> None:
        """Test max_duration property returns configured value."""
        buffer = TimestampedRingBuffer(max_duration=15.0)
        assert buffer.max_duration == 15.0, "max_duration should return configured value"
        buffer = TimestampedRingBuffer(max_duration=CUSTOM_MAX_DURATION_SEC)
        assert buffer.max_duration == CUSTOM_MAX_DURATION_SEC, "max_duration should return configured value"

    def test_len_returns_chunk_count(
        self, timestamped_audio_sequence: list[TimestampedAudio]

@@ -13,6 +13,13 @@ from noteflow.config.constants import DEFAULT_SAMPLE_RATE
from noteflow.infrastructure.audio.writer import MeetingAudioWriter
from noteflow.infrastructure.security.crypto import AesGcmCryptoBox, ChunkedAssetReader

# Test constants for audio frame sizes
AUDIO_FRAME_SIZE_SAMPLES = 1600
"""Standard audio frame size in samples (0.1 seconds at 16kHz)."""

PCM16_BYTES_PER_FRAME = 3200
"""PCM16 encoded size in bytes for AUDIO_FRAME_SIZE_SAMPLES (2 bytes per sample)."""

# crypto and meetings_dir fixtures are provided by tests/conftest.py


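A quick sketch of how these two constants relate (illustrative only, not from the diff). The later assertion that bytes_written exceeds PCM16_BYTES_PER_FRAME presumably holds because the AES-GCM ciphertext written by the crypto box carries per-chunk overhead (nonce and authentication tag) on top of the raw PCM16 payload.

# Illustrative sketch: PCM16 stores one int16 (2 bytes) per sample.
AUDIO_FRAME_SIZE_SAMPLES = 1600
PCM16_BYTES_PER_FRAME = AUDIO_FRAME_SIZE_SAMPLES * 2  # 3200 bytes of raw PCM per frame
assert PCM16_BYTES_PER_FRAME == 3200
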
@@ -78,13 +85,13 @@ class TestMeetingAudioWriterBasics:
|
||||
writer.open(meeting_id, dek, wrapped_dek)
|
||||
|
||||
# Create test audio: 1600 samples = 0.1 seconds at 16kHz
|
||||
test_audio = np.linspace(-1.0, 1.0, 1600, dtype=np.float32)
|
||||
test_audio = np.linspace(-1.0, 1.0, AUDIO_FRAME_SIZE_SAMPLES, dtype=np.float32)
|
||||
writer.write_chunk(test_audio)
|
||||
|
||||
# Audio is 3200 bytes, buffer is 1000, so should flush
|
||||
assert writer.bytes_written > 0, "bytes_written should be positive after flush"
|
||||
# PCM16 = 2 bytes/sample = 3200 bytes raw, but encrypted with overhead
|
||||
assert writer.bytes_written > 3200, "bytes_written should exceed raw size due to encryption overhead"
|
||||
assert writer.bytes_written > PCM16_BYTES_PER_FRAME, "bytes_written should exceed raw size due to encryption overhead"
|
||||
assert writer.chunk_count == 1, "chunk_count should be 1 after first flush"
|
||||
assert writer.write_count == 1, "write_count should be 1 after one write_chunk call"
|
||||
|
||||
@@ -107,10 +114,10 @@ class TestMeetingAudioWriterBasics:
|
||||
# Write 100 chunks of 1600 samples each (3200 bytes per write)
|
||||
# Buffer is 10000, so ~3 writes per encrypted chunk
|
||||
num_writes = 100
|
||||
bytes_per_write = 1600 * 2 # 3200 bytes
|
||||
bytes_per_write = PCM16_BYTES_PER_FRAME # 3200 bytes
|
||||
|
||||
for _ in range(num_writes):
|
||||
audio = np.random.uniform(-0.5, 0.5, 1600).astype(np.float32)
|
||||
audio = np.random.uniform(-0.5, 0.5, AUDIO_FRAME_SIZE_SAMPLES).astype(np.float32)
|
||||
writer.write_chunk(audio)
|
||||
|
||||
# write_count tracks incoming audio frames
|
||||
@@ -221,7 +228,7 @@ class TestMeetingAudioWriterBasics:
|
||||
writer.open(meeting_id, dek, wrapped_dek)
|
||||
|
||||
# Write small audio chunk (won't trigger auto-flush)
|
||||
writer.write_chunk(np.zeros(1600, dtype=np.float32))
|
||||
writer.write_chunk(np.zeros(AUDIO_FRAME_SIZE_SAMPLES, dtype=np.float32))
|
||||
|
||||
# Data should be buffered, not written
|
||||
assert writer.buffered_bytes > 0, "Data should be buffered before flush"
|
||||
@@ -265,7 +272,7 @@ class TestMeetingAudioWriterErrors:
|
||||
) -> None:
|
||||
"""Test writer raises RuntimeError if write called before open."""
|
||||
writer = MeetingAudioWriter(crypto, meetings_dir)
|
||||
audio = np.zeros(1600, dtype=np.float32)
|
||||
audio = np.zeros(AUDIO_FRAME_SIZE_SAMPLES, dtype=np.float32)
|
||||
|
||||
with pytest.raises(RuntimeError, match="not open"):
|
||||
writer.write_chunk(audio)
|
||||
@@ -398,7 +405,7 @@ class TestMeetingAudioWriterIntegration:
|
||||
wrapped_dek = crypto.wrap_dek(dek)
|
||||
|
||||
writer.open(meeting_id, dek, wrapped_dek)
|
||||
writer.write_chunk(np.zeros(1600, dtype=np.float32))
|
||||
writer.write_chunk(np.zeros(AUDIO_FRAME_SIZE_SAMPLES, dtype=np.float32))
|
||||
writer.close()
|
||||
|
||||
# Read manifest
|
||||
@@ -485,7 +492,7 @@ class TestMeetingAudioWriterPeriodicFlush:
|
||||
def write_audio() -> None:
|
||||
try:
|
||||
for _ in range(write_count):
|
||||
audio = np.random.uniform(-0.5, 0.5, 1600).astype(np.float32)
|
||||
audio = np.random.uniform(-0.5, 0.5, AUDIO_FRAME_SIZE_SAMPLES).astype(np.float32)
|
||||
writer.write_chunk(audio)
|
||||
except (RuntimeError, ValueError, OSError) as e:
|
||||
errors.append(e)
|
||||
|
||||
@@ -116,12 +116,12 @@ class TestEntityNormalization:
    def test_normalized_text_is_lowercase(self, ner_engine: NerEngine) -> None:
        """Normalized text should be lowercase."""
        entities = ner_engine.extract("John SMITH went to NYC.")
        for entity in entities:
            assert entity.normalized_text == entity.normalized_text.lower(), f"Normalized text '{entity.normalized_text}' should be lowercase"
        non_lowercase = [e for e in entities if e.normalized_text != e.normalized_text.lower()]
        assert not non_lowercase, f"All normalized text should be lowercase, but found: {[e.normalized_text for e in non_lowercase]}"

    def test_confidence_is_set(self, ner_engine: NerEngine) -> None:
        """Entities should have confidence score."""
        entities = ner_engine.extract("Microsoft Corporation is based in Seattle.")
        assert entities, "Should find entities"
        for entity in entities:
            assert 0.0 <= entity.confidence <= 1.0, "Confidence should be between 0 and 1"
        invalid_confidence = [e for e in entities if not (0.0 <= e.confidence <= 1.0)]
        assert not invalid_confidence, f"All entities should have confidence between 0 and 1, but found: {[(e.text, e.confidence) for e in invalid_confidence]}"

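The pattern adopted in this hunk, and reused in several hunks below, is to collect every violation into a list and then assert the list is empty, so a failure reports all offending items instead of stopping at the first. A minimal sketch with hypothetical data, not taken from the diff:

# Illustrative only: gather offenders, then assert the collection is empty.
items = ["alice", "bob", "carol"]
not_lowercase = [s for s in items if s != s.lower()]
assert not not_lowercase, f"expected all-lowercase items, found: {not_lowercase}"
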
@@ -17,6 +17,34 @@ def _load_migration_module(path: Path) -> ast.Module:
    return ast.parse(path.read_text())


def _has_variable(path: Path, var_name: str) -> bool:
    """Check if migration file has a specific variable assignment."""
    tree = _load_migration_module(path)
    nodes = list(ast.walk(tree))
    assign_matches = [
        n for n in nodes
        if isinstance(n, ast.Assign)
        and any(isinstance(t, ast.Name) and t.id == var_name for t in n.targets)
    ]
    annassign_matches = [
        n for n in nodes
        if isinstance(n, ast.AnnAssign)
        and isinstance(n.target, ast.Name)
        and n.target.id == var_name
    ]
    return bool(assign_matches or annassign_matches)


def _has_function(path: Path, func_name: str) -> bool:
    """Check if migration file has a specific function definition."""
    tree = _load_migration_module(path)
    matches = [
        n for n in ast.walk(tree)
        if isinstance(n, ast.FunctionDef) and n.name == func_name
    ]
    return bool(matches)


def _find_all_migration_files() -> list[Path]:
    """Find all migration files in versions directory."""
    return sorted(MIGRATIONS_DIR.glob("*.py"))
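
For context, a minimal sketch (illustrative only, not part of the diff) of what these helpers check: walk the parsed module and match Assign/AnnAssign targets or FunctionDef names. The migration-like source below is hypothetical.

import ast

# Hypothetical migration-like source, for illustration only.
src = "revision: str = 'abc123'\n\ndef upgrade() -> None: ...\n"
tree = ast.parse(src)

has_revision = any(
    (isinstance(n, ast.Assign) and any(isinstance(t, ast.Name) and t.id == "revision" for t in n.targets))
    or (isinstance(n, ast.AnnAssign) and isinstance(n.target, ast.Name) and n.target.id == "revision")
    for n in ast.walk(tree)
)
has_upgrade = any(isinstance(n, ast.FunctionDef) and n.name == "upgrade" for n in ast.walk(tree))
assert has_revision and has_upgrade
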
@@ -31,65 +59,27 @@ class TestMigrationStructure:
|
||||
|
||||
def test_all_migrations_have_revision(self) -> None:
|
||||
"""Each migration should have a revision identifier."""
|
||||
for path in _find_all_migration_files():
|
||||
tree = _load_migration_module(path)
|
||||
revision_found = False
|
||||
for node in ast.walk(tree):
|
||||
# Check both Assign (revision = ...) and AnnAssign (revision: str = ...)
|
||||
if isinstance(node, ast.Assign):
|
||||
for target in node.targets:
|
||||
if isinstance(target, ast.Name) and target.id == "revision":
|
||||
revision_found = True
|
||||
break
|
||||
elif (
|
||||
isinstance(node, ast.AnnAssign)
|
||||
and isinstance(node.target, ast.Name)
|
||||
and node.target.id == "revision"
|
||||
):
|
||||
revision_found = True
|
||||
assert revision_found, f"Missing 'revision' in {path.name}"
|
||||
missing_revision = [
|
||||
path.name
|
||||
for path in _find_all_migration_files()
|
||||
if not _has_variable(path, "revision")
|
||||
]
|
||||
assert not missing_revision, f"Missing 'revision' in: {missing_revision}"
|
||||
|
||||
def test_all_migrations_have_down_revision(self) -> None:
|
||||
"""Each migration should have a down_revision."""
|
||||
for path in _find_all_migration_files():
|
||||
tree = _load_migration_module(path)
|
||||
down_revision_found = False
|
||||
for node in ast.walk(tree):
|
||||
# Check both Assign and AnnAssign
|
||||
if isinstance(node, ast.Assign):
|
||||
for target in node.targets:
|
||||
if isinstance(target, ast.Name) and target.id == "down_revision":
|
||||
down_revision_found = True
|
||||
break
|
||||
elif (
|
||||
isinstance(node, ast.AnnAssign)
|
||||
and isinstance(node.target, ast.Name)
|
||||
and node.target.id == "down_revision"
|
||||
):
|
||||
down_revision_found = True
|
||||
assert down_revision_found, f"Missing 'down_revision' in {path.name}"
|
||||
missing = [p.name for p in _find_all_migration_files() if not _has_variable(p, "down_revision")]
|
||||
assert not missing, f"Missing 'down_revision' in: {missing}"
|
||||
|
||||
def test_all_migrations_have_upgrade_function(self) -> None:
|
||||
"""Each migration should have an upgrade function."""
|
||||
for path in _find_all_migration_files():
|
||||
tree = _load_migration_module(path)
|
||||
upgrade_found = False
|
||||
for node in ast.walk(tree):
|
||||
if isinstance(node, ast.FunctionDef) and node.name == "upgrade":
|
||||
upgrade_found = True
|
||||
break
|
||||
assert upgrade_found, f"Missing 'upgrade()' function in {path.name}"
|
||||
missing = [p.name for p in _find_all_migration_files() if not _has_function(p, "upgrade")]
|
||||
assert not missing, f"Missing 'upgrade()' function in: {missing}"
|
||||
|
||||
def test_all_migrations_have_downgrade_function(self) -> None:
|
||||
"""Each migration should have a downgrade function."""
|
||||
for path in _find_all_migration_files():
|
||||
tree = _load_migration_module(path)
|
||||
downgrade_found = False
|
||||
for node in ast.walk(tree):
|
||||
if isinstance(node, ast.FunctionDef) and node.name == "downgrade":
|
||||
downgrade_found = True
|
||||
break
|
||||
assert downgrade_found, f"Missing 'downgrade()' function in {path.name}"
|
||||
missing = [p.name for p in _find_all_migration_files() if not _has_function(p, "downgrade")]
|
||||
assert not missing, f"Missing 'downgrade()' function in: {missing}"
|
||||
|
||||
|
||||
class TestSprint0Triggers:
|
||||
|
||||
@@ -23,6 +23,10 @@ from noteflow.infrastructure.summarization import CloudBackend
|
||||
|
||||
from .conftest import build_valid_json_response, create_test_segment
|
||||
|
||||
# Test constants for token usage
|
||||
TEST_TOKEN_COUNT = 150
|
||||
"""Token count used for mocked LLM response usage metrics."""
|
||||
|
||||
|
||||
class TestCloudSummarizerProperties:
|
||||
"""Tests for CloudSummarizer properties."""
|
||||
@@ -174,10 +178,10 @@ class TestCloudSummarizerOpenAI:
|
||||
assert result.summary.action_items == [], "empty segments should yield no action items"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_summarize_returns_result(
|
||||
async def test_summarize_returns_result_openai(
|
||||
self, meeting_id: MeetingId, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
"""Summarize should return SummarizationResult."""
|
||||
"""Summarize should return SummarizationResult for OpenAI backend."""
|
||||
response_content = build_valid_json_response(
|
||||
summary="Project meeting summary.",
|
||||
key_points=[{"text": "Key point", "segment_ids": [0]}],
|
||||
@@ -189,7 +193,7 @@ class TestCloudSummarizerOpenAI:
|
||||
choices=[
|
||||
types.SimpleNamespace(message=types.SimpleNamespace(content=response_content))
|
||||
],
|
||||
usage=types.SimpleNamespace(total_tokens=150),
|
||||
usage=types.SimpleNamespace(total_tokens=TEST_TOKEN_COUNT),
|
||||
)
|
||||
|
||||
mock_client = types.SimpleNamespace(
|
||||
@@ -209,7 +213,7 @@ class TestCloudSummarizerOpenAI:
|
||||
|
||||
assert result.provider_name == "openai", "provider_name should be 'openai'"
|
||||
assert result.summary.executive_summary == "Project meeting summary.", "summary should match"
|
||||
assert result.tokens_used == 150, "tokens_used should match response"
|
||||
assert result.tokens_used == TEST_TOKEN_COUNT, "tokens_used should match response"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_raises_unavailable_on_auth_error(
|
||||
@@ -237,10 +241,10 @@ class TestCloudSummarizerOpenAI:
|
||||
await summarizer.summarize(request)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_raises_invalid_response_on_empty_content(
|
||||
async def test_raises_invalid_response_on_empty_content_openai(
|
||||
self, meeting_id: MeetingId, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
"""Should raise InvalidResponseError on empty response."""
|
||||
"""Should raise InvalidResponseError on empty response from OpenAI."""
|
||||
|
||||
def create_empty_response(**_: object) -> types.SimpleNamespace:
|
||||
return types.SimpleNamespace(
|
||||
@@ -271,10 +275,10 @@ class TestCloudSummarizerAnthropic:
|
||||
"""Tests for CloudSummarizer with Anthropic backend."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_summarize_returns_result(
|
||||
async def test_summarize_returns_result_anthropic(
|
||||
self, meeting_id: MeetingId, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
"""Summarize should return SummarizationResult."""
|
||||
"""Summarize should return SummarizationResult for Anthropic backend."""
|
||||
response_content = build_valid_json_response(
|
||||
summary="Anthropic summary.",
|
||||
key_points=[{"text": "Point", "segment_ids": [0]}],
|
||||
@@ -301,7 +305,7 @@ class TestCloudSummarizerAnthropic:
|
||||
|
||||
assert result.provider_name == "anthropic", "provider_name should be 'anthropic'"
|
||||
assert result.summary.executive_summary == "Anthropic summary.", "summary should match"
|
||||
assert result.tokens_used == 150, "tokens_used should sum input and output"
|
||||
assert result.tokens_used == TEST_TOKEN_COUNT, "tokens_used should sum input and output"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_raises_unavailable_when_package_missing(
|
||||
@@ -335,10 +339,10 @@ class TestCloudSummarizerAnthropic:
|
||||
await summarizer.summarize(request)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_raises_invalid_response_on_empty_content(
|
||||
async def test_raises_invalid_response_on_empty_content_anthropic(
|
||||
self, meeting_id: MeetingId, monkeypatch: pytest.MonkeyPatch
|
||||
) -> None:
|
||||
"""Should raise InvalidResponseError on empty response."""
|
||||
"""Should raise InvalidResponseError on empty response from Anthropic."""
|
||||
|
||||
def create_empty_response(**_: object) -> types.SimpleNamespace:
|
||||
return types.SimpleNamespace(
|
||||
|
||||
@@ -9,6 +9,16 @@ import pytest
|
||||
|
||||
from noteflow.infrastructure.diarization import SpeakerTurn, assign_speaker, assign_speakers_batch
|
||||
|
||||
# Test constants for speaker turn timing
|
||||
TURN_END_TIME_SHORT = 15.0
|
||||
"""Short turn end time in seconds for testing confidence and turn assignment."""
|
||||
|
||||
TURN_END_TIME_MEDIUM = 12.0
|
||||
"""Medium turn end time in seconds for testing partial overlaps."""
|
||||
|
||||
TURN_END_TIME_LONG = 20.0
|
||||
"""Long turn end time in seconds for testing full overlaps and containment."""
|
||||
|
||||
|
||||
class TestSpeakerTurn:
|
||||
"""Tests for the SpeakerTurn dataclass."""
|
||||
@@ -24,7 +34,7 @@ class TestSpeakerTurn:
|
||||
|
||||
def test_create_turn_with_confidence(self) -> None:
|
||||
"""Create a turn with custom confidence."""
|
||||
turn = SpeakerTurn(speaker="SPEAKER_01", start=10.0, end=15.0, confidence=0.85)
|
||||
turn = SpeakerTurn(speaker="SPEAKER_01", start=10.0, end=TURN_END_TIME_SHORT, confidence=0.85)
|
||||
assert turn.confidence == 0.85, "Custom confidence should be stored correctly"
|
||||
|
||||
def test_invalid_end_before_start_raises(self) -> None:
|
||||
@@ -56,7 +66,7 @@ class TestSpeakerTurn:
|
||||
assert turn.overlaps(start, end), f"Turn [5.0, 10.0] should overlap with [{start}, {end}]"
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"start,end", [(0.0, 5.0), (10.0, 15.0), (0.0, 3.0), (12.0, 20.0)]
|
||||
"start,end", [(0.0, 5.0), (10.0, TURN_END_TIME_SHORT), (0.0, 3.0), (TURN_END_TIME_MEDIUM, TURN_END_TIME_LONG)]
|
||||
)
|
||||
def test_overlaps_returns_false(self, start: float, end: float) -> None:
|
||||
"""overlaps() returns False when ranges don't overlap."""
|
||||
@@ -66,7 +76,7 @@ class TestSpeakerTurn:
|
||||
def test_overlap_duration_full_overlap(self) -> None:
|
||||
"""overlap_duration() for full overlap returns turn duration."""
|
||||
turn = SpeakerTurn(speaker="SPEAKER_00", start=5.0, end=10.0)
|
||||
assert turn.overlap_duration(0.0, 15.0) == 5.0, "Full overlap should return entire turn duration"
|
||||
assert turn.overlap_duration(0.0, TURN_END_TIME_SHORT) == 5.0, "Full overlap should return entire turn duration"
|
||||
|
||||
def test_overlap_duration_partial_overlap_left(self) -> None:
|
||||
"""overlap_duration() for partial overlap on left side."""
|
||||
@@ -76,18 +86,18 @@ class TestSpeakerTurn:
|
||||
def test_overlap_duration_partial_overlap_right(self) -> None:
|
||||
"""overlap_duration() for partial overlap on right side."""
|
||||
turn = SpeakerTurn(speaker="SPEAKER_00", start=5.0, end=10.0)
|
||||
assert turn.overlap_duration(8.0, 15.0) == 2.0, "Right partial overlap [8.0, 15.0] with [5.0, 10.0] should be 2.0"
|
||||
assert turn.overlap_duration(8.0, TURN_END_TIME_SHORT) == 2.0, "Right partial overlap [8.0, 15.0] with [5.0, 10.0] should be 2.0"
|
||||
|
||||
def test_overlap_duration_contained(self) -> None:
|
||||
"""overlap_duration() when range is contained within turn."""
|
||||
turn = SpeakerTurn(speaker="SPEAKER_00", start=0.0, end=20.0)
|
||||
turn = SpeakerTurn(speaker="SPEAKER_00", start=0.0, end=TURN_END_TIME_LONG)
|
||||
assert turn.overlap_duration(5.0, 10.0) == 5.0, "Contained range [5.0, 10.0] within [0.0, 20.0] should return range duration"
|
||||
|
||||
def test_overlap_duration_no_overlap(self) -> None:
|
||||
"""overlap_duration() returns 0.0 when no overlap."""
|
||||
turn = SpeakerTurn(speaker="SPEAKER_00", start=5.0, end=10.0)
|
||||
assert turn.overlap_duration(0.0, 3.0) == 0.0, "No overlap before turn should return 0.0"
|
||||
assert turn.overlap_duration(12.0, 20.0) == 0.0, "No overlap after turn should return 0.0"
|
||||
assert turn.overlap_duration(TURN_END_TIME_MEDIUM, TURN_END_TIME_LONG) == 0.0, "No overlap after turn should return 0.0"
|
||||
|
||||
|
||||
class TestAssignSpeaker:
|
||||
@@ -141,7 +151,7 @@ class TestAssignSpeaker:
|
||||
"""No overlapping turns returns None."""
|
||||
turns = [
|
||||
SpeakerTurn(speaker="SPEAKER_00", start=0.0, end=5.0),
|
||||
SpeakerTurn(speaker="SPEAKER_01", start=10.0, end=15.0),
|
||||
SpeakerTurn(speaker="SPEAKER_01", start=10.0, end=TURN_END_TIME_SHORT),
|
||||
]
|
||||
speaker, confidence = assign_speaker(6.0, 9.0, turns)
|
||||
assert speaker is None, "No overlapping turns should return None speaker"
|
||||
|
||||
@@ -20,6 +20,19 @@ from noteflow.infrastructure.converters.integration_converters import (
|
||||
SyncRunConverter,
|
||||
)
|
||||
|
||||
# Test constants for sync run metrics
|
||||
SYNC_RUN_ITEMS_SYNCED = 15
|
||||
"""Number of items synced in a standard test sync run fixture."""
|
||||
|
||||
SYNC_RUN_DURATION_MS_SHORT = 5000
|
||||
"""Short sync run duration in milliseconds (5 seconds)."""
|
||||
|
||||
SYNC_RUN_DURATION_MS_MEDIUM = 10000
|
||||
"""Medium sync run duration in milliseconds (10 seconds)."""
|
||||
|
||||
SYNC_RUN_ITEMS_COMPLETE = 25
|
||||
"""Number of items in a complete sync run test case."""
|
||||
|
||||
|
||||
class TestIntegrationConverterOrmToDomain:
|
||||
"""Tests for IntegrationConverter.orm_to_domain."""
|
||||
@@ -219,9 +232,9 @@ class TestSyncRunConverterOrmToDomain:
|
||||
model.status = "success"
|
||||
model.started_at = datetime(2024, 1, 15, 12, 0, 0, tzinfo=UTC)
|
||||
model.ended_at = datetime(2024, 1, 15, 12, 0, 5, tzinfo=UTC)
|
||||
model.duration_ms = 5000
|
||||
model.duration_ms = SYNC_RUN_DURATION_MS_SHORT
|
||||
model.error_message = None
|
||||
model.stats = {"items_synced": 10, "items_total": 15}
|
||||
model.stats = {"items_synced": 10, "items_total": SYNC_RUN_ITEMS_SYNCED}
|
||||
return model
|
||||
|
||||
def test_sync_run_orm_to_domain(
|
||||
@@ -234,7 +247,7 @@ class TestSyncRunConverterOrmToDomain:
|
||||
assert result.id == mock_sync_run_model.id, "ID should match"
|
||||
assert result.integration_id == mock_sync_run_model.integration_id, "Integration ID should match"
|
||||
assert result.status == SyncRunStatus.SUCCESS, "Status should be enum"
|
||||
assert result.duration_ms == 5000, "Duration should match"
|
||||
assert result.duration_ms == SYNC_RUN_DURATION_MS_SHORT, "Duration should match"
|
||||
assert result.error_message is None, "Error message should be None"
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@@ -261,7 +274,7 @@ class TestSyncRunConverterOrmToDomain:
|
||||
result = SyncRunConverter.orm_to_domain(mock_sync_run_model)
|
||||
assert isinstance(result.stats, dict), "Stats should be dict"
|
||||
assert result.stats["items_synced"] == 10, "Items synced count should be preserved"
|
||||
assert result.stats["items_total"] == 15, "Items total count should be preserved"
|
||||
assert result.stats["items_total"] == SYNC_RUN_ITEMS_SYNCED, "Items total count should be preserved"
|
||||
|
||||
def test_handles_none_stats(self, mock_sync_run_model: MagicMock) -> None:
|
||||
"""None stats in ORM becomes empty dict in domain."""
|
||||
@@ -291,9 +304,9 @@ class TestSyncRunConverterToOrmKwargs:
|
||||
status=SyncRunStatus.SUCCESS,
|
||||
started_at=datetime(2024, 1, 15, 12, 0, 0, tzinfo=UTC),
|
||||
ended_at=datetime(2024, 1, 15, 12, 0, 10, tzinfo=UTC),
|
||||
duration_ms=10000,
|
||||
duration_ms=SYNC_RUN_DURATION_MS_MEDIUM,
|
||||
error_message=None,
|
||||
stats={"items_synced": 25},
|
||||
stats={"items_synced": SYNC_RUN_ITEMS_COMPLETE},
|
||||
)
|
||||
|
||||
result = SyncRunConverter.to_orm_kwargs(sync_run)
|
||||
@@ -301,8 +314,8 @@ class TestSyncRunConverterToOrmKwargs:
|
||||
assert result["id"] == sync_run.id, "ID should be preserved"
|
||||
assert result["integration_id"] == sync_run.integration_id, "Integration ID should be preserved"
|
||||
assert result["status"] == "success", "Status should be string value"
|
||||
assert result["duration_ms"] == 10000, "Duration should be preserved"
|
||||
assert result["stats"] == {"items_synced": 25}, "Stats should be preserved"
|
||||
assert result["duration_ms"] == SYNC_RUN_DURATION_MS_MEDIUM, "Duration should be preserved"
|
||||
assert result["stats"] == {"items_synced": SYNC_RUN_ITEMS_COMPLETE}, "Stats should be preserved"
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
("status_enum", "expected_string"),
|
||||
|
||||
@@ -13,6 +13,10 @@ import pytest
|
||||
from noteflow.infrastructure.logging.log_buffer import LogBuffer, LogEntry
|
||||
from noteflow.infrastructure.metrics.collector import MetricsCollector, PerformanceMetrics
|
||||
|
||||
# Test constants for log buffer capacity
|
||||
LARGE_DETAILS_KEY_COUNT = 50
|
||||
"""Number of keys in a large details dictionary for stress testing."""
|
||||
|
||||
|
||||
class TestLogEntry:
|
||||
"""Test LogEntry dataclass."""
|
||||
@@ -336,13 +340,13 @@ class TestLogBufferEdgeCases:
|
||||
def test_log_with_large_details(self) -> None:
|
||||
"""Buffer handles entries with large details objects."""
|
||||
buffer = LogBuffer(capacity=10)
|
||||
large_details = {f"key_{i}": f"value_{i}" * 100 for i in range(50)}
|
||||
large_details = {f"key_{i}": f"value_{i}" * 100 for i in range(LARGE_DETAILS_KEY_COUNT)}
|
||||
|
||||
buffer.log("info", "app", "Large details", large_details)
|
||||
|
||||
recent = buffer.get_recent()
|
||||
assert len(recent) == 1, "Buffer should contain one entry with large details"
|
||||
assert len(recent[0].details) == 50, "Entry should retain all 50 detail keys"
|
||||
assert len(recent[0].details) == LARGE_DETAILS_KEY_COUNT, f"Entry should retain all {LARGE_DETAILS_KEY_COUNT} detail keys"
|
||||
|
||||
def test_rapid_sequential_logging(self) -> None:
|
||||
"""Rapid sequential logging maintains order."""
|
||||
@@ -355,9 +359,8 @@ class TestLogBufferEdgeCases:
|
||||
recent = buffer.get_recent(limit=50)
|
||||
|
||||
# Recent returns newest first, so reverse to check order
|
||||
for i, entry in enumerate(reversed(recent)):
|
||||
expected_msg = f"Message {i}"
|
||||
assert entry.message == expected_msg, f"Expected '{expected_msg}', got '{entry.message}'"
|
||||
actual_messages = [entry.message for entry in reversed(recent)]
|
||||
assert actual_messages == messages, "Messages should maintain sequential order"
|
||||
|
||||
def test_clear_then_append_works(self) -> None:
|
||||
"""Buffer works correctly after clear."""
|
||||
@@ -402,11 +405,12 @@ class TestMetricsCollectorEdgeCases:
|
||||
|
||||
history = collector.get_history()
|
||||
|
||||
for i in range(len(history) - 1):
|
||||
assert history[i].timestamp <= history[i + 1].timestamp, (
|
||||
f"Timestamp at {i} ({history[i].timestamp}) is greater than "
|
||||
f"timestamp at {i+1} ({history[i + 1].timestamp})"
|
||||
)
|
||||
out_of_order = [
|
||||
(i, history[i].timestamp, history[i + 1].timestamp)
|
||||
for i in range(len(history) - 1)
|
||||
if history[i].timestamp > history[i + 1].timestamp
|
||||
]
|
||||
assert not out_of_order, f"Timestamps should be in chronological order, but found out-of-order pairs: {out_of_order}"
|
||||
|
||||
def test_history_eviction_removes_oldest(self) -> None:
|
||||
"""History eviction removes oldest entries first."""
|
||||
|
||||
@@ -14,6 +14,10 @@ from noteflow.infrastructure.triggers.calendar (
    parse_calendar_event_config,
)

# Test constants for non-iterable input validation
NON_ITERABLE_TEST_VALUE = 12345
"""Integer value used to test that non-iterable input returns empty list."""


def _settings(**overrides: object) -> CalendarTriggerSettings:
    """Create CalendarTriggerSettings with defaults and overrides."""
@@ -316,4 +320,4 @@ class TestParseCalendarEventConfig:

    def test_non_iterable_returns_empty(self) -> None:
        """Non-iterable input should return empty list."""
        assert parse_calendar_event_config(12345) == [], "non-iterable returns empty"
        assert parse_calendar_event_config(NON_ITERABLE_TEST_VALUE) == [], "non-iterable returns empty"

@@ -352,14 +352,17 @@ class TestRecoveryIdempotence:
|
||||
|
||||
# Combined should be exactly the count (first wins, others find nothing)
|
||||
total_recovered = result1 + result2 + result3
|
||||
assert total_recovered == CONCURRENT_RECOVERY_COUNT, "Total recovered should match"
|
||||
assert total_recovered == CONCURRENT_RECOVERY_COUNT, (
|
||||
f"expected {CONCURRENT_RECOVERY_COUNT} total recovered, got {total_recovered}"
|
||||
)
|
||||
|
||||
# Verify all in ERROR state
|
||||
async with SqlAlchemyUnitOfWork(session_factory, tmp_path) as uow:
|
||||
for mid in meeting_ids:
|
||||
recovered = await uow.meetings.get(mid)
|
||||
assert recovered is not None, "Recovered meeting should exist"
|
||||
assert recovered.state == MeetingState.ERROR, "Recovered meeting state"
|
||||
meetings = [await uow.meetings.get(mid) for mid in meeting_ids]
|
||||
missing = [m for m in meetings if m is None]
|
||||
wrong_state = [m for m in meetings if m is not None and m.state != MeetingState.ERROR]
|
||||
assert not missing, f"All recovered meetings should exist, but {len(missing)} are missing"
|
||||
assert not wrong_state, f"All meetings should be in ERROR state, but {len(wrong_state)} are not"
|
||||
|
||||
|
||||
class TestMixedCrashRecovery:
|
||||
@@ -485,8 +488,14 @@ class TestRecoveryResult:
|
||||
recovery_service = RecoveryService(uow=uow, meetings_dir=tmp_path)
|
||||
result = await recovery_service.recover_all()
|
||||
|
||||
assert result.meetings_recovered == RECORDING_MEETINGS_COUNT
|
||||
assert result.diarization_jobs_failed == RUNNING_JOBS_COUNT
|
||||
assert result.meetings_recovered == RECORDING_MEETINGS_COUNT, (
|
||||
f"expected {RECORDING_MEETINGS_COUNT} meetings recovered, "
|
||||
f"got {result.meetings_recovered}"
|
||||
)
|
||||
assert result.diarization_jobs_failed == RUNNING_JOBS_COUNT, (
|
||||
f"expected {RUNNING_JOBS_COUNT} jobs failed, "
|
||||
f"got {result.diarization_jobs_failed}"
|
||||
)
|
||||
|
||||
@pytest.mark.integration
|
||||
@pytest.mark.asyncio
|
||||
@@ -517,8 +526,12 @@ class TestRecoveryResult:
|
||||
recovery_service = RecoveryService(uow=uow, meetings_dir=tmp_path)
|
||||
result = await recovery_service.recover_all()
|
||||
|
||||
assert result.meetings_recovered == 0
|
||||
assert result.diarization_jobs_failed == 0
|
||||
assert result.meetings_recovered == 0, (
|
||||
f"expected 0 meetings recovered, got {result.meetings_recovered}"
|
||||
)
|
||||
assert result.diarization_jobs_failed == 0, (
|
||||
f"expected 0 jobs failed, got {result.diarization_jobs_failed}"
|
||||
)
|
||||
|
||||
|
||||
class TestPartialTransactionRecovery:
|
||||
@@ -570,7 +583,7 @@ class TestPartialTransactionRecovery:
|
||||
# Simulate interrupted stop: begin_stopping but crash before stop_recording
|
||||
async with SqlAlchemyUnitOfWork(session_factory, tmp_path) as uow:
|
||||
m = await uow.meetings.get(meeting_id)
|
||||
assert m is not None
|
||||
assert m is not None, f"meeting {meeting_id} should exist before state transition"
|
||||
m.begin_stopping() # State = STOPPING
|
||||
await uow.meetings.update(m)
|
||||
await uow.commit()
|
||||
@@ -584,5 +597,7 @@ class TestPartialTransactionRecovery:
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, tmp_path) as uow:
|
||||
recovered = await uow.meetings.get(meeting_id)
|
||||
assert recovered is not None
|
||||
assert recovered.state == MeetingState.ERROR, "Should be marked ERROR"
|
||||
assert recovered is not None, f"meeting {meeting_id} should exist after recovery"
|
||||
assert recovered.state == MeetingState.ERROR, (
|
||||
f"recovered meeting should be ERROR, got {recovered.state}"
|
||||
)
|
||||
|
||||
@@ -50,8 +50,12 @@ class TestConnectionPoolBehavior:
|
||||
tasks = [concurrent_operation(i) for i in range(pool_size)]
|
||||
results = await asyncio.gather(*tasks)
|
||||
|
||||
assert len(results) == pool_size
|
||||
assert set(results) == set(range(pool_size))
|
||||
assert len(results) == pool_size, (
|
||||
f"expected {pool_size} results, got {len(results)}"
|
||||
)
|
||||
assert set(results) == set(range(pool_size)), (
|
||||
f"expected result indices {set(range(pool_size))}, got {set(results)}"
|
||||
)
|
||||
|
||||
@pytest.mark.integration
|
||||
@pytest.mark.asyncio
|
||||
@@ -80,7 +84,9 @@ class TestConnectionPoolBehavior:
|
||||
timeout=POOL_TIMEOUT_SECONDS,
|
||||
)
|
||||
|
||||
assert len(results) == pool_size + overflow
|
||||
assert len(results) == pool_size + overflow, (
|
||||
f"expected {pool_size + overflow} results, got {len(results)}"
|
||||
)
|
||||
|
||||
@pytest.mark.integration
|
||||
@pytest.mark.asyncio
|
||||
@@ -119,7 +125,9 @@ class TestTransactionRollback:
|
||||
# Verify not persisted
|
||||
async with SqlAlchemyUnitOfWork(session_factory, ".") as uow:
|
||||
retrieved = await uow.meetings.get(meeting_id)
|
||||
assert retrieved is None
|
||||
assert retrieved is None, (
|
||||
f"meeting {meeting_id} should not persist without commit"
|
||||
)
|
||||
|
||||
@pytest.mark.integration
|
||||
@pytest.mark.asyncio
|
||||
@@ -139,7 +147,9 @@ class TestTransactionRollback:
|
||||
# Verify not persisted
|
||||
async with SqlAlchemyUnitOfWork(session_factory, ".") as uow:
|
||||
retrieved = await uow.meetings.get(meeting_id)
|
||||
assert retrieved is None
|
||||
assert retrieved is None, (
|
||||
f"meeting {meeting_id} should not persist after explicit rollback"
|
||||
)
|
||||
|
||||
@pytest.mark.integration
|
||||
@pytest.mark.asyncio
|
||||
@@ -159,8 +169,12 @@ class TestTransactionRollback:
|
||||
# Verify persisted
|
||||
async with SqlAlchemyUnitOfWork(session_factory, ".") as uow:
|
||||
retrieved = await uow.meetings.get(meeting_id)
|
||||
assert retrieved is not None
|
||||
assert retrieved.title == "Commit Test"
|
||||
assert retrieved is not None, (
|
||||
f"meeting {meeting_id} should persist after commit"
|
||||
)
|
||||
assert retrieved.title == "Commit Test", (
|
||||
f"expected title 'Commit Test', got '{retrieved.title}'"
|
||||
)
|
||||
|
||||
|
||||
class TestTransactionIsolation:
|
||||
@@ -228,13 +242,16 @@ class TestTransactionIsolation:
|
||||
ids = await asyncio.gather(*[create_meeting(i) for i in range(10)])
|
||||
|
||||
# All should have unique IDs
|
||||
assert len(set(ids)) == 10
|
||||
assert len(set(ids)) == 10, (
|
||||
f"expected 10 unique IDs, got {len(set(ids))} unique from {ids}"
|
||||
)
|
||||
|
||||
# All should be persisted
|
||||
async with SqlAlchemyUnitOfWork(session_factory, ".") as uow:
|
||||
for mid in ids:
|
||||
m = await uow.meetings.get(mid)
|
||||
assert m is not None
|
||||
missing_ids = [
|
||||
mid for mid in ids if await uow.meetings.get(mid) is None
|
||||
]
|
||||
assert not missing_ids, f"meetings not persisted: {missing_ids}"
|
||||
|
||||
|
||||
class TestDatabaseReconnection:
|
||||
@@ -258,7 +275,7 @@ class TestDatabaseReconnection:
|
||||
# Second operation should succeed (pool_pre_ping handles stale connections)
|
||||
async with SqlAlchemyUnitOfWork(session_factory, ".") as uow:
|
||||
count = await uow.meetings.count_by_state(MeetingState.CREATED)
|
||||
assert count >= 0
|
||||
assert count >= 0, f"expected non-negative count, got {count}"
|
||||
|
||||
@pytest.mark.integration
|
||||
@pytest.mark.asyncio
|
||||
@@ -278,7 +295,10 @@ class TestDatabaseReconnection:
|
||||
# Session 2: Should not see uncommitted changes
|
||||
async with SqlAlchemyUnitOfWork(session_factory, ".") as uow2:
|
||||
found = await uow2.meetings.get(meeting_id)
|
||||
assert found is None # Not visible in other session
|
||||
assert found is None, (
|
||||
f"meeting {meeting_id} should not be visible in other session "
|
||||
"before commit"
|
||||
)
|
||||
|
||||
# Still not committed in uow1
|
||||
|
||||
@@ -317,7 +337,9 @@ class TestSegmentOperations:
|
||||
# Verify all segments persisted
|
||||
async with SqlAlchemyUnitOfWork(session_factory, ".") as uow:
|
||||
segments = await uow.segments.get_by_meeting(str(meeting_id))
|
||||
assert len(segments) == BULK_SEGMENT_COUNT
|
||||
assert len(segments) == BULK_SEGMENT_COUNT, (
|
||||
f"expected {BULK_SEGMENT_COUNT} segments, got {len(segments)}"
|
||||
)
|
||||
|
||||
|
||||
class TestDatabaseFailureChaos:
|
||||
|
||||
@@ -78,7 +78,7 @@ class TestDiarizationJobRepository:
|
||||
|
||||
result = await job_repo.get("nonexistent-job-id")
|
||||
|
||||
assert result is None
|
||||
assert result is None, "get should return None for nonexistent job ID"
|
||||
|
||||
async def test_update_status_to_running(self, session: AsyncSession) -> None:
|
||||
"""Test updating job status from QUEUED to RUNNING."""
|
||||
@@ -100,11 +100,11 @@ class TestDiarizationJobRepository:
|
||||
updated = await job_repo.update_status(job.job_id, JOB_STATUS_RUNNING)
|
||||
await session.commit()
|
||||
|
||||
assert updated is True
|
||||
assert updated is True, "update_status should return True on success"
|
||||
|
||||
retrieved = await job_repo.get(job.job_id)
|
||||
assert retrieved is not None
|
||||
assert retrieved.status == JOB_STATUS_RUNNING
|
||||
assert retrieved is not None, "job should still be retrievable after status update"
|
||||
assert retrieved.status == JOB_STATUS_RUNNING, f"expected RUNNING status, got {retrieved.status}"
|
||||
|
||||
async def test_update_status_to_completed_with_results(self, session: AsyncSession) -> None:
|
||||
"""Test updating job status to COMPLETED with segments and speakers."""
|
||||
@@ -180,7 +180,7 @@ class TestDiarizationJobRepository:
|
||||
result = await job_repo.update_status("nonexistent", JOB_STATUS_RUNNING)
|
||||
await session.commit()
|
||||
|
||||
assert result is False
|
||||
assert result is False, "update_status should return False for nonexistent job"
|
||||
|
||||
async def test_list_for_meeting_returns_jobs_in_order(self, session: AsyncSession) -> None:
|
||||
"""Test listing jobs for a meeting returns them newest first."""
|
||||
@@ -204,8 +204,8 @@ class TestDiarizationJobRepository:
|
||||
|
||||
jobs = await job_repo.list_for_meeting(str(meeting.id))
|
||||
|
||||
assert len(jobs) == 3
|
||||
assert [j.job_id for j in jobs] == list(reversed(job_ids))
|
||||
assert len(jobs) == 3, f"expected 3 jobs for meeting, got {len(jobs)}"
|
||||
assert [j.job_id for j in jobs] == list(reversed(job_ids)), "jobs should be returned in newest-first order"
|
||||
|
||||
async def test_list_for_meeting_excludes_other_meetings(self, session: AsyncSession) -> None:
|
||||
"""Test listing jobs only returns jobs for the specified meeting."""
|
||||
@@ -234,8 +234,8 @@ class TestDiarizationJobRepository:
|
||||
|
||||
jobs = await job_repo.list_for_meeting(str(meeting1.id))
|
||||
|
||||
assert len(jobs) == 1
|
||||
assert jobs[0].job_id == job1.job_id
|
||||
assert len(jobs) == 1, f"expected 1 job for meeting1, got {len(jobs)}"
|
||||
assert jobs[0].job_id == job1.job_id, "returned job should be from meeting1, not meeting2"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -352,11 +352,11 @@ class TestDiarizationJobCrashRecovery:
|
||||
failed_count = await job_repo.mark_running_as_failed("New crash")
|
||||
await session.commit()
|
||||
|
||||
assert failed_count == 0
|
||||
assert failed_count == 0, "should not mark already FAILED jobs"
|
||||
|
||||
retrieved = await job_repo.get(job.job_id)
|
||||
assert retrieved is not None
|
||||
assert retrieved.error_message == "Original error"
|
||||
assert retrieved is not None, "failed job should still be retrievable"
|
||||
assert retrieved.error_message == "Original error", "original error message should be preserved"
|
||||
|
||||
async def test_mark_running_as_failed_handles_multiple_jobs(
|
||||
self, session: AsyncSession
|
||||
@@ -433,8 +433,8 @@ class TestDiarizationJobPruning:
|
||||
pruned = await job_repo.prune_completed(ttl_seconds=3600)
|
||||
await session.commit()
|
||||
|
||||
assert pruned == 1
|
||||
assert await job_repo.get(job.job_id) is None
|
||||
assert pruned == 1, f"expected 1 old completed job pruned, got {pruned}"
|
||||
assert await job_repo.get(job.job_id) is None, "old completed job should be deleted after pruning"
|
||||
|
||||
async def test_prune_completed_removes_old_failed_jobs(
|
||||
self, session: AsyncSession
|
||||
@@ -461,7 +461,7 @@ class TestDiarizationJobPruning:
|
||||
pruned = await job_repo.prune_completed(ttl_seconds=3600)
|
||||
await session.commit()
|
||||
|
||||
assert pruned == 1
|
||||
assert pruned == 1, f"expected 1 old failed job pruned, got {pruned}"
|
||||
|
||||
async def test_prune_completed_keeps_recent_jobs(self, session: AsyncSession) -> None:
|
||||
"""Test pruning keeps jobs within TTL window."""
|
||||
@@ -483,8 +483,8 @@ class TestDiarizationJobPruning:
|
||||
pruned = await job_repo.prune_completed(ttl_seconds=3600)
|
||||
await session.commit()
|
||||
|
||||
assert pruned == 0
|
||||
assert await job_repo.get(job.job_id) is not None
|
||||
assert pruned == 0, "recent jobs should not be pruned"
|
||||
assert await job_repo.get(job.job_id) is not None, "recent job should still exist after pruning"
|
||||
|
||||
async def test_prune_completed_keeps_running_jobs(self, session: AsyncSession) -> None:
|
||||
"""Test pruning never removes RUNNING or QUEUED jobs."""
|
||||
@@ -517,9 +517,9 @@ class TestDiarizationJobPruning:
|
||||
pruned = await job_repo.prune_completed(ttl_seconds=0)
|
||||
await session.commit()
|
||||
|
||||
assert pruned == 0
|
||||
assert await job_repo.get(running_job.job_id) is not None
|
||||
assert await job_repo.get(queued_job.job_id) is not None
|
||||
assert pruned == 0, "RUNNING and QUEUED jobs should never be pruned"
|
||||
assert await job_repo.get(running_job.job_id) is not None, "old RUNNING job should still exist"
|
||||
assert await job_repo.get(queued_job.job_id) is not None, "old QUEUED job should still exist"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -568,7 +568,7 @@ class TestStreamingDiarizationTurns:
|
||||
|
||||
turns = await job_repo.get_streaming_turns(str(meeting.id))
|
||||
|
||||
assert turns == []
|
||||
assert turns == [], "should return empty list for meeting with no turns"
|
||||
|
||||
async def test_add_streaming_turns_empty_list_returns_zero(
|
||||
self, session: AsyncSession
|
||||
@@ -584,7 +584,7 @@ class TestStreamingDiarizationTurns:
|
||||
count = await job_repo.add_streaming_turns(str(meeting.id), [])
|
||||
await session.commit()
|
||||
|
||||
assert count == 0
|
||||
assert count == 0, "adding empty turns list should return 0"
|
||||
|
||||
async def test_streaming_turns_ordered_by_start_time(self, session: AsyncSession) -> None:
|
||||
"""Test turns are returned ordered by start_time regardless of insertion order."""
|
||||
@@ -605,8 +605,8 @@ class TestStreamingDiarizationTurns:
|
||||
|
||||
retrieved = await job_repo.get_streaming_turns(str(meeting.id))
|
||||
|
||||
assert len(retrieved) == 3
|
||||
assert [t.start_time for t in retrieved] == [0.0, 3.0, 5.0]
|
||||
assert len(retrieved) == 3, f"expected 3 turns retrieved, got {len(retrieved)}"
|
||||
assert [t.start_time for t in retrieved] == [0.0, 3.0, 5.0], "turns should be ordered by start_time"
|
||||
|
||||
async def test_clear_streaming_turns_removes_all(self, session: AsyncSession) -> None:
|
||||
"""Test clearing streaming turns removes all turns for a meeting."""
|
||||
@@ -627,10 +627,10 @@ class TestStreamingDiarizationTurns:
|
||||
deleted = await job_repo.clear_streaming_turns(str(meeting.id))
|
||||
await session.commit()
|
||||
|
||||
assert deleted == 2
|
||||
assert deleted == 2, f"expected 2 turns deleted, got {deleted}"
|
||||
|
||||
remaining = await job_repo.get_streaming_turns(str(meeting.id))
|
||||
assert remaining == []
|
||||
assert remaining == [], "no turns should remain after clear"
|
||||
|
||||
async def test_clear_streaming_turns_isolates_meetings(self, session: AsyncSession) -> None:
|
||||
"""Test clearing turns for one meeting doesn't affect others."""
|
||||
@@ -652,10 +652,10 @@ class TestStreamingDiarizationTurns:
|
||||
await job_repo.clear_streaming_turns(str(meeting1.id))
|
||||
await session.commit()
|
||||
|
||||
assert await job_repo.get_streaming_turns(str(meeting1.id)) == []
|
||||
assert await job_repo.get_streaming_turns(str(meeting1.id)) == [], "meeting1 turns should be cleared"
|
||||
remaining = await job_repo.get_streaming_turns(str(meeting2.id))
|
||||
assert len(remaining) == 1
|
||||
assert remaining[0].speaker == "S2"
|
||||
assert len(remaining) == 1, "meeting2 should still have its turn"
|
||||
assert remaining[0].speaker == "S2", f"meeting2 turn should have speaker S2, got {remaining[0].speaker}"
|
||||
|
||||
async def test_streaming_turns_deleted_on_meeting_cascade(
|
||||
self, session: AsyncSession
|
||||
@@ -676,7 +676,7 @@ class TestStreamingDiarizationTurns:
|
||||
await session.commit()
|
||||
|
||||
remaining = await job_repo.get_streaming_turns(str(meeting.id))
|
||||
assert remaining == []
|
||||
assert remaining == [], "turns should be cascade deleted when meeting is deleted"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -703,7 +703,7 @@ class TestDiarizationJobCascadeDelete:
|
||||
await meeting_repo.delete(meeting.id)
|
||||
await session.commit()
|
||||
|
||||
assert await job_repo.get(job.job_id) is None
|
||||
assert await job_repo.get(job.job_id) is None, "job should be cascade deleted when meeting is deleted"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -731,9 +731,9 @@ class TestDiarizationJobConcurrency:
|
||||
|
||||
active = await job_repo.get_active_for_meeting(str(meeting.id))
|
||||
|
||||
assert active is not None
|
||||
assert active.job_id == job.job_id
|
||||
assert active.status == JOB_STATUS_QUEUED
|
||||
assert active is not None, "get_active_for_meeting should return QUEUED job"
|
||||
assert active.job_id == job.job_id, "returned job_id should match the created job"
|
||||
assert active.status == JOB_STATUS_QUEUED, f"expected QUEUED status, got {active.status}"
|
||||
|
||||
async def test_get_active_for_meeting_returns_running_job(
|
||||
self, session: AsyncSession
|
||||
@@ -756,9 +756,9 @@ class TestDiarizationJobConcurrency:
|
||||
|
||||
active = await job_repo.get_active_for_meeting(str(meeting.id))
|
||||
|
||||
assert active is not None
|
||||
assert active.job_id == job.job_id
|
||||
assert active.status == JOB_STATUS_RUNNING
|
||||
assert active is not None, "get_active_for_meeting should return RUNNING job"
|
||||
assert active.job_id == job.job_id, "returned job_id should match the created job"
|
||||
assert active.status == JOB_STATUS_RUNNING, f"expected RUNNING status, got {active.status}"
|
||||
|
||||
async def test_get_active_for_meeting_returns_none_for_completed(
|
||||
self, session: AsyncSession
|
||||
@@ -781,7 +781,7 @@ class TestDiarizationJobConcurrency:
|
||||
|
||||
active = await job_repo.get_active_for_meeting(str(meeting.id))
|
||||
|
||||
assert active is None
|
||||
assert active is None, "get_active_for_meeting should return None for COMPLETED job"
|
||||
|
||||
async def test_get_active_for_meeting_returns_none_for_cancelled(
|
||||
self, session: AsyncSession
|
||||
@@ -804,7 +804,7 @@ class TestDiarizationJobConcurrency:
|
||||
|
||||
active = await job_repo.get_active_for_meeting(str(meeting.id))
|
||||
|
||||
assert active is None
|
||||
assert active is None, "get_active_for_meeting should return None for CANCELLED job"
|
||||
|
||||
async def test_get_active_for_meeting_returns_most_recent(
|
||||
self, session: AsyncSession
|
||||
@@ -835,8 +835,8 @@ class TestDiarizationJobConcurrency:
|
||||
|
||||
active = await job_repo.get_active_for_meeting(str(meeting.id))
|
||||
|
||||
assert active is not None
|
||||
assert active.job_id == job2.job_id
|
||||
assert active is not None, "should return an active job when multiple exist"
|
||||
assert active.job_id == job2.job_id, "should return the most recently created active job"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -863,11 +863,11 @@ class TestDiarizationJobCancelledStatus:
|
||||
updated = await job_repo.update_status(job.job_id, JOB_STATUS_CANCELLED)
|
||||
await session.commit()
|
||||
|
||||
assert updated is True
|
||||
assert updated is True, "update_status to CANCELLED should return True"
|
||||
|
||||
retrieved = await job_repo.get(job.job_id)
|
||||
assert retrieved is not None
|
||||
assert retrieved.status == JOB_STATUS_CANCELLED
|
||||
assert retrieved is not None, "cancelled job should still be retrievable"
|
||||
assert retrieved.status == JOB_STATUS_CANCELLED, f"expected CANCELLED status, got {retrieved.status}"
|
||||
|
||||
async def test_prune_includes_cancelled_jobs(self, session: AsyncSession) -> None:
|
||||
"""Test pruning removes old CANCELLED jobs along with COMPLETED/FAILED."""
|
||||
@@ -892,8 +892,8 @@ class TestDiarizationJobCancelledStatus:
|
||||
pruned = await job_repo.prune_completed(ttl_seconds=3600)
|
||||
await session.commit()
|
||||
|
||||
assert pruned == 1
|
||||
assert await job_repo.get(job.job_id) is None
|
||||
assert pruned == 1, f"expected 1 old CANCELLED job pruned, got {pruned}"
|
||||
assert await job_repo.get(job.job_id) is None, "old CANCELLED job should be deleted after pruning"
|
||||
|
||||
async def test_mark_running_ignores_cancelled_jobs(
|
||||
self, session: AsyncSession
|
||||
@@ -917,8 +917,8 @@ class TestDiarizationJobCancelledStatus:
|
||||
failed_count = await job_repo.mark_running_as_failed()
|
||||
await session.commit()
|
||||
|
||||
assert failed_count == 0
|
||||
assert failed_count == 0, "crash recovery should not affect CANCELLED jobs"
|
||||
|
||||
retrieved = await job_repo.get(job.job_id)
|
||||
assert retrieved is not None
|
||||
assert retrieved.status == JOB_STATUS_CANCELLED
|
||||
assert retrieved is not None, "cancelled job should still be retrievable"
|
||||
assert retrieved.status == JOB_STATUS_CANCELLED, "CANCELLED status should remain unchanged"
|
||||
|
||||
@@ -32,6 +32,7 @@ if TYPE_CHECKING:
|
||||
|
||||
# Annotation timestamps
|
||||
ANNOTATION_START_TIME = 10.5
|
||||
ANNOTATION_END_TIME_SECONDS = 15.0
|
||||
|
||||
|
||||
class MockContext:
|
||||
@@ -71,24 +72,24 @@ class TestAnnotationCRUD:
|
||||
annotation_type=noteflow_pb2.ANNOTATION_TYPE_NOTE,
|
||||
text="Important point discussed",
|
||||
start_time=ANNOTATION_START_TIME,
|
||||
end_time=15.0,
|
||||
end_time=ANNOTATION_END_TIME_SECONDS,
|
||||
segment_ids=[0, 1, 2],
|
||||
)
|
||||
result = await servicer.AddAnnotation(request, MockContext())
|
||||
|
||||
assert result.id
|
||||
assert result.text == "Important point discussed"
|
||||
assert result.annotation_type == noteflow_pb2.ANNOTATION_TYPE_NOTE
|
||||
assert result.start_time == pytest.approx(ANNOTATION_START_TIME)
|
||||
assert result.end_time == pytest.approx(15.0)
|
||||
assert list(result.segment_ids) == [0, 1, 2]
|
||||
assert result.id, "annotation ID should be assigned"
|
||||
assert result.text == "Important point discussed", f"expected annotation text 'Important point discussed', got {result.text!r}"
|
||||
assert result.annotation_type == noteflow_pb2.ANNOTATION_TYPE_NOTE, f"expected annotation type ANNOTATION_TYPE_NOTE, got {result.annotation_type}"
|
||||
assert result.start_time == pytest.approx(ANNOTATION_START_TIME), f"expected start_time {ANNOTATION_START_TIME}, got {result.start_time}"
|
||||
assert result.end_time == pytest.approx(ANNOTATION_END_TIME_SECONDS), f"expected end_time {ANNOTATION_END_TIME_SECONDS}, got {result.end_time}"
|
||||
assert list(result.segment_ids) == [0, 1, 2], f"expected segment_ids [0, 1, 2], got {list(result.segment_ids)}"
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
from noteflow.domain.value_objects import AnnotationId
|
||||
|
||||
saved = await uow.annotations.get(AnnotationId(result.id))
|
||||
assert saved is not None
|
||||
assert saved.text == "Important point discussed"
|
||||
assert saved is not None, "annotation should be persisted in database"
|
||||
assert saved.text == "Important point discussed", f"expected persisted text 'Important point discussed', got {saved.text!r}"
|
||||
|
||||
async def test_get_annotation_retrieves_from_database(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -113,9 +114,9 @@ class TestAnnotationCRUD:
|
||||
get_request = noteflow_pb2.GetAnnotationRequest(annotation_id=added.id)
|
||||
result = await servicer.GetAnnotation(get_request, MockContext())
|
||||
|
||||
assert result.id == added.id
|
||||
assert result.text == "Follow up on this"
|
||||
assert result.annotation_type == noteflow_pb2.ANNOTATION_TYPE_ACTION_ITEM
|
||||
assert result.id == added.id, f"expected annotation ID {added.id}, got {result.id}"
|
||||
assert result.text == "Follow up on this", f"expected text 'Follow up on this', got {result.text!r}"
|
||||
assert result.annotation_type == noteflow_pb2.ANNOTATION_TYPE_ACTION_ITEM, f"expected annotation type ANNOTATION_TYPE_ACTION_ITEM, got {result.annotation_type}"
|
||||
|
||||
async def test_list_annotations_for_meeting(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -141,7 +142,7 @@ class TestAnnotationCRUD:
|
||||
list_request = noteflow_pb2.ListAnnotationsRequest(meeting_id=str(meeting.id))
|
||||
result = await servicer.ListAnnotations(list_request, MockContext())
|
||||
|
||||
assert len(result.annotations) == 3
|
||||
assert len(result.annotations) == 3, f"expected 3 annotations, got {len(result.annotations)}"
|
||||
|
||||
async def test_list_annotations_with_time_range_filter(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -177,7 +178,7 @@ class TestAnnotationCRUD:
|
||||
)
|
||||
result = await servicer.ListAnnotations(list_request, MockContext())
|
||||
|
||||
assert len(result.annotations) == 2
|
||||
assert len(result.annotations) == 2, f"expected 2 annotations in time range 10-50, got {len(result.annotations)}"
|
||||
|
||||
async def test_update_annotation_modifies_database(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -206,15 +207,15 @@ class TestAnnotationCRUD:
|
||||
)
|
||||
result = await servicer.UpdateAnnotation(update_request, MockContext())
|
||||
|
||||
assert result.text == "Updated text"
|
||||
assert result.annotation_type == noteflow_pb2.ANNOTATION_TYPE_ACTION_ITEM
|
||||
assert result.text == "Updated text", f"expected updated text 'Updated text', got {result.text!r}"
|
||||
assert result.annotation_type == noteflow_pb2.ANNOTATION_TYPE_ACTION_ITEM, f"expected updated type ANNOTATION_TYPE_ACTION_ITEM, got {result.annotation_type}"
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
from noteflow.domain.value_objects import AnnotationId
|
||||
|
||||
saved = await uow.annotations.get(AnnotationId(added.id))
|
||||
assert saved is not None
|
||||
assert saved.text == "Updated text"
|
||||
assert saved is not None, "updated annotation should exist in database"
|
||||
assert saved.text == "Updated text", f"expected database text 'Updated text', got {saved.text!r}"
|
||||
|
||||
async def test_delete_annotation_removes_from_database(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -239,13 +240,13 @@ class TestAnnotationCRUD:
delete_request = noteflow_pb2.DeleteAnnotationRequest(annotation_id=added.id)
result = await servicer.DeleteAnnotation(delete_request, MockContext())

assert result.success is True
assert result.success is True, "delete operation should return success=True"

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
from noteflow.domain.value_objects import AnnotationId

deleted = await uow.annotations.get(AnnotationId(added.id))
assert deleted is None
assert deleted is None, "annotation should be removed from database after deletion"


@pytest.mark.integration
@@ -272,7 +273,7 @@ class TestAnnotationTypes:
)
result = await servicer.AddAnnotation(request, MockContext())

assert result.annotation_type == noteflow_pb2.ANNOTATION_TYPE_NOTE
assert result.annotation_type == noteflow_pb2.ANNOTATION_TYPE_NOTE, f"expected ANNOTATION_TYPE_NOTE, got {result.annotation_type}"

async def test_action_item_annotation_type(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -294,7 +295,7 @@ class TestAnnotationTypes:
)
result = await servicer.AddAnnotation(request, MockContext())

assert result.annotation_type == noteflow_pb2.ANNOTATION_TYPE_ACTION_ITEM
assert result.annotation_type == noteflow_pb2.ANNOTATION_TYPE_ACTION_ITEM, f"expected ANNOTATION_TYPE_ACTION_ITEM, got {result.annotation_type}"

async def test_decision_annotation_type(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -316,7 +317,7 @@ class TestAnnotationTypes:
)
result = await servicer.AddAnnotation(request, MockContext())

assert result.annotation_type == noteflow_pb2.ANNOTATION_TYPE_DECISION
assert result.annotation_type == noteflow_pb2.ANNOTATION_TYPE_DECISION, f"expected ANNOTATION_TYPE_DECISION, got {result.annotation_type}"


@pytest.mark.integration
@@ -341,7 +342,7 @@ class TestAnnotationErrors:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.AddAnnotation(request, context)

assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT
assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT, f"expected INVALID_ARGUMENT status code, got {context.abort_code}"

async def test_get_annotation_not_found(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -355,7 +356,7 @@ class TestAnnotationErrors:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.GetAnnotation(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, f"expected NOT_FOUND status code for nonexistent annotation, got {context.abort_code}"

async def test_update_annotation_not_found(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -372,7 +373,7 @@ class TestAnnotationErrors:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.UpdateAnnotation(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, f"expected NOT_FOUND status code for updating nonexistent annotation, got {context.abort_code}"

async def test_delete_annotation_not_found_e2e(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -386,7 +387,7 @@ class TestAnnotationErrors:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.DeleteAnnotation(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, f"expected NOT_FOUND status code for deleting nonexistent annotation, got {context.abort_code}"


@pytest.mark.integration
@@ -427,8 +428,8 @@ class TestAnnotationIsolation:
list_request = noteflow_pb2.ListAnnotationsRequest(meeting_id=str(meeting1.id))
result = await servicer.ListAnnotations(list_request, MockContext())

assert len(result.annotations) == 1
assert result.annotations[0].text == "Meeting 1 annotation"
assert len(result.annotations) == 1, f"expected 1 annotation for meeting 1, got {len(result.annotations)}"
assert result.annotations[0].text == "Meeting 1 annotation", f"expected 'Meeting 1 annotation', got {result.annotations[0].text!r}"

async def test_annotations_deleted_with_meeting(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -460,4 +461,4 @@ class TestAnnotationIsolation:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.GetAnnotation(get_request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, f"expected NOT_FOUND for annotation after meeting deletion, got {context.abort_code}"
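A note on the `context.abort_code` checks used throughout these hunks: they rely on a test double that records the status code passed to `context.abort()` before raising. The suite's real `MockContext` is defined in shared test code and only partially visible in this diff, so the following is a hypothetical minimal sketch of that pattern, not the project's actual implementation.

# Hypothetical sketch of an abort-capturing gRPC context double (illustration only).
import grpc


class AbortCapturingContext:
    """Records the code and details passed to abort() so tests can assert on them."""

    def __init__(self) -> None:
        self.abort_code: grpc.StatusCode | None = None
        self.abort_details: str | None = None

    async def abort(self, code: grpc.StatusCode, details: str = "") -> None:
        # Capture the arguments, then raise the way the real context does
        # so the servicer method stops executing at the abort call.
        self.abort_code = code
        self.abort_details = details
        raise grpc.RpcError(details)

With a double like this, a test can run the RPC inside `pytest.raises(grpc.RpcError)` and then assert on `context.abort_code`, exactly as the assertions above do.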
@@ -133,10 +133,12 @@ class TestExportServiceDatabase:

export_service = ExportService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
content = await export_service.export_transcript(meeting.id, ExportFormat.HTML)
assert isinstance(content, str)
assert isinstance(content, str), f"HTML export should return string, got {type(content).__name__}"

assert "<html" in content.lower() or "<!doctype" in content.lower()
assert "HTML content test" in content
assert "<html" in content.lower() or "<!doctype" in content.lower(), (
"HTML export should contain HTML markup (<html or <!doctype)"
)
assert "HTML content test" in content, "HTML export should contain segment text 'HTML content test'"

@pytest.mark.slow
@requires_weasyprint
@@ -244,9 +246,9 @@ class TestExportServiceDatabase:
ExportFormat.MARKDOWN,
)

assert result_path.exists()
assert result_path.exists(), f"Export file should exist at {result_path}"
content = result_path.read_text()
assert "File content" in content
assert "File content" in content, "Exported file should contain segment text 'File content'"

async def test_export_to_file_infers_format_from_extension(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -266,9 +268,9 @@ class TestExportServiceDatabase:
export_service = ExportService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
result_path = await export_service.export_to_file(meeting.id, output_path)

assert result_path.suffix == ".html"
assert result_path.suffix == ".html", f"File should have .html extension, got {result_path.suffix}"
content = result_path.read_text()
assert "<" in content
assert "<" in content, "HTML file should contain HTML tags (angle brackets)"

async def test_export_nonexistent_meeting_raises(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -353,9 +355,13 @@ class TestExportGrpcServicer:
)
result = await servicer.ExportTranscript(request, MockContext())

assert result.content
assert "HTML export content" in result.content
assert result.file_extension == ".html"
assert result.content, "gRPC HTML export response should have non-empty content"
assert "HTML export content" in result.content, (
"gRPC HTML export should contain segment text 'HTML export content'"
)
assert result.file_extension == ".html", (
f"gRPC HTML export should have .html extension, got {result.file_extension}"
)

@pytest.mark.slow
@requires_weasyprint
@@ -410,7 +416,9 @@ class TestExportGrpcServicer:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.ExportTranscript(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, (
f"Expected NOT_FOUND status for nonexistent meeting, got {context.abort_code}"
)

async def test_export_transcript_invalid_meeting_id(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -427,7 +435,9 @@ class TestExportGrpcServicer:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.ExportTranscript(request, context)

assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT
assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT, (
f"Expected INVALID_ARGUMENT status for invalid meeting ID, got {context.abort_code}"
)


@pytest.mark.integration
@@ -464,10 +474,10 @@ class TestExportContent:

export_service = ExportService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
content = await export_service.export_transcript(meeting.id, ExportFormat.MARKDOWN)
assert isinstance(content, str)
assert isinstance(content, str), f"Markdown export should return string, got {type(content).__name__}"

assert "Alice" in content
assert "Bob" in content
assert "Alice" in content, "Export should include speaker label 'Alice'"
assert "Bob" in content, "Export should include speaker label 'Bob'"

async def test_export_includes_timestamps(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -488,9 +498,11 @@ class TestExportContent:

export_service = ExportService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
content = await export_service.export_transcript(meeting.id, ExportFormat.MARKDOWN)
assert isinstance(content, str)
assert isinstance(content, str), f"Markdown export should return string, got {type(content).__name__}"

assert "01:05" in content or "1:05" in content
assert "01:05" in content or "1:05" in content, (
"Export should include formatted timestamp for 65.5 seconds (01:05 or 1:05)"
)

async def test_export_empty_meeting(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -503,9 +515,9 @@ class TestExportContent:

export_service = ExportService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
content = await export_service.export_transcript(meeting.id, ExportFormat.MARKDOWN)
assert isinstance(content, str)
assert isinstance(content, str), f"Markdown export should return string, got {type(content).__name__}"

assert "Empty Meeting" in content
assert "Empty Meeting" in content, "Export should include meeting title 'Empty Meeting'"

async def test_export_long_meeting(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -562,10 +574,10 @@ class TestExportFormats:
export_service = ExportService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))

fmt = export_service._infer_format_from_extension(".md")
assert fmt == ExportFormat.MARKDOWN
assert fmt == ExportFormat.MARKDOWN, f"Expected MARKDOWN format for .md extension, got {fmt}"

fmt = export_service._infer_format_from_extension(".markdown")
assert fmt == ExportFormat.MARKDOWN
assert fmt == ExportFormat.MARKDOWN, f"Expected MARKDOWN format for .markdown extension, got {fmt}"

async def test_infer_format_html(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -574,10 +586,10 @@ class TestExportFormats:
export_service = ExportService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))

fmt = export_service._infer_format_from_extension(".html")
assert fmt == ExportFormat.HTML
assert fmt == ExportFormat.HTML, f"Expected HTML format for .html extension, got {fmt}"

fmt = export_service._infer_format_from_extension(".htm")
assert fmt == ExportFormat.HTML
assert fmt == ExportFormat.HTML, f"Expected HTML format for .htm extension, got {fmt}"

async def test_infer_format_pdf(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -219,8 +219,11 @@ class TestNerExtractionFlow:

# Force refresh should re-extract
result = await service.extract_entities(meeting_id, force_refresh=True)
assert not result.cached
assert mock_engine.extract_from_segments_call_count == initial_count + 1
assert not result.cached, "Force refresh should not return cached result"
assert mock_engine.extract_from_segments_call_count == initial_count + 1, (
f"Engine should be called again on force refresh, expected {initial_count + 1}, "
f"got {mock_engine.extract_from_segments_call_count}"
)


@pytest.mark.integration
@@ -255,8 +258,8 @@ class TestNerPersistence:

# Should get cached result without extraction
result = await service2.get_entities(meeting_id)
assert len(result) == 1
assert result[0].text == "World"
assert len(result) == 1, f"Expected 1 entity from cache, got {len(result)}"
assert result[0].text == "World", f"Expected entity text 'World', got '{result[0].text}'"

async def test_clear_entities_removes_all_for_meeting(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -283,11 +286,11 @@ class TestNerPersistence:

await service.extract_entities(meeting_id)
deleted_count = await service.clear_entities(meeting_id)
assert deleted_count == 2
assert deleted_count == 2, f"Expected 2 entities deleted, got {deleted_count}"

# Verify entities are gone
entities = await service.get_entities(meeting_id)
assert len(entities) == 0
assert len(entities) == 0, f"Expected no entities after clear, got {len(entities)}"


@pytest.mark.integration
@@ -322,16 +325,16 @@ class TestNerPinning:

# Pin entity
result = await service.pin_entity(entity_id, is_pinned=True)
assert result is True
assert result is True, "Pin operation should return True for existing entity"

# Verify persistence
entities = await service.get_entities(meeting_id)
assert entities[0].is_pinned is True
assert entities[0].is_pinned is True, "Entity should be pinned after pin operation"

# Unpin
await service.pin_entity(entity_id, is_pinned=False)
entities = await service.get_entities(meeting_id)
assert entities[0].is_pinned is False
assert entities[0].is_pinned is False, "Entity should be unpinned after unpin operation"

async def test_pin_entity_nonexistent_returns_false(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -342,7 +345,7 @@ class TestNerPinning:
service = NerService(MockNerEngine(), uow_factory)

result = await service.pin_entity(uuid4(), is_pinned=True)
assert result is False
assert result is False, "Pin operation should return False for nonexistent entity"


@pytest.mark.integration
@@ -385,8 +388,10 @@ class TestEntityMutations:
# Verify persistence
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
persisted = await uow.entities.get(entity_id)
assert persisted is not None
assert persisted.text == "Jonathan Smith"
assert persisted is not None, "Updated entity should be persisted in database"
assert persisted.text == "Jonathan Smith", (
f"Persisted text should be 'Jonathan Smith', got '{persisted.text}'"
)

async def test_update_entity_category(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -417,7 +422,7 @@ class TestEntityMutations:
updated = await uow.entities.update(entity_id, category="company")
await uow.commit()

assert updated is not None
assert updated is not None, "Update should return the updated entity"
assert updated.category == EntityCategory.COMPANY, "Category should be updated"

async def test_update_entity_text_and_category(
@@ -450,9 +455,11 @@ class TestEntityMutations:
)
await uow.commit()

assert updated is not None
assert updated.text == "New Product"
assert updated.category == EntityCategory.PRODUCT
assert updated is not None, "Update should return the updated entity"
assert updated.text == "New Product", f"Text should be 'New Product', got '{updated.text}'"
assert updated.category == EntityCategory.PRODUCT, (
f"Category should be PRODUCT, got {updated.category}"
)

async def test_update_nonexistent_entity_returns_none(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -460,7 +467,7 @@ class TestEntityMutations:
"""Update on nonexistent entity returns None."""
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
result = await uow.entities.update(uuid4(), text="Doesn't exist")
assert result is None
assert result is None, "Update on nonexistent entity should return None"

async def test_delete_entity_removes_from_database(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -504,7 +511,7 @@ class TestEntityMutations:
"""Delete on nonexistent entity returns False."""
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
result = await uow.entities.delete(uuid4())
assert result is False
assert result is False, "Delete on nonexistent entity should return False"

async def test_delete_does_not_affect_other_entities(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -531,7 +538,7 @@ class TestEntityMutations:

await service.extract_entities(meeting_id)
entities = await service.get_entities(meeting_id)
assert len(entities) == 2
assert len(entities) == 2, f"Expected 2 entities after extraction, got {len(entities)}"

john_id = next(e.id for e in entities if e.text == "John")
jane_id = next(e.id for e in entities if e.text == "Jane")
@@ -544,9 +551,9 @@ class TestEntityMutations:
# Verify Jane still exists
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
remaining = await uow.entities.get_by_meeting(meeting_id)
assert len(remaining) == 1
assert remaining[0].id == jane_id
assert remaining[0].text == "Jane"
assert len(remaining) == 1, f"Expected 1 entity remaining after delete, got {len(remaining)}"
assert remaining[0].id == jane_id, "Remaining entity should be Jane"
assert remaining[0].text == "Jane", f"Remaining entity text should be 'Jane', got '{remaining[0].text}'"


@pytest.mark.integration
@@ -570,9 +577,9 @@ class TestNerEdgeCases:

result = await service.extract_entities(meeting_id)

assert result.total_count == 0
assert result.entities == []
assert not result.cached
assert result.total_count == 0, f"Expected 0 entities for empty meeting, got {result.total_count}"
assert result.entities == [], f"Expected empty entities list, got {result.entities}"
assert not result.cached, "First extraction should not be cached"

async def test_extract_from_nonexistent_meeting_raises(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -609,12 +616,18 @@ class TestNerEdgeCases:
service = NerService(mock_engine, uow_factory)

# Before extraction
assert await service.has_entities(meeting_id) is False
assert await service.has_entities(meeting_id) is False, (
"has_entities should return False before extraction"
)

# After extraction
await service.extract_entities(meeting_id)
assert await service.has_entities(meeting_id) is True
assert await service.has_entities(meeting_id) is True, (
"has_entities should return True after extraction"
)

# After clearing
await service.clear_entities(meeting_id)
assert await service.has_entities(meeting_id) is False
assert await service.has_entities(meeting_id) is False, (
"has_entities should return False after clearing"
)
@@ -122,8 +122,10 @@ class TestStreamInitialization:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
m = await uow.meetings.get(meeting.id)
assert m is not None
assert m.state == MeetingState.RECORDING
assert m is not None, f"meeting {meeting.id} should exist in database after stream init"
assert m.state == MeetingState.RECORDING, (
f"expected meeting state RECORDING after stream start, got {m.state}"
)

async def test_stream_init_recovers_streaming_turns(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -167,11 +169,15 @@ class TestStreamInitialization:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
m = await uow.meetings.get(meeting.id)
assert m is not None
assert m.state == MeetingState.RECORDING
assert m is not None, f"meeting {meeting.id} should exist in database after recovery"
assert m.state == MeetingState.RECORDING, (
f"expected meeting state RECORDING after stream with recovered turns, got {m.state}"
)

persisted_turns = await uow.diarization_jobs.get_streaming_turns(str(meeting.id))
assert len(persisted_turns) == 2
assert len(persisted_turns) == 2, (
f"expected 2 persisted streaming turns for crash recovery, got {len(persisted_turns)}"
)

async def test_stream_init_fails_for_nonexistent_meeting(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -193,7 +199,9 @@ class TestStreamInitialization:
async for _ in servicer.StreamTranscription(chunk_iter(), context):
pass

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, (
f"expected NOT_FOUND status for nonexistent meeting, got {context.abort_code}"
)

async def test_stream_rejects_invalid_meeting_id(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -215,7 +223,9 @@ class TestStreamInitialization:
async for _ in servicer.StreamTranscription(chunk_iter(), context):
pass

assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT
assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT, (
f"expected INVALID_ARGUMENT status for malformed meeting ID, got {context.abort_code}"
)


@pytest.mark.integration
@@ -289,9 +299,13 @@ class TestStreamSegmentPersistence:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
segments = await uow.segments.get_by_meeting(meeting.id)
assert len(segments) >= 1
assert len(segments) >= 1, (
f"expected at least 1 segment persisted to database, got {len(segments)}"
)
segment_texts = [s.text for s in segments]
assert "Hello world" in segment_texts
assert "Hello world" in segment_texts, (
f"expected 'Hello world' in segment texts, got {segment_texts}"
)


@pytest.mark.integration
@@ -306,7 +320,9 @@ class TestStreamStateManagement:
meeting = Meeting.create(title="State Test")
await uow.meetings.create(meeting)
await uow.commit()
assert meeting.state == MeetingState.CREATED
assert meeting.state == MeetingState.CREATED, (
f"expected initial meeting state CREATED, got {meeting.state}"
)

mock_asr = MagicMock()
mock_asr.is_loaded = True
@@ -325,8 +341,10 @@ class TestStreamStateManagement:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
m = await uow.meetings.get(meeting.id)
assert m is not None
assert m.state == MeetingState.RECORDING
assert m is not None, f"meeting {meeting.id} should exist in database after stream"
assert m.state == MeetingState.RECORDING, (
f"expected meeting state RECORDING after stream start, got {m.state}"
)

async def test_concurrent_streams_rejected(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -358,7 +376,9 @@ class TestStreamStateManagement:
async for _ in servicer.StreamTranscription(chunk_iter(), context):
pass

assert context.abort_code == grpc.StatusCode.FAILED_PRECONDITION
assert context.abort_code == grpc.StatusCode.FAILED_PRECONDITION, (
f"expected FAILED_PRECONDITION for concurrent stream, got {context.abort_code}"
)


@pytest.mark.integration
@@ -389,7 +409,9 @@ class TestStreamCleanup:
async for _ in servicer.StreamTranscription(chunk_iter(), MockContext()):
pass

assert str(meeting.id) not in servicer._active_streams
assert str(meeting.id) not in servicer._active_streams, (
f"meeting {meeting.id} should be removed from active streams after completion"
)

async def test_streaming_state_cleaned_up_on_error(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -419,7 +441,9 @@ class TestStreamCleanup:
pass  # Expected: mock_asr.transcribe_async raises RuntimeError("ASR failed")

meeting_id_str = str(meeting.id)
assert meeting_id_str not in servicer._active_streams
assert meeting_id_str not in servicer._active_streams, (
f"meeting {meeting_id_str} should be removed from active streams after error cleanup"
)


@pytest.mark.integration
@@ -459,4 +483,6 @@ class TestStreamStopRequest:
async for _ in servicer.StreamTranscription(chunk_iter(), MockContext()):
pass

assert chunks_processed <= 5
assert chunks_processed <= 5, (
f"expected stream to stop after ~3 chunks due to stop request, but processed {chunks_processed}"
)
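Several of the streaming tests above feed `StreamTranscription` a locally defined `chunk_iter()` async generator and drain the response stream with `async for`. The generator itself lies outside these hunks; the sketch below is a hypothetical stand-in that shows the shape such a helper typically takes, and it is not the project's real fixture.

# Hypothetical audio-chunk generator for driving a streaming RPC in a test.
# The chunk payload and size below are assumptions made for illustration.
import asyncio
from collections.abc import AsyncIterator


async def make_chunk_iter(num_chunks: int = 3) -> AsyncIterator[bytes]:
    """Yield a fixed number of fake 16-bit PCM chunks, then stop."""
    for _ in range(num_chunks):
        # 100 ms of silence at 16 kHz mono, 16-bit samples.
        yield b"\x00\x00" * 1600
        await asyncio.sleep(0)  # give the servicer a chance to interleave work

Bounding the generator is what lets tests like the stop-request case assert an upper limit on how many chunks the servicer processed.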
@@ -87,8 +87,10 @@ class TestSummarizationGeneration:
await servicer.GenerateSummary(request, MockContext())

style_prompt = captured[0]
assert style_prompt is not None
assert all(kw in style_prompt.lower() for kw in ("formal", "bullet", "comprehensive"))
assert style_prompt is not None, "style_prompt should be set when options are provided"
assert all(
kw in style_prompt.lower() for kw in ("formal", "bullet", "comprehensive")
), f"style_prompt should contain tone/format/verbosity keywords, got: {style_prompt}"

async def test_generate_summary_without_options_passes_none(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -106,7 +108,7 @@ class TestSummarizationGeneration:
noteflow_pb2.GenerateSummaryRequest(meeting_id=str(meeting.id)), MockContext()
)

assert captured[0] is None
assert captured[0] is None, f"style_prompt should be None when no options provided, got: {captured[0]}"

async def test_generate_summary_with_summarization_service(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -131,8 +133,10 @@ class TestSummarizationGeneration:
assert len(result.action_items) == 1, "Should have 1 action item"
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
saved = await uow.summaries.get_by_meeting(meeting.id)
assert saved is not None
assert saved.executive_summary == "This meeting discussed important content."
assert saved is not None, "Summary should be persisted to database"
assert (
saved.executive_summary == "This meeting discussed important content."
), f"expected 'This meeting discussed important content.', got '{saved.executive_summary}'"

async def _add_test_segments(
self, uow: SqlAlchemyUnitOfWork, meeting_id: MeetingId, count: int
@@ -200,7 +204,9 @@ class TestSummarizationGeneration:
)
result = await servicer.GenerateSummary(request, MockContext())

assert result.executive_summary == "Existing summary content"
assert (
result.executive_summary == "Existing summary content"
), f"expected 'Existing summary content', got '{result.executive_summary}'"
mock_service.summarize.assert_not_called()

async def test_generate_summary_regenerates_with_force_flag(
@@ -251,7 +257,9 @@ class TestSummarizationGeneration:
)
result = await servicer.GenerateSummary(request, MockContext())

assert result.executive_summary == "New regenerated summary"
assert (
result.executive_summary == "New regenerated summary"
), f"expected 'New regenerated summary', got '{result.executive_summary}'"
mock_service.summarize.assert_called_once()

async def test_generate_summary_placeholder_fallback(
@@ -280,8 +288,12 @@ class TestSummarizationGeneration:
request = noteflow_pb2.GenerateSummaryRequest(meeting_id=str(meeting.id))
result = await servicer.GenerateSummary(request, MockContext())

assert "Segment 0" in result.executive_summary or "Segment 1" in result.executive_summary
assert result.model_version == "placeholder/v0"
assert (
"Segment 0" in result.executive_summary or "Segment 1" in result.executive_summary
), f"placeholder summary should contain segment text, got: {result.executive_summary}"
assert (
result.model_version == "placeholder/v0"
), f"expected model_version 'placeholder/v0', got '{result.model_version}'"

async def test_generate_summary_placeholder_on_service_error(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -315,8 +327,12 @@ class TestSummarizationGeneration:
request = noteflow_pb2.GenerateSummaryRequest(meeting_id=str(meeting.id))
result = await servicer.GenerateSummary(request, MockContext())

assert "Content that should appear" in result.executive_summary
assert result.model_version == "placeholder/v0"
assert (
"Content that should appear" in result.executive_summary
), f"placeholder summary should contain segment text, got: {result.executive_summary}"
assert (
result.model_version == "placeholder/v0"
), f"expected model_version 'placeholder/v0', got '{result.model_version}'"


@pytest.mark.integration
@@ -365,9 +381,16 @@ class TestSummarizationPersistence:
)
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
saved = await uow.summaries.get_by_meeting(meeting.id)
assert saved is not None and len(saved.key_points) == 3
assert saved.key_points[0].text == "Key point 1"
assert saved.key_points[1].segment_ids == [1, 2]
assert saved is not None, "Summary should be persisted to database"
assert (
len(saved.key_points) == 3
), f"expected 3 key points, got {len(saved.key_points)}"
assert (
saved.key_points[0].text == "Key point 1"
), f"expected first key point text 'Key point 1', got '{saved.key_points[0].text}'"
assert (
saved.key_points[1].segment_ids == [1, 2]
), f"expected second key point segment_ids [1, 2], got {saved.key_points[1].segment_ids}"

async def test_summary_with_action_items_persisted(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -485,7 +508,9 @@ class TestSummarizationErrors:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.GenerateSummary(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert (
context.abort_code == grpc.StatusCode.NOT_FOUND
), f"expected NOT_FOUND status, got {context.abort_code}"

async def test_generate_summary_invalid_meeting_id(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -499,7 +524,9 @@ class TestSummarizationErrors:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.GenerateSummary(request, context)

assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT
assert (
context.abort_code == grpc.StatusCode.INVALID_ARGUMENT
), f"expected INVALID_ARGUMENT status, got {context.abort_code}"

async def test_generate_summary_empty_transcript(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -515,4 +542,6 @@ class TestSummarizationErrors:
request = noteflow_pb2.GenerateSummaryRequest(meeting_id=str(meeting.id))
result = await servicer.GenerateSummary(request, MockContext())

assert "No transcript available" in result.executive_summary
assert (
"No transcript available" in result.executive_summary
), f"empty transcript should produce 'No transcript available', got: {result.executive_summary}"
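A formatting note on the rewritten assertions in this file: wrapping either the condition or the message in parentheses lets a long `assert` continue across lines without a backslash, which is how the added failure messages stay within line-length limits. A small self-contained illustration of the two equivalent shapes (the variable names are made up for the example):

# Two equivalent ways to split a long assert across lines; names are illustrative only.
expected_summary = "Existing summary content"
actual_summary = "Existing summary content"

# Parenthesize the condition and keep the message after the closing parenthesis.
assert (
    actual_summary == expected_summary
), f"expected '{expected_summary}', got '{actual_summary}'"

# Or keep the condition on one line and parenthesize the message.
assert actual_summary == expected_summary, (
    f"expected '{expected_summary}', got '{actual_summary}'"
)

Both forms are plain `assert condition, message` statements, so pytest's assertion rewriting still reports the failing values alongside the custom message.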
@@ -132,7 +132,7 @@ class TestEntityRepositorySave:
await session.commit()

retrieved = await entity_repo.get(entity.id)
assert retrieved is not None
assert retrieved is not None, f"Entity {entity.id} should exist after save"
assert retrieved.segment_ids == [0, 2, 5, 7, 10], "Segment IDs should be preserved"

async def test_save_normalizes_text_to_lowercase(
@@ -154,7 +154,7 @@ class TestEntityRepositorySave:
await session.commit()

retrieved = await entity_repo.get(entity.id)
assert retrieved is not None
assert retrieved is not None, f"Entity {entity.id} should exist after save"
assert retrieved.text == "ACME CORP", "Original text preserved"
assert retrieved.normalized_text == "acme corp", "Normalized text is lowercase"

@@ -190,8 +190,8 @@ class TestEntityRepositorySaveBatch:
await session.commit()

assert len(saved) == 5, "Should return all entities"
for entity in saved:
assert entity.db_id is not None, "Each entity should have db_id"
missing_db_id = [e for e in saved if e.db_id is None]
assert not missing_db_id, f"All entities should have db_id, but {len(missing_db_id)} are missing"

async def test_saves_empty_batch(
self,
@@ -268,9 +268,9 @@ class TestEntityRepositoryGet:

retrieved = await entity_repo.get(entity.id)

assert retrieved is not None
assert retrieved is not None, f"Entity {entity.id} should exist after save"
assert isinstance(retrieved.category, EntityCategory), "Category should be enum"
assert retrieved.category == EntityCategory.PERSON
assert retrieved.category == EntityCategory.PERSON, f"Expected PERSON category, got {retrieved.category}"


# ============================================================================
@@ -361,7 +361,7 @@ class TestEntityRepositoryGetByMeeting:

# Check ordering (category first, then text within category)
texts = [e.text for e in result]
assert texts == sorted(texts, key=lambda t: (result[texts.index(t)].category.value, t))
assert texts == sorted(texts, key=lambda t: (result[texts.index(t)].category.value, t)), f"Entities should be ordered by category then text, got {texts}"

async def test_isolates_meetings(
self,
@@ -397,7 +397,7 @@ class TestEntityRepositoryGetByMeeting:
result = await entity_repo.get_by_meeting(meeting1.id)

assert len(result) == 1, "Should return only meeting1 entities"
assert result[0].text == "Meeting 1 Entity"
assert result[0].text == "Meeting 1 Entity", f"Expected 'Meeting 1 Entity', got '{result[0].text}'"


# ============================================================================
@@ -475,7 +475,7 @@ class TestEntityRepositoryUpdatePinned:
assert result is True, "Should return True on success"

retrieved = await entity_repo.get(entity.id)
assert retrieved is not None
assert retrieved is not None, f"Entity {entity.id} should exist after update"
assert retrieved.is_pinned is True, "Should be pinned"

async def test_unpins_entity(
@@ -502,7 +502,7 @@ class TestEntityRepositoryUpdatePinned:
assert result is True, "Should return True"

retrieved = await entity_repo.get(entity.id)
assert retrieved is not None
assert retrieved is not None, f"Entity {entity.id} should exist after update"
assert retrieved.is_pinned is False, "Should be unpinned"

async def test_update_pinned_returns_false_for_nonexistent(
@@ -593,7 +593,7 @@ class TestEntityRepositoryUpdate:
result = await entity_repo.update(entity.id, text="John Smith", category="person")
await session.commit()

assert result is not None
assert result is not None, f"Entity {entity.id} should exist after update"
assert result.text == "John Smith", "Text updated"
assert result.category == EntityCategory.PERSON, "Category updated"
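One hunk above replaces a per-item assertion loop with a single aggregate check (`missing_db_id = [...]`). The advantage is that a failure reports how many items are bad rather than stopping at the first offender. The snippet below shows the same pattern on a made-up list of records; the `Record` type is invented purely for the example.

# Aggregate-then-assert: collect every offending item first, then assert once.
# Record is a stand-in type created only for this illustration.
from dataclasses import dataclass


@dataclass
class Record:
    db_id: int | None


saved = [Record(db_id=1), Record(db_id=2), Record(db_id=3)]

# The comprehension gathers every record missing a db_id; the message then
# reports the full count instead of failing on the first item encountered.
missing_db_id = [r for r in saved if r.db_id is None]
assert not missing_db_id, f"all records should have db_id, but {len(missing_db_id)} are missing"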
@@ -49,6 +49,10 @@ class MockContext:
raise grpc.RpcError()


# Test constants
LARGE_OFFSET = 100


@pytest.mark.integration
class TestInvalidInputHandling:
"""Integration tests for invalid input handling."""
@@ -65,7 +69,9 @@ class TestInvalidInputHandling:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.GetMeeting(request, context)

assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT
assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT, (
f"expected INVALID_ARGUMENT for malformed UUID, got {context.abort_code}"
)

async def test_empty_meeting_id(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -79,7 +85,9 @@ class TestInvalidInputHandling:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.GetMeeting(request, context)

assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT
assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT, (
f"expected INVALID_ARGUMENT for empty meeting ID, got {context.abort_code}"
)

async def test_nonexistent_meeting_returns_not_found(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -93,7 +101,9 @@ class TestInvalidInputHandling:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.GetMeeting(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, (
f"expected NOT_FOUND for nonexistent meeting, got {context.abort_code}"
)

async def test_delete_nonexistent_meeting(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -107,7 +117,9 @@ class TestInvalidInputHandling:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.DeleteMeeting(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, (
f"expected NOT_FOUND when deleting nonexistent meeting, got {context.abort_code}"
)


@pytest.mark.integration
@@ -120,7 +132,9 @@ class TestSegmentEdgeCases:
"""Test getting segments from nonexistent meeting returns empty list."""
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
segments = await uow.segments.get_by_meeting(MeetingId(uuid4()))
assert segments == []
assert segments == [], (
f"expected empty list for nonexistent meeting, got {len(segments)} segments"
)

async def test_segment_with_zero_duration(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -141,8 +155,13 @@ class TestSegmentEdgeCases:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
segments = await uow.segments.get_by_meeting(meeting.id)
assert len(segments) == 1
assert segments[0].start_time == segments[0].end_time
assert len(segments) == 1, (
f"expected 1 segment for zero-duration test, got {len(segments)}"
)
assert segments[0].start_time == segments[0].end_time, (
f"expected start_time == end_time for zero-duration segment, "
f"got start={segments[0].start_time}, end={segments[0].end_time}"
)

async def test_segment_with_large_text(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -164,8 +183,12 @@ class TestSegmentEdgeCases:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
segments = await uow.segments.get_by_meeting(meeting.id)
assert len(segments) == 1
assert len(segments[0].text) == len(large_text)
assert len(segments) == 1, (
f"expected 1 segment for large-text test, got {len(segments)}"
)
assert len(segments[0].text) == len(large_text), (
f"expected text length {len(large_text)}, got {len(segments[0].text)}"
)

async def test_segment_ordering_preserved(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -188,7 +211,9 @@ class TestSegmentEdgeCases:
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
segments = await uow.segments.get_by_meeting(meeting.id)
segment_ids = [s.segment_id for s in segments]
assert segment_ids == sorted(segment_ids)
assert segment_ids == sorted(segment_ids), (
f"expected segments ordered by segment_id, got {segment_ids}"
)


@pytest.mark.integration
@@ -235,7 +260,9 @@ class TestDiarizationJobEdgeCases:
str(uuid4()),
JOB_STATUS_COMPLETED,
)
assert result is False
assert result is False, (
f"expected False when updating nonexistent job, got {result}"
)

async def test_get_nonexistent_job(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -243,7 +270,9 @@ class TestDiarizationJobEdgeCases:
"""Test getting nonexistent job returns None."""
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
result = await uow.diarization_jobs.get(str(uuid4()))
assert result is None
assert result is None, (
f"expected None for nonexistent job, got {result}"
)

async def test_job_meeting_cascade_delete(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -269,7 +298,9 @@ class TestDiarizationJobEdgeCases:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
job = await uow.diarization_jobs.get(job_id)
assert job is None
assert job is None, (
f"expected job to be cascade-deleted with meeting, got {job}"
)


@pytest.mark.integration
@@ -301,8 +332,11 @@ class TestSummaryEdgeCases:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
saved = await uow.summaries.get_by_meeting(meeting.id)
assert saved is not None
assert saved.executive_summary == "Second summary"
assert saved is not None, "expected summary to exist after overwrite"
assert saved.executive_summary == "Second summary", (
f"expected executive_summary to be 'Second summary', "
f"got '{saved.executive_summary}'"
)

async def test_get_summary_for_nonexistent_meeting(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -310,7 +344,9 @@ class TestSummaryEdgeCases:
"""Test getting summary for nonexistent meeting returns None."""
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
result = await uow.summaries.get_by_meeting(MeetingId(uuid4()))
assert result is None
assert result is None, (
f"expected None for summary of nonexistent meeting, got {result}"
)

async def test_delete_summary_for_meeting_without_summary(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -323,7 +359,9 @@ class TestSummaryEdgeCases:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
result = await uow.summaries.delete_by_meeting(meeting.id)
assert result is False
assert result is False, (
f"expected False when deleting nonexistent summary, got {result}"
)

async def test_summary_deleted_with_meeting(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -346,7 +384,9 @@ class TestSummaryEdgeCases:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
result = await uow.summaries.get_by_meeting(meeting.id)
assert result is None
assert result is None, (
f"expected summary to be cascade-deleted with meeting, got {result}"
)


@pytest.mark.integration
@@ -367,7 +407,9 @@ class TestPreferencesEdgeCases:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
value = await uow.preferences.get("test_key")
assert value == "value2"
assert value == "value2", (
f"expected preference to be overwritten to 'value2', got '{value}'"
)

async def test_get_nonexistent_preference(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -375,7 +417,9 @@ class TestPreferencesEdgeCases:
"""Test getting nonexistent preference returns None."""
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
value = await uow.preferences.get("nonexistent_key")
assert value is None
assert value is None, (
f"expected None for nonexistent preference, got {value}"
)

async def test_delete_nonexistent_preference(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -383,7 +427,9 @@ class TestPreferencesEdgeCases:
"""Test deleting nonexistent preference returns False."""
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
result = await uow.preferences.delete("nonexistent_key")
assert result is False
assert result is False, (
f"expected False when deleting nonexistent preference, got {result}"
)

async def test_preference_with_special_characters_in_key(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -396,7 +442,9 @@ class TestPreferencesEdgeCases:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
value = await uow.preferences.get(key)
assert value == "value"
assert value == "value", (
f"expected preference with special chars in key to store 'value', got '{value}'"
)


@pytest.mark.integration
@@ -419,7 +467,9 @@ class TestTransactionRollback:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
result = await uow.meetings.get(meeting_id)
assert result is None
assert result is None, (
f"expected meeting to be rolled back on exception, got {result}"
)

async def test_partial_commit_not_allowed(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -431,7 +481,9 @@ class TestTransactionRollback:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
result = await uow.meetings.get(meeting.id)
assert result is None
assert result is None, (
f"expected meeting not to persist without commit, got {result}"
)


@pytest.mark.integration
@@ -450,10 +502,12 @@ class TestListingEdgeCases:

servicer = NoteFlowServicer(session_factory=session_factory)

request = noteflow_pb2.ListMeetingsRequest(offset=100)
request = noteflow_pb2.ListMeetingsRequest(offset=LARGE_OFFSET)
result = await servicer.ListMeetings(request, MockContext())

assert len(result.meetings) == 0
assert len(result.meetings) == 0, (
f"expected empty meetings list with large offset, got {len(result.meetings)}"
)

async def test_list_empty_database(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -464,8 +518,12 @@ class TestListingEdgeCases:
request = noteflow_pb2.ListMeetingsRequest()
result = await servicer.ListMeetings(request, MockContext())

assert result.total_count == 0
assert len(result.meetings) == 0
assert result.total_count == 0, (
f"expected total_count=0 for empty database, got {result.total_count}"
)
assert len(result.meetings) == 0, (
f"expected empty meetings list for empty database, got {len(result.meetings)}"
)


@pytest.mark.integration
@@ -487,7 +545,9 @@ class TestExportErrorHandling:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.ExportTranscript(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, (
f"expected NOT_FOUND when exporting nonexistent meeting, got {context.abort_code}"
)

async def test_export_invalid_format(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -514,8 +574,10 @@ class TestExportErrorHandling:
)
result = await servicer.ExportTranscript(request, MockContext())

assert result.content
assert result.file_extension
assert result.content, "expected non-empty content for export with unspecified format"
assert result.file_extension, (
"expected non-empty file_extension for export with unspecified format"
)


@pytest.mark.integration
@@ -534,7 +596,9 @@ class TestSummarizationErrorHandling:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.GenerateSummary(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, (
f"expected NOT_FOUND when summarizing nonexistent meeting, got {context.abort_code}"
)

async def test_summarize_empty_meeting_returns_placeholder(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -550,7 +614,9 @@ class TestSummarizationErrorHandling:
request = noteflow_pb2.GenerateSummaryRequest(meeting_id=str(meeting.id))
result = await servicer.GenerateSummary(request, MockContext())

assert "No transcript" in result.executive_summary
assert "No transcript" in result.executive_summary, (
f"expected placeholder summary for empty meeting, got '{result.executive_summary}'"
)


@pytest.mark.integration
@@ -569,7 +635,9 @@ class TestAnnotationErrorHandling:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.GetAnnotation(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, (
f"expected NOT_FOUND when getting nonexistent annotation, got {context.abort_code}"
)

async def test_update_nonexistent_annotation(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -586,7 +654,9 @@ class TestAnnotationErrorHandling:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.UpdateAnnotation(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, (
f"expected NOT_FOUND when updating nonexistent annotation, got {context.abort_code}"
)

async def test_delete_nonexistent_annotation(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -600,7 +670,9 @@ class TestAnnotationErrorHandling:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.DeleteAnnotation(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, (
f"expected NOT_FOUND when deleting nonexistent annotation, got {context.abort_code}"
)


@pytest.mark.integration
@@ -619,4 +691,6 @@ class TestDiarizationJobErrorHandling:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.GetDiarizationJobStatus(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, (
f"expected NOT_FOUND for nonexistent diarization job, got {context.abort_code}"
)
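The new `LARGE_OFFSET` module constant introduced above replaces the bare `offset=100` literal later in the same file, so the intent ("an offset well past the end of the data") is named once and reused. A minimal, hypothetical illustration of that refactor, with invented names:

# Named constant instead of a magic number; everything here is illustrative only.
LARGE_OFFSET = 100  # far past anything the test fixtures create


def page_is_empty(total_rows: int, offset: int = LARGE_OFFSET) -> bool:
    """A page that starts beyond the available rows should contain nothing."""
    return offset >= total_rows


assert page_is_empty(total_rows=5), "offset past the data should yield an empty page"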
@@ -93,18 +93,18 @@ class TestServicerMeetingOperationsWithDatabase:
|
||||
)
|
||||
result = await servicer.CreateMeeting(request, MockContext())
|
||||
|
||||
assert result.id
|
||||
assert result.title == "Database Test Meeting"
|
||||
assert result.state == noteflow_pb2.MEETING_STATE_CREATED
|
||||
assert result.id, "CreateMeeting response should include a meeting ID"
|
||||
assert result.title == "Database Test Meeting", f"expected title 'Database Test Meeting', got '{result.title}'"
|
||||
assert result.state == noteflow_pb2.MEETING_STATE_CREATED, f"expected state CREATED, got {result.state}"
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
from noteflow.domain.value_objects import MeetingId
|
||||
|
||||
meeting = await uow.meetings.get(MeetingId(uuid4().hex.replace("-", "")))
|
||||
meeting = await uow.meetings.get(MeetingId(result.id))
|
||||
assert meeting is not None
|
||||
assert meeting.title == "Database Test Meeting"
|
||||
assert meeting.metadata["source"] == "integration_test"
|
||||
assert meeting is not None, f"meeting with ID {result.id} should exist in database"
|
||||
assert meeting.title == "Database Test Meeting", f"expected title 'Database Test Meeting', got '{meeting.title}'"
|
||||
assert meeting.metadata["source"] == "integration_test", f"expected metadata source 'integration_test', got '{meeting.metadata.get('source')}'"
|
||||
|
||||
async def test_get_meeting_retrieves_from_database(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -120,8 +120,8 @@ class TestServicerMeetingOperationsWithDatabase:
|
||||
request = noteflow_pb2.GetMeetingRequest(meeting_id=str(meeting.id))
|
||||
result = await servicer.GetMeeting(request, MockContext())
|
||||
|
||||
assert result.id == str(meeting.id)
|
||||
assert result.title == "Persisted Meeting"
|
||||
assert result.id == str(meeting.id), f"expected meeting ID {meeting.id}, got {result.id}"
|
||||
assert result.title == "Persisted Meeting", f"expected title 'Persisted Meeting', got '{result.title}'"
|
||||
|
||||
async def test_get_meeting_with_segments(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -149,10 +149,10 @@ class TestServicerMeetingOperationsWithDatabase:
)
result = await servicer.GetMeeting(request, MockContext())

assert len(result.segments) == 3
assert result.segments[0].text == "Segment 0"
assert result.segments[1].text == "Segment 1"
assert result.segments[2].text == "Segment 2"
assert len(result.segments) == 3, f"expected 3 segments, got {len(result.segments)}"
assert result.segments[0].text == "Segment 0", f"expected segment 0 text 'Segment 0', got '{result.segments[0].text}'"
assert result.segments[1].text == "Segment 1", f"expected segment 1 text 'Segment 1', got '{result.segments[1].text}'"
assert result.segments[2].text == "Segment 2", f"expected segment 2 text 'Segment 2', got '{result.segments[2].text}'"

async def test_get_nonexistent_meeting_returns_not_found(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -166,7 +166,7 @@ class TestServicerMeetingOperationsWithDatabase:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.GetMeeting(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, f"expected NOT_FOUND status for nonexistent meeting, got {context.abort_code}"

async def test_list_meetings_queries_database(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -183,8 +183,8 @@ class TestServicerMeetingOperationsWithDatabase:
request = noteflow_pb2.ListMeetingsRequest(limit=10)
result = await servicer.ListMeetings(request, MockContext())

assert result.total_count == 5
assert len(result.meetings) == 5
assert result.total_count == 5, f"expected total_count 5, got {result.total_count}"
assert len(result.meetings) == 5, f"expected 5 meetings in response, got {len(result.meetings)}"

async def test_list_meetings_with_state_filter(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -212,8 +212,8 @@ class TestServicerMeetingOperationsWithDatabase:
)
result = await servicer.ListMeetings(request, MockContext())

assert result.total_count == 1
assert result.meetings[0].title == "Recording"
assert result.total_count == 1, f"expected 1 meeting with RECORDING state, got {result.total_count}"
assert result.meetings[0].title == "Recording", f"expected meeting title 'Recording', got '{result.meetings[0].title}'"

async def test_delete_meeting_removes_from_database(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -229,11 +229,11 @@ class TestServicerMeetingOperationsWithDatabase:
request = noteflow_pb2.DeleteMeetingRequest(meeting_id=str(meeting.id))
result = await servicer.DeleteMeeting(request, MockContext())

assert result.success is True
assert result.success is True, "DeleteMeeting should return success=True"

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
deleted = await uow.meetings.get(meeting.id)
assert deleted is None
assert deleted is None, f"meeting {meeting.id} should have been deleted from database"

async def test_stop_meeting_updates_database_state(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -250,12 +250,12 @@ class TestServicerMeetingOperationsWithDatabase:
request = noteflow_pb2.StopMeetingRequest(meeting_id=str(meeting.id))
result = await servicer.StopMeeting(request, MockContext())

assert result.state == noteflow_pb2.MEETING_STATE_STOPPED
assert result.state == noteflow_pb2.MEETING_STATE_STOPPED, f"expected STOPPED state in response, got {result.state}"

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
stopped = await uow.meetings.get(meeting.id)
assert stopped is not None
assert stopped.state == MeetingState.STOPPED
assert stopped is not None, f"meeting {meeting.id} should exist in database after stopping"
assert stopped.state == MeetingState.STOPPED, f"expected STOPPED state in database, got {stopped.state}"


@pytest.mark.integration
@@ -286,14 +286,14 @@ class TestServicerDiarizationWithDatabase:
)
result = await servicer.RefineSpeakerDiarization(request, MockContext())

assert result.job_id
assert result.status == noteflow_pb2.JOB_STATUS_QUEUED
assert result.job_id, "RefineSpeakerDiarization response should include a job ID"
assert result.status == noteflow_pb2.JOB_STATUS_QUEUED, f"expected QUEUED status, got {result.status}"

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
job = await uow.diarization_jobs.get(result.job_id)
assert job is not None
assert job.meeting_id == str(meeting.id)
assert job.status == JOB_STATUS_QUEUED
assert job is not None, f"diarization job {result.job_id} should exist in database"
assert job.meeting_id == str(meeting.id), f"expected job meeting_id {meeting.id}, got {job.meeting_id}"
assert job.status == JOB_STATUS_QUEUED, f"expected job status QUEUED, got {job.status}"

async def test_get_diarization_job_status_retrieves_from_database(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -318,10 +318,10 @@ class TestServicerDiarizationWithDatabase:
request = noteflow_pb2.GetDiarizationJobStatusRequest(job_id=job.job_id)
result = await servicer.GetDiarizationJobStatus(request, MockContext())

assert result.job_id == job.job_id
assert result.status == noteflow_pb2.JOB_STATUS_COMPLETED
assert result.segments_updated == DIARIZATION_SEGMENTS_UPDATED
assert list(result.speaker_ids) == ["SPEAKER_00", "SPEAKER_01"]
assert result.job_id == job.job_id, f"expected job_id {job.job_id}, got {result.job_id}"
assert result.status == noteflow_pb2.JOB_STATUS_COMPLETED, f"expected COMPLETED status, got {result.status}"
assert result.segments_updated == DIARIZATION_SEGMENTS_UPDATED, f"expected {DIARIZATION_SEGMENTS_UPDATED} segments_updated, got {result.segments_updated}"
assert list(result.speaker_ids) == ["SPEAKER_00", "SPEAKER_01"], f"expected speaker_ids ['SPEAKER_00', 'SPEAKER_01'], got {list(result.speaker_ids)}"

async def test_get_nonexistent_job_returns_not_found(
self, session_factory: async_sessionmaker[AsyncSession]
@@ -335,7 +335,7 @@ class TestServicerDiarizationWithDatabase:
with pytest.raises(grpc.RpcError, match=".*"):
await servicer.GetDiarizationJobStatus(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, f"expected NOT_FOUND status for nonexistent job, got {context.abort_code}"

async def test_refine_rejects_recording_meeting(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -359,8 +359,8 @@ class TestServicerDiarizationWithDatabase:
)
result = await servicer.RefineSpeakerDiarization(request, MockContext())

assert result.status == noteflow_pb2.JOB_STATUS_FAILED
assert "stopped" in result.error_message.lower()
assert result.status == noteflow_pb2.JOB_STATUS_FAILED, f"expected FAILED status for recording meeting, got {result.status}"
assert "stopped" in result.error_message.lower(), f"expected 'stopped' in error message, got '{result.error_message}'"


@pytest.mark.integration
@@ -397,7 +397,7 @@ class TestServicerServerInfoWithDatabase:
request = noteflow_pb2.ServerInfoRequest()
result = await servicer.GetServerInfo(request, MockContext())

assert result.active_meetings == 3
assert result.active_meetings == 3, f"expected 3 active meetings (2 recording + 1 stopping), got {result.active_meetings}"


@pytest.mark.integration
@@ -440,9 +440,9 @@ class TestServicerShutdownWithDatabase:
j2 = await uow.diarization_jobs.get(job2.job_id)
j3 = await uow.diarization_jobs.get(job3.job_id)

assert j1 is not None and j1.status == JOB_STATUS_FAILED
assert j2 is not None and j2.status == JOB_STATUS_FAILED
assert j3 is not None and j3.status == JOB_STATUS_COMPLETED
assert j1 is not None and j1.status == JOB_STATUS_FAILED, f"queued job should be marked FAILED after shutdown, got status={j1.status if j1 else 'None'}"
assert j2 is not None and j2.status == JOB_STATUS_FAILED, f"running job should be marked FAILED after shutdown, got status={j2.status if j2 else 'None'}"
assert j3 is not None and j3.status == JOB_STATUS_COMPLETED, f"completed job should remain COMPLETED after shutdown, got status={j3.status if j3 else 'None'}"


@pytest.mark.integration
@@ -477,15 +477,15 @@ class TestServicerRenameSpeakerWithDatabase:
)
result = await servicer.RenameSpeaker(request, MockContext())

assert result.segments_updated == 3
assert result.success is True
assert result.segments_updated == 3, f"expected 3 segments updated, got {result.segments_updated}"
assert result.success is True, "RenameSpeaker should return success=True"

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
segments = await uow.segments.get_by_meeting(meeting.id)
alice_segments = [s for s in segments if s.speaker_id == "Alice"]
other_segments = [s for s in segments if s.speaker_id == "SPEAKER_01"]
assert len(alice_segments) == 3
assert len(other_segments) == 2
assert len(alice_segments) == 3, f"expected 3 segments with speaker_id 'Alice', got {len(alice_segments)}"
assert len(other_segments) == 2, f"expected 2 segments with speaker_id 'SPEAKER_01', got {len(other_segments)}"


@pytest.mark.integration
@@ -505,7 +505,7 @@ class TestServicerTransactionIntegrity:
from noteflow.domain.value_objects import MeetingId

meeting = await uow.meetings.get(MeetingId(result.id))
assert meeting is not None
assert meeting is not None, f"meeting {result.id} should exist in database after atomic create"

async def test_stop_meeting_clears_streaming_turns(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -532,7 +532,7 @@ class TestServicerTransactionIntegrity:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
remaining = await uow.diarization_jobs.get_streaming_turns(str(meeting.id))
assert remaining == []
assert remaining == [], f"expected no streaming turns after StopMeeting, got {len(remaining)} turns"


@pytest.mark.integration
@@ -568,7 +568,7 @@ class TestServicerEntityMutationsWithDatabase:
confidence=0.95,
is_pinned=False,
)
assert uow._session is not None
assert uow._session is not None, "UnitOfWork session should be initialized"
uow._session.add(entity_model)
await uow.commit()

@@ -589,11 +589,11 @@ class TestServicerEntityMutationsWithDatabase:
)
result = await servicer.UpdateEntity(request, MockContext())

assert (result.entity.id, result.entity.text) == (entity_id, "Updated Name")
assert (result.entity.id, result.entity.text) == (entity_id, "Updated Name"), f"expected entity ({entity_id}, 'Updated Name'), got ({result.entity.id}, '{result.entity.text}')"

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
updated = await uow.entities.get(PyUUID(entity_id))
assert updated is not None and updated.text == "Updated Name"
assert updated is not None and updated.text == "Updated Name", f"entity text in database should be 'Updated Name', got '{updated.text if updated else 'None'}'"

async def test_update_entity_category_via_grpc(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -610,11 +610,11 @@ class TestServicerEntityMutationsWithDatabase:
)
result = await servicer.UpdateEntity(request, MockContext())

assert result.entity.category == "company"
assert result.entity.category == "company", f"expected category 'company' in response, got '{result.entity.category}'"

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
updated = await uow.entities.get(PyUUID(entity_id))
assert updated is not None and updated.category.value == "company"
assert updated is not None and updated.category.value == "company", f"entity category in database should be 'company', got '{updated.category.value if updated else 'None'}'"

async def test_update_entity_both_fields_via_grpc(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -632,13 +632,13 @@ class TestServicerEntityMutationsWithDatabase:
)
result = await servicer.UpdateEntity(request, MockContext())

assert (result.entity.text, result.entity.category) == ("Acme Industries", "company")
assert (result.entity.text, result.entity.category) == ("Acme Industries", "company"), f"expected ('Acme Industries', 'company'), got ('{result.entity.text}', '{result.entity.category}')"

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
updated = await uow.entities.get(PyUUID(entity_id))
assert updated is not None and (updated.text, updated.category.value) == (
"Acme Industries", "company"
)
), f"entity in database should have text='Acme Industries' and category='company', got text='{updated.text if updated else 'None'}', category='{updated.category.value if updated else 'None'}'"

async def test_update_nonexistent_entity_grpc_not_found(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -659,7 +659,7 @@ class TestServicerEntityMutationsWithDatabase:
with pytest.raises(grpc.RpcError, match="not found"):
await servicer.UpdateEntity(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, f"expected NOT_FOUND for nonexistent entity, got {context.abort_code}"

async def test_update_entity_grpc_invalid_id(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -680,7 +680,7 @@ class TestServicerEntityMutationsWithDatabase:
with pytest.raises(grpc.RpcError, match="Invalid"):
await servicer.UpdateEntity(request, context)

assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT
assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT, f"expected INVALID_ARGUMENT for malformed entity_id, got {context.abort_code}"

async def test_delete_entity_grpc_removes_from_db(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -695,10 +695,10 @@ class TestServicerEntityMutationsWithDatabase:
)
result = await servicer.DeleteEntity(request, MockContext())

assert result.success is True
assert result.success is True, "DeleteEntity should return success=True"

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
assert await uow.entities.get(PyUUID(entity_id)) is None
assert await uow.entities.get(PyUUID(entity_id)) is None, f"entity {entity_id} should have been deleted from database"

async def test_delete_nonexistent_entity_grpc_not_found(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -719,7 +719,7 @@ class TestServicerEntityMutationsWithDatabase:
with pytest.raises(grpc.RpcError, match="not found"):
await servicer.DeleteEntity(request, context)

assert context.abort_code == grpc.StatusCode.NOT_FOUND
assert context.abort_code == grpc.StatusCode.NOT_FOUND, f"expected NOT_FOUND for nonexistent entity, got {context.abort_code}"

async def test_delete_entity_grpc_invalid_id(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -740,7 +740,7 @@ class TestServicerEntityMutationsWithDatabase:
with pytest.raises(grpc.RpcError, match="Invalid"):
await servicer.DeleteEntity(request, context)

assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT
assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT, f"expected INVALID_ARGUMENT for malformed entity_id, got {context.abort_code}"

async def test_grpc_delete_preserves_other_entities(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -752,7 +752,7 @@ class TestServicerEntityMutationsWithDatabase:
meeting = Meeting.create(title="Multi-Entity Meeting")
await uow.meetings.create(meeting)
entity1_id, entity2_id = uuid4(), uuid4()
assert uow._session is not None
assert uow._session is not None, "UnitOfWork session should be initialized"
uow._session.add(NamedEntityModel(
id=entity1_id, meeting_id=meeting.id, text="Entity One",
normalized_text="entity one", category="company",
@@ -774,4 +774,4 @@ class TestServicerEntityMutationsWithDatabase:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
deleted, kept = await uow.entities.get(entity1_id), await uow.entities.get(entity2_id)
assert deleted is None and kept is not None and kept.text == "Entity Two"
assert deleted is None and kept is not None and kept.text == "Entity Two", f"only entity1 should be deleted; entity2 should remain with text='Entity Two', got deleted={deleted is None}, kept={kept is not None}, kept_text='{kept.text if kept else 'None'}'"
@@ -62,9 +62,9 @@ class TestMeetingStoreBasicOperations:
|
||||
meeting1 = store.create(title="Meeting 1")
|
||||
meeting2 = store.create(title="Meeting 2")
|
||||
|
||||
assert meeting1.id != meeting2.id
|
||||
assert meeting1.title == "Meeting 1"
|
||||
assert meeting2.title == "Meeting 2"
|
||||
assert meeting1.id != meeting2.id, f"meeting IDs should be unique, got {meeting1.id} twice"
|
||||
assert meeting1.title == "Meeting 1", f"expected 'Meeting 1', got {meeting1.title!r}"
|
||||
assert meeting2.title == "Meeting 2", f"expected 'Meeting 2', got {meeting2.title!r}"
|
||||
|
||||
def test_insert_and_get_meeting(self) -> None:
|
||||
"""Test inserting and retrieving a meeting."""
|
||||
@@ -85,7 +85,7 @@ class TestMeetingStoreBasicOperations:
|
||||
|
||||
result = store.get(str(uuid4()))
|
||||
|
||||
assert result is None
|
||||
assert result is None, f"get should return None for nonexistent meeting, got {result}"
|
||||
|
||||
def test_update_meeting_in_store(self) -> None:
|
||||
"""Test updating a meeting in MeetingStore."""
|
||||
@@ -96,8 +96,8 @@ class TestMeetingStoreBasicOperations:
|
||||
store.update(meeting)
|
||||
|
||||
retrieved = store.get(str(meeting.id))
|
||||
assert retrieved is not None
|
||||
assert retrieved.title == "Updated Title"
|
||||
assert retrieved is not None, "updated meeting should exist in store"
|
||||
assert retrieved.title == "Updated Title", f"expected 'Updated Title', got {retrieved.title!r}"
|
||||
|
||||
def test_delete_meeting_from_store(self) -> None:
|
||||
"""Test deleting a meeting from MeetingStore."""
|
||||
@@ -106,8 +106,8 @@ class TestMeetingStoreBasicOperations:
|
||||
|
||||
result = store.delete(str(meeting.id))
|
||||
|
||||
assert result is True
|
||||
assert store.get(str(meeting.id)) is None
|
||||
assert result is True, "delete should return True for existing meeting"
|
||||
assert store.get(str(meeting.id)) is None, "deleted meeting should not be retrievable"
|
||||
|
||||
def test_delete_nonexistent_returns_false(self) -> None:
|
||||
"""Test deleting nonexistent meeting returns False."""
|
||||
@@ -115,7 +115,7 @@ class TestMeetingStoreBasicOperations:
|
||||
|
||||
result = store.delete(str(uuid4()))
|
||||
|
||||
assert result is False
|
||||
assert result is False, "delete should return False for nonexistent meeting"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -131,8 +131,8 @@ class TestMeetingStoreListingAndFiltering:
|
||||
|
||||
meetings, total = store.list_all(limit=3, offset=0)
|
||||
|
||||
assert len(meetings) == 3
|
||||
assert total == 10
|
||||
assert len(meetings) == 3, f"expected page size of meetings, got {len(meetings)}"
|
||||
assert total == 10, f"expected all meetings in total, got {total}"
|
||||
|
||||
def test_list_all_with_offset(self) -> None:
|
||||
"""Test listing meetings with offset."""
|
||||
@@ -143,8 +143,8 @@ class TestMeetingStoreListingAndFiltering:
|
||||
|
||||
meetings, total = store.list_all(limit=10, offset=2)
|
||||
|
||||
assert len(meetings) == 3
|
||||
assert total == 5
|
||||
assert len(meetings) == 3, f"expected remaining meetings after offset, got {len(meetings)}"
|
||||
assert total == 5, f"expected all meetings in total, got {total}"
|
||||
|
||||
def test_list_all_filter_by_state(self) -> None:
|
||||
"""Test listing meetings filtered by state."""
|
||||
@@ -157,9 +157,9 @@ class TestMeetingStoreListingAndFiltering:
|
||||
|
||||
meetings, total = store.list_all(states=[MeetingState.RECORDING])
|
||||
|
||||
assert len(meetings) == 1
|
||||
assert total == 1
|
||||
assert meetings[0].id == recording.id
|
||||
assert len(meetings) == 1, f"expected 1 recording meeting, got {len(meetings)}"
|
||||
assert total == 1, f"expected total=1 for recording state, got {total}"
|
||||
assert meetings[0].id == recording.id, f"expected meeting id {recording.id}, got {meetings[0].id}"
|
||||
|
||||
def test_list_all_filter_by_multiple_states(self) -> None:
|
||||
"""Test filtering by multiple states."""
|
||||
@@ -178,8 +178,8 @@ class TestMeetingStoreListingAndFiltering:
|
||||
states=[MeetingState.RECORDING, MeetingState.STOPPING]
|
||||
)
|
||||
|
||||
assert len(meetings) == 2
|
||||
assert total == 2
|
||||
assert len(meetings) == 2, f"expected 2 meetings (recording+stopping), got {len(meetings)}"
|
||||
assert total == 2, f"expected total=2 for multi-state filter, got {total}"
|
||||
|
||||
def test_list_all_sort_order(self) -> None:
|
||||
"""Test listing with sort order."""
|
||||
@@ -191,8 +191,12 @@ class TestMeetingStoreListingAndFiltering:
|
||||
meetings_desc, _ = store.list_all(sort_desc=True)
|
||||
meetings_asc, _ = store.list_all(sort_desc=False)
|
||||
|
||||
assert meetings_desc[0].created_at >= meetings_desc[-1].created_at
|
||||
assert meetings_asc[0].created_at <= meetings_asc[-1].created_at
|
||||
assert meetings_desc[0].created_at >= meetings_desc[-1].created_at, (
|
||||
"descending sort should have newest first"
|
||||
)
|
||||
assert meetings_asc[0].created_at <= meetings_asc[-1].created_at, (
|
||||
"ascending sort should have oldest first"
|
||||
)
|
||||
|
||||
def test_count_by_state_in_store(self) -> None:
|
||||
"""Test counting meetings by state in MeetingStore."""
|
||||
@@ -207,8 +211,8 @@ class TestMeetingStoreListingAndFiltering:
|
||||
created_count = store.count_by_state(MeetingState.CREATED)
|
||||
recording_count = store.count_by_state(MeetingState.RECORDING)
|
||||
|
||||
assert created_count == 2
|
||||
assert recording_count == 1
|
||||
assert created_count == 2, f"expected 2 created meetings, got {created_count}"
|
||||
assert recording_count == 1, f"expected 1 recording meeting, got {recording_count}"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -238,9 +242,9 @@ class TestMeetingStoreSegments:
|
||||
|
||||
segments = store.fetch_segments(str(meeting.id))
|
||||
|
||||
assert len(segments) == 2
|
||||
assert segments[0].text == "First segment"
|
||||
assert segments[1].text == "Second segment"
|
||||
assert len(segments) == 2, f"expected 2 segments, got {len(segments)}"
|
||||
assert segments[0].text == "First segment", f"first segment mismatch: {segments[0].text!r}"
|
||||
assert segments[1].text == "Second segment", f"second segment mismatch: {segments[1].text!r}"
|
||||
|
||||
def test_add_segment_to_nonexistent_meeting(self) -> None:
|
||||
"""Test adding segment to nonexistent meeting returns None."""
|
||||
@@ -249,7 +253,7 @@ class TestMeetingStoreSegments:
|
||||
|
||||
result = store.add_segment(str(uuid4()), segment)
|
||||
|
||||
assert result is None
|
||||
assert result is None, f"add_segment should return None for nonexistent meeting, got {result}"
|
||||
|
||||
def test_get_segments_from_nonexistent_in_store(self) -> None:
|
||||
"""Test getting segments from nonexistent meeting returns empty list in store."""
|
||||
@@ -257,7 +261,7 @@ class TestMeetingStoreSegments:
|
||||
|
||||
segments = store.fetch_segments(str(uuid4()))
|
||||
|
||||
assert segments == []
|
||||
assert segments == [], f"expected empty list for nonexistent meeting, got {segments}"
|
||||
|
||||
def test_get_next_segment_id_in_store(self) -> None:
|
||||
"""Test getting next segment ID in MeetingStore."""
|
||||
@@ -265,13 +269,13 @@ class TestMeetingStoreSegments:
|
||||
meeting = store.create(title="Segment ID Test")
|
||||
|
||||
next_id = store.compute_next_segment_id(str(meeting.id))
|
||||
assert next_id == 0
|
||||
assert next_id == 0, f"expected next_id=0 for empty meeting, got {next_id}"
|
||||
|
||||
segment = Segment(segment_id=0, text="First", start_time=0.0, end_time=1.0)
|
||||
store.add_segment(str(meeting.id), segment)
|
||||
|
||||
next_id = store.compute_next_segment_id(str(meeting.id))
|
||||
assert next_id == 1
|
||||
assert next_id == 1, f"expected next_id=1 after adding segment, got {next_id}"
|
||||
|
||||
def test_get_next_segment_id_nonexistent_meeting(self) -> None:
|
||||
"""Test next segment ID for nonexistent meeting is 0."""
|
||||
@@ -279,7 +283,7 @@ class TestMeetingStoreSegments:
|
||||
|
||||
next_id = store.compute_next_segment_id(str(uuid4()))
|
||||
|
||||
assert next_id == 0
|
||||
assert next_id == 0, f"expected next_id=0 for nonexistent meeting, got {next_id}"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -299,8 +303,10 @@ class TestMeetingStoreSummary:
|
||||
store.set_summary(str(meeting.id), summary)
|
||||
retrieved = store.get_summary(str(meeting.id))
|
||||
|
||||
assert retrieved is not None
|
||||
assert retrieved.executive_summary == "This is the executive summary."
|
||||
assert retrieved is not None, "summary should be retrievable after set"
|
||||
assert retrieved.executive_summary == "This is the executive summary.", (
|
||||
f"summary mismatch: {retrieved.executive_summary!r}"
|
||||
)
|
||||
|
||||
def test_set_summary_nonexistent_meeting(self) -> None:
|
||||
"""Test setting summary on nonexistent meeting returns None."""
|
||||
@@ -312,7 +318,7 @@ class TestMeetingStoreSummary:
|
||||
|
||||
result = store.set_summary(str(uuid4()), summary)
|
||||
|
||||
assert result is None
|
||||
assert result is None, f"set_summary should return None for nonexistent meeting, got {result}"
|
||||
|
||||
def test_get_summary_nonexistent_meeting(self) -> None:
|
||||
"""Test getting summary from nonexistent meeting returns None."""
|
||||
@@ -320,7 +326,7 @@ class TestMeetingStoreSummary:
|
||||
|
||||
result = store.get_summary(str(uuid4()))
|
||||
|
||||
assert result is None
|
||||
assert result is None, f"get_summary should return None for nonexistent meeting, got {result}"
|
||||
|
||||
def test_clear_summary(self) -> None:
|
||||
"""Test clearing meeting summary."""
|
||||
@@ -332,8 +338,8 @@ class TestMeetingStoreSummary:
|
||||
|
||||
result = store.clear_summary(str(meeting.id))
|
||||
|
||||
assert result is True
|
||||
assert store.get_summary(str(meeting.id)) is None
|
||||
assert result is True, "clear_summary should return True when summary existed"
|
||||
assert store.get_summary(str(meeting.id)) is None, "summary should be None after clearing"
|
||||
|
||||
def test_clear_summary_when_none_set(self) -> None:
|
||||
"""Test clearing summary when none is set returns False."""
|
||||
@@ -342,7 +348,7 @@ class TestMeetingStoreSummary:
|
||||
|
||||
result = store.clear_summary(str(meeting.id))
|
||||
|
||||
assert result is False
|
||||
assert result is False, "clear_summary should return False when no summary exists"
|
||||
|
||||
def test_clear_summary_nonexistent_meeting(self) -> None:
|
||||
"""Test clearing summary on nonexistent meeting returns False."""
|
||||
@@ -350,7 +356,7 @@ class TestMeetingStoreSummary:
|
||||
|
||||
result = store.clear_summary(str(uuid4()))
|
||||
|
||||
assert result is False
|
||||
assert result is False, "clear_summary should return False for nonexistent meeting"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -364,10 +370,10 @@ class TestMeetingStoreAtomicUpdates:
|
||||
|
||||
result = store.update_state(str(meeting.id), MeetingState.RECORDING)
|
||||
|
||||
assert result is True
|
||||
assert result is True, "update_state should return True for existing meeting"
|
||||
retrieved = store.get(str(meeting.id))
|
||||
assert retrieved is not None
|
||||
assert retrieved.state == MeetingState.RECORDING
|
||||
assert retrieved is not None, "meeting should exist after state update"
|
||||
assert retrieved.state == MeetingState.RECORDING, f"expected RECORDING state, got {retrieved.state}"
|
||||
|
||||
def test_update_state_nonexistent_meeting(self) -> None:
|
||||
"""Test state update on nonexistent meeting returns False."""
|
||||
@@ -375,7 +381,7 @@ class TestMeetingStoreAtomicUpdates:
|
||||
|
||||
result = store.update_state(str(uuid4()), MeetingState.RECORDING)
|
||||
|
||||
assert result is False
|
||||
assert result is False, "update_state should return False for nonexistent meeting"
|
||||
|
||||
def test_update_title(self) -> None:
|
||||
"""Test atomic title update."""
|
||||
@@ -384,10 +390,10 @@ class TestMeetingStoreAtomicUpdates:
|
||||
|
||||
result = store.update_title(str(meeting.id), "Updated")
|
||||
|
||||
assert result is True
|
||||
assert result is True, "update_title should return True for existing meeting"
|
||||
retrieved = store.get(str(meeting.id))
|
||||
assert retrieved is not None
|
||||
assert retrieved.title == "Updated"
|
||||
assert retrieved is not None, "meeting should exist after title update"
|
||||
assert retrieved.title == "Updated", f"expected 'Updated', got {retrieved.title!r}"
|
||||
|
||||
def test_update_end_time(self) -> None:
|
||||
"""Test atomic end time update."""
|
||||
@@ -397,10 +403,10 @@ class TestMeetingStoreAtomicUpdates:
|
||||
|
||||
result = store.update_end_time(str(meeting.id), end_time)
|
||||
|
||||
assert result is True
|
||||
assert result is True, "update_end_time should return True for existing meeting"
|
||||
retrieved = store.get(str(meeting.id))
|
||||
assert retrieved is not None
|
||||
assert retrieved.ended_at == end_time
|
||||
assert retrieved is not None, "meeting should exist after end_time update"
|
||||
assert retrieved.ended_at == end_time, f"expected {end_time}, got {retrieved.ended_at}"
|
||||
|
||||
def test_active_count_property(self) -> None:
|
||||
"""Test active_count property."""
|
||||
@@ -417,7 +423,7 @@ class TestMeetingStoreAtomicUpdates:
|
||||
stopping.begin_stopping()
|
||||
store.update(stopping)
|
||||
|
||||
assert store.active_count == 2
|
||||
assert store.active_count == 2, f"expected 2 active meetings, got {store.active_count}"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -442,8 +448,8 @@ class TestMeetingStoreThreadSafety:
|
||||
for t in threads:
|
||||
t.join()
|
||||
|
||||
assert len(created_ids) == 10
|
||||
assert len(set(created_ids)) == 10 # All unique
|
||||
assert len(created_ids) == 10, f"expected 10 meetings created, got {len(created_ids)}"
|
||||
assert len(set(created_ids)) == 10, "all meeting IDs should be unique"
|
||||
|
||||
def test_concurrent_reads_and_writes(self) -> None:
|
||||
"""Test concurrent reads and writes are thread-safe."""
|
||||
@@ -493,8 +499,8 @@ class TestMemoryUnitOfWork:
|
||||
store = MeetingStore()
|
||||
|
||||
async with MemoryUnitOfWork(store) as uow:
|
||||
assert uow is not None
|
||||
assert isinstance(uow, MemoryUnitOfWork)
|
||||
assert uow is not None, "context manager should return non-None UoW"
|
||||
assert isinstance(uow, MemoryUnitOfWork), f"expected MemoryUnitOfWork, got {type(uow)}"
|
||||
|
||||
async def test_commit_is_noop(self) -> None:
|
||||
"""Test commit is a no-op (changes already applied)."""
|
||||
@@ -506,7 +512,7 @@ class TestMemoryUnitOfWork:
|
||||
await uow.commit()
|
||||
|
||||
retrieved = await uow.meetings.get(meeting.id)
|
||||
assert retrieved is not None
|
||||
assert retrieved is not None, "meeting should exist after commit"
|
||||
|
||||
async def test_rollback_does_not_undo_changes(self) -> None:
|
||||
"""Test rollback does not undo changes (memory doesn't support rollback)."""
|
||||
@@ -517,29 +523,29 @@ class TestMemoryUnitOfWork:
|
||||
await uow.meetings.create(meeting)
|
||||
await uow.rollback()
|
||||
|
||||
# Meeting still exists after rollback
|
||||
assert store.get(str(meeting.id)) is not None
|
||||
# Meeting still exists after rollback (memory doesn't support rollback)
|
||||
assert store.get(str(meeting.id)) is not None, "meeting should persist despite rollback in memory mode"
|
||||
|
||||
async def test_feature_flag_annotations(self) -> None:
|
||||
"""Test annotations feature flag is False."""
|
||||
store = MeetingStore()
|
||||
uow = MemoryUnitOfWork(store)
|
||||
|
||||
assert uow.supports_annotations is False
|
||||
assert uow.supports_annotations is False, "memory UoW should not support annotations"
|
||||
|
||||
async def test_feature_flag_diarization_jobs(self) -> None:
|
||||
"""Test diarization jobs feature flag is False."""
|
||||
store = MeetingStore()
|
||||
uow = MemoryUnitOfWork(store)
|
||||
|
||||
assert uow.supports_diarization_jobs is False
|
||||
assert uow.supports_diarization_jobs is False, "memory UoW should not support diarization jobs"
|
||||
|
||||
async def test_feature_flag_preferences(self) -> None:
|
||||
"""Test preferences feature flag is False."""
|
||||
store = MeetingStore()
|
||||
uow = MemoryUnitOfWork(store)
|
||||
|
||||
assert uow.supports_preferences is False
|
||||
assert uow.supports_preferences is False, "memory UoW should not support preferences"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -556,8 +562,8 @@ class TestMemoryMeetingRepository:
|
||||
|
||||
retrieved = await repo.get(meeting.id)
|
||||
|
||||
assert retrieved is not None
|
||||
assert retrieved.id == meeting.id
|
||||
assert retrieved is not None, "meeting should be retrievable after create"
|
||||
assert retrieved.id == meeting.id, f"expected id {meeting.id}, got {retrieved.id}"
|
||||
|
||||
async def test_update(self) -> None:
|
||||
"""Test updating a meeting."""
|
||||
@@ -571,8 +577,8 @@ class TestMemoryMeetingRepository:
|
||||
await repo.update(meeting)
|
||||
|
||||
retrieved = await repo.get(meeting.id)
|
||||
assert retrieved is not None
|
||||
assert retrieved.title == "Updated"
|
||||
assert retrieved is not None, "meeting should exist after update"
|
||||
assert retrieved.title == "Updated", f"expected 'Updated', got {retrieved.title!r}"
|
||||
|
||||
async def test_delete(self) -> None:
|
||||
"""Test deleting a meeting."""
|
||||
@@ -584,8 +590,8 @@ class TestMemoryMeetingRepository:
|
||||
|
||||
result = await repo.delete(meeting.id)
|
||||
|
||||
assert result is True
|
||||
assert await repo.get(meeting.id) is None
|
||||
assert result is True, "delete should return True for existing meeting"
|
||||
assert await repo.get(meeting.id) is None, "meeting should not exist after delete"
|
||||
|
||||
async def test_list_all(self) -> None:
|
||||
"""Test listing all meetings."""
|
||||
@@ -598,8 +604,8 @@ class TestMemoryMeetingRepository:
|
||||
|
||||
meetings, total = await repo.list_all()
|
||||
|
||||
assert len(meetings) == 5
|
||||
assert total == 5
|
||||
assert len(meetings) == 5, f"expected 5 meetings, got {len(meetings)}"
|
||||
assert total == 5, f"expected total=5, got {total}"
|
||||
|
||||
async def test_count_by_state_via_repo(self) -> None:
|
||||
"""Test counting meetings by state via MemoryMeetingRepository."""
|
||||
@@ -617,8 +623,8 @@ class TestMemoryMeetingRepository:
|
||||
created_count = await repo.count_by_state(MeetingState.CREATED)
|
||||
recording_count = await repo.count_by_state(MeetingState.RECORDING)
|
||||
|
||||
assert created_count == 3
|
||||
assert recording_count == 1
|
||||
assert created_count == 3, f"expected 3 created meetings, got {created_count}"
|
||||
assert recording_count == 1, f"expected 1 recording meeting, got {recording_count}"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -644,8 +650,8 @@ class TestMemorySegmentRepository:
|
||||
|
||||
segments = await segment_repo.get_by_meeting(meeting.id)
|
||||
|
||||
assert len(segments) == 1
|
||||
assert segments[0].text == "Test segment"
|
||||
assert len(segments) == 1, f"expected 1 segment, got {len(segments)}"
|
||||
assert segments[0].text == "Test segment", f"segment text mismatch: {segments[0].text!r}"
|
||||
|
||||
async def test_add_batch(self) -> None:
|
||||
"""Test adding segments in batch."""
|
||||
@@ -664,7 +670,7 @@ class TestMemorySegmentRepository:
|
||||
|
||||
retrieved = await segment_repo.get_by_meeting(meeting.id)
|
||||
|
||||
assert len(retrieved) == 5
|
||||
assert len(retrieved) == 5, f"expected 5 segments after batch add, got {len(retrieved)}"
|
||||
|
||||
async def test_semantic_search_returns_empty(self) -> None:
|
||||
"""Test semantic search returns empty (not supported)."""
|
||||
@@ -673,7 +679,7 @@ class TestMemorySegmentRepository:
|
||||
|
||||
results = await segment_repo.search_semantic([0.1, 0.2, 0.3])
|
||||
|
||||
assert results == []
|
||||
assert results == [], f"semantic search should return empty in memory mode, got {results}"
|
||||
|
||||
async def test_get_next_segment_id_via_repo(self) -> None:
|
||||
"""Test getting next segment ID via MemorySegmentRepository."""
|
||||
@@ -685,13 +691,13 @@ class TestMemorySegmentRepository:
|
||||
await meeting_repo.create(meeting)
|
||||
|
||||
next_id = await segment_repo.compute_next_segment_id(meeting.id)
|
||||
assert next_id == 0
|
||||
assert next_id == 0, f"expected next_id=0 for empty meeting, got {next_id}"
|
||||
|
||||
segment = Segment(segment_id=0, text="First", start_time=0.0, end_time=1.0)
|
||||
await segment_repo.add(meeting.id, segment)
|
||||
|
||||
next_id = await segment_repo.compute_next_segment_id(meeting.id)
|
||||
assert next_id == 1
|
||||
assert next_id == 1, f"expected next_id=1 after adding segment, got {next_id}"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -715,8 +721,10 @@ class TestMemorySummaryRepository:
|
||||
|
||||
retrieved = await summary_repo.get_by_meeting(meeting.id)
|
||||
|
||||
assert retrieved is not None
|
||||
assert retrieved.executive_summary == "Executive summary content"
|
||||
assert retrieved is not None, "summary should be retrievable after save"
|
||||
assert retrieved.executive_summary == "Executive summary content", (
|
||||
f"summary mismatch: {retrieved.executive_summary!r}"
|
||||
)
|
||||
|
||||
async def test_delete_by_meeting(self) -> None:
|
||||
"""Test deleting summary by meeting ID."""
|
||||
@@ -732,8 +740,8 @@ class TestMemorySummaryRepository:
|
||||
|
||||
result = await summary_repo.delete_by_meeting(meeting.id)
|
||||
|
||||
assert result is True
|
||||
assert await summary_repo.get_by_meeting(meeting.id) is None
|
||||
assert result is True, "delete_by_meeting should return True when summary existed"
|
||||
assert await summary_repo.get_by_meeting(meeting.id) is None, "summary should not exist after delete"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -876,7 +884,7 @@ class TestGrpcServicerMemoryFallback:
|
||||
"""Test servicer falls back to memory store without session factory."""
|
||||
servicer = NoteFlowServicer(session_factory=None)
|
||||
|
||||
assert servicer._memory_store is not None
|
||||
assert servicer._memory_store is not None, "servicer should have memory store when no session_factory"
|
||||
|
||||
async def test_create_meeting_in_memory_mode(self) -> None:
|
||||
"""Test creating meeting works in memory mode."""
|
||||
@@ -885,8 +893,8 @@ class TestGrpcServicerMemoryFallback:
|
||||
request = noteflow_pb2.CreateMeetingRequest(title="Memory Meeting")
|
||||
result = await servicer.CreateMeeting(request, MockContext())
|
||||
|
||||
assert result.id
|
||||
assert result.title == "Memory Meeting"
|
||||
assert result.id, "created meeting should have an ID"
|
||||
assert result.title == "Memory Meeting", f"expected 'Memory Meeting', got {result.title!r}"
|
||||
|
||||
async def test_get_meeting_in_memory_mode(self) -> None:
|
||||
"""Test getting meeting works in memory mode."""
|
||||
@@ -898,8 +906,8 @@ class TestGrpcServicerMemoryFallback:
|
||||
get_request = noteflow_pb2.GetMeetingRequest(meeting_id=created.id)
|
||||
result = await servicer.GetMeeting(get_request, MockContext())
|
||||
|
||||
assert result.id == created.id
|
||||
assert result.title == "Get Memory Test"
|
||||
assert result.id == created.id, f"expected id {created.id}, got {result.id}"
|
||||
assert result.title == "Get Memory Test", f"expected 'Get Memory Test', got {result.title!r}"
|
||||
|
||||
async def test_list_meetings_in_memory_mode(self) -> None:
|
||||
"""Test listing meetings works in memory mode."""
|
||||
@@ -912,7 +920,7 @@ class TestGrpcServicerMemoryFallback:
|
||||
list_request = noteflow_pb2.ListMeetingsRequest()
|
||||
result = await servicer.ListMeetings(list_request, MockContext())
|
||||
|
||||
assert len(result.meetings) == 3
|
||||
assert len(result.meetings) == 3, f"expected 3 meetings, got {len(result.meetings)}"
|
||||
|
||||
async def test_delete_meeting_in_memory_mode(self) -> None:
|
||||
"""Test deleting meeting works in memory mode."""
|
||||
@@ -924,13 +932,15 @@ class TestGrpcServicerMemoryFallback:
|
||||
delete_request = noteflow_pb2.DeleteMeetingRequest(meeting_id=created.id)
|
||||
result = await servicer.DeleteMeeting(delete_request, MockContext())
|
||||
|
||||
assert result.success is True
|
||||
assert result.success is True, "delete should succeed"
|
||||
|
||||
get_request = noteflow_pb2.GetMeetingRequest(meeting_id=created.id)
|
||||
context = MockContext()
|
||||
with pytest.raises(grpc.RpcError, match=r".*"):
|
||||
await servicer.GetMeeting(get_request, context)
|
||||
assert context.abort_code == grpc.StatusCode.NOT_FOUND
|
||||
assert context.abort_code == grpc.StatusCode.NOT_FOUND, (
|
||||
f"expected NOT_FOUND, got {context.abort_code}"
|
||||
)
|
||||
|
||||
async def test_get_server_info_in_memory_mode(self) -> None:
|
||||
"""Test GetServerInfo works in memory mode."""
|
||||
@@ -939,7 +949,7 @@ class TestGrpcServicerMemoryFallback:
|
||||
request = noteflow_pb2.ServerInfoRequest()
|
||||
result = await servicer.GetServerInfo(request, MockContext())
|
||||
|
||||
assert result.active_meetings >= 0
|
||||
assert result.active_meetings >= 0, f"active_meetings should be non-negative, got {result.active_meetings}"
|
||||
|
||||
async def test_annotation_operations_fail_in_memory_mode(self) -> None:
|
||||
"""Test annotation operations fail gracefully in memory mode."""
|
||||
@@ -960,7 +970,9 @@ class TestGrpcServicerMemoryFallback:
|
||||
with pytest.raises(grpc.RpcError, match=r".*"):
|
||||
await servicer.AddAnnotation(add_request, context)
|
||||
|
||||
assert context.abort_code == grpc.StatusCode.UNIMPLEMENTED
|
||||
assert context.abort_code == grpc.StatusCode.UNIMPLEMENTED, (
|
||||
f"expected UNIMPLEMENTED for annotations in memory mode, got {context.abort_code}"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -985,8 +997,8 @@ class TestMemoryModeConstraints:
|
||||
|
||||
# Verify through store directly
|
||||
stored_meeting = store.get(str(meeting.id))
|
||||
assert stored_meeting is not None
|
||||
assert len(stored_meeting.segments) == 1
|
||||
assert stored_meeting is not None, "meeting should exist in store"
|
||||
assert len(stored_meeting.segments) == 1, f"expected 1 segment, got {len(stored_meeting.segments)}"
|
||||
|
||||
async def test_memory_mode_summary_persists_on_meeting(self) -> None:
|
||||
"""Test summary is stored on meeting entity in memory mode."""
|
||||
@@ -1004,9 +1016,11 @@ class TestMemoryModeConstraints:
|
||||
|
||||
# Verify through store directly
|
||||
stored_meeting = store.get(str(meeting.id))
|
||||
assert stored_meeting is not None
|
||||
assert stored_meeting.summary is not None
|
||||
assert stored_meeting.summary.executive_summary == "Test summary"
|
||||
assert stored_meeting is not None, "meeting should exist in store"
|
||||
assert stored_meeting.summary is not None, "summary should be attached to meeting"
|
||||
assert stored_meeting.summary.executive_summary == "Test summary", (
|
||||
f"summary mismatch: {stored_meeting.summary.executive_summary!r}"
|
||||
)
|
||||
|
||||
async def test_memory_mode_no_semantic_search(self) -> None:
|
||||
"""Test semantic search is not available in memory mode."""
|
||||
@@ -1026,7 +1040,7 @@ class TestMemoryModeConstraints:
|
||||
|
||||
results = await uow.segments.search_semantic([0.1] * 384)
|
||||
|
||||
assert results == []
|
||||
assert results == [], f"semantic search should return empty in memory mode, got {results}"
|
||||
|
||||
async def test_memory_mode_meetings_isolated(self) -> None:
|
||||
"""Test meetings are isolated in memory mode."""
|
||||
|
||||
@@ -44,7 +44,7 @@ class TestPreferencesRepositoryBasicOperations:
|
||||
|
||||
value = await repo.get("theme")
|
||||
|
||||
assert value == "dark"
|
||||
assert value == "dark", f"expected theme to be 'dark', got {value!r}"
|
||||
|
||||
async def test_set_and_get_integer_value(self, session: AsyncSession) -> None:
|
||||
"""Test setting and getting an integer preference value."""
|
||||
@@ -55,7 +55,7 @@ class TestPreferencesRepositoryBasicOperations:
|
||||
|
||||
value = await repo.get("max_recordings")
|
||||
|
||||
assert value == 100
|
||||
assert value == 100, f"expected max_recordings to be 100, got {value}"
|
||||
|
||||
async def test_set_and_get_boolean_value(self, session: AsyncSession) -> None:
|
||||
"""Test setting and getting a boolean preference value."""
|
||||
@@ -66,7 +66,7 @@ class TestPreferencesRepositoryBasicOperations:
|
||||
|
||||
value = await repo.get("notifications_enabled")
|
||||
|
||||
assert value is True
|
||||
assert value is True, f"expected notifications_enabled to be True, got {value}"
|
||||
|
||||
async def test_set_and_get_list_value(self, session: AsyncSession) -> None:
|
||||
"""Test setting and getting a list preference value."""
|
||||
@@ -78,7 +78,7 @@ class TestPreferencesRepositoryBasicOperations:
|
||||
|
||||
value = await repo.get("preferred_languages")
|
||||
|
||||
assert value == languages
|
||||
assert value == languages, f"expected preferred_languages to be {languages}, got {value}"
|
||||
|
||||
async def test_set_and_get_dict_value(self, session: AsyncSession) -> None:
|
||||
"""Test setting and getting a dict preference value."""
|
||||
@@ -90,7 +90,7 @@ class TestPreferencesRepositoryBasicOperations:
|
||||
|
||||
value = await repo.get("keyboard_shortcuts")
|
||||
|
||||
assert value == shortcuts
|
||||
assert value == shortcuts, f"expected keyboard_shortcuts to be {shortcuts}, got {value}"
|
||||
|
||||
async def test_get_nonexistent_key_returns_none(self, session: AsyncSession) -> None:
|
||||
"""Test getting a key that doesn't exist returns None."""
|
||||
@@ -98,7 +98,7 @@ class TestPreferencesRepositoryBasicOperations:
|
||||
|
||||
value = await repo.get("nonexistent_key")
|
||||
|
||||
assert value is None
|
||||
assert value is None, f"expected nonexistent key to return None, got {value!r}"
|
||||
|
||||
async def test_set_updates_existing_value(self, session: AsyncSession) -> None:
|
||||
"""Test setting a key that already exists updates the value."""
|
||||
@@ -112,7 +112,7 @@ class TestPreferencesRepositoryBasicOperations:
|
||||
|
||||
value = await repo.get("volume")
|
||||
|
||||
assert value == VOLUME_UPDATED
|
||||
assert value == VOLUME_UPDATED, f"expected volume to be updated to {VOLUME_UPDATED}, got {value}"
|
||||
|
||||
async def test_set_null_value(self, session: AsyncSession) -> None:
|
||||
"""Test setting a None value explicitly."""
|
||||
@@ -123,7 +123,7 @@ class TestPreferencesRepositoryBasicOperations:
|
||||
|
||||
value = await repo.get("optional_setting")
|
||||
|
||||
assert value is None
|
||||
assert value is None, f"expected explicitly set None value to return None, got {value!r}"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -139,7 +139,7 @@ class TestPreferencesRepositoryBooleanCoercion:
|
||||
|
||||
value = await repo.get_bool("cloud_consent_granted")
|
||||
|
||||
assert value is True
|
||||
assert value is True, f"expected get_bool to return True for truthy value, got {value}"
|
||||
|
||||
async def test_get_bool_with_false_value(self, session: AsyncSession) -> None:
|
||||
"""Test get_bool returns False for falsy boolean value."""
|
||||
@@ -150,7 +150,7 @@ class TestPreferencesRepositoryBooleanCoercion:
|
||||
|
||||
value = await repo.get_bool("cloud_consent_granted")
|
||||
|
||||
assert value is False
|
||||
assert value is False, f"expected get_bool to return False for falsy value, got {value}"
|
||||
|
||||
async def test_get_bool_with_default_when_missing(self, session: AsyncSession) -> None:
|
||||
"""Test get_bool returns default when key doesn't exist."""
|
||||
@@ -159,8 +159,8 @@ class TestPreferencesRepositoryBooleanCoercion:
|
||||
value_default_false = await repo.get_bool("nonexistent")
|
||||
value_default_true = await repo.get_bool("nonexistent", default=True)
|
||||
|
||||
assert value_default_false is False
|
||||
assert value_default_true is True
|
||||
assert value_default_false is False, f"expected default False when key missing, got {value_default_false}"
|
||||
assert value_default_true is True, f"expected default True when explicitly passed, got {value_default_true}"
|
||||
|
||||
async def test_get_bool_coerces_truthy_integer(self, session: AsyncSession) -> None:
|
||||
"""Test get_bool coerces non-zero integer to True."""
|
||||
@@ -171,7 +171,7 @@ class TestPreferencesRepositoryBooleanCoercion:
|
||||
|
||||
value = await repo.get_bool("numeric_flag")
|
||||
|
||||
assert value is True
|
||||
assert value is True, f"expected non-zero integer to coerce to True, got {value}"
|
||||
|
||||
async def test_get_bool_coerces_falsy_integer(self, session: AsyncSession) -> None:
|
||||
"""Test get_bool coerces zero to False."""
|
||||
@@ -182,7 +182,7 @@ class TestPreferencesRepositoryBooleanCoercion:
|
||||
|
||||
value = await repo.get_bool("numeric_flag")
|
||||
|
||||
assert value is False
|
||||
assert value is False, f"expected zero to coerce to False, got {value}"
|
||||
|
||||
async def test_get_bool_coerces_truthy_string(self, session: AsyncSession) -> None:
|
||||
"""Test get_bool coerces non-empty string to True."""
|
||||
@@ -193,7 +193,7 @@ class TestPreferencesRepositoryBooleanCoercion:
|
||||
|
||||
value = await repo.get_bool("string_flag")
|
||||
|
||||
assert value is True
|
||||
assert value is True, f"expected non-empty string to coerce to True, got {value}"
|
||||
|
||||
async def test_get_bool_coerces_empty_string(self, session: AsyncSession) -> None:
|
||||
"""Test get_bool coerces empty string to False."""
|
||||
@@ -204,7 +204,7 @@ class TestPreferencesRepositoryBooleanCoercion:
|
||||
|
||||
value = await repo.get_bool("string_flag")
|
||||
|
||||
assert value is False
|
||||
assert value is False, f"expected empty string to coerce to False, got {value}"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -221,10 +221,10 @@ class TestPreferencesRepositoryDelete:
|
||||
result = await repo.delete("to_delete")
|
||||
await session.commit()
|
||||
|
||||
assert result is True
|
||||
assert result is True, f"expected delete to return True for existing key, got {result}"
|
||||
|
||||
value = await repo.get("to_delete")
|
||||
assert value is None
|
||||
assert value is None, f"expected deleted key to return None, got {value!r}"
|
||||
|
||||
async def test_delete_nonexistent_key(self, session: AsyncSession) -> None:
|
||||
"""Test deleting a key that doesn't exist returns False."""
|
||||
@@ -233,7 +233,7 @@ class TestPreferencesRepositoryDelete:
|
||||
result = await repo.delete("nonexistent")
|
||||
await session.commit()
|
||||
|
||||
assert result is False
|
||||
assert result is False, f"expected delete to return False for nonexistent key, got {result}"
|
||||
|
||||
async def test_delete_then_recreate(self, session: AsyncSession) -> None:
|
||||
"""Test that a deleted key can be recreated."""
|
||||
@@ -250,7 +250,7 @@ class TestPreferencesRepositoryDelete:
|
||||
|
||||
value = await repo.get("recyclable")
|
||||
|
||||
assert value == "new"
|
||||
assert value == "new", f"expected recreated key to have value 'new', got {value!r}"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -266,9 +266,9 @@ class TestPreferencesRepositoryIsolation:
|
||||
await repo.set("key3", "value3")
|
||||
await session.commit()
|
||||
|
||||
assert await repo.get("key1") == "value1"
|
||||
assert await repo.get("key2") == "value2"
|
||||
assert await repo.get("key3") == "value3"
|
||||
assert await repo.get("key1") == "value1", "key1 should have independent value 'value1'"
|
||||
assert await repo.get("key2") == "value2", "key2 should have independent value 'value2'"
|
||||
assert await repo.get("key3") == "value3", "key3 should have independent value 'value3'"
|
||||
|
||||
async def test_updating_one_key_doesnt_affect_others(self, session: AsyncSession) -> None:
|
||||
"""Test updating one key doesn't affect other keys."""
|
||||
@@ -281,8 +281,8 @@ class TestPreferencesRepositoryIsolation:
|
||||
await repo.set("changing", "updated")
|
||||
await session.commit()
|
||||
|
||||
assert await repo.get("stable") == "unchanged"
|
||||
assert await repo.get("changing") == "updated"
|
||||
assert await repo.get("stable") == "unchanged", "stable key should remain unchanged"
|
||||
assert await repo.get("changing") == "updated", "changing key should be updated"
|
||||
|
||||
async def test_deleting_one_key_doesnt_affect_others(self, session: AsyncSession) -> None:
|
||||
"""Test deleting one key doesn't affect other keys."""
|
||||
@@ -295,8 +295,8 @@ class TestPreferencesRepositoryIsolation:
|
||||
await repo.delete("goner")
|
||||
await session.commit()
|
||||
|
||||
assert await repo.get("keeper") == "kept"
|
||||
assert await repo.get("goner") is None
|
||||
assert await repo.get("keeper") == "kept", "keeper key should remain after deleting another key"
|
||||
assert await repo.get("goner") is None, "goner key should be deleted"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -312,7 +312,7 @@ class TestPreferencesRepositoryEdgeCases:
|
||||
|
||||
value = await repo.get("")
|
||||
|
||||
assert value == "empty_key_value"
|
||||
assert value == "empty_key_value", f"expected empty string key to store value correctly, got {value!r}"
|
||||
|
||||
async def test_special_characters_in_key(self, session: AsyncSession) -> None:
|
||||
"""Test keys with special characters work correctly."""
|
||||
@@ -324,7 +324,7 @@ class TestPreferencesRepositoryEdgeCases:
|
||||
|
||||
value = await repo.get(special_key)
|
||||
|
||||
assert value == "high"
|
||||
assert value == "high", f"expected special character key to work, got {value!r}"
|
||||
|
||||
async def test_unicode_value(self, session: AsyncSession) -> None:
|
||||
"""Test storing unicode values works correctly."""
|
||||
@@ -335,7 +335,7 @@ class TestPreferencesRepositoryEdgeCases:
|
||||
|
||||
value = await repo.get("greeting")
|
||||
|
||||
assert value == "Hello, World!"
|
||||
assert value == "Hello, World!", f"expected unicode value to be preserved, got {value!r}"
|
||||
|
||||
async def test_nested_dict_value(self, session: AsyncSession) -> None:
|
||||
"""Test storing nested dictionaries works correctly."""
|
||||
@@ -371,9 +371,9 @@ class TestPreferencesRepositoryEdgeCases:
|
||||
|
||||
value = await repo.get("large_data")

assert value == large_list
assert isinstance(value, list)
assert len(value) == 1000
assert value == large_list, "expected large list to be preserved exactly"
assert isinstance(value, list), f"expected list type, got {type(value)}"
assert len(value) == 1000, f"expected list length of 1000, got {len(value)}"

async def test_float_value_precision(self, session: AsyncSession) -> None:
"""Test float values maintain precision through JSONB storage."""
@@ -384,7 +384,7 @@ class TestPreferencesRepositoryEdgeCases:

value = await repo.get("audio_gain")

assert value == pytest.approx(0.123456789)
assert value == pytest.approx(0.123456789), f"expected float precision to be maintained, got {value}"


@pytest.mark.integration
@@ -397,7 +397,7 @@ class TestPreferencesRepositoryCloudConsent:

consent = await repo.get_bool("cloud_consent_granted", False)

assert consent is False
assert consent is False, f"expected cloud consent to default to False, got {consent}"

async def test_cloud_consent_workflow_grant_consent(self, session: AsyncSession) -> None:
"""Test granting cloud consent persists correctly."""
@@ -408,7 +408,7 @@ class TestPreferencesRepositoryCloudConsent:

consent = await repo.get_bool("cloud_consent_granted", False)

assert consent is True
assert consent is True, f"expected cloud consent to be granted, got {consent}"

async def test_cloud_consent_workflow_revoke_consent(self, session: AsyncSession) -> None:
"""Test revoking cloud consent after granting."""
@@ -422,7 +422,7 @@ class TestPreferencesRepositoryCloudConsent:

consent = await repo.get_bool("cloud_consent_granted", False)

assert consent is False
assert consent is False, f"expected cloud consent to be revoked, got {consent}"

async def test_cloud_consent_survives_session_restart(
self, session: AsyncSession
@@ -440,7 +440,7 @@ class TestPreferencesRepositoryCloudConsent:

consent = await repo.get_bool("cloud_consent_granted", False)

assert consent is True
assert consent is True, f"expected cloud consent to survive session restart, got {consent}"


@pytest.mark.integration
@@ -472,8 +472,8 @@ class TestPreferencesRepositoryBulkOperations:

result = await repo.get_all(keys=["include"])

assert "include" in result
assert "exclude" not in result
assert "include" in result, "filtered key should be included in result"
assert "exclude" not in result, "non-filtered key should not be in result"

async def test_get_all_returns_empty_dict_when_no_prefs(
self, session: AsyncSession
@@ -483,7 +483,7 @@ class TestPreferencesRepositoryBulkOperations:

result = await repo.get_all()

assert result == {}
assert result == {}, f"expected empty dict when no preferences exist, got {result}"

async def test_get_all_with_metadata_includes_timestamps(
self, session: AsyncSession
@@ -514,7 +514,7 @@ class TestPreferencesRepositoryBulkOperations:

max_ts = await repo.get_max_updated_at()

assert max_ts is not None
assert max_ts is not None, "expected max_updated_at to return a timestamp when preferences exist"

async def test_get_max_updated_at_returns_none_when_empty(
self, session: AsyncSession
@@ -524,7 +524,7 @@ class TestPreferencesRepositoryBulkOperations:

max_ts = await repo.get_max_updated_at()

assert max_ts is None
assert max_ts is None, f"expected max_updated_at to return None when no preferences exist, got {max_ts}"

async def test_set_bulk_creates_multiple_preferences(
self, session: AsyncSession
@@ -537,10 +537,10 @@ class TestPreferencesRepositoryBulkOperations:

result = await repo.get_all()

assert len(result) == 3
assert result["key1"] == "value1"
assert result["key2"] == "value2"
assert result["key3"] == "value3"
assert len(result) == 3, f"expected 3 preferences from bulk set, got {len(result)}"
assert result["key1"] == "value1", f"expected key1 to be 'value1', got {result.get('key1')!r}"
assert result["key2"] == "value2", f"expected key2 to be 'value2', got {result.get('key2')!r}"
assert result["key3"] == "value3", f"expected key3 to be 'value3', got {result.get('key3')!r}"

async def test_set_bulk_updates_existing_preferences(
self, session: AsyncSession
@@ -556,5 +556,5 @@ class TestPreferencesRepositoryBulkOperations:

result = await repo.get_all()

assert result["theme"] == "dark"
assert result["new_key"] == "new_value"
assert result["theme"] == "dark", f"expected bulk set to update existing key, got {result.get('theme')!r}"
assert result["new_key"] == "new_value", f"expected bulk set to create new key, got {result.get('new_key')!r}"

@@ -87,7 +87,7 @@ class TestProjectRepository:

result = await repo.get(uuid4())

assert result is None
assert result is None, "non-existent project should return None"

async def test_create_project_with_settings_repository(self, session: AsyncSession) -> None:
"""Test creating project with full settings."""
@@ -158,7 +158,7 @@ class TestProjectRepository:

result = await repo.get_by_slug(workspace.id, "nonexistent")

assert result is None
assert result is None, "non-existent project should return None"

async def test_get_default_for_workspace(self, session: AsyncSession) -> None:
"""Test retrieving default project for workspace."""
@@ -193,7 +193,7 @@ class TestProjectRepository:

result = await repo.get_default_for_workspace(workspace.id)

assert result is None
assert result is None, "non-existent project should return None"

async def test_update_project(self, session: AsyncSession) -> None:
"""Test updating a project."""
@@ -209,7 +209,7 @@ class TestProjectRepository:

# Update via domain entity
retrieved = await repo.get(project.id)
assert retrieved is not None
assert retrieved is not None, "project should exist for update"
retrieved.update_name("Updated Name")
retrieved.update_description("New description")
retrieved.update_settings(ProjectSettings(rag_enabled=True))
@@ -271,7 +271,7 @@ class TestProjectRepository:

# Get fresh reference
default = await repo.get_default_for_workspace(workspace.id)
assert default is not None
assert default is not None, "default project should exist before archive attempt"

with pytest.raises(CannotArchiveDefaultProjectError, match="Cannot archive"):
await repo.archive(default.id)
@@ -282,7 +282,7 @@ class TestProjectRepository:

result = await repo.archive(uuid4())

assert result is None
assert result is None, "non-existent project should return None"

async def test_restore_project(self, session: AsyncSession) -> None:
"""Test restoring an archived project."""
@@ -312,7 +312,7 @@ class TestProjectRepository:

result = await repo.restore(uuid4())

assert result is None
assert result is None, "non-existent project should return None"

async def test_delete_project(self, session: AsyncSession) -> None:
"""Test deleting a project."""
@@ -329,10 +329,10 @@ class TestProjectRepository:
result = await repo.delete(project.id)
await session.commit()

assert result is True
assert result is True, "delete should return True for existing project"

retrieved = await repo.get(project.id)
assert retrieved is None
assert retrieved is None, "deleted project should not be retrievable"

async def test_delete_project_not_found(self, session: AsyncSession) -> None:
"""Test deleting non-existent project returns False."""
@@ -340,7 +340,7 @@ class TestProjectRepository:

result = await repo.delete(uuid4())

assert result is False
assert result is False, "delete should return False for non-existent project"

async def test_list_for_workspace(self, session: AsyncSession) -> None:
"""Test listing projects in a workspace."""
@@ -368,10 +368,10 @@ class TestProjectRepository:

result = await repo.list_for_workspace(workspace.id)

assert len(result) == 3
assert len(result) == 3, f"expected 3 projects, got {len(result)}"  # noqa: PLR2004
# Default project should come first, then alphabetically
assert result[0].is_default is True
assert result[0].name == "Default Project"
assert result[0].is_default is True, "first project should be default"
assert result[0].name == "Default Project", f"first project should be 'Default Project', got '{result[0].name}'"

async def test_list_for_workspace_excludes_archived(self, session: AsyncSession) -> None:
"""Test list_for_workspace excludes archived by default."""
@@ -395,8 +395,8 @@ class TestProjectRepository:

result = await repo.list_for_workspace(workspace.id, include_archived=False)

assert len(result) == 1
assert result[0].id == active.id
assert len(result) == 1, f"expected 1 active project, got {len(result)}"
assert result[0].id == active.id, f"expected active project ID {active.id}, got {result[0].id}"

async def test_list_for_workspace_includes_archived(self, session: AsyncSession) -> None:
"""Test list_for_workspace can include archived projects."""
@@ -420,7 +420,7 @@ class TestProjectRepository:

result = await repo.list_for_workspace(workspace.id, include_archived=True)

assert len(result) == 2
assert len(result) == 2, f"expected 2 projects (including archived), got {len(result)}"  # noqa: PLR2004

async def test_list_for_workspace_pagination(self, session: AsyncSession) -> None:
"""Test list_for_workspace with pagination."""
@@ -428,7 +428,7 @@ class TestProjectRepository:
repo = SqlAlchemyProjectRepository(session)

# Create 5 projects
for i in range(5):
for i in range(5):  # noqa: PLR2004
await repo.create(
project_id=uuid4(),
workspace_id=workspace.id,
@@ -436,14 +436,14 @@ class TestProjectRepository:
)
await session.commit()

result = await repo.list_for_workspace(workspace.id, limit=2, offset=0)
assert len(result) == 2
result = await repo.list_for_workspace(workspace.id, limit=2, offset=0)  # noqa: PLR2004
assert len(result) == 2, "first page should be full"  # noqa: PLR2004

result = await repo.list_for_workspace(workspace.id, limit=2, offset=2)
assert len(result) == 2
result = await repo.list_for_workspace(workspace.id, limit=2, offset=2)  # noqa: PLR2004
assert len(result) == 2, "second page should be full"  # noqa: PLR2004

result = await repo.list_for_workspace(workspace.id, limit=2, offset=4)
assert len(result) == 1
result = await repo.list_for_workspace(workspace.id, limit=2, offset=4)  # noqa: PLR2004
assert len(result) == 1, "last page has remainder"

async def test_count_for_workspace(self, session: AsyncSession) -> None:
"""Test counting projects in a workspace."""
@@ -451,7 +451,7 @@ class TestProjectRepository:
repo = SqlAlchemyProjectRepository(session)

# Create 3 active + 1 archived
for _ in range(3):
for _ in range(3):  # noqa: PLR2004
await repo.create(
project_id=uuid4(),
workspace_id=workspace.id,
@@ -470,8 +470,8 @@ class TestProjectRepository:
count_active = await repo.count_for_workspace(workspace.id, include_archived=False)
count_all = await repo.count_for_workspace(workspace.id, include_archived=True)

assert count_active == 3
assert count_all == 4
assert count_active == 3, f"expected 3 active projects, got {count_active}"  # noqa: PLR2004
assert count_all == 4, f"expected 4 total projects, got {count_all}"  # noqa: PLR2004


@pytest.mark.integration
@@ -516,7 +516,7 @@ class TestProjectMembershipRepository:

result = await repo.get(uuid4(), uuid4())

assert result is None
assert result is None, "non-existent membership should return None"

async def test_update_role(self, session: AsyncSession) -> None:
"""Test updating a member's role."""
@@ -537,8 +537,8 @@ class TestProjectMembershipRepository:

# Verify persistence
retrieved = await repo.get(project.id, user.id)
assert retrieved is not None
assert retrieved.role == ProjectRole.ADMIN
assert retrieved is not None, "membership should exist after update"
assert retrieved.role == ProjectRole.ADMIN, "persisted role should be ADMIN"

async def test_update_role_not_found(self, session: AsyncSession) -> None:
"""Test update_role returns None for non-existent membership."""
@@ -546,7 +546,7 @@ class TestProjectMembershipRepository:

result = await repo.update_role(uuid4(), uuid4(), ProjectRole.ADMIN)

assert result is None
assert result is None, "non-existent membership should return None"

async def test_remove_membership(self, session: AsyncSession) -> None:
"""Test removing a project membership."""
@@ -562,10 +562,10 @@ class TestProjectMembershipRepository:
result = await repo.remove(project.id, user.id)
await session.commit()

assert result is True
assert result is True, "remove should return True for existing membership"

retrieved = await repo.get(project.id, user.id)
assert retrieved is None
assert retrieved is None, "removed membership should not be retrievable"

async def test_remove_membership_not_found(self, session: AsyncSession) -> None:
"""Test remove returns False for non-existent membership."""
@@ -573,7 +573,7 @@ class TestProjectMembershipRepository:

result = await repo.remove(uuid4(), uuid4())

assert result is False
assert result is False, "remove should return False for non-existent membership"

async def test_list_for_project(self, session: AsyncSession) -> None:
"""Test listing all members of a project."""
@@ -583,7 +583,7 @@ class TestProjectMembershipRepository:

# Create multiple users
users = []
for i in range(3):
for i in range(3):  # noqa: PLR2004
user = UserModel(
id=uuid4(),
display_name=f"User {i}",
@@ -602,9 +602,9 @@ class TestProjectMembershipRepository:

result = await repo.list_for_project(project.id)

assert len(result) == 3
assert len(result) == 3, f"expected 3 members, got {len(result)}"  # noqa: PLR2004
roles_found = {m.role for m in result}
assert roles_found == {ProjectRole.ADMIN, ProjectRole.EDITOR, ProjectRole.VIEWER}
assert roles_found == {ProjectRole.ADMIN, ProjectRole.EDITOR, ProjectRole.VIEWER}, "all roles should be present"

async def test_list_for_project_pagination(self, session: AsyncSession) -> None:
"""Test list_for_project with pagination."""
@@ -614,7 +614,7 @@ class TestProjectMembershipRepository:

# Create 5 users and memberships
repo = SqlAlchemyProjectMembershipRepository(session)
for i in range(5):
for i in range(5):  # noqa: PLR2004
user = UserModel(
id=uuid4(),
display_name=f"User {i}",
@@ -626,11 +626,11 @@ class TestProjectMembershipRepository:
await repo.add(project.id, user.id, ProjectRole.VIEWER)
await session.commit()

result = await repo.list_for_project(project.id, limit=2, offset=0)
assert len(result) == 2
result = await repo.list_for_project(project.id, limit=2, offset=0)  # noqa: PLR2004
assert len(result) == 2, "first page should have 2 members"  # noqa: PLR2004

result = await repo.list_for_project(project.id, limit=2, offset=4)
assert len(result) == 1
result = await repo.list_for_project(project.id, limit=2, offset=4)  # noqa: PLR2004
assert len(result) == 1, "last page should have 1 member"

async def test_list_for_user(self, session: AsyncSession) -> None:
"""Test listing all projects a user is a member of."""
@@ -642,7 +642,7 @@ class TestProjectMembershipRepository:
project_repo = SqlAlchemyProjectRepository(session)
membership_repo = SqlAlchemyProjectMembershipRepository(session)

for i in range(3):
for i in range(3):  # noqa: PLR2004
project = await project_repo.create(
project_id=uuid4(),
workspace_id=workspace.id,
@@ -653,8 +653,8 @@ class TestProjectMembershipRepository:

result = await membership_repo.list_for_user(user.id)

assert len(result) == 3
assert all(m.user_id == user.id for m in result)
assert len(result) == 3, f"expected 3 memberships, got {len(result)}"  # noqa: PLR2004
assert all(m.user_id == user.id for m in result), "all memberships should belong to user"

async def test_list_for_user_filtered_by_workspace(self, session: AsyncSession) -> None:
"""Test list_for_user can filter by workspace."""
@@ -674,7 +674,7 @@ class TestProjectMembershipRepository:
membership_repo = SqlAlchemyProjectMembershipRepository(session)

# Projects in workspace1
for i in range(2):
for i in range(2):  # noqa: PLR2004
project = await project_repo.create(
project_id=uuid4(),
workspace_id=workspace1.id,
@@ -693,12 +693,12 @@ class TestProjectMembershipRepository:

# Filter by workspace1
result = await membership_repo.list_for_user(user.id, workspace_id=workspace1.id)
assert len(result) == 2
assert len(result) == 2, f"expected 2 memberships in workspace1, got {len(result)}"  # noqa: PLR2004

# Filter by workspace2
result = await membership_repo.list_for_user(user.id, workspace_id=workspace2.id)
assert len(result) == 1
assert result[0].role == ProjectRole.ADMIN
assert len(result) == 1, f"expected 1 membership in workspace2, got {len(result)}"
assert result[0].role == ProjectRole.ADMIN, "workspace2 membership should have ADMIN role"

async def test_bulk_add_memberships(self, session: AsyncSession) -> None:
"""Test bulk adding memberships."""
@@ -708,7 +708,7 @@ class TestProjectMembershipRepository:

# Create users
users = []
for i in range(3):
for i in range(3):  # noqa: PLR2004
user = UserModel(
id=uuid4(),
display_name=f"User {i}",
@@ -728,9 +728,9 @@ class TestProjectMembershipRepository:
result = await repo.bulk_add(project.id, memberships)
await session.commit()

assert len(result) == 3
assert len(result) == 3, f"expected 3 memberships created, got {len(result)}"  # noqa: PLR2004
roles = {m.role for m in result}
assert roles == {ProjectRole.ADMIN, ProjectRole.EDITOR, ProjectRole.VIEWER}
assert roles == {ProjectRole.ADMIN, ProjectRole.EDITOR, ProjectRole.VIEWER}, "all roles should be present"

async def test_count_for_project(self, session: AsyncSession) -> None:
"""Test counting members in a project."""
@@ -740,7 +740,7 @@ class TestProjectMembershipRepository:

# Create users and add memberships
repo = SqlAlchemyProjectMembershipRepository(session)
for i in range(5):
for i in range(5):  # noqa: PLR2004
user = UserModel(
id=uuid4(),
display_name=f"User {i}",
@@ -754,7 +754,7 @@ class TestProjectMembershipRepository:

count = await repo.count_for_project(project.id)

assert count == 5, "Should count all members in project"
assert count == 5, "Should count all members in project"  # noqa: PLR2004

async def test_count_for_project_empty(self, session: AsyncSession) -> None:
"""Test counting members returns 0 for empty project."""

@@ -70,9 +70,9 @@ class TestRecoveryServiceMeetingRecovery:
recovery_service = RecoveryService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
recovered, _ = await recovery_service.recover_crashed_meetings()

assert len(recovered) == 1
assert recovered[0].state == MeetingState.ERROR
assert recovered[0].metadata["crash_previous_state"] == "STOPPING"
assert len(recovered) == 1, "should recover exactly one crashed meeting"
assert recovered[0].state == MeetingState.ERROR, "recovered meeting should be in ERROR state"
assert recovered[0].metadata["crash_previous_state"] == "STOPPING", "crash_previous_state should be 'STOPPING'"

async def test_ignores_created_meetings(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -86,12 +86,12 @@ class TestRecoveryServiceMeetingRecovery:
recovery_service = RecoveryService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
recovered, _ = await recovery_service.recover_crashed_meetings()

assert len(recovered) == 0
assert len(recovered) == 0, "should not recover any meetings when only CREATED state exists"

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
m = await uow.meetings.get(meeting.id)
assert m is not None
assert m.state == MeetingState.CREATED
assert m is not None, f"meeting {meeting.id} should exist in database"
assert m.state == MeetingState.CREATED, f"meeting should remain in CREATED state, got {m.state}"

async def test_ignores_stopped_meetings(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -108,7 +108,7 @@ class TestRecoveryServiceMeetingRecovery:
recovery_service = RecoveryService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
recovered, _ = await recovery_service.recover_crashed_meetings()

assert len(recovered) == 0
assert len(recovered) == 0, "should not recover any meetings when only STOPPED state exists"

async def test_ignores_error_meetings(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -124,29 +124,46 @@ class TestRecoveryServiceMeetingRecovery:
recovery_service = RecoveryService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
recovered, _ = await recovery_service.recover_crashed_meetings()

assert len(recovered) == 0
assert len(recovered) == 0, "should not recover any meetings when already in ERROR state"

async def test_recovers_multiple_meetings(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
) -> None:
"""Test recovering multiple crashed meetings at once."""
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
for i in range(5):
meeting = Meeting.create(title=f"Crashed Meeting {i}")
meeting.start_recording()
await uow.meetings.create(meeting)
meeting0 = Meeting.create(title="Crashed Meeting 0")
meeting0.start_recording()
await uow.meetings.create(meeting0)

meeting1 = Meeting.create(title="Crashed Meeting 1")
meeting1.start_recording()
await uow.meetings.create(meeting1)

meeting2 = Meeting.create(title="Crashed Meeting 2")
meeting2.start_recording()
await uow.meetings.create(meeting2)

meeting3 = Meeting.create(title="Crashed Meeting 3")
meeting3.start_recording()
await uow.meetings.create(meeting3)

meeting4 = Meeting.create(title="Crashed Meeting 4")
meeting4.start_recording()
await uow.meetings.create(meeting4)

await uow.commit()
expected_ids = {meeting0.id, meeting1.id, meeting2.id, meeting3.id, meeting4.id}

recovery_service = RecoveryService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
recovered, _ = await recovery_service.recover_crashed_meetings()

assert len(recovered) == 5
assert len(recovered) == len(expected_ids), f"should recover all crashed meetings, got {len(recovered)}"

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
for m in recovered:
retrieved = await uow.meetings.get(m.id)
assert retrieved is not None
assert retrieved.state == MeetingState.ERROR
recovered_ids = {m.id for m in recovered}
assert recovered_ids == expected_ids, f"recovered meeting IDs should match expected: {expected_ids}"

recovered_states = {m.state for m in recovered}
assert recovered_states == {MeetingState.ERROR}, f"all recovered meetings should be in ERROR state, got {recovered_states}"

async def test_recovery_metadata_includes_timestamp(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -161,9 +178,9 @@ class TestRecoveryServiceMeetingRecovery:
recovery_service = RecoveryService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
recovered, _ = await recovery_service.recover_crashed_meetings()

assert len(recovered) == 1
assert "crash_recovery_time" in recovered[0].metadata
assert recovered[0].metadata["crash_recovery_time"]
assert len(recovered) == 1, "should recover exactly one crashed meeting"
assert "crash_recovery_time" in recovered[0].metadata, "recovered meeting metadata should include crash_recovery_time"
assert recovered[0].metadata["crash_recovery_time"], "crash_recovery_time should have a non-empty value"


@pytest.mark.integration
@@ -216,7 +233,7 @@ class TestRecoveryServiceDiarizationJobRecovery:
recovery_service = RecoveryService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
failed_count = await recovery_service.recover_crashed_diarization_jobs()

assert failed_count == 1
assert failed_count == 1, "should fail exactly one running diarization job"

async def test_ignores_completed_diarization_jobs(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -238,12 +255,12 @@ class TestRecoveryServiceDiarizationJobRecovery:
recovery_service = RecoveryService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
failed_count = await recovery_service.recover_crashed_diarization_jobs()

assert failed_count == 0
assert failed_count == 0, "should not fail any completed diarization jobs"

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
retrieved = await uow.diarization_jobs.get(job.job_id)
assert retrieved is not None
assert retrieved.status == JOB_STATUS_COMPLETED
assert retrieved is not None, f"diarization job {job.job_id} should exist in database"
assert retrieved.status == JOB_STATUS_COMPLETED, f"completed job should remain COMPLETED, got {retrieved.status}"

async def test_ignores_already_failed_diarization_jobs(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -265,12 +282,12 @@ class TestRecoveryServiceDiarizationJobRecovery:
recovery_service = RecoveryService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
failed_count = await recovery_service.recover_crashed_diarization_jobs()

assert failed_count == 0
assert failed_count == 0, "should not fail any already-failed diarization jobs"

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
retrieved = await uow.diarization_jobs.get(job.job_id)
assert retrieved is not None
assert retrieved.error_message == "Original failure"
assert retrieved is not None, f"diarization job {job.job_id} should exist in database"
assert retrieved.error_message == "Original failure", f"original error message should be preserved, got '{retrieved.error_message}'"


@pytest.mark.integration
@@ -324,9 +341,9 @@ class TestRecoveryServiceFullRecovery:
recovery_service = RecoveryService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
result = await recovery_service.recover_all()

assert result.meetings_recovered == 0
assert result.diarization_jobs_failed == 0
assert result.total_recovered == 0
assert result.meetings_recovered == 0, "should not recover any meetings when none crashed"
assert result.diarization_jobs_failed == 0, "should not fail any diarization jobs when none crashed"
assert result.total_recovered == 0, "total_recovered should be 0 when nothing to recover"


@pytest.mark.integration
@@ -338,30 +355,39 @@ class TestRecoveryServiceCounting:
) -> None:
"""Test count_crashed_meetings returns accurate count."""
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
for state, title in [
(MeetingState.CREATED, "Created"),
(MeetingState.RECORDING, "Recording 1"),
(MeetingState.RECORDING, "Recording 2"),
(MeetingState.STOPPING, "Stopping"),
(MeetingState.STOPPED, "Stopped"),
]:
meeting = Meeting.create(title=title)
if state == MeetingState.RECORDING:
meeting.start_recording()
elif state == MeetingState.STOPPING:
meeting.start_recording()
meeting.begin_stopping()
elif state == MeetingState.STOPPED:
meeting.start_recording()
meeting.begin_stopping()
meeting.stop_recording()
await uow.meetings.create(meeting)
# CREATED state meeting
meeting_created = Meeting.create(title="Created")
await uow.meetings.create(meeting_created)

# First RECORDING state meeting
meeting_recording1 = Meeting.create(title="Recording 1")
meeting_recording1.start_recording()
await uow.meetings.create(meeting_recording1)

# Second RECORDING state meeting
meeting_recording2 = Meeting.create(title="Recording 2")
meeting_recording2.start_recording()
await uow.meetings.create(meeting_recording2)

# STOPPING state meeting
meeting_stopping = Meeting.create(title="Stopping")
meeting_stopping.start_recording()
meeting_stopping.begin_stopping()
await uow.meetings.create(meeting_stopping)

# STOPPED state meeting
meeting_stopped = Meeting.create(title="Stopped")
meeting_stopped.start_recording()
meeting_stopped.begin_stopping()
meeting_stopped.stop_recording()
await uow.meetings.create(meeting_stopped)

await uow.commit()

recovery_service = RecoveryService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
count = await recovery_service.count_crashed_meetings()

assert count == 3
assert count == 3, f"should count 3 crashed meetings (2 RECORDING + 1 STOPPING), got {count}"


@pytest.mark.integration
@@ -393,9 +419,9 @@ class TestRecoveryServiceAudioValidation:
)
recovered, audio_failures = await recovery_service.recover_crashed_meetings()

assert len(recovered) == 1
assert audio_failures == 0
assert recovered[0].metadata["audio_valid"] == "true"
assert len(recovered) == 1, "should recover exactly one meeting"
assert audio_failures == 0, "should report no audio validation failures when files exist"
assert recovered[0].metadata["audio_valid"] == "true", "audio_valid should be 'true' when manifest and audio.enc exist"

async def test_audio_validation_with_missing_audio(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -475,9 +501,9 @@ class TestRecoveryServiceAudioValidation:
)
recovered, audio_failures = await recovery_service.recover_crashed_meetings()

assert len(recovered) == 1
assert audio_failures == 1
assert recovered[0].metadata["audio_valid"] == "false"
assert len(recovered) == 1, "should recover exactly one meeting"
assert audio_failures == 1, "should report one audio validation failure when directory missing"
assert recovered[0].metadata["audio_valid"] == "false", "audio_valid should be 'false' when meeting directory missing"

async def test_audio_validation_skipped_without_meetings_dir(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -492,9 +518,9 @@ class TestRecoveryServiceAudioValidation:
recovery_service = RecoveryService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
recovered, audio_failures = await recovery_service.recover_crashed_meetings()

assert len(recovered) == 1
assert audio_failures == 0
assert recovered[0].metadata["audio_valid"] == "true"
assert len(recovered) == 1, "should recover exactly one meeting"
assert audio_failures == 0, "should report no audio validation failures when meetings_dir not provided"
assert recovered[0].metadata["audio_valid"] == "true", "audio_valid should default to 'true' when validation skipped"

async def test_audio_validation_uses_asset_path(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -522,6 +548,6 @@ class TestRecoveryServiceAudioValidation:
)
recovered, audio_failures = await recovery_service.recover_crashed_meetings()

assert len(recovered) == 1
assert audio_failures == 0
assert recovered[0].metadata["audio_valid"] == "true"
assert len(recovered) == 1, "should recover exactly one meeting"
assert audio_failures == 0, "should report no audio validation failures when custom path files exist"
assert recovered[0].metadata["audio_valid"] == "true", "audio_valid should be 'true' when files exist at custom asset_path"

@@ -51,7 +51,7 @@ class TestMeetingRepository:

result = await repo.get(meeting_id)

assert result is None
assert result is None, f"expected None for non-existent meeting, got {result}"

async def test_update_meeting(self, session: AsyncSession) -> None:
"""Test updating a meeting."""
@@ -67,9 +67,9 @@ class TestMeetingRepository:

# Verify
retrieved = await repo.get(meeting.id)
assert retrieved is not None
assert retrieved.state == MeetingState.RECORDING
assert retrieved.started_at is not None
assert retrieved is not None, "updated meeting should exist"
assert retrieved.state == MeetingState.RECORDING, f"expected RECORDING state, got {retrieved.state}"
assert retrieved.started_at is not None, "started_at should be set after start_recording()"

async def test_delete_meeting(self, session: AsyncSession) -> None:
"""Test deleting a meeting."""
@@ -82,11 +82,11 @@ class TestMeetingRepository:
result = await repo.delete(meeting.id)
await session.commit()

assert result is True
assert result is True, "delete should return True for existing meeting"

# Verify deleted
retrieved = await repo.get(meeting.id)
assert retrieved is None
assert retrieved is None, "meeting should not exist after deletion"

async def test_delete_meeting_not_found_repository(self, session: AsyncSession) -> None:
"""Test deleting non-existent meeting returns False."""
@@ -95,7 +95,7 @@ class TestMeetingRepository:

result = await repo.delete(meeting_id)

assert result is False
assert result is False, "delete should return False for non-existent meeting"

async def test_list_all_meetings(self, session: AsyncSession) -> None:
"""Test listing all meetings with pagination."""
@@ -108,10 +108,12 @@ class TestMeetingRepository:
await session.commit()

# List with pagination
result, total = await repo.list_all(limit=3, offset=0)
page_limit = 3
result, total = await repo.list_all(limit=page_limit, offset=0)

assert len(result) == 3
assert total == 5
assert len(result) == page_limit, f"expected {page_limit} meetings, got {len(result)}"
expected_total = len(meetings)
assert total == expected_total, f"expected total {expected_total}, got {total}"

async def test_list_meetings_filter_by_state(self, session: AsyncSession) -> None:
"""Test filtering meetings by state."""
@@ -129,8 +131,8 @@ class TestMeetingRepository:
# Filter by RECORDING state
result, _ = await repo.list_all(states=[MeetingState.RECORDING])

assert len(result) == 1
assert result[0].title == "Recording"
assert len(result) == 1, f"expected 1 meeting in RECORDING state, got {len(result)}"
assert result[0].title == "Recording", f"expected 'Recording' title, got '{result[0].title}'"

async def test_count_by_state(self, session: AsyncSession) -> None:
"""Test counting meetings by state."""
@@ -143,7 +145,7 @@ class TestMeetingRepository:

count = await repo.count_by_state(MeetingState.CREATED)

assert count == 3
assert count == 3, f"expected count of 3 CREATED meetings, got {count}"


@pytest.mark.integration
@@ -175,9 +177,9 @@ class TestSegmentRepository:
# Get segments
result = await segment_repo.get_by_meeting(meeting.id)

assert len(result) == 1
assert result[0].text == "Hello world"
assert result[0].db_id is not None
assert len(result) == 1, f"expected 1 segment, got {len(result)}"
assert result[0].text == "Hello world", f"expected text 'Hello world', got '{result[0].text}'"
assert result[0].db_id is not None, "segment db_id should be set after persistence"

async def test_add_segment_with_words(self, session: AsyncSession) -> None:
"""Test adding segment with word-level timing."""
@@ -205,8 +207,8 @@ class TestSegmentRepository:

result = await segment_repo.get_by_meeting(meeting.id, include_words=True)

assert len(result[0].words) == 2
assert result[0].words[0].word == "Hello"
assert len(result[0].words) == 2, f"expected 2 word timings, got {len(result[0].words)}"
assert result[0].words[0].word == "Hello", f"expected first word 'Hello', got '{result[0].words[0].word}'"

async def test_add_batch_segments(self, session: AsyncSession) -> None:
"""Test batch adding segments."""
@@ -226,7 +228,7 @@ class TestSegmentRepository:

result = await segment_repo.get_by_meeting(meeting.id)

assert len(result) == 3
assert len(result) == 3, f"expected 3 batch-added segments, got {len(result)}"

async def test_compute_next_segment_id(self, session: AsyncSession) -> None:
"""Test compute_next_segment_id returns max + 1 or 0 when empty."""
@@ -237,7 +239,8 @@ class TestSegmentRepository:
await meeting_repo.create(meeting)
await session.commit()

assert await segment_repo.compute_next_segment_id(meeting.id) == 0
next_id = await segment_repo.compute_next_segment_id(meeting.id)
assert next_id == 0, f"expected next segment_id 0 for empty meeting, got {next_id}"

segments = [
Segment(segment_id=0, text="Segment 0", start_time=0.0, end_time=1.0),
@@ -246,7 +249,8 @@ class TestSegmentRepository:
await segment_repo.add_batch(meeting.id, segments)
await session.commit()

assert await segment_repo.compute_next_segment_id(meeting.id) == 6
next_id = await segment_repo.compute_next_segment_id(meeting.id)
assert next_id == 6, f"expected next segment_id 6 (max 5 + 1), got {next_id}"

async def test_update_embedding_and_retrieve(self, session: AsyncSession) -> None:
"""Test updating a segment embedding persists to the database."""
@@ -261,13 +265,13 @@ class TestSegmentRepository:
await segment_repo.add(meeting.id, segment)
await session.commit()

assert segment.db_id is not None
assert segment.db_id is not None, "segment db_id should be set after add"
embedding = [0.1] * 1536
await segment_repo.update_embedding(segment.db_id, embedding)
await session.commit()

result = await segment_repo.get_by_meeting(meeting.id)
assert result[0].embedding == pytest.approx(embedding)
assert result[0].embedding == pytest.approx(embedding), "retrieved embedding should match saved embedding"

async def test_search_semantic_orders_by_similarity(self, session: AsyncSession) -> None:
"""Test semantic search returns closest matches first."""
@@ -298,10 +302,11 @@ class TestSegmentRepository:
await segment_repo.add_batch(meeting.id, [segment1, segment2])
await session.commit()

results = await segment_repo.search_semantic(query_embedding=emb1, limit=2)
assert len(results) == 2
assert results[0][0].segment_id == 0
assert results[0][1] >= results[1][1]
search_limit = 2
results = await segment_repo.search_semantic(query_embedding=emb1, limit=search_limit)
assert len(results) == search_limit, f"expected {search_limit} semantic search results, got {len(results)}"
assert results[0][0].segment_id == 0, f"expected closest match segment_id 0, got {results[0][0].segment_id}"
assert results[0][1] >= results[1][1], f"results should be ordered by similarity: {results[0][1]} >= {results[1][1]}"


@pytest.mark.integration
@@ -329,9 +334,9 @@ class TestSummaryRepository:

result = await summary_repo.get_by_meeting(meeting.id)

assert result is not None
assert result.executive_summary == "This was a productive meeting."
assert result.model_version == "test/v1"
assert result is not None, "summary should exist after save"
assert result.executive_summary == "This was a productive meeting.", f"expected executive_summary to match, got '{result.executive_summary}'"
assert result.model_version == "test/v1", f"expected model_version 'test/v1', got '{result.model_version}'"

async def test_save_summary_with_key_points(self, session: AsyncSession) -> None:
"""Test saving summary with key points."""
@@ -356,9 +361,9 @@ class TestSummaryRepository:

result = await summary_repo.get_by_meeting(meeting.id)

assert result is not None
assert len(result.key_points) == 2
assert result.key_points[0].text == "Point 1"
assert result is not None, "summary with key points should exist"
assert len(result.key_points) == 2, f"expected 2 key points, got {len(result.key_points)}"
assert result.key_points[0].text == "Point 1", f"expected first key point 'Point 1', got '{result.key_points[0].text}'"

async def test_save_summary_with_action_items(self, session: AsyncSession) -> None:
"""Test saving summary with action items."""
@@ -403,10 +408,10 @@ class TestSummaryRepository:
result = await summary_repo.delete_by_meeting(meeting.id)
await session.commit()

assert result is True
assert result is True, "delete should return True for existing summary"

retrieved = await summary_repo.get_by_meeting(meeting.id)
assert retrieved is None
assert retrieved is None, "summary should not exist after deletion"

async def test_update_summary_replaces_items(self, session: AsyncSession) -> None:
"""Test saving a summary twice replaces key points and action items."""
@@ -470,9 +475,9 @@ class TestAnnotationRepository:

retrieved = await annotation_repo.get(annotation.id)

assert retrieved is not None
assert retrieved.text == "Decision made"
assert retrieved.segment_ids == [0]
assert retrieved is not None, "annotation should exist after add"
assert retrieved.text == "Decision made", f"expected text 'Decision made', got '{retrieved.text}'"
assert retrieved.segment_ids == [0], f"expected segment_ids [0], got {retrieved.segment_ids}"

async def test_get_by_meeting_ordered(self, session: AsyncSession) -> None:
"""Test annotations returned in start_time order."""
@@ -505,7 +510,8 @@ class TestAnnotationRepository:

result = await annotation_repo.get_by_meeting(meeting.id)

assert [a.text for a in result] == ["First", "Second"]
texts = [a.text for a in result]
assert texts == ["First", "Second"], f"expected annotations ordered by start_time, got {texts}"

async def test_get_by_time_range_inclusive(self, session: AsyncSession) -> None:
"""Test time range query includes boundary overlaps."""
@@ -538,7 +544,9 @@ class TestAnnotationRepository:

result = await annotation_repo.get_by_time_range(meeting.id, start_time=1.0, end_time=1.0)

assert {a.text for a in result} == {"Ends at boundary", "Starts at boundary"}
texts = {a.text for a in result}
expected = {"Ends at boundary", "Starts at boundary"}
assert texts == expected, f"expected both boundary annotations, got {texts}"

async def test_update_annotation_not_found_raises_repository(self, session: AsyncSession) -> None:
"""Test update raises when annotation does not exist."""
@@ -562,4 +570,4 @@ class TestAnnotationRepository:

result = await annotation_repo.delete(AnnotationId(uuid4()))

assert result is False
assert result is False, "delete should return False for non-existent annotation"

@@ -44,7 +44,7 @@ class TestServerStartupPreferences:
"""Test servicer can be initialized with database session factory."""
servicer = NoteFlowServicer(session_factory=session_factory)

assert servicer._session_factory is not None
assert servicer._session_factory is not None, "Servicer should have session factory set"

async def test_preferences_loaded_on_startup(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -59,8 +59,8 @@ class TestServerStartupPreferences:
consent = await uow.preferences.get_bool("cloud_consent_granted", False)
language = await uow.preferences.get("default_language")

assert consent is True
assert language == "en"
assert consent is True, "Cloud consent preference should be True after being set"
assert language == "en", f"Default language should be 'en', got {language!r}"

async def test_preferences_default_when_not_set(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -69,7 +69,7 @@ class TestServerStartupPreferences:
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
consent = await uow.preferences.get_bool("cloud_consent_granted", False)

assert consent is False
assert consent is False, "Cloud consent should default to False when not set"


@pytest.mark.integration
@@ -127,8 +127,12 @@ class TestServerStartupRecovery:
recovery_service = RecoveryService(SqlAlchemyUnitOfWork(session_factory, meetings_dir))
result = await recovery_service.recover_all()

assert result.meetings_recovered == 0
assert result.diarization_jobs_failed == 0
assert result.meetings_recovered == 0, (
f"No meetings should be recovered for clean state, got {result.meetings_recovered}"
)
assert result.diarization_jobs_failed == 0, (
f"No jobs should be failed for clean state, got {result.diarization_jobs_failed}"
)


@pytest.mark.integration
@@ -164,8 +168,14 @@ class TestServerGracefulShutdown:
j1 = await uow.diarization_jobs.get(job1.job_id)
j2 = await uow.diarization_jobs.get(job2.job_id)

assert j1 is not None and j1.status == JOB_STATUS_FAILED
assert j2 is not None and j2.status == JOB_STATUS_FAILED
assert j1 is not None, f"Job {job1.job_id} should exist after shutdown"
assert j1.status == JOB_STATUS_FAILED, (
f"Queued job should be marked failed on shutdown, got {j1.status}"
)
assert j2 is not None, f"Job {job2.job_id} should exist after shutdown"
assert j2.status == JOB_STATUS_FAILED, (
f"Running job should be marked failed on shutdown, got {j2.status}"
)

async def test_shutdown_preserves_completed_jobs(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -190,9 +200,13 @@ class TestServerGracefulShutdown:
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
job = await uow.diarization_jobs.get(completed_job.job_id)

assert job is not None
assert job.status == JOB_STATUS_COMPLETED
assert job.segments_updated == 10
assert job is not None, f"Completed job {completed_job.job_id} should exist after shutdown"
assert job.status == JOB_STATUS_COMPLETED, (
f"Completed job should remain completed after shutdown, got {job.status}"
)
assert job.segments_updated == 10, (
f"Completed job segments_updated should be preserved, expected 10, got {job.segments_updated}"
)


@pytest.mark.integration
@@ -228,7 +242,9 @@ class TestServerDatabaseOperations:
request = noteflow_pb2.ServerInfoRequest()
result = await servicer.GetServerInfo(request, MockContext())

assert result.active_meetings == 2
assert result.active_meetings == 2, (
f"ServerInfo should report 2 active meetings, got {result.active_meetings}"
)

async def test_multiple_servicer_instances_share_database(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -251,8 +267,12 @@ class TestServerDatabaseOperations:
result1 = await servicer1.GetMeeting(request, MockContext())
result2 = await servicer2.GetMeeting(request, MockContext())

assert result1.id == result2.id
assert result1.title == "Shared Meeting"
assert result1.id == result2.id, (
f"Both servicers should return same meeting ID, got {result1.id} vs {result2.id}"
)
assert result1.title == "Shared Meeting", (
f"Meeting title should be 'Shared Meeting', got {result1.title!r}"
)


@pytest.mark.integration
@@ -279,8 +299,12 @@ class TestServerDatabasePersistence:
get_request = noteflow_pb2.GetMeetingRequest(meeting_id=meeting_id)
result = await servicer2.GetMeeting(get_request, MockContext())

assert result.id == meeting_id
assert result.title == "Persistent Meeting"
assert result.id == meeting_id, (
f"Meeting ID should persist across servicer restart, expected {meeting_id}, got {result.id}"
)
assert result.title == "Persistent Meeting", (
f"Meeting title should persist across restart, got {result.title!r}"
)

async def test_preferences_survive_servicer_restart(
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
@@ -295,4 +319,6 @@ class TestServerDatabasePersistence:

async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
value = await uow.preferences.get("test_setting")
assert value == "test_value"
assert value == "test_value", (
f"Preference should persist across servicer restart, expected 'test_value', got {value!r}"
)

@@ -29,14 +29,20 @@ class TestServicerShutdown:
|
||||
) -> None:
|
||||
"""Verify shutdown works when no streams are active."""
|
||||
# Empty state
|
||||
assert len(memory_servicer._active_streams) == 0
|
||||
assert len(memory_servicer._diarization_tasks) == 0
|
||||
assert len(memory_servicer._active_streams) == 0, (
|
||||
f"expected no active streams, got {len(memory_servicer._active_streams)}"
|
||||
)
|
||||
assert len(memory_servicer._diarization_tasks) == 0, (
|
||||
f"expected no diarization tasks, got {len(memory_servicer._diarization_tasks)}"
|
||||
)
|
||||
|
||||
# Shutdown should complete without error
|
||||
await memory_servicer.shutdown()
|
||||
|
||||
# State should still be empty
|
||||
assert len(memory_servicer._active_streams) == 0
|
||||
assert len(memory_servicer._active_streams) == 0, (
|
||||
f"expected no active streams after shutdown, got {len(memory_servicer._active_streams)}"
|
||||
)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_shutdown_cleans_active_streams(
|
||||
@@ -54,14 +60,20 @@ class TestServicerShutdown:
|
||||
mock_session.close = MagicMock()
|
||||
memory_servicer._diarization_sessions[meeting_id] = mock_session
|
||||
|
||||
assert len(memory_servicer._active_streams) == 5
|
||||
assert len(memory_servicer._diarization_sessions) == 5
|
||||
assert len(memory_servicer._active_streams) == 5, (
|
||||
f"expected 5 active streams, got {len(memory_servicer._active_streams)}"
|
||||
)
|
||||
assert len(memory_servicer._diarization_sessions) == 5, (
|
||||
f"expected 5 diarization sessions, got {len(memory_servicer._diarization_sessions)}"
|
||||
)
|
||||
|
||||
# Shutdown
|
||||
await memory_servicer.shutdown()
|
||||
|
||||
# Verify all sessions closed
|
||||
assert len(memory_servicer._diarization_sessions) == 0
|
||||
assert len(memory_servicer._diarization_sessions) == 0, (
|
||||
f"expected all diarization sessions closed, got {len(memory_servicer._diarization_sessions)}"
|
||||
)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_shutdown_cancels_diarization_tasks(
|
||||
@@ -75,17 +87,22 @@ class TestServicerShutdown:
|
||||
memory_servicer._diarization_tasks[f"job-{i}"] = task
|
||||
tasks_created.append(task)
|
||||
|
||||
assert len(memory_servicer._diarization_tasks) == 3
|
||||
assert len(memory_servicer._diarization_tasks) == 3, (
|
||||
f"expected 3 diarization tasks, got {len(memory_servicer._diarization_tasks)}"
|
||||
)
|
||||
|
||||
# Shutdown should cancel all
|
||||
await memory_servicer.shutdown()
|
||||
|
||||
assert len(memory_servicer._diarization_tasks) == 0
|
||||
assert len(memory_servicer._diarization_tasks) == 0, (
|
||||
f"expected no diarization tasks after shutdown, got {len(memory_servicer._diarization_tasks)}"
|
||||
)
|
||||
|
||||
# Verify tasks are cancelled
|
||||
for task in tasks_created:
|
||||
assert task.done()
|
||||
assert task.cancelled()
|
||||
# Verify tasks are cancelled - collect non-done/non-cancelled tasks
|
||||
not_done = [t for t in tasks_created if not t.done()]
|
||||
not_cancelled = [t for t in tasks_created if t.done() and not t.cancelled()]
|
||||
assert not not_done, f"all tasks should be done after shutdown, {len(not_done)} still running"
|
||||
assert not not_cancelled, f"all tasks should be cancelled, {len(not_cancelled)} completed instead"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_shutdown_marks_cancelled_jobs_failed(
|
||||
@@ -111,8 +128,12 @@ class TestServicerShutdown:
|
||||
await memory_servicer.shutdown()
|
||||
|
||||
# Verify job marked as failed
|
||||
assert job.status == noteflow_pb2.JOB_STATUS_FAILED
|
||||
assert job.error_message == "ERR_TASK_CANCELLED"
|
||||
assert job.status == noteflow_pb2.JOB_STATUS_FAILED, (
|
||||
f"expected job status FAILED, got {job.status}"
|
||||
)
|
||||
assert job.error_message == "ERR_TASK_CANCELLED", (
|
||||
f"expected error message 'ERR_TASK_CANCELLED', got '{job.error_message}'"
|
||||
)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_shutdown_idempotent(
|
||||
@@ -147,14 +168,16 @@ class TestServicerShutdown:
|
||||
writer.open(meeting_id, dek, wrapped_dek, sample_rate=DEFAULT_SAMPLE_RATE)
|
||||
|
||||
memory_servicer._audio_writers[meeting_id] = writer
|
||||
assert writer.is_recording
|
||||
assert writer.is_recording, "expected audio writer to be recording after open"
|
||||
|
||||
# Shutdown
|
||||
await memory_servicer.shutdown()
|
||||
|
||||
# Verify writer was closed and removed
|
||||
assert meeting_id not in memory_servicer._audio_writers
|
||||
assert not writer.is_recording
|
||||
assert meeting_id not in memory_servicer._audio_writers, (
|
||||
f"expected meeting '{meeting_id}' removed from audio writers after shutdown"
|
||||
)
|
||||
assert not writer.is_recording, "expected audio writer to stop recording after shutdown"
|
||||
|
||||
|
||||
class TestStreamingStateCleanup:
|
||||
@@ -174,9 +197,15 @@ class TestStreamingStateCleanup:
|
||||
memory_servicer._active_streams.add(meeting_id)
|
||||
|
||||
# Verify state exists
|
||||
assert len(memory_servicer._active_streams) == 10
|
||||
assert len(memory_servicer._vad_instances) == 10
|
||||
assert len(memory_servicer._segmenters) == 10
|
||||
assert len(memory_servicer._active_streams) == 10, (
|
||||
f"expected 10 active streams, got {len(memory_servicer._active_streams)}"
|
||||
)
|
||||
assert len(memory_servicer._vad_instances) == 10, (
|
||||
f"expected 10 VAD instances, got {len(memory_servicer._vad_instances)}"
|
||||
)
|
||||
assert len(memory_servicer._segmenters) == 10, (
|
||||
f"expected 10 segmenters, got {len(memory_servicer._segmenters)}"
|
||||
)
|
||||
|
||||
# Clean up all streams
|
||||
for meeting_id in meeting_ids:
|
||||
@@ -184,10 +213,18 @@ class TestStreamingStateCleanup:
|
||||
memory_servicer._active_streams.discard(meeting_id)
|
||||
|
||||
# Verify all state cleaned
|
||||
assert len(memory_servicer._active_streams) == 0
|
||||
assert len(memory_servicer._vad_instances) == 0
|
||||
assert len(memory_servicer._segmenters) == 0
|
||||
assert len(memory_servicer._partial_buffers) == 0
|
||||
assert len(memory_servicer._active_streams) == 0, (
|
||||
f"expected no active streams after cleanup, got {len(memory_servicer._active_streams)}"
|
||||
)
|
||||
assert len(memory_servicer._vad_instances) == 0, (
|
||||
f"expected no VAD instances after cleanup, got {len(memory_servicer._vad_instances)}"
|
||||
)
|
||||
assert len(memory_servicer._segmenters) == 0, (
|
||||
f"expected no segmenters after cleanup, got {len(memory_servicer._segmenters)}"
|
||||
)
|
||||
assert len(memory_servicer._partial_buffers) == 0, (
|
||||
f"expected no partial buffers after cleanup, got {len(memory_servicer._partial_buffers)}"
|
||||
)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_cleanup_with_diarization_sessions(
|
||||
@@ -209,7 +246,9 @@ class TestStreamingStateCleanup:
|
||||
|
||||
# Verify session was closed
|
||||
mock_session.close.assert_called_once()
assert meeting_id not in memory_servicer._diarization_sessions
assert meeting_id not in memory_servicer._diarization_sessions, (
f"expected meeting '{meeting_id}' removed from diarization sessions after cleanup"
)


class TestTaskCancellation:
@@ -228,8 +267,8 @@ class TestTaskCancellation:
await memory_servicer.shutdown()
# Task should be cancelled and done
assert task.done()
assert task.cancelled()
assert task.done(), "expected long-running task to be done after shutdown"
assert task.cancelled(), "expected long-running task to be cancelled after shutdown"
@pytest.mark.asyncio
async def test_task_with_exception_handling(
@@ -249,11 +288,13 @@ class TestTaskCancellation:
await task
# Verify task is done (not stuck)
assert task.done()
assert task.done(), "expected failing task to be done after exception"
# Cleanup should still work
await memory_servicer.shutdown()
assert len(memory_servicer._diarization_tasks) == 0
assert len(memory_servicer._diarization_tasks) == 0, (
f"expected no diarization tasks after shutdown, got {len(memory_servicer._diarization_tasks)}"
)
@pytest.mark.asyncio
async def test_mixed_task_states_on_shutdown(
@@ -277,7 +318,9 @@ class TestTaskCancellation:
# Shutdown should handle all states
await memory_servicer.shutdown()
assert len(memory_servicer._diarization_tasks) == 0
assert len(memory_servicer._diarization_tasks) == 0, (
f"expected no diarization tasks after shutdown, got {len(memory_servicer._diarization_tasks)}"
)


class TestResourceCleanupOrder:
@@ -316,8 +359,12 @@ class TestResourceCleanupOrder:
await memory_servicer.shutdown()
# Diarization should be closed before audio (based on shutdown() order)
assert "diarization" in cleanup_order
assert "audio" in cleanup_order
assert "diarization" in cleanup_order, (
f"expected 'diarization' in cleanup order, got {cleanup_order}"
)
assert "audio" in cleanup_order, (
f"expected 'audio' in cleanup order, got {cleanup_order}"
)
@pytest.mark.asyncio
async def test_tasks_cancelled_before_sessions_closed(
@@ -337,7 +384,7 @@ class TestResourceCleanupOrder:
await memory_servicer.shutdown()
# Both should be cleaned up
assert task.done()
assert task.done(), "expected task to be done after shutdown"
mock_session.close.assert_called_once()


@@ -362,7 +409,9 @@ class TestConcurrentShutdown:
)
# Should be clean
assert len(memory_servicer._diarization_tasks) == 0
assert len(memory_servicer._diarization_tasks) == 0, (
f"expected no diarization tasks after concurrent shutdowns, got {len(memory_servicer._diarization_tasks)}"
)
@pytest.mark.asyncio
async def test_new_operations_during_shutdown(
@@ -32,14 +32,16 @@ def test_trigger_settings_env_parsing(
monkeypatch.setenv("NOTEFLOW_TRIGGER_AUDIO_MIN_SAMPLES", "5")
monkeypatch.setenv("NOTEFLOW_TRIGGER_POLL_INTERVAL_SECONDS", "1.5")
settings = get_trigger_settings()
assert getattr(settings, attr) == expected
actual = getattr(settings, attr)
assert actual == expected, f"TriggerSettings.{attr}: expected {expected!r}, got {actual!r}"


def test_trigger_settings_poll_interval_parsing(monkeypatch: pytest.MonkeyPatch) -> None:
"""TriggerSettings parses poll interval as float."""
monkeypatch.setenv("NOTEFLOW_TRIGGER_POLL_INTERVAL_SECONDS", "1.5")
settings = get_trigger_settings()
assert settings.trigger_poll_interval_seconds == pytest.approx(1.5)
actual = settings.trigger_poll_interval_seconds
assert actual == pytest.approx(1.5), f"poll_interval_seconds: expected 1.5, got {actual}"


class TestRetentionSettings:
@@ -50,9 +52,19 @@ class TestRetentionSettings:
# Access via class to check field defaults without loading from env
expected_retention_days_default = 90
expected_check_interval_default = 24
assert Settings.model_fields["retention_enabled"].default is False
assert Settings.model_fields["retention_days"].default == expected_retention_days_default
assert Settings.model_fields["retention_check_interval_hours"].default == expected_check_interval_default
retention_enabled_default = Settings.model_fields["retention_enabled"].default
retention_days_default = Settings.model_fields["retention_days"].default
check_interval_default = Settings.model_fields["retention_check_interval_hours"].default
assert retention_enabled_default is False, (
f"retention_enabled default: expected False, got {retention_enabled_default!r}"
)
assert retention_days_default == expected_retention_days_default, (
f"retention_days default: expected {expected_retention_days_default}, got {retention_days_default}"
)
assert check_interval_default == expected_check_interval_default, (
f"retention_check_interval_hours default: expected {expected_check_interval_default}, "
f"got {check_interval_default}"
)
def test_retention_env_parsing(self, monkeypatch: pytest.MonkeyPatch) -> None:
"""Retention settings should parse from environment variables."""
@@ -65,9 +77,16 @@ class TestRetentionSettings:
settings = get_settings()
assert settings.retention_enabled is True
assert settings.retention_days == expected_retention_days
assert settings.retention_check_interval_hours == expected_check_interval
assert settings.retention_enabled is True, (
f"retention_enabled: expected True, got {settings.retention_enabled!r}"
)
assert settings.retention_days == expected_retention_days, (
f"retention_days: expected {expected_retention_days}, got {settings.retention_days}"
)
assert settings.retention_check_interval_hours == expected_check_interval, (
f"retention_check_interval_hours: expected {expected_check_interval}, "
f"got {settings.retention_check_interval_hours}"
)
def test_retention_days_validation(self) -> None:
"""Retention days should be validated within range."""
@@ -25,9 +25,9 @@ class TestUnitOfWork:
|
||||
) -> None:
|
||||
"""Test UoW works as async context manager."""
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
assert uow.meetings is not None
|
||||
assert uow.segments is not None
|
||||
assert uow.summaries is not None
|
||||
assert uow.meetings is not None, "meetings repository should be initialized"
|
||||
assert uow.segments is not None, "segments repository should be initialized"
|
||||
assert uow.summaries is not None, "summaries repository should be initialized"
|
||||
|
||||
async def test_uow_commit(self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path) -> None:
|
||||
"""Test UoW commit persists changes."""
|
||||
@@ -40,8 +40,8 @@ class TestUnitOfWork:
|
||||
# Verify in new UoW
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
retrieved = await uow.meetings.get(meeting.id)
|
||||
assert retrieved is not None
|
||||
assert retrieved.title == "Commit Test"
|
||||
assert retrieved is not None, f"meeting {meeting.id} should exist after commit"
|
||||
assert retrieved.title == "Commit Test", f"expected title 'Commit Test', got '{retrieved.title}'"
|
||||
|
||||
async def test_uow_rollback(self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path) -> None:
|
||||
"""Test UoW rollback discards changes."""
|
||||
@@ -54,7 +54,7 @@ class TestUnitOfWork:
|
||||
# Verify not persisted
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
retrieved = await uow.meetings.get(meeting.id)
|
||||
assert retrieved is None
|
||||
assert retrieved is None, f"meeting {meeting.id} should not exist after rollback"
|
||||
|
||||
async def test_uow_auto_rollback_on_exception(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -70,7 +70,7 @@ class TestUnitOfWork:
|
||||
# Verify not persisted
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
retrieved = await uow.meetings.get(meeting.id)
|
||||
assert retrieved is None
|
||||
assert retrieved is None, f"meeting {meeting.id} should not exist after exception rollback"
|
||||
|
||||
async def test_uow_transactional_consistency(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -103,9 +103,9 @@ class TestUnitOfWork:
|
||||
segs = await uow.segments.get_by_meeting(meeting.id)
|
||||
s = await uow.summaries.get_by_meeting(meeting.id)
|
||||
|
||||
assert m is not None
|
||||
assert len(segs) == 1
|
||||
assert s is not None
|
||||
assert m is not None, f"meeting {meeting.id} should exist after transactional commit"
|
||||
assert len(segs) == 1, f"expected 1 segment, got {len(segs)}"
|
||||
assert s is not None, f"summary for meeting {meeting.id} should exist after commit"
|
||||
|
||||
async def test_uow_repository_caching(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -114,11 +114,11 @@ class TestUnitOfWork:
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
meetings1 = uow.meetings
|
||||
meetings2 = uow.meetings
|
||||
assert meetings1 is meetings2
|
||||
assert meetings1 is meetings2, "meetings repository should be cached (same instance)"
|
||||
|
||||
segments1 = uow.segments
|
||||
segments2 = uow.segments
|
||||
assert segments1 is segments2
|
||||
assert segments1 is segments2, "segments repository should be cached (same instance)"
|
||||
|
||||
async def test_uow_multiple_operations(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -146,6 +146,6 @@ class TestUnitOfWork:
|
||||
m = await uow.meetings.get(meeting.id)
|
||||
segs = await uow.segments.get_by_meeting(meeting.id)
|
||||
|
||||
assert m is not None
|
||||
assert m.state == MeetingState.RECORDING
|
||||
assert len(segs) == 1
|
||||
assert m is not None, f"meeting {meeting.id} should exist after multiple operations"
|
||||
assert m.state == MeetingState.RECORDING, f"expected state RECORDING, got {m.state}"
|
||||
assert len(segs) == 1, f"expected 1 segment after operations, got {len(segs)}"
|
||||
|
||||
@@ -40,21 +40,21 @@ class TestUnitOfWorkFeatureFlags:
|
||||
) -> None:
|
||||
"""Test database UoW supports annotations."""
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
assert uow.supports_annotations is True
|
||||
assert uow.supports_annotations is True, "database UoW should support annotations"
|
||||
|
||||
async def test_supports_diarization_jobs_true(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
) -> None:
|
||||
"""Test database UoW supports diarization jobs."""
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
assert uow.supports_diarization_jobs is True
|
||||
assert uow.supports_diarization_jobs is True, "database UoW should support diarization jobs"
|
||||
|
||||
async def test_supports_preferences_true(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
) -> None:
|
||||
"""Test database UoW supports preferences."""
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
assert uow.supports_preferences is True
|
||||
assert uow.supports_preferences is True, "database UoW should support preferences"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -115,9 +115,9 @@ class TestUnitOfWorkCrossRepositoryOperations:
|
||||
await uow.commit()
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
assert await uow.meetings.get(meeting.id) is None
|
||||
assert await uow.segments.get_by_meeting(meeting.id) == []
|
||||
assert await uow.summaries.get_by_meeting(meeting.id) is None
|
||||
assert await uow.meetings.get(meeting.id) is None, "meeting should be deleted"
|
||||
assert await uow.segments.get_by_meeting(meeting.id) == [], "segments should cascade delete"
|
||||
assert await uow.summaries.get_by_meeting(meeting.id) is None, "summary should cascade delete"
|
||||
|
||||
async def test_meeting_deletion_cascades_to_diarization_jobs(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -144,8 +144,8 @@ class TestUnitOfWorkCrossRepositoryOperations:
|
||||
await uow.commit()
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
assert await uow.diarization_jobs.get(job.job_id) is None
|
||||
assert await uow.diarization_jobs.get_streaming_turns(str(meeting.id)) == []
|
||||
assert await uow.diarization_jobs.get(job.job_id) is None, "diarization job should cascade delete"
|
||||
assert await uow.diarization_jobs.get_streaming_turns(str(meeting.id)) == [], "streaming turns should cascade delete"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -238,7 +238,7 @@ class TestUnitOfWorkRollbackScenarios:
|
||||
await uow.rollback()
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
assert await uow.meetings.get(meeting.id) is None
|
||||
assert await uow.meetings.get(meeting.id) is None, "meeting should not exist after rollback"
|
||||
|
||||
async def test_exception_during_segment_add_rolls_back_meeting(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -259,7 +259,7 @@ class TestUnitOfWorkRollbackScenarios:
|
||||
raise TestError("Simulated failure")
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
assert await uow.meetings.get(meeting.id) is None
|
||||
assert await uow.meetings.get(meeting.id) is None, "meeting should not exist after exception rollback"
|
||||
|
||||
async def test_rollback_then_new_work_in_same_context(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -276,10 +276,10 @@ class TestUnitOfWorkRollbackScenarios:
|
||||
await uow.commit()
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
assert await uow.meetings.get(meeting1.id) is None
|
||||
assert await uow.meetings.get(meeting1.id) is None, "rolled back meeting should not exist"
|
||||
m2 = await uow.meetings.get(meeting2.id)
|
||||
assert m2 is not None
|
||||
assert m2.title == "Committed"
|
||||
assert m2 is not None, "committed meeting should be retrievable"
|
||||
assert m2.title == "Committed", f"expected title 'Committed', got {m2.title!r}"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -304,12 +304,12 @@ class TestUnitOfWorkRepositoryCaching:
|
||||
jobs1 = uow.diarization_jobs
|
||||
jobs2 = uow.diarization_jobs
|
||||
|
||||
assert meetings1 is meetings2
|
||||
assert segments1 is segments2
|
||||
assert summaries1 is summaries2
|
||||
assert annotations1 is annotations2
|
||||
assert preferences1 is preferences2
|
||||
assert jobs1 is jobs2
|
||||
assert meetings1 is meetings2, "meetings repository should be cached"
|
||||
assert segments1 is segments2, "segments repository should be cached"
|
||||
assert summaries1 is summaries2, "summaries repository should be cached"
|
||||
assert annotations1 is annotations2, "annotations repository should be cached"
|
||||
assert preferences1 is preferences2, "preferences repository should be cached"
|
||||
assert jobs1 is jobs2, "diarization_jobs repository should be cached"
|
||||
|
||||
async def test_repository_instances_new_per_context(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -321,7 +321,7 @@ class TestUnitOfWorkRepositoryCaching:
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow2:
|
||||
meetings2 = uow2.meetings
|
||||
|
||||
assert meetings1 is not meetings2
|
||||
assert meetings1 is not meetings2, "repository instances should differ across UoW contexts"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -371,16 +371,16 @@ class TestUnitOfWorkComplexWorkflows:
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
meeting = await uow.meetings.get(meeting.id)
|
||||
assert meeting is not None
|
||||
assert meeting.state == MeetingState.CREATED
|
||||
assert meeting is not None, "meeting should be retrievable after creation"
|
||||
assert meeting.state == MeetingState.CREATED, f"expected CREATED state, got {meeting.state}"
|
||||
meeting.start_recording()
|
||||
await uow.meetings.update(meeting)
|
||||
await uow.commit()
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
meeting = await uow.meetings.get(meeting.id)
|
||||
assert meeting is not None
|
||||
assert meeting.state == MeetingState.RECORDING
|
||||
assert meeting is not None, "meeting should be retrievable for segment addition"
|
||||
assert meeting.state == MeetingState.RECORDING, f"expected RECORDING state, got {meeting.state}"
|
||||
for i in range(5):
|
||||
segment = Segment(
|
||||
segment_id=i,
|
||||
@@ -393,7 +393,7 @@ class TestUnitOfWorkComplexWorkflows:
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
meeting = await uow.meetings.get(meeting.id)
|
||||
assert meeting is not None
|
||||
assert meeting is not None, "meeting should be retrievable for stopping"
|
||||
meeting.begin_stopping()
|
||||
meeting.stop_recording()
|
||||
await uow.meetings.update(meeting)
|
||||
@@ -401,8 +401,8 @@ class TestUnitOfWorkComplexWorkflows:
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
meeting = await uow.meetings.get(meeting.id)
|
||||
assert meeting is not None
|
||||
assert meeting.state == MeetingState.STOPPED
|
||||
assert meeting is not None, "meeting should be retrievable for summary creation"
|
||||
assert meeting.state == MeetingState.STOPPED, f"expected STOPPED state, got {meeting.state}"
|
||||
|
||||
summary = Summary(
|
||||
meeting_id=meeting.id,
|
||||
@@ -419,10 +419,10 @@ class TestUnitOfWorkComplexWorkflows:
|
||||
segments = await uow.segments.get_by_meeting(meeting_id)
|
||||
summary = await uow.summaries.get_by_meeting(meeting_id)
|
||||
|
||||
assert final_meeting is not None
|
||||
assert final_meeting.state == MeetingState.STOPPED
|
||||
assert len(segments) == 5
|
||||
assert summary is not None
|
||||
assert final_meeting is not None, "final meeting should be retrievable"
|
||||
assert final_meeting.state == MeetingState.STOPPED, f"expected STOPPED state, got {final_meeting.state}"
|
||||
assert len(segments) == 5, f"expected 5 segments, got {len(segments)}"
|
||||
assert summary is not None, "summary should be retrievable"
|
||||
|
||||
async def test_diarization_job_workflow(
|
||||
self, session_factory: async_sessionmaker[AsyncSession], meetings_dir: Path
|
||||
@@ -461,10 +461,10 @@ class TestUnitOfWorkComplexWorkflows:
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
final_job = await uow.diarization_jobs.get(job.job_id)
|
||||
assert final_job is not None
|
||||
assert final_job.status == JOB_STATUS_COMPLETED
|
||||
assert final_job.segments_updated == 10
|
||||
assert final_job.speaker_ids == ["SPEAKER_00", "SPEAKER_01"]
|
||||
assert final_job is not None, "completed job should be retrievable"
|
||||
assert final_job.status == JOB_STATUS_COMPLETED, f"expected COMPLETED status, got {final_job.status}"
|
||||
assert final_job.segments_updated == 10, f"expected 10 segments updated, got {final_job.segments_updated}"
|
||||
assert final_job.speaker_ids == ["SPEAKER_00", "SPEAKER_01"], f"unexpected speaker_ids: {final_job.speaker_ids}"
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -477,7 +477,7 @@ class TestUnitOfWorkPreferencesWorkflow:
|
||||
"""Test cloud consent workflow as used by server startup."""
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
consent = await uow.preferences.get_bool("cloud_consent_granted", False)
|
||||
assert consent is False
|
||||
assert consent is False, "initial consent should be False"
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
await uow.preferences.set("cloud_consent_granted", True)
|
||||
@@ -485,7 +485,7 @@ class TestUnitOfWorkPreferencesWorkflow:
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
consent = await uow.preferences.get_bool("cloud_consent_granted", False)
|
||||
assert consent is True
|
||||
assert consent is True, "consent should be True after granting"
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
await uow.preferences.set("cloud_consent_granted", False)
|
||||
@@ -493,4 +493,4 @@ class TestUnitOfWorkPreferencesWorkflow:
|
||||
|
||||
async with SqlAlchemyUnitOfWork(session_factory, meetings_dir) as uow:
|
||||
consent = await uow.preferences.get_bool("cloud_consent_granted", False)
|
||||
assert consent is False
|
||||
assert consent is False, "consent should be False after revoking"
|
||||
|
||||
@@ -132,22 +132,34 @@ class TestStopMeetingTriggersWebhook:
|
||||
result = await servicer.StopMeeting(request, MockGrpcContext())
|
||||
|
||||
# StopMeeting returns Meeting proto directly - state should be STOPPED
|
||||
assert result.state == noteflow_pb2.MEETING_STATE_STOPPED
|
||||
assert result.state == noteflow_pb2.MEETING_STATE_STOPPED, (
|
||||
f"expected meeting state STOPPED, got {result.state}"
|
||||
)
|
||||
|
||||
# Verify webhooks were triggered (recording.stopped + meeting.completed)
|
||||
assert len(captured_webhook_calls) == 2
|
||||
assert len(captured_webhook_calls) == 2, (
|
||||
f"expected 2 webhook calls (recording.stopped + meeting.completed), got {len(captured_webhook_calls)}"
|
||||
)
|
||||
|
||||
event_types = {call["event_type"] for call in captured_webhook_calls}
|
||||
assert WebhookEventType.RECORDING_STOPPED in event_types
|
||||
assert WebhookEventType.MEETING_COMPLETED in event_types
|
||||
assert WebhookEventType.RECORDING_STOPPED in event_types, (
|
||||
f"expected RECORDING_STOPPED event in {event_types}"
|
||||
)
|
||||
assert WebhookEventType.MEETING_COMPLETED in event_types, (
|
||||
f"expected MEETING_COMPLETED event in {event_types}"
|
||||
)
|
||||
|
||||
# Verify meeting.completed payload
|
||||
completed_call = next(
|
||||
c for c in captured_webhook_calls
|
||||
if c["event_type"] == WebhookEventType.MEETING_COMPLETED
|
||||
)
|
||||
assert completed_call["payload"]["meeting_id"] == meeting_id
|
||||
assert completed_call["payload"]["title"] == "Webhook Integration Test"
|
||||
assert completed_call["payload"]["meeting_id"] == meeting_id, (
|
||||
f"expected meeting_id {meeting_id}, got {completed_call['payload']['meeting_id']}"
|
||||
)
|
||||
assert completed_call["payload"]["title"] == "Webhook Integration Test", (
|
||||
f"expected title 'Webhook Integration Test', got {completed_call['payload']['title']}"
|
||||
)
|
||||
|
||||
async def test_stop_meeting_with_failed_webhook_still_succeeds(
|
||||
self,
|
||||
@@ -185,7 +197,9 @@ class TestStopMeetingTriggersWebhook:
|
||||
result = await servicer.StopMeeting(request, MockGrpcContext())
|
||||
|
||||
# Meeting stop succeeds despite webhook failure
|
||||
assert result.state == noteflow_pb2.MEETING_STATE_STOPPED
|
||||
assert result.state == noteflow_pb2.MEETING_STATE_STOPPED, (
|
||||
f"expected meeting state STOPPED despite webhook failure, got {result.state}"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.integration
|
||||
@@ -213,4 +227,6 @@ class TestNoWebhookServiceGracefulDegradation:
|
||||
request = noteflow_pb2.StopMeetingRequest(meeting_id=meeting_id)
|
||||
result = await servicer.StopMeeting(request, MockGrpcContext())
|
||||
|
||||
assert result.state == noteflow_pb2.MEETING_STATE_STOPPED
|
||||
assert result.state == noteflow_pb2.MEETING_STATE_STOPPED, (
|
||||
f"expected meeting state STOPPED without webhook service, got {result.state}"
|
||||
)
|
||||
|
||||
@@ -113,9 +113,15 @@ class TestWebhookRepositoryCreate:
|
||||
await session.commit()
|
||||
|
||||
assert len(created.events) == 3, "Should have 3 events"
|
||||
assert WebhookEventType.MEETING_COMPLETED in created.events
|
||||
assert WebhookEventType.SUMMARY_GENERATED in created.events
|
||||
assert WebhookEventType.RECORDING_STARTED in created.events
|
||||
assert (
|
||||
WebhookEventType.MEETING_COMPLETED in created.events
|
||||
), "MEETING_COMPLETED event should be in events"
|
||||
assert (
|
||||
WebhookEventType.SUMMARY_GENERATED in created.events
|
||||
), "SUMMARY_GENERATED event should be in events"
|
||||
assert (
|
||||
WebhookEventType.RECORDING_STARTED in created.events
|
||||
), "RECORDING_STARTED event should be in events"
|
||||
|
||||
async def test_creates_webhook_with_optional_fields(
|
||||
self,
|
||||
@@ -221,10 +227,14 @@ class TestWebhookRepositoryGetById:
|
||||
|
||||
retrieved = await webhook_repo.get_by_id(config.id)
|
||||
|
||||
assert retrieved is not None
|
||||
assert retrieved is not None, "Should retrieve webhook by ID"
|
||||
assert isinstance(retrieved.events, frozenset), "Events should be frozenset"
|
||||
assert WebhookEventType.MEETING_COMPLETED in retrieved.events
|
||||
assert WebhookEventType.RECORDING_STOPPED in retrieved.events
|
||||
assert (
|
||||
WebhookEventType.MEETING_COMPLETED in retrieved.events
|
||||
), "MEETING_COMPLETED event should be in retrieved events"
|
||||
assert (
|
||||
WebhookEventType.RECORDING_STOPPED in retrieved.events
|
||||
), "RECORDING_STOPPED event should be in retrieved events"
|
||||
|
||||
|
||||
# ============================================================================
|
||||
@@ -271,9 +281,13 @@ class TestWebhookRepositoryGetAll:
|
||||
result = await webhook_repo.get_all()
|
||||
|
||||
assert len(result) == 2, "Should return both webhooks"
|
||||
urls = {w.url for w in result}
|
||||
assert "https://example.com/hook1" in urls
|
||||
assert "https://example.com/hook2" in urls
|
||||
result_urls = (result[0].url, result[1].url)
|
||||
assert (
|
||||
"https://example.com/hook1" in result_urls
|
||||
), "hook1 URL should be in results"
|
||||
assert (
|
||||
"https://example.com/hook2" in result_urls
|
||||
), "hook2 URL should be in results"
|
||||
|
||||
async def test_filters_by_workspace(
|
||||
self,
|
||||
@@ -306,7 +320,9 @@ class TestWebhookRepositoryGetAll:
|
||||
result = await webhook_repo.get_all(workspace_id=workspace1)
|
||||
|
||||
assert len(result) == 1, "Should return only workspace1 webhooks"
|
||||
assert result[0].url == "https://workspace1.com/hook"
|
||||
assert (
|
||||
result[0].url == "https://workspace1.com/hook"
|
||||
), f"URL should be workspace1 hook, got {result[0].url}"
|
||||
|
||||
|
||||
# ============================================================================
|
||||
@@ -344,8 +360,10 @@ class TestWebhookRepositoryGetAllEnabled:
|
||||
result = await webhook_repo.get_all_enabled()
|
||||
|
||||
assert len(result) == 1, "Should return only enabled webhook"
|
||||
assert result[0].url == "https://example.com/enabled"
|
||||
assert result[0].enabled is True
|
||||
assert (
|
||||
result[0].url == "https://example.com/enabled"
|
||||
), f"URL should be enabled webhook, got {result[0].url}"
|
||||
assert result[0].enabled is True, "Webhook should be enabled"
|
||||
|
||||
async def test_filters_enabled_by_workspace(
|
||||
self,
|
||||
@@ -377,7 +395,9 @@ class TestWebhookRepositoryGetAllEnabled:
|
||||
result = await webhook_repo.get_all_enabled(workspace_id=workspace1)
|
||||
|
||||
assert len(result) == 1, "Should return only workspace1 enabled webhooks"
|
||||
assert result[0].workspace_id == workspace1
|
||||
assert (
|
||||
result[0].workspace_id == workspace1
|
||||
), f"Workspace ID should be {workspace1}, got {result[0].workspace_id}"
|
||||
|
||||
async def test_returns_empty_when_all_disabled(
|
||||
self,
|
||||
@@ -503,8 +523,12 @@ class TestWebhookRepositoryUpdate:
|
||||
await session.commit()
|
||||
|
||||
assert len(result.events) == 1, "Should have only 1 event"
|
||||
assert WebhookEventType.RECORDING_STARTED in result.events
|
||||
assert WebhookEventType.MEETING_COMPLETED not in result.events
|
||||
assert (
|
||||
WebhookEventType.RECORDING_STARTED in result.events
|
||||
), "RECORDING_STARTED should be in updated events"
|
||||
assert (
|
||||
WebhookEventType.MEETING_COMPLETED not in result.events
|
||||
), "MEETING_COMPLETED should not be in updated events"
|
||||
|
||||
async def test_update_raises_for_nonexistent_webhook(
|
||||
self,
|
||||
|
||||
@@ -1,101 +1,15 @@
|
||||
{
|
||||
"generated_at": "2025-12-31T15:28:38.066948+00:00",
|
||||
"generated_at": "2025-12-31T21:15:56.514631+00:00",
|
||||
"rules": {
|
||||
"alias_import": [
|
||||
"alias_import|src/noteflow/domain/auth/oidc.py|cc2f0972|datetime->dt",
|
||||
"alias_import|src/noteflow/grpc/service.py|d8a43a4a|__version__->NOTEFLOW_VERSION"
|
||||
],
|
||||
"assertion_roulette": [
|
||||
"assertion_roulette|tests/domain/test_meeting.py|test_immediate_stop_after_start_zero_duration|assertions=5",
|
||||
"assertion_roulette|tests/domain/test_meeting.py|test_state_transition_does_not_modify_segments|assertions=4",
|
||||
"assertion_roulette|tests/domain/test_project.py|test_default_values|assertions=4",
|
||||
"assertion_roulette|tests/domain/test_project.py|test_default_values|assertions=4",
|
||||
"assertion_roulette|tests/domain/test_project.py|test_settings_with_full_configuration|assertions=4",
|
||||
"assertion_roulette|tests/domain/test_project.py|test_timestamps_are_set|assertions=4",
|
||||
"assertion_roulette|tests/domain/test_project.py|test_with_nested_rules|assertions=4",
|
||||
"assertion_roulette|tests/domain/test_project.py|test_with_values|assertions=4",
|
||||
"assertion_roulette|tests/domain/test_summary.py|test_action_item_with_all_fields|assertions=4",
|
||||
"assertion_roulette|tests/grpc/test_entities_mixin.py|test_returns_extracted_entities|assertions=4",
|
||||
"assertion_roulette|tests/infrastructure/asr/test_engine.py|test_load_model_with_stub_sets_state|assertions=4",
|
||||
"assertion_roulette|tests/infrastructure/asr/test_engine.py|test_transcribe_with_stubbed_model|assertions=5",
|
||||
"assertion_roulette|tests/infrastructure/audio/test_writer.py|test_flush_writes_buffered_data|assertions=5",
|
||||
"assertion_roulette|tests/infrastructure/audio/test_writer.py|test_manifest_contains_correct_metadata|assertions=6",
|
||||
"assertion_roulette|tests/infrastructure/audio/test_writer.py|test_periodic_flush_thread_starts_on_open|assertions=4",
|
||||
"assertion_roulette|tests/infrastructure/audio/test_writer.py|test_write_chunk_converts_float32_to_pcm16|assertions=4",
|
||||
"assertion_roulette|tests/infrastructure/calendar/test_google_adapter.py|test_list_events_returns_calendar_events|assertions=7",
|
||||
"assertion_roulette|tests/infrastructure/calendar/test_oauth_manager.py|test_initiate_google_auth_returns_url_and_state|assertions=6",
|
||||
"assertion_roulette|tests/infrastructure/export/test_html.py|test_export_escapes_html|assertions=5",
|
||||
"assertion_roulette|tests/infrastructure/export/test_markdown.py|test_export_includes_sections|assertions=10",
|
||||
"assertion_roulette|tests/infrastructure/summarization/test_ollama_provider.py|test_ollama_summarize_returns_result|assertions=8",
|
||||
"assertion_roulette|tests/infrastructure/test_calendar_converters.py|test_calendar_event_info_to_orm_kwargs|assertions=5",
|
||||
"assertion_roulette|tests/infrastructure/test_observability.py|test_collect_now_returns_metrics|assertions=5",
|
||||
"assertion_roulette|tests/infrastructure/test_orm_converters.py|test_converts_orm_to_domain_annotation|assertions=5",
|
||||
"assertion_roulette|tests/infrastructure/triggers/test_foreground_app.py|test_foreground_app_settings_lowercases_apps|assertions=7",
|
||||
"assertion_roulette|tests/integration/test_e2e_annotations.py|test_add_annotation_persists_to_database|assertions=8",
|
||||
"assertion_roulette|tests/integration/test_e2e_annotations.py|test_update_annotation_modifies_database|assertions=4",
|
||||
"assertion_roulette|tests/integration/test_e2e_ner.py|test_delete_does_not_affect_other_entities|assertions=4",
|
||||
"assertion_roulette|tests/integration/test_grpc_servicer_database.py|test_create_meeting_persists_to_database|assertions=6",
|
||||
"assertion_roulette|tests/integration/test_grpc_servicer_database.py|test_get_diarization_job_status_retrieves_from_database|assertions=4",
|
||||
"assertion_roulette|tests/integration/test_grpc_servicer_database.py|test_get_meeting_with_segments|assertions=4",
|
||||
"assertion_roulette|tests/integration/test_grpc_servicer_database.py|test_refine_speaker_diarization_creates_job_in_database|assertions=5",
|
||||
"assertion_roulette|tests/integration/test_grpc_servicer_database.py|test_rename_speaker_updates_segments_in_database|assertions=4",
|
||||
"assertion_roulette|tests/integration/test_preferences_repository.py|test_set_bulk_creates_multiple_preferences|assertions=4",
|
||||
"assertion_roulette|tests/integration/test_signal_handling.py|test_cleanup_all_active_streams|assertions=7",
|
||||
"assertion_roulette|tests/integration/test_signal_handling.py|test_shutdown_cancels_diarization_tasks|assertions=4",
|
||||
"assertion_roulette|tests/integration/test_unit_of_work_advanced.py|test_diarization_job_workflow|assertions=4",
|
||||
"assertion_roulette|tests/integration/test_unit_of_work_advanced.py|test_meeting_lifecycle_workflow|assertions=11",
|
||||
"assertion_roulette|tests/integration/test_unit_of_work_advanced.py|test_repository_instances_cached_within_context|assertions=6",
|
||||
"assertion_roulette|tests/integration/test_webhook_integration.py|test_stop_meeting_triggers_meeting_completed_webhook|assertions=6",
|
||||
"assertion_roulette|tests/scripts/test_migrate_logging.py|test_adds_get_logger_to_existing_import|assertions=4",
|
||||
"assertion_roulette|tests/scripts/test_migrate_logging.py|test_grpc_service_pattern|assertions=4",
|
||||
"assertion_roulette|tests/scripts/test_migrate_logging.py|test_keeps_import_logging_when_constants_used|assertions=5",
|
||||
"assertion_roulette|tests/scripts/test_migrate_logging.py|test_transforms_complex_module|assertions=5",
|
||||
"assertion_roulette|tests/scripts/test_migrate_logging.py|test_transforms_simple_module|assertions=4",
|
||||
"assertion_roulette|tests/scripts/test_migrate_logging.py|test_uses_logging_constants_detection|assertions=4"
|
||||
],
|
||||
"conditional_test_logic": [
|
||||
"conditional_test_logic|tests/application/test_meeting_service.py|test_meeting_state_transitions|if@122",
|
||||
"conditional_test_logic|tests/grpc/test_sprint_15_1_critical_bugs.py|test_no_datetime_now_in_diarization_mixin|for@234",
|
||||
"conditional_test_logic|tests/grpc/test_stream_lifecycle.py|test_double_start_same_meeting_id_detected|if@454",
|
||||
"conditional_test_logic|tests/infrastructure/audio/test_capture.py|test_get_default_device_returns_device_or_none|if@42",
|
||||
"conditional_test_logic|tests/infrastructure/audio/test_ring_buffer.py|test_chunk_count_property|for@173",
|
||||
"conditional_test_logic|tests/infrastructure/audio/test_ring_buffer.py|test_get_window_chronological_order|for@132",
|
||||
"conditional_test_logic|tests/infrastructure/audio/test_ring_buffer.py|test_ring_buffer_duration_property|for@164",
|
||||
"conditional_test_logic|tests/infrastructure/ner/test_engine.py|test_confidence_is_set|for@126",
|
||||
"conditional_test_logic|tests/infrastructure/ner/test_engine.py|test_normalized_text_is_lowercase|for@119",
|
||||
"conditional_test_logic|tests/infrastructure/persistence/test_migrations.py|test_all_migrations_have_down_revision|for@54",
|
||||
"conditional_test_logic|tests/infrastructure/persistence/test_migrations.py|test_all_migrations_have_downgrade_function|for@85",
|
||||
"conditional_test_logic|tests/infrastructure/persistence/test_migrations.py|test_all_migrations_have_revision|for@34",
|
||||
"conditional_test_logic|tests/infrastructure/persistence/test_migrations.py|test_all_migrations_have_upgrade_function|for@74",
|
||||
"conditional_test_logic|tests/infrastructure/test_observability.py|test_rapid_collection_maintains_order|for@403",
|
||||
"conditional_test_logic|tests/infrastructure/test_observability.py|test_rapid_sequential_logging|for@356",
|
||||
"conditional_test_logic|tests/infrastructure/triggers/test_calendar.py|test_datetime_parsing_formats|if@315",
|
||||
"conditional_test_logic|tests/infrastructure/triggers/test_calendar.py|test_overlap_scenarios|if@177",
|
||||
"conditional_test_logic|tests/integration/test_crash_scenarios.py|test_concurrent_recovery_calls|for@359",
|
||||
"conditional_test_logic|tests/integration/test_database_resilience.py|test_concurrent_creates_unique_ids|for@235",
|
||||
"conditional_test_logic|tests/integration/test_entity_repository.py|test_saves_multiple_entities|for@193",
|
||||
"conditional_test_logic|tests/integration/test_recovery_service.py|test_recovers_multiple_meetings|for@146",
|
||||
"conditional_test_logic|tests/integration/test_signal_handling.py|test_shutdown_cancels_diarization_tasks|for@85"
|
||||
],
|
||||
"deprecated_pattern": [
|
||||
"deprecated_pattern|src/noteflow/infrastructure/export/html.py|b089eb78|str.format()"
|
||||
],
|
||||
"duplicate_test_name": [
|
||||
"duplicate_test_name|tests/application/test_recovery_service.py|test_audio_validation_skipped_without_meetings_dir|count=2",
|
||||
"duplicate_test_name|tests/config/test_feature_flags.py|test_default_values|count=4",
|
||||
"duplicate_test_name|tests/domain/test_project.py|test_is_frozen|count=2",
|
||||
"duplicate_test_name|tests/domain/test_project.py|test_with_values|count=2",
|
||||
"duplicate_test_name|tests/grpc/test_annotation_mixin.py|test_aborts_on_invalid_annotation_id|count=3",
|
||||
"duplicate_test_name|tests/grpc/test_annotation_mixin.py|test_aborts_on_invalid_meeting_id|count=2",
|
||||
"duplicate_test_name|tests/grpc/test_annotation_mixin.py|test_aborts_when_annotation_not_found|count=3",
|
||||
"duplicate_test_name|tests/grpc/test_entities_mixin.py|test_aborts_when_entity_belongs_to_different_meeting|count=2",
|
||||
"duplicate_test_name|tests/grpc/test_entities_mixin.py|test_aborts_when_entity_not_found|count=2",
|
||||
"duplicate_test_name|tests/grpc/test_entities_mixin.py|test_aborts_with_invalid_entity_id_format|count=2",
|
||||
"duplicate_test_name|tests/grpc/test_entities_mixin.py|test_aborts_with_invalid_meeting_id_format|count=2",
|
||||
"duplicate_test_name|tests/grpc/test_entities_mixin.py|test_aborts_with_invalid_meeting_id|count=2",
|
||||
"duplicate_test_name|tests/grpc/test_project_mixin.py|test_delete_project_not_found|count=2",
|
||||
"duplicate_test_name|tests/infrastructure/summarization/test_cloud_provider.py|test_raises_invalid_response_on_empty_content|count=2",
|
||||
"duplicate_test_name|tests/infrastructure/summarization/test_cloud_provider.py|test_summarize_returns_result|count=2"
|
||||
"conditional_test_logic|tests/grpc/test_stream_lifecycle.py|test_double_start_same_meeting_id_detected|if@456",
|
||||
"conditional_test_logic|tests/infrastructure/audio/test_capture.py|test_get_default_device_returns_device_or_none|if@46",
|
||||
"conditional_test_logic|tests/infrastructure/triggers/test_calendar.py|test_overlap_scenarios|if@181"
|
||||
],
|
||||
"eager_test": [
|
||||
"eager_test|tests/infrastructure/audio/test_writer.py|test_audio_roundtrip_encryption_decryption|methods=15",
|
||||
@@ -107,9 +21,6 @@
|
||||
"exception_handling|tests/integration/test_memory_fallback.py|test_concurrent_reads_and_writes|catches_Exception",
|
||||
"exception_handling|tests/integration/test_memory_fallback.py|test_concurrent_reads_and_writes|catches_Exception"
|
||||
],
|
||||
"high_complexity": [
|
||||
"high_complexity|src/noteflow/infrastructure/observability/usage.py|record|complexity=20"
|
||||
],
|
||||
"long_method": [
|
||||
"long_method|src/noteflow/application/services/summarization_service.py|summarize|lines=104",
|
||||
"long_method|src/noteflow/grpc/_mixins/oidc.py|RegisterOidcProvider|lines=79",
|
||||
@@ -138,106 +49,21 @@
|
||||
],
|
||||
"long_test": [
|
||||
"long_test|tests/infrastructure/audio/test_capture.py|test_start_with_stubbed_stream_invokes_callback|lines=54",
|
||||
"long_test|tests/integration/test_e2e_streaming.py|test_segments_persisted_to_database|lines=70",
|
||||
"long_test|tests/integration/test_unit_of_work_advanced.py|test_meeting_lifecycle_workflow|lines=63"
|
||||
],
|
||||
"magic_number_test": [
|
||||
"magic_number_test|tests/domain/test_annotation.py|test_annotation_very_long_duration|value=7200.0",
|
||||
"magic_number_test|tests/domain/test_meeting.py|test_duration_seconds_with_times|value=1800.0",
|
||||
"magic_number_test|tests/domain/test_segment.py|test_segment_very_long_duration|value=36000.0",
|
||||
"magic_number_test|tests/domain/test_summary.py|test_key_point_with_many_segment_ids|value=50",
|
||||
"magic_number_test|tests/domain/test_summary.py|test_key_point_with_timing|value=10.5",
|
||||
"magic_number_test|tests/domain/test_summary.py|test_key_point_with_timing|value=25.0",
|
||||
"magic_number_test|tests/domain/test_summary.py|test_summary_very_long_executive_summary|value=10000",
|
||||
"magic_number_test|tests/grpc/test_annotation_mixin.py|test_returns_annotation_when_found|value=120.0",
|
||||
"magic_number_test|tests/grpc/test_annotation_mixin.py|test_updates_annotation_successfully|value=15.0",
|
||||
"magic_number_test|tests/grpc/test_annotation_mixin.py|test_updates_annotation_successfully|value=25.0",
|
||||
"magic_number_test|tests/grpc/test_annotation_mixin.py|test_updates_text_only|value=20.0",
|
||||
"magic_number_test|tests/grpc/test_diarization_cancel.py|test_progress_percent_running|value=50.0",
|
||||
"magic_number_test|tests/grpc/test_diarization_mixin.py|test_status_progress_running_is_time_based|value=50.0",
|
||||
"magic_number_test|tests/grpc/test_meeting_mixin.py|test_list_meetings_respects_limit|value=25",
|
||||
"magic_number_test|tests/grpc/test_meeting_mixin.py|test_list_meetings_respects_offset|value=50",
|
||||
"magic_number_test|tests/grpc/test_preferences_mixin.py|test_computes_deterministic_etag|value=32",
|
||||
"magic_number_test|tests/grpc/test_stream_lifecycle.py|test_partial_buffers_cleared_on_cleanup|value=3200",
|
||||
"magic_number_test|tests/grpc/test_timestamp_converters.py|test_epoch_seconds_to_datetime_returns_utc|value=2024",
|
||||
"magic_number_test|tests/grpc/test_timestamp_converters.py|test_iso_string_with_z_suffix_parsed_as_utc|value=14",
|
||||
"magic_number_test|tests/grpc/test_timestamp_converters.py|test_iso_string_with_z_suffix_parsed_as_utc|value=15",
|
||||
"magic_number_test|tests/grpc/test_timestamp_converters.py|test_iso_string_with_z_suffix_parsed_as_utc|value=2024",
|
||||
"magic_number_test|tests/grpc/test_timestamp_converters.py|test_iso_string_with_z_suffix_parsed_as_utc|value=30",
|
||||
"magic_number_test|tests/grpc/test_timestamp_converters.py|test_iso_string_with_z_suffix_parsed_as_utc|value=45",
|
||||
"magic_number_test|tests/infrastructure/asr/test_segmenter.py|test_custom_config|value=44100",
|
||||
"magic_number_test|tests/infrastructure/asr/test_segmenter.py|test_custom_config|value=60.0",
|
||||
"magic_number_test|tests/infrastructure/audio/test_capture.py|test_properties_after_start|value=44100",
|
||||
"magic_number_test|tests/infrastructure/audio/test_dto.py|test_timestamped_audio_creation|value=1600",
|
||||
"magic_number_test|tests/infrastructure/audio/test_reader.py|test_reader_uses_manifest_sample_rate|value=1600",
|
||||
"magic_number_test|tests/infrastructure/audio/test_reader.py|test_reader_uses_manifest_sample_rate|value=48000",
|
||||
"magic_number_test|tests/infrastructure/audio/test_reader.py|test_reader_uses_manifest_sample_rate|value=48000",
|
||||
"magic_number_test|tests/infrastructure/audio/test_ring_buffer.py|test_init_with_default_duration|value=30.0",
|
||||
"magic_number_test|tests/infrastructure/audio/test_ring_buffer.py|test_max_duration_property|value=15.0",
|
||||
"magic_number_test|tests/infrastructure/audio/test_writer.py|test_write_chunk_converts_float32_to_pcm16|value=3200",
|
||||
"magic_number_test|tests/infrastructure/summarization/test_cloud_provider.py|test_summarize_returns_result|value=150",
|
||||
"magic_number_test|tests/infrastructure/summarization/test_cloud_provider.py|test_summarize_returns_result|value=150",
|
||||
"magic_number_test|tests/infrastructure/test_diarization.py|test_overlap_duration_full_overlap|value=15.0",
|
||||
"magic_number_test|tests/infrastructure/test_diarization.py|test_overlap_duration_no_overlap|value=12.0",
|
||||
"magic_number_test|tests/infrastructure/test_diarization.py|test_overlap_duration_no_overlap|value=20.0",
|
||||
"magic_number_test|tests/infrastructure/test_diarization.py|test_overlap_duration_partial_overlap_right|value=15.0",
|
||||
"magic_number_test|tests/infrastructure/test_integration_converters.py|test_converts_stats_dict|value=15",
|
||||
"magic_number_test|tests/infrastructure/test_integration_converters.py|test_sync_run_orm_to_domain|value=5000",
|
||||
"magic_number_test|tests/infrastructure/test_integration_converters.py|test_sync_run_to_orm_kwargs|value=10000",
|
||||
"magic_number_test|tests/infrastructure/test_integration_converters.py|test_sync_run_to_orm_kwargs|value=25",
|
||||
"magic_number_test|tests/infrastructure/test_observability.py|test_log_with_large_details|value=50",
|
||||
"magic_number_test|tests/infrastructure/triggers/test_calendar.py|test_non_iterable_returns_empty|value=12345",
|
||||
"magic_number_test|tests/integration/test_e2e_annotations.py|test_add_annotation_persists_to_database|value=15.0"
|
||||
"long_test|tests/integration/test_e2e_streaming.py|test_segments_persisted_to_database|lines=74",
|
||||
"long_test|tests/integration/test_e2e_streaming.py|test_stream_init_recovers_streaming_turns|lines=51",
|
||||
"long_test|tests/integration/test_e2e_summarization.py|test_generate_summary_regenerates_with_force_flag|lines=52",
|
||||
"long_test|tests/integration/test_e2e_summarization.py|test_summary_with_key_points_persisted|lines=52",
|
||||
"long_test|tests/integration/test_unit_of_work_advanced.py|test_meeting_lifecycle_workflow|lines=63",
|
||||
"long_test|tests/integration/test_webhook_integration.py|test_stop_meeting_triggers_meeting_completed_webhook|lines=61"
|
||||
],
|
||||
"module_size_soft": [
|
||||
"module_size_soft|src/noteflow/config/settings.py|module|lines=566",
|
||||
"module_size_soft|src/noteflow/config/settings.py|module|lines=579",
|
||||
"module_size_soft|src/noteflow/domain/ports/repositories/identity.py|module|lines=599",
|
||||
"module_size_soft|src/noteflow/grpc/server.py|module|lines=534"
|
||||
"module_size_soft|src/noteflow/grpc/server.py|module|lines=537"
|
||||
],
|
||||
"orphaned_import": [
|
||||
"orphaned_import|src/noteflow/infrastructure/observability/otel.py|opentelemetry"
|
||||
],
|
||||
"raises_without_match": [
|
||||
"raises_without_match|tests/domain/test_project.py|test_archive_default_project_raises|line=261",
|
||||
"raises_without_match|tests/domain/test_project.py|test_default_project_cannot_be_archived|line=544",
|
||||
"raises_without_match|tests/grpc/test_stream_lifecycle.py|test_cancelled_error_propagation_in_stream|line=647",
|
||||
"raises_without_match|tests/infrastructure/asr/test_dto.py|test_word_timing_frozen|line=41",
|
||||
"raises_without_match|tests/infrastructure/audio/test_dto.py|test_audio_device_info_frozen|line=42",
|
||||
"raises_without_match|tests/infrastructure/auth/test_oidc_registry.py|test_create_provider_discovery_failure|line=154",
|
||||
"raises_without_match|tests/integration/test_e2e_annotations.py|test_add_annotation_invalid_meeting_id|line=341",
|
||||
"raises_without_match|tests/integration/test_e2e_annotations.py|test_annotations_deleted_with_meeting|line=460",
|
||||
"raises_without_match|tests/integration/test_e2e_annotations.py|test_delete_annotation_not_found_e2e|line=386",
|
||||
"raises_without_match|tests/integration/test_e2e_annotations.py|test_get_annotation_not_found|line=355",
|
||||
"raises_without_match|tests/integration/test_e2e_annotations.py|test_update_annotation_not_found|line=372",
|
||||
"raises_without_match|tests/integration/test_e2e_export.py|test_export_transcript_invalid_meeting_id|line=427",
|
||||
"raises_without_match|tests/integration/test_e2e_export.py|test_export_transcript_nonexistent_meeting|line=410",
|
||||
"raises_without_match|tests/integration/test_e2e_streaming.py|test_concurrent_streams_rejected|line=356",
|
||||
"raises_without_match|tests/integration/test_e2e_streaming.py|test_stream_init_fails_for_nonexistent_meeting|line=191",
|
||||
"raises_without_match|tests/integration/test_e2e_streaming.py|test_stream_rejects_invalid_meeting_id|line=213",
|
||||
"raises_without_match|tests/integration/test_e2e_summarization.py|test_generate_summary_invalid_meeting_id|line=499",
|
||||
"raises_without_match|tests/integration/test_e2e_summarization.py|test_generate_summary_nonexistent_meeting|line=485",
|
||||
"raises_without_match|tests/integration/test_error_handling.py|test_delete_nonexistent_annotation|line=600",
|
||||
"raises_without_match|tests/integration/test_error_handling.py|test_delete_nonexistent_meeting|line=107",
|
||||
"raises_without_match|tests/integration/test_error_handling.py|test_duplicate_job_id_rejected|line=225",
|
||||
"raises_without_match|tests/integration/test_error_handling.py|test_empty_meeting_id|line=79",
|
||||
"raises_without_match|tests/integration/test_error_handling.py|test_export_nonexistent_meeting|line=487",
|
||||
"raises_without_match|tests/integration/test_error_handling.py|test_get_nonexistent_annotation|line=569",
|
||||
"raises_without_match|tests/integration/test_error_handling.py|test_get_status_nonexistent_job|line=619",
|
||||
"raises_without_match|tests/integration/test_error_handling.py|test_invalid_uuid_format_for_meeting_id|line=65",
|
||||
"raises_without_match|tests/integration/test_error_handling.py|test_nonexistent_meeting_returns_not_found|line=93",
|
||||
"raises_without_match|tests/integration/test_error_handling.py|test_summarize_nonexistent_meeting|line=534",
|
||||
"raises_without_match|tests/integration/test_error_handling.py|test_update_nonexistent_annotation|line=586",
|
||||
"raises_without_match|tests/integration/test_grpc_servicer_database.py|test_get_nonexistent_job_returns_not_found|line=335",
|
||||
"raises_without_match|tests/integration/test_grpc_servicer_database.py|test_get_nonexistent_meeting_returns_not_found|line=166",
|
||||
"raises_without_match|tests/integration/test_project_repository.py|test_archive_default_project_raises_repository|line=276",
|
||||
"raises_without_match|tests/integration/test_trigger_settings.py|test_retention_check_interval_validation|line=91",
|
||||
"raises_without_match|tests/integration/test_trigger_settings.py|test_retention_check_interval_validation|line=98",
|
||||
"raises_without_match|tests/integration/test_trigger_settings.py|test_retention_days_validation|line=77",
|
||||
"raises_without_match|tests/integration/test_trigger_settings.py|test_retention_days_validation|line=81",
|
||||
"raises_without_match|tests/integration/test_unit_of_work_advanced.py|test_exception_during_segment_add_rolls_back_meeting|line=252",
|
||||
"raises_without_match|tests/stress/test_transaction_boundaries.py|test_batch_segment_add_rollback|line=196",
|
||||
"raises_without_match|tests/stress/test_transaction_boundaries.py|test_exception_type_does_not_matter|line=78"
|
||||
],
|
||||
"sensitive_equality": [
|
||||
"sensitive_equality|tests/domain/test_project.py|test_error_message_includes_project_id|str",
|
||||
"sensitive_equality|tests/integration/test_e2e_streaming.py|test_active_stream_removed_on_completion|str",
|
||||
@@ -245,7 +71,7 @@
|
||||
"sensitive_equality|tests/integration/test_grpc_servicer_database.py|test_refine_speaker_diarization_creates_job_in_database|str"
|
||||
],
|
||||
"sleepy_test": [
|
||||
"sleepy_test|tests/integration/test_e2e_streaming.py|test_stop_request_exits_stream_gracefully|line=456",
|
||||
"sleepy_test|tests/integration/test_e2e_streaming.py|test_stop_request_exits_stream_gracefully|line=481",
|
||||
"sleepy_test|tests/integration/test_unit_of_work_advanced.py|test_concurrent_uow_instances_isolated|line=165",
|
||||
"sleepy_test|tests/integration/test_unit_of_work_advanced.py|test_concurrent_uow_instances_isolated|line=171"
|
||||
],
|
||||
@@ -292,12 +118,6 @@
|
||||
"thin_wrapper|src/noteflow/infrastructure/persistence/memory/repositories/integration.py|get_sync_run|get",
|
||||
"thin_wrapper|src/noteflow/infrastructure/persistence/memory/repositories/webhook.py|get_by_id|get",
|
||||
"thin_wrapper|src/noteflow/infrastructure/security/crypto.py|generate_dek|token_bytes"
|
||||
],
|
||||
"unused_fixture": [
|
||||
"unused_fixture|tests/grpc/test_export_mixin.py|test_export_aborts_when_meeting_not_found|mock_meetings_repo",
|
||||
"unused_fixture|tests/grpc/test_export_mixin.py|test_export_aborts_when_meeting_not_found|mock_segments_repo",
|
||||
"unused_fixture|tests/grpc/test_stream_lifecycle.py|test_audio_writer_closed_on_cleanup|crypto",
|
||||
"unused_fixture|tests/grpc/test_stream_lifecycle.py|test_context_cancelled_check_pattern|memory_servicer"
|
||||
]
|
||||
},
|
||||
"schema_version": 1
|
||||
|
||||
@@ -129,30 +129,30 @@ class TestNeedsMigration:
|
||||
|
||||
def test_detects_stdlib_pattern(self) -> None:
|
||||
"""Detects standard stdlib logging pattern."""
|
||||
assert needs_migration(SAMPLE_STDLIB_LOGGER) is True
|
||||
assert needs_migration(SAMPLE_STDLIB_LOGGER) is True, "stdlib logging pattern should require migration"
|
||||
|
||||
def test_ignores_already_migrated(self) -> None:
|
||||
"""Already migrated files don't need migration."""
|
||||
# File has get_logger instead of logging.getLogger, so pattern won't match
|
||||
assert needs_migration(SAMPLE_ALREADY_MIGRATED) is False
|
||||
assert needs_migration(SAMPLE_ALREADY_MIGRATED) is False, "already migrated file should not need migration"
|
||||
|
||||
def test_ignores_no_logging(self) -> None:
|
||||
"""Files without logging don't need migration."""
|
||||
assert needs_migration(SAMPLE_NO_LOGGING) is False
|
||||
assert needs_migration(SAMPLE_NO_LOGGING) is False, "file without logging should not need migration"
|
||||
|
||||
def test_detects_complex_module(self) -> None:
|
||||
"""Detects logging in complex modules."""
|
||||
assert needs_migration(SAMPLE_COMPLEX_MODULE) is True
|
||||
assert needs_migration(SAMPLE_COMPLEX_MODULE) is True, "complex module with logging should require migration"
|
||||
|
||||
def test_requires_both_import_and_getlogger(self) -> None:
|
||||
"""Requires both import logging AND getLogger call."""
|
||||
# Only import, no getLogger
|
||||
only_import = "import logging\n\ndef foo(): pass"
|
||||
assert needs_migration(only_import) is False
|
||||
assert needs_migration(only_import) is False, "file with only import (no getLogger) should not need migration"
|
||||
|
||||
# Only getLogger, no import (invalid but should handle)
|
||||
only_getlogger = "logger = logging.getLogger(__name__)"
|
||||
assert needs_migration(only_getlogger) is False
|
||||
assert needs_migration(only_getlogger) is False, "file with only getLogger (no import) should not need migration"
|
||||
|
||||
|
||||
class TestHasInfrastructureImport:
|
||||
@@ -161,17 +161,17 @@ class TestHasInfrastructureImport:

    def test_detects_get_logger_import(self) -> None:
        """Detects get_logger import."""
        content = "from noteflow.infrastructure.logging import get_logger"
        assert has_infrastructure_import(content) is True
        assert has_infrastructure_import(content) is True, "should detect get_logger import from infrastructure.logging"

    def test_detects_multi_import(self) -> None:
        """Detects multi-symbol import."""
        content = "from noteflow.infrastructure.logging import get_logger, configure_logging"
        assert has_infrastructure_import(content) is True
        assert has_infrastructure_import(content) is True, "should detect multi-symbol import from infrastructure.logging"

    def test_no_import(self) -> None:
        """No import returns False."""
        content = "import logging"
        assert has_infrastructure_import(content) is False
        assert has_infrastructure_import(content) is False, "stdlib logging import should not be detected as infrastructure import"


class TestShouldSkipFile:
@@ -179,22 +179,22 @@ class TestShouldSkipFile:

    def test_skips_proto_files(self) -> None:
        """Skips protobuf generated files."""
        assert should_skip_file(Path('foo_pb2.py')) is True
        assert should_skip_file(Path('foo_pb2_grpc.py')) is True
        assert should_skip_file(Path('foo_pb2.py')) is True, "should skip *_pb2.py protobuf files"
        assert should_skip_file(Path('foo_pb2_grpc.py')) is True, "should skip *_pb2_grpc.py protobuf files"

    def test_skips_logging_module_files(self) -> None:
        """Skips files in logging module itself."""
        assert should_skip_file(
            Path('src/noteflow/infrastructure/logging/config.py')
        ) is True
        ) is True, "should skip files within infrastructure/logging module"
        assert should_skip_file(
            Path('src/noteflow/infrastructure/logging/__init__.py')
        ) is True
        ) is True, "should skip __init__.py within infrastructure/logging module"

    def test_allows_normal_files(self) -> None:
        """Allows normal Python files."""
        assert should_skip_file(Path('src/noteflow/grpc/client.py')) is False
        assert should_skip_file(Path('src/noteflow/application/services/foo.py')) is False
        assert should_skip_file(Path('src/noteflow/grpc/client.py')) is False, "should not skip normal grpc files"
        assert should_skip_file(Path('src/noteflow/application/services/foo.py')) is False, "should not skip normal application service files"


class TestTransformFile:
@@ -207,10 +207,10 @@ class TestTransformFile:

        result = transform_file(test_file)

        assert result.has_changes is True
        assert 'from noteflow.infrastructure.logging import get_logger' in result.transformed
        assert 'logger = get_logger(__name__)' in result.transformed
        assert 'logging.getLogger(__name__)' not in result.transformed
        assert result.has_changes is True, "simple module with logging should have changes"
        assert 'from noteflow.infrastructure.logging import get_logger' in result.transformed, "transformed code should include infrastructure import"
        assert 'logger = get_logger(__name__)' in result.transformed, "transformed code should use get_logger(__name__)"
        assert 'logging.getLogger(__name__)' not in result.transformed, "transformed code should not contain logging.getLogger"

    def test_transforms_complex_module(self, tmp_path: Path) -> None:
        """Transforms complex module preserving structure, removes unused import logging."""
@@ -219,15 +219,15 @@ class TestTransformFile:

        result = transform_file(test_file)

        assert result.has_changes is True
        assert result.has_changes is True, "complex module with logging should have changes"
        # import logging should be removed (not using constants)
        assert 'import logging' not in result.transformed
        assert 'import logging' not in result.transformed, "unused import logging should be removed"
        # Infrastructure import should be present
        assert 'from noteflow.infrastructure.logging import get_logger' in result.transformed
        assert 'from noteflow.infrastructure.logging import get_logger' in result.transformed, "transformed code should include infrastructure import"
        # Check getLogger replaced
        assert 'logger = get_logger(__name__)' in result.transformed
        assert 'logger = get_logger(__name__)' in result.transformed, "transformed code should use get_logger(__name__)"
        # Changes should include removal
        assert 'Removed unused import logging' in result.changes
        assert 'Removed unused import logging' in result.changes, "changes should document removal of unused import"

    def test_handles_multiple_loggers(self, tmp_path: Path) -> None:
        """Only transforms main __name__ logger, not child loggers."""
@@ -236,11 +236,11 @@ class TestTransformFile:

        result = transform_file(test_file)

        assert result.has_changes is True
        assert result.has_changes is True, "module with multiple loggers should have changes"
        # Main logger transformed
        assert 'logger = get_logger(__name__)' in result.transformed
        assert 'logger = get_logger(__name__)' in result.transformed, "main logger should be transformed to get_logger"
        # Child logger NOT transformed (different pattern)
        assert 'child_logger = logging.getLogger(__name__ + ".child")' in result.transformed
        assert 'child_logger = logging.getLogger(__name__ + ".child")' in result.transformed, "child logger with custom name should not be transformed"

    def test_no_change_for_already_migrated(self, tmp_path: Path) -> None:
        """No changes for already migrated files."""
@@ -249,7 +249,7 @@ class TestTransformFile:

        result = transform_file(test_file)

        assert result.has_changes is False
        assert result.has_changes is False, "already migrated file should have no changes"

    def test_no_change_for_no_logging(self, tmp_path: Path) -> None:
        """No changes for files without logging."""
@@ -258,7 +258,7 @@ class TestTransformFile:

        result = transform_file(test_file)

        assert result.has_changes is False
        assert result.has_changes is False, "file without logging should have no changes"

    def test_preserves_file_structure(self, tmp_path: Path) -> None:
        """Preserves docstrings, imports order, and code structure."""
@@ -268,11 +268,11 @@ class TestTransformFile:
        result = transform_file(test_file)

        # Docstring preserved
        assert '"""Complex module with various patterns."""' in result.transformed
        assert '"""Complex module with various patterns."""' in result.transformed, "module docstring should be preserved"
        # Class preserved
        assert 'class MyService:' in result.transformed
        assert 'class MyService:' in result.transformed, "class definition should be preserved"
        # Type checking block preserved
        assert 'if TYPE_CHECKING:' in result.transformed
        assert 'if TYPE_CHECKING:' in result.transformed, "TYPE_CHECKING block should be preserved"


class TestValidateTransformation:
@@ -301,7 +301,7 @@ class TestValidateTransformation:

        errors = validate_transformation(result)

        assert any('Missing infrastructure.logging import' in e for e in errors)
        assert any('Missing infrastructure.logging import' in e for e in errors), f"should detect missing import error, got: {errors}"

    def test_detects_syntax_error(self, tmp_path: Path) -> None:
        """Detects syntax errors in transformed code."""
@@ -313,7 +313,7 @@ class TestValidateTransformation:

        errors = validate_transformation(result)

        assert any('Syntax error' in e for e in errors)
        assert any('Syntax error' in e for e in errors), f"should detect syntax error, got: {errors}"

    def test_no_errors_for_unchanged(self, tmp_path: Path) -> None:
        """No errors for unchanged files."""
@@ -325,7 +325,7 @@ class TestValidateTransformation:

        errors = validate_transformation(result)

        assert errors == []
        assert errors == [], f"unchanged file should have no validation errors, got: {errors}"


class TestTransformResultDiff:
@@ -339,9 +339,9 @@ class TestTransformResultDiff:
        result = transform_file(test_file)
        diff = result.get_diff()

        assert '-logger = logging.getLogger(__name__)' in diff
        assert '+logger = get_logger(__name__)' in diff
        assert '+from noteflow.infrastructure.logging import get_logger' in diff
        assert '-logger = logging.getLogger(__name__)' in diff, "diff should show removed logging.getLogger line"
        assert '+logger = get_logger(__name__)' in diff, "diff should show added get_logger line"
        assert '+from noteflow.infrastructure.logging import get_logger' in diff, "diff should show added infrastructure import"

    def test_no_diff_for_unchanged(self, tmp_path: Path) -> None:
        """No diff for unchanged files."""
@@ -351,7 +351,7 @@ class TestTransformResultDiff:
        result = transform_file(test_file)
        diff = result.get_diff()

        assert diff == ''
        assert diff == '', f"unchanged file should have empty diff, got: {diff!r}"


class TestEndToEndScenarios:
@@ -390,10 +390,10 @@ class TestEndToEndScenarios:
        result = transform_file(test_file)
        errors = validate_transformation(result)

        assert result.has_changes is True
        assert errors == []
        assert 'from noteflow.infrastructure.logging import get_logger' in result.transformed
        assert 'logger = get_logger(__name__)' in result.transformed
        assert result.has_changes is True, "gRPC service file should have changes"
        assert errors == [], f"gRPC service transformation should have no errors, got: {errors}"
        assert 'from noteflow.infrastructure.logging import get_logger' in result.transformed, "transformed gRPC service should include infrastructure import"
        assert 'logger = get_logger(__name__)' in result.transformed, "transformed gRPC service should use get_logger"

    def test_application_service_pattern(self, tmp_path: Path) -> None:
        """Transforms typical application service file."""
@@ -428,10 +428,10 @@ class TestEndToEndScenarios:
        result = transform_file(test_file)
        errors = validate_transformation(result)

        assert result.has_changes is True
        assert errors == []
        assert result.has_changes is True, "application service file should have changes"
        assert errors == [], f"application service transformation should have no errors, got: {errors}"
        # Percent-style formatting should be preserved
        assert 'logger.info("Creating meeting: %s", title)' in result.transformed
        assert 'logger.info("Creating meeting: %s", title)' in result.transformed, "percent-style formatting should be preserved"

    def test_infrastructure_adapter_pattern(self, tmp_path: Path) -> None:
        """Transforms typical infrastructure adapter file."""
@@ -467,10 +467,10 @@ class TestEndToEndScenarios:
        result = transform_file(test_file)
        errors = validate_transformation(result)

        assert result.has_changes is True
        assert errors == []
        assert result.has_changes is True, "infrastructure adapter file should have changes"
        assert errors == [], f"infrastructure adapter transformation should have no errors, got: {errors}"
        # Exception logging preserved
        assert 'logger.exception("Query failed")' in result.transformed
        assert 'logger.exception("Query failed")' in result.transformed, "exception logging should be preserved"


class TestEdgeCases:
@@ -491,7 +491,7 @@ class TestEdgeCases:
        result = transform_file(test_file)

        # Custom name pattern doesn't match, so no migration needed
        assert result.has_changes is False
        assert result.has_changes is False, "logger with custom name should not be transformed"

    def test_multiline_getlogger(self, tmp_path: Path) -> None:
        """Handles multiline getLogger call."""
@@ -514,7 +514,7 @@ class TestEdgeCases:
        # Our regex is simple and won't match multiline
        # This is acceptable - user can migrate manually
        # The key is we don't break anything
        assert 'logging' in result.transformed  # File should still work
        assert 'logging' in result.transformed, "multiline getLogger should be preserved (not broken)"

    def test_empty_file(self, tmp_path: Path) -> None:
        """Handles empty file gracefully."""
@@ -523,8 +523,8 @@ class TestEdgeCases:

        result = transform_file(test_file)

        assert result.has_changes is False
        assert result.transformed == ''
        assert result.has_changes is False, "empty file should have no changes"
        assert result.transformed == '', "empty file transformation should produce empty string"

    def test_file_with_only_comments(self, tmp_path: Path) -> None:
        """Handles file with only comments."""
@@ -535,7 +535,7 @@ class TestEdgeCases:

        result = transform_file(test_file)

        assert result.has_changes is False
        assert result.has_changes is False, "file with only comments should have no changes"

    def test_keeps_import_logging_when_constants_used(self, tmp_path: Path) -> None:
        """Keeps import logging when logging constants are used."""
@@ -544,20 +544,20 @@ class TestEdgeCases:

        result = transform_file(test_file)

        assert result.has_changes is True
        assert result.has_changes is True, "file with logging constants should have changes"
        # import logging should be KEPT (logging.DEBUG is used)
        assert 'import logging' in result.transformed
        assert 'from noteflow.infrastructure.logging import get_logger' in result.transformed
        assert 'logger = get_logger(__name__)' in result.transformed
        assert 'import logging' in result.transformed, "import logging should be kept when constants are used"
        assert 'from noteflow.infrastructure.logging import get_logger' in result.transformed, "infrastructure import should be added"
        assert 'logger = get_logger(__name__)' in result.transformed, "logger should use get_logger"
        # Should NOT have the removal message
        assert 'Removed unused import logging' not in result.changes
        assert 'Removed unused import logging' not in result.changes, "should not remove import logging when constants are used"

    def test_uses_logging_constants_detection(self) -> None:
        """Tests uses_logging_constants function directly."""
        assert uses_logging_constants("logging.DEBUG") is True
        assert uses_logging_constants("logging.StreamHandler()") is True
        assert uses_logging_constants("logging.basicConfig()") is True
        assert uses_logging_constants("logger.info('hello')") is False
        assert uses_logging_constants("logging.DEBUG") is True, "should detect logging.DEBUG constant"
        assert uses_logging_constants("logging.StreamHandler()") is True, "should detect logging.StreamHandler usage"
        assert uses_logging_constants("logging.basicConfig()") is True, "should detect logging.basicConfig usage"
        assert uses_logging_constants("logger.info('hello')") is False, "logger.info should not be detected as constant usage"

    def test_adds_get_logger_to_existing_import(self, tmp_path: Path) -> None:
        """Adds get_logger to existing infrastructure import."""
@@ -578,7 +578,7 @@ class TestEdgeCases:

        result = transform_file(test_file)

        assert result.has_changes is True
        assert 'get_logger, configure_logging' in result.transformed
        assert 'logger = get_logger(__name__)' in result.transformed
        assert 'Added get_logger to existing import' in result.changes
        assert result.has_changes is True, "file with partial infrastructure import should have changes"
        assert 'get_logger, configure_logging' in result.transformed, "get_logger should be added to existing import"
        assert 'logger = get_logger(__name__)' in result.transformed, "logger should use get_logger"
        assert 'Added get_logger to existing import' in result.changes, "changes should document adding get_logger to existing import"
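The assertions above specify the migration helpers (needs_migration, uses_logging_constants) purely by behaviour; their bodies sit outside the diff. A minimal regex-based sketch that would satisfy these expectations, offered as an illustration only since the script's actual implementation may differ:

import re

# Matches the exact module-level stdlib pattern the tests target:
# a single line `logger = logging.getLogger(__name__)`.
_GETLOGGER_RE = re.compile(r"^logger\s*=\s*logging\.getLogger\(__name__\)\s*$", re.MULTILINE)
# Matches uses of the logging module other than getLogger, e.g. logging.DEBUG,
# logging.StreamHandler(), logging.basicConfig().
_CONSTANTS_RE = re.compile(r"\blogging\.(?!getLogger\b)[A-Za-z_]\w*")


def needs_migration(content: str) -> bool:
    """A file needs migration only if it both imports logging and binds the __name__ logger."""
    return "import logging" in content and bool(_GETLOGGER_RE.search(content))


def uses_logging_constants(content: str) -> bool:
    """True when stdlib logging is used for anything besides getLogger, so the import must stay."""
    return bool(_CONSTANTS_RE.search(content))

Anchoring the logger pattern to a whole line is what makes custom-named, child, and multiline getLogger calls fall through untouched, which is the behaviour the edge-case tests pin down.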
@@ -22,6 +22,11 @@ from noteflow.config.constants import (
# Path to Rust constants file
RUST_CONSTANTS_PATH = Path(__file__).parent.parent / "client/src-tauri/src/constants.rs"

# Expected constant values for validation tests
EXPECTED_SAMPLE_RATE_HZ = 16000  # 16kHz standard for speech recognition
EXPECTED_GRPC_PORT = 50051  # Standard gRPC port convention
EXPECTED_SECONDS_PER_HOUR = 3600  # 60 * 60


def _parse_rust_constant(content: str, module: str, name: str) -> str | None:
    """Extract a constant value from Rust source.
@@ -109,12 +114,18 @@ class TestConstantValues:

    def test_sample_rate_is_16khz(self) -> None:
        """Standard sample rate for speech recognition is 16kHz."""
        assert DEFAULT_SAMPLE_RATE == 16000, "DEFAULT_SAMPLE_RATE should be 16000 Hz"
        assert DEFAULT_SAMPLE_RATE == EXPECTED_SAMPLE_RATE_HZ, (
            f"DEFAULT_SAMPLE_RATE should be {EXPECTED_SAMPLE_RATE_HZ} Hz"
        )

    def test_grpc_port_is_standard(self) -> None:
        """Default gRPC port follows convention (50051)."""
        assert DEFAULT_GRPC_PORT == 50051, "DEFAULT_GRPC_PORT should be 50051"
        assert DEFAULT_GRPC_PORT == EXPECTED_GRPC_PORT, (
            f"DEFAULT_GRPC_PORT should be {EXPECTED_GRPC_PORT}"
        )

    def test_seconds_per_hour_is_correct(self) -> None:
        """Seconds per hour must be 3600."""
        assert SECONDS_PER_HOUR == 3600, "SECONDS_PER_HOUR should be 3600"
        assert SECONDS_PER_HOUR == EXPECTED_SECONDS_PER_HOUR, (
            f"SECONDS_PER_HOUR should be {EXPECTED_SECONDS_PER_HOUR}"
        )
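Only the signature and the opening docstring line of _parse_rust_constant appear in the hunk above; its body is outside the diff. One plausible shape for such a helper, assuming the Rust constants file groups values in `pub mod <module> { pub const NAME: <type> = <value>; }` blocks; this is a sketch under that assumption, not the repository's actual parser, and the constants.rs fragment below is hypothetical:

import re


def _parse_rust_constant(content: str, module: str, name: str) -> str | None:
    """Extract a constant value from Rust source, or None when the module or constant is absent."""
    # Grab the body of `pub mod <module> { ... }` (non-greedy; assumes no nested braces).
    mod_match = re.search(rf"pub mod {module}\s*\{{(.*?)\}}", content, re.DOTALL)
    if mod_match is None:
        return None
    # Within the block, capture the literal assigned to `pub const NAME: <type> = <value>;`.
    const_match = re.search(rf"pub const {name}\s*:\s*\w+\s*=\s*([^;]+);", mod_match.group(1))
    return const_match.group(1).strip() if const_match else None


# Hypothetical constants.rs fragment used only to exercise the sketch:
rust_src = """
pub mod audio {
    pub const SAMPLE_RATE_HZ: u32 = 16000;
}
"""
assert _parse_rust_constant(rust_src, "audio", "SAMPLE_RATE_HZ") == "16000"

Returning the raw literal as a string lets the parity tests compare it against the Python-side expected values without caring about Rust numeric types.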