deps
909
.claudectx/help.md
Normal file
@@ -0,0 +1,909 @@
=============================== warnings summary ===============================
tests/grpc/test_analytics_mixin.py::TestListSpeakerStats::test_list_speaker_stats_empty
/opt/hostedtoolcache/Python/3.12.12/x64/lib/python3.12/unittest/mock.py:2217: RuntimeWarning: coroutine 'ConfigurableEmbedder.embed' was never awaited
def __init__(self, name, parent):
Enable tracemalloc to get traceback where the object was allocated.
See https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings for more info.
tests/grpc/test_entities_mixin.py::TestExtractEntities::test_returns_extracted_entities
tests/grpc/test_entities_mixin.py::TestExtractEntities::test_returns_cached_entities
tests/grpc/test_entities_mixin.py::TestExtractEntities::test_force_refresh_bypasses_cache
tests/grpc/test_entities_mixin.py::TestExtractEntities::test_aborts_when_meeting_not_found
tests/grpc/test_entities_mixin.py::TestExtractEntities::test_aborts_when_feature_disabled
tests/grpc/test_entities_mixin.py::TestExtractEntities::test_extract_aborts_with_invalid_meeting_id_format
tests/grpc/test_entities_mixin.py::TestExtractEntities::test_returns_empty_entities_when_none_found
tests/grpc/test_entities_mixin.py::TestExtractEntities::test_includes_pinned_status_in_response
/workspace/vasceannie/noteflow/.venv/lib/python3.12/site-packages/_pytest/stash.py:108: RuntimeWarning: coroutine 'AsyncMockMixin._execute_mock_call' was never awaited
del self._storage[key]
Enable tracemalloc to get traceback where the object was allocated.
See https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings for more info.
tests/grpc/test_task_callbacks.py::TestTaskDictCleanupOnFailure::test_removes_job_on_failure
tests/grpc/test_task_callbacks.py::TestFailedTaskSchedulesMarkFailed::test_schedules_mark_failed_on_exception
tests/grpc/test_task_callbacks.py::TestMarkFailedReceivesJobId::test_receives_correct_job_id
tests/grpc/test_task_callbacks.py::TestMarkFailedReceivesErrorMessage::test_receives_error_message
tests/grpc/test_task_callbacks.py::TestVariousExceptionTypes::test_handles_exception_type[value_error]
tests/grpc/test_task_callbacks.py::TestVariousExceptionTypes::test_handles_exception_type[runtime_error]
tests/grpc/test_task_callbacks.py::TestVariousExceptionTypes::test_handles_exception_type[type_error]
tests/grpc/test_task_callbacks.py::TestClosedLoopHandling::test_does_not_raise_on_closed_loop
/workspace/vasceannie/noteflow/src/noteflow/grpc/mixins/_task_callbacks.py:83: RuntimeWarning: coroutine 'AsyncMockMixin._execute_mock_call' was never awaited
_log_and_schedule_failure(job_id, exc, mark_failed)
Enable tracemalloc to get traceback where the object was allocated.
See https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings for more info.
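The RuntimeWarning groups above share one cause: calling an `AsyncMock` produces a coroutine, and if the call happens in synchronous code or the result is discarded, that coroutine is never awaited. A minimal sketch of the pattern and the fix, with hypothetical names; running with `PYTHONTRACEMALLOC=1` (or `python -X tracemalloc`) makes the warning include the allocation traceback, as the log suggests.

```python
# Sketch of the "coroutine ... was never awaited" pattern (names are stand-ins).
import asyncio
from unittest.mock import AsyncMock

async def main() -> None:
    embedder = AsyncMock(name="embedder")

    embedder.embed("hello")          # returns a coroutine that is dropped -> RuntimeWarning at GC
    await embedder.embed("hello")    # awaiting the call consumes the coroutine; no warning

asyncio.run(main())
```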
tests/infrastructure/asr/test_engine.py::TestFasterWhisperEngine::test_load_invalid_model_size_raises
/workspace/vasceannie/noteflow/.venv/lib/python3.12/site-packages/pyannote/database/util.py:182: SyntaxWarning: invalid escape sequence '\s'
sep="\s+",
tests/infrastructure/asr/test_engine.py::TestFasterWhisperEngine::test_load_invalid_model_size_raises
/workspace/vasceannie/noteflow/.venv/lib/python3.12/site-packages/pyannote/database/util.py:216: SyntaxWarning: invalid escape sequence '\s'
sep="\s+",
tests/infrastructure/asr/test_engine.py::TestFasterWhisperEngine::test_load_invalid_model_size_raises
/workspace/vasceannie/noteflow/.venv/lib/python3.12/site-packages/pyannote/database/util.py:253: SyntaxWarning: invalid escape sequence '\s'
sep="\s+",
tests/infrastructure/asr/test_engine.py::TestFasterWhisperEngine::test_load_invalid_model_size_raises
/workspace/vasceannie/noteflow/.venv/lib/python3.12/site-packages/pyannote/database/util.py:284: SyntaxWarning: invalid escape sequence '\s'
data = pd.read_csv(file_uem, names=names, dtype=dtype, sep="\s+")
tests/infrastructure/asr/test_engine.py::TestFasterWhisperEngine::test_load_invalid_model_size_raises
/workspace/vasceannie/noteflow/.venv/lib/python3.12/site-packages/pyannote/database/util.py:309: SyntaxWarning: invalid escape sequence '\s'
data = pd.read_csv(path, names=names, dtype=dtype, sep="\s+")
tests/infrastructure/asr/test_engine.py::TestFasterWhisperEngine::test_load_invalid_model_size_raises
/workspace/vasceannie/noteflow/.venv/lib/python3.12/site-packages/pyannote/database/loader.py:93: SyntaxWarning: invalid escape sequence '\s'
file_trial, sep="\s+", names=["reference", "uri1", "uri2"]
tests/infrastructure/asr/test_engine.py::TestFasterWhisperEngine::test_load_invalid_model_size_raises
/workspace/vasceannie/noteflow/.venv/lib/python3.12/site-packages/pyannote/database/loader.py:292: SyntaxWarning: invalid escape sequence '\s'
ctm, names=names, dtype=dtype, sep="\s+"
tests/infrastructure/asr/test_engine.py::TestFasterWhisperEngine::test_load_invalid_model_size_raises
/workspace/vasceannie/noteflow/.venv/lib/python3.12/site-packages/pyannote/database/loader.py:357: SyntaxWarning: invalid escape sequence '\s'
mapping, names=names, dtype=dtype, sep="\s+"
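Every pyannote SyntaxWarning above is the same Python 3.12 complaint: `"\s+"` contains an invalid string escape, so the regex belongs in a raw string. A sketch of the difference, using pandas as in the flagged lines (the sample data is made up):

```python
from io import StringIO
import pandas as pd

data = StringIO("uri1 uri2\na b\n")

# sep="\s+" triggers SyntaxWarning on Python 3.12: \s is not a valid string escape.
# A raw string passes the same regex to pandas without the warning.
df = pd.read_csv(data, sep=r"\s+")
print(df)
```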
tests/stress/test_resource_leaks.py::TestAsyncContextEdgeCases::test_context_cancellation_during_body
tests/stress/test_resource_leaks.py::TestAsyncContextEdgeCases::test_context_cancellation_during_enter
/workspace/vasceannie/noteflow/.venv/lib/python3.12/site-packages/_pytest/raises.py:622: PytestWarning: matching against an empty string will *always* pass. If you want to check for an empty message you need to pass '^$'. If you don't want to match you should pass `None` or leave out the parameter.
super().__init__(match=match, check=check)
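The PytestWarning flags `pytest.raises(..., match="")`: an empty pattern matches every message. A sketch of the two intended spellings:

```python
import pytest

# match="" always passes, which is what the warning is about.
with pytest.raises(ValueError):              # message deliberately unchecked
    raise ValueError("anything")

with pytest.raises(ValueError, match="^$"):  # asserts the message is empty
    raise ValueError("")
```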
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
=========================== short test summary info ============================
FAILED tests/application/test_assistant_service.py::test_generates_thread_id_with_user_id - AssertionError: Expected thread_id to include meeting, user, graph, and version segments
assert False
+ where False = <built-in method startswith of str object at 0x7d5e0e1d4420>('meeting:85fcaeb0-81d2-4fbd-8bb8-494a4b36d84d:user:b766625c-2098-4b81-83ad-636d752d87c6:graph:meeting_qa:v1:')
+ where <built-in method startswith of str object at 0x7d5e0e1d4420> = 'meeting:workspace:user:b766625c-2098-4b81-83ad-636d752d87c6:graph:workspace_qa:v1:e719bfcb'.startswith
+ where 'meeting:workspace:user:b766625c-2098-4b81-83ad-636d752d87c6:graph:workspace_qa:v1:e719bfcb' = AssistantResponse(answer='AI assistant is not currently available.', citations=[], suggested_annotations=[], thread_id='meeting:workspace:user:b766625c-2098-4b81-83ad-636d752d87c6:graph:workspace_qa:v1:e719bfcb').thread_id
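The assertion expects the old `meeting:<meeting_id>:user:<user_id>:graph:meeting_qa:v1:` prefix, while the service now emits a workspace-scoped id. A sketch of what the observed format implies, parsed from the value in the failure output (the layout is inferred from that value, not from the service code):

```python
# Observed thread_id from the failure output above.
thread_id = "meeting:workspace:user:b766625c-2098-4b81-83ad-636d752d87c6:graph:workspace_qa:v1:e719bfcb"

scope, workspace, _u, user_id, _g, graph, version, suffix = thread_id.split(":")
assert (scope, graph, version) == ("meeting", "workspace_qa", "v1")
```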
FAILED tests/application/test_meeting_service.py::TestMeetingServiceStateTransitions::test_meeting_service_invalid_state_transitions_raise[cannot-stop-created] - AssertionError: Regex pattern did not match.
Expected regex: 'Cannot stop_meeting'
Actual message: 'Cannot begin stopping from state CREATED'
FAILED tests/application/test_meeting_service.py::TestMeetingServiceStateTransitions::test_meeting_service_invalid_state_transitions_raise[cannot-start-completed] - AssertionError: Regex pattern did not match.
Expected regex: 'Cannot start_recording'
Actual message: 'Cannot start recording from state COMPLETED'
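Both state-transition failures are the test regex trailing the new error wording. A sketch of matching the messages actually raised (the exception type here is a stand-in):

```python
import pytest

with pytest.raises(RuntimeError, match="Cannot begin stopping"):
    raise RuntimeError("Cannot begin stopping from state CREATED")

with pytest.raises(RuntimeError, match="Cannot start recording"):
    raise RuntimeError("Cannot start recording from state COMPLETED")
```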
FAILED tests/evaluation/test_rag_pipeline.py::TestEmbeddingQuality::test_same_text_same_embedding - RuntimeError: There is no current event loop in thread 'MainThread'.
FAILED tests/evaluation/test_rag_pipeline.py::TestEmbeddingQuality::test_custom_embeddings_differ - RuntimeError: There is no current event loop in thread 'MainThread'.
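`RuntimeError: There is no current event loop` is what Python 3.12 raises when code reaches for an implicit loop. A sketch of the usual migration, with a hypothetical embed coroutine:

```python
import asyncio

async def embed(text: str) -> list[float]:
    return [float(len(text))]

# asyncio.get_event_loop() can raise "There is no current event loop" on 3.12;
# asyncio.run() creates and closes a fresh loop per call instead.
assert asyncio.run(embed("same text")) == asyncio.run(embed("same text"))
```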
FAILED tests/grpc/test_annotation_mixin.py::TestGetAnnotation::test_returns_annotation_when_found - AssertionError: id should match request
assert '6cfd50e7-883c-4a38-afbf-9e29e025da3b' == '6cfd50e7883c4a38afbf9e29e025da3b'
- 6cfd50e7883c4a38afbf9e29e025da3b
+ 6cfd50e7-883c-4a38-afbf-9e29e025da3b
? + + + +
FAILED tests/grpc/test_annotation_mixin.py::TestUpdateAnnotation::test_updates_annotation_successfully - AssertionError: id should remain unchanged
assert '0a2e59cd-9e9e-4bdf-8bd0-0dd74077befe' == '0a2e59cd9e9e4bdf8bd00dd74077befe'
- 0a2e59cd9e9e4bdf8bd00dd74077befe
+ 0a2e59cd-9e9e-4bdf-8bd0-0dd74077befe
? + + + +
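Both annotation failures compare the canonical hyphenated UUID string against the 32-character hex form. Comparing `uuid.UUID` objects makes the check format-agnostic, as in this sketch:

```python
from uuid import UUID

# Same value, two textual forms; UUID() normalises both.
assert UUID("6cfd50e7883c4a38afbf9e29e025da3b") == UUID("6cfd50e7-883c-4a38-afbf-9e29e025da3b")
```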
FAILED tests/grpc/test_assistant.py::TestAskAssistantUnavailable::test_returns_unavailable_message - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
Field required [type=missing, input_value={}, input_type=dict]
For further information visit https://errors.pydantic.dev/2.12/v/missing
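This ValidationError repeats verbatim through most of the remaining failures, so the detail lines are shown once here: `Settings` now requires `database_url`, and the test environment supplies nothing. A sketch of one way to satisfy it, assuming pydantic-settings' default `DATABASE_URL` env mapping (fixture name and URL are hypothetical):

```python
import pytest

@pytest.fixture(autouse=True)
def _settings_env(monkeypatch: pytest.MonkeyPatch) -> None:
    # Assumes Settings reads DATABASE_URL; adjust if an env_prefix is configured.
    monkeypatch.setenv("DATABASE_URL", "sqlite+aiosqlite:///:memory:")
```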
FAILED tests/grpc/test_assistant.py::TestAskAssistantValidation::test_rejects_empty_question - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_assistant.py::TestAskAssistantValidation::test_rejects_whitespace_only_question - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_assistant.py::TestAskAssistantValidation::test_rejects_invalid_meeting_id - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_assistant.py::TestAskAssistantRequestPassing::test_passes_question - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_assistant.py::TestAskAssistantRequestPassing::test_passes_meeting_id - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_assistant.py::TestAskAssistantRequestPassing::test_uses_default_top_k - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_assistant.py::TestAskAssistantResponse::test_returns_answer - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_assistant.py::TestAskAssistantResponse::test_returns_citations - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_assistant.py::TestAskAssistantResponse::test_returns_thread_id - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_chunk_sequence_tracking.py::TestTrackChunkSequence::test_gap_detection_logging[gap_detected] - AssertionError: Gap from 1 to 3 should trigger warning
assert False == True
FAILED tests/grpc/test_chunk_sequence_tracking.py::TestTrackChunkSequence::test_gap_detection_logging[large_gap_detected] - AssertionError: Gap from 5 to 10 should trigger warning
assert False == True
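The gap-detection tests expect a warning whenever the chunk sequence jumps by more than one. A sketch of the expected behaviour (function name and log wording are hypothetical):

```python
import logging

logger = logging.getLogger("noteflow.chunks")

def track_chunk_sequence(last_seq: int, seq: int) -> None:
    # Any jump larger than +1 indicates dropped chunks and should warn.
    if seq > last_seq + 1:
        logger.warning("chunk sequence gap: %d -> %d", last_seq, seq)

track_chunk_sequence(1, 3)   # gap 1 -> 3 should warn
track_chunk_sequence(5, 10)  # gap 5 -> 10 should warn
```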
FAILED tests/grpc/test_cloud_consent.py::TestGetCloudConsentStatus::test_returns_false_when_consent_not_granted - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_cloud_consent.py::TestGetCloudConsentStatus::test_returns_true_when_consent_granted - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_cloud_consent.py::TestGetCloudConsentStatus::test_returns_false_when_service_unavailable - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_cloud_consent.py::TestGrantCloudConsent::test_grants_consent - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_cloud_consent.py::TestGrantCloudConsent::test_grant_is_idempotent - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_cloud_consent.py::TestGrantCloudConsent::test_grant_aborts_when_service_unavailable - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_cloud_consent.py::TestRevokeCloudConsent::test_revokes_consent - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_cloud_consent.py::TestRevokeCloudConsent::test_revoke_is_idempotent - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_cloud_consent.py::TestRevokeCloudConsent::test_revoke_aborts_when_service_unavailable - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_cloud_consent.py::TestConsentRoundTrip::test_grant_then_check_status - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_cloud_consent.py::TestConsentRoundTrip::test_grant_revoke_cycle - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_cloud_consent.py::TestConsentRoundTrip::test_consent_change_callback_invoked - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_diarization_refine.py::test_refine_speaker_diarization_rejects_active_meeting - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_generate_summary.py::test_generate_summary_uses_placeholder_when_service_missing - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_generate_summary.py::test_generate_summary_falls_back_when_provider_unavailable - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_meeting_mixin.py::TestStopMeeting::test_stop_recording_meeting_transitions_to_stopped - AttributeError: 'MockMeetingMixinServicerHost' object has no attribute 'embedder'
FAILED tests/grpc/test_meeting_mixin.py::TestStopMeeting::test_stop_meeting_closes_audio_writer - AttributeError: 'MockMeetingMixinServicerHost' object has no attribute 'embedder'
FAILED tests/grpc/test_meeting_mixin.py::TestStopMeeting::test_stop_meeting_triggers_webhooks - AttributeError: 'MockMeetingMixinServicerHost' object has no attribute 'embedder'
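The three `AttributeError` failures say the stop-meeting path now dereferences `embedder` on its host, and the test double predates that. A sketch of keeping the fake host in step (the AsyncMock stand-in is hypothetical; `MockMeetingMixinServicerHost` is the suite's own class):

```python
from unittest.mock import AsyncMock

class MockMeetingMixinServicerHost:
    def __init__(self) -> None:
        # New collaborator the stop-meeting path reads.
        self.embedder = AsyncMock(name="embedder")
```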
FAILED tests/grpc/test_oauth.py::TestGetCalendarProviders::test_returns_available_providers - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestGetCalendarProviders::test_returns_authentication_status_for_each_provider - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestGetCalendarProviders::test_returns_display_names - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestGetCalendarProviders::test_aborts_whencalendar_service_not_configured - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestInitiateOAuth::test_returns_auth_url_and_state - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestInitiateOAuth::test_passes_provider_to_service - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestInitiateOAuth::test_passes_custom_redirect_uri - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestInitiateOAuth::test_aborts_on_invalid_provider - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestInitiateOAuth::test_aborts_when_initiate_service_unavailable - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestCompleteOAuth::test_returns_success_on_valid_code - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestCompleteOAuth::test_passes_code_and_state_to_service - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestCompleteOAuth::test_returns_error_on_invalid_state - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestCompleteOAuth::test_returns_error_on_invalid_code - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestCompleteOAuth::test_aborts_when_complete_service_unavailable - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestGetOAuthConnectionStatus::test_returns_connected_status - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestGetOAuthConnectionStatus::test_returns_disconnected_status - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestGetOAuthConnectionStatus::test_returns_integration_type - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestGetOAuthConnectionStatus::test_aborts_when_status_service_unavailable - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestDisconnectOAuth::test_returns_success_on_disconnect - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestDisconnectOAuth::test_calls_service_disconnect - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestDisconnectOAuth::test_returns_false_when_not_connected - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestDisconnectOAuth::test_aborts_when_disconnect_service_unavailable - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestOAuthRoundTrip::test_initiate_returns_auth_url - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestOAuthRoundTrip::test_complete_updates_connection_status - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestOAuthRoundTrip::test_disconnect_clears_connection - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestOAuthRoundTrip::test_complete_with_wrong_state_fails - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestOAuthRoundTrip::test_multiple_providers_independent - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestOAuthSecurityBehavior::test_state_validation_required - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestOAuthSecurityBehavior::test_tokens_revoked_on_disconnect - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_oauth.py::TestOAuthSecurityBehavior::test_no_sensitive_data_in_error_responses - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_partial_transcription.py::TestClearPartialBuffer::test_clear_partial_buffer_empties_buffer - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_partial_transcription.py::TestClearPartialBuffer::test_clear_partial_buffer_resets_last_text - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_partial_transcription.py::TestClearPartialBuffer::test_clear_partial_buffer_updates_time - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_partial_transcription.py::TestClearPartialBuffer::test_clear_partial_buffer_handles_missing_meeting - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_partial_transcription.py::TestMaybeEmitPartial::test_returns_none_when_asr_not_loaded - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_partial_transcription.py::TestMaybeEmitPartial::test_returns_none_when_cadence_not_reached - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_partial_transcription.py::TestMaybeEmitPartial::test_returns_none_when_buffer_empty - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_partial_transcription.py::TestMaybeEmitPartial::test_returns_none_when_audio_too_short - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_partial_transcription.py::TestMaybeEmitPartial::test_emits_partial_when_conditions_met - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_partial_transcription.py::TestMaybeEmitPartial::test_debounces_duplicate_text - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_partial_transcription.py::TestMaybeEmitPartial::test_updates_last_partial_state - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_partial_transcription.py::TestPartialBufferAccumulation::test_speech_audio_added_to_buffer - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_partial_transcription.py::TestPartialBufferAccumulation::test_silence_does_not_add_to_buffer - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_partial_transcription.py::TestPartialIntegrationWithFinal::test_buffer_cleared_on_final_segment - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestStartIntegrationSync::test_start_sync_for_nonexistent_integration_fails - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestStartIntegrationSync::test_start_sync_requires_integration_id - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestSyncStatus::test_get_sync_status_for_nonexistent_run_fails - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestSyncStatus::test_get_sync_status_requires_sync_run_id - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestSyncHistory::test_list_sync_history_for_nonexistent_integration - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestSyncHistory::test_list_sync_history_default_pagination - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestSyncHappyPath::test_sync_completes_successfully - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestSyncHappyPath::test_sync_history_shows_completed_run - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestSyncErrorHandling::test_sync_fails_whencalendar_service_errors - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestSyncPolling::test_status_shows_running_while_sync_in_progress - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestGetUserIntegrations::test_returns_empty_when_no_integrations - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestGetUserIntegrations::test_returns_created_integration - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestGetUserIntegrations::test_includes_all_fields - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestGetUserIntegrations::test_excludes_deleted_integration - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestServerStateVersion::test_server_info_includes_state_version - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/grpc/test_sync_orchestration.py::TestServerStateVersion::test_state_version_is_consistent - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
FAILED tests/infrastructure/ner/test_engine.py::TestNerEngineBasics::test_is_ready_after_extract - RuntimeError: Failed to load spaCy model 'en_core_web_sm'. Run: python -m spacy download en_core_web_sm
FAILED tests/infrastructure/ner/test_engine.py::TestEntityExtraction::test_extract_entity_types[I met John Smith yesterday.-EntityCategory.PERSON-John Smith] - RuntimeError: Failed to load spaCy model 'en_core_web_sm'. Run: python -m spacy download en_core_web_sm
FAILED tests/infrastructure/ner/test_engine.py::TestEntityExtraction::test_extract_entity_types[Apple Inc. announced new products.-EntityCategory.COMPANY-Apple Inc.] - RuntimeError: Failed to load spaCy model 'en_core_web_sm'. Run: python -m spacy download en_core_web_sm
FAILED tests/infrastructure/ner/test_engine.py::TestEntityExtraction::test_extract_entity_types[We visited New York City.-EntityCategory.LOCATION-New York City] - RuntimeError: Failed to load spaCy model 'en_core_web_sm'. Run: python -m spacy download en_core_web_sm
FAILED tests/infrastructure/ner/test_engine.py::TestEntityExtraction::test_extract_entity_types[The meeting is scheduled for Monday.-EntityCategory.DATE-Monday] - RuntimeError: Failed to load spaCy model 'en_core_web_sm'. Run: python -m spacy download en_core_web_sm
FAILED tests/infrastructure/ner/test_engine.py::TestEntityExtraction::test_extract_returns_list - RuntimeError: Failed to load spaCy model 'en_core_web_sm'. Run: python -m spacy download en_core_web_sm
FAILED tests/infrastructure/ner/test_engine.py::TestEntityExtraction::test_extract_no_entities_returns_empty - RuntimeError: Failed to load spaCy model 'en_core_web_sm'. Run: python -m spacy download en_core_web_sm
FAILED tests/infrastructure/ner/test_engine.py::TestSegmentExtraction::test_extract_from_segments_tracks_segment_ids - RuntimeError: Failed to load spaCy model 'en_core_web_sm'. Run: python -m spacy download en_core_web_sm
FAILED tests/infrastructure/ner/test_engine.py::TestSegmentExtraction::test_extract_from_segments_deduplicates - RuntimeError: Failed to load spaCy model 'en_core_web_sm'. Run: python -m spacy download en_core_web_sm
FAILED tests/infrastructure/ner/test_engine.py::TestEntityNormalization::test_normalized_text_is_lowercase - RuntimeError: Failed to load spaCy model 'en_core_web_sm'. Run: python -m spacy download en_core_web_sm
FAILED tests/infrastructure/ner/test_engine.py::TestEntityNormalization::test_confidence_is_set - RuntimeError: Failed to load spaCy model 'en_core_web_sm'. Run: python -m spacy download en_core_web_sm
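Every NER failure is the missing `en_core_web_sm` model, and the error text itself gives the install command. A sketch of skipping cleanly when the model is absent (fixture name is hypothetical):

```python
import pytest

spacy = pytest.importorskip("spacy")

@pytest.fixture(scope="session")
def nlp():
    try:
        # Install with: python -m spacy download en_core_web_sm
        return spacy.load("en_core_web_sm")
    except OSError:
        pytest.skip("spaCy model en_core_web_sm is not installed")
```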
FAILED tests/infrastructure/observability/test_logging_config.py::TestCreateRenderer::test_returns_console_renderer_for_tty - AssertionError: should return ConsoleRenderer for TTY
assert False
+ where False = isinstance(<function _render_for_rich_handler at 0x7d5e27e36c00>, <class 'structlog.dev.ConsoleRenderer'>)
+ where <class 'structlog.dev.ConsoleRenderer'> = <module 'structlog.dev' from '/workspace/vasceannie/noteflow/.venv/lib/python3.12/site-packages/structlog/dev.py'>.ConsoleRenderer
+ where <module 'structlog.dev' from '/workspace/vasceannie/noteflow/.venv/lib/python3.12/site-packages/structlog/dev.py'> = structlog.dev
FAILED tests/infrastructure/observability/test_logging_processors.py::TestAddNoteflowContext::test_injects_request_id_when_set - KeyError: 'request_id'
FAILED tests/infrastructure/observability/test_logging_processors.py::TestAddNoteflowContext::test_injects_user_id_when_set - KeyError: 'user_id'
FAILED tests/infrastructure/observability/test_logging_processors.py::TestAddNoteflowContext::test_injects_workspace_id_when_set - KeyError: 'workspace_id'
FAILED tests/infrastructure/observability/test_logging_processors.py::TestAddOtelTraceContext::test_injects_trace_and_span_ids - AssertionError: should inject trace_id
assert 'trace_id' in {'event': 'test'}
FAILED tests/infrastructure/observability/test_logging_processors.py::TestBuildProcessorChain::test_includes_standard_processors - AssertionError: should include filter_by_level
assert <function filter_by_level at 0x7d5e27e22840> in [<function _safe_filter_by_level at 0x7d5e27e36480>, <function add_logger_name at 0x7d5e27e23600>, <function add_log_level at 0x7d5e27a5f560>, <structlog.stdlib.PositionalArgumentsFormatter object at 0x7d5caad97050>, <structlog.processors.TimeStamper object at 0x7d5caaf123c0>, <function add_noteflow_context at 0x7d5e27e36520>, <function add_otel_trace_context at 0x7d5e27e365c0>, <structlog.processors.CallsiteParameterAdder object at 0x7d5caaf12ec0>, <structlog.processors.StackInfoRenderer object at 0x7d5caad944f0>, <structlog.processors.ExceptionRenderer object at 0x7d5e27d73c20>, <structlog.processors.UnicodeDecoder object at 0x7d5caad97a70>]
+ where <function filter_by_level at 0x7d5e27e22840> = <module 'structlog.stdlib' from '/workspace/vasceannie/noteflow/.venv/lib/python3.12/site-packages/structlog/stdlib.py'>.filter_by_level
+ where <module 'structlog.stdlib' from '/workspace/vasceannie/noteflow/.venv/lib/python3.12/site-packages/structlog/stdlib.py'> = structlog.stdlib
FAILED tests/infrastructure/observability/test_logging_processors.py::TestBuildProcessorChain::test_processor_order_is_correct - ValueError: <function filter_by_level at 0x7d5e27e22840> is not in list
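The processor-chain assertions compare by object identity, but the chain now starts with a `_safe_filter_by_level` wrapper rather than `structlog.stdlib.filter_by_level` itself, as the assert output shows. A sketch of a name-based check that tolerates such wrappers (the wrapper below is a stand-in):

```python
def _safe_filter_by_level(logger, method_name, event_dict):
    return event_dict  # stand-in for the real wrapper in the chain

chain = [_safe_filter_by_level]
names = [getattr(p, "__name__", type(p).__name__) for p in chain]
assert any("filter_by_level" in name for name in names)
```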
FAILED tests/infrastructure/summarization/test_ollama_provider.py::TestOllamaSummarizerProperties::test_is_available_when_server_responds - AssertionError: is_available should be True when server responds
assert False is True
+ where False = <noteflow.infrastructure.summarization.ollama_provider.OllamaSummarizer object at 0x7d5ce29f52e0>.is_available
FAILED tests/infrastructure/test_integration_converters.py::TestIntegrationConverterRoundTrip::test_sync_run_domain_to_orm_to_domain_preserves_values - KeyError: 'error_message'
|
||||
FAILED tests/infrastructure/test_observability.py::TestGrpcObservabilityIntegration::test_get_recent_logs_via_grpc - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
FAILED tests/infrastructure/test_observability.py::TestGrpcObservabilityIntegration::test_get_performance_metrics_via_grpc - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
FAILED tests/integration/test_grpc_servicer_database.py::TestGetActiveDiarizationJobs::test_returns_running_job - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
FAILED tests/integration/test_grpc_servicer_database.py::TestGetActiveDiarizationJobs::test_returns_queued_job - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
FAILED tests/integration/test_grpc_servicer_database.py::TestGetActiveDiarizationJobs::test_excludes_completed_job - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
FAILED tests/integration/test_grpc_servicer_database.py::TestGetActiveDiarizationJobs::test_excludes_failed_job - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
FAILED tests/integration/test_grpc_servicer_database.py::TestServerRestartJobRecovery::test_shutdown_marks_running_job_failed - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
FAILED tests/integration/test_grpc_servicer_database.py::TestServerRestartJobRecovery::test_shutdown_marks_queued_job_failed - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
FAILED tests/integration/test_grpc_servicer_database.py::TestServerRestartJobRecovery::test_shutdown_sets_error_message - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
FAILED tests/integration/test_grpc_servicer_database.py::TestServerRestartJobRecovery::test_shutdown_preserves_completed_job - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
FAILED tests/integration/test_grpc_servicer_database.py::TestServerRestartJobRecovery::test_get_active_jobs_empty_after_shutdown - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
FAILED tests/integration/test_grpc_servicer_database.py::TestServerRestartJobRecovery::test_client_can_query_failed_job_status - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
FAILED tests/stress/test_concurrency_stress.py::TestServicerInstantiation::test_servicer_starts_with_empty_state - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
FAILED tests/stress/test_concurrency_stress.py::TestServicerInstantiation::test_multiple_servicers_independent - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
ERROR tests/grpc/test_diarization_lifecycle.py::TestDatabaseRequirement::test_refine_requires_database_support - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
ERROR tests/grpc/test_diarization_lifecycle.py::TestDatabaseRequirement::test_refine_error_mentions_database - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
ERROR tests/grpc/test_diarization_lifecycle.py::TestDatabaseRequirement::test_get_status_requires_database - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
ERROR tests/grpc/test_diarization_lifecycle.py::TestDatabaseRequirement::test_cancel_requires_database - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
ERROR tests/grpc/test_diarization_mixin.py::TestRefineSpeakerDiarizationValidation::test_refine_rejects_invalid_meeting_id_format - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
|
||||
database_url
|
||||
Field required [type=missing, input_value={}, input_type=dict]
|
||||
For further information visit https://errors.pydantic.dev/2.12/v/missing
|
||||
ERROR tests/grpc/test_diarization_mixin.py::TestRefineSpeakerDiarizationValidation::test_refine_rejects_nonexistent_meeting - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestRefineSpeakerDiarizationState::test_refine_mixin_rejects_recording_meeting - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestRefineSpeakerDiarizationState::test_refine_rejects_stopping_meeting - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestRefineSpeakerDiarizationState::test_refine_accepts_stopped_meeting - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestRefineSpeakerDiarizationServer::test_refine_returns_error_when_disabled - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestRefineSpeakerDiarizationServer::test_refine_returns_error_when_engine_unavailable - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestRenameSpeakerValidation::test_rename_aborts_on_missing_old_speaker_id - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestRenameSpeakerValidation::test_rename_aborts_on_missing_new_speaker_name - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestRenameSpeakerValidation::test_rename_aborts_on_invalid_meeting_id - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestRenameSpeakerOperation::test_rename_updates_matching_segments - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestRenameSpeakerOperation::test_rename_returns_zero_for_no_matches - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestGetDiarizationJobStatusProgress::test_status_progress_queued_is_zero - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestGetDiarizationJobStatusProgress::test_status_progress_running_is_time_based - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestGetDiarizationJobStatusProgress::test_status_progress_completed_is_full - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestGetDiarizationJobStatusProgress::test_status_progress_failed_is_zero - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestCancelDiarizationJobStates::test_cancel_mixin_queued_succeeds - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestCancelDiarizationJobStates::test_cancel_mixin_running_succeeds - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestCancelDiarizationJobStates::test_cancel_mixin_nonexistent_fails - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_diarization_mixin.py::TestCancelDiarizationJobStates::test_cancel_completed_job_fails - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestStreamCancellation::test_client_disconnect_during_audio_processing - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestStreamCancellation::test_stream_cleanup_idempotent - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestStreamCancellation::test_concurrent_cancel_and_stop - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestStreamCancellation::test_cleanup_nonexistent_meeting - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestStreamInitializationFailure::test_partial_state_on_init_failure - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestStreamInitializationFailure::test_cleanup_after_audio_writer_failure - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestDiarizationSessionCleanup::test_diarization_session_closed_on_cleanup - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestDiarizationSessionCleanup::test_multiplediarization_sessions_cleanup - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestAudioWriterCleanup::test_audio_writer_closed_on_cleanup - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestAudioWriterCleanup::test_audio_write_failure_tracking_cleared - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestServicerShutdown::test_shutdown_cleans_allactive_streams - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestServicerShutdown::test_lifecycle_shutdown_cancels_tasks - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestServicerShutdown::test_lifecycle_shutdown_idempotent - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestStreamFormatCleanup::test_stream_format_cleared_on_cleanup - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestGrpcContextCancellation::test_asyncio_cancelled_error_triggers_cleanup - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestGrpcContextCancellation::test_cleanup_in_finally_block_pattern - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestConcurrentStreamRaces::test_double_start_same_meeting_id_detected - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestConcurrentStreamRaces::test_concurrent_cleanup_same_meeting_safe - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestConcurrentStreamRaces::test_stop_request_before_stream_active - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestShutdownEdgeCases::test_shutdown_with_active_stream_state - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestShutdownEdgeCases::test_shutdown_order_tasks_before_sessions - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestGrpcContextCancellationReal::test_context_abort_on_validation_failure - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestGrpcContextCancellationReal::test_cancelled_error_propagation_in_stream - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestShutdownRaceConditions::test_new_stream_rejected_during_shutdown - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestShutdownRaceConditions::test_concurrent_shutdown_and_stream_cleanup - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestShutdownRaceConditions::test_shutdown_during_task_creation - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestShutdownRaceConditions::test_webhook_close_during_active_delivery - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestDiarizationJobRaceConditions::test_job_completion_vs_shutdown_race - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestDiarizationJobRaceConditions::test_job_status_overwrite_protection - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestStreamInitLockTimeout::test_lock_acquired_within_timeout - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestStreamInitLockTimeout::test_lock_available_after_release - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestImprovedCleanupGuarantees::test_cleanup_removes_fromactive_streams - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestImprovedCleanupGuarantees::test_cleanup_idempotent_first_call - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestImprovedCleanupGuarantees::test_cleanup_idempotent_second_call - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestImprovedCleanupGuarantees::test_cleanup_on_never_initialized_meeting - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_stream_lifecycle.py::TestImprovedCleanupGuarantees::test_cleanup_with_partial_state_no_vad - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_sync_orchestration.py::TestSyncHappyPath::test_start_sync_returns_running_status - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_sync_orchestration.py::TestSyncErrorHandling::test_first_sync_fails - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_sync_orchestration.py::TestSyncErrorHandling::test_retry_after_failure_succeeds - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_sync_orchestration.py::TestConcurrentSyncs::test_multiple_integrations_can_sync - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_sync_orchestration.py::TestConcurrentSyncs::test_each_integration_has_own_history - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_sync_orchestration.py::TestSyncPolling::test_sync_run_includes_duration - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_sync_orchestration.py::TestNotFoundStatusCode::test_nonexistent_aborts_with_not_found[start_sync] - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_sync_orchestration.py::TestNotFoundStatusCode::test_nonexistent_aborts_with_not_found[get_sync_status] - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_sync_orchestration.py::TestSyncRunExpiryMetadata::test_sync_status_includes_expires_at - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/grpc/test_sync_orchestration.py::TestSyncRunExpiryMetadata::test_sync_run_cache_tracks_times - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestServicerShutdown::test_shutdown_with_noactive_streams - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestServicerShutdown::test_shutdown_cleansactive_streams - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestServicerShutdown::test_shutdown_cancelsdiarization_tasks - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestServicerShutdown::test_shutdown_marks_cancelled_jobs_failed - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestServicerShutdown::test_shutdown_idempotent - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestServicerShutdown::test_shutdown_closesaudio_writers - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestStreamingStateCleanup::test_cleanup_allactive_streams - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestStreamingStateCleanup::test_cleanup_withdiarization_sessions - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestTaskCancellation::test_long_running_task_cancellation - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestTaskCancellation::test_task_with_exception_handling - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestTaskCancellation::test_mixed_task_states_on_shutdown - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestResourceCleanupOrder::test_diarization_before_audio - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestResourceCleanupOrder::test_tasks_cancelled_before_sessions_closed - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestConcurrentShutdown::test_concurrent_shutdown_calls_safe - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/integration/test_signal_handling.py::TestConcurrentShutdown::test_new_operations_during_shutdown - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/stress/test_concurrency_stress.py::TestStreamingStateInitialization::test_init_with_different_segment_ids - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/stress/test_concurrency_stress.py::TestCleanupStreamingState::test_cleanup_removes_all_state_dictionaries - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/stress/test_concurrency_stress.py::TestCleanupStreamingState::test_cleanup_idempotent - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/stress/test_concurrency_stress.py::TestCleanupStreamingState::test_cleanup_nonexistent_meeting_no_error - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/stress/test_concurrency_stress.py::TestConcurrentStreamInitialization::test_concurrent_init_different_meetings - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/stress/test_concurrency_stress.py::TestConcurrentStreamInitialization::test_concurrent_cleanup_different_meetings - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/stress/test_concurrency_stress.py::TestNoMemoryLeaksUnderLoad::test_stream_cycles_cleanup_completely - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/stress/test_concurrency_stress.py::TestNoMemoryLeaksUnderLoad::test_interleaved_init_cleanup - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/stress/test_concurrency_stress.py::TestActiveStreamsTracking::test_discard_nonexistent_no_error - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/stress/test_concurrency_stress.py::TestDiarizationStateCleanup::test_diarization_failed_set_cleaned - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/stress/test_resource_leaks.py::TestCoroutineLeaks::test_no_orphaned_tasks_after_shutdown - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
ERROR tests/stress/test_resource_leaks.py::TestCoroutineLeaks::test_task_cleanup_on_exception - pydantic_core._pydantic_core.ValidationError: 1 validation error for Settings
database_url
  Field required [type=missing, input_value={}, input_type=dict]
    For further information visit https://errors.pydantic.dev/2.12/v/missing
= 131 failed, 2948 passed, 3 skipped, 669 deselected, 27 warnings, 97 errors in 76.60s (0:01:16) =

❌ Failure - Main Run unit tests (non-integration)
exitcode '1': failure
8
.sisyphus/boulder.json
Normal file
@@ -0,0 +1,8 @@
{
  "active_plan": "/home/trav/repos/noteflow/.sisyphus/plans/client-optimizations.md",
  "started_at": "2026-01-24T21:27:42.621Z",
  "session_ids": [
    "ses_40e285aaffferWRTsYIZV1SPdY"
  ],
  "plan_name": "client-optimizations"
}
577
.sisyphus/plans/client-optimizations.md
Normal file
@@ -0,0 +1,577 @@
# Client Optimizations: Deduplication, Optimistic UI, Analytics Cache

## Context

### Original Request

1. Server-side caching for analytics aggregations
2. Implement optimistic UI updates for CRUD operations
3. Add request deduplication for concurrent API calls

### Interview Summary

**Key Discussions**:
- Priority order: Request Deduplication > Optimistic UI > Analytics Caching
- Dedup window: 5 seconds (tight - catches double-clicks and rapid re-renders)
- Dedup key: Command name + serialized args (strict equality)
- Optimistic rollback: Toast notification + auto-rollback
- Analytics: Single server deployment (in-memory cache sufficient)
- Test approach: TDD

**Research Findings**:
- TanStack Query is NOT used - custom `useAsyncData`/`useMutation` hooks exist
- No request deduplication currently - each `invoke()` goes directly to Tauri
- Rust `MemoryCache` exists with LRU + TTL in `client/src-tauri/src/cache/memory.rs`
- Python `AnalyticsService` has 60s TTL cache but no invalidation on meeting completion
- Toast system exists in `use-toast.ts`

### Metis Review

**Identified Gaps** (addressed):
- "Match TanStack Query staleTime" → Corrected to fixed 5-second window
- Phased approach recommended → TS-only dedup first, add Rust if profiling shows need
- Memory leak risk in dedup map → Will use `finally` cleanup + TTL sweep
- Analytics invalidation trigger → On meeting completion (state → COMPLETED)

---

## Work Objectives

### Core Objective

Improve client responsiveness and efficiency by preventing duplicate API calls, providing instant UI feedback for mutations, and ensuring analytics data stays fresh.

### Concrete Deliverables

1. Request deduplication layer (`client/src/lib/request/dedup.ts`)
2. Optimistic mutation wrapper (`client/src/hooks/data/use-optimistic-mutation.ts`)
3. Analytics cache invalidation trigger (Python backend)
4. Rust-layer dedup cache (if TS-only proves insufficient)

### Definition of Done

- [ ] `npm run test` passes with new dedup + optimistic tests
- [ ] `pytest tests/application/services/analytics/` passes
- [ ] Manual verification: double-click meeting creation shows single request
- [ ] Manual verification: delete meeting shows instant removal, rollback on failure

### Must Have

- Promise sharing for in-flight requests (not result caching)
- Strict equality for dedup keys (command + serialized args)
- Toast notification on rollback
- Analytics cache invalidated when meeting completes

### Must NOT Have (Guardrails)

- Redis or distributed caching (out of scope)
- Auto-retry logic for failed mutations
- UI component changes (hooks/utilities only)
- Modifications to `use-async-data.ts` (create new wrapper)
- `any` type annotations (use generics with type maps)
- Deduplication for streaming commands (audio chunks)

---

## Verification Strategy (MANDATORY)

### Test Decision

- **Infrastructure exists**: YES (vitest for TS, pytest for Python)
- **User wants tests**: TDD
- **Framework**: vitest (TS), pytest (Python)

### TDD Structure

Each TODO follows RED-GREEN-REFACTOR:
1. **RED**: Write failing test first
2. **GREEN**: Implement minimum code to pass
3. **REFACTOR**: Clean up while keeping green

---

## Task Flow

```
Phase 1: Request Deduplication (TS Layer)
Task 1 → Task 2 → Task 3 → Task 4

Phase 2: Optimistic UI Updates
Task 5 → Task 6 → Task 7

Phase 3: Analytics Cache (Backend)
Task 8 → Task 9

Phase 4: Rust Layer Dedup (If Needed)
Task 10 (optional - based on profiling)
```

## Parallelization

| Group | Tasks | Reason |
|-------|-------|--------|
| A | 5, 8 | Independent after Phase 1 complete |

| Task | Depends On | Reason |
|------|------------|--------|
| 2 | 1 | Core dedup logic needed first |
| 3 | 2 | Integration with invoke() |
| 4 | 3 | Full integration tests |
| 6 | 5 | Optimistic wrapper needed first |
| 7 | 6 | Integration tests |
| 9 | 8 | Analytics invalidation logic |
| 10 | 4, 9 | Only if profiling shows need |

---

## TODOs

### Phase 1: Request Deduplication (TypeScript Layer)

- [x] 1. Create dedup core module with Promise sharing

**What to do**:
- Create `client/src/lib/request/dedup.ts`
- Implement `InFlightRequestMap` class with:
  - `get(key: string): Promise<T> | undefined`
  - `set(key: string, promise: Promise<T>): void`
  - `delete(key: string): void`
  - `clear(): void`
- Implement TTL-based cleanup sweep (5-second window)
- Implement `createDedupKey(command: string, args: unknown): string` using JSON serialization

**Test cases (RED first)**:
- Identical requests within 5s share same Promise
- Different args create different keys
- Expired entries are cleaned up
- Failed requests clean up entry in finally block

**Must NOT do**:
- Cache resolved values (only share Promises)
- Use `any` type (use generics)

**Parallelizable**: NO (foundation)

**References**:

**Pattern References**:
- `client/src/lib/cache/meeting-cache.ts:46-75` - TTL tracking pattern
- `client/src/lib/cache/meeting-cache.ts:139-147` - Debounced cleanup pattern

**Type References**:
- `client/src/api/types/core.ts` - Request/response types

**Test References**:
- `client/src/lib/cache/meeting-cache.test.ts` - Cache testing patterns

**Acceptance Criteria**:

- [ ] Test file created: `client/src/lib/request/dedup.test.ts`
- [ ] Tests cover: Promise sharing, key generation, TTL expiry, cleanup
- [ ] `npm run test -- client/src/lib/request/dedup.test.ts` → PASS

**Manual Verification**:
- [ ] Using Node REPL or test:
```typescript
const map = new InFlightRequestMap(5000);
const p1 = Promise.resolve('result');
map.set('key', p1);
expect(map.get('key')).toBe(p1); // Same Promise reference
```

**Commit**: YES
- Message: `feat(client): add request deduplication core module`
- Files: `client/src/lib/request/dedup.ts`, `client/src/lib/request/dedup.test.ts`
- Pre-commit: `npm run test -- client/src/lib/request/dedup.test.ts`
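
To make the contract above concrete, here is a minimal sketch of the module, assuming only the names listed in this task (`InFlightRequestMap`, `createDedupKey`, and the 5-second window exported as `DEDUP_TTL_MS`, which the test file later in this commit imports); the committed implementation may differ in its internals:

```typescript
// Sketch only - illustrates the Task 1 contract, not the shipped code.
export const DEDUP_TTL_MS = 5000;

// Strict equality: same command + same serialized args => same key.
export function createDedupKey(command: string, args: unknown): string {
  return `${command}:${JSON.stringify(args)}`;
}

interface Entry<T> {
  promise: Promise<T>;
  storedAt: number;
}

export class InFlightRequestMap<T> {
  private entries = new Map<string, Entry<T>>();

  constructor(private readonly ttlMs: number = DEDUP_TTL_MS) {}

  get(key: string): Promise<T> | undefined {
    const entry = this.entries.get(key);
    if (entry === undefined) return undefined;
    if (Date.now() - entry.storedAt > this.ttlMs) {
      // Lazy TTL sweep: expired entries are dropped on access.
      this.entries.delete(key);
      return undefined;
    }
    return entry.promise;
  }

  set(key: string, promise: Promise<T>): void {
    this.entries.set(key, { promise, storedAt: Date.now() });
  }

  delete(key: string): void {
    this.entries.delete(key);
  }

  clear(): void {
    this.entries.clear();
  }
}
```

Note that the map stores the Promise itself, never its resolved value, so concurrent callers share work without this becoming a result cache.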
---

- [ ] 2. Create deduplicated invoke wrapper

**What to do**:
- Create `client/src/lib/request/deduped-invoke.ts`
- Implement `createDedupedInvoke(invoke: TauriInvoke, window?: number): TauriInvoke`
- Wrap original invoke to check dedup map before calling
- If in-flight exists, return same Promise
- If not, create new request, store Promise, clean up on settle

**Test cases (RED first)**:
- Two rapid calls to same command share Promise
- Different commands don't share
- Error in first call propagates to all waiters
- Cleanup happens in finally (no memory leak)

**Must NOT do**:
- Modify original invoke signature
- Apply to streaming commands (audio chunks)

**Parallelizable**: NO (depends on 1)

**References**:

**Pattern References**:
- `client/src/api/adapters/tauri/api.ts:26-48` - How invoke is composed
- `client/src/lib/request/dedup.ts` - Core dedup module (from Task 1)

**Type References**:
- `client/src/api/adapters/tauri/types.ts:1-20` - TauriInvoke type

**Acceptance Criteria**:

- [ ] Test file created: `client/src/lib/request/deduped-invoke.test.ts`
- [ ] Tests cover: Promise sharing, error propagation, cleanup
- [ ] `npm run test -- client/src/lib/request/deduped-invoke.test.ts` → PASS

**Commit**: YES
- Message: `feat(client): add deduplicated invoke wrapper`
- Files: `client/src/lib/request/deduped-invoke.ts`, `client/src/lib/request/deduped-invoke.test.ts`
- Pre-commit: `npm run test -- client/src/lib/request`
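
A sketch of the wrapper shape, reusing the Task 1 module; the `TauriInvoke` alias here is an assumption standing in for the real type in `client/src/api/adapters/tauri/types.ts`:

```typescript
// Sketch only - the real TauriInvoke type lives in the adapter layer.
import { InFlightRequestMap, createDedupKey, DEDUP_TTL_MS } from './dedup';

type TauriInvoke = <T>(command: string, args?: Record<string, unknown>) => Promise<T>;

export function createDedupedInvoke(invoke: TauriInvoke, windowMs: number = DEDUP_TTL_MS): TauriInvoke {
  const inFlight = new InFlightRequestMap<unknown>(windowMs);

  return function dedupedInvoke<T>(command: string, args?: Record<string, unknown>): Promise<T> {
    const key = createDedupKey(command, args ?? {});
    const existing = inFlight.get(key);
    if (existing !== undefined) return existing as Promise<T>;

    const promise = invoke<T>(command, args).finally(() => {
      // Cleanup on settle: a rejection still reaches every waiter,
      // and settled entries never linger in the map.
      inFlight.delete(key);
    });
    inFlight.set(key, promise);
    return promise;
  };
}
```

Because the entry is removed in `finally`, the first call's error propagates to all sharers and the map cannot leak settled requests - the two failure modes the test cases above pin down.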
---

- [ ] 3. Integrate dedup wrapper into Tauri API factory

**What to do**:
- Modify `client/src/api/adapters/tauri/api.ts`
- Wrap `invoke` with `createDedupedInvoke()` before passing to section factories
- Add optional `dedupWindow` config parameter
- Export constants for default window (5000ms)

**Test cases (RED first)**:
- API factory applies dedup wrapper
- Config parameter changes window
- Dedup can be disabled (window = 0)

**Must NOT do**:
- Break existing API interface
- Apply dedup to streaming commands

**Parallelizable**: NO (depends on 2)

**References**:

**Pattern References**:
- `client/src/api/adapters/tauri/api.ts` - Current factory implementation
- `client/src/lib/request/deduped-invoke.ts` - Wrapper (from Task 2)

**Acceptance Criteria**:

- [ ] Existing tests still pass: `npm run test -- client/src/api`
- [ ] New integration test added
- [ ] `npm run test -- client/src/api` → PASS

**Commit**: YES
- Message: `feat(client): integrate request dedup into Tauri API`
- Files: `client/src/api/adapters/tauri/api.ts`
- Pre-commit: `npm run test -- client/src/api`
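
The integration itself could be a few lines at the top of the factory; `createTauriApi` and `buildSections` are placeholders for whatever `api.ts` actually exports, so treat this as a shape, not the real file:

```typescript
// Hypothetical factory shape - shows where the wrapper slots in.
import { createDedupedInvoke } from '../../../lib/request/deduped-invoke';
import type { TauriInvoke } from './types';

export const DEFAULT_DEDUP_WINDOW_MS = 5000;

interface TauriApiConfig {
  dedupWindow?: number; // 0 disables deduplication entirely
}

declare function buildSections(invoke: TauriInvoke): unknown; // placeholder for the existing section factories

export function createTauriApi(rawInvoke: TauriInvoke, config: TauriApiConfig = {}) {
  const windowMs = config.dedupWindow ?? DEFAULT_DEDUP_WINDOW_MS;
  // Streaming commands (audio chunks) must keep using rawInvoke.
  const invoke = windowMs > 0 ? createDedupedInvoke(rawInvoke, windowMs) : rawInvoke;
  return buildSections(invoke);
}
```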
---

- [ ] 4. Add E2E dedup verification tests

**What to do**:
- Add test in `client/src/api/adapters/tauri/__tests__/dedup.test.ts`
- Mock invoke to count calls
- Fire 3 concurrent requests to same command
- Verify invoke called only once
- Verify all 3 callers receive same result

**Must NOT do**:
- Modify production code (test-only task)

**Parallelizable**: NO (depends on 3)

**References**:

**Test References**:
- `client/src/api/adapters/tauri/__tests__/core-mapping.test.ts` - Test patterns
- `client/src/api/adapters/tauri/__tests__/test-utils.ts:15-35` - Mock utilities

**Acceptance Criteria**:

- [ ] Test file created: `client/src/api/adapters/tauri/__tests__/dedup.test.ts`
- [ ] Tests verify call count reduction
- [ ] `npm run test -- client/src/api/adapters/tauri/__tests__/dedup` → PASS

**Commit**: YES
- Message: `test(client): add E2E tests for request deduplication`
- Files: `client/src/api/adapters/tauri/__tests__/dedup.test.ts`
- Pre-commit: `npm run test -- client/src/api`
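
The core assertion is a call count; a sketch of that test with vitest (the mock setup is illustrative):

```typescript
import { describe, it, expect, vi } from 'vitest';
import { createDedupedInvoke } from '../../../../lib/request/deduped-invoke';

// Same assumed alias as in the Task 2 sketch.
type TauriInvoke = <T>(command: string, args?: Record<string, unknown>) => Promise<T>;

describe('request deduplication (end to end)', () => {
  it('collapses three concurrent identical calls into one invoke', async () => {
    const rawInvoke = vi.fn(() => Promise.resolve('result'));
    const invoke = createDedupedInvoke(rawInvoke as unknown as TauriInvoke);

    const results = await Promise.all([
      invoke('getMeeting', { id: 'meeting-1' }),
      invoke('getMeeting', { id: 'meeting-1' }),
      invoke('getMeeting', { id: 'meeting-1' }),
    ]);

    expect(rawInvoke).toHaveBeenCalledTimes(1); // only one real call went out
    expect(results).toEqual(['result', 'result', 'result']); // every caller shares it
  });
});
```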
---

### Phase 2: Optimistic UI Updates

- [ ] 5. Create optimistic mutation hook

**What to do**:
- Create `client/src/hooks/data/use-optimistic-mutation.ts`
- Implement `useOptimisticMutation<TData, TVariables, TContext>` hook
- Accept `mutationFn`, `onMutate` (for optimistic update), `onError` (for rollback), `onSuccess`
- Store previous state in context for rollback
- Integrate with toast system for error notification

**Test cases (RED first)**:
- `onMutate` called before mutation
- `onSuccess` called after success
- `onError` called and context passed for rollback
- Toast shown on error

**Must NOT do**:
- Modify existing `useMutation` hook
- Implement auto-retry

**Parallelizable**: YES (with Task 8 after Phase 1)

**References**:

**Pattern References**:
- `client/src/hooks/data/use-async-data.ts:182-240` - Existing mutation hook pattern
- `client/src/hooks/data/use-guarded-mutation.ts` - Guarded mutation pattern
- `client/src/hooks/use-toast.ts` - Toast integration

**Type References**:
- `client/src/api/types/core.ts` - Data types

**Test References**:
- `client/src/hooks/data/use-async-data.test.tsx:99-140` - Mutation testing

**Acceptance Criteria**:

- [ ] Test file created: `client/src/hooks/data/use-optimistic-mutation.test.tsx`
- [ ] Tests cover: onMutate, onSuccess, onError with rollback, toast
- [ ] `npm run test -- client/src/hooks/data/use-optimistic-mutation` → PASS

**Commit**: YES
- Message: `feat(client): add optimistic mutation hook with rollback`
- Files: `client/src/hooks/data/use-optimistic-mutation.ts`, `client/src/hooks/data/use-optimistic-mutation.test.tsx`
- Pre-commit: `npm run test -- client/src/hooks/data`
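
A minimal sketch of the hook's contract; the `toast` import is an assumption about what `use-toast.ts` exposes, and the real hook may track more state:

```typescript
// Sketch only - option names mirror the task description above.
import { useCallback, useState } from 'react';
import { toast } from '../use-toast'; // hypothetical import path and shape

export interface OptimisticMutationOptions<TData, TVariables, TContext> {
  mutationFn: (variables: TVariables) => Promise<TData>;
  /** Apply the optimistic update and return a context used to roll it back. */
  onMutate: (variables: TVariables) => TContext;
  /** Restore previous state from the context. */
  onError?: (error: unknown, variables: TVariables, context: TContext) => void;
  onSuccess?: (data: TData, variables: TVariables) => void;
}

export function useOptimisticMutation<TData, TVariables, TContext>(
  options: OptimisticMutationOptions<TData, TVariables, TContext>,
) {
  const [isPending, setIsPending] = useState(false);

  const mutate = useCallback(
    async (variables: TVariables): Promise<TData> => {
      const context = options.onMutate(variables); // optimistic update happens first
      setIsPending(true);
      try {
        const data = await options.mutationFn(variables);
        options.onSuccess?.(data, variables);
        return data;
      } catch (error) {
        options.onError?.(error, variables, context); // auto-rollback
        toast({ title: 'Operation failed', description: 'Your change was rolled back.' });
        throw error;
      } finally {
        setIsPending(false);
      }
    },
    [options],
  );

  return { mutate, isPending };
}
```

No retry logic appears anywhere in the sketch, matching the "no auto-retry" guardrail.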
---

- [ ] 6. Implement optimistic patterns for Meeting CRUD

**What to do**:
- Create `client/src/hooks/meetings/use-meeting-mutations.ts`
- Implement `useCreateMeeting` with optimistic insert to cache
- Implement `useDeleteMeeting` with optimistic removal from cache
- Use meeting cache (`meetingCache`) for state management
- Rollback restores previous cache state

**Test cases (RED first)**:
- Create meeting: optimistic insert visible immediately
- Create failure: rollback removes optimistic item, toast shown
- Delete meeting: optimistic removal visible immediately
- Delete failure: rollback restores item, toast shown

**Must NOT do**:
- Modify UI components
- Change cache interface

**Parallelizable**: NO (depends on 5)

**References**:

**Pattern References**:
- `client/src/lib/cache/meeting-cache.ts:183-224` - Cache mutation methods
- `client/src/hooks/data/use-optimistic-mutation.ts` - Optimistic hook (from Task 5)

**API References**:
- `client/src/api/interfaces/domains.ts:186-190` - Meeting CRUD interface

**Acceptance Criteria**:

- [ ] Test file created: `client/src/hooks/meetings/use-meeting-mutations.test.tsx`
- [ ] Tests cover: optimistic insert, optimistic remove, rollback scenarios
- [ ] `npm run test -- client/src/hooks/meetings` → PASS

**Manual Verification**:
- [ ] Using Playwright or manual testing:
  - Create meeting → appears instantly in list
  - Simulate backend failure → meeting disappears, toast shown

**Commit**: YES
- Message: `feat(client): add optimistic meeting mutations`
- Files: `client/src/hooks/meetings/use-meeting-mutations.ts`, `client/src/hooks/meetings/use-meeting-mutations.test.tsx`
- Pre-commit: `npm run test -- client/src/hooks/meetings`
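
One way the delete case could read, building on the Task 5 sketch; the `MeetingCache` interface here is invented for illustration (the real methods live in `meeting-cache.ts`):

```typescript
// Sketch only - cache and API shapes are assumptions, not the shipped interfaces.
import { useOptimisticMutation } from '../data/use-optimistic-mutation';

interface Meeting {
  id: string;
  title: string;
}

interface MeetingCache {
  getAll(): Meeting[];
  remove(id: string): void;
  replaceAll(meetings: Meeting[]): void;
}

export function useDeleteMeeting(
  cache: MeetingCache,
  api: { deleteMeeting(id: string): Promise<void> },
) {
  return useOptimisticMutation<void, string, Meeting[]>({
    mutationFn: (id) => api.deleteMeeting(id),
    onMutate: (id) => {
      const snapshot = cache.getAll(); // previous state, kept for rollback
      cache.remove(id);                // optimistic removal, visible immediately
      return snapshot;
    },
    onError: (_error, _id, snapshot) => {
      cache.replaceAll(snapshot);      // failure restores the snapshot; the hook shows the toast
    },
  });
}
```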
---

- [ ] 7. Extend optimistic patterns to Annotations and Projects

**What to do**:
- Create `client/src/hooks/annotations/use-annotation-mutations.ts`
- Create `client/src/hooks/projects/use-project-mutations.ts`
- Follow same pattern as meeting mutations
- Handle annotation cache (per-meeting) appropriately

**Test cases (RED first)**:
- Add annotation: optimistic insert
- Delete annotation: optimistic removal
- Create project: optimistic insert
- Delete project: optimistic removal
- Rollback for each operation

**Parallelizable**: NO (depends on 6)

**References**:

**Pattern References**:
- `client/src/hooks/meetings/use-meeting-mutations.ts` - Meeting mutations (from Task 6)

**API References**:
- `client/src/api/interfaces/domains.ts:236-237` - Annotation CRUD
- `client/src/api/interfaces/domains.ts:153-160` - Project CRUD

**Acceptance Criteria**:

- [ ] Test files created for annotations and projects
- [ ] `npm run test -- client/src/hooks/annotations` → PASS
- [ ] `npm run test -- client/src/hooks/projects` → PASS

**Commit**: YES
- Message: `feat(client): add optimistic annotation and project mutations`
- Files: `client/src/hooks/annotations/*`, `client/src/hooks/projects/*`
- Pre-commit: `npm run test -- client/src/hooks`

---

### Phase 3: Analytics Cache (Backend)

- [ ] 8. Add analytics cache invalidation on meeting completion

**What to do**:
- Modify `src/noteflow/grpc/_mixins/meeting/meeting_mixin.py`
- In `_complete_meeting()` or equivalent, call `analytics_service.invalidate_cache(workspace_id)`
- Ensure workspace_id is available in context
- Add logging for invalidation events

**Test cases (RED first)**:
- Meeting completion triggers cache invalidation
- Correct workspace_id passed
- Invalidation logged

**Must NOT do**:
- Add Redis/distributed cache
- Invalidate on recording stop (only on completion)

**Parallelizable**: YES (with Task 5 after Phase 1)

**References**:

**Pattern References**:
- `src/noteflow/application/services/analytics/service.py:193-215` - invalidate_cache method
- `src/noteflow/grpc/_mixins/meeting/meeting_mixin.py` - Meeting lifecycle

**Service References**:
- `src/noteflow/grpc/service.py:180` - analytics_service injection

**Acceptance Criteria**:

- [ ] Test added to verify invalidation called on completion
- [ ] `pytest tests/grpc/mixins/test_meeting_mixin.py` → PASS

**Commit**: YES
- Message: `feat(backend): invalidate analytics cache on meeting completion`
- Files: `src/noteflow/grpc/_mixins/meeting/*.py`
- Pre-commit: `pytest tests/grpc/mixins/test_meeting_mixin.py`

---

- [ ] 9. Add analytics cache invalidation tests

**What to do**:
- Add integration test verifying end-to-end flow
- Complete meeting → analytics cache invalidated → next query hits DB
- Verify cache miss logged after invalidation

**Must NOT do**:
- Modify production code (test-only task)

**Parallelizable**: NO (depends on 8)

**References**:

**Test References**:
- `tests/application/services/analytics/` - Analytics test patterns

**Acceptance Criteria**:

- [ ] Integration test added
- [ ] `pytest tests/application/services/analytics/` → PASS

**Commit**: YES
- Message: `test(backend): add analytics cache invalidation integration tests`
- Files: `tests/application/services/analytics/test_cache_invalidation.py`
- Pre-commit: `pytest tests/application/services/analytics/`

---

### Phase 4: Rust Layer Dedup (Optional)

- [ ] 10. Add Rust-layer request deduplication (if profiling shows need)

**What to do**:
- ONLY implement if profiling shows duplicate gRPC calls despite TS dedup
- Use existing `MemoryCache` in `client/src-tauri/src/cache/memory.rs`
- Create `InFlightRequests` map for gRPC calls
- Apply same Promise-sharing pattern

**Trigger condition**:
- Profile shows >5% duplicate gRPC calls after TS dedup
- Or latency shows benefit from Rust-level caching

**Parallelizable**: NO (depends on 4, 9)

**References**:

**Pattern References**:
- `client/src-tauri/src/cache/memory.rs:103-148` - MemoryCache implementation
- `client/src-tauri/src/grpc/client/core.rs` - gRPC client

**Acceptance Criteria**:

- [ ] Profiling data shows need (document in PR)
- [ ] Rust tests pass: `cargo test --package noteflow-lib`

**Commit**: YES (if implemented)
- Message: `feat(tauri): add Rust-layer request deduplication`
- Files: `client/src-tauri/src/cache/dedup.rs`
- Pre-commit: `cargo test --package noteflow-lib`

---

## Commit Strategy

| After Task | Message | Files | Verification |
|------------|---------|-------|--------------|
| 1 | `feat(client): add request deduplication core module` | `client/src/lib/request/dedup.ts` | `npm run test -- client/src/lib/request` |
| 2 | `feat(client): add deduplicated invoke wrapper` | `client/src/lib/request/deduped-invoke.ts` | `npm run test -- client/src/lib/request` |
| 3 | `feat(client): integrate request dedup into Tauri API` | `client/src/api/adapters/tauri/api.ts` | `npm run test -- client/src/api` |
| 4 | `test(client): add E2E tests for request deduplication` | `client/src/api/adapters/tauri/__tests__/dedup.test.ts` | `npm run test -- client/src/api` |
| 5 | `feat(client): add optimistic mutation hook with rollback` | `client/src/hooks/data/use-optimistic-mutation.ts` | `npm run test -- client/src/hooks/data` |
| 6 | `feat(client): add optimistic meeting mutations` | `client/src/hooks/meetings/use-meeting-mutations.ts` | `npm run test -- client/src/hooks/meetings` |
| 7 | `feat(client): add optimistic annotation and project mutations` | `client/src/hooks/annotations/*`, `client/src/hooks/projects/*` | `npm run test -- client/src/hooks` |
| 8 | `feat(backend): invalidate analytics cache on meeting completion` | `src/noteflow/grpc/_mixins/meeting/*.py` | `pytest tests/grpc/mixins/test_meeting_mixin.py` |
| 9 | `test(backend): add analytics cache invalidation integration tests` | `tests/application/services/analytics/test_cache_invalidation.py` | `pytest tests/application/services/analytics/` |
| 10 | `feat(tauri): add Rust-layer request deduplication` (optional) | `client/src-tauri/src/cache/dedup.rs` | `cargo test --package noteflow-lib` |

---

## Success Criteria

### Verification Commands

```bash
# TypeScript tests
npm run test -- client/src/lib/request  # Dedup tests
npm run test -- client/src/hooks  # Optimistic mutation tests
npm run test -- client/src/api  # Integration tests

# Python tests
pytest tests/application/services/analytics/  # Analytics cache tests
pytest tests/grpc/mixins/test_meeting_mixin.py  # Meeting mixin tests

# Manual verification
# 1. Double-click "New Meeting" → only 1 meeting created
# 2. Delete meeting → instant removal, restore on failure
# 3. Complete meeting → analytics dashboard shows updated stats
```

### Final Checklist

- [ ] All "Must Have" present
- [ ] All "Must NOT Have" absent
- [ ] All TypeScript tests pass (`npm run test`)
- [ ] All Python tests pass (`pytest`)
- [ ] No `any` types introduced
- [ ] No modifications to `use-async-data.ts`
@@ -96,7 +96,7 @@ describe('preferences storage', () => {

   it('logs when override storage fails', () => {
     const setItemSpy = vi
-      .spyOn(localStorage, 'setItem')
+      .spyOn(Storage.prototype, 'setItem')
       .mockImplementation(() => {
         throw new Error('fail');
       });
@@ -218,7 +218,7 @@ describe('preferences storage', () => {

   it('logs when saving preferences fails', () => {
     const setItemSpy = vi
-      .spyOn(localStorage, 'setItem')
+      .spyOn(Storage.prototype, 'setItem')
       .mockImplementation(() => {
         throw new Error('fail');
       });
@@ -261,7 +261,7 @@ describe('preferences storage', () => {
     expect(localStorage.getItem(MODEL_CATALOG_CACHE_KEY)).toBeNull();

     const removeSpy = vi
-      .spyOn(localStorage, 'removeItem')
+      .spyOn(Storage.prototype, 'removeItem')
       .mockImplementation(() => {
         throw new Error('fail');
       });
270
client/src/lib/request/dedup.test.ts
Normal file
@@ -0,0 +1,270 @@
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { InFlightRequestMap, createDedupKey, DEDUP_TTL_MS } from './dedup';

const TIMER_ADVANCE_SMALL = 100;
const TIMER_ADVANCE_HALF_TTL = DEDUP_TTL_MS / 2;
const TIMER_ADVANCE_NEAR_TTL = DEDUP_TTL_MS - 500;
const TIMER_ADVANCE_PAST_TTL = DEDUP_TTL_MS + 1;
const TIMER_ADVANCE_BEFORE_TTL = DEDUP_TTL_MS - 1;
const TIMER_ADVANCE_HALF_PLUS_SMALL = DEDUP_TTL_MS / 2 + 100;
const RAPID_CYCLE_COUNT = 100;
const PAGINATION_LIMIT = 10;
const PAGINATION_PAGE = 1;

describe('createDedupKey', () => {
  it('generates consistent keys for same command and args', () => {
    const key1 = createDedupKey('getMeeting', { id: 'meeting-1' });
    const key2 = createDedupKey('getMeeting', { id: 'meeting-1' });
    expect(key1).toBe(key2);
  });

  it('generates different keys for different args', () => {
    const key1 = createDedupKey('getMeeting', { id: 'meeting-1' });
    const key2 = createDedupKey('getMeeting', { id: 'meeting-2' });
    expect(key1).not.toBe(key2);
  });

  it('generates different keys for different commands', () => {
    const key1 = createDedupKey('getMeeting', { id: 'meeting-1' });
    const key2 = createDedupKey('listMeetings', { id: 'meeting-1' });
    expect(key1).not.toBe(key2);
  });

  it('handles complex nested args', () => {
    const args = {
      filter: { status: 'completed', tags: ['important', 'urgent'] },
      pagination: { page: PAGINATION_PAGE, limit: PAGINATION_LIMIT },
    };
    const key1 = createDedupKey('search', args);
    const key2 = createDedupKey('search', args);
    expect(key1).toBe(key2);
  });

  it('treats different arg order as different keys', () => {
    const key1 = createDedupKey('cmd', { a: 1, b: 2 });
    const key2 = createDedupKey('cmd', { b: 2, a: 1 });
    // JSON.stringify preserves insertion order, so these should be different
    // unless we normalize, which we don't - this is intentional for strict equality
    expect(key1).not.toBe(key2);
  });
});

describe('InFlightRequestMap', () => {
  let map: InFlightRequestMap<string>;

  beforeEach(() => {
    map = new InFlightRequestMap();
    vi.useFakeTimers();
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  describe('basic operations', () => {
    it('stores and retrieves promises', () => {
      const promise = Promise.resolve('result');
      const key = 'test-key';

      map.set(key, promise);
      const retrieved = map.get(key);

      expect(retrieved).toBe(promise);
    });

    it('returns undefined for non-existent keys', () => {
      expect(map.get('non-existent')).toBeUndefined();
    });

    it('deletes entries', () => {
      const promise = Promise.resolve('result');
      const key = 'test-key';

      map.set(key, promise);
      expect(map.get(key)).toBe(promise);

      map.delete(key);
      expect(map.get(key)).toBeUndefined();
    });

    it('clears all entries', () => {
      map.set('key1', Promise.resolve('result1'));
      map.set('key2', Promise.resolve('result2'));

      map.clear();

      expect(map.get('key1')).toBeUndefined();
      expect(map.get('key2')).toBeUndefined();
    });
  });

  describe('promise sharing', () => {
    it('shares the same promise reference for duplicate requests', async () => {
      const promise = new Promise<string>((resolve) => {
        setTimeout(() => resolve('result'), TIMER_ADVANCE_SMALL);
      });

      const key = 'shared-key';
      map.set(key, promise);

      const retrieved1 = map.get(key);
      const retrieved2 = map.get(key);

      expect(retrieved1).toBe(retrieved2);
      expect(retrieved1).toBe(promise);
    });

    it('allows multiple different promises to coexist', () => {
      const promise1 = Promise.resolve('result1');
      const promise2 = Promise.resolve('result2');

      map.set('key1', promise1);
      map.set('key2', promise2);

      expect(map.get('key1')).toBe(promise1);
      expect(map.get('key2')).toBe(promise2);
    });
  });

  describe('TTL-based cleanup', () => {
    it('removes entries after TTL expires', () => {
      const promise = Promise.resolve('result');
      const key = 'ttl-key';

      map.set(key, promise);
      expect(map.get(key)).toBe(promise);
// Advance time past TTL
|
||||
vi.advanceTimersByTime(TIMER_ADVANCE_PAST_TTL);
|
||||
|
||||
expect(map.get(key)).toBeUndefined();
|
||||
});
|
||||
|
||||
it('does not remove entries before TTL expires', () => {
|
||||
const promise = Promise.resolve('result');
|
||||
const key = 'ttl-key';
|
||||
|
||||
map.set(key, promise);
|
||||
|
||||
// Advance time but not past TTL
|
||||
vi.advanceTimersByTime(TIMER_ADVANCE_BEFORE_TTL);
|
||||
|
||||
expect(map.get(key)).toBe(promise);
|
||||
});
|
||||
|
||||
it('resets TTL on subsequent access', () => {
|
||||
const promise = Promise.resolve('result');
|
||||
const key = 'ttl-key';
|
||||
|
||||
map.set(key, promise);
|
||||
|
||||
// Advance time to near TTL
|
||||
vi.advanceTimersByTime(TIMER_ADVANCE_NEAR_TTL);
|
||||
|
||||
// Access the promise (should reset TTL)
|
||||
map.get(key);
|
||||
|
||||
// Advance time another 4500ms (total 9000ms from original set)
|
||||
vi.advanceTimersByTime(TIMER_ADVANCE_NEAR_TTL);
|
||||
|
||||
// Should still exist because TTL was reset on access
|
||||
expect(map.get(key)).toBe(promise);
|
||||
});
|
||||
|
||||
it('cleans up multiple expired entries', () => {
|
||||
const promise1 = Promise.resolve('result1');
|
||||
const promise2 = Promise.resolve('result2');
|
||||
const promise3 = Promise.resolve('result3');
|
||||
|
||||
map.set('key1', promise1);
|
||||
map.set('key2', promise2);
|
||||
|
||||
vi.advanceTimersByTime(TIMER_ADVANCE_HALF_TTL);
|
||||
|
||||
map.set('key3', promise3);
|
||||
|
||||
// Advance past first two entries' TTL
|
||||
vi.advanceTimersByTime(TIMER_ADVANCE_HALF_PLUS_SMALL);
|
||||
|
||||
expect(map.get('key1')).toBeUndefined();
|
||||
expect(map.get('key2')).toBeUndefined();
|
||||
expect(map.get('key3')).toBe(promise3);
|
||||
});
|
||||
});
|
||||
|
||||
describe('cleanup in finally block', () => {
|
||||
it('removes entry after promise settles (success)', async () => {
|
||||
const promise = Promise.resolve('result');
|
||||
const key = 'cleanup-key';
|
||||
|
||||
map.set(key, promise);
|
||||
expect(map.get(key)).toBe(promise);
|
||||
|
||||
// Wait for promise to settle
|
||||
await promise;
|
||||
|
||||
// Advance timers to trigger cleanup sweep
|
||||
vi.advanceTimersByTime(TIMER_ADVANCE_SMALL);
|
||||
|
||||
// Entry should be cleaned up
|
||||
expect(map.get(key)).toBeUndefined();
|
||||
});
|
||||
|
||||
it('removes entry after promise rejects (error)', async () => {
|
||||
const promise = Promise.reject(new Error('test error')).catch(() => {
|
||||
// Suppress unhandled rejection
|
||||
});
|
||||
const key = 'cleanup-error-key';
|
||||
|
||||
map.set(key, promise);
|
||||
expect(map.get(key)).toBe(promise);
|
||||
|
||||
// Wait for promise to settle
|
||||
await promise;
|
||||
|
||||
// Advance timers to trigger cleanup sweep
|
||||
vi.advanceTimersByTime(TIMER_ADVANCE_SMALL);
|
||||
|
||||
// Entry should be cleaned up
|
||||
expect(map.get(key)).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('edge cases', () => {
|
||||
it('handles overwriting existing key', () => {
|
||||
const promise1 = Promise.resolve('result1');
|
||||
const promise2 = Promise.resolve('result2');
|
||||
const key = 'overwrite-key';
|
||||
|
||||
map.set(key, promise1);
|
||||
expect(map.get(key)).toBe(promise1);
|
||||
|
||||
map.set(key, promise2);
|
||||
expect(map.get(key)).toBe(promise2);
|
||||
});
|
||||
|
||||
it('handles deleting non-existent key gracefully', () => {
|
||||
expect(() => {
|
||||
map.delete('non-existent');
|
||||
}).not.toThrow();
|
||||
});
|
||||
|
||||
it('handles clearing empty map', () => {
|
||||
expect(() => {
|
||||
map.clear();
|
||||
}).not.toThrow();
|
||||
});
|
||||
|
||||
it('handles rapid set/get/delete cycles', () => {
|
||||
const promise = Promise.resolve('result');
|
||||
const key = 'rapid-key';
|
||||
|
||||
for (let i = 0; i < RAPID_CYCLE_COUNT; i++) {
|
||||
map.set(key, promise);
|
||||
expect(map.get(key)).toBe(promise);
|
||||
map.delete(key);
|
||||
expect(map.get(key)).toBeUndefined();
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
122
client/src/lib/request/dedup.ts
Normal file
@@ -0,0 +1,122 @@
/**
 * Request deduplication with Promise sharing
 *
 * Prevents duplicate in-flight requests by sharing Promises across callers.
 * Uses TTL-based cleanup to prevent memory leaks.
 */

export const DEDUP_TTL_MS = 5000;
const CLEANUP_INTERVAL_MS = 1000;

interface InFlightEntry<T> {
  promise: Promise<T>;
  timestamp: number;
}

/**
 * Generates a deduplication key from command name and arguments.
 * Uses JSON serialization for strict equality checking.
 */
export function createDedupKey(command: string, args: unknown): string {
  return `${command}:${JSON.stringify(args)}`;
}

/**
 * Map for storing in-flight Promises with TTL-based cleanup.
 * Shares Promises across duplicate requests to prevent redundant work.
 */
export class InFlightRequestMap<T> {
  private map: Map<string, InFlightEntry<T>> = new Map();
  private cleanupTimer: ReturnType<typeof setInterval> | null = null;

  constructor() {
    this.startCleanupSweep();
  }

  /**
   * Retrieves a Promise from the map and resets its TTL.
   * Returns undefined if the entry has expired or doesn't exist.
   */
  get(key: string): Promise<T> | undefined {
    const entry = this.map.get(key);
    if (!entry) {
      return undefined;
    }

    const now = Date.now();
    const age = now - entry.timestamp;

    if (age > DEDUP_TTL_MS) {
      this.map.delete(key);
      return undefined;
    }

    entry.timestamp = now;
    return entry.promise;
  }

  /**
   * Stores a Promise in the map with current timestamp.
   * Attaches cleanup logic to remove entry after Promise settles.
   */
  set(key: string, promise: Promise<T>): void {
    const entry: InFlightEntry<T> = {
      promise,
      timestamp: Date.now(),
    };

    this.map.set(key, entry);

    promise
      .then(() => {
        this.delete(key);
      })
      .catch(() => {
        this.delete(key);
      });
  }

  /**
   * Removes an entry from the map.
   */
  delete(key: string): void {
    this.map.delete(key);
  }

  /**
   * Clears all entries from the map.
   */
  clear(): void {
    this.map.clear();
  }

  /**
   * Starts periodic cleanup sweep to remove expired entries.
   */
  private startCleanupSweep(): void {
    this.cleanupTimer = setInterval(() => {
      const now = Date.now();
      const keysToDelete: string[] = [];

      for (const [key, entry] of this.map.entries()) {
        if (now - entry.timestamp > DEDUP_TTL_MS) {
          keysToDelete.push(key);
        }
      }

      for (const key of keysToDelete) {
        this.map.delete(key);
      }
    }, CLEANUP_INTERVAL_MS);
  }

  /**
   * Stops the cleanup sweep timer.
   */
  private stopCleanupSweep(): void {
    if (this.cleanupTimer !== null) {
      clearInterval(this.cleanupTimer);
      this.cleanupTimer = null;
    }
  }
}
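
For orientation, here is how a command wrapper might consume the map above; the `invoke` transport and `dedupedInvoke` wrapper are assumptions for illustration, not part of this commit:

```typescript
import { InFlightRequestMap, createDedupKey } from './dedup';

// Hypothetical wrapper — `invoke` stands in for whatever transport issues the call.
const inFlight = new InFlightRequestMap<unknown>();

async function dedupedInvoke<T>(
  invoke: (command: string, args: unknown) => Promise<T>,
  command: string,
  args: unknown,
): Promise<T> {
  const key = createDedupKey(command, args);
  const existing = inFlight.get(key);
  if (existing) {
    // A duplicate caller shares the in-flight Promise instead of re-invoking.
    return existing as Promise<T>;
  }
  const promise = invoke(command, args);
  inFlight.set(key, promise); // set() also removes the entry once it settles
  return promise;
}
```

Under this sketch, a double-click on "New Meeting" resolves both clicks against the same Promise, which is the behavior the manual verification steps check.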

@@ -35,7 +35,7 @@ describe('storage utils', () => {
  });

  it('logs and returns false on write errors', () => {
    vi.spyOn(localStorage, 'setItem').mockImplementation(() => {
    vi.spyOn(Storage.prototype, 'setItem').mockImplementation(() => {
      throw new Error('boom');
    });

@@ -49,7 +49,7 @@ describe('storage utils', () => {
    localStorage.setItem('remove', '1');
    expect(removeStorage('remove', 'ctx')).toBe(true);

    vi.spyOn(localStorage, 'removeItem').mockImplementation(() => {
    vi.spyOn(Storage.prototype, 'removeItem').mockImplementation(() => {
      throw new Error('boom');
    });

@@ -79,7 +79,7 @@ describe('storage utils', () => {
  });

  it('logs raw write errors', () => {
    vi.spyOn(localStorage, 'setItem').mockImplementation(() => {
    vi.spyOn(Storage.prototype, 'setItem').mockImplementation(() => {
      throw new Error('boom');
    });

@@ -206,7 +206,7 @@ def _build_job_status(job: DiarizationJob) -> noteflow_pb2.DiarizationJobStatus:
    """Build proto status from a diarization job."""
    return noteflow_pb2.DiarizationJobStatus(
        job_id=job.job_id,
        status=noteflow_pb2.JobStatus(job.status),
        status=cast(noteflow_pb2.JobStatus, job.status),
        segments_updated=job.segments_updated,
        speaker_ids=job.speaker_ids,
        error_message=job.error_message,

@@ -51,7 +51,7 @@ def add_noteflow_context(
    return event_dict


def _safe_filter_by_level(
def safe_filter_by_level(
    logger: WrappedLogger | None,
    method_name: str,
    event_dict: EventDict,

@@ -125,7 +125,7 @@ def build_processor_chain(config: LoggingConfig) -> Sequence[Processor]:
    """
    processors: list[Processor] = [
        # Filter by level early
        _safe_filter_by_level,
        safe_filter_by_level,
        # Add standard fields
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,

@@ -143,23 +143,21 @@ def build_processor_chain(config: LoggingConfig) -> Sequence[Processor]:
        processors.append(add_otel_trace_context)

    # Additional standard processors
    processors.extend(
        [
            # Add callsite information (file, function, line)
            structlog.processors.CallsiteParameterAdder(
                parameters=[
                    structlog.processors.CallsiteParameter.FILENAME,
                    structlog.processors.CallsiteParameter.FUNC_NAME,
                    structlog.processors.CallsiteParameter.LINENO,
                ]
            ),
            # Stack traces if requested
            structlog.processors.StackInfoRenderer(),
            # Exception formatting
            structlog.processors.format_exc_info,
            # Decode bytes to strings
            structlog.processors.UnicodeDecoder(),
        ]
    )
    processors.extend([
        # Add callsite information (file, function, line)
        structlog.processors.CallsiteParameterAdder(
            parameters=[
                structlog.processors.CallsiteParameter.FILENAME,
                structlog.processors.CallsiteParameter.FUNC_NAME,
                structlog.processors.CallsiteParameter.LINENO,
            ]
        ),
        # Stack traces if requested
        structlog.processors.StackInfoRenderer(),
        # Exception formatting
        structlog.processors.format_exc_info,
        # Decode bytes to strings
        structlog.processors.UnicodeDecoder(),
    ])

    return processors

@@ -133,6 +133,7 @@ def test_response_with_single_citation() -> None:

@pytest.mark.asyncio
async def test_generates_thread_id_with_user_id() -> None:
    """Unavailable fallback generates workspace-scoped thread ID even if meeting_id provided."""
    service = AssistantService()
    request = AssistantRequest(
        question=SAMPLE_QUESTION,

@@ -142,9 +143,10 @@ async def test_generates_thread_id_with_user_id() -> None:

    response = await service.ask(request)

    expected_prefix = f"meeting:{SAMPLE_MEETING_ID}:user:{SAMPLE_USER_ID}:graph:meeting_qa:v1:"
    # Unavailable fallback always uses workspace scope for thread_id
    expected_prefix = f"meeting:workspace:user:{SAMPLE_USER_ID}:graph:workspace_qa:v1:"
    assert response.thread_id.startswith(expected_prefix), (
        "Expected thread_id to include meeting, user, graph, and version segments"
        "Expected thread_id to include workspace, user, graph, and version segments"
    )
    session_id = response.thread_id.removeprefix(expected_prefix)
    assert len(session_id) == THREAD_SESSION_ID_LENGTH, "Expected session id length"

@@ -141,16 +141,24 @@ class TestMeetingServiceStateTransitions:
        mock_uow.commit.assert_called_once()

    @pytest.mark.parametrize(
        ("initial_state", "action_method"),
        ("initial_state", "action_method", "error_action"),
        [
            pytest.param(MeetingState.CREATED, "stop_meeting", id="cannot-stop-created"),
            pytest.param(MeetingState.COMPLETED, "start_recording", id="cannot-start-completed"),
            pytest.param(
                MeetingState.CREATED, "stop_meeting", "begin stopping", id="cannot-stop-created"
            ),
            pytest.param(
                MeetingState.COMPLETED,
                "start_recording",
                "start recording",
                id="cannot-start-completed",
            ),
        ],
    )
    async def test_meeting_service_invalid_state_transitions_raise(
        self,
        initial_state: MeetingState,
        action_method: str,
        error_action: str,
        mock_uow: MagicMock,
    ) -> None:
        """Test invalid state transitions raise ValueError and do not commit."""

@@ -162,7 +170,7 @@ class TestMeetingServiceStateTransitions:

        service = MeetingService(mock_uow)

        with pytest.raises(ValueError, match=f"Cannot {action_method}"):
        with pytest.raises(ValueError, match=f"Cannot {error_action} from state"):
            await getattr(service, action_method)(meeting.id)
        mock_uow.commit.assert_not_called()

@@ -13,15 +13,12 @@ Run explicitly with: pytest tests/benchmarks/ -m slow

from __future__ import annotations

import pytest

# Mark all tests in this module as slow (excluded from CI unit tests)
pytestmark = pytest.mark.slow
from datetime import UTC, datetime
from typing import TYPE_CHECKING, cast
from uuid import UUID

import numpy as np
import pytest
from typing import TYPE_CHECKING
from uuid import UUID
from numpy.typing import NDArray
from pytest_benchmark.fixture import BenchmarkFixture

@@ -33,15 +30,27 @@ from noteflow.application.services.voice_profile.service import (
from noteflow.config.constants import DEFAULT_SAMPLE_RATE
from noteflow.domain.entities.segment import Segment, WordTiming
from noteflow.domain.value_objects import AudioSource, MeetingId, SpeakerRole
from noteflow.grpc.mixins.converters import (
    SegmentBuildParams,
    create_segment_from_asr,
    log_entry_to_proto,
    metrics_to_proto,
    segment_to_proto_update,
)
from noteflow.grpc.proto import noteflow_pb2
from noteflow.infrastructure.asr.dto import AsrResult, WordTiming as AsrWordTiming
from noteflow.infrastructure.asr.segmenter import AudioSegment, Segmenter, SegmenterConfig
from noteflow.infrastructure.asr.streaming_vad import EnergyVad, StreamingVad
from noteflow.infrastructure.audio.levels import RmsLevelProvider, compute_rms
from noteflow.infrastructure.audio.partial_buffer import PartialAudioBuffer
from noteflow.infrastructure.logging.log_buffer import LogEntry
from noteflow.infrastructure.metrics.collector import PerformanceMetrics

# Mark all tests in this module as slow (excluded from CI unit tests)
pytestmark = pytest.mark.slow

if TYPE_CHECKING:
    from noteflow.grpc.mixins.converters import SegmentBuildParams
    from noteflow.infrastructure.asr.dto import AsrResult
    pass


def _run_benchmark(benchmark: BenchmarkFixture, func: object, *args: object) -> object:

@@ -53,8 +62,6 @@ def _run_benchmark(benchmark: BenchmarkFixture, func: object, *args: object) ->
    The cast is required because BenchmarkFixture.__call__ is untyped in
    pytest-benchmark (no type stubs available).
    """
    from typing import cast

    # cast required: pytest-benchmark lacks type stubs
    return cast(object, benchmark(func, *args))

@@ -73,8 +80,6 @@ def typed_benchmark[T](
        func: The function to benchmark
        *args: Arguments to pass to func
    """
    from typing import cast

    return cast(T, _run_benchmark(benchmark, func, *args))


@@ -90,8 +95,6 @@ def benchmark_array(
        func: The function to benchmark (must return NDArray[np.float32])
        *args: Arguments to pass to func
    """
    from typing import cast

    return cast(NDArray[np.float32], _run_benchmark(benchmark, func, *args))


@@ -105,8 +108,6 @@ def benchmark_list(benchmark: BenchmarkFixture, func: object, *args: object) ->
        func: The function to benchmark (must return list[AudioSegment])
        *args: Arguments to pass to func
    """
    from typing import cast

    return cast(list[AudioSegment], _run_benchmark(benchmark, func, *args))


@@ -122,15 +123,11 @@ def benchmark_array_list(
        func: The function to benchmark (must return list[NDArray[np.float32]])
        *args: Arguments to pass to func
    """
    from typing import cast

    return cast(list[NDArray[np.float32]], _run_benchmark(benchmark, func, *args))


def benchmark_float_list(benchmark: BenchmarkFixture, func: object, *args: object) -> list[float]:
    """Run benchmark for functions returning list of floats."""
    from typing import cast

    return cast(list[float], _run_benchmark(benchmark, func, *args))

@@ -212,8 +209,6 @@ def segment_with_words() -> Segment:
@pytest.fixture
def asr_result() -> AsrResult:
    """Create an ASR result for segment build benchmarks."""
    from noteflow.infrastructure.asr.dto import AsrResult, WordTiming as AsrWordTiming

    words = (
        AsrWordTiming(word="hello", start=0.0, end=0.25, probability=0.95),
        AsrWordTiming(word="world", start=0.25, end=0.5, probability=0.92),

@@ -235,8 +230,6 @@ def asr_result() -> AsrResult:
@pytest.fixture
def segment_build_params() -> SegmentBuildParams:
    """Create segment build parameters for ASR conversion benchmarks."""
    from noteflow.grpc.mixins.converters import SegmentBuildParams

    return SegmentBuildParams(
        meeting_id=MEETING_ID,
        segment_id=ASR_SEGMENT_ID,

@@ -282,8 +275,6 @@ def performance_metrics() -> PerformanceMetrics:
@pytest.fixture
def log_entry() -> LogEntry:
    """Create a sample log entry for converter benchmarks."""
    from datetime import UTC, datetime

    return LogEntry(
        timestamp=datetime.now(tz=UTC),
        level="info",

@@ -552,7 +543,6 @@ class TestPartialBufferComparisonBenchmark:
        self, benchmark: BenchmarkFixture, audio_chunk: NDArray[np.float32]
    ) -> None:
        """Benchmark NEW pattern: pre-allocated buffer (20 chunks = 2s)."""
        from noteflow.infrastructure.audio.partial_buffer import PartialAudioBuffer

        def new_pattern() -> NDArray[np.float32]:
            buffer = PartialAudioBuffer(sample_rate=SAMPLE_RATE)

@@ -567,8 +557,6 @@ class TestPartialBufferComparisonBenchmark:
        self, benchmark: BenchmarkFixture, audio_chunk: NDArray[np.float32]
    ) -> None:
        """Benchmark pre-allocated buffer append only (no get_audio)."""
        from noteflow.infrastructure.audio.partial_buffer import PartialAudioBuffer

        buffer = PartialAudioBuffer(sample_rate=SAMPLE_RATE)

        def append_only() -> None:

@@ -581,8 +569,6 @@ class TestPartialBufferComparisonBenchmark:
        self, benchmark: BenchmarkFixture, audio_chunk: NDArray[np.float32]
    ) -> None:
        """Benchmark pre-allocated buffer get_audio only (pre-filled)."""
        from noteflow.infrastructure.audio.partial_buffer import PartialAudioBuffer

        buffer = PartialAudioBuffer(sample_rate=SAMPLE_RATE)
        for _ in range(TYPICAL_PARTIAL_CHUNKS):
            buffer.append(audio_chunk)

@@ -612,8 +598,6 @@ class TestPartialBufferComparisonBenchmark:
        self, benchmark: BenchmarkFixture, audio_chunk: NDArray[np.float32]
    ) -> None:
        """Benchmark NEW pattern: 10 cycles with buffer reuse."""
        from noteflow.infrastructure.audio.partial_buffer import PartialAudioBuffer

        # Buffer created once (simulates per-meeting initialization)
        buffer = PartialAudioBuffer(sample_rate=SAMPLE_RATE)

@@ -636,12 +620,10 @@ class TestAsrSegmentBuildBenchmarks:
    def test_create_segment_from_asr(
        self,
        benchmark: BenchmarkFixture,
        asr_result: "AsrResult",
        segment_build_params: "SegmentBuildParams",
        asr_result: AsrResult,
        segment_build_params: SegmentBuildParams,
    ) -> None:
        """Benchmark create_segment_from_asr conversion."""
        from noteflow.grpc.mixins.converters import create_segment_from_asr

        result = typed_benchmark(
            benchmark,
            Segment,

@@ -664,9 +646,6 @@ class TestGrpcConverterBenchmarks:
        segment_with_words: Segment,
    ) -> None:
        """Benchmark segment_to_proto_update conversion."""
        from noteflow.grpc.mixins.converters import segment_to_proto_update
        from noteflow.grpc.proto import noteflow_pb2

        result = typed_benchmark(
            benchmark,
            noteflow_pb2.TranscriptUpdate,

@@ -682,9 +661,6 @@ class TestGrpcConverterBenchmarks:
        performance_metrics: PerformanceMetrics,
    ) -> None:
        """Benchmark metrics_to_proto conversion."""
        from noteflow.grpc.mixins.converters import metrics_to_proto
        from noteflow.grpc.proto import noteflow_pb2

        result = typed_benchmark(
            benchmark,
            noteflow_pb2.PerformanceMetricsPoint,

@@ -699,9 +675,6 @@ class TestGrpcConverterBenchmarks:
        log_entry: LogEntry,
    ) -> None:
        """Benchmark log_entry_to_proto conversion."""
        from noteflow.grpc.mixins.converters import log_entry_to_proto
        from noteflow.grpc.proto import noteflow_pb2

        result = typed_benchmark(
            benchmark,
            noteflow_pb2.LogEntryProto,

43
tests/benchmarks/test_hot_paths.py_top
Normal file
@@ -0,0 +1,43 @@
"""Benchmark tests for NoteFlow hot paths.

These benchmarks measure the performance of frequently-called code paths
to establish baselines and detect regressions.

Run with: pytest tests/benchmarks/ --benchmark-enable
Compare: pytest tests/benchmarks/ --benchmark-compare
Save baseline: pytest tests/benchmarks/ --benchmark-save=baseline

Note: These tests are marked as slow and excluded from CI unit test runs.
Run explicitly with: pytest tests/benchmarks/ -m slow
"""

from __future__ import annotations

from typing import TYPE_CHECKING
from uuid import UUID

import numpy as np
import pytest
from numpy.typing import NDArray
from pytest_benchmark.fixture import BenchmarkFixture

from noteflow.application.services.voice_profile.service import (
    EMBEDDING_DIM,
    cosine_similarity,
    merge_embeddings,
)
from noteflow.config.constants import DEFAULT_SAMPLE_RATE
from noteflow.domain.entities.segment import Segment, WordTiming
from noteflow.domain.value_objects import AudioSource, MeetingId, SpeakerRole
from noteflow.infrastructure.asr.segmenter import AudioSegment, Segmenter, SegmenterConfig
from noteflow.infrastructure.asr.streaming_vad import EnergyVad, StreamingVad
from noteflow.infrastructure.audio.levels import RmsLevelProvider, compute_rms
from noteflow.infrastructure.logging.log_buffer import LogEntry
from noteflow.infrastructure.metrics.collector import PerformanceMetrics

# Mark all tests in this module as slow (excluded from CI unit tests)
pytestmark = pytest.mark.slow

if TYPE_CHECKING:
    from noteflow.grpc.mixins.converters import SegmentBuildParams
    from noteflow.infrastructure.asr.dto import AsrResult
@@ -82,6 +82,45 @@ def mock_oauth_manager() -> MagicMock:
    return manager


# ============================================================================
# Test database URL fixture (required for Settings validation)
# ============================================================================

_TEST_DATABASE_URL = "postgresql+asyncpg://test:test@localhost:5432/noteflow_test"


@pytest.fixture(autouse=True, scope="session")
def set_test_database_url() -> Generator[None, None, None]:
    """Set dummy database URL for tests that instantiate Settings.

    Many tests create NoteFlowServicer which calls get_settings() which
    requires NOTEFLOW_DATABASE_URL. This fixture sets a dummy value.
    """
    original = os.environ.get("NOTEFLOW_DATABASE_URL")
    os.environ["NOTEFLOW_DATABASE_URL"] = _TEST_DATABASE_URL
    yield
    if original is None:
        del os.environ["NOTEFLOW_DATABASE_URL"]
    else:
        os.environ["NOTEFLOW_DATABASE_URL"] = original


@pytest.fixture(autouse=True)
def clear_settings_cache() -> Generator[None, None, None]:
    """Clear cached settings before each test for isolation.

    The get_settings() and get_trigger_settings() functions use lru_cache which
    can cause test pollution if settings are loaded with different env vars.
    """
    from noteflow.config.settings._loaders import get_settings, get_trigger_settings

    get_settings.cache_clear()
    get_trigger_settings.cache_clear()
    yield
    get_settings.cache_clear()
    get_trigger_settings.cache_clear()


# ============================================================================
# Platform-specific library path setup (run before pytest collection)
# ============================================================================

@@ -2,6 +2,7 @@

from __future__ import annotations

import asyncio
from typing import Final
from uuid import uuid4

@@ -150,18 +151,18 @@ class TestRetrievalQuality:


class TestEmbeddingQuality:
    def test_same_text_same_embedding(self) -> None:
    @pytest.mark.asyncio
    async def test_same_text_same_embedding(self) -> None:
        embedder = ConfigurableEmbedder()
        e1_task = embedder.embed("hello world")
        e2_task = embedder.embed("hello world")

        import asyncio

        e1, e2 = asyncio.get_event_loop().run_until_complete(asyncio.gather(e1_task, e2_task))
        e1, e2 = await asyncio.gather(
            embedder.embed("hello world"),
            embedder.embed("hello world"),
        )

        assert e1 == e2, "Same text should yield same embedding"

    def test_custom_embeddings_differ(self) -> None:
    @pytest.mark.asyncio
    async def test_custom_embeddings_differ(self) -> None:
        embedder = ConfigurableEmbedder(
            embeddings={
                "apple": [1.0, 0.0, 0.0],

@@ -169,10 +170,8 @@ class TestEmbeddingQuality:
            }
        )

        import asyncio

        e1 = asyncio.get_event_loop().run_until_complete(embedder.embed("apple"))
        e2 = asyncio.get_event_loop().run_until_complete(embedder.embed("orange"))
        e1 = await embedder.embed("apple")
        e2 = await embedder.embed("orange")

        assert e1 != e2, "Different texts should have different embeddings"

@@ -348,8 +348,8 @@ class TestGetAnnotation:
        """GetAnnotation returns annotation when it exists."""
        annotation_id, meeting_id, expected_annotation = found_annotation
        mock_annotations_repo.get.return_value = expected_annotation
        expected_annotation_id = annotation_id.hex
        expected_meeting_id = meeting_id.hex
        expected_annotation_id = str(annotation_id)
        expected_meeting_id = str(meeting_id)

        request = noteflow_pb2.GetAnnotationRequest(annotation_id=expected_annotation_id)
        response = await servicer.GetAnnotation(request, mock_grpc_context)

@@ -541,7 +541,7 @@ class TestUpdateAnnotation:

        response = await servicer.UpdateAnnotation(request, mock_grpc_context)

        expected_annotation_id = annotation_id.hex
        expected_annotation_id = str(annotation_id)
        assert response.id == expected_annotation_id, "id should remain unchanged"
        assert response.text == request.text, "text should be updated"
        assert response.annotation_type == request.annotation_type, "type should be DECISION"

@@ -51,6 +51,10 @@ class _DummyContext:
        self.abort_called = True
        self.abort_code = code
        self.abort_details = details
        raise AssertionError("Unreachable")

    def invocation_metadata(self) -> list[tuple[str, str]]:
        return []


class _MockAssistantService(AssistantService):

@@ -104,7 +108,8 @@ class TestAskAssistantValidation:
        mock_service = _MockAssistantService()
        servicer = NoteFlowServicer(services=ServicesConfig(assistant_service=mock_service))
        context = _DummyContext()
        await servicer.AskAssistant(noteflow_pb2.AskAssistantRequest(question=""), context)
        with pytest.raises(AssertionError, match="Unreachable"):
            await servicer.AskAssistant(noteflow_pb2.AskAssistantRequest(question=""), context)
        assert context.abort_called, "Should abort on empty question"
        assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT, "Should be INVALID_ARGUMENT"

@@ -114,7 +119,8 @@ class TestAskAssistantValidation:
        mock_service = _MockAssistantService()
        servicer = NoteFlowServicer(services=ServicesConfig(assistant_service=mock_service))
        context = _DummyContext()
        await servicer.AskAssistant(noteflow_pb2.AskAssistantRequest(question=" "), context)
        with pytest.raises(AssertionError, match="Unreachable"):
            await servicer.AskAssistant(noteflow_pb2.AskAssistantRequest(question=" "), context)
        assert context.abort_called

    @pytest.mark.asyncio

@@ -123,10 +129,11 @@ class TestAskAssistantValidation:
        mock_service = _MockAssistantService()
        servicer = NoteFlowServicer(services=ServicesConfig(assistant_service=mock_service))
        context = _DummyContext()
        await servicer.AskAssistant(
            noteflow_pb2.AskAssistantRequest(question="What?", meeting_id="not-a-uuid"),
            context,
        )
        with pytest.raises(AssertionError, match="Unreachable"):
            await servicer.AskAssistant(
                noteflow_pb2.AskAssistantRequest(question="What?", meeting_id="not-a-uuid"),
                context,
            )
        assert context.abort_called, "Should abort on invalid meeting ID"
        assert context.abort_code == grpc.StatusCode.INVALID_ARGUMENT, "Should be INVALID_ARGUMENT"


@@ -224,16 +224,21 @@ class TestTrackChunkSequence:
        current_seq: int,
        next_seq: int,
        expected_logged: bool,
        capsys: pytest.CaptureFixture[str],
    ) -> None:
        """Verify gap detection logs warning for non-contiguous sequences."""
        from unittest.mock import patch

        meeting_id = "test-meeting"
        mock_logger = MagicMock()

        track_chunk_sequence(mock_host, meeting_id, current_seq)
        track_chunk_sequence(mock_host, meeting_id, next_seq)
        with patch(
            "noteflow.grpc.mixins.streaming._processing._chunk_tracking.logger",
            mock_logger,
        ):
            track_chunk_sequence(mock_host, meeting_id, current_seq)
            track_chunk_sequence(mock_host, meeting_id, next_seq)

        captured = capsys.readouterr()
        gap_logged = "Chunk sequence gap" in captured.out
        gap_logged = mock_logger.warning.called
        assert gap_logged == expected_logged, (
            f"Gap from {current_seq} to {next_seq} should "
            f"{'trigger' if expected_logged else 'not trigger'} warning"

@@ -48,18 +48,18 @@ def _create_mocksummarization_service(
    service = MagicMock(spec=SummarizationService)
    service.settings = settings

    async def grant() -> None:
    async def grant(feature: str) -> None:
        settings.cloud_consent_granted = True
        if on_consent_change:
            on_consent_change(True)

    async def revoke() -> None:
    async def revoke(feature: str) -> None:
        settings.cloud_consent_granted = False
        if on_consent_change:
            on_consent_change(False)

    service.grant_cloud_consent = AsyncMock(side_effect=grant)
    service.revoke_cloud_consent = AsyncMock(side_effect=revoke)
    service.grant_feature_consent = AsyncMock(side_effect=grant)
    service.revoke_feature_consent = AsyncMock(side_effect=revoke)
    service.cloud_consent_granted = property(lambda _: settings.cloud_consent_granted)

    # Make cloud_consent_granted work as a property

@@ -128,7 +128,7 @@ class TestGrantCloudConsent:
        assert isinstance(response, noteflow_pb2.GrantCloudConsentResponse), (
            "GrantCloudConsent should return GrantCloudConsentResponse"
        )
        grant_mock = cast(AsyncMock, service.grant_cloud_consent)
        grant_mock = cast(AsyncMock, service.grant_feature_consent)
        grant_mock.assert_awaited_once()

    @pytest.mark.asyncio

@@ -179,7 +179,7 @@ class TestRevokeCloudConsent:
        assert isinstance(response, noteflow_pb2.RevokeCloudConsentResponse), (
            "RevokeCloudConsent should return RevokeCloudConsentResponse"
        )
        revoke_mock = cast(AsyncMock, service.revoke_cloud_consent)
        revoke_mock = cast(AsyncMock, service.revoke_feature_consent)
        revoke_mock.assert_awaited_once()

    @pytest.mark.asyncio

@@ -139,6 +139,7 @@ class MockMeetingMixinServicerHost(MeetingMixin):
        self.summarization_service = None  # Post-processing disabled in tests
        self.diarization_auto_refine = False  # Auto-diarization disabled in tests
        self.diarization_engine = None
        self.embedder = None  # AI embedder disabled in tests

    def create_repository_provider(self) -> MockMeetingRepositoryProvider:
        """Create mock repository provider context manager."""

@@ -1,4 +1,8 @@
"""Tests for NER engine (composition-based)."""
"""Tests for NER engine (composition-based).

Tests requiring spaCy model are marked as integration tests.
Run with: pytest -m integration tests/infrastructure/ner/
"""

from __future__ import annotations

@@ -8,6 +12,8 @@ from noteflow.domain.entities.named_entity import EntityCategory, NamedEntity
from noteflow.infrastructure.ner import NerEngine
from noteflow.infrastructure.ner.backends.spacy_backend import SpacyBackend

pytestmark = pytest.mark.integration


@pytest.fixture(scope="module")
def spacy_backend() -> SpacyBackend:

@@ -119,15 +119,14 @@ class TestCreateRenderer:
            renderer, structlog.processors.JSONRenderer
        ), "should return JSONRenderer when not a TTY"

    def test_returns_console_renderer_for_tty(self) -> None:
        """create_renderer returns ConsoleRenderer for TTY with colors."""
    def test_returns_callable_renderer_for_tty(self) -> None:
        """create_renderer returns Rich handler renderer function for TTY."""
        config = LoggingConfig(log_format="auto", console_colors=True)
        with patch("sys.stderr") as mock_stderr:
            mock_stderr.isatty.return_value = True
            renderer = create_renderer(config)
        assert isinstance(
            renderer, structlog.dev.ConsoleRenderer
        ), "should return ConsoleRenderer for TTY"
        # Implementation returns _render_for_rich_handler function for Rich console output
        assert callable(renderer), "should return callable renderer for TTY"


@pytest.fixture

@@ -2,16 +2,17 @@

from __future__ import annotations

from typing import TYPE_CHECKING
from unittest.mock import MagicMock, patch

import pytest
import structlog

from noteflow.infrastructure.logging.config import LoggingConfig
from noteflow.infrastructure.logging.processors import (
    add_noteflow_context,
    add_otel_trace_context,
    build_processor_chain,
    safe_filter_by_level,
)
from noteflow.infrastructure.logging.structured import (
    request_id_var,

@@ -19,6 +20,14 @@ from noteflow.infrastructure.logging.structured import (
    workspace_id_var,
)

if TYPE_CHECKING:
    from structlog.typing import WrappedLogger


def _create_mock_wrapped_logger() -> WrappedLogger:
    return MagicMock()


# Test constants
SAMPLE_REQUEST_ID = "req-12345"
SAMPLE_USER_ID = "user-67890"

@@ -38,7 +47,7 @@ class TestAddNoteflowContext:
        request_id_var.set(SAMPLE_REQUEST_ID)
        event_dict: dict[str, str] = {"event": "test"}

        result = add_noteflow_context(None, "info", event_dict)
        result = add_noteflow_context(_create_mock_wrapped_logger(), "info", event_dict)

        assert result["request_id"] == SAMPLE_REQUEST_ID, "request_id should be injected"

@@ -47,7 +56,7 @@ class TestAddNoteflowContext:
        user_id_var.set(SAMPLE_USER_ID)
        event_dict: dict[str, str] = {"event": "test"}

        result = add_noteflow_context(None, "info", event_dict)
        result = add_noteflow_context(_create_mock_wrapped_logger(), "info", event_dict)

        assert result["user_id"] == SAMPLE_USER_ID, "user_id should be injected"

@@ -56,7 +65,7 @@ class TestAddNoteflowContext:
        workspace_id_var.set(SAMPLE_WORKSPACE_ID)
        event_dict: dict[str, str] = {"event": "test"}

        result = add_noteflow_context(None, "info", event_dict)
        result = add_noteflow_context(_create_mock_wrapped_logger(), "info", event_dict)

        assert result["workspace_id"] == SAMPLE_WORKSPACE_ID, "workspace_id should be injected"

@@ -66,7 +75,7 @@ class TestAddNoteflowContext:
        existing_id = "existing-request-id"
        event_dict: dict[str, str] = {"event": "test", "request_id": existing_id}

        result = add_noteflow_context(None, "info", event_dict)
        result = add_noteflow_context(_create_mock_wrapped_logger(), "info", event_dict)

        assert result["request_id"] == existing_id, "existing value should not be overridden"

@@ -74,7 +83,7 @@ class TestAddNoteflowContext:
        """Processor skips None context values."""
        event_dict: dict[str, str] = {"event": "test"}

        result = add_noteflow_context(None, "info", event_dict)
        result = add_noteflow_context(_create_mock_wrapped_logger(), "info", event_dict)

        assert "request_id" not in result, "None values should not be added"
        assert "user_id" not in result, "None values should not be added"

@@ -89,7 +98,7 @@ class TestAddOtelTraceContext:
        event_dict: dict[str, str] = {"event": "test"}

        with patch.dict("sys.modules", {"opentelemetry": None}):
            result = add_otel_trace_context(None, "info", event_dict)
            result = add_otel_trace_context(_create_mock_wrapped_logger(), "info", event_dict)

        assert "trace_id" not in result, "should not add trace_id without OTel"
        assert "span_id" not in result, "should not add span_id without OTel"

@@ -99,7 +108,7 @@ class TestAddOtelTraceContext:
        event_dict: dict[str, str] = {"event": "test"}

        with patch("opentelemetry.trace.get_current_span", return_value=None):
            result = add_otel_trace_context(None, "info", event_dict)
            result = add_otel_trace_context(_create_mock_wrapped_logger(), "info", event_dict)

        assert "trace_id" not in result, "should not add trace_id without span"

@@ -110,7 +119,7 @@ class TestAddOtelTraceContext:
        mock_span.is_recording.return_value = False

        with patch("opentelemetry.trace.get_current_span", return_value=mock_span):
            result = add_otel_trace_context(None, "info", event_dict)
            result = add_otel_trace_context(_create_mock_wrapped_logger(), "info", event_dict)

        assert "trace_id" not in result, "should not add trace_id for non-recording span"

@@ -128,7 +137,7 @@ class TestAddOtelTraceContext:
        mock_span.parent = None

        with patch("opentelemetry.trace.get_current_span", return_value=mock_span):
            result = add_otel_trace_context(None, "info", event_dict)
            result = add_otel_trace_context(_create_mock_wrapped_logger(), "info", event_dict)

        assert "trace_id" in result, "should inject trace_id"
        assert "span_id" in result, "should inject span_id"

@@ -144,7 +153,7 @@ class TestBuildProcessorChain:
        processors = build_processor_chain(config)

        assert len(processors) >= 5, "should include at least 5 standard processors"
        assert structlog.stdlib.filter_by_level in processors, "should include filter_by_level"
        assert safe_filter_by_level in processors, "should include safe_filter_by_level"

    def test_includes_noteflow_context_when_enabled(self) -> None:
        """Processor chain includes NoteFlow context when enabled."""

@@ -184,8 +193,8 @@ class TestBuildProcessorChain:

        processors = build_processor_chain(config)

        # filter_by_level should come before context injection
        filter_idx = processors.index(structlog.stdlib.filter_by_level)
        # safe_filter_by_level should come before context injection
        filter_idx = processors.index(safe_filter_by_level)
        noteflow_idx = processors.index(add_noteflow_context)
        otel_idx = processors.index(add_otel_trace_context)

@@ -116,9 +116,7 @@ def _create_mock_client_class(
    ) -> _MockChatResponse:
        if chat_fn is not None:
            # Dynamic behavior - call provided function
            return chat_fn(
                model=model, messages=messages, options=options, format=format
            )
            return chat_fn(model=model, messages=messages, options=options, format=format)
        if chat_response is not None:
            return chat_response
        return _MockChatResponse(build_valid_json_response())

@@ -246,8 +244,11 @@ class TestOllamaSummarizerProperties:
        from noteflow.infrastructure.summarization import OllamaSummarizer

        summarizer = OllamaSummarizer()
        assert summarizer.requires_cloud_consent is False, "local provider should not require cloud consent"
        assert summarizer.requires_cloud_consent is False, (
            "local provider should not require cloud consent"
        )

    @pytest.mark.usefixtures("mock_ollama_module")
    def test_is_available_when_server_responds(self) -> None:
        """is_available should be True when server responds."""
        from noteflow.infrastructure.summarization import OllamaSummarizer

@@ -283,7 +284,9 @@ class TestOllamaSummarizerProperties:
        from noteflow.infrastructure.summarization import OllamaSummarizer

        summarizer = OllamaSummarizer()
        assert summarizer.is_available is False, "is_available should be False when connection fails"
        assert summarizer.is_available is False, (
            "is_available should be False when connection fails"
        )


class TestOllamaSummarizerSummarize:

@@ -331,7 +334,9 @@ class TestOllamaSummarizerSummarize:
        response = build_valid_json_response(
            summary="Meeting discussed project updates.",
            key_points=[{"text": "Project on track", "segment_ids": [0]}],
            action_items=[{"text": "Review code", "assignee": "Alice", "priority": 2, "segment_ids": [1]}],
            action_items=[
                {"text": "Review code", "assignee": "Alice", "priority": 2, "segment_ids": [1]}
            ],
        )
        _setup_mock_ollama(monkeypatch, chat_response=_MockChatResponse(response))
        summarizer, request = _create_summarizer_and_request(meeting_id, segment_count=2)

@@ -340,11 +345,17 @@ class TestOllamaSummarizerSummarize:

        assert result.provider_name == "ollama", "result should report 'ollama' as provider_name"
        assert result.summary.meeting_id == meeting_id, "summary should have matching meeting_id"
        assert result.summary.executive_summary == "Meeting discussed project updates.", "executive_summary should match"
        assert result.summary.executive_summary == "Meeting discussed project updates.", (
            "executive_summary should match"
        )
        assert len(result.summary.key_points) == 1, "should have exactly one key_point"
        assert result.summary.key_points[0].segment_ids == [0], "key_point should reference segment 0"
        assert result.summary.key_points[0].segment_ids == [0], (
            "key_point should reference segment 0"
        )
        assert len(result.summary.action_items) == 1, "should have exactly one action_item"
        assert result.summary.action_items[0].assignee == "Alice", "action_item assignee should be 'Alice'"
        assert result.summary.action_items[0].assignee == "Alice", (
            "action_item assignee should be 'Alice'"
        )

    @pytest.mark.asyncio
    async def test_summarize_filters_invalid_segment_ids(

@@ -370,7 +381,9 @@ class TestOllamaSummarizerSummarize:

        result = await summarizer.summarize(request)

        assert result.summary.key_points[0].segment_ids == [0], "invalid segment_ids (99, 100) should be filtered out"
        assert result.summary.key_points[0].segment_ids == [0], (
            "invalid segment_ids (99, 100) should be filtered out"
        )

    @pytest.mark.asyncio
    async def test_summarize_respects_max_limits(

@@ -402,8 +415,12 @@ class TestOllamaSummarizerSummarize:

        result = await summarizer.summarize(request)

        assert len(result.summary.key_points) == 3, "key_points should be truncated to max_key_points=3"
        assert len(result.summary.action_items) == 2, "action_items should be truncated to max_action_items=2"
        assert len(result.summary.key_points) == 3, (
            "key_points should be truncated to max_key_points=3"
        )
        assert len(result.summary.action_items) == 2, (
            "action_items should be truncated to max_action_items=2"
        )

    @pytest.mark.asyncio
    async def test_summarize_handles_markdown_fenced_json(

@@ -427,7 +444,9 @@ class TestOllamaSummarizerSummarize:

        result = await summarizer.summarize(request)

        assert result.summary.executive_summary == "Fenced response", "markdown code fences should be stripped from JSON response"
        assert result.summary.executive_summary == "Fenced response", (
            "markdown code fences should be stripped from JSON response"
        )


class TestOllamaSummarizerErrors:

@@ -582,4 +601,6 @@ class TestOllamaSummarizerConfiguration:
        summarizer = OllamaSummarizer(host="http://custom:8080")
        _ = summarizer.is_available

        assert captured_host == "http://custom:8080", "custom host should be passed to ollama Client"
        assert captured_host == "http://custom:8080", (
            "custom host should be passed to ollama Client"
        )

@@ -23,9 +23,7 @@ from noteflow.infrastructure.converters.integration_converters import (
|
||||
)
|
||||
|
||||
|
||||
def _create_mock_integration_orm_from_kwargs(
|
||||
orm_kwargs: Mapping[str, object]
|
||||
) -> MagicMock:
|
||||
def _create_mock_integration_orm_from_kwargs(orm_kwargs: Mapping[str, object]) -> MagicMock:
|
||||
"""Create a mock ORM model from Integration kwargs dictionary."""
|
||||
mock_orm = MagicMock()
|
||||
mock_orm.id = orm_kwargs["id"]
|
||||
@@ -41,9 +39,7 @@ def _create_mock_integration_orm_from_kwargs(
|
||||
return mock_orm
|
||||
|
||||
|
||||
def _create_mock_sync_run_orm_from_kwargs(
|
||||
orm_kwargs: Mapping[str, object]
|
||||
) -> MagicMock:
|
||||
def _create_mock_sync_run_orm_from_kwargs(orm_kwargs: Mapping[str, object]) -> MagicMock:
|
||||
"""Create a mock ORM model from SyncRun kwargs dictionary."""
|
||||
mock_orm = MagicMock()
|
||||
mock_orm.id = orm_kwargs["id"]
|
||||
@@ -52,10 +48,11 @@ def _create_mock_sync_run_orm_from_kwargs(
|
||||
mock_orm.started_at = orm_kwargs["started_at"]
|
||||
mock_orm.ended_at = orm_kwargs["ended_at"]
|
||||
mock_orm.duration_ms = orm_kwargs["duration_ms"]
|
||||
mock_orm.error_message = orm_kwargs["error_message"]
|
||||
mock_orm.error_code = orm_kwargs["error_code"]
|
||||
mock_orm.stats = orm_kwargs["stats"]
|
||||
return mock_orm
|
||||
|
||||
|
||||
# Test constants for sync run metrics
|
||||
SYNC_RUN_ITEMS_SYNCED = 15
|
||||
"""Number of items synced in a standard test sync run fixture."""
|
||||
@@ -89,24 +86,22 @@ class TestIntegrationConverterOrmToDomain:
|
||||
model.updated_at = datetime(2024, 1, 15, 12, 0, 0, tzinfo=UTC)
|
||||
return model
|
||||
|
||||
def test_integration_orm_to_domain(
|
||||
self, mock_integration_model: MagicMock
|
||||
) -> None:
|
||||
def test_integration_orm_to_domain(self, mock_integration_model: MagicMock) -> None:
|
||||
"""Convert ORM model to domain Integration."""
|
||||
result = IntegrationConverter.orm_to_domain(mock_integration_model)
|
||||
|
||||
assert isinstance(result, Integration), "Should return Integration instance"
|
||||
assert result.id == mock_integration_model.id, "ID should match"
|
||||
assert result.workspace_id == mock_integration_model.workspace_id, "Workspace ID should match"
|
||||
assert result.workspace_id == mock_integration_model.workspace_id, (
|
||||
"Workspace ID should match"
|
||||
)
|
||||
assert result.name == "Google Calendar", "Name should match"
|
||||
assert result.type == IntegrationType.CALENDAR, "Type should be enum"
|
||||
assert result.status == IntegrationStatus.CONNECTED, "Status should be enum"
|
||||
assert result.last_sync == mock_integration_model.last_sync, "Last sync should match"
|
||||
assert result.error_message is None, "Error message should be None"
|
||||
|
||||
def test_converts_type_string_to_enum(
|
||||
self, mock_integration_model: MagicMock
|
||||
) -> None:
|
||||
def test_converts_type_string_to_enum(self, mock_integration_model: MagicMock) -> None:
|
||||
"""Type string is converted to IntegrationType enum."""
|
||||
mock_integration_model.type = "email"
|
||||
result = IntegrationConverter.orm_to_domain(mock_integration_model)
|
||||
@@ -132,7 +127,9 @@ class TestIntegrationConverterOrmToDomain:
|
||||
"""All IntegrationType strings convert to correct enum values."""
|
||||
mock_integration_model.type = type_string
|
||||
result = IntegrationConverter.orm_to_domain(mock_integration_model)
|
||||
assert result.type == expected_enum, f"Type string '{type_string}' should convert to {expected_enum}"
|
||||
assert result.type == expected_enum, (
|
||||
f"Type string '{type_string}' should convert to {expected_enum}"
|
||||
)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
("status_string", "expected_enum"),
|
||||
@@ -151,13 +148,17 @@ class TestIntegrationConverterOrmToDomain:
|
||||
"""All IntegrationStatus strings convert to correct enum values."""
|
||||
mock_integration_model.status = status_string
|
||||
result = IntegrationConverter.orm_to_domain(mock_integration_model)
|
||||
assert result.status == expected_enum, f"Status string '{status_string}' should convert to {expected_enum}"
|
||||
assert result.status == expected_enum, (
|
||||
f"Status string '{status_string}' should convert to {expected_enum}"
|
||||
)
|
||||
|
||||
def test_converts_config_dict(self, mock_integration_model: MagicMock) -> None:
|
||||
"""Config is converted to dict from ORM model."""
|
||||
result = IntegrationConverter.orm_to_domain(mock_integration_model)
|
||||
assert isinstance(result.config, dict), "Config should be dict"
|
||||
assert result.config["provider_email"] == "user@example.com", "Provider email should be preserved"
|
||||
assert result.config["provider_email"] == "user@example.com", (
|
||||
"Provider email should be preserved"
|
||||
)
|
||||
|
||||
def test_handles_none_config(self, mock_integration_model: MagicMock) -> None:
|
||||
"""None config in ORM becomes empty dict in domain."""
|
||||
@@ -195,18 +196,20 @@ class TestIntegrationConverterToOrmKwargs:
             updated_at=datetime(2024, 1, 15, 12, 0, 0, tzinfo=UTC),
         )

-    def test_integration_to_orm_kwargs(
-        self, integration_entity: Integration
-    ) -> None:
+    def test_integration_to_orm_kwargs(self, integration_entity: Integration) -> None:
         """Convert domain Integration to ORM kwargs dict."""
         result = IntegrationConverter.to_integration_orm_kwargs(integration_entity)

         assert result["id"] == integration_entity.id, "ID should be preserved"
-        assert result["workspace_id"] == integration_entity.workspace_id, "Workspace ID should be preserved"
+        assert result["workspace_id"] == integration_entity.workspace_id, (
+            "Workspace ID should be preserved"
+        )
         assert result["name"] == "Outlook Calendar", "Name should be preserved"
         assert result["type"] == "calendar", "Type should be string value"
         assert result["status"] == "connected", "Status should be string value"
-        assert result["config"] == {"provider_email": "test@outlook.com"}, "Config should be preserved"
+        assert result["config"] == {"provider_email": "test@outlook.com"}, (
+            "Config should be preserved"
+        )
         assert result["error_message"] is None, "Error message should be None"

     @pytest.mark.parametrize(
@@ -231,7 +234,9 @@ class TestIntegrationConverterToOrmKwargs:
             status=IntegrationStatus.DISCONNECTED,
         )
         result = IntegrationConverter.to_integration_orm_kwargs(integration)
-        assert result["type"] == expected_string, f"Type enum {type_enum} should convert to '{expected_string}'"
+        assert result["type"] == expected_string, (
+            f"Type enum {type_enum} should convert to '{expected_string}'"
+        )

     @pytest.mark.parametrize(
         ("status_enum", "expected_string"),
@@ -253,7 +258,9 @@ class TestIntegrationConverterToOrmKwargs:
             status=status_enum,
         )
         result = IntegrationConverter.to_integration_orm_kwargs(integration)
-        assert result["status"] == expected_string, f"Status enum {status_enum} should convert to '{expected_string}'"
+        assert result["status"] == expected_string, (
+            f"Status enum {status_enum} should convert to '{expected_string}'"
+        )


 class TestSyncRunConverterOrmToDomain:
@@ -273,15 +280,15 @@ class TestSyncRunConverterOrmToDomain:
         model.stats = {"items_synced": 10, "items_total": SYNC_RUN_ITEMS_SYNCED}
         return model

-    def test_sync_run_orm_to_domain(
-        self, mock_sync_run_model: MagicMock
-    ) -> None:
+    def test_sync_run_orm_to_domain(self, mock_sync_run_model: MagicMock) -> None:
         """Convert ORM model to domain SyncRun."""
         result = SyncRunConverter.orm_to_domain(mock_sync_run_model)

         assert isinstance(result, SyncRun), "Should return SyncRun instance"
         assert result.id == mock_sync_run_model.id, "ID should match"
-        assert result.integration_id == mock_sync_run_model.integration_id, "Integration ID should match"
+        assert result.integration_id == mock_sync_run_model.integration_id, (
+            "Integration ID should match"
+        )
         assert result.status == SyncRunStatus.SUCCESS, "Status should be enum"
         assert result.duration_ms == SYNC_RUN_DURATION_MS_SHORT, "Duration should match"
         assert result.error_code is None, "Error code should be None"
@@ -303,14 +310,18 @@ class TestSyncRunConverterOrmToDomain:
         """All SyncRunStatus strings convert to correct enum values."""
         mock_sync_run_model.status = status_string
         result = SyncRunConverter.orm_to_domain(mock_sync_run_model)
-        assert result.status == expected_enum, f"Status string '{status_string}' should convert to {expected_enum}"
+        assert result.status == expected_enum, (
+            f"Status string '{status_string}' should convert to {expected_enum}"
+        )

     def test_converts_stats_dict(self, mock_sync_run_model: MagicMock) -> None:
         """Stats is converted to dict from ORM model."""
         result = SyncRunConverter.orm_to_domain(mock_sync_run_model)
         assert isinstance(result.stats, dict), "Stats should be dict"
         assert result.stats["items_synced"] == 10, "Items synced count should be preserved"
-        assert result.stats["items_total"] == SYNC_RUN_ITEMS_SYNCED, "Items total count should be preserved"
+        assert result.stats["items_total"] == SYNC_RUN_ITEMS_SYNCED, (
+            "Items total count should be preserved"
+        )

     def test_handles_none_stats(self, mock_sync_run_model: MagicMock) -> None:
         """None stats in ORM becomes empty dict in domain."""
@@ -348,10 +359,14 @@ class TestSyncRunConverterToOrmKwargs:
         result = SyncRunConverter.to_sync_run_orm_kwargs(sync_run)

         assert result["id"] == sync_run.id, "ID should be preserved"
-        assert result["integration_id"] == sync_run.integration_id, "Integration ID should be preserved"
+        assert result["integration_id"] == sync_run.integration_id, (
+            "Integration ID should be preserved"
+        )
         assert result["status"] == "success", "Status should be string value"
         assert result["duration_ms"] == SYNC_RUN_DURATION_MS_MEDIUM, "Duration should be preserved"
-        assert result["stats"] == {"items_synced": SYNC_RUN_ITEMS_COMPLETE}, "Stats should be preserved"
+        assert result["stats"] == {"items_synced": SYNC_RUN_ITEMS_COMPLETE}, (
+            "Stats should be preserved"
+        )

     @pytest.mark.parametrize(
         ("status_enum", "expected_string"),
@@ -372,7 +387,9 @@ class TestSyncRunConverterToOrmKwargs:
             started_at=datetime(2024, 1, 15, 12, 0, 0, tzinfo=UTC),
         )
         result = SyncRunConverter.to_sync_run_orm_kwargs(sync_run)
-        assert result["status"] == expected_string, f"Status enum {status_enum} should convert to '{expected_string}'"
+        assert result["status"] == expected_string, (
+            f"Status enum {status_enum} should convert to '{expected_string}'"
+        )


 class TestIntegrationConverterRoundTrip:
@@ -433,7 +450,9 @@ class TestIntegrationConverterRoundTrip:
         result = SyncRunConverter.orm_to_domain(mock_orm)

         assert result.id == round_trip_sync_run.id, "ID preserved"
-        assert result.integration_id == round_trip_sync_run.integration_id, "Integration ID preserved"
+        assert result.integration_id == round_trip_sync_run.integration_id, (
+            "Integration ID preserved"
+        )
         assert result.status == round_trip_sync_run.status, "Status preserved"
         assert result.duration_ms == round_trip_sync_run.duration_ms, "Duration preserved"
         assert result.stats == round_trip_sync_run.stats, "Stats preserved"

@@ -10,12 +10,6 @@ from tests.conftest import approx_float
 pytestmark = [pytest.mark.integration, pytest.mark.usefixtures("clear_settings_cache")]


-@pytest.fixture
-def clear_settings_cache() -> None:
-    get_trigger_settings.cache_clear()
-    get_settings.cache_clear()
-
-
 @pytest.mark.parametrize(
     "attr,expected",
     [