noteflow/tests/stress/test_resource_leaks.py

"""Resource leak detection tests.
Detects file descriptor, memory, and coroutine leaks under load conditions.
These tests verify that resources are properly released during cleanup cycles.
"""
from __future__ import annotations
import asyncio
import gc
from contextlib import asynccontextmanager
from pathlib import Path
from typing import TYPE_CHECKING
from unittest.mock import MagicMock

import pytest

from noteflow.config.constants import DEFAULT_SAMPLE_RATE
from noteflow.grpc.service import NoteFlowServicer
from support.stress_helpers import (
CONCURRENT_CONTEXT_COUNT,
CONTEXT_CANCEL_DELAY_SECONDS,
HEAP_CONVERTER_MAX_GROWTH_BYTES,
HEAP_MAX_GROWTH_BYTES,
HEAP_PROTO_MAX_GROWTH_BYTES,
MEMORY_PRESSURE_ALLOCATION_MB,
ORM_CONVERSION_CYCLES,
PROTO_CYCLES,
RIGOROUS_AUDIO_CHUNKS_PER_CYCLE,
RIGOROUS_CONCURRENT_MEETINGS,
SLOW_ENTER_SLEEP_SECONDS,
calculate_filtered_heap_growth,
calculate_noteflow_heap_growth,
check_resource_leaks,
log_heap_metrics,
log_stress_metrics,
log_thread_metrics,
measure_resource_baseline,
measure_rss_bytes,
run_audio_writer_cycles,
run_audio_writer_high_cycle,
run_audio_writer_thread_cycles,
run_concurrent_context_tasks,
run_concurrent_streaming_sessions,
run_diarization_session_cycles,
run_gc_collection_behavior,
run_interleaved_init_cleanup,
run_operations_under_memory_pressure,
run_orm_conversion_cycles,
run_protobuf_cycles,
run_streaming_init_cleanup_cycles,
run_webhook_executor_cycles,
schedule_tasks_to_servicer,
verify_all_tasks_cancelled,
verify_audio_writer_high_cycle_metrics,
verify_webhook_executor_results,
)

if TYPE_CHECKING:
from collections.abc import AsyncIterator
from threading import Thread
import httpx
from noteflow.infrastructure.security.crypto import AesGcmCryptoBox

# Test constants - rigorous values for real stress testing
STREAMING_CYCLES = 500 # 10x increase for slow leak detection
AUDIO_WRITER_CYCLES = 100 # 5x increase with real data writes
MEMORY_TEST_CYCLES = 1000 # 10x increase for state dict stability
INTERLEAVE_CYCLES = 200 # 10x increase for interleaved patterns
DIARIZATION_CYCLES = 200 # 4x increase for session lifecycle
WRITER_THREAD_CYCLES = 50 # 5x increase for thread lifecycle
FD_LEAK_TOLERANCE = 10
THREAD_LEAK_TOLERANCE = 2
TASK_TEST_COUNT = 50 # 10x increase for coroutine leak detection
TASK_SLEEP_SECONDS = 10
WEBHOOK_EXECUTOR_CYCLES = 20 # 4x increase for HTTP client lifecycle
BYTES_PER_KB = 1024

# High-stress constants (use sparingly - these take longer)
HIGH_STRESS_STREAMING_CYCLES = 2000
HIGH_STRESS_SEGMENTER_CYCLES = 500
HIGH_STRESS_SEGMENTER_CHUNKS = 200

# Async cleanup wait times
ASYNC_CLEANUP_DELAY_SECONDS = 0.1
FLUSH_THREAD_CLEANUP_DELAY_SECONDS = 0.2
FAILING_TASK_DELAY_SECONDS = 0.1
THREAD_STOP_WAIT_SECONDS = 0.5 # Longer wait for thread pool cleanup
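
# Note on the delay constants above: gc.collect() alone cannot reap resources
# owned by daemon threads or pending event-loop callbacks, so tests sleep
# briefly after cleanup to let OS-level FD and thread counts settle before
# they are re-measured.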


def _get_executor_client(executor: object) -> httpx.AsyncClient | None:
    """Access WebhookExecutor._client for stress test verification.

    Stress tests need to verify HTTP client cleanup state.
    Uses object.__getattribute__ to bypass pyright's protected access check.
    """
client = object.__getattribute__(executor, "_client")
# Runtime type narrowing for httpx.AsyncClient | None
if client is None:
return None
import httpx as httpx_mod
return client if isinstance(client, httpx_mod.AsyncClient) else None


async def _ensure_executor_client(executor: object) -> httpx.AsyncClient:
    """Call WebhookExecutor._ensure_client for stress test setup.

    Uses object.__getattribute__ to get the method, then calls it.
    """
import httpx as httpx_mod
method = object.__getattribute__(executor, "_ensure_client")
result = await method()
if not isinstance(result, httpx_mod.AsyncClient):
msg = "Expected httpx.AsyncClient"
raise TypeError(msg)
return result


def _get_session_pipeline(session: object) -> object | None:
    """Access DiarizationSession._pipeline for stress test verification.

    Stress tests need to verify pipeline cleanup state.
    """
return object.__getattribute__(session, "_pipeline")


def _get_writer_flush_thread(writer: object) -> Thread | None:
    """Access MeetingAudioWriter._flush_thread for stress test verification.

    Stress tests need to verify background thread cleanup.
    """
from threading import Thread
thread = object.__getattribute__(writer, "_flush_thread")
if thread is None:
return None
return thread if isinstance(thread, Thread) else None
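

# The leak tests below follow a measure/act/verify pattern built on helpers in
# support/stress_helpers.py. For orientation, a minimal sketch of that pattern
# (a hypothetical simplification -- the real helpers also track threads and
# log metrics):
#
#     import psutil
#
#     def measure_resource_baseline() -> dict[str, int]:
#         proc = psutil.Process()
#         return {"fds": proc.num_fds(), "threads": proc.num_threads()}
#
#     def check_resource_leaks(baseline: dict[str, int]) -> list[str]:
#         proc = psutil.Process()
#         errors: list[str] = []
#         if proc.num_fds() > baseline["fds"] + FD_LEAK_TOLERANCE:
#             errors.append(f"FD leak: {baseline['fds']} -> {proc.num_fds()}")
#         return errors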


class TestFileDescriptorLeaks:
    """Detect file descriptor and thread leaks under high load."""

    @pytest.mark.slow
@pytest.mark.asyncio
async def test_streaming_fd_cleanup(self, memory_servicer: NoteFlowServicer) -> None:
"""Verify FDs and threads released after 500 streaming cycles."""
baseline = measure_resource_baseline()
run_streaming_init_cleanup_cycles(memory_servicer, STREAMING_CYCLES)
gc.collect()
await asyncio.sleep(ASYNC_CLEANUP_DELAY_SECONDS)
errors = check_resource_leaks(baseline)
assert not errors, f"Resource leaks after {STREAMING_CYCLES} cycles: {errors}"

    @pytest.mark.slow
@pytest.mark.asyncio
async def test_audio_writer_fd_cleanup(self, tmp_path: Path, crypto: AesGcmCryptoBox) -> None:
"""Verify audio writer closes FDs and threads after 100 cycles."""
baseline = measure_resource_baseline()
bytes_per_cycle = run_audio_writer_cycles(crypto, tmp_path, AUDIO_WRITER_CYCLES)
total_bytes = sum(bytes_per_cycle)
gc.collect()
await asyncio.sleep(ASYNC_CLEANUP_DELAY_SECONDS)
log_stress_metrics(
"test_audio_writer_fd_cleanup",
baseline,
bytes_written=total_bytes,
cycles=AUDIO_WRITER_CYCLES,
)
errors = check_resource_leaks(baseline)
assert not errors, f"Audio writer resource leaks: {errors}"

    @pytest.mark.slow
@pytest.mark.asyncio
async def test_audio_writer_high_cycle_with_real_data(
self, tmp_path: Path, crypto: AesGcmCryptoBox
) -> None:
"""Verify no leaks after writing real audio data across many cycles.
This test writes actual audio chunks (not just open/close) to stress
the buffer management, flush thread, and encryption pipeline.
"""
baseline = measure_resource_baseline()
successful, total_bytes, total_chunks = run_audio_writer_high_cycle(
crypto,
tmp_path,
cycle_count=AUDIO_WRITER_CYCLES,
chunks_per_cycle=RIGOROUS_AUDIO_CHUNKS_PER_CYCLE,
)
gc.collect()
await asyncio.sleep(FLUSH_THREAD_CLEANUP_DELAY_SECONDS)
verify_audio_writer_high_cycle_metrics(
"test_audio_writer_high_cycle_with_real_data",
baseline,
successful_cycles=successful,
total_bytes=total_bytes,
total_chunks=total_chunks,
cycle_count=AUDIO_WRITER_CYCLES,
chunks_per_cycle=RIGOROUS_AUDIO_CHUNKS_PER_CYCLE,
)

    @pytest.mark.slow
@pytest.mark.asyncio
async def test_concurrent_meetings_resource_cleanup(
self, memory_servicer: NoteFlowServicer
) -> None:
"""Verify resources cleaned up after concurrent meeting sessions."""
baseline = measure_resource_baseline()
successful, session_errors = run_concurrent_streaming_sessions(
memory_servicer,
meeting_count=RIGOROUS_CONCURRENT_MEETINGS,
chunks_per_meeting=0,
)
gc.collect()
await asyncio.sleep(ASYNC_CLEANUP_DELAY_SECONDS)
log_stress_metrics(
"test_concurrent_meetings_resource_cleanup",
baseline,
cycles=RIGOROUS_CONCURRENT_MEETINGS,
extra={"successful": successful, "session_errors": len(session_errors)},
)
errors = check_resource_leaks(baseline)
all_errors = session_errors + errors
assert successful == RIGOROUS_CONCURRENT_MEETINGS, (
f"Only {successful}/{RIGOROUS_CONCURRENT_MEETINGS} meetings succeeded"
)
assert not all_errors, f"Concurrent session errors: {all_errors}"


class TestMemoryLeaks:
    """Detect memory leaks under high load (1000+ cycles)."""

    @pytest.mark.slow
@pytest.mark.asyncio
async def test_streaming_state_memory_stability(
self, memory_servicer: NoteFlowServicer
) -> None:
"""Verify state dicts don't grow unbounded during 1000 streaming cycles."""
run_streaming_init_cleanup_cycles(memory_servicer, MEMORY_TEST_CYCLES)
gc.collect()
# Verify no lingering state
assert len(memory_servicer.vad_instances) == 0, "VAD instances leaked"
assert len(memory_servicer.segmenters) == 0, "Segmenters leaked"
assert len(memory_servicer.stream_states) == 0, "Stream states leaked"
assert len(memory_servicer.active_streams) == 0, "Active streams leaked"

    @pytest.mark.slow
@pytest.mark.asyncio
async def test_resource_interleaved_init_cleanup(
self, memory_servicer: NoteFlowServicer
) -> None:
"""Verify memory stability with 200 interleaved init/cleanup patterns."""
total_inits, total_cleanups = run_interleaved_init_cleanup(
memory_servicer, INTERLEAVE_CYCLES
)
gc.collect()
# Verify clean state and all operations completed
assert total_inits == total_cleanups, (
f"Mismatched init/cleanup: {total_inits} inits, {total_cleanups} cleanups"
)
assert len(memory_servicer.vad_instances) == 0, "VAD leaked after interleave"
assert len(memory_servicer.active_streams) == 0, "Streams leaked after interleave"

    @pytest.mark.slow
@pytest.mark.asyncio
async def test_diarization_session_memory(self, memory_servicer: NoteFlowServicer) -> None:
"""Verify 200 diarization session cycles don't leak memory."""

        def create_mock_session() -> MagicMock:
            mock = MagicMock()
            mock.close = MagicMock()
            return mock

        successful = run_diarization_session_cycles(
memory_servicer, DIARIZATION_CYCLES, create_mock_session
)
gc.collect()
assert successful == DIARIZATION_CYCLES, (
f"Only {successful}/{DIARIZATION_CYCLES} cycles completed"
)
assert len(memory_servicer.stream_states) == 0, "Diarization sessions leaked"


class TestCoroutineLeaks:
    """Detect leaked coroutines/tasks."""

    @pytest.mark.asyncio
async def test_no_orphaned_tasks_after_shutdown(
self, memory_servicer: NoteFlowServicer
) -> None:
"""Verify no tasks remain after servicer shutdown."""
# Create tasks using helper (avoids inline loop)
tasks_created = schedule_tasks_to_servicer(
memory_servicer, TASK_TEST_COUNT, TASK_SLEEP_SECONDS
)
# Shutdown
await memory_servicer.shutdown()
# Verify all tasks cancelled and cleared
assert len(memory_servicer.diarization_tasks) == 0, (
f"Expected 0 diarization tasks after shutdown, found {len(memory_servicer.diarization_tasks)}"
)
# Check tasks are cancelled using helper (avoids inline loop)
errors = verify_all_tasks_cancelled(tasks_created)
assert not errors, f"Task cleanup errors: {errors}"

    @pytest.mark.asyncio
async def test_task_cleanup_on_exception(self, memory_servicer: NoteFlowServicer) -> None:
"""Verify tasks cleaned up even if task raises exception."""

        async def failing_task() -> None:
            await asyncio.sleep(FAILING_TASK_DELAY_SECONDS)
            raise ValueError("Task failed")

        task = asyncio.create_task(failing_task())
memory_servicer.diarization_tasks["failing-job"] = task
# Wait for task to fail
with pytest.raises(ValueError, match="Task failed"):
await task
# Verify task is done (not stuck)
assert task.done(), "Failed task should be marked as done"
# Cleanup
await memory_servicer.shutdown()
assert len(memory_servicer.diarization_tasks) == 0, (
"All diarization tasks should be cleared after shutdown"
)
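
# For reference, verify_all_tasks_cancelled (support/stress_helpers.py) reduces
# to inspecting asyncio task state after shutdown; a hypothetical minimal form:
#
#     def verify_all_tasks_cancelled(tasks: list[asyncio.Task[None]]) -> list[str]:
#         return [f"task {t!r} not cancelled" for t in tasks if not t.cancelled()]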


class TestWebhookClientLeaks:
    """Detect webhook HTTP client leaks."""

    @pytest.mark.asyncio
async def test_webhook_executor_cleanup(self) -> None:
"""Verify webhook executor closes HTTP client."""
from noteflow.infrastructure.webhooks.executor import WebhookExecutor
executor = WebhookExecutor()
# Force client creation using type-safe helper
await _ensure_executor_client(executor)
assert _get_executor_client(executor) is not None, (
"HTTP client should exist after ensure_client"
)
# Close
await executor.close()
assert _get_executor_client(executor) is None, "HTTP client should be None after close"

    @pytest.mark.asyncio
async def test_webhook_executor_close_idempotent(self) -> None:
"""Verify webhook executor close is idempotent."""
from noteflow.infrastructure.webhooks.executor import WebhookExecutor
executor = WebhookExecutor()
# Close without opening - should not raise
await executor.close()
await executor.close()
await executor.close()

    @pytest.mark.asyncio
async def test_webhook_executor_multiple_cycles(self) -> None:
"""Verify webhook executor can be opened and closed multiple times."""
# Run cycles using helper function (avoids inline loop)
results = await run_webhook_executor_cycles(WEBHOOK_EXECUTOR_CYCLES)
# Verify all cycles succeeded using helper (avoids inline loop)
all_created, all_cleared = verify_webhook_executor_results(results)
assert all_created, "Client creation failed in some cycles"
assert all_cleared, "Client cleanup failed in some cycles"
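
# The cycling test above assumes the executor creates its HTTP client lazily
# and drops the reference on close. A minimal sketch of that lifecycle pattern
# (hypothetical -- the real WebhookExecutor also configures timeouts):
#
#     class _LazyClientOwner:
#         def __init__(self) -> None:
#             self._client: httpx.AsyncClient | None = None
#
#         async def _ensure_client(self) -> httpx.AsyncClient:
#             if self._client is None:
#                 self._client = httpx.AsyncClient()
#             return self._client
#
#         async def close(self) -> None:
#             if self._client is not None:
#                 await self._client.aclose()
#                 self._client = None  # idempotent: repeat close() is a no-op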


class TestDiarizationSessionLeaks:
    """Detect diarization session resource leaks."""

    @pytest.mark.asyncio
async def test_session_close_releases_pipeline(self) -> None:
"""Verify diarization session close releases pipeline reference."""
from noteflow.infrastructure.diarization.session import DiarizationSession
# Create mock pipeline
mock_pipeline = MagicMock()
session = DiarizationSession(
meeting_id="test-session",
_pipeline=mock_pipeline,
)
assert _get_session_pipeline(session) is not None, "Pipeline should exist before close"
session.close()
# Pipeline should be None after close
assert _get_session_pipeline(session) is None, "Pipeline should be released after close"
assert session.is_closed, "Session should be marked as closed"

    @pytest.mark.asyncio
async def test_session_close_idempotent(self) -> None:
"""Verify session close is idempotent."""
from noteflow.infrastructure.diarization.session import DiarizationSession
mock_pipeline = MagicMock()
session = DiarizationSession(
meeting_id="test-session",
_pipeline=mock_pipeline,
)
# Multiple closes should not raise
session.close()
session.close()
session.close()
assert session.is_closed


class TestAudioWriterThreadLeaks:
    """Detect audio writer background thread leaks."""

    @pytest.mark.slow
@pytest.mark.asyncio
async def test_flush_thread_stopped_on_close(self, tmp_path: Path) -> None:
"""Verify background flush thread stops on close."""
from noteflow.infrastructure.audio.writer import MeetingAudioWriter
from noteflow.infrastructure.security.crypto import AesGcmCryptoBox
from noteflow.infrastructure.security.keystore import InMemoryKeyStore
crypto = AesGcmCryptoBox(InMemoryKeyStore())
writer = MeetingAudioWriter(crypto, tmp_path, buffer_size=1024)
dek = crypto.generate_dek()
wrapped_dek = crypto.wrap_dek(dek)
# Open writer - starts flush thread
writer.open("test-thread", dek, wrapped_dek, sample_rate=DEFAULT_SAMPLE_RATE)
# Verify thread is running using type-safe helper
flush_thread = _get_writer_flush_thread(writer)
assert flush_thread is not None, "Flush thread should exist after open"
assert flush_thread.is_alive(), "Flush thread should be alive after open"
# Close writer
writer.close()
# Give thread time to stop
await asyncio.sleep(ASYNC_CLEANUP_DELAY_SECONDS)
# Thread should be stopped and cleared
assert _get_writer_flush_thread(writer) is None, (
"Flush thread should be cleared after close"
)

    @pytest.mark.slow
@pytest.mark.asyncio
async def test_multiple_writer_cycles(self, tmp_path: Path) -> None:
"""Verify no thread leaks across multiple writer cycles."""
import threading
from noteflow.infrastructure.security.crypto import AesGcmCryptoBox
from noteflow.infrastructure.security.keystore import InMemoryKeyStore
crypto = AesGcmCryptoBox(InMemoryKeyStore())
initial_threads = threading.active_count()
# Create and close writers using helper (avoids inline loop)
run_audio_writer_thread_cycles(crypto, tmp_path, WRITER_THREAD_CYCLES, DEFAULT_SAMPLE_RATE)
await asyncio.sleep(THREAD_STOP_WAIT_SECONDS) # Allow threads to fully stop
gc.collect()
final_threads = threading.active_count()
log_thread_metrics(
"test_multiple_writer_cycles",
initial_threads,
final_threads,
WRITER_THREAD_CYCLES,
THREAD_LEAK_TOLERANCE,
)
# Should not have leaked threads
assert final_threads <= initial_threads + THREAD_LEAK_TOLERANCE, (
f"Thread leak: {initial_threads} -> {final_threads}"
)


class TestHeapGrowth:
    """Detect heap growth using tracemalloc snapshots."""

    @pytest.mark.slow
@pytest.mark.asyncio
async def test_streaming_heap_stability(self, memory_servicer: NoteFlowServicer) -> None:
"""Verify heap doesn't grow unbounded during streaming cycles."""
import tracemalloc
tracemalloc.start()
gc.collect()
snapshot_before = tracemalloc.take_snapshot()
run_streaming_init_cleanup_cycles(memory_servicer, STREAMING_CYCLES)
gc.collect()
snapshot_after = tracemalloc.take_snapshot()
tracemalloc.stop()
noteflow_growth = calculate_noteflow_heap_growth(snapshot_before, snapshot_after)
log_heap_metrics(
"test_streaming_heap_stability",
noteflow_growth,
cycles=STREAMING_CYCLES,
max_allowed_bytes=HEAP_MAX_GROWTH_BYTES,
filter_name="noteflow",
)
assert noteflow_growth < HEAP_MAX_GROWTH_BYTES, (
f"Heap growth detected: {noteflow_growth / BYTES_PER_KB:.1f}KB in noteflow code"
)
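
    # calculate_noteflow_heap_growth (support/stress_helpers.py) diffs the two
    # snapshots and sums growth attributed to noteflow source files. A sketch
    # of the underlying tracemalloc pattern (simplified assumption about the
    # filename filter):
    #
    #     def calculate_noteflow_heap_growth(before, after) -> int:
    #         diffs = after.compare_to(before, "filename")
    #         return sum(
    #             d.size_diff for d in diffs if "noteflow" in d.traceback[0].filename
    #         )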

    @pytest.mark.slow
@pytest.mark.asyncio
async def test_orm_conversion_heap_stability(self) -> None:
"""Verify ORM conversions don't leak memory."""
import tracemalloc
from uuid import uuid4
from noteflow.infrastructure.converters.orm_converters import OrmConverter
tracemalloc.start()
gc.collect()
snapshot_before = tracemalloc.take_snapshot()
converter = OrmConverter()
meeting_id = uuid4()
run_orm_conversion_cycles(converter, meeting_id, ORM_CONVERSION_CYCLES)
gc.collect()
snapshot_after = tracemalloc.take_snapshot()
tracemalloc.stop()
converter_growth = calculate_filtered_heap_growth(
snapshot_before, snapshot_after, "converter"
)
log_heap_metrics(
"test_orm_conversion_heap_stability",
converter_growth,
cycles=ORM_CONVERSION_CYCLES,
max_allowed_bytes=HEAP_CONVERTER_MAX_GROWTH_BYTES,
filter_name="converter",
)
assert converter_growth < HEAP_CONVERTER_MAX_GROWTH_BYTES, (
f"Converter heap growth: {converter_growth / BYTES_PER_KB:.1f}KB"
)

    @pytest.mark.slow
@pytest.mark.asyncio
async def test_protobuf_heap_stability(self) -> None:
"""Verify protobuf operations don't leak memory."""
import tracemalloc
tracemalloc.start()
gc.collect()
snapshot_before = tracemalloc.take_snapshot()
run_protobuf_cycles(PROTO_CYCLES)
gc.collect()
snapshot_after = tracemalloc.take_snapshot()
tracemalloc.stop()
proto_growth = calculate_filtered_heap_growth(
snapshot_before, snapshot_after, "noteflow_pb2"
)
log_heap_metrics(
"test_protobuf_heap_stability",
proto_growth,
cycles=PROTO_CYCLES,
max_allowed_bytes=HEAP_PROTO_MAX_GROWTH_BYTES,
filter_name="noteflow_pb2",
)
assert proto_growth < HEAP_PROTO_MAX_GROWTH_BYTES, (
f"Protobuf heap growth: {proto_growth / BYTES_PER_KB:.1f}KB"
)


class TestAsyncContextEdgeCases:
    """Test async context manager edge cases."""

    @pytest.mark.asyncio
async def test_context_exception_cleanup(self) -> None:
"""Verify cleanup runs even when exception raised inside context."""
cleanup_called = False

        @asynccontextmanager
        async def tracked_context() -> AsyncIterator[str]:
            nonlocal cleanup_called
            try:
                yield "session"
            finally:
                cleanup_called = True

        with pytest.raises(ValueError, match="test error"):
async with tracked_context():
raise ValueError("test error")
assert cleanup_called, "Cleanup should run despite exception"

    @pytest.mark.asyncio
async def test_nested_context_cleanup_order(self) -> None:
"""Verify nested contexts clean up in correct (LIFO) order."""
cleanup_order: list[str] = []

        @asynccontextmanager
        async def tracked_context(name: str) -> AsyncIterator[str]:
            try:
                yield name
            finally:
                cleanup_order.append(name)

        async with (
tracked_context("outer"),
tracked_context("middle"),
tracked_context("inner"),
):
pass
assert cleanup_order == ["inner", "middle", "outer"], (
f"Wrong cleanup order: {cleanup_order}"
)

    @pytest.mark.asyncio
async def test_context_cancellation_during_body(self) -> None:
"""Verify cleanup runs when task cancelled inside context."""
cleanup_called = False

        @asynccontextmanager
        async def tracked_context() -> AsyncIterator[str]:
            nonlocal cleanup_called
            try:
                yield "session"
            finally:
                cleanup_called = True

        async def task_with_context() -> None:
            async with tracked_context():
                await asyncio.sleep(SLOW_ENTER_SLEEP_SECONDS)

        task = asyncio.create_task(task_with_context())
await asyncio.sleep(CONTEXT_CANCEL_DELAY_SECONDS)
task.cancel()
with pytest.raises(asyncio.CancelledError, match=""):
await task
assert cleanup_called, "Cleanup should run on cancellation"

    @pytest.mark.asyncio
async def test_context_cancellation_during_enter(self) -> None:
"""Verify proper handling when cancelled during __aenter__."""
enter_started = False

        @asynccontextmanager
        async def slow_enter_context() -> AsyncIterator[str]:
            nonlocal enter_started
            enter_started = True
            await asyncio.sleep(SLOW_ENTER_SLEEP_SECONDS)
            yield "session"

        async def task_with_slow_enter() -> None:
            async with slow_enter_context():
                pass

        task = asyncio.create_task(task_with_slow_enter())
await asyncio.sleep(CONTEXT_CANCEL_DELAY_SECONDS)
task.cancel()
with pytest.raises(asyncio.CancelledError, match=""):
await task
assert enter_started, "Enter should have started"

    @pytest.mark.asyncio
async def test_context_exception_in_cleanup(self) -> None:
"""Verify cleanup exception propagates when cleanup fails.
Note: In Python, when an exception occurs in a finally block during
exception handling, the cleanup exception replaces the original.
This test verifies the cleanup exception properly propagates.
"""
cleanup_executed = False

        @asynccontextmanager
        async def failing_cleanup_context() -> AsyncIterator[str]:
            nonlocal cleanup_executed
            try:
                yield "session"
            finally:
                cleanup_executed = True
                raise RuntimeError("cleanup failed")

        with pytest.raises(RuntimeError, match="cleanup failed"):
async with failing_cleanup_context():
raise ValueError("original error")
assert cleanup_executed, "Cleanup should have executed despite body exception"
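
    # Illustrative note (not part of the test): the superseded ValueError stays
    # reachable through exception chaining, e.g.:
    #
    #     with pytest.raises(RuntimeError) as exc_info:
    #         ...
    #     assert isinstance(exc_info.value.__context__, ValueError)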

    @pytest.mark.asyncio
async def test_concurrent_context_isolation(self) -> None:
"""Verify concurrent context managers don't interfere."""
active_contexts: set[str] = set()
results: dict[str, int] = {"max_concurrent": 0}
context_ids = [f"ctx-{i}" for i in range(CONCURRENT_CONTEXT_COUNT)]
tasks = run_concurrent_context_tasks(context_ids, active_contexts, results)
await asyncio.gather(*tasks)
assert len(active_contexts) == 0, "All contexts should be cleaned up"
assert results["max_concurrent"] == CONCURRENT_CONTEXT_COUNT, (
f"Expected {CONCURRENT_CONTEXT_COUNT} concurrent, got {results['max_concurrent']}"
)


class TestMemoryPressure:
    """Test behavior under memory pressure conditions."""

    @pytest.mark.slow
@pytest.mark.asyncio
async def test_operations_succeed_under_memory_pressure(
self, memory_servicer: NoteFlowServicer
) -> None:
"""Verify streaming operations complete under memory pressure."""
successful, _gc_count = run_operations_under_memory_pressure(
memory_servicer,
cycle_count=STREAMING_CYCLES,
pressure_mb=MEMORY_PRESSURE_ALLOCATION_MB,
)
assert successful == STREAMING_CYCLES, (
f"Only {successful}/{STREAMING_CYCLES} cycles succeeded under pressure"
)

    @pytest.mark.slow
@pytest.mark.asyncio
async def test_state_cleanup_under_pressure(self, memory_servicer: NoteFlowServicer) -> None:
"""Verify cleanup properly releases state even under pressure."""
run_operations_under_memory_pressure(
memory_servicer,
cycle_count=STREAMING_CYCLES,
pressure_mb=MEMORY_PRESSURE_ALLOCATION_MB,
)
gc.collect()
await asyncio.sleep(ASYNC_CLEANUP_DELAY_SECONDS)
assert len(memory_servicer.vad_instances) == 0, "VAD instances leaked under pressure"
assert len(memory_servicer.active_streams) == 0, "Streams leaked under pressure"

    @pytest.mark.asyncio
async def test_gc_collects_temporary_objects(self) -> None:
"""Verify GC properly collects temporary objects."""
gen0_diff, gen1_diff, gen2_diff = run_gc_collection_behavior()
total_uncollected = gen0_diff + gen1_diff + gen2_diff
assert total_uncollected >= 0, "GC count should not be negative"

    @pytest.mark.asyncio
async def test_rss_measurement_returns_valid_value(self) -> None:
"""Verify RSS measurement returns valid value or graceful fallback."""
rss = measure_rss_bytes()
# Either measurement works (positive) or gracefully returns -1
valid_positive = rss > 0
valid_fallback = rss == -1
assert valid_positive or valid_fallback, f"Invalid RSS value: {rss}"
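
# measure_rss_bytes (support/stress_helpers.py) is expected to return current
# resident set size in bytes, or -1 when measurement is unavailable. A
# hypothetical portable sketch of that contract:
#
#     def measure_rss_bytes() -> int:
#         try:
#             import psutil
#             return psutil.Process().memory_info().rss
#         except Exception:
#             return -1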