"""
|
|
Testing Framework for Discord Voice Chat Quote Bot
|
|
|
|
Comprehensive testing suite including unit tests, integration tests,
|
|
load testing, and performance benchmarks for all bot components.
|
|
"""
|
|
|
|

import asyncio
import logging
import os
import tempfile
from datetime import datetime, timedelta, timezone
from typing import Any
from unittest.mock import AsyncMock, MagicMock

import pytest
import pytest_asyncio

# Disable logging during tests
logging.disable(logging.CRITICAL)


class TestConfig:
    """Test configuration and constants"""

    # Test database settings
    TEST_DB_URL = "postgresql://test_user:test_pass@localhost:5432/test_quote_bot"

    # Test Discord settings
    TEST_GUILD_ID = 123456789
    TEST_CHANNEL_ID = 987654321
    TEST_USER_ID = 111222333

    # Test file paths
    TEST_AUDIO_FILE = "test_audio.wav"
    TEST_DATA_DIR = "test_data"

    # AI service mocks
    MOCK_AI_RESPONSE = {
        "choices": [{"message": {"content": "This is a test response"}}]
    }

    # Quote analysis mock
    MOCK_QUOTE_SCORES = {
        "funny_score": 7.5,
        "dark_score": 2.1,
        "silly_score": 8.3,
        "suspicious_score": 1.2,
        "asinine_score": 3.4,
        "overall_score": 6.8,
    }


@pytest.fixture(scope="session")
def event_loop():
    """Create an instance of the default event loop for the test session."""
    loop = asyncio.get_event_loop_policy().new_event_loop()
    yield loop
    loop.close()


@pytest_asyncio.fixture
async def mock_db_manager():
    """Mock database manager for testing"""
    db_manager = AsyncMock()

    # Mock common database operations
    db_manager.execute_query.return_value = True
    db_manager.get_connection.return_value = AsyncMock()
    db_manager.close_connection.return_value = None

    # Mock health check
    async def mock_health_check():
        return {"status": "healthy", "connections": 5}

    db_manager.check_health = mock_health_check

    return db_manager


@pytest_asyncio.fixture
async def mock_ai_manager():
    """Mock AI manager for testing"""
    ai_manager = AsyncMock()

    # Mock text generation
    ai_manager.generate_text.return_value = TestConfig.MOCK_AI_RESPONSE

    # Mock embeddings
    ai_manager.generate_embedding.return_value = [0.1] * 384  # Mock 384-dim embedding

    # Mock health check
    async def mock_health_check():
        return {"status": "healthy", "providers": ["openai", "anthropic"]}

    ai_manager.check_health = mock_health_check

    return ai_manager
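

# Example (illustrative sketch, not part of the fixture contract): tests can
# await the mocked AI calls directly and compare against the canned payloads
# defined in TestConfig.
@pytest.mark.asyncio
async def test_mock_ai_manager_generates_text(mock_ai_manager):
    response = await mock_ai_manager.generate_text("prompt")
    assert response == TestConfig.MOCK_AI_RESPONSE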


@pytest_asyncio.fixture
async def mock_discord_bot():
    """Mock Discord bot for testing"""
    bot = AsyncMock()

    # Mock bot properties
    bot.user = MagicMock()
    bot.user.id = 987654321
    bot.user.name = "TestBot"

    # Mock guild
    guild = MagicMock()
    guild.id = TestConfig.TEST_GUILD_ID
    guild.name = "Test Guild"
    bot.get_guild.return_value = guild

    # Mock channel
    channel = AsyncMock()
    channel.id = TestConfig.TEST_CHANNEL_ID
    channel.name = "test-channel"
    bot.get_channel.return_value = channel

    # Mock user
    user = MagicMock()
    user.id = TestConfig.TEST_USER_ID
    user.name = "testuser"
    bot.get_user.return_value = user

    return bot


@pytest_asyncio.fixture
async def mock_discord_interaction():
    """Mock Discord interaction for testing"""
    interaction = AsyncMock()

    # Mock interaction properties
    interaction.guild_id = TestConfig.TEST_GUILD_ID
    interaction.channel_id = TestConfig.TEST_CHANNEL_ID
    interaction.user.id = TestConfig.TEST_USER_ID
    interaction.user.name = "testuser"
    interaction.user.guild_permissions.administrator = True

    # Mock response methods
    interaction.response.defer = AsyncMock()
    interaction.response.send_message = AsyncMock()
    interaction.followup.send = AsyncMock()
    interaction.edit_original_response = AsyncMock()

    return interaction
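

# Example (illustrative sketch): a command test can request the interaction
# fixture and assert against its AsyncMock response methods. Only the mock is
# exercised here; a real test would invoke the command under test instead.
@pytest.mark.asyncio
async def test_mock_interaction_defer_is_tracked(mock_discord_interaction):
    await mock_discord_interaction.response.defer(ephemeral=True)
    mock_discord_interaction.response.defer.assert_awaited_once_with(ephemeral=True)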


@pytest.fixture
def temp_audio_file():
    """Create temporary audio file for testing"""
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
        # Write minimal WAV header
        f.write(b"RIFF")
        f.write((36).to_bytes(4, "little"))
        f.write(b"WAVE")
        f.write(b"fmt ")
        f.write((16).to_bytes(4, "little"))
        f.write((1).to_bytes(2, "little"))  # PCM
        f.write((1).to_bytes(2, "little"))  # mono
        f.write((44100).to_bytes(4, "little"))  # sample rate
        f.write((88200).to_bytes(4, "little"))  # byte rate
        f.write((2).to_bytes(2, "little"))  # block align
        f.write((16).to_bytes(2, "little"))  # bits per sample
        f.write(b"data")
        f.write((0).to_bytes(4, "little"))  # data size

        temp_path = f.name

    yield temp_path

    # Cleanup
    if os.path.exists(temp_path):
        os.unlink(temp_path)
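

# Example (illustrative sketch): the fixture yields a path to a minimal,
# zero-sample WAV file, so consumers can open and inspect it safely.
def test_temp_audio_file_has_wav_header(temp_audio_file):
    with open(temp_audio_file, "rb") as f:
        header = f.read(12)
    assert header[:4] == b"RIFF"
    assert header[8:12] == b"WAVE"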


@pytest.fixture
def sample_quote_data():
    """Sample quote data for testing"""
    return {
        "id": 1,
        "user_id": TestConfig.TEST_USER_ID,
        "guild_id": TestConfig.TEST_GUILD_ID,
        "channel_id": TestConfig.TEST_CHANNEL_ID,
        "speaker_label": "SPEAKER_01",
        "username": "testuser",
        "quote": "This is a test quote that should be analyzed",
        "timestamp": datetime.now(timezone.utc),
        "funny_score": 7.5,
        "dark_score": 2.1,
        "silly_score": 8.3,
        "suspicious_score": 1.2,
        "asinine_score": 3.4,
        "overall_score": 6.8,
        "laughter_duration": 2.5,
        "laughter_intensity": 0.8,
        "response_type": "high_quality",
        "speaker_confidence": 0.95,
    }


class TestUtilities:
    """Utility functions for testing"""

    @staticmethod
    def create_mock_audio_data(
        duration_seconds: float = 1.0, sample_rate: int = 44100
    ) -> bytes:
        """Create mock audio data for testing"""
        import math
        import struct

        samples = int(duration_seconds * sample_rate)
        audio_data = []

        for i in range(samples):
            # Generate a simple sine wave
            t = i / sample_rate
            sample = int(32767 * math.sin(2 * math.pi * 440 * t))  # 440 Hz tone
            audio_data.append(struct.pack("<h", sample))

        return b"".join(audio_data)

    @staticmethod
    def create_mock_transcription_result():
        """Create mock transcription result"""
        return {
            "segments": [
                {
                    "start_time": 0.0,
                    "end_time": 2.5,
                    "speaker_label": "SPEAKER_01",
                    "text": "This is a test quote",
                    "confidence": 0.95,
                    "word_count": 5,
                },
                {
                    "start_time": 3.0,
                    "end_time": 5.5,
                    "speaker_label": "SPEAKER_02",
                    "text": "This is another speaker",
                    "confidence": 0.88,
                    "word_count": 4,
                },
            ],
            "duration": 6.0,
            "processing_time": 1.2,
        }

    @staticmethod
    def create_mock_diarization_result():
        """Create mock speaker diarization result"""
        return {
            "speaker_segments": [
                {
                    "start_time": 0.0,
                    "end_time": 2.5,
                    "speaker_label": "SPEAKER_01",
                    "confidence": 0.95,
                    "user_id": TestConfig.TEST_USER_ID,
                },
                {
                    "start_time": 3.0,
                    "end_time": 5.5,
                    "speaker_label": "SPEAKER_02",
                    "confidence": 0.88,
                    "user_id": None,
                },
            ],
            "unique_speakers": 2,
            "processing_time": 0.8,
        }

    @staticmethod
    def assert_quote_scores_valid(scores: dict[str, float]):
        """Assert that quote scores are within valid ranges"""
        score_fields = [
            "funny_score",
            "dark_score",
            "silly_score",
            "suspicious_score",
            "asinine_score",
            "overall_score",
        ]

        for field in score_fields:
            assert field in scores, f"Missing score field: {field}"
            assert (
                0.0 <= scores[field] <= 10.0
            ), f"Score {field} out of range: {scores[field]}"

    @staticmethod
    def assert_valid_timestamp(timestamp):
        """Assert that timestamp is valid, timezone-aware, and recent"""
        if isinstance(timestamp, str):
            timestamp = datetime.fromisoformat(timestamp.replace("Z", "+00:00"))

        assert isinstance(timestamp, datetime), "Timestamp must be datetime object"

        # Treat naive timestamps as UTC so the comparison below never mixes
        # naive and aware datetimes
        if timestamp.tzinfo is None:
            timestamp = timestamp.replace(tzinfo=timezone.utc)

        # Check that timestamp is within last 24 hours (for test purposes)
        now = datetime.now(timezone.utc)
        assert (
            (now - timedelta(hours=24)) <= timestamp <= (now + timedelta(minutes=1))
        ), "Timestamp not recent"
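

# Example (illustrative sketch): the validators compose with the canned data
# above, e.g. checking that the mock score payload is within range.
def test_mock_quote_scores_are_valid():
    TestUtilities.assert_quote_scores_valid(TestConfig.MOCK_QUOTE_SCORES)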


class MockContextManager:
    """Mock context manager for testing async context managers"""

    def __init__(self, return_value=None):
        self.return_value = return_value

    async def __aenter__(self):
        return self.return_value

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        return False
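

# Example (illustrative sketch): MockContextManager can stand in for async
# context managers in code under test, such as acquiring a pooled database
# connection.
@pytest.mark.asyncio
async def test_mock_context_manager_yields_value():
    sentinel = object()
    async with MockContextManager(sentinel) as value:
        assert value is sentinel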


class PerformanceBenchmark:
    """Performance benchmarking utilities"""

    def __init__(self):
        self.benchmarks = {}

    async def benchmark_async_function(self, func, *args, iterations=100, **kwargs):
        """Benchmark an async function"""
        import time

        times: list[float] = []

        for _ in range(iterations):
            start_time = time.perf_counter()
            await func(*args, **kwargs)
            end_time = time.perf_counter()
            times.append(end_time - start_time)

        avg_time = sum(times) / len(times)
        min_time = min(times)
        max_time = max(times)

        return {
            "average": avg_time,
            "minimum": min_time,
            "maximum": max_time,
            "iterations": iterations,
            "total_time": sum(times),
        }

    def assert_performance_threshold(self, benchmark_result: dict, max_avg_time: float):
        """Assert that benchmark meets performance threshold"""
        assert (
            benchmark_result["average"] <= max_avg_time
        ), f"Performance threshold exceeded: {benchmark_result['average']:.4f}s > {max_avg_time}s"
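

# Example (illustrative sketch): benchmarking a trivial coroutine against a
# deliberately generous threshold; real tests would pass the component under
# test instead of the no-op.
@pytest.mark.asyncio
async def test_benchmark_smoke():
    async def noop():
        await asyncio.sleep(0)

    benchmark = PerformanceBenchmark()
    result = await benchmark.benchmark_async_function(noop, iterations=10)
    benchmark.assert_performance_threshold(result, max_avg_time=1.0)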


# Custom pytest markers are defined in the pytest.ini configuration file


# Test data generators
def generate_test_users(count: int = 10) -> list[dict[str, Any]]:
    """Generate test user data"""
    users = []
    for i in range(count):
        users.append(
            {
                "id": TestConfig.TEST_USER_ID + i,
                "username": f"testuser{i}",
                "guild_id": TestConfig.TEST_GUILD_ID,
                "consent_given": i % 2 == 0,  # Alternate consent
                "first_name": f"User{i}",
                "created_at": datetime.now(timezone.utc) - timedelta(days=i),
            }
        )
    return users


def generate_test_quotes(count: int = 50) -> list[dict[str, Any]]:
    """Generate test quote data"""
    quotes = []
    quote_templates = [
        "This is test quote number {}",
        "Another funny quote {}",
        "A dark humor example {}",
        "Silly statement number {}",
        "Suspicious comment {}",
    ]

    for i in range(count):
        template = quote_templates[i % len(quote_templates)]
        quotes.append(
            {
                "id": i + 1,
                "user_id": TestConfig.TEST_USER_ID + (i % 10),
                "guild_id": TestConfig.TEST_GUILD_ID,
                "quote": template.format(i),
                "timestamp": datetime.now(timezone.utc) - timedelta(hours=i),
                "funny_score": (i % 10) + 1,
                "dark_score": ((i * 2) % 10) + 1,
                "silly_score": ((i * 3) % 10) + 1,
                "suspicious_score": ((i * 4) % 10) + 1,
                "asinine_score": ((i * 5) % 10) + 1,
                "overall_score": ((i * 6) % 10) + 1,
            }
        )

    return quotes
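

# Example (illustrative sketch): the generators produce deterministic bulk
# fixtures whose scores stay within the validated 0-10 range.
def test_generated_quotes_have_valid_scores():
    for quote in generate_test_quotes(count=10):
        scores = {k: float(v) for k, v in quote.items() if k.endswith("_score")}
        TestUtilities.assert_quote_scores_valid(scores)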


# Test configuration
pytest_plugins = ["pytest_asyncio", "pytest_mock", "pytest_cov"]