"""
|
|
Enhanced mock fixtures for comprehensive testing
|
|
|
|
Provides specialized fixtures for Discord interactions, AI responses,
|
|
database states, and complex testing scenarios.
|
|
"""
|
|
|
|
import asyncio
|
|
import random
|
|
from datetime import datetime, timedelta, timezone
|
|
from typing import Any, Dict, List
|
|
from unittest.mock import AsyncMock, MagicMock
|
|
|
|
import pytest
|
|
|
|
from tests.fixtures.mock_discord import (MockBot, MockDiscordMember,
|
|
create_mock_voice_scenario)
|
|
|
|
|


class AIResponseGenerator:
    """Generate realistic AI responses for testing."""

    SAMPLE_QUOTE_ANALYSES = [
        {
            "funny_score": 8.5,
            "dark_score": 1.2,
            "silly_score": 7.8,
            "suspicious_score": 0.5,
            "asinine_score": 2.1,
            "overall_score": 7.9,
            "explanation": "This quote demonstrates excellent comedic timing and wordplay.",
        },
        {
            "funny_score": 6.2,
            "dark_score": 5.8,
            "silly_score": 3.1,
            "suspicious_score": 2.4,
            "asinine_score": 4.7,
            "overall_score": 5.5,
            "explanation": "A darker humor quote with moderate entertainment value.",
        },
        {
            "funny_score": 9.1,
            "dark_score": 0.8,
            "silly_score": 9.3,
            "suspicious_score": 0.2,
            "asinine_score": 8.7,
            "overall_score": 8.8,
            "explanation": "Exceptionally funny and absurd, perfect for light entertainment.",
        },
    ]

    SAMPLE_EMBEDDINGS = [
        [0.1] * 384,  # Mock 384-dimensional embedding
        [0.2] * 384,
        [-0.1] * 384,
        [0.0] * 384,
    ]

    @classmethod
    def generate_quote_analysis(cls, quote_text: str = None) -> Dict[str, Any]:
        """Generate realistic quote analysis response."""
        analysis = random.choice(cls.SAMPLE_QUOTE_ANALYSES).copy()

        if quote_text:
            # Adjust scores based on quote content
            if "funny" in quote_text.lower() or "hilarious" in quote_text.lower():
                analysis["funny_score"] += 1.0
                analysis["overall_score"] += 0.5

            if "dark" in quote_text.lower() or "death" in quote_text.lower():
                analysis["dark_score"] += 2.0

        # Ensure scores stay within bounds
        for key in [
            "funny_score",
            "dark_score",
            "silly_score",
            "suspicious_score",
            "asinine_score",
            "overall_score",
        ]:
            analysis[key] = max(0.0, min(10.0, analysis[key]))

        return analysis

    @classmethod
    def generate_embedding(cls) -> List[float]:
        """Generate mock embedding vector."""
        return random.choice(cls.SAMPLE_EMBEDDINGS)

    @classmethod
    def generate_chat_response(cls, prompt: str = None) -> Dict[str, Any]:
        """Generate mock chat completion response."""
        responses = [
            "This is a helpful AI response to your query.",
            "Based on the context provided, here's my analysis...",
            "I understand your question and here's what I recommend...",
            "After processing the information, my conclusion is...",
        ]

        return {
            "choices": [
                {
                    "message": {
                        "content": random.choice(responses),
                        "role": "assistant",
                    },
                    "finish_reason": "stop",
                }
            ],
            "usage": {"prompt_tokens": 50, "completion_tokens": 20, "total_tokens": 70},
        }
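

# Illustrative usage sketch (hypothetical helper, not part of the fixture API):
# tests can call the generator directly and rely on scores being clamped to the
# 0-10 range and embeddings having the fixed 384 dimensions.
def _example_ai_response_generator_usage():
    analysis = AIResponseGenerator.generate_quote_analysis("That was hilarious")
    assert 0.0 <= analysis["overall_score"] <= 10.0
    assert len(AIResponseGenerator.generate_embedding()) == 384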


class DatabaseStateBuilder:
    """Build complex database states for testing."""

    def __init__(self):
        self.users: List[Dict] = []
        self.quotes: List[Dict] = []
        self.consents: List[Dict] = []
        self.configs: List[Dict] = []

    def add_user(
        self,
        user_id: int,
        username: str,
        guild_id: int,
        consented: bool = True,
        first_name: str = None,
    ) -> "DatabaseStateBuilder":
        """Add a user with consent status."""
        self.users.append(
            {"user_id": user_id, "username": username, "guild_id": guild_id}
        )

        self.consents.append(
            {
                "user_id": user_id,
                "guild_id": guild_id,
                "consent_given": consented,
                "first_name": first_name or username,
                "created_at": datetime.now(timezone.utc),
                "updated_at": datetime.now(timezone.utc),
            }
        )

        return self

    def add_quotes_for_user(
        self,
        user_id: int,
        guild_id: int,
        count: int = 3,
        score_range: tuple = (6.0, 9.0),
    ) -> "DatabaseStateBuilder":
        """Add multiple quotes for a user."""
        username = next(
            (u["username"] for u in self.users if u["user_id"] == user_id),
            f"User{user_id}",
        )

        quote_templates = [
            "This is quote number {} from {}",
            "Another hilarious quote {} by {}",
            "A memorable moment {} from {}",
            "Quote {} that made everyone laugh - {}",
            "Interesting observation {} by {}",
        ]

        for i in range(count):
            min_score, max_score = score_range
            base_score = random.uniform(min_score, max_score)

            quote = {
                "id": len(self.quotes) + 1,
                "user_id": user_id,
                "guild_id": guild_id,
                "channel_id": 987654321,
                "speaker_label": f"SPEAKER_{user_id}",
                "username": username,
                "quote": quote_templates[i % len(quote_templates)].format(
                    i + 1, username
                ),
                "timestamp": datetime.now(timezone.utc) - timedelta(hours=i),
                "funny_score": base_score + random.uniform(-1.0, 1.0),
                "dark_score": random.uniform(0.0, 3.0),
                "silly_score": base_score + random.uniform(-0.5, 2.0),
                "suspicious_score": random.uniform(0.0, 2.0),
                "asinine_score": random.uniform(2.0, 6.0),
                "overall_score": base_score,
                "laughter_duration": random.uniform(1.0, 5.0),
                "laughter_intensity": random.uniform(0.5, 1.0),
                "response_type": self._classify_response_type(base_score),
                "speaker_confidence": random.uniform(0.8, 1.0),
            }

            # Ensure scores are within bounds
            for score_key in [
                "funny_score",
                "dark_score",
                "silly_score",
                "suspicious_score",
                "asinine_score",
                "overall_score",
            ]:
                quote[score_key] = max(0.0, min(10.0, quote[score_key]))

            self.quotes.append(quote)

        return self

    def add_server_config(
        self, guild_id: int, **config_options
    ) -> "DatabaseStateBuilder":
        """Add server configuration."""
        default_config = {
            "guild_id": guild_id,
            "quote_threshold": 6.0,
            "auto_record": False,
            "max_clip_duration": 120,
            "retention_days": 7,
            "response_delay_minutes": 5,
        }
        default_config.update(config_options)
        self.configs.append(default_config)
        return self

    def build_mock_database(self) -> AsyncMock:
        """Build complete mock database with all data."""
        mock_db = AsyncMock()

        # Configure search_quotes
        mock_db.search_quotes.side_effect = lambda guild_id=None, search_term=None, user_id=None, limit=50, **kwargs: self._filter_quotes(
            guild_id, search_term, user_id, limit
        )

        # Configure get_top_quotes
        mock_db.get_top_quotes.side_effect = lambda guild_id, limit=10: sorted(
            [q for q in self.quotes if q["guild_id"] == guild_id],
            key=lambda x: x["overall_score"],
            reverse=True,
        )[:limit]

        # Configure get_random_quote (guard on the guild-filtered list so
        # random.choice never receives an empty sequence)
        mock_db.get_random_quote.side_effect = lambda guild_id: (
            random.choice([q for q in self.quotes if q["guild_id"] == guild_id])
            if any(q["guild_id"] == guild_id for q in self.quotes)
            else None
        )

        # Configure get_quote_stats
        mock_db.get_quote_stats.side_effect = self._get_quote_stats

        # Configure consent operations
        mock_db.check_user_consent.side_effect = self._check_consent
        mock_db.get_consented_users.side_effect = lambda guild_id: [
            c for c in self.consents if c["guild_id"] == guild_id and c["consent_given"]
        ]

        # Configure server config
        mock_db.get_server_config.side_effect = lambda guild_id: next(
            (c for c in self.configs if c["guild_id"] == guild_id),
            {"quote_threshold": 6.0, "auto_record": False},
        )

        mock_db.get_admin_stats.side_effect = self._get_admin_stats

        return mock_db

    def _filter_quotes(
        self, guild_id: int, search_term: str, user_id: int, limit: int
    ) -> List[Dict]:
        """Filter quotes based on search criteria."""
        filtered = [q for q in self.quotes if q["guild_id"] == guild_id]

        if search_term:
            filtered = [
                q for q in filtered if search_term.lower() in q["quote"].lower()
            ]

        if user_id:
            filtered = [q for q in filtered if q["user_id"] == user_id]

        # Sort by timestamp descending and apply limit
        filtered = sorted(filtered, key=lambda x: x["timestamp"], reverse=True)
        return filtered[:limit]

    def _check_consent(self, user_id: int, guild_id: int) -> bool:
        """Check if user has given consent."""
        consent = next(
            (
                c
                for c in self.consents
                if c["user_id"] == user_id and c["guild_id"] == guild_id
            ),
            None,
        )
        return consent["consent_given"] if consent else False

    def _get_quote_stats(self, guild_id: int) -> Dict[str, Any]:
        """Generate quote statistics."""
        guild_quotes = [q for q in self.quotes if q["guild_id"] == guild_id]

        if not guild_quotes:
            return {
                "total_quotes": 0,
                "unique_speakers": 0,
                "avg_score": 0.0,
                "max_score": 0.0,
                "quotes_this_week": 0,
                "quotes_this_month": 0,
            }

        now = datetime.now(timezone.utc)
        week_ago = now - timedelta(days=7)
        month_ago = now - timedelta(days=30)

        return {
            "total_quotes": len(guild_quotes),
            "unique_speakers": len(set(q["user_id"] for q in guild_quotes)),
            "avg_score": sum(q["overall_score"] for q in guild_quotes)
            / len(guild_quotes),
            "max_score": max(q["overall_score"] for q in guild_quotes),
            "quotes_this_week": len(
                [q for q in guild_quotes if q["timestamp"] >= week_ago]
            ),
            "quotes_this_month": len(
                [q for q in guild_quotes if q["timestamp"] >= month_ago]
            ),
        }

    def _get_admin_stats(self) -> Dict[str, Any]:
        """Generate admin statistics."""
        return {
            "total_quotes": len(self.quotes),
            "unique_speakers": len(set(q["user_id"] for q in self.quotes)),
            "active_consents": len([c for c in self.consents if c["consent_given"]]),
            "total_guilds": len(set(q["guild_id"] for q in self.quotes)),
            "avg_score_global": (
                sum(q["overall_score"] for q in self.quotes) / len(self.quotes)
                if self.quotes
                else 0.0
            ),
        }

    def _classify_response_type(self, score: float) -> str:
        """Classify response type based on score."""
        if score >= 8.5:
            return "high_quality"
        elif score >= 6.0:
            return "moderate"
        else:
            return "low_quality"
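

# Illustrative usage sketch (hypothetical helper): the builder's fluent interface
# composes a guild state and returns an AsyncMock wired to the accumulated data.
async def _example_database_state_builder_usage():
    db = (
        DatabaseStateBuilder()
        .add_user(1, "Alice", 42, consented=True)
        .add_quotes_for_user(1, 42, count=2)
        .build_mock_database()
    )
    quotes = await db.search_quotes(guild_id=42)
    assert len(quotes) == 2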


@pytest.fixture
def ai_response_generator():
    """Fixture providing AI response generation."""
    return AIResponseGenerator()


@pytest.fixture
def database_state_builder():
    """Fixture providing database state builder."""
    return DatabaseStateBuilder()


@pytest.fixture
def mock_ai_manager(ai_response_generator):
    """Enhanced AI manager mock with realistic responses."""
    ai_manager = AsyncMock()

    # Generate text with realistic responses
    ai_manager.generate_text.side_effect = (
        lambda prompt, **kwargs: ai_response_generator.generate_chat_response(prompt)
    )

    # Generate embeddings
    ai_manager.generate_embedding.side_effect = (
        lambda text: ai_response_generator.generate_embedding()
    )

    # Analyze quotes
    ai_manager.analyze_quote.side_effect = (
        lambda text: ai_response_generator.generate_quote_analysis(text)
    )

    # Health check
    ai_manager.check_health.return_value = {
        "status": "healthy",
        "providers": ["openai", "anthropic", "groq"],
        "response_time_ms": 150,
    }

    return ai_manager


@pytest.fixture
def populated_database_mock(database_state_builder):
    """Database mock with realistic populated data."""
    builder = database_state_builder

    # Create a realistic server setup
    guild_id = 123456789

    # Add server configuration
    builder.add_server_config(guild_id, quote_threshold=6.5, auto_record=True)

    # Add users with varying consent
    builder.add_user(
        111222333, "FunnyUser", guild_id, consented=True, first_name="Alex"
    )
    builder.add_user(
        444555666, "QuoteKing", guild_id, consented=True, first_name="Jordan"
    )
    builder.add_user(777888999, "LurkingUser", guild_id, consented=False)
    builder.add_user(123987456, "NewUser", guild_id, consented=True, first_name="Sam")

    # Add quotes for consented users
    builder.add_quotes_for_user(111222333, guild_id, count=5, score_range=(7.0, 9.0))
    builder.add_quotes_for_user(444555666, guild_id, count=8, score_range=(6.0, 8.5))
    builder.add_quotes_for_user(123987456, guild_id, count=2, score_range=(5.0, 7.0))

    return builder.build_mock_database()
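

# Illustrative usage sketch (hypothetical test; the leading underscore keeps it out
# of pytest's default collection): async database methods are awaited as usual.
async def _example_populated_database_usage(populated_database_mock):
    top = await populated_database_mock.get_top_quotes(123456789, limit=5)
    assert len(top) <= 5
    assert all(q["guild_id"] == 123456789 for q in top)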


@pytest.fixture
def complex_voice_scenario():
    """Complex voice channel scenario with multiple states."""
    scenario = create_mock_voice_scenario(num_members=5)

    # Add different permission levels
    scenario["members"][0].guild_permissions.administrator = True  # Admin
    scenario["members"][1].guild_permissions.manage_messages = True  # Moderator
    # Others are regular users

    # Add different consent states
    consent_states = [True, True, False, True, False]  # Mixed consent
    for i, member in enumerate(scenario["members"]):
        member.has_consent = consent_states[i]

    # Add voice states
    scenario["members"][0].voice.self_mute = False
    scenario["members"][1].voice.self_mute = True  # Muted user
    scenario["members"][2].voice.self_deaf = True  # Deafened user

    return scenario


@pytest.fixture
def mock_consent_manager():
    """Enhanced consent manager mock."""
    consent_manager = AsyncMock()

    # Default consent states
    consent_states = {
        (111222333, 123456789): True,
        (444555666, 123456789): True,
        (777888999, 123456789): False,
        (123987456, 123456789): True,
    }

    # Check consent
    consent_manager.check_consent.side_effect = (
        lambda user_id, guild_id: consent_states.get((user_id, guild_id), False)
    )

    # Global opt-outs (empty by default)
    consent_manager.global_opt_outs = set()

    # Grant/revoke operations
    consent_manager.grant_consent.return_value = True
    consent_manager.revoke_consent.return_value = True
    consent_manager.set_global_opt_out.return_value = True

    # Get consent status
    consent_manager.get_consent_status.side_effect = lambda user_id, guild_id: {
        "consent_given": consent_states.get((user_id, guild_id), False),
        "global_opt_out": user_id in consent_manager.global_opt_outs,
        "has_record": (user_id, guild_id) in consent_states,
        "consent_timestamp": (
            datetime.now(timezone.utc)
            if consent_states.get((user_id, guild_id))
            else None
        ),
        "first_name": f"User{user_id}",
        "created_at": datetime.now(timezone.utc) - timedelta(days=30),
    }

    # Data operations
    consent_manager.export_user_data.side_effect = lambda user_id, guild_id: {
        "user_id": user_id,
        "guild_id": guild_id,
        "export_timestamp": datetime.now(timezone.utc).isoformat(),
        "quotes": [],
        "consent_records": [],
        "feedback_records": [],
    }

    consent_manager.delete_user_data.side_effect = lambda user_id, guild_id: {
        "quotes": 3,
        "feedback_records": 1,
        "speaker_profiles": 1,
    }

    return consent_manager


@pytest.fixture
def mock_response_scheduler():
    """Enhanced response scheduler mock."""
    scheduler = AsyncMock()

    # Status information
    scheduler.get_status.return_value = {
        "is_running": True,
        "queue_size": 2,
        "next_rotation": (datetime.now(timezone.utc) + timedelta(hours=4)).timestamp(),
        "next_daily": (datetime.now(timezone.utc) + timedelta(hours=20)).timestamp(),
        "processed_today": 15,
        "success_rate": 0.95,
    }

    # Task control
    scheduler.start_tasks.return_value = True
    scheduler.stop_tasks.return_value = True

    # Scheduling
    scheduler.schedule_custom_response.return_value = True

    return scheduler


@pytest.fixture
def full_bot_setup(
    mock_ai_manager,
    populated_database_mock,
    mock_consent_manager,
    mock_response_scheduler,
):
    """Complete bot setup with all services mocked."""
    bot = MockBot()

    # Attach all services
    bot.ai_manager = mock_ai_manager
    bot.db_manager = populated_database_mock
    bot.consent_manager = mock_consent_manager
    bot.response_scheduler = mock_response_scheduler
    bot.metrics = MagicMock()

    # Audio services
    bot.audio_recorder = MagicMock()
    bot.audio_recorder.get_status = MagicMock(
        return_value={"is_active": True, "active_sessions": 1, "buffer_size": 25.6}
    )

    bot.transcription_service = MagicMock()

    # Memory manager
    bot.memory_manager = AsyncMock()
    bot.memory_manager.get_stats.return_value = {
        "total_memories": 50,
        "personality_profiles": 10,
    }

    # Metrics
    bot.metrics.get_current_metrics.return_value = {
        "uptime_hours": 24.5,
        "memory_mb": 128.3,
        "cpu_percent": 12.1,
    }

    # TTS service
    bot.tts_service = AsyncMock()

    return bot
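

# Illustrative usage sketch (hypothetical test): the fully wired bot exposes the
# mocked services, so handlers can be exercised without real Discord or AI I/O.
async def _example_full_bot_setup_usage(full_bot_setup):
    bot = full_bot_setup
    assert await bot.consent_manager.check_consent(111222333, 123456789) is True
    stats = await bot.db_manager.get_quote_stats(123456789)
    assert stats["total_quotes"] > 0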


@pytest.fixture
def permission_test_users():
    """Users with different permission levels for testing."""
    # Owner user
    owner = MockDiscordMember(user_id=123456789012345678, username="BotOwner")
    owner.guild_permissions.administrator = True

    # Admin user
    admin = MockDiscordMember(user_id=111111111, username="AdminUser")
    admin.guild_permissions.administrator = True
    admin.guild_permissions.manage_guild = True

    # Moderator user
    moderator = MockDiscordMember(user_id=222222222, username="ModeratorUser")
    moderator.guild_permissions.manage_messages = True
    moderator.guild_permissions.manage_channels = True

    # Regular user
    regular = MockDiscordMember(user_id=333333333, username="RegularUser")

    # Restricted user (no send messages)
    restricted = MockDiscordMember(user_id=444444444, username="RestrictedUser")
    restricted.guild_permissions.send_messages = False

    return {
        "owner": owner,
        "admin": admin,
        "moderator": moderator,
        "regular": regular,
        "restricted": restricted,
    }


@pytest.fixture
def error_simulation_manager():
    """Manager for simulating various error conditions."""

    class ErrorSimulator:
        def __init__(self):
            self.active_errors = {}

        def simulate_database_error(self, error_type: str = "connection"):
            """Simulate database errors."""
            if error_type == "connection":
                return Exception("Database connection failed")
            elif error_type == "timeout":
                return asyncio.TimeoutError("Query timed out")
            elif error_type == "integrity":
                return Exception("Constraint violation")
            else:
                return Exception("Unknown database error")

        def simulate_discord_api_error(self, error_type: str = "forbidden"):
            """Simulate Discord API errors."""
            if error_type == "forbidden":
                from discord import Forbidden

                return Forbidden(MagicMock(), "Insufficient permissions")
            elif error_type == "not_found":
                from discord import NotFound

                return NotFound(MagicMock(), "Resource not found")
            elif error_type == "rate_limit":
                from discord import HTTPException

                return HTTPException(MagicMock(), "Rate limited")
            else:
                from discord import DiscordException

                return DiscordException("Unknown Discord error")

        def simulate_ai_service_error(self, error_type: str = "api_error"):
            """Simulate AI service errors."""
            if error_type == "api_error":
                return Exception("AI API request failed")
            elif error_type == "rate_limit":
                return Exception("AI API rate limit exceeded")
            elif error_type == "invalid_response":
                return Exception("Invalid response format from AI service")
            else:
                return Exception("Unknown AI service error")

    return ErrorSimulator()
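

# Illustrative usage sketch (hypothetical test): a simulated error instance can be
# wired into any AsyncMock via side_effect so the error-handling path is exercised.
async def _example_error_simulation_usage(error_simulation_manager):
    failing_db = AsyncMock()
    failing_db.search_quotes.side_effect = (
        error_simulation_manager.simulate_database_error("timeout")
    )
    with pytest.raises(asyncio.TimeoutError):
        await failing_db.search_quotes(guild_id=123456789)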


@pytest.fixture
def performance_test_data():
    """Generate data for performance testing."""

    class PerformanceDataGenerator:
        @staticmethod
        def generate_large_quote_dataset(count: int = 1000) -> List[Dict]:
            """Generate large dataset of quotes for performance testing."""
            quotes = []
            base_time = datetime.now(timezone.utc)

            for i in range(count):
                quotes.append(
                    {
                        "id": i + 1,
                        "user_id": 111222333 + (i % 100),  # 100 different users
                        "guild_id": 123456789,
                        "channel_id": 987654321,
                        "speaker_label": f"SPEAKER_{i % 100}",
                        "username": f"PerfTestUser{i % 100}",
                        "quote": f"Performance test quote number {i} with some additional text to make it more realistic",
                        "timestamp": base_time - timedelta(minutes=i),
                        "funny_score": 5.0 + (i % 50) / 10,
                        "overall_score": 5.0 + (i % 50) / 10,
                        "response_type": "moderate",
                    }
                )

            return quotes

        @staticmethod
        def generate_concurrent_operations(count: int = 50) -> List[Dict]:
            """Generate operations for concurrent testing."""
            operations = []

            for i in range(count):
                operations.append(
                    {
                        "type": "quote_search",
                        "params": {
                            "guild_id": 123456789,
                            "search_term": f"test{i % 10}",
                            "limit": 10,
                        },
                    }
                )

            return operations

    return PerformanceDataGenerator()


# Convenience function to create complete test scenarios
def create_comprehensive_test_scenario(
    guild_count: int = 1, users_per_guild: int = 5, quotes_per_user: int = 3
) -> Dict[str, Any]:
    """Create a comprehensive test scenario with multiple guilds, users, and quotes."""
    scenario = {"guilds": [], "users": [], "quotes": [], "consents": []}

    builder = DatabaseStateBuilder()

    for guild_i in range(guild_count):
        guild_id = 123456789 + guild_i

        # Add server config
        builder.add_server_config(guild_id, quote_threshold=6.0 + guild_i)

        for user_i in range(users_per_guild):
            user_id = 111222333 + (guild_i * 1000) + user_i
            username = f"User{guild_i}_{user_i}"

            # Vary consent status
            consented = user_i % 3 != 0  # 2/3 of users consented

            builder.add_user(user_id, username, guild_id, consented)

            if consented:
                builder.add_quotes_for_user(user_id, guild_id, quotes_per_user)

    scenario["database"] = builder.build_mock_database()
    scenario["builder"] = builder

    return scenario
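

# Illustrative usage sketch (hypothetical helper): build a multi-guild scenario and
# query the resulting mock database directly.
async def _example_comprehensive_scenario_usage():
    scenario = create_comprehensive_test_scenario(guild_count=2, users_per_guild=3)
    stats = await scenario["database"].get_admin_stats()
    assert stats["total_guilds"] == 2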