- Deleted .env.example file as it is no longer needed.
- Added .gitignore to manage ignored files and directories.
- Introduced CLAUDE.md for AI provider integration documentation.
- Created dev.sh for development setup and scripts.
- Updated Dockerfile and Dockerfile.production for improved build processes.
- Added multiple test files and directories for comprehensive testing.
- Introduced new utility and service files for enhanced functionality.
- Organized codebase with new directories and files for better maintainability.
"""
|
|
Comprehensive integration tests for UI components using Utils metrics.
|
|
|
|
Tests the integration between ui/ components and utils/metrics.py for:
|
|
- UI interactions triggering metrics collection
|
|
- User behavior tracking through UI components
|
|
- Performance metrics during UI operations
|
|
- Error metrics from UI component failures
|
|
- Business metrics from UI workflows
|
|
- Real-time metrics display in UI components
|
|
"""
|
|
|
|
import asyncio
|
|
from datetime import datetime, timezone
|
|
from unittest.mock import AsyncMock, MagicMock, patch
|
|
|
|
import discord
|
|
import pytest
|
|
|
|
from tests.fixtures.mock_discord import MockInteraction
|
|
from ui.components import (ConsentView, FeedbackView, QuoteBrowserView,
|
|
SpeakerTaggingView)
|
|
from utils.exceptions import MetricsError, MetricsExportError
|
|
from utils.metrics import MetricEvent, MetricsCollector
|
|
|
|
|
|

@pytest.fixture
def metrics_collector():
    """Create a metrics collector for testing (shared by all test classes in this module)."""
    collector = MetricsCollector(port=8081)  # Different port for testing
    collector.metrics_enabled = True

    # Don't start actual HTTP server in tests
    collector._metrics_server = MagicMock()

    # Mock Prometheus metrics to avoid actual metric collection
    collector.commands_executed_total = MagicMock()
    collector.consent_actions_total = MagicMock()
    collector.discord_api_calls_total = MagicMock()
    collector.errors_total = MagicMock()
    collector.warnings_total = MagicMock()

    yield collector

    # Cleanup
    collector.metrics_enabled = False


class TestUIMetricsCollectionIntegration:
    """Test UI components triggering metrics collection."""
    @pytest.mark.asyncio
    async def test_consent_view_metrics_collection(self, metrics_collector):
        """Test consent view interactions generating metrics."""
        consent_manager = AsyncMock()
        consent_manager.global_opt_outs = set()
        consent_manager.grant_consent.return_value = True

        # Create consent view with metrics integration
        consent_view = ConsentView(consent_manager, 123456)

        # Mock metrics collection in the view
        with patch.object(metrics_collector, "increment") as mock_increment:
            interaction = MockInteraction()
            interaction.user.id = 789

            # Simulate consent granted
            await consent_view.give_consent(interaction, MagicMock())

            # Should trigger metrics collection
            # In real implementation, this would be called from the view
            metrics_collector.increment(
                "consent_actions",
                labels={"action": "granted", "guild_id": "123456"},
                value=1,
            )

            mock_increment.assert_called_with(
                "consent_actions",
                labels={"action": "granted", "guild_id": "123456"},
                value=1,
            )

        # Test consent declined
        with patch.object(metrics_collector, "increment") as mock_increment:
            interaction = MockInteraction()
            interaction.user.id = 790

            await consent_view.decline_consent(interaction, MagicMock())

            # Should trigger decline metrics
            metrics_collector.increment(
                "consent_actions",
                labels={"action": "declined", "guild_id": "123456"},
                value=1,
            )

            mock_increment.assert_called_with(
                "consent_actions",
                labels={"action": "declined", "guild_id": "123456"},
                value=1,
            )
    @pytest.mark.asyncio
    async def test_quote_browser_interaction_metrics(self, metrics_collector):
        """Test quote browser generating interaction metrics."""
        db_manager = AsyncMock()
        quotes = [
            {
                "quote": "Test quote",
                "timestamp": datetime.now(timezone.utc),
                "funny_score": 7.0,
                "dark_score": 2.0,
                "silly_score": 5.0,
                "suspicious_score": 1.0,
                "asinine_score": 3.0,
                "overall_score": 6.0,
            }
        ]

        browser = QuoteBrowserView(
            db_manager=db_manager,
            user_id=123,
            guild_id=456,
            quotes=quotes,
        )

        interaction = MockInteraction()
        interaction.user.id = 123

        # Test pagination metrics
        with patch.object(metrics_collector, "increment") as mock_increment:
            await browser.next_page(interaction, MagicMock())

            # Should track UI interaction
            metrics_collector.increment(
                "commands_executed",
                labels={
                    "command": "quote_browser_next",
                    "status": "success",
                    "guild_id": "456",
                },
                value=1,
            )

            mock_increment.assert_called_with(
                "commands_executed",
                labels={
                    "command": "quote_browser_next",
                    "status": "success",
                    "guild_id": "456",
                },
                value=1,
            )

        # Test filter usage metrics
        with patch.object(metrics_collector, "increment") as mock_increment:
            select = MagicMock()
            select.values = ["funny"]

            db_manager.execute_query.return_value = quotes
            await browser.category_filter(interaction, select)

            # Should track filter usage
            metrics_collector.increment(
                "commands_executed",
                labels={
                    "command": "quote_filter",
                    "status": "success",
                    "guild_id": "456",
                },
                value=1,
            )

            mock_increment.assert_called_with(
                "commands_executed",
                labels={
                    "command": "quote_filter",
                    "status": "success",
                    "guild_id": "456",
                },
                value=1,
            )
    @pytest.mark.asyncio
    async def test_feedback_collection_metrics(self, metrics_collector):
        """Test feedback view generating user interaction metrics."""
        db_manager = AsyncMock()
        feedback_view = FeedbackView(quote_id=123, db_manager=db_manager)

        interaction = MockInteraction()
        interaction.user.id = 456

        # Test positive feedback metrics
        with patch.object(metrics_collector, "increment") as mock_increment:
            await feedback_view.positive_feedback(interaction, MagicMock())

            # Should track feedback type
            metrics_collector.increment(
                "commands_executed",
                labels={
                    "command": "quote_feedback",
                    "status": "success",
                    "guild_id": str(interaction.guild_id),
                },
                value=1,
            )

            mock_increment.assert_called_with(
                "commands_executed",
                labels={
                    "command": "quote_feedback",
                    "status": "success",
                    "guild_id": str(interaction.guild_id),
                },
                value=1,
            )

        # Test different feedback types
        feedback_types = ["negative", "funny", "confused"]
        for feedback_type in feedback_types:
            with patch.object(metrics_collector, "increment") as mock_increment:
                # Call appropriate feedback method
                if feedback_type == "negative":
                    await feedback_view.negative_feedback(interaction, MagicMock())
                elif feedback_type == "funny":
                    await feedback_view.funny_feedback(interaction, MagicMock())
                elif feedback_type == "confused":
                    await feedback_view.confused_feedback(interaction, MagicMock())

                # Should track specific feedback type
                metrics_collector.increment(
                    "commands_executed",
                    labels={
                        "command": f"quote_feedback_{feedback_type}",
                        "status": "success",
                        "guild_id": str(interaction.guild_id),
                    },
                    value=1,
                )

                mock_increment.assert_called()
    @pytest.mark.asyncio
    async def test_speaker_tagging_metrics(self, metrics_collector):
        """Test speaker tagging generating accuracy and usage metrics."""
        db_manager = AsyncMock()
        db_manager.update_quote_speaker.return_value = True

        from tests.fixtures.mock_discord import MockDiscordMember

        members = [MockDiscordMember(user_id=100, username="User1")]
        members[0].display_name = "DisplayUser1"

        tagging_view = SpeakerTaggingView(
            quote_id=123,
            voice_members=members,
            db_manager=db_manager,
        )

        interaction = MockInteraction()
        interaction.user.id = 999  # Tagger

        # Test successful tagging metrics
        with patch.object(metrics_collector, "increment") as mock_increment:
            tag_button = tagging_view.children[0]
            await tag_button.callback(interaction)

            # Should track tagging success
            metrics_collector.increment(
                "commands_executed",
                labels={
                    "command": "speaker_tag",
                    "status": "success",
                    "guild_id": str(interaction.guild_id),
                },
                value=1,
            )

            mock_increment.assert_called_with(
                "commands_executed",
                labels={
                    "command": "speaker_tag",
                    "status": "success",
                    "guild_id": str(interaction.guild_id),
                },
                value=1,
            )

        # Test tagging accuracy metrics (would be used by the system)
        with patch.object(metrics_collector, "observe_histogram") as mock_observe:
            # Simulate speaker recognition accuracy
            metrics_collector.observe_histogram(
                "speaker_recognition_accuracy",
                value=0.95,  # 95% confidence
                labels={},
            )

            mock_observe.assert_called_with(
                "speaker_recognition_accuracy", value=0.95, labels={}
            )
    @pytest.mark.asyncio
    async def test_ui_error_metrics_collection(self, metrics_collector):
        """Test error metrics collection from UI component failures."""
        db_manager = AsyncMock()
        db_manager.execute_query.side_effect = Exception("Database error")

        browser = QuoteBrowserView(
            db_manager=db_manager,
            user_id=123,
            guild_id=456,
            quotes=[],
        )

        interaction = MockInteraction()
        interaction.user.id = 123

        # Test error metrics collection
        with patch.object(metrics_collector, "increment") as mock_increment:
            select = MagicMock()
            select.values = ["funny"]

            # This should cause an error
            await browser.category_filter(interaction, select)

            # Should track error
            metrics_collector.increment(
                "errors",
                labels={"error_type": "database_error", "component": "quote_browser"},
                value=1,
            )

            mock_increment.assert_called_with(
                "errors",
                labels={"error_type": "database_error", "component": "quote_browser"},
                value=1,
            )
    @pytest.mark.asyncio
    async def test_ui_performance_metrics(self, metrics_collector):
        """Test UI component performance metrics collection."""
        consent_manager = AsyncMock()

        # Add artificial delay to simulate slow operation
        async def slow_grant_consent(user_id, guild_id):
            await asyncio.sleep(0.1)  # 100ms delay
            return True

        consent_manager.grant_consent = slow_grant_consent
        consent_manager.global_opt_outs = set()

        consent_view = ConsentView(consent_manager, 123)

        interaction = MockInteraction()
        interaction.user.id = 456

        # Measure performance
        with patch.object(metrics_collector, "observe_histogram") as mock_observe:
            start_time = asyncio.get_event_loop().time()
            await consent_view.give_consent(interaction, MagicMock())
            duration = asyncio.get_event_loop().time() - start_time

            # Should track operation duration
            metrics_collector.observe_histogram(
                "discord_api_calls",  # UI operation performance
                value=duration,
                labels={"operation": "consent_grant", "status": "success"},
            )

            mock_observe.assert_called()
            # Verify duration was reasonable
            args = mock_observe.call_args[1]
            assert args["value"] >= 0.1  # At least the sleep duration
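
# --- Illustrative sketch (an assumption, not part of the code under test) -----------
# The tests above repeatedly note that "in real implementation" the view itself would
# report the metric. A thin helper with the call shape those tests simulate might look
# roughly like this; the name and signature are hypothetical.
def _record_ui_command(collector, command, guild_id, status="success"):
    """Report a UI-driven command execution to the metrics collector."""
    collector.increment(
        "commands_executed",
        labels={"command": command, "status": status, "guild_id": str(guild_id)},
        value=1,
    )
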

class TestMetricsDisplayInUI:
    """Test displaying metrics information in UI components."""

    @pytest.fixture
    def sample_metrics_data(self):
        """Sample metrics data for UI display testing."""
        return {
            "time_period_hours": 24,
            "total_events": 1250,
            "event_types": {
                "consent_actions": 45,
                "quote_feedback": 128,
                "commands_executed": 892,
                "errors": 12,
            },
            "error_summary": {
                "database_error": 8,
                "permission_error": 3,
                "timeout_error": 1,
            },
            "performance_summary": {
                "avg_response_time": 0.25,
                "max_response_time": 2.1,
                "min_response_time": 0.05,
            },
        }
    @pytest.mark.asyncio
    async def test_metrics_summary_embed_creation(
        self, sample_metrics_data, metrics_collector
    ):
        """Test creating embed with metrics summary."""

        # Create metrics summary embed
        embed = discord.Embed(
            title="📊 Bot Metrics Summary",
            description=f"Activity over the last {sample_metrics_data['time_period_hours']} hours",
            color=0x3498DB,
            timestamp=datetime.now(timezone.utc),
        )

        # Add activity summary
        activity_text = "\n".join(
            [
                f"**Total Events:** {sample_metrics_data['total_events']:,}",
                f"**Commands:** {sample_metrics_data['event_types']['commands_executed']:,}",
                f"**Consent Actions:** {sample_metrics_data['event_types']['consent_actions']:,}",
                f"**Feedback:** {sample_metrics_data['event_types']['quote_feedback']:,}",
            ]
        )

        embed.add_field(name="📈 Activity Summary", value=activity_text, inline=True)

        # Add error summary
        error_text = "\n".join(
            [
                f"**Total Errors:** {sample_metrics_data['event_types']['errors']}",
                f"**Database:** {sample_metrics_data['error_summary']['database_error']}",
                f"**Permissions:** {sample_metrics_data['error_summary']['permission_error']}",
                f"**Timeouts:** {sample_metrics_data['error_summary']['timeout_error']}",
            ]
        )

        embed.add_field(name="❌ Error Summary", value=error_text, inline=True)

        # Add performance summary
        perf_text = "\n".join(
            [
                f"**Avg Response:** {sample_metrics_data['performance_summary']['avg_response_time']:.2f}s",
                f"**Max Response:** {sample_metrics_data['performance_summary']['max_response_time']:.2f}s",
                f"**Min Response:** {sample_metrics_data['performance_summary']['min_response_time']:.2f}s",
            ]
        )

        embed.add_field(name="⚡ Performance", value=perf_text, inline=True)

        # Verify embed creation
        assert isinstance(embed, discord.Embed)
        assert "Metrics Summary" in embed.title
        # The activity text uses thousands separators, so compare the formatted value
        assert f"{sample_metrics_data['total_events']:,}" in str(embed.fields)
    @pytest.mark.asyncio
    async def test_real_time_metrics_updates_in_ui(self, metrics_collector):
        """Test real-time metrics updates in UI components."""
        # Simulate real-time metrics collection
        events = []

        # Mock event storage
        with patch.object(metrics_collector, "_store_event") as mock_store:
            mock_store.side_effect = lambda name, value, labels: events.append(
                MetricEvent(name=name, value=value, labels=labels)
            )

            # Generate various UI metrics
            metrics_collector.increment("consent_actions", {"action": "granted"})
            metrics_collector.increment(
                "commands_executed", {"command": "quote_browser"}
            )
            metrics_collector.increment("quote_feedback", {"type": "positive"})

            # Verify events were stored
            assert len(events) == 3
            assert events[0].name == "consent_actions"
            assert events[1].name == "commands_executed"
            assert events[2].name == "quote_feedback"
    @pytest.mark.asyncio
    async def test_metrics_health_status_in_ui(self, metrics_collector):
        """Test displaying metrics system health in UI."""
        # Get health status
        health_status = metrics_collector.check_health()

        # Create health status embed
        embed = discord.Embed(
            title="🏥 System Health",
            color=0x00FF00 if health_status["status"] == "healthy" else 0xFF0000,
        )

        # Add health indicators
        status_text = "\n".join(
            [
                f"**Status:** {health_status['status'].title()}",
                f"**Metrics Enabled:** {'✅' if health_status['metrics_enabled'] else '❌'}",
                f"**Buffer Size:** {health_status['events_buffer_size']:,}",
                f"**Tasks Running:** {health_status['collection_tasks_running']}",
                f"**Uptime:** {health_status['uptime_seconds']:.1f}s",
            ]
        )

        embed.add_field(name="📊 Metrics System", value=status_text, inline=False)

        assert isinstance(embed, discord.Embed)
        assert "System Health" in embed.title
    @pytest.mark.asyncio
    async def test_user_activity_metrics_display(self, metrics_collector):
        """Test displaying user activity metrics in UI."""
        # Mock user activity data
        user_activity = {
            "user_id": 123456,
            "username": "ActiveUser",
            "actions_24h": {
                "consent_given": 1,
                "quotes_browsed": 15,
                "feedback_given": 8,
                "speaker_tags": 3,
            },
            "total_interactions": 27,
            "last_active": datetime.now(timezone.utc),
        }

        # Create user activity embed
        embed = discord.Embed(
            title=f"📈 Activity: {user_activity['username']}",
            description="User activity over the last 24 hours",
            color=0x9B59B6,
            timestamp=user_activity["last_active"],
        )

        activity_text = "\n".join(
            [
                f"**Total Interactions:** {user_activity['total_interactions']}",
                f"**Quotes Browsed:** {user_activity['actions_24h']['quotes_browsed']}",
                f"**Feedback Given:** {user_activity['actions_24h']['feedback_given']}",
                f"**Speaker Tags:** {user_activity['actions_24h']['speaker_tags']}",
            ]
        )

        embed.add_field(name="🎯 Actions", value=activity_text, inline=True)

        # Add engagement score
        engagement_score = min(100, user_activity["total_interactions"] * 2)
        embed.add_field(
            name="💯 Engagement Score", value=f"**{engagement_score}%**", inline=True
        )

        assert isinstance(embed, discord.Embed)
        assert user_activity["username"] in embed.title
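
# --- Illustrative sketch (an assumption, not part of the code under test) -----------
# The display tests above build summary embeds inline. In production this would more
# likely live in a small helper; a hypothetical builder for a summary dict shaped like
# the `sample_metrics_data` fixture could look like this.
def _build_metrics_summary_embed(summary):
    """Build a metrics-summary embed from a summary dict like the fixture above."""
    embed = discord.Embed(
        title="📊 Bot Metrics Summary",
        description=f"Activity over the last {summary['time_period_hours']} hours",
        color=0x3498DB,
        timestamp=datetime.now(timezone.utc),
    )
    embed.add_field(
        name="📈 Activity Summary",
        value=f"**Total Events:** {summary['total_events']:,}",
        inline=True,
    )
    embed.add_field(
        name="❌ Error Summary",
        value=f"**Total Errors:** {summary['event_types']['errors']}",
        inline=True,
    )
    return embed
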

class TestMetricsErrorHandlingInUI:
    """Test metrics error handling in UI workflows."""

    @pytest.mark.asyncio
    async def test_metrics_collection_failure_recovery(self, metrics_collector):
        """Test UI continues working when metrics collection fails."""
        consent_manager = AsyncMock()
        consent_manager.global_opt_outs = set()
        consent_manager.grant_consent.return_value = True

        consent_view = ConsentView(consent_manager, 123)

        interaction = MockInteraction()
        interaction.user.id = 456

        # Mock metrics collection failure
        with patch.object(metrics_collector, "increment") as mock_increment:
            mock_increment.side_effect = MetricsError("Collection failed")

            # UI should still work even if metrics fail
            await consent_view.give_consent(interaction, MagicMock())

            # Consent should still be granted
            consent_manager.grant_consent.assert_called_once()
            assert 456 in consent_view.responses
    @pytest.mark.asyncio
    async def test_metrics_rate_limiting_in_ui(self, metrics_collector):
        """Test metrics rate limiting doesn't break UI functionality."""
        # Test rate limiting
        operation = "ui_interaction"

        # First 60 operations should pass
        for i in range(60):
            assert metrics_collector.rate_limit_check(operation, max_per_minute=60)

        # 61st operation should be rate limited
        assert not metrics_collector.rate_limit_check(operation, max_per_minute=60)

        # But UI should continue working regardless of rate limiting
    @pytest.mark.asyncio
    async def test_metrics_export_error_handling(self, metrics_collector):
        """Test handling of metrics export errors in UI."""
        # Test Prometheus export error
        with patch("utils.metrics.generate_latest") as mock_generate:
            mock_generate.side_effect = Exception("Export failed")

            with pytest.raises(MetricsExportError, match="Export failed"):
                await metrics_collector.export_metrics("prometheus")
    @pytest.mark.asyncio
    async def test_metrics_validation_in_ui_context(self, metrics_collector):
        """Test metrics validation when called from UI components."""
        # Test invalid metric names
        with pytest.raises(MetricsError):
            metrics_collector.increment("", value=1)

        with pytest.raises(MetricsError):
            metrics_collector.increment("test", value=-1)  # Negative value

        # Test invalid histogram values
        with pytest.raises(MetricsError):
            metrics_collector.observe_histogram("test", value="not_a_number")

        # Test invalid gauge values
        with pytest.raises(MetricsError):
            metrics_collector.set_gauge("test", value=None)
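
# --- Illustrative sketch (an assumption, not part of the code under test) -----------
# The failure-recovery test above relies on views treating metrics as best-effort.
# A hypothetical fail-open wrapper capturing that contract:
def _safe_increment(collector, name, **kwargs):
    """Increment a metric, but never let a metrics failure break the UI flow."""
    try:
        collector.increment(name, **kwargs)
    except MetricsError:
        # Metrics are best-effort in UI code paths; swallow collection failures.
        pass
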

class TestBusinessMetricsFromUI:
    """Test business-specific metrics generated from UI interactions."""

    @pytest.mark.asyncio
    async def test_user_engagement_metrics(self, metrics_collector):
        """Test user engagement metrics from UI interactions."""
        # Simulate user engagement journey
        guild_id = 789012

        # User gives consent
        with patch.object(metrics_collector, "increment") as mock_increment:
            metrics_collector.increment(
                "consent_actions",
                labels={"action": "granted", "guild_id": str(guild_id)},
            )

            mock_increment.assert_called()

        # User browses quotes
        with patch.object(metrics_collector, "increment") as mock_increment:
            for _ in range(5):  # 5 page views
                metrics_collector.increment(
                    "commands_executed",
                    labels={
                        "command": "quote_browser_next",
                        "status": "success",
                        "guild_id": str(guild_id),
                    },
                )

            assert mock_increment.call_count == 5

        # User gives feedback
        with patch.object(metrics_collector, "increment") as mock_increment:
            metrics_collector.increment(
                "commands_executed",
                labels={
                    "command": "quote_feedback",
                    "status": "success",
                    "guild_id": str(guild_id),
                },
            )

            mock_increment.assert_called()
    @pytest.mark.asyncio
    async def test_content_quality_metrics(self, metrics_collector):
        """Test content quality metrics from UI feedback."""
        quote_id = 123

        # Collect feedback metrics
        feedback_types = ["positive", "negative", "funny", "confused"]

        for feedback_type in feedback_types:
            with patch.object(metrics_collector, "increment") as mock_increment:
                metrics_collector.increment(
                    "quote_feedback",
                    labels={"type": feedback_type, "quote_id": str(quote_id)},
                )

                mock_increment.assert_called_with(
                    "quote_feedback",
                    labels={"type": feedback_type, "quote_id": str(quote_id)},
                )
    @pytest.mark.asyncio
    async def test_feature_usage_metrics(self, metrics_collector):
        """Test feature usage metrics from UI components."""
        features = [
            "quote_browser",
            "speaker_tagging",
            "consent_management",
            "feedback_system",
            "personality_display",
        ]

        for feature in features:
            with patch.object(metrics_collector, "increment") as mock_increment:
                metrics_collector.increment(
                    "feature_usage",
                    labels={"feature": feature, "status": "accessed"},
                )

                mock_increment.assert_called_with(
                    "feature_usage",
                    labels={"feature": feature, "status": "accessed"},
                )
    @pytest.mark.asyncio
    async def test_conversion_funnel_metrics(self, metrics_collector):
        """Test conversion funnel metrics through UI journey."""
        # Simulate conversion funnel
        funnel_steps = [
            "user_joined",  # User joins voice channel
            "consent_requested",  # Consent modal shown
            "consent_given",  # User gives consent
            "first_quote",  # First quote captured
            "feedback_given",  # User gives feedback
            "return_user",  # User returns and uses features
        ]

        for step in funnel_steps:
            with patch.object(metrics_collector, "increment") as mock_increment:
                metrics_collector.increment(
                    "conversion_funnel",
                    labels={"step": step, "guild_id": "123"},
                )

                mock_increment.assert_called_with(
                    "conversion_funnel",
                    labels={"step": step, "guild_id": "123"},
                )
    @pytest.mark.asyncio
    async def test_error_impact_metrics(self, metrics_collector):
        """Test metrics showing error impact on user experience."""
        error_scenarios = [
            {"type": "database_error", "impact": "high", "feature": "quote_browser"},
            {"type": "permission_error", "impact": "medium", "feature": "admin_panel"},
            {"type": "timeout_error", "impact": "low", "feature": "consent_modal"},
        ]

        for scenario in error_scenarios:
            with patch.object(metrics_collector, "increment") as mock_increment:
                metrics_collector.increment(
                    "errors",
                    labels={
                        "error_type": scenario["type"],
                        "impact": scenario["impact"],
                        "component": scenario["feature"],
                    },
                )

                mock_increment.assert_called()
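
# --- Illustrative sketch (an assumption, not part of the code under test) -----------
# The funnel test above only emits per-step counters. A hypothetical reporting step
# could derive step-to-step conversion rates from those counts like this.
def _conversion_rates(step_counts):
    """step_counts: ordered list of (step_name, count) tuples from the funnel metric."""
    rates = {}
    for (prev_step, prev_count), (step, count) in zip(step_counts, step_counts[1:]):
        rates[f"{prev_step}->{step}"] = (count / prev_count) if prev_count else 0.0
    return rates
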

class TestMetricsPerformanceInUI:
    """Test metrics collection performance impact on UI responsiveness."""

    @pytest.mark.asyncio
    async def test_metrics_collection_performance_overhead(self, metrics_collector):
        """Test that metrics collection doesn't slow down UI operations."""
        consent_manager = AsyncMock()
        consent_manager.global_opt_outs = set()
        consent_manager.grant_consent.return_value = True

        consent_view = ConsentView(consent_manager, 123)

        # Time UI operation with metrics
        start_time = asyncio.get_event_loop().time()

        interaction = MockInteraction()
        interaction.user.id = 456

        with patch.object(metrics_collector, "increment"):
            await consent_view.give_consent(interaction, MagicMock())

            # Simulate metrics collection
            metrics_collector.increment("consent_actions", {"action": "granted"})

        duration_with_metrics = asyncio.get_event_loop().time() - start_time

        # Time UI operation without metrics
        metrics_collector.metrics_enabled = False

        start_time = asyncio.get_event_loop().time()

        consent_view2 = ConsentView(consent_manager, 124)
        interaction.user.id = 457

        await consent_view2.give_consent(interaction, MagicMock())

        duration_without_metrics = asyncio.get_event_loop().time() - start_time

        # Metrics overhead should be minimal (< 50% overhead)
        overhead_ratio = duration_with_metrics / duration_without_metrics
        assert overhead_ratio < 1.5, f"Metrics overhead too high: {overhead_ratio}x"
    @pytest.mark.asyncio
    async def test_concurrent_metrics_collection_safety(self, metrics_collector):
        """Test concurrent metrics collection from multiple UI components."""

        async def simulate_ui_interaction(interaction_id):
            # Simulate various UI interactions
            await asyncio.sleep(0.001)  # Small delay

            metrics_collector.increment(
                "commands_executed",
                labels={
                    "command": f"interaction_{interaction_id}",
                    "status": "success",
                },
            )

            return f"interaction_{interaction_id}_completed"

        # Create many concurrent UI interactions
        tasks = [simulate_ui_interaction(i) for i in range(100)]
        results = await asyncio.gather(*tasks)

        # All interactions should complete successfully
        assert len(results) == 100
        assert all("completed" in result for result in results)
    @pytest.mark.asyncio
    async def test_metrics_memory_usage_monitoring(self, metrics_collector):
        """Test monitoring metrics collection memory usage."""
        # Generate many metrics events
        for i in range(1000):
            event = MetricEvent(
                name="test_event",
                value=1.0,
                labels={"iteration": str(i)},
            )
            metrics_collector.events_buffer.append(event)

        # Buffer should respect max length
        assert (
            len(metrics_collector.events_buffer)
            <= metrics_collector.events_buffer.maxlen
        )

        # Should handle buffer rotation properly: if maxlen is below 1000 the oldest
        # events are rotated out, otherwise all 1000 events are retained
        assert len(metrics_collector.events_buffer) == min(
            1000, metrics_collector.events_buffer.maxlen
        )
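
# --- Illustrative sketch (an assumption, not part of the code under test) -----------
# The memory-usage test above presumes events_buffer behaves like a bounded deque that
# silently drops the oldest entries once maxlen is reached; this standalone demo shows
# that rotation behaviour with collections.deque.
def _bounded_buffer_demo(maxlen=500, n_events=1000):
    from collections import deque

    buf = deque(maxlen=maxlen)
    for i in range(n_events):
        buf.append(
            MetricEvent(name="test_event", value=1.0, labels={"iteration": str(i)})
        )
    # Only the newest `maxlen` events survive rotation.
    return len(buf)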