"""
|
|
Comprehensive integration tests for UI components using Utils audio processing.
|
|
|
|
Tests the integration between ui/ components and utils/audio_processor.py for:
|
|
- UI displaying audio processing results
|
|
- Audio feature extraction for UI visualization
|
|
- Voice activity detection integration with UI
|
|
- Audio quality indicators in UI components
|
|
- Speaker recognition results in UI displays
|
|
- Audio file management through UI workflows
|
|
"""
|
|
|
|
import asyncio
|
|
import tempfile
|
|
from datetime import datetime, timezone
|
|
from pathlib import Path
|
|
from unittest.mock import AsyncMock, MagicMock, patch
|
|
|
|
import discord
|
|
import numpy as np
|
|
import pytest
|
|
|
|
from ui.components import EmbedBuilder, QuoteBrowserView, SpeakerTaggingView
|
|
from utils.audio_processor import AudioProcessor
|
|
|
|
|
|
class TestUIAudioProcessingIntegration:
    """Test UI components using audio processing results."""

    @pytest.fixture
    def audio_processor(self):
        """Create audio processor for testing."""
        processor = AudioProcessor()

        # Mock VAD model to avoid loading actual model
        processor.preprocessor.vad_model = MagicMock()
        processor.vad_model = processor.preprocessor.vad_model

        return processor

    @pytest.fixture
    def mock_audio_data(self):
        """Create mock audio data for testing."""
        # Generate 2 seconds of sine wave audio at 16kHz
        sample_rate = 16000
        duration = 2.0
        samples = int(duration * sample_rate)

        # Generate simple sine wave
        t = np.linspace(0, duration, samples, False)
        audio_data = np.sin(2 * np.pi * 440 * t)  # 440 Hz tone

        # Convert to 16-bit PCM bytes
        audio_int16 = (audio_data * 32767).astype(np.int16)
        audio_bytes = audio_int16.tobytes()

        return {
            "audio_bytes": audio_bytes,
            "sample_rate": sample_rate,
            "duration": duration,
            "samples": samples,
        }

    @pytest.fixture
    def sample_audio_features(self):
        """Sample audio features for testing."""
        return {
            "duration": 2.5,
            "sample_rate": 16000,
            "channels": 1,
            "rms_energy": 0.7,
            "max_amplitude": 0.95,
            "spectral_centroid_mean": 2250.5,
            "spectral_centroid_std": 445.2,
            "zero_crossing_rate": 0.12,
            "mfcc_mean": [12.5, -8.2, 3.1, -1.8, 0.9],
            "mfcc_std": [15.2, 6.7, 4.3, 3.1, 2.8],
            "pitch_mean": 195.3,
            "pitch_std": 25.7,
        }

    @pytest.mark.asyncio
    async def test_quote_embed_with_audio_features(self, sample_audio_features):
        """Test creating quote embeds with audio processing results."""
        quote_data = {
            "id": 123,
            "quote": "This is a test quote with audio analysis",
            "username": "AudioUser",
            "overall_score": 7.5,
            "funny_score": 8.0,
            "laughter_duration": 2.3,
            "timestamp": datetime.now(timezone.utc),
            # Audio features
            "audio_duration": sample_audio_features["duration"],
            "audio_quality": "high",
            "voice_clarity": 0.85,
            "background_noise": 0.15,
            "speaker_confidence": 0.92,
        }

        embed = EmbedBuilder.create_quote_embed(quote_data, include_analysis=True)

        # Verify basic embed structure
        assert isinstance(embed, discord.Embed)
        assert "Memorable Quote" in embed.title
        assert quote_data["quote"] in embed.description

        # Should include audio information
        audio_fields = [
            field
            for field in embed.fields
            if "Audio" in field.name or "Voice" in field.name
        ]
        assert len(audio_fields) > 0

        # Check if audio duration is displayed
        duration_text = f"{quote_data['audio_duration']:.1f}s"
        embed_text = str(embed.to_dict())
        assert (
            duration_text in embed_text
            or str(quote_data["laughter_duration"]) in embed_text
        )

    @pytest.mark.asyncio
    async def test_audio_quality_visualization_in_ui(self, sample_audio_features):
        """Test displaying audio quality metrics in UI components."""
        # Create audio quality embed
        embed = discord.Embed(
            title="🎤 Audio Quality Analysis",
            description="Detailed audio analysis for voice recording",
            color=0x3498DB,
        )

        # Add basic audio info
        basic_info = "\n".join(
            [
                f"**Duration:** {sample_audio_features['duration']:.1f}s",
                f"**Sample Rate:** {sample_audio_features['sample_rate']:,} Hz",
                f"**Channels:** {sample_audio_features['channels']}",
            ]
        )

        embed.add_field(name="📊 Basic Info", value=basic_info, inline=True)

        # Add quality metrics
        quality_metrics = "\n".join(
            [
                f"**RMS Energy:** {sample_audio_features['rms_energy']:.2f}",
                f"**Max Amplitude:** {sample_audio_features['max_amplitude']:.2f}",
                f"**ZCR:** {sample_audio_features['zero_crossing_rate']:.3f}",
            ]
        )

        embed.add_field(name="🎯 Quality Metrics", value=quality_metrics, inline=True)

        # Add spectral analysis
        spectral_info = "\n".join(
            [
                f"**Spectral Centroid:** {sample_audio_features['spectral_centroid_mean']:.1f} Hz",
                f"**Centroid Std:** {sample_audio_features['spectral_centroid_std']:.1f} Hz",
            ]
        )

        embed.add_field(name="🌊 Spectral Analysis", value=spectral_info, inline=True)

        # Add pitch analysis
        if sample_audio_features["pitch_mean"] > 0:
            pitch_info = "\n".join(
                [
                    f"**Mean Pitch:** {sample_audio_features['pitch_mean']:.1f} Hz",
                    f"**Pitch Variation:** {sample_audio_features['pitch_std']:.1f} Hz",
                ]
            )

            embed.add_field(name="🎵 Pitch Analysis", value=pitch_info, inline=True)

        assert isinstance(embed, discord.Embed)
        assert len(embed.fields) >= 3

    @pytest.mark.asyncio
    async def test_voice_activity_detection_ui_integration(
        self, audio_processor, mock_audio_data
    ):
        """Test VAD results integration with UI components."""
        # Mock VAD results; segments fit inside the 2-second mock clip so the
        # silence/ratio arithmetic below stays consistent
        voice_segments = [
            (0.2, 0.7),  # First speech segment
            (0.9, 1.4),  # Second speech segment
            (1.6, 1.9),  # Third speech segment
        ]

        with patch.object(audio_processor, "detect_voice_activity") as mock_vad:
            mock_vad.return_value = voice_segments

            detected_segments = await audio_processor.detect_voice_activity(
                mock_audio_data["audio_bytes"]
            )

            assert detected_segments == voice_segments

        # Create UI visualization of VAD results
        embed = discord.Embed(
            title="🎤 Voice Activity Detection",
            description=f"Detected {len(voice_segments)} speech segments",
            color=0x00FF00,
        )

        # Add segment details
        segments_text = ""
        total_speech_time = 0

        for i, (start, end) in enumerate(voice_segments, 1):
            duration = end - start
            total_speech_time += duration
            segments_text += (
                f"**Segment {i}:** {start:.1f}s - {end:.1f}s ({duration:.1f}s)\n"
            )

        embed.add_field(
            name="📍 Speech Segments", value=segments_text, inline=False
        )

        # Add summary statistics
        audio_duration = mock_audio_data["duration"]
        speech_ratio = total_speech_time / audio_duration
        silence_time = audio_duration - total_speech_time

        summary_text = "\n".join(
            [
                f"**Total Speech:** {total_speech_time:.1f}s",
                f"**Total Silence:** {silence_time:.1f}s",
                f"**Speech Ratio:** {speech_ratio:.1%}",
            ]
        )

        embed.add_field(name="📊 Summary", value=summary_text, inline=True)

        assert isinstance(embed, discord.Embed)
        assert "Voice Activity Detection" in embed.title

    @pytest.mark.asyncio
    async def test_speaker_recognition_confidence_in_ui(self, sample_audio_features):
        """Test displaying speaker recognition confidence in UI."""
        # Mock speaker recognition results
        speaker_results = [
            {
                "speaker_id": "SPEAKER_01",
                "user_id": 123456,
                "username": "Alice",
                "confidence": 0.95,
                "segments": [(0.0, 2.5), (5.1, 7.3)],
                "total_speaking_time": 4.7,
            },
            {
                "speaker_id": "SPEAKER_02",
                "user_id": 789012,
                "username": "Bob",
                "confidence": 0.78,
                "segments": [(2.8, 4.9)],
                "total_speaking_time": 2.1,
            },
            {
                "speaker_id": "SPEAKER_03",
                "user_id": None,  # Unknown speaker
                "username": "Unknown",
                "confidence": 0.45,
                "segments": [(8.0, 9.2)],
                "total_speaking_time": 1.2,
            },
        ]

        # Create speaker recognition embed
        embed = discord.Embed(
            title="👥 Speaker Recognition Results",
            description=f"Identified {len(speaker_results)} speakers in recording",
            color=0x9B59B6,
        )

        for speaker in speaker_results:
            confidence_emoji = (
                "🟢"
                if speaker["confidence"] > 0.8
                else "🟡" if speaker["confidence"] > 0.6 else "🔴"
            )

            speaker_info = "\n".join(
                [
                    f"**Confidence:** {confidence_emoji} {speaker['confidence']:.1%}",
                    f"**Speaking Time:** {speaker['total_speaking_time']:.1f}s",
                    f"**Segments:** {len(speaker['segments'])}",
                ]
            )

            embed.add_field(
                name=f"🎙️ {speaker['username']} ({speaker['speaker_id']})",
                value=speaker_info,
                inline=True,
            )

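        # The same traffic-light thresholds as a small named helper (a sketch;
        # the loop above inlines this as a conditional expression):
        def confidence_emoji_for(score: float) -> str:
            if score > 0.8:
                return "🟢"
            if score > 0.6:
                return "🟡"
            return "🔴"

        assert [
            confidence_emoji_for(s["confidence"]) for s in speaker_results
        ] == ["🟢", "🟡", "🔴"]
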
        # Add overall statistics
        total_speakers = len([s for s in speaker_results if s["user_id"] is not None])
        unknown_speakers = len([s for s in speaker_results if s["user_id"] is None])
        avg_confidence = np.mean([s["confidence"] for s in speaker_results])

        stats_text = "\n".join(
            [
                f"**Known Speakers:** {total_speakers}",
                f"**Unknown Speakers:** {unknown_speakers}",
                f"**Avg Confidence:** {avg_confidence:.1%}",
            ]
        )

        embed.add_field(name="📈 Statistics", value=stats_text, inline=False)

        assert isinstance(embed, discord.Embed)
        assert "Speaker Recognition" in embed.title

    @pytest.mark.asyncio
    async def test_audio_processing_progress_in_ui(self, audio_processor):
        """Test displaying audio processing progress in UI."""
        # Mock processing stages
        processing_stages = [
            {"name": "Audio Validation", "status": "completed", "duration": 0.12},
            {"name": "Format Conversion", "status": "completed", "duration": 0.45},
            {"name": "Noise Reduction", "status": "completed", "duration": 1.23},
            {
                "name": "Voice Activity Detection",
                "status": "completed",
                "duration": 0.87,
            },
            {"name": "Speaker Diarization", "status": "in_progress", "duration": None},
            {"name": "Transcription", "status": "pending", "duration": None},
        ]

        # Create processing status embed
        embed = discord.Embed(
            title="⚙️ Audio Processing Status",
            description="Processing audio clip for quote analysis",
            color=0xF39C12,  # Orange for in-progress
        )

        completed_stages = [s for s in processing_stages if s["status"] == "completed"]
        in_progress_stages = [
            s for s in processing_stages if s["status"] == "in_progress"
        ]
        pending_stages = [s for s in processing_stages if s["status"] == "pending"]

        # Add completed stages
        if completed_stages:
            completed_text = ""
            for stage in completed_stages:
                duration_text = (
                    f" ({stage['duration']:.2f}s)"
                    if stage["duration"] is not None
                    else ""
                )
                completed_text += f"✅ {stage['name']}{duration_text}\n"

            embed.add_field(name="✅ Completed", value=completed_text, inline=True)

        # Add in-progress stages
        if in_progress_stages:
            progress_text = ""
            for stage in in_progress_stages:
                progress_text += f"⏳ {stage['name']}\n"

            embed.add_field(name="⏳ In Progress", value=progress_text, inline=True)

        # Add pending stages
        if pending_stages:
            pending_text = ""
            for stage in pending_stages:
                pending_text += f"⏸️ {stage['name']}\n"

            embed.add_field(name="⏸️ Pending", value=pending_text, inline=True)

        # Add progress bar (two blocks per stage)
        total_stages = len(processing_stages)
        completed_count = len(completed_stages)
        progress_percentage = (completed_count / total_stages) * 100

        progress_bar = "█" * (completed_count * 2) + "░" * (
            (total_stages - completed_count) * 2
        )
        progress_text = f"{progress_bar} {progress_percentage:.0f}%"

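        # A reusable helper along these lines (hypothetical, not part of the
        # UI module) expresses the same bar math without the inline arithmetic:
        def render_bar(done: int, total: int, width: int = 12) -> str:
            filled = int(width * done / total)
            return "█" * filled + "░" * (width - filled)

        assert render_bar(completed_count, total_stages) == progress_bar
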
        embed.add_field(name="📊 Overall Progress", value=progress_text, inline=False)

        assert isinstance(embed, discord.Embed)
        assert "Processing Status" in embed.title

    @pytest.mark.asyncio
    async def test_audio_error_handling_in_ui(self, audio_processor, mock_audio_data):
        """Test audio processing error display in UI components."""
        # Mock audio processing failure
        with patch.object(audio_processor, "process_audio_clip") as mock_process:
            mock_process.return_value = None  # Processing failed

            result = await audio_processor.process_audio_clip(
                mock_audio_data["audio_bytes"], source_format="wav"
            )

            assert result is None

        # Create error embed
        embed = discord.Embed(
            title="❌ Audio Processing Error",
            description="Failed to process audio clip",
            color=0xFF0000,
        )

        error_details = "\n".join(
            [
                "**Issue:** Audio processing failed",
                "**Possible Causes:**",
                "• Invalid audio format",
                "• Corrupted audio data",
                "• Insufficient audio quality",
                "• Processing timeout",
            ]
        )

        embed.add_field(name="🔍 Error Details", value=error_details, inline=False)

        troubleshooting = "\n".join(
            [
                "**Troubleshooting Steps:**",
                "1. Check your microphone settings",
                "2. Ensure stable internet connection",
                "3. Try speaking closer to the microphone",
                "4. Reduce background noise",
            ]
        )

        embed.add_field(
            name="🛠️ Troubleshooting", value=troubleshooting, inline=False
        )

        assert isinstance(embed, discord.Embed)
        assert "Processing Error" in embed.title

    @pytest.mark.asyncio
    async def test_quote_browser_with_audio_metadata(self, sample_audio_features):
        """Test quote browser displaying audio metadata."""
        db_manager = AsyncMock()

        # Mock quotes with audio metadata
        quotes_with_audio = [
            {
                "id": 1,
                "quote": "First quote with good audio quality",
                "timestamp": datetime.now(timezone.utc),
                "funny_score": 8.0,
                "dark_score": 2.0,
                "silly_score": 6.0,
                "suspicious_score": 1.0,
                "asinine_score": 3.0,
                "overall_score": 7.0,
                "audio_duration": 2.5,
                "audio_quality": "high",
                "speaker_confidence": 0.95,
                "background_noise": 0.1,
            },
            {
                "id": 2,
                "quote": "Second quote with moderate audio",
                "timestamp": datetime.now(timezone.utc),
                "funny_score": 6.0,
                "dark_score": 4.0,
                "silly_score": 5.0,
                "suspicious_score": 2.0,
                "asinine_score": 4.0,
                "overall_score": 5.5,
                "audio_duration": 1.8,
                "audio_quality": "medium",
                "speaker_confidence": 0.72,
                "background_noise": 0.3,
            },
        ]

        browser = QuoteBrowserView(
            db_manager=db_manager,
            user_id=123,
            guild_id=456,
            quotes=quotes_with_audio,
        )

        # Create page embed with audio info
        embed = browser._create_page_embed()

        # Should include audio quality indicators
        embed_dict = embed.to_dict()
        embed_text = str(embed_dict)

        # Check for audio quality indicators
        assert (
            "high" in embed_text
            or "medium" in embed_text
            or "audio" in embed_text.lower()
        )

        assert isinstance(embed, discord.Embed)
        assert len(embed.fields) > 0

    @pytest.mark.asyncio
    async def test_speaker_tagging_with_audio_confidence(self, sample_audio_features):
        """Test speaker tagging UI using audio processing confidence."""
        db_manager = AsyncMock()
        db_manager.update_quote_speaker.return_value = True

        # Mock Discord members with audio confidence data
        from tests.fixtures.mock_discord import MockDiscordMember

        members = []

        # Create members with varying audio confidence
        confidence_data = [
            {"user_id": 100, "username": "HighConfidence", "audio_confidence": 0.95},
            {"user_id": 101, "username": "MediumConfidence", "audio_confidence": 0.75},
            {"user_id": 102, "username": "LowConfidence", "audio_confidence": 0.45},
        ]

        for data in confidence_data:
            member = MockDiscordMember(
                user_id=data["user_id"], username=data["username"]
            )
            member.display_name = data["username"]
            member.audio_confidence = data["audio_confidence"]  # Add audio confidence
            members.append(member)

        tagging_view = SpeakerTaggingView(
            quote_id=123,
            voice_members=members,
            db_manager=db_manager,
        )

        # Verify buttons were created with confidence indicators
        assert len(tagging_view.children) == 4  # 3 members + 1 unknown button

        # In a real implementation, buttons would include confidence indicators,
        # e.g. "Tag HighConfidence (95%)" for high-confidence speakers

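        # A minimal sketch of building such a label (hypothetical helper; the
        # current SpeakerTaggingView API does not expose this):
        def confidence_label(member) -> str:
            confidence = getattr(member, "audio_confidence", None)
            if confidence is None:
                return f"Tag {member.display_name}"
            return f"Tag {member.display_name} ({confidence:.0%})"

        assert confidence_label(members[0]) == "Tag HighConfidence (95%)"
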
    @pytest.mark.asyncio
    async def test_audio_feature_extraction_for_ui_display(
        self, audio_processor, mock_audio_data
    ):
        """Test audio feature extraction integrated with UI display."""
        # Create temporary audio file with a minimal 44-byte PCM WAV header
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
            temp_file.write(b"RIFF")
            temp_file.write(
                (len(mock_audio_data["audio_bytes"]) + 36).to_bytes(4, "little")
            )
            temp_file.write(b"WAVEfmt ")
            temp_file.write((16).to_bytes(4, "little"))  # fmt chunk size
            temp_file.write((1).to_bytes(2, "little"))  # audio format: PCM
            temp_file.write((1).to_bytes(2, "little"))  # channels: mono
            temp_file.write((16000).to_bytes(4, "little"))  # sample rate
            temp_file.write((32000).to_bytes(4, "little"))  # byte rate (16000 * 1 * 2)
            temp_file.write((2).to_bytes(2, "little"))  # block align
            temp_file.write((16).to_bytes(2, "little"))  # bits per sample
            temp_file.write(b"data")
            temp_file.write((len(mock_audio_data["audio_bytes"])).to_bytes(4, "little"))
            temp_file.write(mock_audio_data["audio_bytes"])

            temp_path = temp_file.name

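        # The same header can be built in one shot with struct.pack (a sketch;
        # fields follow the canonical RIFF/WAVE layout, all little-endian):
        import struct

        data_len = len(mock_audio_data["audio_bytes"])
        header = struct.pack(
            "<4sI4s4sIHHIIHH4sI",
            b"RIFF", data_len + 36, b"WAVE", b"fmt ", 16,
            1, 1, 16000, 32000, 2, 16,
            b"data", data_len,
        )
        assert len(header) == 44
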
        try:
            # Mock feature extraction
            with patch.object(
                audio_processor, "extract_audio_features"
            ) as mock_extract:
                mock_extract.return_value = {
                    "duration": 2.0,
                    "rms_energy": 0.7,
                    "spectral_centroid_mean": 2000.0,
                    "pitch_mean": 200.0,
                }

                features = await audio_processor.extract_audio_features(temp_path)

                # Create feature visualization embed
                embed = discord.Embed(
                    title="🎵 Audio Features",
                    description="Extracted features for voice analysis",
                    color=0x8E44AD,
                )

                # Add feature visualizations
                feature_text = "\n".join(
                    [
                        f"**Duration:** {features['duration']:.1f}s",
                        f"**Energy:** {features['rms_energy']:.2f}",
                        f"**Spectral Center:** {features['spectral_centroid_mean']:.0f} Hz",
                        f"**Average Pitch:** {features['pitch_mean']:.0f} Hz",
                    ]
                )

                embed.add_field(name="📊 Features", value=feature_text, inline=False)

                assert isinstance(embed, discord.Embed)
                assert "Audio Features" in embed.title

        finally:
            # Cleanup temp file
            Path(temp_path).unlink(missing_ok=True)

    @pytest.mark.asyncio
    async def test_audio_health_monitoring_in_ui(self, audio_processor):
        """Test audio system health monitoring in UI."""
        # Get audio system health
        health_status = await audio_processor.check_health()
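        # check_health() is assumed to return a dict along the lines of
        # {"ffmpeg_available": bool, "temp_dir_writable": bool,
        #  "supported_formats": list[str]}; the fields below are read with
        # .get() defaults so missing keys degrade gracefully.
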
        # Create health status embed
        embed = discord.Embed(
            title="🔊 Audio System Health",
            color=(
                0x00FF00 if health_status.get("ffmpeg_available", False) else 0xFF0000
            ),
        )

        # Add system status
        system_status = "\n".join(
            [
                f"**FFmpeg:** {'✅ Available' if health_status.get('ffmpeg_available', False) else '❌ Missing'}",
                f"**Temp Directory:** {'✅ Writable' if health_status.get('temp_dir_writable', False) else '❌ Not writable'}",
                f"**Supported Formats:** {', '.join(health_status.get('supported_formats', []))}",
            ]
        )

        embed.add_field(name="🏥 System Status", value=system_status, inline=False)

        # Add capability status
        capabilities = [
            "Audio conversion",
            "Noise reduction",
            "Voice activity detection",
            "Feature extraction",
            "Format validation",
        ]

        capability_text = "\n".join([f"✅ {cap}" for cap in capabilities])

        embed.add_field(name="🎯 Capabilities", value=capability_text, inline=True)

        assert isinstance(embed, discord.Embed)
        assert "Audio System Health" in embed.title


class TestAudioUIPerformanceIntegration:
    """Test performance integration between audio processing and UI."""

    @pytest.mark.asyncio
    async def test_audio_processing_progress_updates(self, audio_processor):
        """Test real-time audio processing progress in UI."""

        # Mock processing stages with delays
        async def mock_slow_processing():
            stages = [
                "Validating audio format",
                "Converting to standard format",
                "Applying noise reduction",
                "Detecting voice activity",
                "Extracting features",
            ]

            results = []
            for i, stage in enumerate(stages):
                await asyncio.sleep(0.01)  # Small delay to simulate processing

                progress = {
                    "stage": stage,
                    "progress": (i + 1) / len(stages),
                    "completed": i + 1,
                    "total": len(stages),
                }
                results.append(progress)

            return results

        progress_updates = await mock_slow_processing()

        # Verify progress tracking
        assert len(progress_updates) == 5
        assert progress_updates[-1]["progress"] == 1.0
        assert all(update["stage"] for update in progress_updates)

    @pytest.mark.asyncio
    async def test_concurrent_audio_processing_ui_updates(self, audio_processor):
        """Test concurrent audio processing with UI updates."""

        async def process_audio_with_ui_updates(clip_id):
            # Simulate processing with progress updates
            await asyncio.sleep(0.05)

            return {
                "clip_id": clip_id,
                "status": "completed",
                "features": {"duration": 2.0, "quality": "high"},
            }

        # Process multiple clips concurrently
        tasks = [process_audio_with_ui_updates(i) for i in range(10)]
        results = await asyncio.gather(*tasks)

        # All should complete successfully
        assert len(results) == 10
        assert all(result["status"] == "completed" for result in results)

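        # With return_exceptions=True (a variant worth noting for UI code),
        # gather surfaces per-clip failures as exception objects in the result
        # list instead of raising the first error:
        mixed = await asyncio.gather(
            *(process_audio_with_ui_updates(i) for i in range(3)),
            return_exceptions=True,
        )
        assert all(not isinstance(r, Exception) for r in mixed)
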
    @pytest.mark.asyncio
    async def test_audio_memory_usage_monitoring(
        self, audio_processor, mock_audio_data
    ):
        """Test monitoring audio processing memory usage."""
        # Simulate processing large audio files (~6.4 MB of 16-bit PCM each:
        # 2 s x 16 kHz x 2 bytes x 100)
        large_audio_data = mock_audio_data["audio_bytes"] * 100

        # Mock memory-intensive processing
        with patch.object(audio_processor, "process_audio_clip") as mock_process:
            mock_process.return_value = b"processed_audio_data"

            # Process multiple large clips
            tasks = []
            for _ in range(5):
                task = audio_processor.process_audio_clip(large_audio_data)
                tasks.append(task)

            results = await asyncio.gather(*tasks)

            # Should handle memory efficiently
            assert all(result is not None for result in results)

    @pytest.mark.asyncio
    async def test_audio_processing_timeout_handling(self, audio_processor):
        """Test handling audio processing timeouts in UI."""
        # Mock slow processing that times out
        with patch.object(audio_processor, "process_audio_clip") as mock_process:

            async def slow_processing(*args, **kwargs):
                await asyncio.sleep(10)  # Very slow
                return b"result"

            mock_process.side_effect = slow_processing

            # Should time out quickly to keep the UI responsive
            with pytest.raises(asyncio.TimeoutError):
                await asyncio.wait_for(
                    audio_processor.process_audio_clip(b"test_data"), timeout=0.1
                )

    @pytest.mark.asyncio
    async def test_audio_quality_realtime_feedback(
        self, audio_processor, mock_audio_data
    ):
        """Test real-time audio quality feedback in UI."""
        # Mock real-time quality analysis
        quality_metrics = {
            "volume_level": 0.7,  # 70% volume
            "noise_level": 0.2,  # 20% noise
            "clarity_score": 0.85,  # 85% clarity
            "clipping_detected": False,
            "silence_ratio": 0.1,  # 10% silence
        }

        # Create real-time quality embed
        embed = discord.Embed(
            title="🎙️ Real-time Audio Quality",
            color=0x00FF00 if quality_metrics["clarity_score"] > 0.8 else 0xFF9900,
        )

        # Volume indicator (ten-slot bar)
        volume_bar = "█" * int(quality_metrics["volume_level"] * 10)
        volume_bar += "░" * (10 - len(volume_bar))

        embed.add_field(
            name="🔊 Volume Level",
            value=f"{volume_bar} {quality_metrics['volume_level']:.0%}",
            inline=False,
        )

        # Noise indicator
        noise_color = (
            "🟢"
            if quality_metrics["noise_level"] < 0.3
            else "🟡" if quality_metrics["noise_level"] < 0.6 else "🔴"
        )

        embed.add_field(
            name="🔇 Background Noise",
            value=f"{noise_color} {quality_metrics['noise_level']:.0%}",
            inline=True,
        )

        # Clarity score
        clarity_color = (
            "🟢"
            if quality_metrics["clarity_score"] > 0.8
            else "🟡" if quality_metrics["clarity_score"] > 0.6 else "🔴"
        )

        embed.add_field(
            name="✨ Voice Clarity",
            value=f"{clarity_color} {quality_metrics['clarity_score']:.0%}",
            inline=True,
        )

        # Warnings
        warnings = []
        if quality_metrics["clipping_detected"]:
            warnings.append("⚠️ Audio clipping detected")
        if quality_metrics["silence_ratio"] > 0.5:
            warnings.append("⚠️ High silence ratio")
        if quality_metrics["volume_level"] < 0.3:
            warnings.append("⚠️ Volume too low")

        if warnings:
            embed.add_field(name="⚠️ Warnings", value="\n".join(warnings), inline=False)

        assert isinstance(embed, discord.Embed)
        assert "Real-time Audio Quality" in embed.title