#!/usr/bin/env python3
"""Demonstration script for the registry validation system.

This script shows how to use the comprehensive validation framework
to ensure agents can discover and deploy all registered components.

Usage:
    python scripts/demo_validation_system.py [--full] [--save-report]
"""

import asyncio
import logging
import sys
from pathlib import Path

# Add project root to path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root / "src"))

from biz_bud.validation import ValidationRunner
from biz_bud.validation.agent_validators import (
    BuddyAgentValidator,
    CapabilityResolutionValidator,
    ToolFactoryValidator,
)
from biz_bud.validation.base import BaseValidator
from biz_bud.validation.deployment_validators import (
    EndToEndWorkflowValidator,
    PerformanceValidator,
    StateManagementValidator,
)
from biz_bud.validation.registry_validators import (
    CapabilityConsistencyValidator,
    ComponentDiscoveryValidator,
    RegistryIntegrityValidator,
)


async def demo_basic_validation():
    """Demonstrate basic validation functionality."""
    print("🔍 BASIC VALIDATION DEMO")
    print("=" * 50)

    # Create validation runner
    runner = ValidationRunner()

    # Register basic validators
    print("📝 Registering basic validators...")
    basic_validators: list[BaseValidator] = [
        RegistryIntegrityValidator("nodes"),
        RegistryIntegrityValidator("graphs"),
        RegistryIntegrityValidator("tools"),
    ]

    runner.register_validators(basic_validators)
    print(f"✅ Registered {len(basic_validators)} validators")

    # Run validations
    print("\n🚀 Running basic validations...")
    report = await runner.run_all_validations(parallel=True)

    # Display summary
    print("\n📊 VALIDATION SUMMARY")
    print(f" Total validations: {report.summary.total_validations}")
    print(f" Success rate: {report.summary.success_rate:.1f}%")
    print(f" Duration: {report.summary.total_duration:.2f}s")
    print(f" Issues found: {report.summary.total_issues}")

    if report.summary.has_failures:
        print(" ⚠️ Failures detected!")
    else:
        print(" ✅ All validations passed!")

    return report


async def demo_comprehensive_validation():
    """Demonstrate comprehensive validation with all validators."""
    print("\n\n🔍 COMPREHENSIVE VALIDATION DEMO")
    print("=" * 50)

    # Create validation runner
    runner = ValidationRunner()

    # Register comprehensive validators
    print("📝 Registering comprehensive validators...")
    validators: list[BaseValidator] = [
        # Registry validators
        RegistryIntegrityValidator("nodes"),
        RegistryIntegrityValidator("graphs"),
        RegistryIntegrityValidator("tools"),
        ComponentDiscoveryValidator("nodes"),
        ComponentDiscoveryValidator("graphs"),
        ComponentDiscoveryValidator("tools"),
        CapabilityConsistencyValidator("capability_consistency"),
        # Agent validators
        ToolFactoryValidator(),
        BuddyAgentValidator(),
        CapabilityResolutionValidator(),
        # Deployment validators (safe mode - no side effects)
        StateManagementValidator(),
        PerformanceValidator(),
    ]
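    # Note: EndToEndWorkflowValidator is imported above but not registered in this
    # list, presumably to keep the demo run side-effect free ("safe mode" above).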

    runner.register_validators(validators)
    print(f"✅ Registered {len(validators)} validators")

    # List registered validators
    print("\n📋 Registered validators:")
    for i, validator_name in enumerate(runner.list_validators(), 1):
        print(f" {i:2d}. {validator_name}")

    # Run comprehensive validation
    print("\n🚀 Running comprehensive validation...")
    print(" (This may take a moment...)")

    report = await runner.run_all_validations(
        parallel=True,
        respect_dependencies=True,
    )

    # Display detailed summary
    print("\n📊 COMPREHENSIVE VALIDATION SUMMARY")
    print(f" Total validations: {report.summary.total_validations}")
    print(f" ✅ Passed: {report.summary.passed_validations}")
    print(f" ❌ Failed: {report.summary.failed_validations}")
    print(f" ⚠️ Errors: {report.summary.error_validations}")
    print(f" ⏭️ Skipped: {report.summary.skipped_validations}")
    print(f" 🎯 Success rate: {report.summary.success_rate:.1f}%")
    print(f" ⏱️ Duration: {report.summary.total_duration:.2f}s")

    # Issue breakdown
    print("\n🔍 ISSUES BREAKDOWN")
    print(f" 🔴 Critical: {report.summary.critical_issues}")
    print(f" 🟠 Errors: {report.summary.error_issues}")
    print(f" 🟡 Warnings: {report.summary.warning_issues}")
    print(f" 🔵 Info: {report.summary.info_issues}")
    print(f" 📊 Total: {report.summary.total_issues}")

    # Show failed validations
    failed_results = report.get_failed_results()
    if failed_results:
        print("\n❌ FAILED VALIDATIONS:")
        for result in failed_results:
            print(f" • {result.validator_name}: {result.status.value}")
            for issue in result.issues[:2]:  # Show first 2 issues
                print(f" - {issue.message}")

    # Show top capabilities found
    capability_info = {}
    for result in report.results:
        if "capabilities" in result.metadata:
            caps = result.metadata["capabilities"]
            for cap in caps:
                capability_info[cap] = capability_info.get(cap, 0) + 1

    if capability_info:
        print("\n🎯 TOP CAPABILITIES DISCOVERED:")
        sorted_caps = sorted(capability_info.items(), key=lambda x: x[1], reverse=True)
        for cap, count in sorted_caps[:10]:  # Show top 10
            print(f" • {cap}: {count} components")

    return report


async def demo_single_validator():
    """Demonstrate running a single validator."""
    print("\n\n🔍 SINGLE VALIDATOR DEMO")
    print("=" * 50)

    # Create and run tool factory validator
    print("📝 Testing Tool Factory Validator...")
    validator = ToolFactoryValidator()

    print("🚀 Running tool factory validation...")
    result = await validator.run_validation()

    print("\n📊 TOOL FACTORY VALIDATION RESULT")
    print(f" Status: {result.status.value}")
    print(f" Duration: {result.duration:.2f}s")
    print(f" Issues: {len(result.issues)}")

    # Show metadata
    if "node_tools" in result.metadata:
        node_info = result.metadata["node_tools"]
        print(f" 📋 Node Tools: {node_info.get('successful', 0)}/{node_info.get('total_tested', 0)} successful")

    if "graph_tools" in result.metadata:
        graph_info = result.metadata["graph_tools"]
        print(f" 🌐 Graph Tools: {graph_info.get('successful', 0)}/{graph_info.get('total_tested', 0)} successful")

    if "capability_tool_creation" in result.metadata:
        cap_info = result.metadata["capability_tool_creation"]
        print(f" 🎯 Capabilities Tested: {len(cap_info.get('tested_capabilities', []))}")

    # Show issues if any
    if result.issues:
        print("\n⚠️ ISSUES FOUND:")
        for issue in result.issues:
            icon = {"critical": "🔴", "error": "🟠", "warning": "🟡", "info": "🔵"}.get(issue.severity.value, "❓")
            print(f" {icon} {issue.message}")

    return result


async def demo_capability_resolution():
    """Demonstrate capability resolution validation."""
    print("\n\n🔍 CAPABILITY RESOLUTION DEMO")
    print("=" * 50)

    # Test capability resolution
    print("📝 Testing Capability Resolution...")
    validator = CapabilityResolutionValidator()

    print("🚀 Running capability resolution validation...")
    result = await validator.run_validation()

    print("\n📊 CAPABILITY RESOLUTION RESULT")
    print(f" Status: {result.status.value}")
    print(f" Duration: {result.duration:.2f}s")
    print(f" Issues: {len(result.issues)}")

    # Show capability discovery details
    if "capability_discovery" in result.metadata:
        discovery_info = result.metadata["capability_discovery"]
        print(f" 🎯 Total Capabilities: {discovery_info.get('total_capabilities', 0)}")

        # Show sample capabilities
        sources = discovery_info.get("capability_sources", {})
        if sources:
            print(" 📋 Sample Capabilities:")
            for cap, cap_sources in list(sources.items())[:5]:  # Show first 5
                source_count = len(cap_sources)
                print(f" • {cap}: {source_count} source(s)")

    # Show testing results
    if "capability_testing" in result.metadata:
        testing_info = result.metadata["capability_testing"]
        tested = testing_info.get("tested", 0)
        successful = testing_info.get("successful", 0)
        print(f" ✅ Tool Creation: {successful}/{tested} successful")

    return result


async def save_validation_report(report, filename="validation_report.txt"):
    """Save validation report to file."""
    output_path = Path(filename)

    print(f"\n💾 Saving validation report to {output_path}...")

    # Generate comprehensive report
    text_report = report.generate_text_report()

    # Save to file
    with open(output_path, "w", encoding="utf-8") as f:
        f.write(text_report)

    print(f"✅ Report saved to {output_path}")
    print(f" Report size: {len(text_report):,} characters")
    print(f" Report lines: {text_report.count(chr(10)) + 1:,}")

    return output_path


async def main():
    """Main demonstration function."""
    print("🚀 REGISTRY VALIDATION SYSTEM DEMONSTRATION")
    print("=" * 60)
    print("This demo shows how the validation system ensures agents")
    print("can discover and deploy all registered components.")
    print()

    # Setup logging
    logging.basicConfig(level=logging.INFO)

    # Check command line arguments
    full_demo = "--full" in sys.argv
    save_report = "--save-report" in sys.argv

    try:
        # Run basic demonstration
        basic_report = await demo_basic_validation()

        # Run single validator demo
        await demo_single_validator()

        # Run capability resolution demo
        await demo_capability_resolution()

        # Run comprehensive demo if requested
        if full_demo:
            comprehensive_report = await demo_comprehensive_validation()
            final_report = comprehensive_report
        else:
            final_report = basic_report
            print("\n💡 Run with --full for comprehensive validation demo")

        # Save report if requested
        if save_report:
            await save_validation_report(final_report)
        else:
            print("\n💡 Add --save-report to save detailed report to file")

        # Final summary
        print("\n✅ DEMONSTRATION COMPLETE")
        print(" The validation system successfully:")
        print(" • ✅ Validated registry integrity")
        print(" • ✅ Tested component discovery")
        print(" • ✅ Verified agent integration")
        print(" • ✅ Checked capability resolution")
        print(" • ✅ Generated comprehensive reports")
        print()
        print("🎯 CONCLUSION: Agents can reliably discover and deploy")
        print(" all registered components through the validation system!")

        return 0

    except Exception as e:
        print(f"\n❌ DEMONSTRATION FAILED: {str(e)}")
        import traceback

        traceback.print_exc()
        return 1


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)