"""Test helper functions and utilities."""
|
|
|
|
import hashlib
|
|
import json
|
|
import tempfile
|
|
from datetime import UTC, datetime
|
|
from unittest.mock import MagicMock, patch
|
|
|
|
from code_quality_guard import (
|
|
QualityConfig,
|
|
analyze_code_quality,
|
|
check_code_issues,
|
|
check_cross_file_duplicates,
|
|
check_state_changes,
|
|
get_claude_quality_command,
|
|
should_skip_file,
|
|
store_pre_state,
|
|
verify_naming_conventions,
|
|
)
|
|
|
|
|
|
class TestHelperFunctions:
|
|
"""Test helper functions in the hook."""
|
|
|
|
def test_should_skip_file_default_patterns(self):
|
|
"""Test default skip patterns."""
|
|
config = QualityConfig()
|
|
|
|
# Test files that should be skipped
|
|
assert should_skip_file("test_example.py", config) is True
|
|
assert should_skip_file("example_test.py", config) is True
|
|
assert should_skip_file("/project/tests/file.py", config) is True
|
|
assert should_skip_file("/fixtures/data.py", config) is True
|
|
|
|
# Test files that should not be skipped
|
|
assert should_skip_file("example.py", config) is False
|
|
assert should_skip_file("src/main.py", config) is False
|
|
|
|
def test_should_skip_file_custom_patterns(self):
|
|
"""Test custom skip patterns."""
|
|
config = QualityConfig(skip_patterns=["ignore_", "/vendor/"])
|
|
|
|
assert should_skip_file("ignore_this.py", config) is True
|
|
assert should_skip_file("/vendor/lib.py", config) is True
|
|
assert (
|
|
should_skip_file("test_file.py", config) is False
|
|
) # Default pattern not included
|
|
|
|
def test_get_claude_quality_command_venv(self):
|
|
"""Prefer python module entrypoint when venv python exists."""
|
|
with patch("pathlib.Path.exists", side_effect=[True]):
|
|
cmd = get_claude_quality_command()
|
|
assert cmd[0].endswith(".venv/bin/python")
|
|
assert cmd[1:] == ["-m", "quality.cli.main"]
|
|
|
|
def test_get_claude_quality_command_cli_fallback(self):
|
|
"""Fallback to claude-quality script when python missing."""
|
|
with patch("pathlib.Path.exists", side_effect=[False, True]):
|
|
cmd = get_claude_quality_command()
|
|
assert len(cmd) == 1
|
|
assert cmd[0].endswith(".venv/bin/claude-quality")
|
|
|
|
def test_get_claude_quality_command_system(self):
|
|
"""Fall back to binary on PATH when venv options absent."""
|
|
with patch("pathlib.Path.exists", side_effect=[False, False]):
|
|
cmd = get_claude_quality_command()
|
|
assert cmd == ["claude-quality"]
|
|
|
|
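    # Taken together, the three tests above pin a resolution order for
    # get_claude_quality_command(): the venv interpreter running the quality
    # module, then the venv console script, then whatever "claude-quality" is
    # on PATH. A minimal sketch consistent with those assertions (the venv
    # base directory and cwd handling are assumptions, not the hook's code):
    #
    #   from pathlib import Path
    #
    #   def get_claude_quality_command() -> list[str]:
    #       venv_bin = Path.cwd() / ".venv" / "bin"
    #       if (venv_bin / "python").exists():
    #           return [str(venv_bin / "python"), "-m", "quality.cli.main"]
    #       if (venv_bin / "claude-quality").exists():
    #           return [str(venv_bin / "claude-quality")]
    #       return ["claude-quality"]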
    def test_store_pre_state(self):
        """Test storing pre-modification state."""
        test_content = "def func1(): pass\ndef func2(): pass"
        test_path = f"{tempfile.gettempdir()}/test.py"

        with patch("pathlib.Path.mkdir") as mock_mkdir:
            with patch("pathlib.Path.write_text") as mock_write:
                store_pre_state(test_path, test_content)

                # Verify cache directory created
                mock_mkdir.assert_called_once()
                _, mkdir_kwargs = mock_mkdir.call_args
                assert mkdir_kwargs.get("exist_ok") is True

                # Verify state was written
                mock_write.assert_called_once()
                written_data = json.loads(mock_write.call_args[0][0])

                assert written_data["file_path"] == test_path
                assert written_data["lines"] == 2
                assert written_data["functions"] == 2
                assert written_data["classes"] == 0
                assert "content_hash" in written_data
                assert "timestamp" in written_data
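    # The assertions above imply store_pre_state() serializes a JSON snapshot
    # of roughly this shape (illustrative only; keys beyond those asserted,
    # and the exact hash/timestamp formats, are assumptions):
    #
    #   {
    #       "file_path": "<the analyzed file's path>",
    #       "lines": 2,
    #       "functions": 2,
    #       "classes": 0,
    #       "content_hash": "<digest of the file content>",
    #       "timestamp": "<ISO-8601 UTC timestamp>",
    #   }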
    def test_check_state_changes_no_pre_state(self):
        """Test state changes when no pre-state exists."""
        test_path = f"{tempfile.gettempdir()}/test.py"
        issues = check_state_changes(test_path)
        assert issues == []

    def test_check_state_changes_with_degradation(self):
        """Test state changes detecting degradation."""
        test_path = f"{tempfile.gettempdir()}/test.py"

        pre_state = {
            "file_path": test_path,
            "timestamp": datetime.now(UTC).isoformat(),
            "lines": 50,
            "functions": 10,
            "classes": 2,
        }

        current_content = "def func1(): pass"  # Only 1 function now

        with patch("pathlib.Path.exists", return_value=True):
            with patch("pathlib.Path.read_text") as mock_read:
                # First call reads pre-state, second reads current file
                mock_read.side_effect = [json.dumps(pre_state), current_content]

                issues = check_state_changes(test_path)

                # Should detect function reduction
                assert len(issues) > 0
                assert any("Reduced functions" in issue for issue in issues)

    def test_check_state_changes_file_size_increase(self):
        """Test detection of a significant file size increase."""
        test_path = f"{tempfile.gettempdir()}/test.py"

        pre_state = {
            "file_path": test_path,
            "lines": 100,
            "functions": 5,
            "classes": 1,
        }

        # Current content has 200 lines (a 2x increase over the pre-state)
        current_content = "\n".join(f"# Line {i}" for i in range(200))

        with patch("pathlib.Path.exists", return_value=True):
            with patch("pathlib.Path.read_text") as mock_read:
                mock_read.side_effect = [json.dumps(pre_state), current_content]

                issues = check_state_changes(test_path)

                assert len(issues) > 0
                assert any("size increased significantly" in issue for issue in issues)

    def test_check_cross_file_duplicates(self):
        """Test cross-file duplicate detection."""
        config = QualityConfig(duplicate_threshold=0.8)
        test_path = f"{tempfile.gettempdir()}/project/test.py"

        with patch("subprocess.run") as mock_run:
            mock_result = MagicMock()
            mock_result.returncode = 0
            mock_result.stdout = json.dumps(
                {
                    "duplicates": [
                        {
                            "files": [
                                f"{tempfile.gettempdir()}/project/test.py",
                                f"{tempfile.gettempdir()}/project/other.py",
                            ],
                        },
                    ],
                },
            )
            mock_run.return_value = mock_result

            issues = check_cross_file_duplicates(test_path, config)

            assert len(issues) > 0
            assert "Cross-file duplication" in issues[0]

    def test_check_cross_file_duplicates_no_duplicates(self):
        """Test the cross-file check when no duplicates are reported."""
        config = QualityConfig()
        test_path = f"{tempfile.gettempdir()}/project/test.py"

        with patch("subprocess.run") as mock_run:
            mock_result = MagicMock()
            mock_result.returncode = 0
            mock_result.stdout = json.dumps({"duplicates": []})
            mock_run.return_value = mock_result

            issues = check_cross_file_duplicates(test_path, config)
            assert issues == []
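    # The next two tests consume `non_pep8_code` and `clean_code` pytest
    # fixtures that are not defined in this module (presumably conftest.py).
    # A hypothetical sketch of what they must contain, given the assertions
    # below (exactly one non-PEP8 function name and one non-PEP8 class name):
    #
    #   @pytest.fixture
    #   def non_pep8_code():
    #       return "def BadFuncName(): pass\n\nclass bad_class_name: pass\n"
    #
    #   @pytest.fixture
    #   def clean_code():
    #       return "def good_func(): pass\n\nclass GoodClass: pass\n"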
    def test_verify_naming_conventions_violations(self, non_pep8_code):
        """Test naming convention verification with violations."""
        with patch("pathlib.Path.read_text", return_value=non_pep8_code):
            test_path = f"{tempfile.gettempdir()}/test.py"
            issues = verify_naming_conventions(test_path)

            assert len(issues) == 2
            assert any("Non-PEP8 function names" in issue for issue in issues)
            assert any("Non-PEP8 class names" in issue for issue in issues)

    def test_verify_naming_conventions_clean(self, clean_code):
        """Test naming convention verification with clean code."""
        with patch("pathlib.Path.read_text", return_value=clean_code):
            test_path = f"{tempfile.gettempdir()}/test.py"
            issues = verify_naming_conventions(test_path)
            assert issues == []

    def test_analyze_code_quality_all_checks(self):
        """Test analyze_code_quality with all checks enabled."""
        config = QualityConfig(
            duplicate_enabled=True,
            complexity_enabled=True,
            modernization_enabled=True,
        )

        test_content = "def test(): pass"

        with patch("code_quality_guard.detect_internal_duplicates") as mock_dup:
            with patch("subprocess.run") as mock_run:
                # Set up mock returns
                mock_dup.return_value = {"duplicates": []}

                mock_result = MagicMock()
                mock_result.returncode = 0
                mock_result.stdout = json.dumps({"summary": {}})
                mock_run.return_value = mock_result

                analyze_code_quality(test_content, "test.py", config)

                # Verify all checks were run
                mock_dup.assert_called_once()
                assert mock_run.call_count >= 2  # Complexity and modernization

    def test_analyze_code_quality_disabled_checks(self):
        """Test analyze_code_quality with all checks disabled."""
        config = QualityConfig(
            duplicate_enabled=False,
            complexity_enabled=False,
            modernization_enabled=False,
            sourcery_enabled=False,
            basedpyright_enabled=False,
            pyrefly_enabled=False,
        )

        with patch("code_quality_guard.detect_internal_duplicates") as mock_dup:
            with patch("subprocess.run") as mock_run:
                results = analyze_code_quality("def test(): pass", "test.py", config)

                # No checks should be run
                mock_dup.assert_not_called()
                mock_run.assert_not_called()
                assert results == {}

    def test_check_code_issues_internal_duplicates(self):
        """Test issue detection for internal duplicates."""
        config = QualityConfig()
        results = {
            "internal_duplicates": {
                "duplicates": [
                    {
                        "similarity": 0.95,
                        "description": "Similar functions",
                        "locations": [
                            {"name": "func1", "lines": "1-5"},
                            {"name": "func2", "lines": "7-11"},
                        ],
                    },
                ],
            },
        }

        has_issues, issues = check_code_issues(results, config)

        assert has_issues is True
        assert len(issues) > 0
        assert "Internal duplication" in issues[0]
        assert "95%" in issues[0]

    def test_check_code_issues_complexity(self):
        """Test issue detection for high complexity."""
        config = QualityConfig(complexity_threshold=10)
        results = {
            "complexity": {
                "summary": {"average_cyclomatic_complexity": 15},
                "distribution": {"High": 2, "Very High": 1},
            },
        }

        has_issues, issues = check_code_issues(results, config)

        assert has_issues is True
        assert any("High average complexity" in issue for issue in issues)
        assert any("3 function(s) with high complexity" in issue for issue in issues)
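    # Note: the "3 function(s)" expectation above assumes check_code_issues
    # sums the "High" and "Very High" buckets of the complexity distribution
    # (2 + 1); that behavior is inferred from the assertion, not read from
    # the hook's source.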
    def test_check_code_issues_modernization(self):
        """Test issue detection for modernization."""
        config = QualityConfig(require_type_hints=True)
        results = {
            "modernization": {
                "files": {
                    "test.py": [
                        {"issue_type": "use_enumerate"},
                        {"issue_type": "missing_return_type"},
                        {"issue_type": "missing_param_type"},
                    ],
                },
            },
        }

        has_issues, issues = check_code_issues(results, config)

        assert has_issues is True
        assert any("Modernization needed" in issue for issue in issues)

    def test_check_code_issues_type_hints_threshold(self):
        """Test type hint threshold detection."""
        config = QualityConfig(require_type_hints=True)

        # Create 15 missing-type-hint issues
        type_issues = [{"issue_type": "missing_return_type"} for _ in range(15)]

        results = {
            "modernization": {
                "files": {"test.py": type_issues},
            },
        }

        has_issues, issues = check_code_issues(results, config)

        assert has_issues is True
        assert any("Many missing type hints" in issue for issue in issues)
        assert "15" in issues[0]
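    # The 15-issue payload above triggers the "Many missing type hints"
    # message, so the hook's cutoff is evidently at or below 15; the exact
    # threshold lives in code_quality_guard, not in this test.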
    def test_check_code_issues_no_issues(self):
        """Test when no issues are found."""
        config = QualityConfig()
        results = {}

        has_issues, issues = check_code_issues(results, config)

        assert has_issues is False
        assert issues == []