Files
claude-scripts/tests/hooks/test_pretooluse.py
2025-09-17 17:01:02 +00:00

374 lines
13 KiB
Python

"""Test PreToolUse hook functionality."""
from unittest.mock import patch
from code_quality_guard import QualityConfig, pretooluse_hook
class TestPreToolUseHook:
"""Test PreToolUse hook behavior."""
def test_non_write_tool_allowed(self):
    """Read-style tools bypass quality gating entirely."""
    payload = {
        "tool_name": "Read",
        "tool_input": {"file_path": "test.py"},
    }
    decision = pretooluse_hook(payload, QualityConfig())
    assert decision["permissionDecision"] == "allow"
def test_non_python_file_allowed(self):
    """Writes targeting non-Python files are never gated."""
    tool_input = {
        "file_path": "test.js",
        "content": "const x = 1;",
    }
    payload = {"tool_name": "Write", "tool_input": tool_input}
    decision = pretooluse_hook(payload, QualityConfig())
    assert decision["permissionDecision"] == "allow"
def test_test_file_skipped(self):
    """Files matching the test-file pattern are skipped by default config."""
    tool_input = {
        "file_path": "test_example.py",
        "content": "def test(): pass",
    }
    payload = {"tool_name": "Write", "tool_input": tool_input}
    decision = pretooluse_hook(payload, QualityConfig())
    assert decision["permissionDecision"] == "allow"
def test_clean_code_allowed(self, clean_code):
    """Code with an empty analysis report is allowed through."""
    payload = {
        "tool_name": "Write",
        "tool_input": {"file_path": "example.py", "content": clean_code},
    }
    with patch("code_quality_guard.analyze_code_quality") as analyzer:
        analyzer.return_value = {}
        decision = pretooluse_hook(payload, QualityConfig())
    assert decision["permissionDecision"] == "allow"
def test_complex_code_denied_strict(self, complex_code):
    """Strict mode denies code whose complexity metrics are too high."""
    config = QualityConfig(enforcement_mode="strict")
    payload = {
        "tool_name": "Write",
        "tool_input": {"file_path": "example.py", "content": complex_code},
    }
    # Simulated analyzer report: well over any sane complexity threshold.
    report = {
        "complexity": {
            "summary": {"average_cyclomatic_complexity": 25},
            "distribution": {"High": 1},
        },
    }
    with patch("code_quality_guard.analyze_code_quality") as analyzer:
        analyzer.return_value = report
        decision = pretooluse_hook(payload, config)
    assert decision["permissionDecision"] == "deny"
    assert "quality check failed" in decision["reason"].lower()
def test_complex_code_ask_warn_mode(self, complex_code):
    """Warn mode escalates complex code to an 'ask' decision."""
    config = QualityConfig(enforcement_mode="warn")
    payload = {
        "tool_name": "Write",
        "tool_input": {"file_path": "example.py", "content": complex_code},
    }
    report = {
        "complexity": {
            "summary": {"average_cyclomatic_complexity": 25},
            "distribution": {"High": 1},
        },
    }
    with patch("code_quality_guard.analyze_code_quality") as analyzer:
        analyzer.return_value = report
        decision = pretooluse_hook(payload, config)
    assert decision["permissionDecision"] == "ask"
def test_complex_code_allowed_permissive(self, complex_code):
    """Permissive mode allows complex code but attaches a warning reason."""
    config = QualityConfig(enforcement_mode="permissive")
    payload = {
        "tool_name": "Write",
        "tool_input": {"file_path": "example.py", "content": complex_code},
    }
    report = {
        "complexity": {
            "summary": {"average_cyclomatic_complexity": 25},
            "distribution": {"High": 1},
        },
    }
    with patch("code_quality_guard.analyze_code_quality") as analyzer:
        analyzer.return_value = report
        decision = pretooluse_hook(payload, config)
    assert decision["permissionDecision"] == "allow"
    assert "warning" in decision.get("reason", "").lower()
def test_duplicate_code_detection(self, duplicate_code):
    """Internal duplicate findings deny the write with a duplication reason."""
    config = QualityConfig(duplicate_enabled=True)
    payload = {
        "tool_name": "Write",
        "tool_input": {"file_path": "example.py", "content": duplicate_code},
    }
    # Fabricated duplicate report: two near-identical functions.
    dup_report = {
        "duplicates": [
            {
                "similarity": 0.9,
                "description": "Similar functions",
                "locations": [
                    {"name": "func1", "lines": "1-5"},
                    {"name": "func2", "lines": "7-11"},
                ],
            },
        ],
    }
    with patch("code_quality_guard.detect_internal_duplicates") as dup_mock, \
            patch("code_quality_guard.analyze_code_quality") as analyzer:
        dup_mock.return_value = dup_report
        analyzer.return_value = {"internal_duplicates": dup_report}
        decision = pretooluse_hook(payload, config)
    assert decision["permissionDecision"] == "deny"
    assert "duplication" in decision["reason"].lower()
def test_edit_tool_handling(self):
    """For the Edit tool, new_string is the content that gets analyzed."""
    config = QualityConfig()
    payload = {
        "tool_name": "Edit",
        "tool_input": {
            "file_path": "example.py",
            "old_string": "def old():\n pass",
            "new_string": "def new():\n return True",
        },
    }
    with patch("code_quality_guard.analyze_code_quality") as analyzer:
        analyzer.return_value = {}
        decision = pretooluse_hook(payload, config)
        analyzed_source = analyzer.call_args[0][0]
    assert decision["permissionDecision"] == "allow"
    # The replacement text, not the original, must reach the analyzer.
    assert "def new()" in analyzed_source
def test_multiedit_tool_handling(self):
    """For MultiEdit, all new_string fragments are analyzed together."""
    config = QualityConfig()
    edits = [
        {"old_string": "a", "new_string": "def func1():\n pass"},
        {"old_string": "b", "new_string": "def func2():\n pass"},
    ]
    payload = {
        "tool_name": "MultiEdit",
        "tool_input": {"file_path": "example.py", "edits": edits},
    }
    with patch("code_quality_guard.analyze_code_quality") as analyzer:
        analyzer.return_value = {}
        decision = pretooluse_hook(payload, config)
        analyzed_source = analyzer.call_args[0][0]
    assert decision["permissionDecision"] == "allow"
    # Both edits' replacement text should appear in the analyzed content.
    assert "def func1()" in analyzed_source
    assert "def func2()" in analyzed_source
def test_state_tracking_enabled(self):
    """With state tracking on, a pre-edit snapshot is stored for the file."""
    config = QualityConfig(state_tracking_enabled=True)
    payload = {
        "tool_name": "Write",
        "tool_input": {
            "file_path": "example.py",
            "content": "def test():\n pass",
        },
    }
    with patch("code_quality_guard.store_pre_state") as store, \
            patch("code_quality_guard.analyze_code_quality") as analyzer:
        analyzer.return_value = {}
        pretooluse_hook(payload, config)
        # Exactly one snapshot, keyed by the target file path.
        store.assert_called_once()
        assert store.call_args[0][0] == "example.py"
def test_exception_handling(self):
    """Analyzer failures fail open: the edit is allowed with an error reason."""
    payload = {
        "tool_name": "Write",
        "tool_input": {
            "file_path": "example.py",
            "content": "def test():\n pass",
        },
    }
    with patch("code_quality_guard.analyze_code_quality") as analyzer:
        analyzer.side_effect = Exception("Analysis failed")
        decision = pretooluse_hook(payload, QualityConfig())
    assert decision["permissionDecision"] == "allow"
    assert "error" in decision.get("reason", "").lower()
def test_custom_skip_patterns(self):
    """Files matching user-supplied skip patterns are never gated."""
    config = QualityConfig(skip_patterns=["custom_skip_", "/ignored/"])
    # First path hits the filename prefix pattern, second the path pattern.
    for path in ("custom_skip_file.py", "/ignored/file.py"):
        payload = {
            "tool_name": "Write",
            "tool_input": {"file_path": path, "content": "bad code"},
        }
        decision = pretooluse_hook(payload, config)
        assert decision["permissionDecision"] == "allow"
def test_modernization_issues(self, old_style_code):
    """Modernization findings deny the write when the checks are enabled."""
    config = QualityConfig(modernization_enabled=True, require_type_hints=True)
    payload = {
        "tool_name": "Write",
        "tool_input": {"file_path": "example.py", "content": old_style_code},
    }
    findings = [
        {"issue_type": "use_enumerate", "line": 3},
        {"issue_type": "use_is_none", "line": 4},
    ]
    with patch("code_quality_guard.analyze_code_quality") as analyzer:
        analyzer.return_value = {
            "modernization": {"files": {"test.py": findings}},
        }
        decision = pretooluse_hook(payload, config)
    assert decision["permissionDecision"] == "deny"
    assert "modernization" in decision["reason"].lower()
def test_type_hint_threshold(self):
    """Enough missing-type-hint findings tips the decision to deny."""
    config = QualityConfig(require_type_hints=True)
    payload = {
        "tool_name": "Write",
        "tool_input": {
            "file_path": "example.py",
            "content": "def test(): pass",
        },
    }
    # 15 missing-return-type findings: comfortably past the threshold.
    findings = [
        {"issue_type": "missing_return_type", "line": i} for i in range(15)
    ]
    with patch("code_quality_guard.analyze_code_quality") as analyzer:
        analyzer.return_value = {
            "modernization": {"files": {"test.py": findings}},
        }
        decision = pretooluse_hook(payload, config)
    assert decision["permissionDecision"] == "deny"
    assert "type hints" in decision["reason"].lower()
def test_any_usage_denied(self):
    """typing.Any usage in strict mode escalates to an 'ask' decision.

    NOTE(review): despite the test name, the pinned behavior is "ask"
    (prompt the user), not a hard "deny" — the original docstring
    incorrectly claimed a denial.
    """
    config = QualityConfig(enforcement_mode="strict")
    payload = {
        "tool_name": "Write",
        "tool_input": {
            "file_path": "example.py",
            "content": "from typing import Any\n\n"
            "def example(value: Any) -> None:\n pass\n",
        },
    }
    # The analyzer reports nothing, so the escalation must come from the
    # Any-usage scan alone.
    with patch("code_quality_guard.analyze_code_quality") as analyzer:
        analyzer.return_value = {}
        decision = pretooluse_hook(payload, config)
    assert decision["permissionDecision"] == "ask"
    assert "any" in decision["reason"].lower()
def test_any_usage_detected_in_multiedit(self):
    """Any usage split across MultiEdit fragments is still detected."""
    config = QualityConfig()
    # The import and the Any-typed signature arrive in separate edits.
    edits = [
        {"old_string": "pass", "new_string": "from typing import Any\n"},
        {
            "old_string": "pass",
            "new_string": (
                "def handler(arg: Any) -> str:\n"
                " return str(arg)\n"
            ),
        },
    ]
    payload = {
        "tool_name": "MultiEdit",
        "tool_input": {"file_path": "example.py", "edits": edits},
    }
    with patch("code_quality_guard.analyze_code_quality") as analyzer:
        analyzer.return_value = {}
        decision = pretooluse_hook(payload, config)
    assert decision["permissionDecision"] == "ask"
    assert "any" in decision["reason"].lower()