claude-scripts/tests/hooks/test_pretooluse.py
Travis Vasceannie 9cf5baafb4 feat: implement test quality checks with enhanced guidance and external context integration
- Added a new QualityConfig class to manage test quality check configurations.
- Implemented test quality checks for specific rules in test files, including prevention of conditionals, loops, and generic exceptions.
- Integrated external context providers (Context7 and Firecrawl) for additional guidance on test quality violations.
- Enhanced error messaging to provide detailed, actionable guidance for detected issues.
- Updated README_HOOKS.md to document new test quality features and configuration options.
- Added unit tests to verify the functionality of test quality checks and their integration with the pretooluse_hook (its contract is sketched below).
2025-09-29 20:59:32 +00:00
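
The tests below pin down the hook's contract: pretooluse_hook(hook_data, config) takes the tool-event payload plus a QualityConfig and returns a decision mapping. A minimal sketch of that round trip, using only names the suite itself imports (the sample file content is hypothetical, and QualityConfig fields are inferred from the tests rather than from documentation):

    from code_quality_guard import QualityConfig, pretooluse_hook

    config = QualityConfig(enforcement_mode="strict")
    hook_data = {
        "tool_name": "Write",
        "tool_input": {
            "file_path": "example.py",  # hypothetical target file
            "content": "def f() -> int:\n    return 1\n",
        },
    }
    result = pretooluse_hook(hook_data, config)
    # Expected shape, per the assertions in this suite:
    # {"permissionDecision": "allow" | "ask" | "deny", "reason": "..."}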


"""Test PreToolUse hook functionality."""
from unittest.mock import patch
from code_quality_guard import QualityConfig, pretooluse_hook
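
# NOTE: the clean_code, complex_code, duplicate_code, and old_style_code
# fixtures used below are assumed to be provided by the suite's conftest.py.
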
class TestPreToolUseHook:
    """Test PreToolUse hook behavior."""

    def test_non_write_tool_allowed(self):
        """Test that non-write/edit tools are always allowed."""
        config = QualityConfig()
        hook_data = {
            "tool_name": "Read",
            "tool_input": {"file_path": "test.py"},
        }
        result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "allow"

    def test_tool_input_must_be_mapping(self):
        """Allow requests whose tool_input is not a mapping, without running checks."""
        config = QualityConfig()
        hook_data = {
            "tool_name": "Write",
            "tool_input": ["unexpected", "structure"],
        }
        with patch("code_quality_guard._perform_quality_check") as mock_check:
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "allow"
        mock_check.assert_not_called()

    def test_non_python_file_allowed(self):
        """Test that non-Python files are always allowed."""
        config = QualityConfig()
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "test.js",
                "content": "const x = 1;",
            },
        }
        result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "allow"

    def test_test_file_skipped(self):
        """Test that test files are skipped when configured."""
        config = QualityConfig()
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "test_example.py",
                "content": "def test(): pass",
            },
        }
        result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "allow"

    def test_clean_code_allowed(self, clean_code):
        """Test that clean code is allowed."""
        config = QualityConfig()
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "example.py",
                "content": clean_code,
            },
        }
        with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
            mock_analyze.return_value = {}
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "allow"

    def test_complex_code_denied_strict(self, complex_code):
        """Test that complex code is denied in strict mode."""
        config = QualityConfig(enforcement_mode="strict")
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "example.py",
                "content": complex_code,
            },
        }
        with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
            mock_analyze.return_value = {
                "complexity": {
                    "summary": {"average_cyclomatic_complexity": 25},
                    "distribution": {"High": 1},
                },
            }
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "deny"
        assert "quality check failed" in result["reason"].lower()

    def test_complex_code_ask_warn_mode(self, complex_code):
        """Test that complex code triggers ask in warn mode."""
        config = QualityConfig(enforcement_mode="warn")
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "example.py",
                "content": complex_code,
            },
        }
        with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
            mock_analyze.return_value = {
                "complexity": {
                    "summary": {"average_cyclomatic_complexity": 25},
                    "distribution": {"High": 1},
                },
            }
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "ask"

    def test_complex_code_allowed_permissive(self, complex_code):
        """Test that complex code is allowed with a warning in permissive mode."""
        config = QualityConfig(enforcement_mode="permissive")
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "example.py",
                "content": complex_code,
            },
        }
        with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
            mock_analyze.return_value = {
                "complexity": {
                    "summary": {"average_cyclomatic_complexity": 25},
                    "distribution": {"High": 1},
                },
            }
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "allow"
        assert "warning" in result.get("reason", "").lower()

    def test_duplicate_code_detection(self, duplicate_code):
        """Test internal duplicate detection."""
        config = QualityConfig(duplicate_enabled=True)
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "example.py",
                "content": duplicate_code,
            },
        }
        with patch("code_quality_guard.detect_internal_duplicates") as mock_dup:
            mock_dup.return_value = {
                "duplicates": [
                    {
                        "similarity": 0.9,
                        "description": "Similar functions",
                        "locations": [
                            {"name": "func1", "lines": "1-5"},
                            {"name": "func2", "lines": "7-11"},
                        ],
                    },
                ],
            }
            with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
                mock_analyze.return_value = {
                    "internal_duplicates": mock_dup.return_value,
                }
                result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "deny"
        assert "duplication" in result["reason"].lower()

    def test_edit_tool_handling(self):
        """Test Edit tool content extraction."""
        config = QualityConfig()
        hook_data = {
            "tool_name": "Edit",
            "tool_input": {
                "file_path": "example.py",
                "old_string": "def old():\n    pass",
                "new_string": "def new():\n    return True",
            },
        }
        with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
            mock_analyze.return_value = {}
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "allow"
        # Verify new_string was analyzed
        call_args = mock_analyze.call_args[0]
        assert "def new()" in call_args[0]

    def test_multiedit_tool_handling(self):
        """Test MultiEdit tool content extraction."""
        config = QualityConfig()
        hook_data = {
            "tool_name": "MultiEdit",
            "tool_input": {
                "file_path": "example.py",
                "edits": [
                    {"old_string": "a", "new_string": "def func1():\n    pass"},
                    {"old_string": "b", "new_string": "def func2():\n    pass"},
                ],
            },
        }
        with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
            mock_analyze.return_value = {}
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "allow"
        # Verify concatenated content was analyzed
        call_args = mock_analyze.call_args[0]
        assert "def func1()" in call_args[0]
        assert "def func2()" in call_args[0]

    def test_multiedit_ignores_invalid_edits(self):
        """Ensure MultiEdit skips invalid edits while analyzing valid edits."""
        config = QualityConfig()
        kept_function = "def kept() -> None:\n    return None"
        hook_data = {
            "tool_name": "MultiEdit",
            "tool_input": {
                "file_path": "example.py",
                "edits": [
                    {"old_string": "a", "new_string": "from typing import Any\n"},
                    "not-a-dict",
                    {"old_string": "b"},
                    {"old_string": "c", "new_string": None},
                    {"old_string": "d", "new_string": kept_function},
                ],
            },
        }
        with patch(
            "code_quality_guard._perform_quality_check",
            return_value=(False, []),
        ) as mock_check:
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "deny"
        mock_check.assert_called_once()
        analyzed_content = mock_check.call_args[0][1]
        assert "def kept()" in analyzed_content
        assert "typing.any" in result["reason"].lower()

    def test_state_tracking_enabled(self):
        """Test state tracking when enabled."""
        config = QualityConfig(state_tracking_enabled=True)
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "example.py",
                "content": "def test():\n    pass",
            },
        }
        with patch("code_quality_guard.store_pre_state") as mock_store:
            with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
                mock_analyze.return_value = {}
                pretooluse_hook(hook_data, config)
        # Verify state was stored
        mock_store.assert_called_once()
        assert mock_store.call_args[0][0] == "example.py"

    def test_exception_handling(self):
        """Test graceful handling of exceptions."""
        config = QualityConfig()
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "example.py",
                "content": "def test():\n    pass",
            },
        }
        with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
            mock_analyze.side_effect = Exception("Analysis failed")
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "allow"
        assert "error" in result.get("reason", "").lower()

    def test_custom_skip_patterns(self):
        """Test custom skip patterns."""
        config = QualityConfig(skip_patterns=["custom_skip_", "/ignored/"])
        # Test custom pattern match
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "custom_skip_file.py",
                "content": "bad code",
            },
        }
        result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "allow"
        # Test path pattern match
        hook_data["tool_input"]["file_path"] = "/ignored/file.py"
        result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "allow"

    def test_modernization_issues(self, old_style_code):
        """Test modernization issue detection."""
        config = QualityConfig(modernization_enabled=True, require_type_hints=True)
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "example.py",
                "content": old_style_code,
            },
        }
        with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
            mock_analyze.return_value = {
                "modernization": {
                    "files": {
                        "test.py": [
                            {"issue_type": "use_enumerate", "line": 3},
                            {"issue_type": "use_is_none", "line": 4},
                        ],
                    },
                },
            }
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "deny"
        assert "modernization" in result["reason"].lower()

    def test_type_hint_threshold(self):
        """Test type hint issue threshold."""
        config = QualityConfig(require_type_hints=True)
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "example.py",
                "content": "def test(): pass",
            },
        }
        # Test with many type hint issues
        with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
            mock_analyze.return_value = {
                "modernization": {
                    "files": {
                        "test.py": [
                            {"issue_type": "missing_return_type", "line": i}
                            for i in range(15)  # 15 type hint issues
                        ],
                    },
                },
            }
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "deny"
        assert "type hints" in result["reason"].lower()

    def test_any_usage_denied_on_analysis_failure(self):
        """Deny when typing.Any is detected even if analysis raises errors."""
        config = QualityConfig()
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "example.py",
                "content": (
                    "from typing import Any\n\n"
                    "def sample(value: Any) -> None:\n    return None"
                ),
            },
        }
        with patch(
            "code_quality_guard._perform_quality_check",
            side_effect=RuntimeError("boom"),
        ):
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "deny"
        assert "typing.any" in result["reason"].lower()
        assert "fix these issues" in result["reason"].lower()

    def test_any_usage_denied(self):
        """Test that typing.Any usage triggers a denial."""
        config = QualityConfig(enforcement_mode="strict")
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "example.py",
                "content": (
                    "from typing import Any\n\n"
                    "def example(value: Any) -> None:\n    pass\n"
                ),
            },
        }
        with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
            mock_analyze.return_value = {}
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "deny"
        assert "any" in result["reason"].lower()

    def test_any_usage_detected_in_multiedit(self):
        """Test that MultiEdit content is scanned for typing.Any usage."""
        config = QualityConfig()
        hook_data = {
            "tool_name": "MultiEdit",
            "tool_input": {
                "file_path": "example.py",
                "edits": [
                    {
                        "old_string": "pass",
                        "new_string": "from typing import Any\n",
                    },
                    {
                        "old_string": "pass",
                        "new_string": (
                            "def handler(arg: Any) -> str:\n"
                            "    return str(arg)\n"
                        ),
                    },
                ],
            },
        }
        with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
            mock_analyze.return_value = {}
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "deny"
        assert "any" in result["reason"].lower()

    def test_type_ignore_usage_denied_on_analysis_failure(self):
        """Deny when "type: ignore" is detected even if analysis raises errors."""
        config = QualityConfig()
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "example.py",
                "content": (
                    "def sample() -> None:\n"
                    "    value = unknown  # type: ignore[attr-defined]\n"
                ),
            },
        }
        with patch(
            "code_quality_guard._perform_quality_check",
            side_effect=RuntimeError("boom"),
        ):
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "deny"
        assert "type: ignore" in result["reason"].lower()
        assert "fix these issues" in result["reason"].lower()

    def test_type_ignore_usage_denied(self):
        """Test that a "type: ignore" comment triggers a denial."""
        config = QualityConfig(enforcement_mode="strict")
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "example.py",
                "content": (
                    "def example() -> None:\n"
                    "    value = unknown  # type: ignore\n"
                ),
            },
        }
        with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
            mock_analyze.return_value = {}
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "deny"
        assert "type: ignore" in result["reason"].lower()

    def test_type_ignore_usage_detected_in_multiedit(self):
        """Test that MultiEdit content is scanned for "type: ignore" comments."""
        config = QualityConfig()
        hook_data = {
            "tool_name": "MultiEdit",
            "tool_input": {
                "file_path": "example.py",
                "edits": [
                    {
                        "old_string": "pass",
                        "new_string": (
                            "def helper() -> None:\n"
                            "    pass  # type: ignore\n"
                        ),
                    },
                    {
                        "old_string": "pass",
                        "new_string": (
                            "def handler() -> None:\n"
                            "    value = unknown  # type: ignore[attr-defined]\n"
                        ),
                    },
                ],
            },
        }
        with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
            mock_analyze.return_value = {}
            result = pretooluse_hook(hook_data, config)
        assert result["permissionDecision"] == "deny"
        assert "type: ignore" in result["reason"].lower()

class TestTestQualityChecks:
    """Test test quality check functionality."""

    def test_is_test_file_detection(self):
        """Test test file path detection."""
        from code_quality_guard import is_test_file

        # Test files in test directories
        assert is_test_file("tests/test_example.py") is True
        assert is_test_file("test/test_example.py") is True
        assert is_test_file("testing/test_example.py") is True
        assert is_test_file("src/tests/test_example.py") is True
        assert is_test_file("project/tests/subdir/test_example.py") is True
        # Non-test files
        assert is_test_file("src/example.py") is False
        assert is_test_file("example.py") is False
        assert is_test_file("testsomething.py") is False

    def test_test_quality_checks_enabled_for_test_files(self):
        """Test that test quality checks run for test files."""
        config = QualityConfig(test_quality_enabled=True)
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "tests/test_example.py",
                "content": "def test_something():\n    if True:\n        pass",
            },
        }
        with patch("code_quality_guard.run_test_quality_checks") as mock_test_check:
            mock_test_check.return_value = [
                "Test Quality: no-conditionals-in-tests - Conditional found in test"
            ]
            with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
                mock_analyze.return_value = {}
                result = pretooluse_hook(hook_data, config)
        # Should be denied due to test quality issues
        assert result["permissionDecision"] == "deny"
        assert "test quality" in result["reason"].lower()
        mock_test_check.assert_called_once()

    def test_test_quality_checks_disabled_for_non_test_files(self):
        """Test that test quality checks don't run for non-test files."""
        config = QualityConfig(test_quality_enabled=True)
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "src/example.py",
                "content": "def test_something():\n    if True:\n        pass",
            },
        }
        with patch("code_quality_guard.run_test_quality_checks") as mock_test_check:
            mock_test_check.return_value = []
            with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
                mock_analyze.return_value = {}
                result = pretooluse_hook(hook_data, config)
        # Should be allowed since it's not a test file
        assert result["permissionDecision"] == "allow"
        mock_test_check.assert_not_called()

    def test_test_quality_checks_disabled_when_config_disabled(self):
        """Test that test quality checks can be disabled via config."""
        config = QualityConfig(test_quality_enabled=False)
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "tests/test_example.py",
                "content": "def test_something():\n    if True:\n        pass",
            },
        }
        with patch("code_quality_guard.run_test_quality_checks") as mock_test_check:
            mock_test_check.return_value = ["Test Quality: Issue found"]
            with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
                mock_analyze.return_value = {}
                result = pretooluse_hook(hook_data, config)
        # Should be allowed since test quality checks are disabled
        assert result["permissionDecision"] == "allow"
        mock_test_check.assert_not_called()

    def test_test_quality_checks_with_clean_test_code(self):
        """Test that clean test code passes test quality checks."""
        config = QualityConfig(test_quality_enabled=True)
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "tests/test_example.py",
                "content": "def test_something():\n    assert True",
            },
        }
        with patch("code_quality_guard.run_test_quality_checks") as mock_test_check:
            mock_test_check.return_value = []  # No issues
            with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
                mock_analyze.return_value = {}
                result = pretooluse_hook(hook_data, config)
        # Should be allowed since no test quality issues
        assert result["permissionDecision"] == "allow"
        mock_test_check.assert_called_once()

    def test_test_quality_checks_with_edit_tool(self):
        """Test that test quality checks work with the Edit tool."""
        config = QualityConfig(test_quality_enabled=True)
        hook_data = {
            "tool_name": "Edit",
            "tool_input": {
                "file_path": "tests/test_example.py",
                "old_string": "def old():\n    pass",
                "new_string": "def test_new():\n    if True:\n        pass",
            },
        }
        with patch("code_quality_guard.run_test_quality_checks") as mock_test_check:
            mock_test_check.return_value = [
                "Test Quality: no-conditionals-in-tests - Conditional found in test"
            ]
            with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
                mock_analyze.return_value = {}
                result = pretooluse_hook(hook_data, config)
        # Should be denied due to test quality issues
        assert result["permissionDecision"] == "deny"
        assert "test quality" in result["reason"].lower()
        mock_test_check.assert_called_once()

    def test_test_quality_checks_with_multiedit_tool(self):
        """Test that test quality checks work with the MultiEdit tool."""
        config = QualityConfig(test_quality_enabled=True)
        hook_data = {
            "tool_name": "MultiEdit",
            "tool_input": {
                "file_path": "tests/test_example.py",
                "edits": [
                    {
                        "old_string": "a",
                        "new_string": "def test_func1():\n    assert True",
                    },
                    {
                        "old_string": "b",
                        "new_string": "def test_func2():\n    if False:\n        pass",
                    },
                ],
            },
        }
        with patch("code_quality_guard.run_test_quality_checks") as mock_test_check:
            mock_test_check.return_value = [
                "Test Quality: no-conditionals-in-tests - Conditional found in test"
            ]
            with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
                mock_analyze.return_value = {}
                result = pretooluse_hook(hook_data, config)
        # Should be denied due to test quality issues
        assert result["permissionDecision"] == "deny"
        assert "test quality" in result["reason"].lower()
        mock_test_check.assert_called_once()

    def test_test_quality_checks_combined_with_other_prechecks(self):
        """Test that test quality checks work alongside other prechecks."""
        config = QualityConfig(test_quality_enabled=True)
        hook_data = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": "tests/test_example.py",
                "content": (
                    "from typing import Any\n\n"
                    "def test_something():\n"
                    "    if True:\n"
                    "        pass  # type: ignore\n"
                ),
            },
        }
        with patch("code_quality_guard.run_test_quality_checks") as mock_test_check:
            mock_test_check.return_value = [
                "Test Quality: no-conditionals-in-tests - Conditional found in test"
            ]
            with patch("code_quality_guard.analyze_code_quality") as mock_analyze:
                mock_analyze.return_value = {}
                result = pretooluse_hook(hook_data, config)
        # Should be denied due to multiple precheck issues
        assert result["permissionDecision"] == "deny"
        assert "any" in result["reason"].lower()
        assert "type: ignore" in result["reason"].lower()
        assert "test quality" in result["reason"].lower()
        mock_test_check.assert_called_once()