"""Integration tests for the complete hook system."""
|
|
|
|
import json
import os
import tempfile
from pathlib import Path
from unittest.mock import patch
|
|
|
|
|
|
class TestHookIntegration:
    """Test complete hook integration scenarios.

    Each test drives ``code_quality_guard.main`` end to end by patching
    ``sys.stdin`` / ``json.load`` with a fabricated hook payload and then
    inspecting the JSON decision the hook prints (captured via a patched
    ``builtins.print``).
    """
|
|
|
|
def test_main_entry_pretooluse(self):
    """Main should route a payload carrying ``tool_input`` to the PreToolUse hook."""
    from code_quality_guard import main

    payload = {
        "tool_name": "Write",
        "tool_input": {"file_path": "test.py", "content": "def test(): pass"},
    }
    serialized = json.dumps(payload)

    with patch("sys.stdin") as fake_stdin, patch("builtins.print"):
        fake_stdin.read.return_value = serialized
        fake_stdin.__iter__.return_value = [serialized]

        with patch("json.load", return_value=payload):
            with patch("code_quality_guard.pretooluse_hook") as pre_hook:
                pre_hook.return_value = {"decision": "allow"}
                main()
                # Routing check: the PreToolUse handler ran exactly once.
                pre_hook.assert_called_once()
|
|
|
|
def test_main_entry_posttooluse(self):
    """Main should route a payload carrying ``tool_output`` to the PostToolUse hook."""
    from code_quality_guard import main

    payload = {
        "tool_name": "Write",
        "tool_output": {"file_path": "test.py", "status": "success"},
    }
    serialized = json.dumps(payload)

    with patch("sys.stdin") as fake_stdin, patch("builtins.print"):
        fake_stdin.read.return_value = serialized
        fake_stdin.__iter__.return_value = [serialized]

        with patch("json.load", return_value=payload):
            with patch("code_quality_guard.posttooluse_hook") as post_hook:
                post_hook.return_value = {"decision": "allow"}
                main()
                # Routing check: the PostToolUse handler ran exactly once.
                post_hook.assert_called_once()
|
|
|
|
def test_main_invalid_json(self):
    """Malformed JSON on stdin must fail open with an "allow" decision."""
    from code_quality_guard import main

    decode_error = json.JSONDecodeError("test", "test", 0)

    with patch("sys.stdin"), patch("builtins.print") as fake_print:
        with patch("json.load", side_effect=decode_error):
            main()

        # Should print allow decision
        response = json.loads(fake_print.call_args[0][0])
        assert response["decision"] == "allow"
|
|
|
|
def test_full_flow_clean_code(self, clean_code):
    """Test full flow with clean code.

    Runs the PreToolUse phase, simulates the Write tool creating the file,
    then runs the PostToolUse phase; both phases must produce an "allow"
    decision and the post phase must report a success message.
    """
    from code_quality_guard import main

    file_path = f"{tempfile.gettempdir()}/clean.py"

    # PreToolUse
    pre_input = {
        "tool_name": "Write",
        "tool_input": {"file_path": file_path, "content": clean_code},
    }

    with patch("sys.stdin"), patch("builtins.print") as mock_print:
        with patch("json.load", return_value=pre_input):
            main()

        response = json.loads(mock_print.call_args[0][0])
        assert response["decision"] == "allow"

    # Simulate the Write tool actually creating the file.
    test_file = Path(file_path)
    test_file.write_text(clean_code)

    try:
        # PostToolUse
        post_input = {
            "tool_name": "Write",
            "tool_output": {"file_path": file_path, "status": "success"},
        }

        with patch("sys.stdin"), patch("builtins.print") as mock_print:
            with patch("json.load", return_value=post_input):
                # patch.dict restores the environment on exit, so the
                # success-message flag cannot leak into other tests (the
                # previous bare os.environ assignment leaked it).
                with patch.dict(os.environ, {"QUALITY_SHOW_SUCCESS": "true"}):
                    main()

            response = json.loads(mock_print.call_args[0][0])
            assert response["decision"] == "allow"
            assert "passed" in response.get("message", "").lower()
    finally:
        # Always remove the temp file, even when an assertion fails above
        # (the original only cleaned up on the success path).
        test_file.unlink(missing_ok=True)
|
|
|
|
def test_environment_configuration_flow(self):
    """Test that environment variables are properly used.

    With strict enforcement and a very low complexity threshold, a mocked
    above-threshold analysis must produce a "deny" decision and exit code 2.
    """
    from code_quality_guard import main

    # Strict settings, scoped with patch.dict so they are restored after the
    # test (the previous os.environ.update leaked them into later tests).
    strict_env = {
        "QUALITY_ENFORCEMENT": "strict",
        "QUALITY_COMPLEXITY_THRESHOLD": "5",  # Very low threshold
        "QUALITY_DUP_ENABLED": "false",
        "QUALITY_COMPLEXITY_ENABLED": "true",  # Keep complexity enabled
        "QUALITY_MODERN_ENABLED": "false",
    }

    complex_code = """
def complex_func(a, b, c):
    if a:
        if b:
            if c:
                return 1
            else:
                return 2
        else:
            return 3
    else:
        return 4
"""

    hook_input = {
        "tool_name": "Write",
        "tool_input": {"file_path": "complex.py", "content": complex_code},
    }

    with patch.dict(os.environ, strict_env):
        with patch("sys.stdin"), patch("builtins.print") as mock_print:
            with patch("json.load", return_value=hook_input):
                with patch(
                    "code_quality_guard.analyze_code_quality",
                ) as mock_analyze:
                    # Mock the complexity analysis result
                    mock_analyze.return_value = {
                        "complexity": {
                            "summary": {
                                "average_cyclomatic_complexity": 8,
                            },  # Above threshold
                            "distribution": {"High": 1},
                        },
                    }
                    try:
                        main()
                        msg = "Expected SystemExit"
                        raise AssertionError(msg)
                    except SystemExit as e:
                        assert e.code == 2, "Expected exit code 2 for deny"  # noqa: PT017

            printed = mock_print.call_args[0][0]
            response = json.loads(printed)
            # Should be denied due to low complexity threshold
            assert response["decision"] == "deny"
|
|
|
|
def test_skip_patterns_integration(self):
    """Files matching skip patterns (e.g. test files) bypass quality checks."""
    from code_quality_guard import main

    # A test_*.py file with deliberately bad content should still be allowed.
    skip_payload = {
        "tool_name": "Write",
        "tool_input": {
            "file_path": "test_something.py",
            "content": "bad code with issues",
        },
    }

    with patch("sys.stdin"), patch("builtins.print") as fake_print:
        with patch("json.load", return_value=skip_payload):
            main()

        response = json.loads(fake_print.call_args[0][0])
        assert response["decision"] == "allow"
|
|
|
|
def test_state_tracking_flow(self, temp_python_file):
    """Test state tracking between pre and post.

    Stores state in PreToolUse, shrinks the file (fewer functions), then
    checks that PostToolUse still allows and may report the reduction.
    """
    from code_quality_guard import main

    # Scope the flag with patch.dict so it is restored afterwards (the
    # previous bare os.environ assignment leaked into later tests).
    with patch.dict(os.environ, {"QUALITY_STATE_TRACKING": "true"}):
        # PreToolUse - store state
        initial_content = "def func1(): pass\ndef func2(): pass\ndef func3(): pass"
        pre_input = {
            "tool_name": "Write",
            "tool_input": {
                "file_path": str(temp_python_file),
                "content": initial_content,
            },
        }

        # No print inspection needed in the pre phase (the original bound an
        # unused mock_print here).
        with patch("sys.stdin"), patch("builtins.print"):
            with patch("json.load", return_value=pre_input):
                main()

        # Simulate file modification (fewer functions)
        modified_content = "def func1(): pass"
        temp_python_file.write_text(modified_content)

        # PostToolUse - check state
        post_input = {
            "tool_name": "Write",
            "tool_output": {
                "file_path": str(temp_python_file),
                "status": "success",
            },
        }

        with patch("sys.stdin"), patch("builtins.print") as mock_print:
            with patch("json.load", return_value=post_input):
                main()

            response = json.loads(mock_print.call_args[0][0])
            assert response["decision"] == "allow"
            # Should detect function reduction
            if "message" in response:
                assert (
                    "reduced" in response["message"].lower()
                    or len(response["message"]) == 0
                )
|
|
|
|
def test_cross_tool_handling(self):
    """Every tool type — file-writing or not — should yield an allow decision."""
    from code_quality_guard import main

    file_tools = ("Write", "Edit", "MultiEdit")

    for tool in ["Write", "Edit", "MultiEdit", "Read", "Bash", "Task"]:
        # File-modifying tools carry a path and content; others get an
        # empty tool_input.
        tool_input = (
            {"file_path": "test.py", "content": "def test(): pass"}
            if tool in file_tools
            else {}
        )
        hook_input = {"tool_name": tool, "tool_input": tool_input}

        with patch("sys.stdin"), patch("builtins.print") as fake_print:
            with patch("json.load", return_value=hook_input):
                main()

            response = json.loads(fake_print.call_args[0][0])
            assert response["decision"] == "allow"
|
|
|
|
def test_enforcement_mode_progression(self, complex_code):
    """Test progression through enforcement modes.

    strict -> deny (exit 2), warn -> ask (exit 2), permissive -> allow
    (no exit); the printed decision must match the mode in each case.
    """
    from code_quality_guard import main

    hook_input = {
        "tool_name": "Write",
        "tool_input": {"file_path": "complex.py", "content": complex_code},
    }

    modes_and_decisions = [
        ("strict", "deny"),
        ("warn", "ask"),
        ("permissive", "allow"),
    ]

    for mode, expected_decision in modes_and_decisions:
        # patch.dict scopes the enforcement settings to this iteration and
        # restores the environment afterwards (the previous bare
        # os.environ assignments leaked the last mode into later tests).
        mode_env = {
            "QUALITY_ENFORCEMENT": mode,
            "QUALITY_COMPLEXITY_THRESHOLD": "10",
        }

        with patch.dict(os.environ, mode_env):
            with patch("sys.stdin"), patch("builtins.print") as mock_print:
                with patch("json.load", return_value=hook_input):
                    if expected_decision in ["deny", "ask"]:
                        # Expect SystemExit with code 2 for deny/ask decisions
                        try:
                            main()
                            msg = f"Expected SystemExit for {mode} mode"
                            raise AssertionError(msg)
                        except SystemExit as e:
                            assert e.code == 2, (  # noqa: PT017
                                f"Expected exit code 2 for {mode} mode"
                            )
                    else:
                        # Permissive mode should not exit
                        main()

                printed = mock_print.call_args[0][0]
                response = json.loads(printed)
                assert response["decision"] == expected_decision
|