commit 5480b8ab06
parent 3994364894
Date: 2025-10-03 22:57:31 +00:00

2 changed files with 3611 additions and 64 deletions


@@ -525,12 +525,21 @@ def _run_type_checker(
     try:
         cmd = [str(tool_path)] + tool_config["args"]
+        # Activate virtual environment for the subprocess
+        env = os.environ.copy()
+        env["VIRTUAL_ENV"] = str(venv_bin.parent)
+        env["PATH"] = f"{venv_bin}:{env.get('PATH', '')}"
+        # Remove any PYTHONHOME that might interfere
+        env.pop("PYTHONHOME", None)
         result = subprocess.run(  # noqa: S603
             cmd,
             check=False,
             capture_output=True,
             text=True,
             timeout=30,
+            env=env,
         )
         # Check for tool-specific errors
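The environment-preparation block added here reappears at every call site in this commit. As a minimal standalone sketch of the pattern (the venv_env helper name and the `.venv` layout are illustrative, not part of the commit):

    import os
    from pathlib import Path

    def venv_env(venv_bin: Path) -> dict[str, str]:
        """Copy the current environment and point it at the project venv."""
        env = os.environ.copy()
        env["VIRTUAL_ENV"] = str(venv_bin.parent)          # the .venv root
        env["PATH"] = f"{venv_bin}:{env.get('PATH', '')}"  # venv binaries win lookup
        env.pop("PYTHONHOME", None)                        # PYTHONHOME would override the venv
        return env

This mirrors what `source .venv/bin/activate` does for a shell, which is why the child process finds the venv's tools without an explicit activation step.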
@@ -616,6 +625,14 @@ def _run_quality_analyses(
"--format",
"json",
]
# Prepare virtual environment for subprocess
venv_bin = Path(__file__).parent.parent / ".venv/bin"
env = os.environ.copy()
env["VIRTUAL_ENV"] = str(venv_bin.parent)
env["PATH"] = f"{venv_bin}:{env.get('PATH', '')}"
env.pop("PYTHONHOME", None)
with suppress(subprocess.TimeoutExpired):
result = subprocess.run( # noqa: S603
cmd,
@@ -623,6 +640,7 @@ def _run_quality_analyses(
             capture_output=True,
             text=True,
             timeout=30,
+            env=env,
         )
         if result.returncode == 0:
             with suppress(json.JSONDecodeError):
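Both analysis hunks wrap the subprocess call in contextlib.suppress so a hung tool or malformed output degrades to "no findings" rather than crashing the hook. A sketch of that pattern in isolation (run_json_tool is a hypothetical name, not from the commit):

    import json
    import subprocess
    from contextlib import suppress

    def run_json_tool(cmd: list[str], env: dict[str, str]) -> dict | None:
        """Run a JSON-emitting tool; timeouts and bad JSON yield None."""
        with suppress(subprocess.TimeoutExpired):
            result = subprocess.run(  # noqa: S603
                cmd, check=False, capture_output=True, text=True, timeout=30, env=env
            )
            if result.returncode == 0:
                with suppress(json.JSONDecodeError):
                    return json.loads(result.stdout)
        return None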
@@ -653,6 +671,14 @@ def _run_quality_analyses(
"json",
]
cmd = [c for c in cmd if c] # Remove empty strings
# Prepare virtual environment for subprocess
venv_bin = Path(__file__).parent.parent / ".venv/bin"
env = os.environ.copy()
env["VIRTUAL_ENV"] = str(venv_bin.parent)
env["PATH"] = f"{venv_bin}:{env.get('PATH', '')}"
env.pop("PYTHONHOME", None)
with suppress(subprocess.TimeoutExpired):
result = subprocess.run( # noqa: S603
cmd,
@@ -660,6 +686,7 @@ def _run_quality_analyses(
             capture_output=True,
             text=True,
             timeout=30,
+            env=env,
         )
         if result.returncode == 0:
             with suppress(json.JSONDecodeError):
@@ -902,6 +929,14 @@ def check_cross_file_duplicates(file_path: str, config: QualityConfig) -> list[s
     try:
         claude_quality_cmd = get_claude_quality_command()
+        # Prepare virtual environment for subprocess
+        venv_bin = Path(__file__).parent.parent / ".venv/bin"
+        env = os.environ.copy()
+        env["VIRTUAL_ENV"] = str(venv_bin.parent)
+        env["PATH"] = f"{venv_bin}:{env.get('PATH', '')}"
+        env.pop("PYTHONHOME", None)
         result = subprocess.run(  # noqa: S603
             [
                 *claude_quality_cmd,
@@ -916,6 +951,7 @@ def check_cross_file_duplicates(file_path: str, config: QualityConfig) -> list[s
             capture_output=True,
             text=True,
             timeout=60,
+            env=env,
         )
         if result.returncode == 0:
             data = json.loads(result.stdout)
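Prepending venv_bin to PATH is what makes bare tool names resolve to the project venv's executables. A quick way to confirm the precedence, reusing the hypothetical venv_env sketch above:

    import shutil
    from pathlib import Path

    env = venv_env(Path(__file__).parent.parent / ".venv/bin")
    # Prints the venv's executable (e.g. .../.venv/bin/sourcery) when installed there
    print(shutil.which("sourcery", path=env["PATH"]))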
@@ -1547,15 +1583,18 @@ def run_test_quality_checks(content: str, file_path: str, config: QualityConfig)
"dont-import-test-modules",
]
cmd = [
str(sourcery_path),
"review",
tmp_path,
"--rules",
",".join(test_rules),
"--format",
"json",
]
# Build command with --enable for each rule
cmd = [str(sourcery_path), "review", tmp_path]
for rule in test_rules:
cmd.extend(["--enable", rule])
cmd.append("--check") # Return exit code 1 if issues found
# Activate virtual environment for the subprocess
env = os.environ.copy()
env["VIRTUAL_ENV"] = str(venv_bin.parent)
env["PATH"] = f"{venv_bin}:{env.get('PATH', '')}"
# Remove any PYTHONHOME that might interfere
env.pop("PYTHONHOME", None)
logging.debug("Running Sourcery command: %s", " ".join(cmd))
result = subprocess.run( # noqa: S603
@@ -1564,73 +1603,43 @@ def run_test_quality_checks(content: str, file_path: str, config: QualityConfig)
             capture_output=True,
             text=True,
             timeout=30,
+            env=env,
         )
         logging.debug("Sourcery exit code: %s", result.returncode)
         logging.debug("Sourcery stdout: %s", result.stdout)
         logging.debug("Sourcery stderr: %s", result.stderr)
-        if result.returncode == 0:
-            try:
-                sourcery_output = json.loads(result.stdout)
-                # Extract issues from Sourcery output - handle different JSON formats
-                if "files" in sourcery_output:
-                    for file_issues in sourcery_output["files"].values():
-                        if isinstance(file_issues, list):
-                            for issue in file_issues:
-                                if isinstance(issue, dict):
-                                    rule_id = issue.get("rule", "unknown")
-                                    # Generate enhanced guidance for each violation
-                                    base_guidance = generate_test_quality_guidance(rule_id, content, file_path, config)
-                                    # Add external context if available
-                                    external_context = get_external_context(rule_id, content, file_path, config)
-                                    if external_context:
-                                        base_guidance += f"\n\n{external_context}"
-                                    issues.append(base_guidance)
-                elif "violations" in sourcery_output:
-                    # Alternative format
-                    for violation in sourcery_output["violations"]:
-                        if isinstance(violation, dict):
-                            rule_id = violation.get("rule", "unknown")
-                            base_guidance = generate_test_quality_guidance(rule_id, content, file_path, config)
-                            # Add external context if available
-                            external_context = get_external_context(rule_id, content, file_path, config)
-                            if external_context:
-                                base_guidance += f"\n\n{external_context}"
-                            issues.append(base_guidance)
-                elif isinstance(sourcery_output, list):
-                    # Direct list of issues
-                    for issue in sourcery_output:
-                        if isinstance(issue, dict):
-                            rule_id = issue.get("rule", "unknown")
-                            base_guidance = generate_test_quality_guidance(rule_id, content, file_path, config)
-                            # Add external context if available
-                            external_context = get_external_context(rule_id, content, file_path, config)
-                            if external_context:
-                                base_guidance += f"\n\n{external_context}"
-                            issues.append(base_guidance)
-            except json.JSONDecodeError as e:
-                logging.debug("Failed to parse Sourcery JSON output: %s", e)
-                # If JSON parsing fails, provide general guidance with external context
-        elif result.returncode != 0 and (result.stdout.strip() or result.stderr.strip()):
-            # Sourcery found issues or errors - provide general guidance
-            error_output = (result.stdout + " " + result.stderr).strip()
-            base_guidance = generate_test_quality_guidance("sourcery-error", content, file_path, config)
-            external_context = get_external_context("sourcery-error", content, file_path, config)
-            if external_context:
-                base_guidance += f"\n\n{external_context}"
-            issues.append(base_guidance)
+        # Sourcery with --check returns:
+        # - Exit code 0: No issues found
+        # - Exit code 1: Issues found
+        # - Exit code 2: Error occurred
+        if result.returncode == 1:
+            # Issues were found - parse the output
+            output = result.stdout + result.stderr
+            # Try to extract rule names from the output
+            # Sourcery output format typically includes rule names in brackets or after specific markers
+            for rule in test_rules:
+                if rule in output or rule.replace("-", " ") in output.lower():
+                    base_guidance = generate_test_quality_guidance(rule, content, file_path, config)
+                    external_context = get_external_context(rule, content, file_path, config)
+                    if external_context:
+                        base_guidance += f"\n\n{external_context}"
+                    issues.append(base_guidance)
+                    break  # Only add one guidance message
+            else:
+                # If no specific rule found, provide general guidance
+                base_guidance = generate_test_quality_guidance("unknown", content, file_path, config)
+                external_context = get_external_context("unknown", content, file_path, config)
+                if external_context:
+                    base_guidance += f"\n\n{external_context}"
+                issues.append(base_guidance)
+        elif result.returncode == 2:
+            # Error occurred
+            logging.debug("Sourcery error: %s", result.stderr)
+            # Don't block on Sourcery errors - just log them
+        # Exit code 0 means no issues - do nothing
     except (subprocess.TimeoutExpired, OSError, json.JSONDecodeError) as e:
         # If Sourcery fails, don't block the operation
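The new flow leans on the exit-code contract the commit's comments describe (0 clean, 1 issues, 2 error) instead of a stable JSON schema. Reduced to its core (sourcery_check is an illustrative name, not from the commit):

    import subprocess

    def sourcery_check(cmd: list[str], env: dict[str, str]) -> bool | None:
        """Map `sourcery review --check` exit codes to a tri-state result."""
        result = subprocess.run(  # noqa: S603
            cmd, check=False, capture_output=True, text=True, timeout=30, env=env
        )
        if result.returncode == 0:
            return False  # no issues found
        if result.returncode == 1:
            return True   # issues found; details are in result.stdout/stderr
        return None       # tool error; log and don't block, as the hook does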

File diff suppressed because it is too large