diff --git a/.backup/ban_stdlib_logger.rego b/.backup/ban_stdlib_logger.rego new file mode 100644 index 0000000..d6f372f --- /dev/null +++ b/.backup/ban_stdlib_logger.rego @@ -0,0 +1,163 @@ +# METADATA +# scope: package +# title: Ban Stdlib Logger +# description: Blocks use of stdlib logging in Python code +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.ban_stdlib_logger + +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := 
edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + +is_python_patch(patch_text) if { + contains(patch_text, ".py") +} + +is_python_patch(patch_text) if { + contains(patch_text, ".pyi") +} + +stdlib_logger_pattern := `import logging|from logging import|logging\.getLogger` +file_path_pattern := `\.py$` + +# Block Write/Edit operations that introduce stdlib logging + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + file_path := resolved_file_path + regex.match(file_path_pattern, file_path) + + content := new_content + content != null + regex.match(stdlib_logger_pattern, content) + + decision := { + "rule_id": "PY-LOG-001", + "reason": "Stdlib logging usage is prohibited. Use the project logging utilities instead.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + file_path := edit_path(edit) + regex.match(file_path_pattern, file_path) + + content := edit_new_content(edit) + content != null + regex.match(stdlib_logger_pattern, content) + + decision := { + "rule_id": "PY-LOG-001", + "reason": "Stdlib logging usage is prohibited. 
Use the project logging utilities instead.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Patch", "ApplyPatch"} + + patch := patch_content + patch != null + + lower_patch := lower(patch) + is_python_patch(lower_patch) + regex.match(stdlib_logger_pattern, patch) + + decision := { + "rule_id": "PY-LOG-001", + "reason": "Stdlib logging usage is prohibited. Use the project logging utilities instead.", + "severity": "HIGH" + } +} diff --git a/.backup/block_assertion_roulette.rego b/.backup/block_assertion_roulette.rego new file mode 100644 index 0000000..8c2671d --- /dev/null +++ b/.backup/block_assertion_roulette.rego @@ -0,0 +1,175 @@ +# METADATA +# scope: package +# title: Block Assertion Roulette +# description: Blocks multiple bare asserts in a single test without messages +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.block_assertion_roulette +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + 
tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + + +patch_targets_path(pattern) if { + patch := patch_content + patch != null + lines := split(patch, "\n") + some line in lines + startswith(line, "+++ b/") + path := replace(line, "+++ b/", "") + regex.match(pattern, path) +} + +patch_targets_path(pattern) if { + patch := patch_content + patch != null + lines := split(patch, "\n") + some line in lines + startswith(line, "--- a/") + path := replace(line, "--- a/", "") + regex.match(pattern, path) +} + +file_path_pattern := `tests?/.*\.py$` +assertion_pattern := `^\s*assert\s+[^,\n]+\n\s*assert\s+[^,\n]+$` + +# Block Write/Edit operations that introduce assertion roulette + +deny contains decision if { + input.hook_event_name 
== "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + file_path := resolved_file_path + regex.match(file_path_pattern, file_path) + + content := new_content + content != null + regex.match(assertion_pattern, content) + + decision := { + "rule_id": "TEST-ASSERT-001", + "reason": "Multiple bare asserts detected. Use one assert per test or add assertion messages.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + file_path := edit_path(edit) + regex.match(file_path_pattern, file_path) + + content := edit_new_content(edit) + content != null + regex.match(assertion_pattern, content) + + decision := { + "rule_id": "TEST-ASSERT-001", + "reason": "Multiple bare asserts detected. Use one assert per test or add assertion messages.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Patch", "ApplyPatch"} + + patch := patch_content + patch != null + + patch_targets_path(file_path_pattern) + + regex.match(assertion_pattern, patch) + + decision := { + "rule_id": "TEST-ASSERT-001", + "reason": "Multiple bare asserts detected. 
Use one assert per test or add assertion messages.", + "severity": "HIGH" + } +} diff --git a/.backup/block_biome_ignore.rego b/.backup/block_biome_ignore.rego new file mode 100644 index 0000000..35516ad --- /dev/null +++ b/.backup/block_biome_ignore.rego @@ -0,0 +1,175 @@ +# METADATA +# scope: package +# title: Block Biome Ignore +# description: Blocks ignore directives in JS/TS files +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.block_biome_ignore +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := 
edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + + +patch_targets_path(pattern) if { + patch := patch_content + patch != null + lines := split(patch, "\n") + some line in lines + startswith(line, "+++ b/") + path := replace(line, "+++ b/", "") + regex.match(pattern, path) +} + +patch_targets_path(pattern) if { + patch := patch_content + patch != null + lines := split(patch, "\n") + some line in lines + startswith(line, "--- a/") + path := replace(line, "--- a/", "") + regex.match(pattern, path) +} + +file_path_pattern := `\.(js|jsx|ts|tsx|mjs|cjs)$` +ignore_pattern := `//\s*biome-ignore|//\s*@ts-ignore|//\s*@ts-expect-error|//\s*@ts-nocheck|//\s*eslint-disable|/\*\s*eslint-disable` + +# Block Write/Edit operations that introduce ignore directives + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + file_path := resolved_file_path + regex.match(file_path_pattern, file_path) + + content := new_content + content != null + regex.match(ignore_pattern, content) + + decision := { + "rule_id": "TS-LINT-002", + "reason": "Ignore directives for Biome/TypeScript/ESLint are prohibited.", + "severity": "HIGH" + } +} + +deny contains 
decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + file_path := edit_path(edit) + regex.match(file_path_pattern, file_path) + + content := edit_new_content(edit) + content != null + regex.match(ignore_pattern, content) + + decision := { + "rule_id": "TS-LINT-002", + "reason": "Ignore directives for Biome/TypeScript/ESLint are prohibited.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Patch", "ApplyPatch"} + + patch := patch_content + patch != null + + patch_targets_path(file_path_pattern) + + regex.match(ignore_pattern, patch) + + decision := { + "rule_id": "TS-LINT-002", + "reason": "Ignore directives for Biome/TypeScript/ESLint are prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_biome_ignore_bash.rego b/.backup/block_biome_ignore_bash.rego new file mode 100644 index 0000000..e88d33d --- /dev/null +++ b/.backup/block_biome_ignore_bash.rego @@ -0,0 +1,26 @@ +# METADATA +# scope: package +# title: Block Biome Ignore (Bash) +# description: Blocks Bash commands that add ignore directives to JS/TS files +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["Bash"] +package cupcake.policies.opencode.block_biome_ignore_bash +import rego.v1 + +ignore_pattern := `(biome-ignore|@ts-ignore|@ts-expect-error|@ts-nocheck|eslint-disable).*\.(js|jsx|ts|tsx|mjs|cjs)` + +deny contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + command := input.tool_input.command + regex.match(ignore_pattern, command) + + decision := { + "rule_id": "TS-LINT-001", + "reason": "Ignore directives for Biome/TypeScript/ESLint are prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_broad_exception_handler.rego b/.backup/block_broad_exception_handler.rego new file mode 100644 index 0000000..c706d2f --- /dev/null +++ b/.backup/block_broad_exception_handler.rego @@ -0,0 
+1,146 @@ +# METADATA +# scope: package +# title: Block Broad Exception Handler +# description: Blocks bare Exception handlers that only log +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.block_broad_exception_handler +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := 
"" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + +handler_pattern := `except\s+Exception\s*(?:as\s+\w+)?:\s*\n\s+(?:logger\.|logging\.)` + +# Block Write/Edit operations that introduce broad exception handlers + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + content := new_content + content != null + regex.match(handler_pattern, content) + + decision := { + "rule_id": "PY-EXC-001", + "reason": "Broad Exception handlers that only log are prohibited.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + content := edit_new_content(edit) + content != null + regex.match(handler_pattern, content) + + decision := { + "rule_id": "PY-EXC-001", + "reason": "Broad Exception handlers that only log are prohibited.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Patch", "ApplyPatch"} + + patch := patch_content + patch != null + + + regex.match(handler_pattern, patch) + + decision := { + "rule_id": "PY-EXC-001", + "reason": "Broad Exception handlers that only log are prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_code_quality_test_bash.rego b/.backup/block_code_quality_test_bash.rego new file mode 100644 index 0000000..692c70b --- /dev/null +++ 
b/.backup/block_code_quality_test_bash.rego @@ -0,0 +1,26 @@ +# METADATA +# scope: package +# title: Block Code Quality Test (Bash) +# description: Blocks Bash edits to src/test/code-quality.test.ts +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["Bash"] +package cupcake.policies.opencode.block_code_quality_test_bash +import rego.v1 + +pattern := `(sed|awk|cat\s*>|echo\s*>|tee|cp\s+.*code-quality\.test\.ts|mv\s+.*code-quality\.test\.ts|rm\s+.*code-quality\.test\.ts|>|>>).*code-quality\.test\.ts|code-quality\.test\.ts.*(>|>>|\|.*tee)` + +deny contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + command := input.tool_input.command + regex.match(pattern, command) + + decision := { + "rule_id": "TS-QUALITY-001", + "reason": "Direct edits to src/test/code-quality.test.ts are prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_code_quality_test_edits.rego b/.backup/block_code_quality_test_edits.rego new file mode 100644 index 0000000..7f5ced7 --- /dev/null +++ b/.backup/block_code_quality_test_edits.rego @@ -0,0 +1,127 @@ +# METADATA +# scope: package +# title: Block Code Quality Test (Edits) +# description: Blocks file edits to src/test/code-quality.test.ts +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.block_code_quality_test_edits +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + 
+new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + +file_path_pattern := `src/test/code-quality\.test\.ts$` + +# Block Write/Edit operations targeting code-quality test file + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + file_path := resolved_file_path + 
regex.match(file_path_pattern, file_path) + + decision := { + "rule_id": "TS-QUALITY-002", + "reason": "Direct edits to src/test/code-quality.test.ts are prohibited.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + file_path := edit_path(edit) + regex.match(file_path_pattern, file_path) + + decision := { + "rule_id": "TS-QUALITY-002", + "reason": "Direct edits to src/test/code-quality.test.ts are prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_code_quality_test_serena.rego b/.backup/block_code_quality_test_serena.rego new file mode 100644 index 0000000..cff4ce8 --- /dev/null +++ b/.backup/block_code_quality_test_serena.rego @@ -0,0 +1,126 @@ +# METADATA +# scope: package +# title: Block Code Quality Test (Serena) +# description: Blocks Serena edits to src/test/code-quality.test.ts +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: +# - McpSerenaReplaceContent +# - McpSerenaReplaceSymbolBody +# - McpSerenaCreateTextFile +# - McpSerenaInsertBeforeSymbol +# - McpSerenaInsertAfterSymbol +# - McpSerenaRenameSymbol +package cupcake.policies.opencode.block_code_quality_test_serena +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := 
tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + +file_path_pattern := `(^|/)src/test/code-quality\.test\.ts$` + +get_relative_path := path if { + path := tool_input.relative_path +} else := path if { + path := tool_input.path +} else := "" + +# Block Serena operations targeting code-quality test file + +deny contains decision if { + input.hook_event_name == "PreToolUse" + + tool_names := {"McpSerenaReplaceContent", "McpSerenaReplaceSymbolBody", "McpSerenaCreateTextFile", "McpSerenaInsertBeforeSymbol", "McpSerenaInsertAfterSymbol", 
"McpSerenaRenameSymbol"} + tool_name in tool_names + + file_path := get_relative_path + regex.match(file_path_pattern, file_path) + + decision := { + "rule_id": "TS-QUALITY-003", + "reason": "Direct edits to src/test/code-quality.test.ts are prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_code_quality_test_serena_plugin.rego b/.backup/block_code_quality_test_serena_plugin.rego new file mode 100644 index 0000000..fb87a2b --- /dev/null +++ b/.backup/block_code_quality_test_serena_plugin.rego @@ -0,0 +1,126 @@ +# METADATA +# scope: package +# title: Block Code Quality Test (Serena Plugin) +# description: Blocks Serena plugin edits to src/test/code-quality.test.ts +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: +# - McpPluginSerenaSerenaReplaceContent +# - McpPluginSerenaSerenaReplaceSymbolBody +# - McpPluginSerenaSerenaCreateTextFile +# - McpPluginSerenaSerenaInsertBeforeSymbol +# - McpPluginSerenaSerenaInsertAfterSymbol +# - McpPluginSerenaSerenaRenameSymbol +package cupcake.policies.opencode.block_code_quality_test_serena_plugin +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := 
tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + +file_path_pattern := `(^|/)src/test/code-quality\.test\.ts$` + +get_relative_path := path if { + path := tool_input.relative_path +} else := path if { + path := tool_input.path +} else := "" + +# Block Serena plugin operations targeting code-quality test file + +deny contains decision if { + input.hook_event_name == "PreToolUse" + + tool_names := {"McpPluginSerenaSerenaReplaceContent", "McpPluginSerenaSerenaReplaceSymbolBody", "McpPluginSerenaSerenaCreateTextFile", "McpPluginSerenaSerenaInsertBeforeSymbol", "McpPluginSerenaSerenaInsertAfterSymbol", "McpPluginSerenaSerenaRenameSymbol"} + tool_name in tool_names + + file_path := get_relative_path + 
regex.match(file_path_pattern, file_path) + + decision := { + "rule_id": "TS-QUALITY-004", + "reason": "Direct edits to src/test/code-quality.test.ts are prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_datetime_now_fallback.rego b/.backup/block_datetime_now_fallback.rego new file mode 100644 index 0000000..f77719c --- /dev/null +++ b/.backup/block_datetime_now_fallback.rego @@ -0,0 +1,146 @@ +# METADATA +# scope: package +# title: Block datetime.now Fallback +# description: Blocks returning datetime.now() as a fallback +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.block_datetime_now_fallback +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != 
null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + +pattern := `return\s+datetime\.now\s*\(` + +# Block Write/Edit operations that introduce datetime.now fallback + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + content := new_content + content != null + regex.match(pattern, content) + + decision := { + "rule_id": "PY-DT-001", + "reason": "Returning datetime.now() as a fallback is prohibited. Use a caller-provided timestamp.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + content := edit_new_content(edit) + content != null + regex.match(pattern, content) + + decision := { + "rule_id": "PY-DT-001", + "reason": "Returning datetime.now() as a fallback is prohibited. 
Use a caller-provided timestamp.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Patch", "ApplyPatch"} + + patch := patch_content + patch != null + + + regex.match(pattern, patch) + + decision := { + "rule_id": "PY-DT-001", + "reason": "Returning datetime.now() as a fallback is prohibited. Use a caller-provided timestamp.", + "severity": "HIGH" + } +} diff --git a/.backup/block_default_value_swallow.rego b/.backup/block_default_value_swallow.rego new file mode 100644 index 0000000..b051cd3 --- /dev/null +++ b/.backup/block_default_value_swallow.rego @@ -0,0 +1,146 @@ +# METADATA +# scope: package +# title: Block Default Value Swallow +# description: Blocks exception handlers that warn and return defaults +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.block_default_value_swallow +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + 
tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + +pattern := `except\s+\w*(?:Error|Exception).*?:\s*\n\s+.*?(?:logger\.|logging\.).*?(?:warning|warn).*?\n\s+return\s+(?:\w+Settings|Defaults?\(|default_|\{[^}]*\}|[A-Z_]+_DEFAULT)` + +# Block Write/Edit operations that swallow exceptions with defaults + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + content := new_content + content != null + regex.match(pattern, content) + + decision := { + "rule_id": "PY-EXC-002", + "reason": "Swallowing exceptions and returning defaults is prohibited.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + content := 
edit_new_content(edit) + content != null + regex.match(pattern, content) + + decision := { + "rule_id": "PY-EXC-002", + "reason": "Swallowing exceptions and returning defaults is prohibited.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Patch", "ApplyPatch"} + + patch := patch_content + patch != null + + + regex.match(pattern, patch) + + decision := { + "rule_id": "PY-EXC-002", + "reason": "Swallowing exceptions and returning defaults is prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_duplicate_fixtures.rego b/.backup/block_duplicate_fixtures.rego new file mode 100644 index 0000000..bf974af --- /dev/null +++ b/.backup/block_duplicate_fixtures.rego @@ -0,0 +1,179 @@ +# METADATA +# scope: package +# title: Block Duplicate Fixtures +# description: Blocks redefining global pytest fixtures outside conftest.py +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.block_duplicate_fixtures +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != 
null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + + +patch_targets_path(pattern) if { + patch := patch_content + patch != null + lines := split(patch, "\n") + some line in lines + startswith(line, "+++ b/") + path := replace(line, "+++ b/", "") + regex.match(pattern, path) +} + +patch_targets_path(pattern) if { + patch := patch_content + patch != null + lines := split(patch, "\n") + some line in lines + startswith(line, "--- a/") + path := replace(line, "--- a/", "") + regex.match(pattern, path) +} + +file_path_pattern := `tests?/.*\.py$` +conftest_pattern := `tests?/conftest\.py$` +fixture_pattern := 
`@pytest\.fixture[^@]*\ndef\s+(mock_uow|crypto|meetings_dir|webhook_config|webhook_config_all_events|sample_datetime|calendar_settings|meeting_id|sample_meeting|recording_meeting|mock_grpc_context|mock_asr_engine|mock_optional_extras)\s*\(` + +# Block Write/Edit operations that introduce duplicate fixtures + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + file_path := resolved_file_path + regex.match(file_path_pattern, file_path) + not regex.match(conftest_pattern, file_path) + + content := new_content + content != null + regex.match(fixture_pattern, content) + + decision := { + "rule_id": "TEST-FIX-001", + "reason": "Duplicate global fixtures are prohibited. Use tests/conftest.py fixtures instead.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + file_path := edit_path(edit) + regex.match(file_path_pattern, file_path) + not regex.match(conftest_pattern, file_path) + + content := edit_new_content(edit) + content != null + regex.match(fixture_pattern, content) + + decision := { + "rule_id": "TEST-FIX-001", + "reason": "Duplicate global fixtures are prohibited. Use tests/conftest.py fixtures instead.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Patch", "ApplyPatch"} + + patch := patch_content + patch != null + + patch_targets_path(file_path_pattern) + not regex.match(conftest_pattern, patch) + + regex.match(fixture_pattern, patch) + + decision := { + "rule_id": "TEST-FIX-001", + "reason": "Duplicate global fixtures are prohibited. 
Use tests/conftest.py fixtures instead.", + "severity": "HIGH" + } +} diff --git a/.backup/block_linter_config_frontend.rego b/.backup/block_linter_config_frontend.rego new file mode 100644 index 0000000..3bff2fe --- /dev/null +++ b/.backup/block_linter_config_frontend.rego @@ -0,0 +1,129 @@ +# METADATA +# scope: package +# title: Block Frontend Linter Config +# description: Blocks edits to frontend linter config files +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.block_linter_config_frontend +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { 
+ edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + +file_path_pattern := `(^|/)client/.*(?:\.?eslint(?:rc|\.config).*|\.?prettier(?:rc|\.config).*|biome\.json|tsconfig\.json|\.?rustfmt\.toml|\.?clippy\.toml)$` + +# Block Write/Edit operations targeting frontend linter configs + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + file_path := resolved_file_path + regex.match(file_path_pattern, file_path) + not contains(lower(file_path), "node_modules/") + + decision := { + "rule_id": "TS-CONFIG-002", + "reason": "Frontend linter/config file edits are prohibited.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + file_path := edit_path(edit) + regex.match(file_path_pattern, file_path) + not contains(lower(file_path), "node_modules/") + + decision := { + "rule_id": "TS-CONFIG-002", + "reason": "Frontend linter/config file edits are prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_linter_config_frontend_bash.rego b/.backup/block_linter_config_frontend_bash.rego 
new file mode 100644 index 0000000..a356217 --- /dev/null +++ b/.backup/block_linter_config_frontend_bash.rego @@ -0,0 +1,26 @@ +# METADATA +# scope: package +# title: Block Frontend Linter Config (Bash) +# description: Blocks Bash edits to frontend linter config files +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["Bash"] +package cupcake.policies.opencode.block_linter_config_frontend_bash +import rego.v1 + +pattern := `(rm|mv|cp|sed|awk|chmod|chown|touch|truncate|tee|>|>>)\s.*client/.*(?:biome\.json|tsconfig\.json|\.?eslint(?:rc|\.config)|\.?prettier(?:rc|\.config)|\.?rustfmt\.toml|\.?clippy\.toml)` + +deny contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + command := input.tool_input.command + regex.match(pattern, command) + + decision := { + "rule_id": "TS-CONFIG-001", + "reason": "Frontend linter/config file edits are prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_linter_config_python.rego b/.backup/block_linter_config_python.rego new file mode 100644 index 0000000..f2f6479 --- /dev/null +++ b/.backup/block_linter_config_python.rego @@ -0,0 +1,129 @@ +# METADATA +# scope: package +# title: Block Python Linter Config +# description: Blocks edits to Python linter config files +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.block_linter_config_python +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + 
tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + +file_path_pattern := `(?:pyproject\.toml|\.?ruff\.toml|\.?pyrightconfig\.json|\.?mypy\.ini|setup\.cfg|\.flake8|tox\.ini|\.?pylintrc)$` + +# Block Write/Edit operations targeting Python linter configs + +deny contains decision if { + 
input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + file_path := resolved_file_path + regex.match(file_path_pattern, file_path) + not contains(lower(file_path), "/.venv/") + + decision := { + "rule_id": "PY-CONFIG-002", + "reason": "Python linter/config file edits are prohibited.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + file_path := edit_path(edit) + regex.match(file_path_pattern, file_path) + not contains(lower(file_path), "/.venv/") + + decision := { + "rule_id": "PY-CONFIG-002", + "reason": "Python linter/config file edits are prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_linter_config_python_bash.rego b/.backup/block_linter_config_python_bash.rego new file mode 100644 index 0000000..65229c7 --- /dev/null +++ b/.backup/block_linter_config_python_bash.rego @@ -0,0 +1,26 @@ +# METADATA +# scope: package +# title: Block Python Linter Config (Bash) +# description: Blocks Bash edits to Python linter config files +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["Bash"] +package cupcake.policies.opencode.block_linter_config_python_bash +import rego.v1 + +pattern := `(rm|mv|cp|sed|awk|chmod|chown|touch|truncate|tee|>|>>)\s.*(?:pyproject\.toml|\.?ruff\.toml|\.?pyrightconfig\.json|\.?mypy\.ini|setup\.cfg|\.flake8|tox\.ini|\.?pylintrc)` + +deny contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + command := input.tool_input.command + regex.match(pattern, command) + + decision := { + "rule_id": "PY-CONFIG-001", + "reason": "Python linter/config file edits are prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_magic_numbers.rego b/.backup/block_magic_numbers.rego new file mode 100644 index 0000000..54bac75 --- /dev/null +++ b/.backup/block_magic_numbers.rego @@ -0,0 +1,178 @@ +# METADATA +# scope: package 
+# title: Block Magic Numbers +# description: Blocks introduction of magic numbers outside constants modules +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.block_magic_numbers +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if 
{ + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + + +patch_targets_path(pattern) if { + patch := patch_content + patch != null + lines := split(patch, "\n") + some line in lines + startswith(line, "+++ b/") + path := replace(line, "+++ b/", "") + regex.match(pattern, path) +} + +patch_targets_path(pattern) if { + patch := patch_content + patch != null + lines := split(patch, "\n") + some line in lines + startswith(line, "--- a/") + path := replace(line, "--- a/", "") + regex.match(pattern, path) +} + +file_path_pattern := `\.(py|ts|tsx|js|jsx)$` +number_pattern := `(?:timeout|delay|interval|duration|limit|max|min|size|count|threshold|retry|retries|attempts|port|width|height|margin|padding|offset|index|length|capacity|buffer|batch|chunk|page|rate|fps|dpi|quality|level|priority|weight|score|factor|multiplier|divisor|percentage|ratio|scale)\s*[=:]\s*([2-9]|[1-9]\d+)|(?:if|while|for|elif|range|slice|sleep|wait|setTimeout|setInterval)\s*\([^)]*([2-9]|[1-9]\d+)` + +# Block Write/Edit operations that introduce magic numbers + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + file_path := resolved_file_path + regex.match(file_path_pattern, file_path) + not contains(lower(file_path), "constants") + + content := new_content + content != null + regex.match(number_pattern, content) + + decision := { + "rule_id": "STYLE-001", + "reason": "Magic numbers are prohibited. 
Use named constants.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + file_path := edit_path(edit) + regex.match(file_path_pattern, file_path) + not contains(lower(file_path), "constants") + + content := edit_new_content(edit) + content != null + regex.match(number_pattern, content) + + decision := { + "rule_id": "STYLE-001", + "reason": "Magic numbers are prohibited. Use named constants.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Patch", "ApplyPatch"} + + patch := patch_content + patch != null + + patch_targets_path(file_path_pattern) + not contains(lower(patch), "constants") + + regex.match(number_pattern, patch) + + decision := { + "rule_id": "STYLE-001", + "reason": "Magic numbers are prohibited. Use named constants.", + "severity": "HIGH" + } +} diff --git a/.backup/block_makefile_bash.rego b/.backup/block_makefile_bash.rego new file mode 100644 index 0000000..15e2b9a --- /dev/null +++ b/.backup/block_makefile_bash.rego @@ -0,0 +1,26 @@ +# METADATA +# scope: package +# title: Block Makefile Edit (Bash) +# description: Blocks Bash edits to Makefile +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["Bash"] +package cupcake.policies.opencode.block_makefile_bash +import rego.v1 + +pattern := `(>>?\s*Makefile|sed\s+.*-i.*Makefile|sed\s+-i.*Makefile|perl\s+-[pi].*Makefile|tee\s+.*Makefile|(mv|cp)\s+\S+\s+Makefile\b|>\s*Makefile)` + +deny contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + command := input.tool_input.command + regex.match(pattern, command) + + decision := { + "rule_id": "BUILD-001", + "reason": "Makefile edits are prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_makefile_edit.rego b/.backup/block_makefile_edit.rego new file mode 100644 index 0000000..9f7da15 --- /dev/null 
+++ b/.backup/block_makefile_edit.rego @@ -0,0 +1,127 @@ +# METADATA +# scope: package +# title: Block Makefile Edit +# description: Blocks file edits to Makefile +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.block_makefile_edit +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path 
+} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + +file_path_pattern := `(?:^|/)Makefile$` + +# Block Write/Edit operations targeting Makefile + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + file_path := resolved_file_path + regex.match(file_path_pattern, file_path) + + decision := { + "rule_id": "BUILD-002", + "reason": "Makefile edits are prohibited.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + file_path := edit_path(edit) + regex.match(file_path_pattern, file_path) + + decision := { + "rule_id": "BUILD-002", + "reason": "Makefile edits are prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_no_verify.rego b/.backup/block_no_verify.rego new file mode 100644 index 0000000..d399d1f --- /dev/null +++ b/.backup/block_no_verify.rego @@ -0,0 +1,26 @@ +# METADATA +# scope: package +# title: Block Git --no-verify +# description: Blocks git commit --no-verify +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["Bash"] +package cupcake.policies.opencode.block_no_verify +import rego.v1 + +pattern := `git\s+commit\s+.*--no-verify|git\s+commit\s+--no-verify` + +deny contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + command := input.tool_input.command + 
regex.match(pattern, command) + + decision := { + "rule_id": "GIT-001", + "reason": "Git commit --no-verify is prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_silent_none_return.rego b/.backup/block_silent_none_return.rego new file mode 100644 index 0000000..bdb56b4 --- /dev/null +++ b/.backup/block_silent_none_return.rego @@ -0,0 +1,146 @@ +# METADATA +# scope: package +# title: Block Silent None Return +# description: Blocks exception handlers that log and return empty values +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.block_silent_none_return +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + 
tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + +pattern := `except\s+\w*Error.*?:\s*\n\s+.*?(?:logger\.|logging\.).*?\n\s+return\s+(?:None|\[\]|False|\{\}|0)` + +# Block Write/Edit operations that swallow exceptions with empty returns + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + content := new_content + content != null + regex.match(pattern, content) + + decision := { + "rule_id": "PY-EXC-003", + "reason": "Silent exception handlers returning empty values are prohibited.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + content := edit_new_content(edit) + content != null + regex.match(pattern, content) + + decision := { + "rule_id": "PY-EXC-003", + "reason": "Silent exception handlers returning empty values are prohibited.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Patch", "ApplyPatch"} + + patch := patch_content + 
patch != null + + + regex.match(pattern, patch) + + decision := { + "rule_id": "PY-EXC-003", + "reason": "Silent exception handlers returning empty values are prohibited.", + "severity": "HIGH" + } +} diff --git a/.backup/block_test_loops_conditionals.rego b/.backup/block_test_loops_conditionals.rego new file mode 100644 index 0000000..b706398 --- /dev/null +++ b/.backup/block_test_loops_conditionals.rego @@ -0,0 +1,175 @@ +# METADATA +# scope: package +# title: Block Test Loops/Conditionals +# description: Blocks loops or conditionals inside tests with asserts +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.block_test_loops_conditionals +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + 
tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + + +patch_targets_path(pattern) if { + patch := patch_content + patch != null + lines := split(patch, "\n") + some line in lines + startswith(line, "+++ b/") + path := replace(line, "+++ b/", "") + regex.match(pattern, path) +} + +patch_targets_path(pattern) if { + patch := patch_content + patch != null + lines := split(patch, "\n") + some line in lines + startswith(line, "--- a/") + path := replace(line, "--- a/", "") + regex.match(pattern, path) +} + +file_path_pattern := `tests?/.*\.py$` +pattern := `def test_[^(]+\([^)]*\)[^:]*:[\s\S]*?\b(for|while|if)\s+[^:]+:[\s\S]*?assert` + +# Block Write/Edit operations that introduce loops/conditionals in tests + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + file_path := resolved_file_path + regex.match(file_path_pattern, file_path) + + content := new_content + content != null + regex.match(pattern, content) + + decision := { + "rule_id": 
"TEST-STRUCT-001", + "reason": "Loops or conditionals inside tests are prohibited. Use parametrization.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + file_path := edit_path(edit) + regex.match(file_path_pattern, file_path) + + content := edit_new_content(edit) + content != null + regex.match(pattern, content) + + decision := { + "rule_id": "TEST-STRUCT-001", + "reason": "Loops or conditionals inside tests are prohibited. Use parametrization.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Patch", "ApplyPatch"} + + patch := patch_content + patch != null + + patch_targets_path(file_path_pattern) + + regex.match(pattern, patch) + + decision := { + "rule_id": "TEST-STRUCT-001", + "reason": "Loops or conditionals inside tests are prohibited. Use parametrization.", + "severity": "HIGH" + } +} diff --git a/.backup/block_tests_quality.rego b/.backup/block_tests_quality.rego new file mode 100644 index 0000000..45233fd --- /dev/null +++ b/.backup/block_tests_quality.rego @@ -0,0 +1,130 @@ +# METADATA +# scope: package +# title: Block Tests Quality +# description: Blocks edits to tests/quality (except baselines.json) +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.block_tests_quality +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + 
tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + +file_path_pattern := `tests/quality/` +exclude_pattern := `baselines\.json$` + +# Block Write/Edit operations targeting tests/quality + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", 
"NotebookEdit"} + + file_path := resolved_file_path + regex.match(file_path_pattern, file_path) + not regex.match(exclude_pattern, file_path) + + decision := { + "rule_id": "TEST-QUALITY-002", + "reason": "Direct edits to tests/quality are prohibited (except baselines.json).", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + file_path := edit_path(edit) + regex.match(file_path_pattern, file_path) + not regex.match(exclude_pattern, file_path) + + decision := { + "rule_id": "TEST-QUALITY-002", + "reason": "Direct edits to tests/quality are prohibited (except baselines.json).", + "severity": "HIGH" + } +} diff --git a/.backup/block_tests_quality_bash.rego b/.backup/block_tests_quality_bash.rego new file mode 100644 index 0000000..e65e9b0 --- /dev/null +++ b/.backup/block_tests_quality_bash.rego @@ -0,0 +1,27 @@ +# METADATA +# scope: package +# title: Block Tests Quality (Bash) +# description: Blocks Bash edits to tests/quality (except baselines.json) +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["Bash"] +package cupcake.policies.opencode.block_tests_quality_bash +import rego.v1 + +pattern := `(rm|mv|cp|sed|awk|chmod|chown|touch|mkdir|rmdir|truncate|tee|>|>>)\s.*tests/quality/` + +deny contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + command := input.tool_input.command + regex.match(pattern, command) + not contains(lower(command), "tests/quality/baselines.json") + + decision := { + "rule_id": "TEST-QUALITY-001", + "reason": "Direct edits to tests/quality are prohibited (except baselines.json).", + "severity": "HIGH" + } +} diff --git a/.backup/example.rego b/.backup/example.rego new file mode 100644 index 0000000..0fb2487 --- /dev/null +++ b/.backup/example.rego @@ -0,0 +1,33 @@ +# METADATA +# scope: package +# title: Example Policy +# description: A minimal example policy that 
never fires +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["Bash"] +package cupcake.policies.example + +import rego.v1 + +# This rule will never fire - it's just here to prevent OPA compilation issues +# It checks for a command that nobody would ever type +deny contains decision if { + input.tool_input.command == "CUPCAKE_EXAMPLE_RULE_THAT_NEVER_FIRES_12345" + decision := { + "reason": "This will never happen", + "severity": "LOW", + "rule_id": "EXAMPLE-001" + } +} + +# Replace the above with your actual policies +# Example of a real policy: +# deny contains decision if { +# contains(input.tool_input.command, "rm -rf /") +# decision := { +# "reason": "Dangerous command blocked", +# "severity": "HIGH", +# "rule_id": "SAFETY-001" +# } +# } diff --git a/.backup/prevent_any_type.rego b/.backup/prevent_any_type.rego new file mode 100644 index 0000000..89e4022 --- /dev/null +++ b/.backup/prevent_any_type.rego @@ -0,0 +1,181 @@ +# METADATA +# scope: package +# title: Ban Python Any Type +# description: Blocks introduction of typing.Any in Python code +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.prevent_any_type + +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + 
tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + +is_python_file(path) if { + endswith(path, ".py") +} + +is_python_file(path) if { + endswith(path, ".pyi") +} + +# Regex patterns indicating use of Any in type annotations/imports +any_type_patterns := [ + `(?m)^\s*from\s+typing\s+import\s+[^#\n]*\bAny\b`, + `\btyping\.Any\b`, + `:\s*Any\b`, + `:\s*"Any"`, + `:\s*'Any'`, + `->\s*Any\b`, + `->\s*"Any"`, + `->\s*'Any'`, + `\[\s*Any\s*\]`, + `\[\s*Any\s*,`, + `,\s*Any\s*\]`, + `,\s*Any\s*,`, + 
`Union\[[^\]]*\bAny\b[^\]]*\]`, + `Optional\[Any\]`, +] + +# Block Write/Edit operations that introduce Any in Python files +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + # Only enforce for Python files + file_path := lower(resolved_file_path) + is_python_file(file_path) + + content := new_content + content != null + + some pattern in any_type_patterns + regex.match(pattern, content) + + decision := { + "rule_id": "PY-TYPE-001", + "reason": "Use of Any is prohibited in Python type annotations/imports. Replace with Protocol, TypeVar, TypedDict, or a concrete type.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Patch", "ApplyPatch"} + + content := patch_content + content != null + + some pattern in any_type_patterns + regex.match(pattern, content) + + decision := { + "rule_id": "PY-TYPE-001", + "reason": "Use of Any is prohibited in Python type annotations/imports. Replace with Protocol, TypeVar, TypedDict, or a concrete type.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + file_path := lower(edit_path(edit)) + is_python_file(file_path) + + content := edit_new_content(edit) + content != null + + some pattern in any_type_patterns + regex.match(pattern, content) + + decision := { + "rule_id": "PY-TYPE-001", + "reason": "Use of Any is prohibited in Python type annotations/imports. 
Replace with Protocol, TypeVar, TypedDict, or a concrete type.", + "severity": "HIGH" + } +} diff --git a/.backup/prevent_type_suppression.rego b/.backup/prevent_type_suppression.rego new file mode 100644 index 0000000..e1ec86c --- /dev/null +++ b/.backup/prevent_type_suppression.rego @@ -0,0 +1,176 @@ +# METADATA +# scope: package +# title: Ban Python Type Suppression +# description: Blocks type suppression directives in Python code +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.prevent_type_suppression + +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { + tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) 
:= path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + +is_python_file(path) if { + endswith(path, ".py") +} + +is_python_file(path) if { + endswith(path, ".pyi") +} + +# Regex patterns indicating type suppression directives +type_suppression_patterns := [ + `#\s*type:\s*ignore(\[[^\]]+\])?\b`, + `#\s*pyright:\s*ignore(\[[^\]]+\])?\b`, + `#\s*mypy:\s*ignore(\[[^\]]+\])?\b`, + `#\s*pyre-ignore\b`, + `#\s*pyre-fixme\b`, + `#\s*pyrefly:\s*ignore(\[[^\]]+\])?\b`, + `#\s*basedpyright:\s*ignore(\[[^\]]+\])?\b`, + `#\s*noqa\b`, + `#\s*noqa:\s*\w+`, +] + +# Block Write/Edit operations that introduce type suppression in Python files +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + # Only enforce for Python files + file_path := lower(resolved_file_path) + is_python_file(file_path) + + content := new_content + content != null + + some pattern in type_suppression_patterns + regex.match(pattern, content) + + decision := { + "rule_id": "PY-TYPE-002", + "reason": "Type suppression directives are prohibited in Python code. 
Fix the underlying type/lint issues instead.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Patch", "ApplyPatch"} + + content := patch_content + content != null + + some pattern in type_suppression_patterns + regex.match(pattern, content) + + decision := { + "rule_id": "PY-TYPE-002", + "reason": "Type suppression directives are prohibited in Python code. Fix the underlying type/lint issues instead.", + "severity": "HIGH" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + file_path := lower(edit_path(edit)) + is_python_file(file_path) + + content := edit_new_content(edit) + content != null + + some pattern in type_suppression_patterns + regex.match(pattern, content) + + decision := { + "rule_id": "PY-TYPE-002", + "reason": "Type suppression directives are prohibited in Python code. Fix the underlying type/lint issues instead.", + "severity": "HIGH" + } +} diff --git a/.cupcake/policies/.inactive/warn_new_file_search.rego b/.backup/warn_baselines_edit.rego similarity index 75% rename from .cupcake/policies/.inactive/warn_new_file_search.rego rename to .backup/warn_baselines_edit.rego index 8968702..4d3410f 100644 --- a/.cupcake/policies/.inactive/warn_new_file_search.rego +++ b/.backup/warn_baselines_edit.rego @@ -1,21 +1,16 @@ # METADATA # scope: package -# title: Warn on New File Without Search -# description: Warns when creating new source files +# title: Warn on Baselines Edit +# description: Warns on edits to tests/quality/baselines.json # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] -package cupcake.policies.opencode.warn_new_file_search +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.warn_baselines_edit import rego.v1 -tool_name := 
input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -98,9 +93,9 @@ edit_old_content(edit) := content if { content := edit.old_text } else := "" -file_path_pattern := `(^|/)(src|client/src|tests)/.*\.(py|ts|tsx|js|jsx)$` +file_path_pattern := `tests/quality/baselines\.json$` -# Warn on Write/Edit operations that create new files +# Warn on Write/Edit operations targeting baselines.json deny contains decision if { input.hook_event_name == "PreToolUse" @@ -109,11 +104,9 @@ deny contains decision if { file_path := resolved_file_path regex.match(file_path_pattern, file_path) - old_content == "" - decision := { - "rule_id": "PROCESS-001", - "reason": "Warning: creating a new source file. Ensure you searched for existing implementations.", + "rule_id": "TEST-QUALITY-004", + "reason": "Warning: editing tests/quality/baselines.json should be avoided unless explicitly required.", "severity": "LOW" } } @@ -126,12 +119,9 @@ deny contains decision if { file_path := edit_path(edit) regex.match(file_path_pattern, file_path) - old_content := edit_old_content(edit) - old_content == "" - decision := { - "rule_id": "PROCESS-001", - "reason": "Warning: creating a new source file. 
Ensure you searched for existing implementations.", + "rule_id": "TEST-QUALITY-004", + "reason": "Warning: editing tests/quality/baselines.json should be avoided unless explicitly required.", "severity": "LOW" } } diff --git a/.backup/warn_baselines_edit_bash.rego b/.backup/warn_baselines_edit_bash.rego new file mode 100644 index 0000000..0b67e3d --- /dev/null +++ b/.backup/warn_baselines_edit_bash.rego @@ -0,0 +1,26 @@ +# METADATA +# scope: package +# title: Warn on Baselines Edit (Bash) +# description: Warns on Bash edits to tests/quality/baselines.json +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["Bash"] +package cupcake.policies.opencode.warn_baselines_edit_bash +import rego.v1 + +pattern := `(sed|awk|echo|cat|tee|>|>>|cp|mv).*tests/quality/baselines\.json` + +deny contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + command := input.tool_input.command + regex.match(pattern, command) + + decision := { + "rule_id": "TEST-QUALITY-003", + "reason": "Warning: editing tests/quality/baselines.json should be avoided unless explicitly required.", + "severity": "LOW" + } +} diff --git a/.backup/warn_large_file.rego b/.backup/warn_large_file.rego new file mode 100644 index 0000000..db96c11 --- /dev/null +++ b/.backup/warn_large_file.rego @@ -0,0 +1,175 @@ +# METADATA +# scope: package +# title: Warn on Large File +# description: Warns when writing large files (>= 500 lines) +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] +package cupcake.policies.opencode.warn_large_file +import rego.v1 + +tool_name := input.tool_name +tool_input := input.tool_input + +resolved_file_path := input.resolved_file_path if { + input.resolved_file_path != null +} else := tool_input.file_path if { + tool_input.file_path != null +} else := tool_input.filePath if { + tool_input.filePath != null +} else := tool_input.path if { 
+ tool_input.path != null +} else := tool_input.notebook_path if { + tool_input.notebook_path != null +} else := tool_input.notebookPath if { + tool_input.notebookPath != null +} else := "" + +new_content := tool_input.new_string if { + tool_input.new_string != null +} else := tool_input.newText if { + tool_input.newText != null +} else := tool_input.new_text if { + tool_input.new_text != null +} else := tool_input.content if { + tool_input.content != null +} else := "" + +old_content := tool_input.old_string if { + tool_input.old_string != null +} else := tool_input.oldText if { + tool_input.oldText != null +} else := tool_input.old_text if { + tool_input.old_text != null +} else := tool_input.previousContent if { + tool_input.previousContent != null +} else := "" + +patch_content := tool_input.patch if { + tool_input.patch != null +} else := tool_input.patchText if { + tool_input.patchText != null +} else := tool_input.patch_text if { + tool_input.patch_text != null +} else := "" + +edit_path(edit) := path if { + edit.resolved_file_path != null + path := edit.resolved_file_path +} else := path if { + edit.file_path != null + path := edit.file_path +} else := path if { + edit.filePath != null + path := edit.filePath +} else := path if { + edit.path != null + path := edit.path +} else := "" + +edit_new_content(edit) := content if { + edit.new_string != null + content := edit.new_string +} else := content if { + edit.newText != null + content := edit.newText +} else := content if { + edit.new_text != null + content := edit.new_text +} else := content if { + edit.content != null + content := edit.content +} else := "" + +edit_old_content(edit) := content if { + edit.old_string != null + content := edit.old_string +} else := content if { + edit.oldText != null + content := edit.oldText +} else := content if { + edit.old_text != null + content := edit.old_text +} else := "" + + +patch_targets_path(pattern) if { + patch := patch_content + patch != null + lines := 
split(patch, "\n") + some line in lines + startswith(line, "+++ b/") + path := replace(line, "+++ b/", "") + regex.match(pattern, path) +} + +patch_targets_path(pattern) if { + patch := patch_content + patch != null + lines := split(patch, "\n") + some line in lines + startswith(line, "--- a/") + path := replace(line, "--- a/", "") + regex.match(pattern, path) +} + +file_path_pattern := `\.(py|ts|tsx|js|jsx)$` +pattern := `(?:.*\n){500,}` + +# Warn on Write/Edit operations that introduce large file content + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Write", "Edit", "NotebookEdit"} + + file_path := resolved_file_path + regex.match(file_path_pattern, file_path) + + content := new_content + content != null + regex.match(pattern, content) + + decision := { + "rule_id": "STYLE-002", + "reason": "Warning: file content exceeds 500 lines. Consider refactoring.", + "severity": "LOW" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name == "MultiEdit" + + some edit in tool_input.edits + file_path := edit_path(edit) + regex.match(file_path_pattern, file_path) + + content := edit_new_content(edit) + content != null + regex.match(pattern, content) + + decision := { + "rule_id": "STYLE-002", + "reason": "Warning: file content exceeds 500 lines. Consider refactoring.", + "severity": "LOW" + } +} + +deny contains decision if { + input.hook_event_name == "PreToolUse" + tool_name in {"Patch", "ApplyPatch"} + + patch := patch_content + patch != null + + patch_targets_path(file_path_pattern) + + regex.match(pattern, patch) + + decision := { + "rule_id": "STYLE-002", + "reason": "Warning: file content exceeds 500 lines. 
Consider refactoring.", + "severity": "LOW" + } +} diff --git a/.claude/ralph-loop.local.md b/.claude/ralph-loop.local.md deleted file mode 100644 index 81163c6..0000000 --- a/.claude/ralph-loop.local.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -active: true -iteration: 1 -max_iterations: 0 -completion_promise: null -started_at: "2026-01-20T02:31:55Z" -started_at: "2026-01-20T02:31:55Z" ---- - -proceed with the plan, i have also documented a copy in @.claudectx/codefixes.md. please use your agents iteratively to manage context and speed, however you must review the accuracy and value of each doc before moving to the next -proceed with the plan, i have also documented a copy in @.claudectx/codefixes.md. please use your agents iteratively to manage context and speed, however you must review the accuracy and value of each doc before moving to the next diff --git a/.claudectx/codefixes.md b/.claudectx/codefixes.md index 0febd02..e69de29 100644 --- a/.claudectx/codefixes.md +++ b/.claudectx/codefixes.md @@ -1,377 +0,0 @@ -╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌ - Strategic CLAUDE.md Placement Analysis for NoteFlow - - Executive Summary - - This document analyzes optimal placement of CLAUDE.md files throughout the NoteFlow codebase to provide meaningful context for AI assistants. The analysis considers both - constrained (strategic) and unlimited scenarios. 
- - --- - Current State: Existing Documentation Files - - 10 CLAUDE.md/AGENTS.md Files Already Present - ┌─────────────────────┬───────────┬──────────────────────────────────────────────────────────┐ - │ Location │ File │ Focus │ - ├─────────────────────┼───────────┼──────────────────────────────────────────────────────────┤ - │ / │ CLAUDE.md │ Root orchestration, parallel execution, project overview │ - ├─────────────────────┼───────────┼──────────────────────────────────────────────────────────┤ - │ / │ AGENTS.md │ Architecture for non-Claude AI assistants │ - ├─────────────────────┼───────────┼──────────────────────────────────────────────────────────┤ - │ /src/ │ CLAUDE.md │ Python backend entry point │ - ├─────────────────────┼───────────┼──────────────────────────────────────────────────────────┤ - │ /src/ │ AGENTS.md │ Python backend for other AIs │ - ├─────────────────────┼───────────┼──────────────────────────────────────────────────────────┤ - │ /src/noteflow/ │ CLAUDE.md │ Detailed Python standards (line limits, typing, modules) │ - ├─────────────────────┼───────────┼──────────────────────────────────────────────────────────┤ - │ /src/noteflow/grpc/ │ CLAUDE.md │ gRPC security patterns │ - ├─────────────────────┼───────────┼──────────────────────────────────────────────────────────┤ - │ /src/noteflow/grpc/ │ AGENTS.md │ gRPC security (duplicate) │ - ├─────────────────────┼───────────┼──────────────────────────────────────────────────────────┤ - │ /client/ │ CLAUDE.md │ Tauri + React development │ - ├─────────────────────┼───────────┼──────────────────────────────────────────────────────────┤ - │ /client/src/ │ CLAUDE.md │ TypeScript security rules │ - ├─────────────────────┼───────────┼──────────────────────────────────────────────────────────┤ - │ /docker/ │ CLAUDE.md │ Docker security and build patterns │ - └─────────────────────┴───────────┴──────────────────────────────────────────────────────────┘ - --- - Part 1: Strategic Placement (Constrained 
Resources) - - If limited to 5-7 additional files, prioritize these high-impact locations: - - Tier 1: Critical Gaps (Add These First) - - 1. /src/noteflow/infrastructure/CLAUDE.md - - Why: Infrastructure layer has 15+ adapters with distinct patterns (ASR, diarization, NER, summarization, calendar, webhooks, persistence). No unified guidance exists. - - 2. /src/noteflow/domain/CLAUDE.md - - Why: Domain layer defines entities, ports, rules, and value objects. Understanding DDD boundaries prevents architectural violations. - - 3. /src/noteflow/application/services/CLAUDE.md - - Why: 12+ services with distinct responsibilities. Service-level guidance prevents duplication and clarifies orchestration patterns. - - 4. /client/src/hooks/CLAUDE.md - - Why: 7 hook directories (audio, auth, data, processing, recording, sync, ui) with complex interdependencies. Prevents reinventing existing hooks. - - 5. /client/src-tauri/src/CLAUDE.md - - Why: Rust backend has commands, gRPC client, audio processing, state management. No Rust-specific guidance currently exists. - - Tier 2: High Value (Add Next) - - 6. /tests/CLAUDE.md - - Why: Testing conventions (fixtures, markers, quality gates) are scattered. Centralized guidance improves test quality. - - 7. /src/noteflow/infrastructure/persistence/CLAUDE.md - - Why: UnitOfWork pattern, repository hierarchy, capability flags, migrations are complex. Prevents incorrect persistence patterns. 
- - --- - Part 2: Unlimited Placement (Comprehensive Coverage) - - With no constraints, here's the complete list of 25+ locations where CLAUDE.md would add value: - - Python Backend (src/noteflow/) - ┌────────────────────────────────────────┬─────────────────────────────────────────────────────────┐ - │ Path │ Content Focus │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ domain/CLAUDE.md │ DDD entities, ports, value objects, rules engine │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ domain/entities/CLAUDE.md │ Entity relationships, state machines, invariants │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ domain/ports/CLAUDE.md │ Repository protocols, capability contracts │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ domain/rules/CLAUDE.md │ Rule modes (SIMPLE→EXPRESSION), registry, evaluation │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ application/CLAUDE.md │ Use case organization, service boundaries │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ application/services/CLAUDE.md │ Service catalog, dependency patterns │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ infrastructure/CLAUDE.md │ Adapter patterns, external integrations │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ infrastructure/asr/CLAUDE.md │ Whisper, VAD, segmentation, streaming │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ infrastructure/diarization/CLAUDE.md │ Job lifecycle, streaming vs offline, speaker assignment │ - 
├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ infrastructure/ner/CLAUDE.md │ Backend abstraction, mapper, post-processing │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ infrastructure/summarization/CLAUDE.md │ Provider protocols, consent workflow, citation linking │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ infrastructure/persistence/CLAUDE.md │ UnitOfWork, repositories, migrations │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ infrastructure/calendar/CLAUDE.md │ OAuth flow, sync patterns, trigger detection │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ infrastructure/webhooks/CLAUDE.md │ Delivery, signing, retry logic │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ grpc/mixins/CLAUDE.md │ Mixin composition, streaming handlers │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ grpc/startup/CLAUDE.md │ Service initialization, dependency injection │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ config/CLAUDE.md │ Settings cascade, feature flags, environment loading │ - ├────────────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ cli/CLAUDE.md │ Command patterns, model management │ - └────────────────────────────────────────┴─────────────────────────────────────────────────────────┘ - Client (client/) - ┌──────────────────────────────────┬─────────────────────────────────────────────────────────┐ - │ Path │ Content Focus │ - ├──────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ src/api/CLAUDE.md │ 
Adapter pattern, transport abstraction, type generation │ - ├──────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ src/components/CLAUDE.md │ Component hierarchy, feature organization │ - ├──────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ src/hooks/CLAUDE.md │ Hook catalog, composition patterns, state management │ - ├──────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ src/lib/CLAUDE.md │ Utility catalog, AI providers, audio processing │ - ├──────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ src-tauri/src/CLAUDE.md │ Rust patterns, command handlers, state │ - ├──────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ src-tauri/src/commands/CLAUDE.md │ IPC contract, audio commands, recording session │ - ├──────────────────────────────────┼─────────────────────────────────────────────────────────┤ - │ src-tauri/src/grpc/CLAUDE.md │ gRPC client wrapper, type conversions │ - └──────────────────────────────────┴─────────────────────────────────────────────────────────┘ - Testing (tests/) - ┌─────────────────────────────┬────────────────────────────────────────────────────┐ - │ Path │ Content Focus │ - ├─────────────────────────────┼────────────────────────────────────────────────────┤ - │ tests/CLAUDE.md │ Test conventions, fixtures, markers, quality gates │ - ├─────────────────────────────┼────────────────────────────────────────────────────┤ - │ tests/fixtures/CLAUDE.md │ Shared fixtures catalog, usage patterns │ - ├─────────────────────────────┼────────────────────────────────────────────────────┤ - │ tests/integration/CLAUDE.md │ Integration test setup, testcontainers │ - └─────────────────────────────┴────────────────────────────────────────────────────┘ - Documentation (docs/) - 
┌────────────────────────┬───────────────────────────────────────────┐ - │ Path │ Content Focus │ - ├────────────────────────┼───────────────────────────────────────────┤ - │ docs/sprints/CLAUDE.md │ Sprint structure, documentation standards │ - └────────────────────────┴───────────────────────────────────────────┘ - --- - Part 3: Mockup - /src/noteflow/infrastructure/CLAUDE.md - - # Infrastructure Layer Development Guide - - ## Overview - - The infrastructure layer (`src/noteflow/infrastructure/`) contains adapters that implement domain ports. These connect the application to external systems: databases, ML - models, cloud APIs, file systems. - - --- - - ## Architecture Principle: Hexagonal/Ports-and-Adapters - - Domain Ports (interfaces) Infrastructure Adapters (implementations) - ───────────────────────── ─────────────────────────────────────────── - NerPort → SpacyBackend, GlinerBackend - SummarizationProvider → CloudProvider, OllamaProvider, MockProvider - DiarizationEngine → DiartSession, PyannoteOffline - AssetRepository → FileSystemAssetRepository - UnitOfWork → SqlAlchemyUnitOfWork, MemoryUnitOfWork - CalendarProvider → GoogleCalendar, OutlookCalendar - - **Rule**: Infrastructure code imports domain; domain NEVER imports infrastructure. 
- - --- - - ## Adapter Catalog - - | Directory | Responsibility | Key Protocols | - |-----------|----------------|---------------| - | `asr/` | Speech-to-text (Whisper) | `TranscriptionResult` | - | `diarization/` | Speaker identification | `DiarizationEngine`, `DiarizationJob` | - | `ner/` | Named entity extraction | `NerPort` | - | `summarization/` | LLM summarization | `SummarizationProvider` | - | `persistence/` | Database (SQLAlchemy) | `UnitOfWork`, `*Repository` | - | `calendar/` | OAuth + event sync | `CalendarProvider` | - | `webhooks/` | Event delivery | `WebhookDeliveryService` | - | `export/` | PDF/HTML/Markdown | `ExportAdapter` | - | `audio/` | Recording/playback | `AudioDevice` | - | `crypto/` | Encryption | `Keystore` | - | `logging/` | Structured logging | `LogEventType` | - | `metrics/` | Observability | `MetricsCollector` | - | `gpu/` | GPU detection | `GpuInfo` | - - --- - - ## Common Patterns - - ### 1. Async Wrappers for Sync Libraries - - Many ML libraries (spaCy, faster-whisper) are synchronous. Wrap them: - - ```python - async def extract(self, text: str) -> list[NamedEntity]: - loop = asyncio.get_running_loop() - return await loop.run_in_executor( - None, # Default ThreadPoolExecutor - self._sync_extract, - text - ) - - 2. Backend Selection via Factory - - def create_ner_engine(config: NerConfig) -> NerPort: - match config.backend: - case "spacy": - return SpacyBackend(model=config.model_name) - case "gliner": - return GlinerBackend(model=config.model_name) - case _: - raise ValueError(f"Unknown NER backend: {config.backend}") - - 3. 
Capability Flags for Optional Features - - class SqlAlchemyUnitOfWork(UnitOfWork): - @property - def supports_entities(self) -> bool: - return True # Has EntityRepository - - @property - def supports_webhooks(self) -> bool: - return True # Has WebhookRepository - - Always check capability before accessing optional repository: - if uow.supports_entities: - entities = await uow.entities.get_by_meeting(meeting_id) - - 4. Provider Protocol Pattern - - class SummarizationProvider(Protocol): - async def summarize( - self, - segments: list[Segment], - template: SummarizationTemplate, - ) -> SummaryResult: ... - - @property - def requires_consent(self) -> bool: ... - - --- - Forbidden Patterns - - ❌ Direct database access outside persistence/ - # WRONG: Raw SQL in service layer - async with engine.connect() as conn: - result = await conn.execute(text("SELECT * FROM meetings")) - - ❌ Hardcoded API keys - # WRONG: Secrets in code - client = anthropic.Anthropic(api_key="sk-ant-...") - - ❌ Synchronous I/O in async context - # WRONG: Blocking the event loop - def load_model(self): - self.model = whisper.load_model("base") # Blocks! 
- - ❌ Domain imports in infrastructure - # WRONG: Infrastructure should implement domain ports, not modify domain - from noteflow.domain.entities import Meeting - meeting.state = "COMPLETED" # Don't mutate domain objects here - - --- - Testing Infrastructure Adapters - - Use Dependency Injection for Mocking - - # tests/infrastructure/ner/test_engine.py - @pytest.fixture - def mock_backend() -> NerBackend: - backend = Mock(spec=NerBackend) - backend.extract.return_value = [ - RawEntity(text="John", label="PERSON", start=0, end=4) - ] - return backend - - async def test_engine_uses_backend(mock_backend): - engine = NerEngine(backend=mock_backend) - result = await engine.extract("Hello John") - mock_backend.extract.assert_called_once() - - Integration Tests with Real Services - - # tests/integration/test_ner_integration.py - @pytest.mark.integration - @pytest.mark.requires_gpu - async def test_gliner_real_extraction(): - backend = GlinerBackend(model="urchade/gliner_base") - result = await backend.extract("Microsoft CEO Satya Nadella announced...") - assert any(e.label == "ORG" and "Microsoft" in e.text for e in result) - - --- - Adding a New Adapter - - 1. Define port in domain (domain/ports/) if not exists - 2. Create adapter directory (infrastructure//) - 3. Implement the protocol with proper async handling - 4. Add factory function for backend selection - 5. Write unit tests with mocked dependencies - 6. Write integration test with real external service - 7. Update gRPC startup (grpc/startup/services.py) for dependency injection - 8. 
Document in this file (update Adapter Catalog table) - - --- - Key Files - ┌───────────────────────────────┬──────────────────────────────────┐ - │ File │ Purpose │ - ├───────────────────────────────┼──────────────────────────────────┤ - │ __init__.py │ Public exports for layer │ - ├───────────────────────────────┼──────────────────────────────────┤ - │ */engine.py │ Main adapter implementation │ - ├───────────────────────────────┼──────────────────────────────────┤ - │ */backends/ │ Multiple backend implementations │ - ├───────────────────────────────┼──────────────────────────────────┤ - │ */mapper.py │ External→Domain type conversion │ - ├───────────────────────────────┼──────────────────────────────────┤ - │ */post_processing.py │ Output normalization │ - ├───────────────────────────────┼──────────────────────────────────┤ - │ persistence/unit_of_work/*.py │ Transaction management │ - ├───────────────────────────────┼──────────────────────────────────┤ - │ persistence/repositories/*.py │ Data access │ - ├───────────────────────────────┼──────────────────────────────────┤ - │ persistence/models/*.py │ ORM definitions │ - └───────────────────────────────┴──────────────────────────────────┘ - --- - See Also - - - /src/noteflow/domain/ports/ — Port definitions - - /src/noteflow/grpc/startup/services.py — Dependency injection - - /tests/infrastructure/ — Adapter tests - - --- - - ## Part 4: Answer to "Would Your Answer Change With No Limit?" - - **Yes, significantly.** - - ### Constrained (5-7 files): - Focus on **layer boundaries** (domain, application, infrastructure) and **high-complexity areas** (hooks, Rust backend). Each file covers broad territory. 
- - ### Unlimited (25+ files): - Add **subsystem-specific documentation** for: - - Complex state machines (diarization jobs, recording lifecycle) - - Protocol patterns (summarization providers, NER backends) - - Cross-cutting concerns (rules engine, settings cascade) - - Test organization (fixtures, integration setup) - - The key difference: with unlimited resources, document **WHY decisions were made** (design rationale), not just **WHAT exists** (API reference). - - --- - - ## Recommendation - - ### Immediate Action (Phase 1) - Add these 3 files for maximum impact: - 1. `/src/noteflow/infrastructure/CLAUDE.md` — Adapter patterns (mockup above) - 2. `/src/noteflow/domain/CLAUDE.md` — DDD boundaries, entity relationships - 3. `/client/src-tauri/src/CLAUDE.md` — Rust patterns, IPC contracts - - ### Follow-up (Phase 2) - 4. `/src/noteflow/application/services/CLAUDE.md` — Service catalog - 5. `/client/src/hooks/CLAUDE.md` — Hook organization - 6. `/tests/CLAUDE.md` — Testing conventions - - ### Future (Phase 3) - Remaining 19+ files as the codebase grows and patterns stabilize. 
\ No newline at end of file diff --git a/.cupcake/helpers/commands.rego b/.cupcake/helpers/commands.rego new file mode 100644 index 0000000..671b15e --- /dev/null +++ b/.cupcake/helpers/commands.rego @@ -0,0 +1,49 @@ +# METADATA +# scope: package +# description: Helper functions for secure command analysis +package cupcake.helpers.commands + +import rego.v1 + +# Check if command contains a specific verb with proper word boundary anchoring +# This prevents bypass via extra whitespace: "git commit" or " git commit" +has_verb(command, verb) if { + pattern := concat("", ["(^|\\s)", verb, "(\\s|$)"]) + regex.match(pattern, command) +} + +# Check if command contains ANY of the dangerous verbs from a set +# More efficient than checking each verb individually in policy code +has_dangerous_verb(command, verb_set) if { + some verb in verb_set + has_verb(command, verb) +} + +# Detect symlink creation commands +# Matches: ln -s, ln -sf, ln -s -f, etc. +creates_symlink(command) if { + has_verb(command, "ln") + contains(command, "-s") +} + +# Check if symlink command involves a protected path +# IMPORTANT: Checks BOTH source and target (addresses TOB-EQTY-LAB-CUPCAKE-4) +# Blocks: ln -s .cupcake foo AND ln -s foo .cupcake +symlink_involves_path(command, protected_path) if { + creates_symlink(command) + contains(command, protected_path) +} + +# Detect output redirection operators that could bypass file protection +# Matches: >, >>, |, tee +has_output_redirect(command) if { + redirect_patterns := [ + `\s>\s`, # stdout redirect + `\s>>\s`, # stdout append + `\s\|\s`, # pipe + `(^|\s)tee(\s|$)`, # tee command + ] + some pattern in redirect_patterns + regex.match(pattern, command) +} + diff --git a/.cupcake/policies/opencode/ban_stdlib_logger.rego b/.cupcake/policies/opencode/ban_stdlib_logger.rego index 4803df0..d6f372f 100644 --- a/.cupcake/policies/opencode/ban_stdlib_logger.rego +++ b/.cupcake/policies/opencode/ban_stdlib_logger.rego @@ -5,18 +5,13 @@ # custom: # routing: # 
required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.ban_stdlib_logger import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -114,7 +109,7 @@ file_path_pattern := `\.py$` deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} file_path := resolved_file_path regex.match(file_path_pattern, file_path) @@ -132,7 +127,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits file_path := edit_path(edit) @@ -151,7 +146,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"patch", "apply_patch"} + tool_name in {"Patch", "ApplyPatch"} patch := patch_content patch != null diff --git a/.cupcake/policies/opencode/block_assertion_roulette.rego b/.cupcake/policies/opencode/block_assertion_roulette.rego index a5b59c3..8c2671d 100644 --- a/.cupcake/policies/opencode/block_assertion_roulette.rego +++ b/.cupcake/policies/opencode/block_assertion_roulette.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.block_assertion_roulette import rego.v1 -tool_name := input.tool_name if { - 
input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -126,7 +121,7 @@ assertion_pattern := `^\s*assert\s+[^,\n]+\n\s*assert\s+[^,\n]+$` deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} file_path := resolved_file_path regex.match(file_path_pattern, file_path) @@ -144,7 +139,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits file_path := edit_path(edit) @@ -163,7 +158,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"patch", "apply_patch"} + tool_name in {"Patch", "ApplyPatch"} patch := patch_content patch != null diff --git a/.cupcake/policies/opencode/block_biome_ignore.rego b/.cupcake/policies/opencode/block_biome_ignore.rego index 04811ee..35516ad 100644 --- a/.cupcake/policies/opencode/block_biome_ignore.rego +++ b/.cupcake/policies/opencode/block_biome_ignore.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.block_biome_ignore import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -126,7 +121,7 @@ ignore_pattern := 
`//\s*biome-ignore|//\s*@ts-ignore|//\s*@ts-expect-error|//\s* deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} file_path := resolved_file_path regex.match(file_path_pattern, file_path) @@ -144,7 +139,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits file_path := edit_path(edit) @@ -163,7 +158,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"patch", "apply_patch"} + tool_name in {"Patch", "ApplyPatch"} patch := patch_content patch != null diff --git a/.cupcake/policies/opencode/block_biome_ignore_bash.rego b/.cupcake/policies/opencode/block_biome_ignore_bash.rego index bb45e9d..e88d33d 100644 --- a/.cupcake/policies/opencode/block_biome_ignore_bash.rego +++ b/.cupcake/policies/opencode/block_biome_ignore_bash.rego @@ -5,7 +5,7 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["bash"] +# required_tools: ["Bash"] package cupcake.policies.opencode.block_biome_ignore_bash import rego.v1 diff --git a/.cupcake/policies/opencode/block_broad_exception_handler.rego b/.cupcake/policies/opencode/block_broad_exception_handler.rego index 4405af9..c706d2f 100644 --- a/.cupcake/policies/opencode/block_broad_exception_handler.rego +++ b/.cupcake/policies/opencode/block_broad_exception_handler.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.block_broad_exception_handler import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - 
input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -104,7 +99,7 @@ handler_pattern := `except\s+Exception\s*(?:as\s+\w+)?:\s*\n\s+(?:logger\.|loggi deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} content := new_content content != null @@ -119,7 +114,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits content := edit_new_content(edit) @@ -135,7 +130,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"patch", "apply_patch"} + tool_name in {"Patch", "ApplyPatch"} patch := patch_content patch != null diff --git a/.cupcake/policies/opencode/block_code_quality_test_bash.rego b/.cupcake/policies/opencode/block_code_quality_test_bash.rego index db0bbac..692c70b 100644 --- a/.cupcake/policies/opencode/block_code_quality_test_bash.rego +++ b/.cupcake/policies/opencode/block_code_quality_test_bash.rego @@ -5,7 +5,7 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["bash"] +# required_tools: ["Bash"] package cupcake.policies.opencode.block_code_quality_test_bash import rego.v1 diff --git a/.cupcake/policies/opencode/block_code_quality_test_edits.rego b/.cupcake/policies/opencode/block_code_quality_test_edits.rego index 9333d2a..7f5ced7 100644 --- a/.cupcake/policies/opencode/block_code_quality_test_edits.rego +++ b/.cupcake/policies/opencode/block_code_quality_test_edits.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", 
"NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.block_code_quality_test_edits import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -104,7 +99,7 @@ file_path_pattern := `src/test/code-quality\.test\.ts$` deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} file_path := resolved_file_path regex.match(file_path_pattern, file_path) @@ -118,7 +113,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits file_path := edit_path(edit) diff --git a/.cupcake/policies/opencode/block_code_quality_test_serena.rego b/.cupcake/policies/opencode/block_code_quality_test_serena.rego index d4b9774..cff4ce8 100644 --- a/.cupcake/policies/opencode/block_code_quality_test_serena.rego +++ b/.cupcake/policies/opencode/block_code_quality_test_serena.rego @@ -5,17 +5,18 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: [] +# required_tools: +# - McpSerenaReplaceContent +# - McpSerenaReplaceSymbolBody +# - McpSerenaCreateTextFile +# - McpSerenaInsertBeforeSymbol +# - McpSerenaInsertAfterSymbol +# - McpSerenaRenameSymbol package cupcake.policies.opencode.block_code_quality_test_serena import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -111,7 +112,7 @@ 
get_relative_path := path if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_names := {"mcp__serena__replace_content", "mcp__serena__replace_symbol_body", "mcp__serena__create_text_file", "mcp__serena__insert_before_symbol", "mcp__serena__insert_after_symbol", "mcp__serena__rename_symbol"} + tool_names := {"McpSerenaReplaceContent", "McpSerenaReplaceSymbolBody", "McpSerenaCreateTextFile", "McpSerenaInsertBeforeSymbol", "McpSerenaInsertAfterSymbol", "McpSerenaRenameSymbol"} tool_name in tool_names file_path := get_relative_path diff --git a/.cupcake/policies/opencode/block_code_quality_test_serena_plugin.rego b/.cupcake/policies/opencode/block_code_quality_test_serena_plugin.rego index 1ffc43a..fb87a2b 100644 --- a/.cupcake/policies/opencode/block_code_quality_test_serena_plugin.rego +++ b/.cupcake/policies/opencode/block_code_quality_test_serena_plugin.rego @@ -5,17 +5,18 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: [] +# required_tools: +# - McpPluginSerenaSerenaReplaceContent +# - McpPluginSerenaSerenaReplaceSymbolBody +# - McpPluginSerenaSerenaCreateTextFile +# - McpPluginSerenaSerenaInsertBeforeSymbol +# - McpPluginSerenaSerenaInsertAfterSymbol +# - McpPluginSerenaSerenaRenameSymbol package cupcake.policies.opencode.block_code_quality_test_serena_plugin import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -111,7 +112,7 @@ get_relative_path := path if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_names := {"mcp__plugin_serena_serena__replace_content", "mcp__plugin_serena_serena__replace_symbol_body", "mcp__plugin_serena_serena__create_text_file", "mcp__plugin_serena_serena__insert_before_symbol", 
"mcp__plugin_serena_serena__insert_after_symbol", "mcp__plugin_serena_serena__rename_symbol"} + tool_names := {"McpPluginSerenaSerenaReplaceContent", "McpPluginSerenaSerenaReplaceSymbolBody", "McpPluginSerenaSerenaCreateTextFile", "McpPluginSerenaSerenaInsertBeforeSymbol", "McpPluginSerenaSerenaInsertAfterSymbol", "McpPluginSerenaSerenaRenameSymbol"} tool_name in tool_names file_path := get_relative_path diff --git a/.cupcake/policies/opencode/block_datetime_now_fallback.rego b/.cupcake/policies/opencode/block_datetime_now_fallback.rego index 626a5b5..f77719c 100644 --- a/.cupcake/policies/opencode/block_datetime_now_fallback.rego +++ b/.cupcake/policies/opencode/block_datetime_now_fallback.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.block_datetime_now_fallback import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -104,7 +99,7 @@ pattern := `return\s+datetime\.now\s*\(` deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} content := new_content content != null @@ -119,7 +114,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits content := edit_new_content(edit) @@ -135,7 +130,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"patch", 
"apply_patch"} + tool_name in {"Patch", "ApplyPatch"} patch := patch_content patch != null diff --git a/.cupcake/policies/opencode/block_default_value_swallow.rego b/.cupcake/policies/opencode/block_default_value_swallow.rego index 32d1e02..b051cd3 100644 --- a/.cupcake/policies/opencode/block_default_value_swallow.rego +++ b/.cupcake/policies/opencode/block_default_value_swallow.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.block_default_value_swallow import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -104,7 +99,7 @@ pattern := `except\s+\w*(?:Error|Exception).*?:\s*\n\s+.*?(?:logger\.|logging\.) 
deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} content := new_content content != null @@ -119,7 +114,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits content := edit_new_content(edit) @@ -135,7 +130,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"patch", "apply_patch"} + tool_name in {"Patch", "ApplyPatch"} patch := patch_content patch != null diff --git a/.cupcake/policies/opencode/block_duplicate_fixtures.rego b/.cupcake/policies/opencode/block_duplicate_fixtures.rego index b793240..bf974af 100644 --- a/.cupcake/policies/opencode/block_duplicate_fixtures.rego +++ b/.cupcake/policies/opencode/block_duplicate_fixtures.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.block_duplicate_fixtures import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -127,7 +122,7 @@ fixture_pattern := `@pytest\.fixture[^@]*\ndef\s+(mock_uow|crypto|meetings_dir|w deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} file_path := resolved_file_path regex.match(file_path_pattern, file_path) @@ -146,7 +141,7 @@ deny contains decision if { deny contains 
decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits file_path := edit_path(edit) @@ -166,7 +161,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"patch", "apply_patch"} + tool_name in {"Patch", "ApplyPatch"} patch := patch_content patch != null diff --git a/.cupcake/policies/opencode/block_linter_config_frontend.rego b/.cupcake/policies/opencode/block_linter_config_frontend.rego index 00d20bc..3bff2fe 100644 --- a/.cupcake/policies/opencode/block_linter_config_frontend.rego +++ b/.cupcake/policies/opencode/block_linter_config_frontend.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.block_linter_config_frontend import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -104,7 +99,7 @@ file_path_pattern := `(^|/)client/.*(?:\.?eslint(?:rc|\.config).*|\.?prettier(?: deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} file_path := resolved_file_path regex.match(file_path_pattern, file_path) @@ -119,7 +114,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits file_path := edit_path(edit) diff --git a/.cupcake/policies/opencode/block_linter_config_frontend_bash.rego 
b/.cupcake/policies/opencode/block_linter_config_frontend_bash.rego index 866259c..a356217 100644 --- a/.cupcake/policies/opencode/block_linter_config_frontend_bash.rego +++ b/.cupcake/policies/opencode/block_linter_config_frontend_bash.rego @@ -5,7 +5,7 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["bash"] +# required_tools: ["Bash"] package cupcake.policies.opencode.block_linter_config_frontend_bash import rego.v1 diff --git a/.cupcake/policies/opencode/block_linter_config_python.rego b/.cupcake/policies/opencode/block_linter_config_python.rego index fdf85c9..f2f6479 100644 --- a/.cupcake/policies/opencode/block_linter_config_python.rego +++ b/.cupcake/policies/opencode/block_linter_config_python.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.block_linter_config_python import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -104,7 +99,7 @@ file_path_pattern := `(?:pyproject\.toml|\.?ruff\.toml|\.?pyrightconfig\.json|\. 
deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} file_path := resolved_file_path regex.match(file_path_pattern, file_path) @@ -119,7 +114,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits file_path := edit_path(edit) diff --git a/.cupcake/policies/opencode/block_linter_config_python_bash.rego b/.cupcake/policies/opencode/block_linter_config_python_bash.rego index cd61480..65229c7 100644 --- a/.cupcake/policies/opencode/block_linter_config_python_bash.rego +++ b/.cupcake/policies/opencode/block_linter_config_python_bash.rego @@ -5,7 +5,7 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["bash"] +# required_tools: ["Bash"] package cupcake.policies.opencode.block_linter_config_python_bash import rego.v1 diff --git a/.cupcake/policies/opencode/block_magic_numbers.rego b/.cupcake/policies/opencode/block_magic_numbers.rego index 62ee131..54bac75 100644 --- a/.cupcake/policies/opencode/block_magic_numbers.rego +++ b/.cupcake/policies/opencode/block_magic_numbers.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.block_magic_numbers import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -126,7 +121,7 @@ number_pattern := `(?:timeout|delay|interval|duration|limit|max|min|size|count|t deny 
contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} file_path := resolved_file_path regex.match(file_path_pattern, file_path) @@ -145,7 +140,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits file_path := edit_path(edit) @@ -165,7 +160,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"patch", "apply_patch"} + tool_name in {"Patch", "ApplyPatch"} patch := patch_content patch != null diff --git a/.cupcake/policies/opencode/block_makefile_bash.rego b/.cupcake/policies/opencode/block_makefile_bash.rego index 133a503..15e2b9a 100644 --- a/.cupcake/policies/opencode/block_makefile_bash.rego +++ b/.cupcake/policies/opencode/block_makefile_bash.rego @@ -5,7 +5,7 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["bash"] +# required_tools: ["Bash"] package cupcake.policies.opencode.block_makefile_bash import rego.v1 diff --git a/.cupcake/policies/opencode/block_makefile_edit.rego b/.cupcake/policies/opencode/block_makefile_edit.rego index d60887d..9f7da15 100644 --- a/.cupcake/policies/opencode/block_makefile_edit.rego +++ b/.cupcake/policies/opencode/block_makefile_edit.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.block_makefile_edit import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := 
input.resolved_file_path if { input.resolved_file_path != null @@ -104,7 +99,7 @@ file_path_pattern := `(?:^|/)Makefile$` deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} file_path := resolved_file_path regex.match(file_path_pattern, file_path) @@ -118,7 +113,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits file_path := edit_path(edit) diff --git a/.cupcake/policies/opencode/block_no_verify.rego b/.cupcake/policies/opencode/block_no_verify.rego index 0aa9dd0..d399d1f 100644 --- a/.cupcake/policies/opencode/block_no_verify.rego +++ b/.cupcake/policies/opencode/block_no_verify.rego @@ -5,7 +5,7 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["bash"] +# required_tools: ["Bash"] package cupcake.policies.opencode.block_no_verify import rego.v1 diff --git a/.cupcake/policies/opencode/block_silent_none_return.rego b/.cupcake/policies/opencode/block_silent_none_return.rego index 6b976ab..bdb56b4 100644 --- a/.cupcake/policies/opencode/block_silent_none_return.rego +++ b/.cupcake/policies/opencode/block_silent_none_return.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.block_silent_none_return import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -104,7 +99,7 @@ pattern := 
`except\s+\w*Error.*?:\s*\n\s+.*?(?:logger\.|logging\.).*?\n\s+return deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} content := new_content content != null @@ -119,7 +114,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits content := edit_new_content(edit) @@ -135,7 +130,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"patch", "apply_patch"} + tool_name in {"Patch", "ApplyPatch"} patch := patch_content patch != null diff --git a/.cupcake/policies/opencode/block_test_loops_conditionals.rego b/.cupcake/policies/opencode/block_test_loops_conditionals.rego index 4c38e9a..b706398 100644 --- a/.cupcake/policies/opencode/block_test_loops_conditionals.rego +++ b/.cupcake/policies/opencode/block_test_loops_conditionals.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.block_test_loops_conditionals import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -126,7 +121,7 @@ pattern := `def test_[^(]+\([^)]*\)[^:]*:[\s\S]*?\b(for|while|if)\s+[^:]+:[\s\S] deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} file_path := resolved_file_path 
regex.match(file_path_pattern, file_path) @@ -144,7 +139,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits file_path := edit_path(edit) @@ -163,7 +158,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"patch", "apply_patch"} + tool_name in {"Patch", "ApplyPatch"} patch := patch_content patch != null diff --git a/.cupcake/policies/opencode/block_tests_quality.rego b/.cupcake/policies/opencode/block_tests_quality.rego index 6652ad4..45233fd 100644 --- a/.cupcake/policies/opencode/block_tests_quality.rego +++ b/.cupcake/policies/opencode/block_tests_quality.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.block_tests_quality import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -105,7 +100,7 @@ exclude_pattern := `baselines\.json$` deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} file_path := resolved_file_path regex.match(file_path_pattern, file_path) @@ -120,7 +115,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits file_path := edit_path(edit) diff --git a/.cupcake/policies/opencode/block_tests_quality_bash.rego 
b/.cupcake/policies/opencode/block_tests_quality_bash.rego index 8e2ab9a..e65e9b0 100644 --- a/.cupcake/policies/opencode/block_tests_quality_bash.rego +++ b/.cupcake/policies/opencode/block_tests_quality_bash.rego @@ -5,7 +5,7 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["bash"] +# required_tools: ["Bash"] package cupcake.policies.opencode.block_tests_quality_bash import rego.v1 diff --git a/.cupcake/policies/opencode/builtins/git_block_no_verify.rego b/.cupcake/policies/opencode/builtins/git_block_no_verify.rego new file mode 100644 index 0000000..4bb2ac9 --- /dev/null +++ b/.cupcake/policies/opencode/builtins/git_block_no_verify.rego @@ -0,0 +1,107 @@ +# METADATA +# scope: package +# title: Git Block No-Verify - Builtin Policy +# authors: ["Cupcake Builtins"] +# custom: +# severity: HIGH +# id: BUILTIN-GIT-BLOCK-NO-VERIFY +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["Bash"] +package cupcake.policies.builtins.git_block_no_verify + +import rego.v1 + +import data.cupcake.helpers.commands + +# Block git commands that bypass verification hooks +deny contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + # Get the command from tool input + command := lower(input.tool_input.command) + + # Check if it's a git command with --no-verify flag + contains_git_no_verify(command) + + decision := { + "rule_id": "BUILTIN-GIT-BLOCK-NO-VERIFY", + "reason": "Git operations with --no-verify are not permitted. 
Commit hooks must run for code quality and security checks.", + "severity": "HIGH", + } +} + +# Check if command contains git with --no-verify flag +# Uses helper library to prevent spacing bypass (TOB-EQTY-LAB-CUPCAKE-3) +contains_git_no_verify(cmd) if { + # Check for git commit with --no-verify + commands.has_verb(cmd, "git") + commands.has_verb(cmd, "commit") + contains(cmd, "--no-verify") +} + +contains_git_no_verify(cmd) if { + # Check for git commit with -n (shorthand for --no-verify) + commands.has_verb(cmd, "git") + commands.has_verb(cmd, "commit") + regex.match(`\s-[a-z]*n[a-z]*\s`, concat(" ", [cmd, " "])) # Matches -n, -an, -nm, etc. +} + +contains_git_no_verify(cmd) if { + # Check for git push with --no-verify + commands.has_verb(cmd, "git") + commands.has_verb(cmd, "push") + contains(cmd, "--no-verify") +} + +contains_git_no_verify(cmd) if { + # Check for git merge with --no-verify + commands.has_verb(cmd, "git") + commands.has_verb(cmd, "merge") + contains(cmd, "--no-verify") +} + +# Also block attempts to disable hooks via config +deny contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + command := lower(input.tool_input.command) + + # Check if trying to disable hooks via git config + contains_hook_disable(command) + + decision := { + "rule_id": "BUILTIN-GIT-BLOCK-NO-VERIFY", + "reason": "Disabling git hooks is not permitted. 
Hooks are required for code quality and security.", + "severity": "HIGH", + } +} + +contains_hook_disable(cmd) if { + commands.has_verb(cmd, "git") + commands.has_verb(cmd, "config") + contains(cmd, "core.hookspath") + contains(cmd, "/dev/null") +} + +contains_hook_disable(cmd) if { + # Detect attempts to chmod hooks to non-executable + commands.has_verb(cmd, "chmod") + regex.match(`\.git/hooks`, cmd) + regex.match(`-x|-[0-9]*0[0-9]*`, cmd) # Removing execute permission +} + +contains_hook_disable(cmd) if { + # Detect attempts to remove hook files + contains(cmd, ".git/hooks") + removal_cmds := {"rm", "unlink", "trash"} + commands.has_dangerous_verb(cmd, removal_cmds) +} + +contains_hook_disable(cmd) if { + # Detect moving/renaming hooks to disable them + commands.has_verb(cmd, "mv") + contains(cmd, ".git/hooks") +} diff --git a/.cupcake/policies/opencode/builtins/git_pre_check.rego b/.cupcake/policies/opencode/builtins/git_pre_check.rego new file mode 100644 index 0000000..db33349 --- /dev/null +++ b/.cupcake/policies/opencode/builtins/git_pre_check.rego @@ -0,0 +1,121 @@ +# METADATA +# scope: package +# title: Git Pre-Check - Builtin Policy +# authors: ["Cupcake Builtins"] +# custom: +# severity: HIGH +# id: BUILTIN-GIT-CHECK +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["Bash"] +package cupcake.policies.builtins.git_pre_check + +import rego.v1 + +# Check git operations and run validation before allowing +halt contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + # Check if this is a git operation that needs validation + command := lower(input.params.command) + is_git_operation(command) + + # Run all configured checks + check_results := run_all_checks + + # Find any failed checks + failed_checks := [check | + some check in check_results + not check.success + ] + + # If any checks failed, halt the operation + count(failed_checks) > 0 + + # Build failure message + failure_messages := [msg | + some check
in failed_checks + msg := concat("", ["- ", check.message]) + ] + + failure_list := concat("\n", failure_messages) + reason := concat("\n", ["Git pre-checks failed:", failure_list]) + + decision := { + "rule_id": "BUILTIN-GIT-CHECK", + "reason": reason, + "severity": "HIGH", + } +} + +# Check if command is a git operation that needs validation +is_git_operation(cmd) if { + git_patterns := { + "git commit", + "git push", + "git merge", + } + + some pattern in git_patterns + contains(cmd, pattern) +} + +# Run all configured pre-checks +run_all_checks := results if { + # Collect all git check signals + check_signals := [name | + some name, _ in input.signals + startswith(name, "__builtin_git_check_") + ] + + # Evaluate each check + results := [result | + some signal_name in check_signals + signal_result := input.signals[signal_name] + result := evaluate_check(signal_name, signal_result) + ] + + # Return results if we have any + count(results) > 0 +} else := [] + +# No checks configured + +# Evaluate a check result +evaluate_check(name, result) := check if { + # Parse the signal result which should contain exit_code and output + is_object(result) + check := { + "name": clean_signal_name(name), + "success": result.exit_code == 0, + "message": default_message(result), + } +} else := check if { + # Handle string results (command output) + is_string(result) + check := { + "name": clean_signal_name(name), + "success": true, # Assume success if we got output + "message": result, + } +} + +# Extract readable name from signal name +clean_signal_name(signal_name) := name if { + # Remove __builtin_git_check_ prefix and return the index + parts := split(signal_name, "__builtin_git_check_") + count(parts) > 1 + name := concat("", ["Check ", parts[1]]) +} else := signal_name + +# Get appropriate message from result +default_message(result) := msg if { + result.output != "" + msg := result.output +} else := msg if { + result.exit_code == 0 + msg := "Check passed" +} else := msg if {
msg := concat("", ["Check failed with exit code ", format_int(result.exit_code, 10)]) +} diff --git a/.cupcake/policies/opencode/builtins/opencode_always_inject_on_prompt.rego b/.cupcake/policies/opencode/builtins/opencode_always_inject_on_prompt.rego new file mode 100644 index 0000000..09cb42d --- /dev/null +++ b/.cupcake/policies/opencode/builtins/opencode_always_inject_on_prompt.rego @@ -0,0 +1,60 @@ +# METADATA +# scope: package +# title: Always Inject On Prompt - Builtin Policy +# authors: ["Cupcake Builtins"] +# custom: +# severity: LOW +# id: BUILTIN-INJECT-PROMPT +# routing: +# required_events: ["UserPromptSubmit"] +package cupcake.policies.builtins.opencode_always_inject_on_prompt + +import rego.v1 + +# Inject configured context on every user prompt +add_context contains decision if { + input.hook_event_name == "UserPromptSubmit" + + # Get all configured context items + contexts := get_all_contexts + count(contexts) > 0 + + # Combine all contexts + combined_context := concat("\n\n", contexts) + + decision := { + "rule_id": "BUILTIN-INJECT-PROMPT", + "context": combined_context, + "severity": "LOW", + } +} + +# Get all configured contexts from signals +get_all_contexts := contexts if { + # Collect all builtin prompt context signals + signal_results := [value | + some key, value in input.signals + startswith(key, "__builtin_prompt_context_") + ] + + # Format each context appropriately + contexts := [ctx | + some result in signal_results + ctx := format_context(result) + ] + + # Ensure we have at least one context + count(contexts) > 0 +} else := [] + +# No signals available or no contexts configured + +# Format context based on its source +format_context(value) := formatted if { + # If it's a string, use it directly + is_string(value) + formatted := value +} else := formatted if { + # If it's an object/array, format as JSON + formatted := json.marshal(value) +} diff --git a/.cupcake/policies/opencode/builtins/opencode_enforce_full_file_read.rego 
b/.cupcake/policies/opencode/builtins/opencode_enforce_full_file_read.rego new file mode 100644 index 0000000..934fc62 --- /dev/null +++ b/.cupcake/policies/opencode/builtins/opencode_enforce_full_file_read.rego @@ -0,0 +1,63 @@ +# METADATA +# scope: package +# title: Enforce Full File Read - Builtin Policy +# authors: ["Cupcake Builtins"] +# custom: +# severity: MEDIUM +# id: BUILTIN-ENFORCE-FULL-READ +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["Read"] +package cupcake.policies.builtins.opencode_enforce_full_file_read + +import rego.v1 + +# Deny partial reads of files (MVP: enforce for all files) +deny contains decision if { + # Only apply to Read tool + input.hook_event_name == "PreToolUse" + input.tool_name == "Read" + + # Check if offset or limit parameters are present + has_partial_read_params + + # Get configured message from signal (with fallback) + message := get_configured_message + + decision := { + "rule_id": "BUILTIN-ENFORCE-FULL-READ", + "reason": message, + "severity": "MEDIUM", + } +} + +# Check if the Read tool has offset or limit parameters +has_partial_read_params if { + # Check for offset parameter + "offset" in object.keys(input.tool_input) +} + +has_partial_read_params if { + # Check for limit parameter + "limit" in object.keys(input.tool_input) +} + +# Get configured message from builtin config +get_configured_message := msg if { + # Direct access to builtin config (no signal execution needed) + msg := input.builtin_config.opencode_enforce_full_file_read.message +} else := msg if { + # Fallback to default message + msg := "Please read the entire file first (files under 2000 lines must be read completely)" +} + +# Future enhancement: Get max lines threshold +# This would be used in a future version to check file size +# and only enforce full reads for files under the threshold +get_max_lines_threshold := lines if { + # Direct access to builtin config (no signal execution needed) + lines := 
input.builtin_config.opencode_enforce_full_file_read.max_lines +} else := lines if { + # Default to 2000 lines + lines := 2000 +} diff --git a/.cupcake/policies/opencode/builtins/post_edit_check.rego b/.cupcake/policies/opencode/builtins/post_edit_check.rego new file mode 100644 index 0000000..9d61bc6 --- /dev/null +++ b/.cupcake/policies/opencode/builtins/post_edit_check.rego @@ -0,0 +1,135 @@ +# METADATA +# scope: package +# title: Post Edit Check - Builtin Policy +# authors: ["Cupcake Builtins"] +# custom: +# severity: MEDIUM +# id: BUILTIN-POST-EDIT +# routing: +# required_events: ["PostToolUse"] +# required_tools: ["Edit", "Write", "MultiEdit", "NotebookEdit"] +package cupcake.policies.builtins.post_edit_check + +import rego.v1 + +# Run validation after file edits +ask contains decision if { + input.hook_event_name == "PostToolUse" + + # Check if this was a file editing operation + editing_tools := {"Edit", "Write", "MultiEdit", "NotebookEdit"} + input.tool_name in editing_tools + + # Get the file that was edited + file_path := get_edited_file_path + file_path != "" + + # Get file extension + extension := get_file_extension(file_path) + extension != "" + + # Run validation for this file type + validation_result := run_validation_for_extension(extension, file_path) + + # If validation failed, ask for user confirmation + not validation_result.success + + question := concat("\n", [ + concat(" ", ["File validation failed for", file_path]), + validation_result.message, + "", + "Do you want to continue anyway?", + ]) + + decision := { + "rule_id": "BUILTIN-POST-EDIT", + "reason": question, + "question": question, + "severity": "MEDIUM", + } +} + +# Also provide feedback as context when validation succeeds +add_context contains context_msg if { + input.hook_event_name == "PostToolUse" + + editing_tools := {"Edit", "Write", "MultiEdit", "NotebookEdit"} + input.tool_name in editing_tools + + file_path := get_edited_file_path + file_path != "" + + extension := 
get_file_extension(file_path) + extension != "" + + validation_result := run_validation_for_extension(extension, file_path) + + # If validation succeeded, provide positive feedback + validation_result.success + + # add_context expects strings, not decision objects + context_msg := concat(" ", ["✓ Validation passed for", file_path]) +} + +# Extract file path from tool response/params +get_edited_file_path := path if { + path := input.params.file_path +} else := path if { + path := input.params.path +} else := "" + +# Get file extension from path +get_file_extension(path) := ext if { + parts := split(path, ".") + count(parts) > 1 + ext := parts[count(parts) - 1] +} else := "" + +# Run validation for a specific file extension +run_validation_for_extension(ext, file_path) := result if { + # Check if there's a configured validation signal for this extension + signal_name := concat("", ["__builtin_post_edit_", ext]) + signal_name in object.keys(input.signals) + + # Get the validation result from the signal + signal_result := input.signals[signal_name] + + # Parse the result based on its type + result := parse_validation_result(signal_result, file_path) +} else := result if { + # No validation configured for this extension + result := { + "success": true, + "message": "No validation configured - FALLBACK", + } +} + +# Parse validation result from signal +parse_validation_result(signal_result, file_path) := result if { + # Handle object results with exit_code (standard format from signal execution) + is_object(signal_result) + "exit_code" in object.keys(signal_result) + + result := { + "success": signal_result.exit_code == 0, + "message": default_validation_message(signal_result, file_path), + } +} else := result if { + # Handle string results (assume success if we got output) + is_string(signal_result) + result := { + "success": true, + "message": signal_result, + } +} + +# Generate appropriate validation message +default_validation_message(signal_result, file_path) := 
msg if { + signal_result.output != "" + msg := signal_result.output +} else := msg if { + signal_result.exit_code == 0 + msg := "Validation passed" +} else := msg if { + msg := concat("", ["Validation failed with exit code ", format_int(signal_result.exit_code, 10)]) +} diff --git a/.cupcake/policies/opencode/builtins/protected_paths.rego b/.cupcake/policies/opencode/builtins/protected_paths.rego new file mode 100644 index 0000000..b3165ac --- /dev/null +++ b/.cupcake/policies/opencode/builtins/protected_paths.rego @@ -0,0 +1,380 @@ +# METADATA +# scope: package +# title: Protected Paths - Builtin Policy +# authors: ["Cupcake Builtins"] +# custom: +# severity: HIGH +# id: BUILTIN-PROTECTED-PATHS +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["Edit", "Write", "MultiEdit", "NotebookEdit", "Bash"] +package cupcake.policies.builtins.protected_paths + +import data.cupcake.helpers.commands +import data.cupcake.helpers.paths +import rego.v1 + +# Block WRITE operations on protected paths (but allow reads) +# For regular tools (Edit, Write, NotebookEdit) +halt contains decision if { + input.hook_event_name == "PreToolUse" + + # Check for SINGLE-file writing tools only + single_file_tools := {"Edit", "Write", "NotebookEdit"} + input.tool_name in single_file_tools + + # Get the file path from tool input + # TOB-4 fix: Use canonical path (always provided by Rust preprocessing) + file_path := input.resolved_file_path + file_path != null + + # Check if path matches any protected path + is_protected_path(file_path) + + # Get configured message from signals + message := get_configured_message + + decision := { + "rule_id": "BUILTIN-PROTECTED-PATHS", + "reason": concat("", [message, " (", file_path, ")"]), + "severity": "HIGH", + } +} + +# Block WRITE operations on protected paths - MultiEdit special handling +# MultiEdit has an array of edits, each with their own resolved_file_path +halt contains decision if { + input.hook_event_name == "PreToolUse" + 
input.tool_name == "MultiEdit" + + # Check each edit in the edits array + some edit in input.tool_input.edits + file_path := edit.resolved_file_path + file_path != null + + # Check if THIS edit's path matches any protected path + is_protected_path(file_path) + + # Get configured message from signals + message := get_configured_message + + decision := { + "rule_id": "BUILTIN-PROTECTED-PATHS", + "reason": concat("", [message, " (", file_path, ")"]), + "severity": "HIGH", + } +} + +# Block ALL Bash commands that reference protected paths UNLESS whitelisted +halt contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + # Get the command + command := input.tool_input.command + lower_cmd := lower(command) + + # Check if any protected path is mentioned in the command + some protected_path in get_protected_paths + contains_protected_reference(lower_cmd, protected_path) + + # ONLY allow if it's a whitelisted read operation + not is_whitelisted_read_command(lower_cmd) + + message := get_configured_message + + decision := { + "rule_id": "BUILTIN-PROTECTED-PATHS", + "reason": concat("", [message, " (only read operations allowed)"]), + "severity": "HIGH", + } +} + +# Block destructive commands that would affect a parent directory containing protected paths +# This catches cases like `rm -rf /home/user/*` when `/home/user/.cupcake/` is protected +# The `affected_parent_directories` field is populated by Rust preprocessing for destructive commands +halt contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + # Get affected parent directories from preprocessing + # This is populated for commands like rm -rf, chmod -R, etc. 
+ affected_dirs := input.affected_parent_directories + count(affected_dirs) > 0 + + # Check if any protected path is a CHILD of an affected directory + some affected_dir in affected_dirs + some protected_path in get_protected_paths + protected_is_child_of_affected(protected_path, affected_dir) + + message := get_configured_message + + decision := { + "rule_id": "BUILTIN-PROTECTED-PATHS-PARENT", + "reason": concat("", [message, " (", protected_path, " would be affected by operation on ", affected_dir, ")"]), + "severity": "HIGH", + } +} + +# Block interpreter inline scripts (-c/-e flags) that mention protected paths +# This catches attacks like: python -c 'pathlib.Path("../my-favorite-file.txt").delete()' +halt contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + command := input.tool_input.command + lower_cmd := lower(command) + + # Detect inline script execution with interpreters + interpreters := ["python", "python3", "python2", "ruby", "perl", "node", "php"] + some interp in interpreters + regex.match(concat("", ["(^|\\s)", interp, "\\s+(-c|-e)\\s"]), lower_cmd) + + # Check if any protected path is mentioned anywhere in the command + some protected_path in get_protected_paths + contains(lower_cmd, lower(protected_path)) + + message := get_configured_message + + decision := { + "rule_id": "BUILTIN-PROTECTED-PATHS-SCRIPT", + "reason": concat("", [message, " (inline script mentions '", protected_path, "')"]), + "severity": "HIGH", + } +} + +# Extract file path from tool input +get_file_path_from_tool_input := path if { + path := input.tool_input.file_path +} else := path if { + path := input.tool_input.path +} else := path if { + path := input.tool_input.notebook_path +} else := path if { + # For MultiEdit, check if any edit targets a protected path + # Return the first protected path found + some edit in input.tool_input.edits + path := edit.file_path +} else := "" + +# Check if a path is protected +is_protected_path(path) 
if { + protected_paths := get_protected_paths + some protected_path in protected_paths + path_matches(path, protected_path) +} + +# Path matching logic (supports exact, directory prefix, filename, and glob patterns) +path_matches(path, pattern) if { + # Exact match (case-insensitive) + lower(path) == lower(pattern) +} + +path_matches(path, pattern) if { + # Filename match - pattern is just a filename (no path separators) + # Matches if the canonical path ends with the filename + not contains(pattern, "/") + not contains(pattern, "\\") + endswith(lower(path), concat("/", [lower(pattern)])) +} + +path_matches(path, pattern) if { + # Filename match for Windows paths + not contains(pattern, "/") + not contains(pattern, "\\") + endswith(lower(path), concat("\\", [lower(pattern)])) +} + +path_matches(path, pattern) if { + # Directory prefix match - absolute pattern (starts with /) + # Pattern: "/absolute/path/" matches "/absolute/path/file.txt" + endswith(pattern, "/") + startswith(pattern, "/") + startswith(lower(path), lower(pattern)) +} + +path_matches(path, pattern) if { + # Directory prefix match - relative pattern + # Pattern: "src/legacy/" should match "/tmp/project/src/legacy/file.rs" + # This handles canonical absolute paths against relative pattern configs + endswith(pattern, "/") + not startswith(pattern, "/") + + # Check if the pattern appears in the path as a directory component + # We need to match "/src/legacy/" not just any "src/legacy/" substring + contains(lower(path), concat("/", [lower(pattern)])) +} + +path_matches(path, pattern) if { + # Directory match without trailing slash - absolute pattern + # If pattern is "/absolute/path/src/legacy", match "/absolute/path/src/legacy/file.js" + not endswith(pattern, "/") + startswith(pattern, "/") + prefix := concat("", [lower(pattern), "/"]) + startswith(lower(path), prefix) +} + +path_matches(path, pattern) if { + # Directory match without trailing slash - relative pattern + # If pattern is "src/legacy", 
match "/tmp/project/src/legacy/file.js" + not endswith(pattern, "/") + not startswith(pattern, "/") + prefix := concat("/", [lower(pattern), "/"]) + contains(lower(path), prefix) +} + +path_matches(path, pattern) if { + # Glob pattern matching (simplified - just * wildcard for now) + contains(pattern, "*") + glob_match(lower(path), lower(pattern)) +} + +# Simple glob matching (supports * wildcard) +glob_match(path, pattern) if { + # Convert glob pattern to regex: * becomes .* + regex_pattern := replace(replace(pattern, ".", "\\."), "*", ".*") + regex_pattern_anchored := concat("", ["^", regex_pattern, "$"]) + regex.match(regex_pattern_anchored, path) +} + +# WHITELIST approach: Only these read operations are allowed on protected paths +is_whitelisted_read_command(cmd) if { + # Exclude dangerous sed variants FIRST + startswith(cmd, "sed -i") # In-place edit + false # Explicitly reject +} + +is_whitelisted_read_command(cmd) if { + # Check if command starts with a safe read-only command + safe_read_verbs := { + "cat", # Read file contents + "less", # Page through file + "more", # Page through file + "head", # Read first lines + "tail", # Read last lines + "grep", # Search in file + "egrep", # Extended grep + "fgrep", # Fixed string grep + "zgrep", # Grep compressed files + "wc", # Word/line count + "file", # Determine file type + "stat", # File statistics + "ls", # List files + "find", # Find files (read-only by default) + "awk", # Text processing (without output redirect) + "sed", # Stream editor (safe without -i flag) + "sort", # Sort lines + "uniq", # Filter unique lines + "diff", # Compare files + "cmp", # Compare files byte by byte + "md5sum", # Calculate checksum + "sha256sum", # Calculate checksum + "hexdump", # Display in hex + "strings", # Extract strings from binary + "od", # Octal dump + } + + some verb in safe_read_verbs + commands.has_verb(cmd, verb) + + # CRITICAL: Exclude sed -i specifically + # This check is NOT redundant with lines 188-192. 
OPA evaluates ALL rule bodies + # for is_whitelisted_read_command(). Body 1 (lines 188-192) explicitly rejects "sed -i", + # but OPA continues to evaluate Body 2 (this body). Without this check, "sed -i" + # would match the "sed" verb above and incorrectly be whitelisted. + # Whitespace variations (sed -i, sed\t-i) are normalized by preprocessing. + not startswith(cmd, "sed -i") + + # Ensure no output redirection + not commands.has_output_redirect(cmd) +} + +is_whitelisted_read_command(cmd) if { + # Also allow piped commands that start with safe reads + # e.g., "cat file.txt | grep pattern" + contains(cmd, "|") + parts := split(cmd, "|") + first_part := trim_space(parts[0]) + + # Check if first part starts with a safe command (avoid recursion) + safe_read_verbs := { + "cat", # Read file contents + "less", # Page through file + "more", # Page through file + "head", # Read first lines + "tail", # Read last lines + "grep", # Search in file + "wc", # Word/line count + "file", # Determine file type + "stat", # File statistics + "ls", # List files + } + + some verb in safe_read_verbs + commands.has_verb(first_part, verb) +} + +# Check if command references a protected path +contains_protected_reference(cmd, protected_path) if { + # Direct reference + contains(cmd, lower(protected_path)) +} + +contains_protected_reference(cmd, protected_path) if { + # Without trailing slash if it's a directory pattern + endswith(protected_path, "/") + path_without_slash := substring(lower(protected_path), 0, count(protected_path) - 1) + contains(cmd, path_without_slash) +} + +# Get configured message from builtin config +get_configured_message := msg if { + # Direct access to builtin config (no signal execution needed) + msg := input.builtin_config.protected_paths.message +} else := msg if { + # Fallback to default if config not present + msg := "This path is read-only and cannot be modified" +} + +# Get list of protected paths from builtin config +get_protected_paths := paths if { + # 
Direct access to builtin config (no signal execution needed) + paths := input.builtin_config.protected_paths.paths +} else := paths if { + # No paths configured - policy inactive + paths := [] +} + +# Check if a protected path is a child of an affected directory +# This is the "reverse" check for parent directory protection: +# protected_path: /home/user/.cupcake/config.yml +# affected_dir: /home/user/ +# Returns true because the protected path is inside the affected directory +protected_is_child_of_affected(protected_path, affected_dir) if { + # Normalize: ensure affected_dir ends with / + affected_normalized := ensure_trailing_slash(affected_dir) + + # Check if protected path starts with the affected directory + startswith(lower(protected_path), lower(affected_normalized)) +} + +protected_is_child_of_affected(protected_path, affected_dir) if { + # Also check exact match (rm -rf /home/user/.cupcake) + lower(protected_path) == lower(affected_dir) +} + +protected_is_child_of_affected(protected_path, affected_dir) if { + # Handle case where affected_dir is specified without trailing slash + # but protected_path has it as a prefix + not endswith(affected_dir, "/") + prefix := concat("", [lower(affected_dir), "/"]) + startswith(lower(protected_path), prefix) +} + +# Helper to ensure path ends with / +ensure_trailing_slash(path) := result if { + endswith(path, "/") + result := path +} else := result if { + result := concat("", [path, "/"]) +} diff --git a/.cupcake/policies/opencode/builtins/rulebook_security_guardrails.rego b/.cupcake/policies/opencode/builtins/rulebook_security_guardrails.rego new file mode 100644 index 0000000..86731e8 --- /dev/null +++ b/.cupcake/policies/opencode/builtins/rulebook_security_guardrails.rego @@ -0,0 +1,231 @@ +# METADATA +# scope: package +# title: Rulebook Security Guardrails - Builtin Policy +# authors: ["Cupcake Builtins"] +# custom: +# severity: HIGH +# id: BUILTIN-RULEBOOK-SECURITY +# routing: +# required_events: ["PreToolUse"] +# 
required_tools: ["Edit", "Write", "MultiEdit", "NotebookEdit", "Read", "Grep", "Glob", "Bash", "Task", "WebFetch"] +package cupcake.policies.builtins.rulebook_security_guardrails + +import rego.v1 + +import data.cupcake.helpers.commands + +# Block ANY tool operations targeting protected paths +halt contains decision if { + input.hook_event_name == "PreToolUse" + + # Check for ANY file operation tools (read, write, search, etc.) + file_operation_tools := { + "Edit", "Write", "MultiEdit", "NotebookEdit", # Writing tools + "Read", # Reading tools + "Grep", "Glob", # Search/listing tools + "WebFetch", # Could use file:// URLs + "Task", # Could spawn agent to bypass + } + input.tool_name in file_operation_tools + + # Check if any parameter contains a protected path (case-insensitive) + # TOB-4 fix: Prefer canonical path (input.resolved_file_path) when available, + # but fall back to raw tool_input fields for pattern-based tools (Glob/Grep) + # that don't have file paths that can be canonicalized + file_path := get_file_path_with_preprocessing_fallback + file_path != "" + is_protected_path(file_path) + + # Get configured message from signals (fallback to default) + message := get_configured_message + + decision := { + "rule_id": "BUILTIN-RULEBOOK-SECURITY", + "reason": concat("", [message, " (blocked file operation on ", file_path, ")"]), + "severity": "HIGH", + } +} + +# Block Bash commands that reference any protected path +# Total lockdown - NO whitelist (unlike protected_paths builtin) +halt contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + # Check if command references any protected path + # Bash tool uses tool_input.command, not params.command + command := lower(input.tool_input.command) + + # Iterate over all protected paths + some protected_path in get_protected_paths + contains_protected_reference(command, protected_path) + + message := get_configured_message + + decision := { + "rule_id": "BUILTIN-RULEBOOK-SECURITY", 
+ "reason": concat("", [message, " (detected protected path reference in bash command)"]), + "severity": "HIGH", + } +} + +# Block symlink creation involving any protected path (TOB-EQTY-LAB-CUPCAKE-4) +halt contains decision if { + input.hook_event_name == "PreToolUse" + input.tool_name == "Bash" + + command := lower(input.tool_input.command) + + # Check if command creates symlink involving ANY protected path (source OR target) + some protected_path in get_protected_paths + commands.symlink_involves_path(command, protected_path) + + message := get_configured_message + + decision := { + "rule_id": "BUILTIN-RULEBOOK-SECURITY", + "reason": concat("", [message, " (symlink creation involving protected path is not permitted)"]), + "severity": "HIGH", + } +} + +# Check if a file path matches any protected path +is_protected_path(path) if { + protected_paths := get_protected_paths + some protected_path in protected_paths + path_matches(path, protected_path) +} + +# Path matching logic (supports substring and directory matching) +path_matches(path, pattern) if { + # Exact match (case-insensitive) + lower(path) == lower(pattern) +} + +path_matches(path, pattern) if { + # Substring match - handles both file and directory references + # "/full/path/.cupcake/file" matches ".cupcake" + # "/full/path/secrets/api.key" matches "secrets/" + lower_path := lower(path) + lower_pattern := lower(pattern) + contains(lower_path, lower_pattern) +} + +path_matches(path, pattern) if { + # Directory match without trailing slash + # Pattern "secrets" should match "/full/path/secrets/file" + not endswith(pattern, "/") + lower_path := lower(path) + lower_pattern := lower(pattern) + + # Add slash to ensure directory boundary + pattern_with_slash := concat("", [lower_pattern, "/"]) + contains(lower_path, pattern_with_slash) +} + +path_matches(path, pattern) if { + # Canonical directory paths don't have trailing slashes + # Pattern ".cupcake/" should match canonical path "/tmp/xyz/.cupcake" + # 
This handles the case where preprocessing canonicalizes directory paths + endswith(pattern, "/") + pattern_without_slash := substring(pattern, 0, count(pattern) - 1) + lower_path := lower(path) + lower_pattern := lower(pattern_without_slash) + + # Ensure directory boundary by checking for /{pattern} suffix + path_suffix := concat("", ["/", lower_pattern]) + endswith(lower_path, path_suffix) +} + +path_matches(path, pattern) if { + # Protected path with trailing slash should also match without the slash + # This handles Glob patterns like ".cupcake*" matching protected path ".cupcake/" + # Also handles paths/patterns that reference the directory without trailing slash + endswith(pattern, "/") + pattern_without_slash := substring(pattern, 0, count(pattern) - 1) + contains(lower(path), lower(pattern_without_slash)) +} + +# Check if command references a protected path +contains_protected_reference(cmd, protected_path) if { + # Direct reference (case-insensitive) + contains(cmd, lower(protected_path)) +} + +contains_protected_reference(cmd, protected_path) if { + # Without trailing slash if it's a directory pattern + # "secrets/" pattern should also match "secrets" in command + endswith(protected_path, "/") + path_without_slash := substring(lower(protected_path), 0, count(protected_path) - 1) + contains(cmd, path_without_slash) +} + +# Get configured message from builtin config +get_configured_message := msg if { + # Direct access to builtin config (no signal execution needed) + msg := input.builtin_config.rulebook_security_guardrails.message +} else := msg if { + # Fallback to default if config not present + msg := "Cupcake configuration files are protected from modification" +} + +# Extract file path from tool input based on tool type +get_file_path_from_tool_input := path if { + # Standard file_path parameter (Edit, Write, MultiEdit, NotebookEdit, Read) + path := input.tool_input.file_path +} else := path if { + # Path parameter (Grep, Glob) + path := 
input.tool_input.path +} else := path if { + # Pattern parameter might contain path (Glob) + path := input.tool_input.pattern +} else := path if { + # URL parameter for WebFetch (could be file:// URL) + path := input.tool_input.url +} else := path if { + # Task prompt might contain .cupcake references + path := input.tool_input.prompt +} else := path if { + # Notebook path for NotebookEdit + path := input.tool_input.notebook_path +} else := path if { + # Some tools use params instead of tool_input + path := input.params.file_path +} else := path if { + path := input.params.path +} else := path if { + path := input.params.pattern +} else := "" + +# TOB-4 aware path extraction: Prefer canonical path from preprocessing, +# fall back to raw tool_input only for Glob (patterns can't be canonicalized) +# +# FIXED: GitHub Copilot review - Grep symlink bypass (TOB-4 defense) +# - Grep's 'path' field now uses canonical paths (closes symlink bypass) +# - Glob's 'pattern' field still uses raw patterns (can't be canonicalized) +# +# TODO: Known Glob limitations (complex pattern parsing required): +# - Glob(pattern="backup/**/*.rego") where "backup" is symlink to .cupcake +# - Glob(pattern="**/*.rego") searches symlinks without .cupcake in pattern +# - Requires pattern parsing before file expansion to fully address +get_file_path_with_preprocessing_fallback := path if { + # For Glob only, use raw pattern since it can't be canonicalized (e.g., "**/*.rs") + # Grep's 'path' field CAN be canonicalized, so it goes through TOB-4 defense + input.tool_name == "Glob" + path := get_file_path_from_tool_input +} else := input.resolved_file_path if { + # For other tools (including Grep), use canonical path from Rust preprocessing (TOB-4 defense) + input.resolved_file_path != "" +} else := path if { + # Final fallback + path := get_file_path_from_tool_input +} + +# Helper: Get list of protected paths from builtin config +get_protected_paths := paths if { + # Direct access to builtin config 
(no signal execution needed) + paths := input.builtin_config.rulebook_security_guardrails.protected_paths +} else := paths if { + # Default protected paths + paths := [".cupcake/"] +} diff --git a/.cupcake/policies/opencode/example.rego b/.cupcake/policies/opencode/example.rego new file mode 100644 index 0000000..0fb2487 --- /dev/null +++ b/.cupcake/policies/opencode/example.rego @@ -0,0 +1,33 @@ +# METADATA +# scope: package +# title: Example Policy +# description: A minimal example policy that never fires +# custom: +# routing: +# required_events: ["PreToolUse"] +# required_tools: ["Bash"] +package cupcake.policies.example + +import rego.v1 + +# This rule will never fire - it's just here to prevent OPA compilation issues +# It checks for a command that nobody would ever type +deny contains decision if { + input.tool_input.command == "CUPCAKE_EXAMPLE_RULE_THAT_NEVER_FIRES_12345" + decision := { + "reason": "This will never happen", + "severity": "LOW", + "rule_id": "EXAMPLE-001" + } +} + +# Replace the above with your actual policies +# Example of a real policy: +# deny contains decision if { +# contains(input.tool_input.command, "rm -rf /") +# decision := { +# "reason": "Dangerous command blocked", +# "severity": "HIGH", +# "rule_id": "SAFETY-001" +# } +# } diff --git a/.cupcake/policies/opencode/prevent_any_type.rego b/.cupcake/policies/opencode/prevent_any_type.rego index 0b9be0e..89e4022 100644 --- a/.cupcake/policies/opencode/prevent_any_type.rego +++ b/.cupcake/policies/opencode/prevent_any_type.rego @@ -5,18 +5,13 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.prevent_any_type import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null 
-} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -128,7 +123,7 @@ any_type_patterns := [ # Block Write/Edit operations that introduce Any in Python files deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} # Only enforce for Python files file_path := lower(resolved_file_path) @@ -149,7 +144,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"patch", "apply_patch"} + tool_name in {"Patch", "ApplyPatch"} content := patch_content content != null @@ -166,7 +161,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits file_path := lower(edit_path(edit)) diff --git a/.cupcake/policies/opencode/prevent_type_suppression.rego b/.cupcake/policies/opencode/prevent_type_suppression.rego index 6410e39..e1ec86c 100644 --- a/.cupcake/policies/opencode/prevent_type_suppression.rego +++ b/.cupcake/policies/opencode/prevent_type_suppression.rego @@ -5,18 +5,13 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.prevent_type_suppression import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -123,7 +118,7 @@ type_suppression_patterns := [ # Block Write/Edit operations that 
introduce type suppression in Python files deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} # Only enforce for Python files file_path := lower(resolved_file_path) @@ -144,7 +139,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"patch", "apply_patch"} + tool_name in {"Patch", "ApplyPatch"} content := patch_content content != null @@ -161,7 +156,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits file_path := lower(edit_path(edit)) diff --git a/.cupcake/policies/opencode/warn_baselines_edit.rego b/.cupcake/policies/opencode/warn_baselines_edit.rego index 463137f..4d3410f 100644 --- a/.cupcake/policies/opencode/warn_baselines_edit.rego +++ b/.cupcake/policies/opencode/warn_baselines_edit.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.warn_baselines_edit import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -104,7 +99,7 @@ file_path_pattern := `tests/quality/baselines\.json$` deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} file_path := resolved_file_path regex.match(file_path_pattern, file_path) @@ -118,7 +113,7 @@ deny contains 
decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + tool_name == "MultiEdit" some edit in tool_input.edits file_path := edit_path(edit) diff --git a/.cupcake/policies/opencode/warn_baselines_edit_bash.rego b/.cupcake/policies/opencode/warn_baselines_edit_bash.rego index 4647ab8..0b67e3d 100644 --- a/.cupcake/policies/opencode/warn_baselines_edit_bash.rego +++ b/.cupcake/policies/opencode/warn_baselines_edit_bash.rego @@ -5,7 +5,7 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["bash"] +# required_tools: ["Bash"] package cupcake.policies.opencode.warn_baselines_edit_bash import rego.v1 diff --git a/.cupcake/policies/opencode/warn_large_file.rego b/.cupcake/policies/opencode/warn_large_file.rego index c49d9b7..db96c11 100644 --- a/.cupcake/policies/opencode/warn_large_file.rego +++ b/.cupcake/policies/opencode/warn_large_file.rego @@ -5,17 +5,12 @@ # custom: # routing: # required_events: ["PreToolUse"] -# required_tools: ["apply_patch", "edit", "multiedit", "notebookedit", "patch", "write"] +# required_tools: ["ApplyPatch", "Edit", "MultiEdit", "NotebookEdit", "Patch", "Write"] package cupcake.policies.opencode.warn_large_file import rego.v1 -tool_name := input.tool_name if { - input.tool_name != null -} else := input.tool - -tool_input := input.tool_input if { - input.tool_input != null -} else := input.args +tool_name := input.tool_name +tool_input := input.tool_input resolved_file_path := input.resolved_file_path if { input.resolved_file_path != null @@ -126,7 +121,7 @@ pattern := `(?:.*\n){500,}` deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"write", "edit", "notebookedit"} + tool_name in {"Write", "Edit", "NotebookEdit"} file_path := resolved_file_path regex.match(file_path_pattern, file_path) @@ -144,7 +139,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name == "multiedit" + 
tool_name == "MultiEdit" some edit in tool_input.edits file_path := edit_path(edit) @@ -163,7 +158,7 @@ deny contains decision if { deny contains decision if { input.hook_event_name == "PreToolUse" - tool_name in {"patch", "apply_patch"} + tool_name in {"Patch", "ApplyPatch"} patch := patch_content patch != null diff --git a/.cupcake/rulebook.yml b/.cupcake/rulebook.yml new file mode 100644 index 0000000..a56e5b2 --- /dev/null +++ b/.cupcake/rulebook.yml @@ -0,0 +1,216 @@ +# Cupcake Base Configuration Template +# This template demonstrates all available builtin abstractions and configuration options. +# Copy this file to .cupcake/rulebook.yml and uncomment/modify as needed. + +# ============================================================================ +# SIGNALS - External data providers +# ============================================================================ +# Signals are commands that provide data to policies. They can return strings +# or JSON structures. Convention: place scripts in .cupcake/signals/ directory +# for auto-discovery, or define explicitly here. + +signals: + # Example: Simple string signal + # current_branch: + # command: "git branch --show-current" + # timeout_seconds: 2 + + # Example: Structured JSON signal + # system_info: + # command: 'echo "{\"os\": \"$(uname)\", \"user\": \"$(whoami)\"}"' + # timeout_seconds: 5 + + # Note: Signals in .cupcake/signals/ directory are auto-discovered + # File signals/foo.sh becomes signal "foo" automatically + +# ============================================================================ +# ACTIONS - Response to policy violations +# ============================================================================ +# Actions are commands executed when policies trigger. Convention: place scripts +# in .cupcake/actions/ directory named by rule_id for auto-discovery. 
+ +actions: + # Actions that run on ANY policy denial + # on_any_denial: + # - command: "echo 'Policy violation detected' >> audit.log" + + # Rule-specific actions (by rule_id) + # by_rule_id: + # SECURITY-001: + # - command: "notify-team --severity high" + # LINT-001: + # - command: "echo 'Code style violation'" + + # Note: Scripts in .cupcake/actions/ are auto-mapped by filename + # File actions/SECURITY-001.sh triggers for rule_id: SECURITY-001 + +# ============================================================================ +# BUILTINS - Higher-level policy abstractions +# ============================================================================ +# Builtins provide common security patterns without writing Rego policies. +# Each builtin can be enabled/disabled and configured independently. +# +# IMPORTANT: Builtins are ENABLED BY DEFAULT when configured. +# Simply configuring a builtin (even with just empty settings) enables it. +# To disable, either remove the configuration or set 'enabled: false'. +# +# FILE PROTECTION BUILTINS (Two-Tier System): +# 1. protected_paths: Makes specific paths read-only (read allowed, write blocked) +# 2. rulebook_security_guardrails: Total lockdown of paths (no read OR write) + +# ============================================================================ +# USAGE NOTES +# ============================================================================ +# 1. Builtins are processed BEFORE custom policies, allowing you to set +# foundational rules that custom policies can build upon. +# +# 2. Signal commands are executed with 'sh -c' and should output valid JSON +# for structured data, or plain text for simple strings. +# +# 3. All paths are relative to the project root (parent of .cupcake/) +# +# 4. Builtin policies are located in .cupcake/policies/builtins/ and are +# only compiled when their corresponding builtin is enabled. +# +# 5. 
For debugging, use --log-level debug when running Cupcake to see detailed +# information about builtin activation and signal execution. +# +# 6. LIMITATION: Due to Claude Code hook limitations, context can only be +# injected on UserPromptSubmit and SessionStart events. PreToolUse events +# do not support context injection. + +builtins: + # --------------------------------------------------------------------------- + # CLAUDE_CODE_ALWAYS_INJECT_ON_PROMPT - Add context to every user prompt (Claude Code only) + # --------------------------------------------------------------------------- + # Inject additional context with every user interaction. Useful for project + # guidelines, current state awareness, or team conventions. + # Note: Builtins are enabled by default when configured. Use 'enabled: false' to disable. + # Note: This builtin only works with Claude Code due to context injection support. + + # claude_code_always_inject_on_prompt: + # # enabled: true # Optional - defaults to true when configured + # context: + # # Static text context + # - "Follow SOLID principles and write comprehensive tests" + # - "This is a production system - be careful with database changes" + # + # # Dynamic context from command + # - command: "git status --short" + # - command: "date '+Today is %A, %B %d'" + # + # # Context from file + # - file: ".cupcake/coding-standards.md" + # - file: "docs/current-sprint-goals.md" + + # --------------------------------------------------------------------------- + # GIT_PRE_CHECK - Enforce checks before git operations + # --------------------------------------------------------------------------- + # Run validation before allowing git commits, pushes, or merges. Ensures + # code quality and prevents broken commits from entering the repository. 
+ + git_pre_check: + enabled: true + checks: + - command: "echo Validation passed" + message: "Basic validation check" + + # Optional: only apply to certain operations + # operations: ["commit", "push"] # skip for merge + + # --------------------------------------------------------------------------- + # POST_EDIT_CHECK - Validate files after modification + # --------------------------------------------------------------------------- + # Run language-specific validation after files are edited. Provides immediate + # feedback about syntax errors, type issues, or style violations. + + # post_edit_check: + # # enabled: true # Optional - defaults to true when configured + # # Checks by file extension + # by_extension: + # "rs": + # command: "cargo check --message-format short" + # message: "Rust compilation check" + # + # "py": + # command: "python -m py_compile" + # message: "Python syntax validation" + # + # "tsx": + # command: "npx tsc --noEmit" + # message: "TypeScript type checking" + # + # "jsx": + # command: "npx eslint --quiet" + # message: "ESLint validation" + # + # "go": + # command: "go fmt && go vet" + # message: "Go format and vet check" + # + # # Checks by glob pattern (future enhancement) + # # by_pattern: + # # "src/**/*.test.ts": + # # command: "npm test -- --findRelatedTests" + # # message: "Running related tests" + + # --------------------------------------------------------------------------- + # GIT_BLOCK_NO_VERIFY - Prevent bypassing git commit hooks + # --------------------------------------------------------------------------- + # Blocks git commands that use --no-verify flag to bypass pre-commit hooks. + # This ensures code quality checks, linting, and security scans always run. 
+ + git_block_no_verify: + enabled: true + message: "Git operations with --no-verify are not permitted" + # Optional: Add exceptions for specific environments + # exceptions: + # - "CI_ENVIRONMENT" + + # --------------------------------------------------------------------------- + # CLAUDE_CODE_ENFORCE_FULL_FILE_READ - Require complete file reads (Claude Code only) + # --------------------------------------------------------------------------- + # Ensures Claude reads entire files (up to a configurable limit) before + # processing. Prevents partial reads that might miss important context. + # Files larger than max_lines can still use offset/limit parameters. + # Note: This builtin only works with Claude Code. + + # claude_code_enforce_full_file_read: + # enabled: true + # max_lines: 2000 # Files under this size must be read completely + # message: "Please read the entire file first (files under 2000 lines must be read completely)" + + # --------------------------------------------------------------------------- + # PROTECTED_PATHS - User-defined read-only paths + # --------------------------------------------------------------------------- + # Declare specific files or directories as read-only while still allowing + # Claude to read and analyze them. Supports glob patterns. Uses a WHITELIST + # approach for bash commands - only known-safe read operations are allowed. + + protected_paths: + enabled: false + message: "System path modification blocked by policy" + paths: + - "/etc/" + - "/System/" + - "~/.ssh/" + + # Note: Read operations (cat, grep, less, etc.) are allowed + # Write operations (edit, rm, mv, sed -i, etc.) are blocked + + # --------------------------------------------------------------------------- + # RULEBOOK_SECURITY_GUARDRAILS - Cupcake configuration protection + # --------------------------------------------------------------------------- + # Protects the .cupcake directory and other critical paths from any + # modification or inspection. 
This is the highest security level - blocks + # BOTH read and write operations. Essential for protecting Cupcake's own + # configuration and sensitive system files. + + rulebook_security_guardrails: + message: "Cupcake configuration files are protected from modification" + # Protected paths (defaults to [".cupcake/"] if not specified) + protected_paths: + - ".cupcake/" + - ".git/hooks/" + # - "secrets/" # Add your own sensitive directories + diff --git a/.cupcake/system/evaluate.rego b/.cupcake/system/evaluate.rego index 85b85fe..cfba615 100644 --- a/.cupcake/system/evaluate.rego +++ b/.cupcake/system/evaluate.rego @@ -1,5 +1,9 @@ +package cupcake.system + +import rego.v1 + # METADATA -# scope: package +# scope: document # title: System Aggregation Entrypoint for Hybrid Model # authors: ["Cupcake Engine"] # custom: @@ -8,10 +12,11 @@ # routing: # required_events: [] # required_tools: [] -package cupcake.system - -import rego.v1 +# The single entrypoint for the Hybrid Model. +# This uses the `walk()` built-in to recursively traverse data.cupcake.policies, +# automatically discovering and aggregating all decision verbs from all loaded +# policies, regardless of their package name or nesting depth. evaluate := decision_set if { decision_set := { "halts": collect_verbs("halt"), @@ -23,12 +28,18 @@ evaluate := decision_set if { } } +# Helper function to collect all decisions for a specific verb type. +# Uses walk() to recursively find all instances of the verb across +# the entire policy hierarchy under data.cupcake.policies. 
collect_verbs(verb_name) := result if { + # Collect all matching verb sets from the policy tree verb_sets := [value | walk(data.cupcake.policies, [path, value]) path[count(path) - 1] == verb_name ] + # Flatten all sets into a single array + # Since Rego v1 decision verbs are sets, we need to convert to arrays all_decisions := [decision | some verb_set in verb_sets some decision in verb_set @@ -37,4 +48,5 @@ collect_verbs(verb_name) := result if { result := all_decisions } +# Default to empty arrays if no decisions found default collect_verbs(_) := [] diff --git a/.openagent_context.json b/.openagent_context.json new file mode 100644 index 0000000..c9edac6 --- /dev/null +++ b/.openagent_context.json @@ -0,0 +1,11 @@ +{ + "context_dir_name": "context", + "context_root": "/home/trav/context", + "mission_context": "/home/trav/context/b2331579-f252-4806-9dc4-1cf6c3ff4cd5", + "mission_id": "b2331579-f252-4806-9dc4-1cf6c3ff4cd5", + "working_dir": "/home/trav/repos/noteflow", + "workspace_id": "b293c268-0b8e-4ddc-ad53-5fb34e43237a", + "workspace_name": "noteflow", + "workspace_root": "/home/trav/repos/noteflow", + "workspace_type": "host" +} \ No newline at end of file diff --git a/.opencode/plugin/cupcake.js b/.opencode/plugin/cupcake.js new file mode 100644 index 0000000..b9ae1e8 --- /dev/null +++ b/.opencode/plugin/cupcake.js @@ -0,0 +1,380 @@ +/** + * Cupcake OpenCode Plugin + * + * Install: Copy this file to .opencode/plugin/cupcake.js + * + * This plugin integrates Cupcake policy enforcement with OpenCode. + * It intercepts tool executions and evaluates them against your policies. 
+ */ + +// src/types.ts +var DEFAULT_CONFIG = { + enabled: true, + cupcakePath: "cupcake", + harness: "opencode", + logLevel: "warn", + // Default to warn - info/debug are noisy in TUI + timeoutMs: 5e3, + failMode: "closed", + cacheDecisions: false, + showToasts: true, + toastDurationMs: 5e3 +}; +function getToastVariant(decision) { + switch (decision) { + case "allow": + return "success"; + case "ask": + return "warning"; + case "deny": + case "block": + return "error"; + default: + return "info"; + } +} + +// src/event-builder.ts +function normalizeTool(tool) { + return tool; +} +function buildPreToolUseEvent(sessionId, cwd, tool, args, agent, messageId, callId) { + const event = { + hook_event_name: "PreToolUse", + session_id: sessionId, + cwd, + tool: normalizeTool(tool), + args + }; + if (agent) { + event.agent = agent; + } + if (messageId) { + event.message_id = messageId; + } + if (callId) { + event.call_id = callId; + } + return event; +} +function buildPermissionEvent(sessionId, cwd, permissionId, permissionType, title, metadata, pattern, messageId, callId) { + const event = { + hook_event_name: "PermissionRequest", + session_id: sessionId, + cwd, + permission_id: permissionId, + permission_type: permissionType, + title, + metadata + }; + if (pattern) { + event.pattern = pattern; + } + if (messageId) { + event.message_id = messageId; + } + if (callId) { + event.call_id = callId; + } + return event; +} + +// src/executor.ts +async function executeCupcake(config, event) { + const startTime = Date.now(); + const eventJson = JSON.stringify(event); + if (config.logLevel === "debug") { + console.error(`[cupcake] DEBUG: Executing cupcake`); + console.error(`[cupcake] DEBUG: Event:`, eventJson); + } + const proc = Bun.spawn([config.cupcakePath, "eval", "--harness", config.harness], { + stdin: "pipe", + stdout: "pipe", + stderr: "ignore" + }); + proc.stdin.write(eventJson); + proc.stdin.end(); + const timeoutPromise = new Promise((_, reject) => { + setTimeout(() => 
{ + proc.kill(); + reject( + new Error( + `Policy evaluation timed out after ${config.timeoutMs}ms. Consider optimizing policies or increasing timeout.` + ) + ); + }, config.timeoutMs); + }); + try { + const [stdout, exitCode] = await Promise.race([ + Promise.all([new Response(proc.stdout).text(), proc.exited]), + timeoutPromise + ]); + const elapsed = Date.now() - startTime; + if (config.logLevel === "debug") { + console.error(`[cupcake] DEBUG: Cupcake response (${elapsed}ms):`, stdout); + } + if (exitCode !== 0) { + const error = new Error(`Cupcake exited with code ${exitCode}`); + if (config.failMode === "open") { + console.error(`[cupcake] ERROR: ${error.message}`); + console.error(`[cupcake] WARN: Allowing operation in fail-open mode.`); + return { decision: "allow" }; + } + throw error; + } + const response = JSON.parse(stdout); + if (config.logLevel === "debug") { + console.error(`[cupcake] DEBUG: Decision: ${response.decision} (${elapsed}ms)`); + } + return response; + } catch (error) { + if (config.failMode === "open") { + console.error(`[cupcake] ERROR: ${error.message}`); + console.error(`[cupcake] WARN: Allowing operation in fail-open mode.`); + return { decision: "allow" }; + } + throw error; + } +} + +// src/enforcer.ts +function formatDecision(response) { + const { decision, reason, rule_id, severity } = response; + let title; + let message; + let blocked = false; + switch (decision) { + case "allow": + title = "Allowed"; + message = reason || "Operation allowed by policy"; + break; + case "deny": + case "block": + title = "Policy Violation"; + message = reason || `Operation blocked by policy`; + blocked = true; + break; + case "ask": + title = "Approval Required"; + message = reason || "This operation requires approval"; + blocked = true; + break; + default: + title = "Unknown Decision"; + message = `Policy returned unknown decision: ${decision}`; + blocked = true; + } + if (rule_id || severity) { + const details = []; + if (rule_id) 
details.push(`Rule: ${rule_id}`); + if (severity) details.push(`Severity: ${severity}`); + message += ` +(${details.join(", ")})`; + } + return { + blocked, + title, + message, + variant: getToastVariant(decision), + decision, + ruleId: rule_id, + severity + }; +} +function formatErrorMessage(formatted) { + let message = ""; + if (formatted.decision === "deny" || formatted.decision === "block") { + message += "\u274C Policy Violation\n\n"; + } else if (formatted.decision === "ask") { + message += "\u26A0\uFE0F Approval Required\n\n"; + } + message += formatted.message; + if (formatted.decision === "ask") { + message += "\n\nNote: This operation requires manual approval. "; + message += "To proceed, review the policy and temporarily disable it if appropriate, "; + message += "then re-run the command."; + } + return message; +} + +// src/index.ts +import { existsSync, readFileSync } from "fs"; +import { join } from "path"; +function loadConfig(directory) { + const configPath = join(directory, ".cupcake", "opencode.json"); + if (existsSync(configPath)) { + try { + const configData = readFileSync(configPath, "utf-8"); + const userConfig = JSON.parse(configData); + return { ...DEFAULT_CONFIG, ...userConfig }; + } catch (error) { + console.error(`[cupcake] WARN: Failed to load config from ${configPath}: ${error.message}`); + console.error(`[cupcake] WARN: Using default configuration`); + } + } + return DEFAULT_CONFIG; +} +async function showToast(client, config, title, message, variant) { + if (!config.showToasts || !client) { + return; + } + try { + await client.tui.showToast({ + body: { + title, + message, + variant, + duration: config.toastDurationMs + } + }); + } catch (error) { + if (config.logLevel === "debug") { + console.error(`[cupcake] DEBUG: Failed to show toast: ${error.message}`); + } + } +} +function log(config, level, message, ...args) { + const levels = ["debug", "info", "warn", "error"]; + const configLevel = levels.indexOf(config.logLevel); + const 
messageLevel = levels.indexOf(level); + if (messageLevel >= configLevel) { + const prefix = `[cupcake] ${level.toUpperCase()}:`; + if (args.length > 0) { + console.error(prefix, message, ...args); + } else { + console.error(prefix, message); + } + } +} +var CupcakePlugin = async ({ directory, client }) => { + const config = loadConfig(directory); + if (!config.enabled) { + log(config, "debug", "Plugin is disabled in configuration"); + return {}; + } + log(config, "debug", "Cupcake plugin initialized"); + return { + /** + * Hook: tool.execute.before + * + * Fired before any tool execution. This is where we enforce policies. + * Throwing an error blocks the tool execution. + */ + "tool.execute.before": async (input, output) => { + try { + log(config, "debug", `tool.execute.before fired for ${input.tool}`); + log(config, "debug", "Args:", output.args); + const event = buildPreToolUseEvent( + input.sessionID || "unknown", + directory, + input.tool, + output.args, + void 0, + // agent - not provided in current hook + void 0, + // messageId - not provided in current hook + input.callID + ); + const response = await executeCupcake(config, event); + const formatted = formatDecision(response); + if (formatted.decision !== "allow") { + await showToast(client, config, formatted.title, formatted.message, formatted.variant); + } + if (formatted.blocked) { + throw new Error(formatErrorMessage(formatted)); + } + log(config, "debug", "Allowing tool execution"); + } catch (error) { + throw error; + } + }, + /** + * Hook: permission.ask + * + * Fired when OpenCode needs to request permission for an operation. + * This integrates with OpenCode's native permission UI. 
+ * + * - Set output.status = "allow" to auto-approve + * - Set output.status = "deny" to auto-deny + * - Leave as "ask" to show native permission dialog + */ + "permission.ask": async (input, output) => { + try { + log(config, "debug", `permission.ask fired for ${input.type}`); + log(config, "debug", "Permission:", input); + const event = buildPermissionEvent( + input.sessionID, + directory, + input.id, + input.type, + input.title, + input.metadata, + input.pattern, + input.messageID, + input.callID + ); + const response = await executeCupcake(config, event); + switch (response.decision) { + case "allow": + output.status = "allow"; + log(config, "debug", `Auto-allowing permission: ${input.type}`); + break; + case "deny": + case "block": + output.status = "deny"; + log(config, "debug", `Auto-denying permission: ${input.type}`); + await showToast( + client, + config, + "Permission Denied", + response.reason || `Permission ${input.type} blocked by policy`, + "error" + ); + break; + case "ask": + default: + output.status = "ask"; + log(config, "debug", `Deferring permission to user: ${input.type}`); + if (response.reason) { + await showToast(client, config, "Approval Recommended", response.reason, "warning"); + } + break; + } + } catch (error) { + log(config, "error", `Permission evaluation failed: ${error.message}`); + output.status = "ask"; + } + }, + /** + * Hook: tool.execute.after + * + * Fired after tool execution. Used for audit logging. + * Cannot prevent execution (already happened). + */ + "tool.execute.after": async (input, output) => { + log(config, "debug", `tool.execute.after fired for ${input.tool}`); + log(config, "debug", "Output:", output.output?.substring(0, 200)); + }, + /** + * Hook: event + * + * Fired for all OpenCode events. Used for comprehensive audit logging. 
+ */ + event: async ({ event }) => { + if (config.logLevel !== "debug") { + return; + } + const auditEvents = [ + "tool.executed", + "permission.replied", + "file.edited", + "session.created", + "session.aborted" + ]; + if (auditEvents.includes(event.type)) { + log(config, "debug", `Audit event: ${event.type}`, event.properties); + } + } + }; +}; +export { CupcakePlugin }; diff --git a/client/biome.json b/client/biome.json index 13c6b7d..e48309a 100644 --- a/client/biome.json +++ b/client/biome.json @@ -1,5 +1,5 @@ { - "$schema": "https://biomejs.dev/schemas/2.3.10/schema.json", + "$schema": "https://biomejs.dev/schemas/2.3.11/schema.json", "vcs": { "enabled": true, "clientKind": "git", diff --git a/client/src-tauri/src/commands/analytics.rs b/client/src-tauri/src/commands/analytics.rs new file mode 100644 index 0000000..2ec261b --- /dev/null +++ b/client/src-tauri/src/commands/analytics.rs @@ -0,0 +1,35 @@ +use std::sync::Arc; + +use tauri::State; + +use crate::error::Result; +use crate::grpc::types::analytics::{AnalyticsOverview, ListSpeakerStatsResult}; +use crate::state::AppState; + +#[tauri::command(rename_all = "snake_case")] +pub async fn get_analytics_overview( + state: State<'_, Arc>, + start_time: f64, + end_time: f64, + project_id: Option, + project_ids: Option>, +) -> Result { + state + .grpc_client + .get_analytics_overview(start_time, end_time, project_id, project_ids) + .await +} + +#[tauri::command(rename_all = "snake_case")] +pub async fn list_speaker_stats( + state: State<'_, Arc>, + start_time: f64, + end_time: f64, + project_id: Option, + project_ids: Option>, +) -> Result { + state + .grpc_client + .list_speaker_stats(start_time, end_time, project_id, project_ids) + .await +} diff --git a/client/src-tauri/src/commands/mod.rs b/client/src-tauri/src/commands/mod.rs index ae2bf15..b2272a6 100644 --- a/client/src-tauri/src/commands/mod.rs +++ b/client/src-tauri/src/commands/mod.rs @@ -2,6 +2,7 @@ //! //! 
Each module corresponds to a functional area and exposes #[tauri::command] functions. +mod analytics; mod annotation; mod apps; mod asr; @@ -26,6 +27,7 @@ mod shell; mod summary; mod streaming_config; mod sync; +mod tasks; mod triggers; mod webhooks; @@ -39,6 +41,7 @@ mod playback_tests; #[cfg(test)] mod recording_tests; +pub use analytics::*; pub use annotation::*; pub use apps::*; pub use asr::*; @@ -65,6 +68,7 @@ pub use shell::*; pub use summary::*; pub use streaming_config::*; pub use sync::*; +pub use tasks::*; pub use testing::*; pub use triggers::*; pub use webhooks::*; diff --git a/client/src-tauri/src/commands/tasks.rs b/client/src-tauri/src/commands/tasks.rs new file mode 100644 index 0000000..9f840c2 --- /dev/null +++ b/client/src-tauri/src/commands/tasks.rs @@ -0,0 +1,39 @@ +use std::sync::Arc; + +use tauri::State; + +use crate::error::Result; +use crate::grpc::types::tasks::{ListTasksResult, Task}; +use crate::state::AppState; + +#[tauri::command(rename_all = "snake_case")] +pub async fn list_tasks( + state: State<'_, Arc>, + statuses: Option>, + limit: Option, + offset: Option, + project_id: Option, + project_ids: Option>, + meeting_id: Option, +) -> Result { + state + .grpc_client + .list_tasks(statuses, limit, offset, project_id, project_ids, meeting_id) + .await +} + +#[tauri::command(rename_all = "snake_case")] +pub async fn update_task( + state: State<'_, Arc>, + task_id: String, + text: Option, + status: Option, + assignee_person_id: Option, + due_date: Option, + priority: Option, +) -> Result { + state + .grpc_client + .update_task(task_id, text, status, assignee_person_id, due_date, priority) + .await +} diff --git a/client/src-tauri/src/grpc/client/analytics.rs b/client/src-tauri/src/grpc/client/analytics.rs new file mode 100644 index 0000000..7755798 --- /dev/null +++ b/client/src-tauri/src/grpc/client/analytics.rs @@ -0,0 +1,80 @@ +use crate::error::Result; +use crate::grpc::noteflow as pb; +use crate::grpc::types::analytics::{ + 
AnalyticsOverview, DailyMeetingStats, ListSpeakerStatsResult, SpeakerStat, +}; + +use super::core::GrpcClient; + +impl GrpcClient { + pub async fn get_analytics_overview( + &self, + start_time: f64, + end_time: f64, + project_id: Option, + project_ids: Option>, + ) -> Result { + let mut client = self.get_client()?; + let response = client + .get_analytics_overview(pb::GetAnalyticsOverviewRequest { + start_time, + end_time, + project_id, + project_ids: project_ids.unwrap_or_default(), + }) + .await? + .into_inner(); + + Ok(AnalyticsOverview { + daily: response.daily.into_iter().map(map_daily_stats).collect(), + total_meetings: response.total_meetings, + total_duration: response.total_duration, + total_words: response.total_words, + total_segments: response.total_segments, + speaker_count: response.speaker_count, + }) + } + + pub async fn list_speaker_stats( + &self, + start_time: f64, + end_time: f64, + project_id: Option, + project_ids: Option>, + ) -> Result { + let mut client = self.get_client()?; + let response = client + .list_speaker_stats(pb::ListSpeakerStatsRequest { + start_time, + end_time, + project_id, + project_ids: project_ids.unwrap_or_default(), + }) + .await? 
+ .into_inner(); + + Ok(ListSpeakerStatsResult { + speakers: response.speakers.into_iter().map(map_speaker_stat).collect(), + }) + } +} + +fn map_daily_stats(proto: pb::DailyMeetingStatsProto) -> DailyMeetingStats { + DailyMeetingStats { + date: proto.date, + meetings: proto.meetings, + total_duration: proto.total_duration, + word_count: proto.word_count, + } +} + +fn map_speaker_stat(proto: pb::SpeakerStatProto) -> SpeakerStat { + SpeakerStat { + speaker_id: proto.speaker_id, + display_name: proto.display_name, + total_time: proto.total_time, + segment_count: proto.segment_count, + meeting_count: proto.meeting_count, + avg_confidence: proto.avg_confidence, + } +} diff --git a/client/src-tauri/src/grpc/client/mod.rs b/client/src-tauri/src/grpc/client/mod.rs index 7f22d9b..41dc141 100644 --- a/client/src-tauri/src/grpc/client/mod.rs +++ b/client/src-tauri/src/grpc/client/mod.rs @@ -12,8 +12,11 @@ //! - `observability`: Logs and metrics operations (Sprint 9) //! - `asr`: ASR configuration operations (Sprint 19) //! - `streaming`: Streaming configuration operations (Sprint 20) +//! - `tasks`: Task management operations (Bugfinder Sprint) +//! - `analytics`: Analytics aggregate operations (Bugfinder Sprint) //! 
- `converters`: Protobuf to domain type converters +mod analytics; mod annotations; mod asr; mod streaming; @@ -29,6 +32,7 @@ mod oidc; mod preferences; mod projects; mod sync; +mod tasks; mod webhooks; // Re-export the main types diff --git a/client/src-tauri/src/grpc/client/tasks.rs b/client/src-tauri/src/grpc/client/tasks.rs new file mode 100644 index 0000000..c7c940e --- /dev/null +++ b/client/src-tauri/src/grpc/client/tasks.rs @@ -0,0 +1,144 @@ +use crate::error::Result; +use crate::grpc::noteflow as pb; +use crate::grpc::types::tasks::{ListTasksResult, Task, TaskStatus, TaskWithMeeting}; + +use super::core::GrpcClient; + +impl GrpcClient { + pub async fn list_tasks( + &self, + statuses: Option>, + limit: Option, + offset: Option, + project_id: Option, + project_ids: Option>, + meeting_id: Option, + ) -> Result { + let mut client = self.get_client()?; + let status_enums: Vec = statuses + .unwrap_or_default() + .iter() + .map(|s| string_to_task_status_proto(s)) + .collect(); + + let response = client + .list_tasks(pb::ListTasksRequest { + statuses: status_enums, + limit: limit.unwrap_or(100), + offset: offset.unwrap_or(0), + project_id, + project_ids: project_ids.unwrap_or_default(), + meeting_id, + }) + .await? + .into_inner(); + + Ok(ListTasksResult { + tasks: response.tasks.into_iter().map(map_task_with_meeting).collect(), + total_count: response.total_count, + }) + } + + pub async fn update_task( + &self, + task_id: String, + text: Option, + status: Option, + assignee_person_id: Option, + due_date: Option, + priority: Option, + ) -> Result { + let mut client = self.get_client()?; + let response = client + .update_task(pb::UpdateTaskRequest { + task_id, + text: text.unwrap_or_default(), + status: status.map(|s| string_to_task_status_proto(&s)).unwrap_or(0), + assignee_person_id: assignee_person_id.unwrap_or_default(), + due_date: due_date.unwrap_or(0.0), + priority: priority.unwrap_or(0), + }) + .await? 
+ .into_inner(); + + response + .task + .map(map_task) + .ok_or_else(|| crate::error::Error::InvalidInput("Task not found".into())) + } +} + +fn string_to_task_status_proto(s: &str) -> i32 { + match s { + "open" => pb::TaskStatusProto::TaskStatusOpen as i32, + "done" => pb::TaskStatusProto::TaskStatusDone as i32, + "dismissed" => pb::TaskStatusProto::TaskStatusDismissed as i32, + _ => pb::TaskStatusProto::TaskStatusUnspecified as i32, + } +} + +fn task_status_proto_to_enum(status: i32) -> TaskStatus { + match status { + 1 => TaskStatus::Open, + 2 => TaskStatus::Done, + 3 => TaskStatus::Dismissed, + _ => TaskStatus::Open, + } +} + +fn map_task(proto: pb::TaskProto) -> Task { + Task { + id: proto.id, + meeting_id: if proto.meeting_id.is_empty() { + None + } else { + Some(proto.meeting_id) + }, + action_item_id: if proto.action_item_id == 0 { + None + } else { + Some(proto.action_item_id) + }, + text: proto.text, + status: task_status_proto_to_enum(proto.status), + assignee_person_id: if proto.assignee_person_id.is_empty() { + None + } else { + Some(proto.assignee_person_id) + }, + due_date: if proto.due_date == 0.0 { + None + } else { + Some(proto.due_date) + }, + priority: proto.priority, + completed_at: if proto.completed_at == 0.0 { + None + } else { + Some(proto.completed_at) + }, + } +} + +fn map_task_with_meeting(proto: pb::TaskWithMeetingProto) -> TaskWithMeeting { + TaskWithMeeting { + task: proto.task.map(map_task).unwrap_or_else(|| Task { + id: String::new(), + meeting_id: None, + action_item_id: None, + text: String::new(), + status: TaskStatus::Open, + assignee_person_id: None, + due_date: None, + priority: 0, + completed_at: None, + }), + meeting_title: proto.meeting_title, + meeting_created_at: proto.meeting_created_at, + project_id: if proto.project_id.is_empty() { + None + } else { + Some(proto.project_id) + }, + } +} diff --git a/client/src-tauri/src/grpc/noteflow.rs b/client/src-tauri/src/grpc/noteflow.rs index ceae16c..abb15e0 100644 --- 
a/client/src-tauri/src/grpc/noteflow.rs +++ b/client/src-tauri/src/grpc/noteflow.rs @@ -2378,6 +2378,152 @@ pub struct UpdateWorkspaceSettingsRequest { #[prost(message, optional, tag = "2")] pub settings: ::core::option::Option, } +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TaskProto { + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub meeting_id: ::prost::alloc::string::String, + #[prost(int32, tag = "3")] + pub action_item_id: i32, + #[prost(string, tag = "4")] + pub text: ::prost::alloc::string::String, + #[prost(enumeration = "TaskStatusProto", tag = "5")] + pub status: i32, + #[prost(string, tag = "6")] + pub assignee_person_id: ::prost::alloc::string::String, + #[prost(double, tag = "7")] + pub due_date: f64, + #[prost(int32, tag = "8")] + pub priority: i32, + #[prost(double, tag = "9")] + pub completed_at: f64, + #[prost(double, tag = "10")] + pub created_at: f64, + #[prost(double, tag = "11")] + pub updated_at: f64, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TaskWithMeetingProto { + #[prost(message, optional, tag = "1")] + pub task: ::core::option::Option, + #[prost(string, tag = "2")] + pub meeting_title: ::prost::alloc::string::String, + #[prost(double, tag = "3")] + pub meeting_created_at: f64, + #[prost(string, tag = "4")] + pub project_id: ::prost::alloc::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListTasksRequest { + #[prost(enumeration = "TaskStatusProto", repeated, tag = "1")] + pub statuses: ::prost::alloc::vec::Vec, + #[prost(int32, tag = "2")] + pub limit: i32, + #[prost(int32, tag = "3")] + pub offset: i32, + #[prost(string, optional, tag = "4")] + pub project_id: ::core::option::Option<::prost::alloc::string::String>, + #[prost(string, repeated, tag = "5")] + pub project_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(string, optional, tag = "6")] + pub meeting_id: 
::core::option::Option<::prost::alloc::string::String>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListTasksResponse { + #[prost(message, repeated, tag = "1")] + pub tasks: ::prost::alloc::vec::Vec, + #[prost(int32, tag = "2")] + pub total_count: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateTaskRequest { + #[prost(string, tag = "1")] + pub task_id: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub text: ::prost::alloc::string::String, + #[prost(enumeration = "TaskStatusProto", tag = "3")] + pub status: i32, + #[prost(string, tag = "4")] + pub assignee_person_id: ::prost::alloc::string::String, + #[prost(double, tag = "5")] + pub due_date: f64, + #[prost(int32, tag = "6")] + pub priority: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateTaskResponse { + #[prost(message, optional, tag = "1")] + pub task: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetAnalyticsOverviewRequest { + #[prost(double, tag = "1")] + pub start_time: f64, + #[prost(double, tag = "2")] + pub end_time: f64, + #[prost(string, optional, tag = "3")] + pub project_id: ::core::option::Option<::prost::alloc::string::String>, + #[prost(string, repeated, tag = "4")] + pub project_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DailyMeetingStatsProto { + #[prost(string, tag = "1")] + pub date: ::prost::alloc::string::String, + #[prost(int32, tag = "2")] + pub meetings: i32, + #[prost(double, tag = "3")] + pub total_duration: f64, + #[prost(int32, tag = "4")] + pub word_count: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetAnalyticsOverviewResponse { + #[prost(message, repeated, tag = "1")] + pub daily: ::prost::alloc::vec::Vec, + #[prost(int32, tag = "2")] + pub total_meetings: i32, + #[prost(double, tag = "3")] + pub total_duration: f64, + #[prost(int32, tag = "4")] + pub 
total_words: i32, + #[prost(int32, tag = "5")] + pub total_segments: i32, + #[prost(int32, tag = "6")] + pub speaker_count: i32, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SpeakerStatProto { + #[prost(string, tag = "1")] + pub speaker_id: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub display_name: ::prost::alloc::string::String, + #[prost(double, tag = "3")] + pub total_time: f64, + #[prost(int32, tag = "4")] + pub segment_count: i32, + #[prost(int32, tag = "5")] + pub meeting_count: i32, + #[prost(double, tag = "6")] + pub avg_confidence: f64, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListSpeakerStatsRequest { + #[prost(double, tag = "1")] + pub start_time: f64, + #[prost(double, tag = "2")] + pub end_time: f64, + #[prost(string, optional, tag = "3")] + pub project_id: ::core::option::Option<::prost::alloc::string::String>, + #[prost(string, repeated, tag = "4")] + pub project_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListSpeakerStatsResponse { + #[prost(message, repeated, tag = "1")] + pub speakers: ::prost::alloc::vec::Vec, +} #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum UpdateType { @@ -2816,6 +2962,38 @@ impl ProjectRoleProto { } } } +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum TaskStatusProto { + TaskStatusUnspecified = 0, + TaskStatusOpen = 1, + TaskStatusDone = 2, + TaskStatusDismissed = 3, +} +impl TaskStatusProto { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::TaskStatusUnspecified => "TASK_STATUS_UNSPECIFIED", + Self::TaskStatusOpen => "TASK_STATUS_OPEN", + Self::TaskStatusDone => "TASK_STATUS_DONE", + Self::TaskStatusDismissed => "TASK_STATUS_DISMISSED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "TASK_STATUS_UNSPECIFIED" => Some(Self::TaskStatusUnspecified), + "TASK_STATUS_OPEN" => Some(Self::TaskStatusOpen), + "TASK_STATUS_DONE" => Some(Self::TaskStatusDone), + "TASK_STATUS_DISMISSED" => Some(Self::TaskStatusDismissed), + _ => None, + } + } +} /// Generated client implementations. pub mod note_flow_service_client { #![allow( @@ -4912,6 +5090,106 @@ pub mod note_flow_service_client { .insert(GrpcMethod::new("noteflow.NoteFlowService", "GetActiveProject")); self.inner.unary(req, path, codec).await } + /// Task management (Bugfinder Sprint) + pub async fn list_tasks( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/noteflow.NoteFlowService/ListTasks", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("noteflow.NoteFlowService", "ListTasks")); + self.inner.unary(req, path, codec).await + } + pub async fn update_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + 
"/noteflow.NoteFlowService/UpdateTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("noteflow.NoteFlowService", "UpdateTask")); + self.inner.unary(req, path, codec).await + } + /// Analytics (Bugfinder Sprint) + pub async fn get_analytics_overview( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/noteflow.NoteFlowService/GetAnalyticsOverview", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("noteflow.NoteFlowService", "GetAnalyticsOverview"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn list_speaker_stats( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/noteflow.NoteFlowService/ListSpeakerStats", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("noteflow.NoteFlowService", "ListSpeakerStats")); + self.inner.unary(req, path, codec).await + } /// Project membership management (Sprint 18) pub async fn add_project_member( &mut self, diff --git a/client/src-tauri/src/grpc/types/analytics.rs b/client/src-tauri/src/grpc/types/analytics.rs new file mode 100644 index 0000000..9d98697 --- /dev/null +++ b/client/src-tauri/src/grpc/types/analytics.rs @@ -0,0 +1,34 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DailyMeetingStats { + pub date: 
String, + pub meetings: i32, + pub total_duration: f64, + pub word_count: i32, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AnalyticsOverview { + pub daily: Vec, + pub total_meetings: i32, + pub total_duration: f64, + pub total_words: i32, + pub total_segments: i32, + pub speaker_count: i32, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SpeakerStat { + pub speaker_id: String, + pub display_name: String, + pub total_time: f64, + pub segment_count: i32, + pub meeting_count: i32, + pub avg_confidence: f64, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ListSpeakerStatsResult { + pub speakers: Vec, +} diff --git a/client/src-tauri/src/grpc/types/mod.rs b/client/src-tauri/src/grpc/types/mod.rs index 6e98357..d178acd 100644 --- a/client/src-tauri/src/grpc/types/mod.rs +++ b/client/src-tauri/src/grpc/types/mod.rs @@ -3,6 +3,7 @@ //! These types mirror the protobuf definitions and are used for //! communication between Rust and the React frontend via Tauri. 
+pub mod analytics; pub mod asr; pub mod calendar; pub mod core; @@ -16,4 +17,5 @@ pub mod projects; pub mod results; pub mod streaming; pub mod sync; +pub mod tasks; pub mod webhooks; diff --git a/client/src-tauri/src/grpc/types/tasks.rs b/client/src-tauri/src/grpc/types/tasks.rs new file mode 100644 index 0000000..70b1062 --- /dev/null +++ b/client/src-tauri/src/grpc/types/tasks.rs @@ -0,0 +1,43 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum TaskStatus { + #[default] + Open, + Done, + Dismissed, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Task { + pub id: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub meeting_id: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub action_item_id: Option, + pub text: String, + pub status: TaskStatus, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub assignee_person_id: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub due_date: Option, + pub priority: i32, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub completed_at: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TaskWithMeeting { + pub task: Task, + pub meeting_title: String, + pub meeting_created_at: f64, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub project_id: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ListTasksResult { + pub tasks: Vec, + pub total_count: i32, +} diff --git a/client/src-tauri/src/lib.rs b/client/src-tauri/src/lib.rs index 688d726..c81779d 100644 --- a/client/src-tauri/src/lib.rs +++ b/client/src-tauri/src/lib.rs @@ -196,6 +196,12 @@ macro_rules! 
app_invoke_handler { commands::reset_test_recording_state, commands::inject_test_audio, commands::inject_test_tone, + // Tasks (2 commands) - Bugfinder Sprint + commands::list_tasks, + commands::update_task, + // Analytics (2 commands) - Bugfinder Sprint + commands::get_analytics_overview, + commands::list_speaker_stats, ] }; } diff --git a/client/src/api/adapters/tauri/api.ts b/client/src/api/adapters/tauri/api.ts index 6320805..9f8e114 100644 --- a/client/src/api/adapters/tauri/api.ts +++ b/client/src/api/adapters/tauri/api.ts @@ -1,5 +1,6 @@ import type { NoteFlowAPI } from '../../interface'; import type { TauriInvoke, TauriListen } from './types'; +import { createAnalyticsApi } from './sections/analytics'; import { createAnnotationApi } from './sections/annotations'; import { createAppsApi } from './sections/apps'; import { createAsrApi } from './sections/asr'; @@ -17,6 +18,7 @@ import { createPlaybackApi } from './sections/playback'; import { createPreferencesApi } from './sections/preferences'; import { createProjectApi } from './sections/projects'; import { createSummarizationApi } from './sections/summarization'; +import { createTaskApi } from './sections/tasks'; import { createTriggerApi } from './sections/triggers'; import { createWebhookApi } from './sections/webhooks'; @@ -42,5 +44,7 @@ export function createTauriAPI(invoke: TauriInvoke, listen: TauriListen): NoteFl ...createIntegrationApi(invoke), ...createObservabilityApi(invoke), ...createOidcApi(invoke), + ...createTaskApi(invoke), + ...createAnalyticsApi(invoke), }; } diff --git a/client/src/api/adapters/tauri/constants.ts b/client/src/api/adapters/tauri/constants.ts index 42afc50..a929bd1 100644 --- a/client/src/api/adapters/tauri/constants.ts +++ b/client/src/api/adapters/tauri/constants.ts @@ -142,6 +142,12 @@ export const TauriCommands = { CHECK_TEST_ENVIRONMENT: 'check_test_environment', INJECT_TEST_AUDIO: 'inject_test_audio', INJECT_TEST_TONE: 'inject_test_tone', + // Tasks (Bugfinder Sprint) 
+ LIST_TASKS: 'list_tasks', + UPDATE_TASK: 'update_task', + // Analytics (Bugfinder Sprint) + GET_ANALYTICS_OVERVIEW: 'get_analytics_overview', + LIST_SPEAKER_STATS: 'list_speaker_stats', } as const; /** diff --git a/client/src/api/adapters/tauri/sections/analytics.ts b/client/src/api/adapters/tauri/sections/analytics.ts new file mode 100644 index 0000000..8076c90 --- /dev/null +++ b/client/src/api/adapters/tauri/sections/analytics.ts @@ -0,0 +1,32 @@ +import type { + AnalyticsOverview, + AnalyticsOverviewRequest, + ListSpeakerStatsRequest, + ListSpeakerStatsResponse, +} from '../../../types'; +import type { NoteFlowAPI } from '../../../interface'; +import { TauriCommands } from '../constants'; +import type { TauriInvoke } from '../types'; + +export function createAnalyticsApi( + invoke: TauriInvoke +): Pick { + return { + async getAnalyticsOverview(request: AnalyticsOverviewRequest): Promise { + return invoke(TauriCommands.GET_ANALYTICS_OVERVIEW, { + start_time: request.start_time, + end_time: request.end_time, + project_id: request.project_id, + project_ids: request.project_ids, + }); + }, + async listSpeakerStats(request: ListSpeakerStatsRequest): Promise { + return invoke(TauriCommands.LIST_SPEAKER_STATS, { + start_time: request.start_time, + end_time: request.end_time, + project_id: request.project_id, + project_ids: request.project_ids, + }); + }, + }; +} diff --git a/client/src/api/adapters/tauri/sections/tasks.ts b/client/src/api/adapters/tauri/sections/tasks.ts new file mode 100644 index 0000000..3e0d19c --- /dev/null +++ b/client/src/api/adapters/tauri/sections/tasks.ts @@ -0,0 +1,32 @@ +import type { ListTasksRequest, ListTasksResponse, Task, UpdateTaskRequest } from '../../../types'; +import type { NoteFlowAPI } from '../../../interface'; +import { TauriCommands } from '../constants'; +import type { TauriInvoke } from '../types'; + +export function createTaskApi( + invoke: TauriInvoke +): Pick { + return { + async listTasks(request: ListTasksRequest): 
Promise { + return invoke(TauriCommands.LIST_TASKS, { + statuses: request.statuses, + limit: request.limit, + offset: request.offset, + project_id: request.project_id, + project_ids: request.project_ids, + meeting_id: request.meeting_id, + }); + }, + async updateTask(request: UpdateTaskRequest): Promise { + const response = await invoke<{ task: Task }>(TauriCommands.UPDATE_TASK, { + task_id: request.task_id, + text: request.text, + status: request.status, + assignee_person_id: request.assignee_person_id, + due_date: request.due_date, + priority: request.priority, + }); + return response.task; + }, + }; +} diff --git a/client/src/api/adapters/tauri/sections/webhooks.ts b/client/src/api/adapters/tauri/sections/webhooks.ts index 88ca7f1..1ed99c6 100644 --- a/client/src/api/adapters/tauri/sections/webhooks.ts +++ b/client/src/api/adapters/tauri/sections/webhooks.ts @@ -21,7 +21,7 @@ function sanitizeWebhookRequest; + + // --- Tasks (Strategy B) --- + + listTasks(request: ListTasksRequest): Promise; + + updateTask(request: UpdateTaskRequest): Promise; + + // --- Analytics (Strategy B) --- + + getAnalyticsOverview(request: AnalyticsOverviewRequest): Promise; + + listSpeakerStats(request: ListSpeakerStatsRequest): Promise; } // --- API Instance Management --- diff --git a/client/src/api/types/enums.ts b/client/src/api/types/enums.ts index bcff231..0a43aae 100644 --- a/client/src/api/types/enums.ts +++ b/client/src/api/types/enums.ts @@ -118,3 +118,14 @@ export type WorkspaceRole = 'owner' | 'admin' | 'member' | 'viewer'; * - PROJECT_ROLE_ADMIN = 3 */ export type ProjectRole = 'viewer' | 'editor' | 'admin'; + +/** + * Task lifecycle status + * + * gRPC enum values: + * - TASK_STATUS_UNSPECIFIED = 0 + * - TASK_STATUS_OPEN = 1 + * - TASK_STATUS_DONE = 2 + * - TASK_STATUS_DISMISSED = 3 + */ +export type TaskStatus = 'open' | 'done' | 'dismissed'; diff --git a/client/src/api/types/features/analytics.ts b/client/src/api/types/features/analytics.ts new file mode 100644 index 
0000000..d4f2d00 --- /dev/null +++ b/client/src/api/types/features/analytics.ts @@ -0,0 +1,49 @@ +/** + * Analytics Aggregate Types + * + * Types for server-backed analytics aggregates. + * These replace client-side computations for better scalability. + */ + +export interface DailyMeetingStats { + date: string; + meetings: number; + total_duration: number; + word_count: number; +} + +export interface AnalyticsOverview { + daily: DailyMeetingStats[]; + total_meetings: number; + total_duration: number; + total_words: number; + total_segments: number; + speaker_count: number; +} + +export interface AnalyticsOverviewRequest { + start_time: number; + end_time: number; + project_id?: string; + project_ids?: string[]; +} + +export interface SpeakerStat { + speaker_id: string; + display_name: string; + total_time: number; + segment_count: number; + meeting_count: number; + avg_confidence: number; +} + +export interface ListSpeakerStatsRequest { + start_time: number; + end_time: number; + project_id?: string; + project_ids?: string[]; +} + +export interface ListSpeakerStatsResponse { + speakers: SpeakerStat[]; +} diff --git a/client/src/api/types/features/asr.ts b/client/src/api/types/features/asr.ts index 32b54ec..dd62cdf 100644 --- a/client/src/api/types/features/asr.ts +++ b/client/src/api/types/features/asr.ts @@ -27,9 +27,9 @@ export type ASRDevice = 'unspecified' | 'cpu' | 'cuda'; export type ASRComputeType = 'unspecified' | 'int8' | 'float16' | 'float32'; /** - * Job status for background tasks + * Job status for ASR background tasks (extends base JobStatus with 'unspecified') */ -export type JobStatus = 'unspecified' | 'queued' | 'running' | 'completed' | 'failed' | 'cancelled'; +export type ASRJobStatus = 'unspecified' | 'queued' | 'running' | 'completed' | 'failed' | 'cancelled'; /** * Current ASR configuration and capabilities @@ -79,7 +79,7 @@ export interface UpdateASRConfigurationResult { jobId: string; /** Initial job status */ - status: JobStatus; + status: 
ASRJobStatus; /** Whether the request was accepted */ accepted: boolean; @@ -101,7 +101,7 @@ export interface ASRConfigurationJobStatus { jobId: string; /** Current status */ - status: JobStatus; + status: ASRJobStatus; /** Progress percentage (0.0-100.0) */ progressPercent: number; diff --git a/client/src/api/types/features/index.ts b/client/src/api/types/features/index.ts index 219d27b..6bca094 100644 --- a/client/src/api/types/features/index.ts +++ b/client/src/api/types/features/index.ts @@ -1,9 +1,10 @@ /** * NoteFlow Feature Types * - * Types for NER, calendar, webhooks, sync, observability, identity, OIDC, ASR, and model downloads. + * Types for NER, calendar, webhooks, sync, observability, identity, OIDC, ASR, model downloads, tasks, and analytics. */ +export * from './analytics'; export * from './asr'; export * from './calendar'; export * from './identity'; @@ -13,4 +14,5 @@ export * from './observability'; export * from './oidc'; export * from './streaming'; export * from './sync'; +export * from './tasks'; export * from './webhooks'; diff --git a/client/src/api/types/features/tasks.ts b/client/src/api/types/features/tasks.ts new file mode 100644 index 0000000..156d7b2 --- /dev/null +++ b/client/src/api/types/features/tasks.ts @@ -0,0 +1,52 @@ +import type { TaskStatus } from '../enums'; + +export interface Task { + id: string; + meeting_id: string | null; + action_item_id: number | null; + text: string; + status: TaskStatus; + assignee_person_id: string | null; + due_date: number | null; + priority: number; + completed_at: number | null; +} + +/** Task with associated meeting metadata */ +export interface TaskWithMeeting { + task: Task; + meeting_title: string; + meeting_created_at: number; + project_id: string | null; +} + +/** Request to list tasks with filtering */ +export interface ListTasksRequest { + statuses?: TaskStatus[]; + limit?: number; + offset?: number; + project_id?: string; + project_ids?: string[]; + meeting_id?: string; +} + +/** 
Response from listing tasks */ +export interface ListTasksResponse { + tasks: TaskWithMeeting[]; + total_count: number; +} + +/** Request to update a task */ +export interface UpdateTaskRequest { + task_id: string; + text?: string; + status?: TaskStatus; + assignee_person_id?: string | null; + due_date?: number | null; + priority?: number; +} + +/** Response from updating a task */ +export interface UpdateTaskResponse { + task: Task; +} diff --git a/client/src/api/types/requests/integrations.ts b/client/src/api/types/requests/integrations.ts index 0f97e07..781da34 100644 --- a/client/src/api/types/requests/integrations.ts +++ b/client/src/api/types/requests/integrations.ts @@ -115,23 +115,11 @@ export interface SyncHistoryEvent { error?: string; } -export interface GetOAuthClientConfigRequest { - workspace_id?: string; - provider: string; - integration_type?: Integration['type']; -} - -export interface GetOAuthClientConfigResponse { - config?: OAuthConfig; -} - -export interface SetOAuthClientConfigRequest { - workspace_id?: string; - provider: string; - integration_type?: Integration['type']; - config: OAuthConfig; -} - -export interface SetOAuthClientConfigResponse { - success: boolean; -} +// OAuth client config types are in features/calendar.ts +// Re-exported here for backwards compatibility +export type { + GetOAuthClientConfigRequest, + GetOAuthClientConfigResponse, + SetOAuthClientConfigRequest, + SetOAuthClientConfigResponse, +} from '../features/calendar'; diff --git a/client/src/components/features/analytics/logs-tab.tsx b/client/src/components/features/analytics/logs-tab.tsx index 730c6b9..b28aadc 100644 --- a/client/src/components/features/analytics/logs-tab.tsx +++ b/client/src/components/features/analytics/logs-tab.tsx @@ -138,7 +138,10 @@ export function LogsTab() { return true; } const metadataText = log.metadata ? 
JSON.stringify(log.metadata).toLowerCase() : ''; - const correlationText = [log.traceId, log.spanId].filter(Boolean).join(' ').toLowerCase(); + // Only server logs have traceId/spanId - use type guard + const traceId = 'traceId' in log ? log.traceId : undefined; + const spanId = 'spanId' in log ? log.spanId : undefined; + const correlationText = [traceId, spanId].filter(Boolean).join(' ').toLowerCase(); return ( log.message.toLowerCase().includes(query) || log.details?.toLowerCase().includes(query) || diff --git a/client/src/components/features/analytics/performance-tab.tsx b/client/src/components/features/analytics/performance-tab.tsx index f58e562..9f9abf7 100644 --- a/client/src/components/features/analytics/performance-tab.tsx +++ b/client/src/components/features/analytics/performance-tab.tsx @@ -125,7 +125,7 @@ export function PerformanceTab() { queryKey: ['performance-metrics'], queryFn: async () => { const api = getAPI(); - return api.getPerformanceMetrics({ history_minutes: 60 }); + return api.getPerformanceMetrics({ history_limit: 60 }); }, refetchInterval: METRICS_REFRESH_INTERVAL_MS, }); diff --git a/client/src/pages/Analytics.tsx b/client/src/pages/Analytics.tsx index c6e0426..8b27321 100644 --- a/client/src/pages/Analytics.tsx +++ b/client/src/pages/Analytics.tsx @@ -1,7 +1,7 @@ // Analytics page - Meeting insights and statistics import { useQuery } from '@tanstack/react-query'; -import { eachDayOfInterval, format, subDays } from 'date-fns'; +import { format, subDays } from 'date-fns'; import { Activity, BarChart3, @@ -31,7 +31,7 @@ import { YAxis, } from 'recharts'; import { getAPI } from '@/api/interface'; -import type { ListMeetingsResponse, Meeting } from '@/api/types'; +import type { AnalyticsOverview, ListMeetingsResponse, SpeakerStat } from '@/api/types'; import { SPEAKER_COLORS, SPEAKER_COLOR_CLASSES, @@ -57,6 +57,7 @@ import { import { cn } from '@/lib/utils'; const titleRowClass = flexLayout.itemsGap2; +const ANALYTICS_DAYS = 14; interface 
DailyStats { date: string; @@ -68,84 +69,42 @@ interface DailyStats { interface SpeakerStats { speakerId: string; + displayName: string; totalTime: number; percentage: number; + segmentCount: number; + meetingCount: number; } -function computeAnalytics(meetings: Meeting[]) { - const endDate = new Date(); - const startDate = subDays(endDate, 13); - const dateRange = eachDayOfInterval({ start: startDate, end: endDate }); +function transformDailyStats(overview: AnalyticsOverview): DailyStats[] { + return overview.daily.map((d) => ({ + date: d.date, + dateLabel: format(new Date(d.date), 'MMM d'), + meetings: d.meetings, + totalDuration: d.total_duration, + wordCount: d.word_count, + })); +} - const dailyMap = new Map(); - for (const date of dateRange) { - const key = format(date, 'yyyy-MM-dd'); - dailyMap.set(key, { - date: key, - dateLabel: format(date, 'MMM d'), - meetings: 0, - totalDuration: 0, - wordCount: 0, - }); - } - - const speakerMap = new Map(); - let totalSpeakingTime = 0; - let totalWords = 0; - let totalSegments = 0; - - for (const meeting of meetings) { - const meetingDate = format(new Date(meeting.created_at * 1000), 'yyyy-MM-dd'); - const dailyStats = dailyMap.get(meetingDate); - if (dailyStats) { - dailyStats.meetings += 1; - dailyStats.totalDuration += meeting.duration_seconds; - const words = meeting.segments.reduce((sum, seg) => sum + seg.words.length, 0); - dailyStats.wordCount += words; - } - - for (const segment of meeting.segments) { - const duration = segment.end_time - segment.start_time; - speakerMap.set(segment.speaker_id, (speakerMap.get(segment.speaker_id) || 0) + duration); - totalSpeakingTime += duration; - totalWords += segment.words.length; - totalSegments += 1; - } - } - - const dailyTrends = Array.from(dailyMap.values()); - const speakerStats: SpeakerStats[] = Array.from(speakerMap.entries()) - .map(([speakerId, totalTime]) => ({ - speakerId, - totalTime, - percentage: totalSpeakingTime > 0 ? 
(totalTime / totalSpeakingTime) * 100 : 0, - })) - .sort((a, b) => b.totalTime - a.totalTime); - - const avgDuration = - meetings.length > 0 - ? meetings.reduce((sum, m) => sum + m.duration_seconds, 0) / meetings.length - : 0; - const avgWordsPerMeeting = meetings.length > 0 ? totalWords / meetings.length : 0; - - return { - dailyTrends, - speakerStats, - totalMeetings: meetings.length, - totalDuration: meetings.reduce((sum, m) => sum + m.duration_seconds, 0), - totalWords, - totalSegments, - avgDuration, - avgWordsPerMeeting, - speakerCount: speakerMap.size, - }; +function transformSpeakerStats(speakers: SpeakerStat[]): SpeakerStats[] { + const totalTime = speakers.reduce((sum, s) => sum + s.total_time, 0); + return speakers.map((s) => ({ + speakerId: s.speaker_id, + displayName: s.display_name, + totalTime: s.total_time, + percentage: totalTime > 0 ? (s.total_time / totalTime) * 100 : 0, + segmentCount: s.segment_count, + meetingCount: s.meeting_count, + })); } function MeetingsAnalyticsContent({ - analytics, + overview, + speakerStats, chartConfig, }: { - analytics: ReturnType; + overview: AnalyticsOverview; + speakerStats: SpeakerStats[]; chartConfig: Record; }) { const chartId = useId(); @@ -158,32 +117,38 @@ function MeetingsAnalyticsContent({ `${Number(value).toLocaleString()} words`} /> ); + const dailyTrends = transformDailyStats(overview); + const avgDuration = + overview.total_meetings > 0 ? overview.total_duration / overview.total_meetings : 0; + const avgWordsPerMeeting = + overview.total_meetings > 0 ? overview.total_words / overview.total_meetings : 0; + return (
@@ -194,7 +159,7 @@ function MeetingsAnalyticsContent({ Meeting Duration Trends - Daily meeting duration over the last 14 days + Daily meeting duration over the last {ANALYTICS_DAYS} days
@@ -202,7 +167,7 @@ function MeetingsAnalyticsContent({ config={chartConfig} className={`${chartHeight.standard} min-w-[360px]`} > - + @@ -242,7 +207,7 @@ function MeetingsAnalyticsContent({
- +
- {analytics.speakerStats.length > 0 ? ( + {speakerStats.length > 0 ? ( - {analytics.speakerStats.map((stat, idx) => ( + {speakerStats.map((stat, idx) => (
- + @@ -358,7 +323,7 @@ function MeetingsAnalyticsContent({
- {analytics.speakerStats.length > 0 && ( + {speakerStats.length > 0 && ( Speaker Breakdown @@ -366,7 +331,7 @@ function MeetingsAnalyticsContent({
- {analytics.speakerStats.map((speaker, index) => { + {speakerStats.map((speaker, index) => { const speakerColorClass = SPEAKER_COLOR_CLASSES[index % SPEAKER_COLOR_CLASSES.length]; const barWidth = `${speaker.percentage}%`; @@ -377,7 +342,7 @@ function MeetingsAnalyticsContent({
- {speaker.speakerId} + {speaker.displayName} {formatDuration(speaker.totalTime)} ({speaker.percentage.toFixed(1)}%) @@ -406,20 +371,47 @@ function MeetingsAnalyticsContent({ export default function AnalyticsPage() { const [activeTab, setActiveTab] = useState('meetings'); + + const endTime = useMemo(() => Math.floor(Date.now() / 1000), []); + const startTime = useMemo(() => Math.floor(subDays(new Date(), ANALYTICS_DAYS - 1).getTime() / 1000), []); + + const { data: overview, isLoading: overviewLoading } = useQuery({ + queryKey: ['analytics-overview', startTime, endTime], + queryFn: () => + getAPI().getAnalyticsOverview({ + start_time: startTime, + end_time: endTime, + }), + }); + + const { data: speakerResponse, isLoading: speakersLoading } = useQuery({ + queryKey: ['speaker-stats', startTime, endTime], + queryFn: () => + getAPI().listSpeakerStats({ + start_time: startTime, + end_time: endTime, + }), + }); + + const speakerStats = useMemo( + () => (speakerResponse?.speakers ? transformSpeakerStats(speakerResponse.speakers) : []), + [speakerResponse] + ); + const { data: meetingsData } = useQuery({ queryKey: ['meetings', 'all'], queryFn: () => getAPI().listMeetings({ limit: 100 }), + enabled: activeTab === 'speech', }); - const analytics = useMemo( - () => (meetingsData?.meetings ? computeAnalytics(meetingsData.meetings) : null), - [meetingsData] - ); + const chartConfig = { meetings: { label: 'Meetings', color: 'hsl(var(--chart-1))' }, duration: { label: 'Duration (min)', color: 'hsl(var(--chart-2))' }, words: { label: 'Words', color: 'hsl(var(--chart-3))' }, }; + const isLoading = overviewLoading || speakersLoading; + return (
@@ -451,7 +443,7 @@ export default function AnalyticsPage() { - {!analytics ? ( + {isLoading || !overview ? (
{[1, 2, 3, 4].map((i) => ( @@ -460,7 +452,11 @@ export default function AnalyticsPage() {
) : ( - + )}
diff --git a/client/src/pages/People.tsx b/client/src/pages/People.tsx index 3c1fa11..55ee3ed 100644 --- a/client/src/pages/People.tsx +++ b/client/src/pages/People.tsx @@ -5,6 +5,7 @@ import { AnimatePresence, motion } from 'framer-motion'; import { Calendar, Clock, Edit2, Mic, TrendingUp, Users, X } from 'lucide-react'; import { useEffect, useMemo, useState } from 'react'; import { getAPI } from '@/api/interface'; +import type { SpeakerStat } from '@/api/types'; import { SuccessIcon } from '@/components/icons/status-icons'; import { Badge } from '@/components/ui/badge'; import { Button } from '@/components/ui/button'; @@ -12,28 +13,33 @@ import { Card, CardContent } from '@/components/ui/card'; import { Input } from '@/components/ui/input'; import { Progress } from '@/components/ui/progress'; import { SearchIcon } from '@/components/ui/search-icon'; +import { Skeleton } from '@/components/ui/skeleton'; import { formatDuration } from '@/lib/utils/format'; import { preferences } from '@/lib/preferences'; -import type { SpeakerStats } from '@/lib/audio/speaker'; -import { aggregateSpeakers, getSpeakerColorIndex } from '@/lib/audio/speaker'; +import { getSpeakerColorIndex } from '@/lib/audio/speaker'; import { cardPadding, typography } from '@/lib/ui/styles'; +/** Speaker stats with display name from preferences applied */ +interface DisplaySpeakerStat extends SpeakerStat { + displayName: string; +} + function SpeakerCard({ speaker, maxSpeakingTime, onRename, }: { - speaker: SpeakerStats; + speaker: DisplaySpeakerStat; maxSpeakingTime: number; onRename: (speakerId: string, newName: string) => void; }) { const [isEditing, setIsEditing] = useState(false); const [editName, setEditName] = useState(speaker.displayName); - const colorIndex = getSpeakerColorIndex(speaker.speakerId); + const colorIndex = getSpeakerColorIndex(speaker.speaker_id); const handleSave = () => { if (editName.trim()) { - onRename(speaker.speakerId, editName.trim()); + onRename(speaker.speaker_id, 
editName.trim()); setIsEditing(false); } }; @@ -44,7 +50,7 @@ function SpeakerCard({ }; const statValueClass = 'font-semibold'; - const speakingPercentage = (speaker.totalSpeakingTime / maxSpeakingTime) * 100; + const speakingPercentage = (speaker.total_time / maxSpeakingTime) * 100; return (
)} - {speaker.displayName !== speaker.speakerId && ( -

Original: {speaker.speakerId}

+ {speaker.displayName !== speaker.speaker_id && ( +

Original: {speaker.speaker_id}

)}
{/* Confidence Badge */} - {Math.round(speaker.avgConfidence * 100)}% confidence + {Math.round(speaker.avg_confidence * 100)}% confidence
@@ -121,7 +127,7 @@ function SpeakerCard({

Speaking Time

-

{formatDuration(speaker.totalSpeakingTime)}

+

{formatDuration(speaker.total_time)}

@@ -131,7 +137,7 @@ function SpeakerCard({

Meetings

-

{speaker.meetingCount}

+

{speaker.meeting_count}

@@ -141,7 +147,7 @@ function SpeakerCard({

Segments

-

{speaker.segmentCount}

+

{speaker.segment_count}

@@ -154,48 +160,49 @@ function SpeakerCard({ - - {/* Recent Meetings */} - {speaker.meetings.length > 0 && ( -
-

Recent Meetings

-
- {speaker.meetings.slice(0, 3).map((meeting) => ( - - {meeting.title.length > 25 ? `${meeting.title.slice(0, 25)}...` : meeting.title} - - ))} - {speaker.meetings.length > 3 && ( - +{speaker.meetings.length - 3} more - )} -
-
- )} ); } +const ANALYTICS_DAYS = 90; + export default function PeoplePage() { const [searchQuery, setSearchQuery] = useState(''); const [prefsVersion, setPrefsVersion] = useState(0); - const { data: meetingsData } = useQuery({ - queryKey: ['meetings', 'all'], - queryFn: () => getAPI().listMeetings({ limit: 100 }), + const timeRange = useMemo(() => { + const now = Date.now(); + const startTime = now - ANALYTICS_DAYS * 24 * 60 * 60 * 1000; + return { start_time: startTime, end_time: now }; + }, []); + + const { + data: speakerStatsResponse, + isLoading, + } = useQuery({ + queryKey: ['speakerStats', timeRange.start_time, timeRange.end_time], + queryFn: () => + getAPI().listSpeakerStats({ + start_time: timeRange.start_time, + end_time: timeRange.end_time, + }), }); useEffect(() => { return preferences.subscribe(() => setPrefsVersion((v) => v + 1)); }, []); - const speakers = useMemo(() => { - if (!meetingsData?.meetings) { + const speakers = useMemo((): DisplaySpeakerStat[] => { + if (!speakerStatsResponse?.speakers) { return []; } - return aggregateSpeakers(meetingsData.meetings); - }, [meetingsData, prefsVersion]); + return speakerStatsResponse.speakers.map((stat) => ({ + ...stat, + displayName: preferences.getGlobalSpeakerName(stat.speaker_id) || stat.display_name || stat.speaker_id, + })); + }, [speakerStatsResponse, prefsVersion]); const filteredSpeakers = useMemo(() => { if (!searchQuery.trim()) { @@ -204,22 +211,44 @@ export default function PeoplePage() { const query = searchQuery.toLowerCase(); return speakers.filter( (s) => - s.displayName.toLowerCase().includes(query) || s.speakerId.toLowerCase().includes(query) + s.displayName.toLowerCase().includes(query) || s.speaker_id.toLowerCase().includes(query) ); }, [speakers, searchQuery]); const maxSpeakingTime = useMemo(() => { - return Math.max(...speakers.map((s) => s.totalSpeakingTime), 1); + return Math.max(...speakers.map((s) => s.total_time), 1); }, [speakers]); const totalSpeakingTime = useMemo(() => { - 
return speakers.reduce((sum, s) => sum + s.totalSpeakingTime, 0); + return speakers.reduce((sum, s) => sum + s.total_time, 0); }, [speakers]); const handleRename = (speakerId: string, newName: string) => { preferences.setGlobalSpeakerName(speakerId, newName); }; + if (isLoading) { + return ( +
+
+ + +
+
+ {Array.from({ length: 4 }, (_, i) => ( + + ))} +
+ +
+ {Array.from({ length: 4 }, (_, i) => ( + + ))} +
+
+ ); + } + return (
{/* Header */} @@ -265,8 +294,10 @@ export default function PeoplePage() {
-

{meetingsData?.meetings?.length || 0}

-

Meetings Analyzed

+

+ {speakers.reduce((max, s) => Math.max(max, s.meeting_count), 0)} +

+

Max Meetings (Speaker)

@@ -302,7 +333,7 @@ export default function PeoplePage() { {filteredSpeakers.map((speaker) => ( = { + 3: 'high', + 2: 'medium', + 1: 'low', +}; + +function getPriorityLabel(priority: number): Priority { + return PRIORITY_LABELS[priority] ?? 'medium'; +} + +function getStatusFilters(statusFilter: StatusFilter): TaskStatus[] | undefined { + if (statusFilter === 'all') { + return undefined; + } + return [statusFilter]; +} + export default function TasksPage() { + const queryClient = useQueryClient(); const { activeProject, projects, isLoading: projectsLoading } = useProjects(); const [searchQuery, setSearchQuery] = useState(''); - const [statusFilter, setStatusFilter] = useState('pending'); + const [statusFilter, setStatusFilter] = useState('open'); const [priorityFilter, setPriorityFilter] = useState('all'); - const [completedTasks, setCompletedTasks] = useState>(() => { - const prefs = preferences.get(); - return new Set(prefs.completed_tasks.map((t) => `${t.meeting_id}:${t.task_text}`)); - }); const [projectScope, setProjectScope] = useState(() => { return preferences.get().tasks_project_scope ?? 'active'; }); const [selectedProjectIds, setSelectedProjectIds] = useState(() => { return preferences.get().tasks_project_ids ?? []; }); + const activeProjects = useMemo( () => projects.filter((project) => !project.is_archived), [projects] ); const resolvedProjectId = activeProject?.id; - // Skip fetching when no valid project context const shouldSkipFetch = (projectScope === 'selected' && selectedProjectIds.length === 0) || (projectScope === 'active' && !resolvedProjectId && !projectsLoading); - const { data: meetings = [], isLoading: loading } = useAsyncData( - () => - getAPI() - .listMeetings({ - limit: TASKS_PAGE_MEETINGS_LIMIT, - project_id: projectScope === 'active' ? resolvedProjectId : undefined, - project_ids: projectScope === 'selected' ? 
selectedProjectIds : undefined, - }) - .then((r) => r.meetings), - [projectScope, selectedProjectIds, resolvedProjectId], - { - initialData: [], - skip: shouldSkipFetch, - onError: (error) => { - addClientLog({ - level: 'warning', - source: 'app', - message: 'Failed to load tasks from meetings', - details: error, - metadata: { context: 'tasks_page_load', project_scope: projectScope }, - }); - }, - } - ); + const { data: tasksResponse, isLoading: loading } = useQuery({ + queryKey: ['tasks', statusFilter, projectScope, selectedProjectIds, resolvedProjectId], + queryFn: () => + getAPI().listTasks({ + statuses: getStatusFilters(statusFilter), + limit: 500, + project_id: projectScope === 'active' ? resolvedProjectId : undefined, + project_ids: projectScope === 'selected' ? selectedProjectIds : undefined, + }), + enabled: !shouldSkipFetch, + }); + + const updateTaskMutation = useMutation({ + mutationFn: (params: { taskId: string; status: TaskStatus }) => + getAPI().updateTask({ + task_id: params.taskId, + status: params.status, + }), + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['tasks'] }); + }, + onError: (error) => { + addClientLog({ + level: 'warning', + source: 'app', + message: 'Failed to update task status', + details: error instanceof Error ? error.message : String(error), + metadata: { context: 'tasks_page_update' }, + }); + }, + }); useEffect(() => { preferences.setTasksProjectFilter(projectScope, selectedProjectIds); }, [projectScope, selectedProjectIds]); - // Aggregate all tasks from meetings - const allTasks = useMemo(() => { - const tasks: TaskWithMeeting[] = []; - meetings.forEach((meeting) => { - meeting.summary?.action_items.forEach((item) => { - tasks.push({ - ...item, - meetingId: meeting.id, - meetingTitle: meeting.title, - meetingDate: meeting.created_at, - projectId: meeting.project_id, - }); - }); - }); - return sortTasksByPriority(tasks); - }, [meetings]); + const allTasks = useMemo(() => tasksResponse?.tasks ?? 
[], [tasksResponse]); - // Filter tasks const filteredTasks = useMemo(() => { - return allTasks.filter((task) => { - const taskKey = `${task.meetingId}:${task.text}`; - const isCompleted = completedTasks.has(taskKey); + return allTasks.filter((taskWithMeeting) => { + const task = taskWithMeeting.task; - // Status filter - if (statusFilter === 'pending' && isCompleted) { - return false; - } - if (statusFilter === 'completed' && !isCompleted) { - return false; + if (priorityFilter !== 'all') { + const label = getPriorityLabel(task.priority); + if (label !== priorityFilter) { + return false; + } } - // Priority filter - if (priorityFilter !== 'all' && task.priority !== priorityFilter) { - return false; - } - - // Search filter if (searchQuery) { const query = searchQuery.toLowerCase(); if ( !task.text.toLowerCase().includes(query) && - !task.meetingTitle.toLowerCase().includes(query) && - !task.assignee?.toLowerCase().includes(query) + !taskWithMeeting.meeting_title.toLowerCase().includes(query) ) { return false; } @@ -134,42 +130,41 @@ export default function TasksPage() { return true; }); - }, [allTasks, statusFilter, priorityFilter, searchQuery, completedTasks]); + }, [allTasks, priorityFilter, searchQuery]); - const getTaskMeetingLink = (task: TaskWithMeeting): string => { - const projectId = task.projectId ?? activeProject?.id; - return projectId ? `/projects/${projectId}/meetings/${task.meetingId}` : '/projects'; + const getTaskMeetingLink = (taskWithMeeting: APITaskWithMeeting): string => { + const projectId = taskWithMeeting.project_id ?? 
activeProject?.id; + const meetingId = taskWithMeeting.task.meeting_id; + if (projectId && meetingId) { + return `/projects/${projectId}/meetings/${meetingId}`; + } + return '/projects'; }; - const handleToggleComplete = (task: TaskWithMeeting) => { - const taskKey = `${task.meetingId}:${task.text}`; - const isNowCompleted = preferences.toggleTaskCompletion(task.meetingId, task.text); - - setCompletedTasks((prev) => { - const next = new Set(prev); - if (isNowCompleted) { - next.add(taskKey); - } else { - next.delete(taskKey); - } - return next; - }); + const handleToggleComplete = (task: Task) => { + const newStatus: TaskStatus = task.status === 'done' ? 'open' : 'done'; + updateTaskMutation.mutate({ taskId: task.id, status: newStatus }); }; - const isTaskCompleted = (task: TaskWithMeeting): boolean => { - return completedTasks.has(`${task.meetingId}:${task.text}`); + const handleDismiss = (task: Task) => { + updateTaskMutation.mutate({ taskId: task.id, status: 'dismissed' }); }; - // Stats const stats = useMemo(() => { - const total = allTasks.length; - const completed = allTasks.filter((t) => completedTasks.has(`${t.meetingId}:${t.text}`)).length; - const pending = total - completed; - const highPriority = allTasks.filter( - (t) => t.priority === 'high' && !completedTasks.has(`${t.meetingId}:${t.text}`) + const openCount = allTasks.filter((t) => t.task.status === 'open').length; + const doneCount = allTasks.filter((t) => t.task.status === 'done').length; + const dismissedCount = allTasks.filter((t) => t.task.status === 'dismissed').length; + const highPriorityCount = allTasks.filter( + (t) => t.task.status === 'open' && t.task.priority >= 3 ).length; - return { total, completed, pending, highPriority }; - }, [allTasks, completedTasks]); + return { + total: allTasks.length, + open: openCount, + done: doneCount, + dismissed: dismissedCount, + highPriority: highPriorityCount, + }; + }, [allTasks]); if (loading) { return ( @@ -221,8 +216,8 @@ export default function 
TasksPage() { transition={{ delay: 0.05 }} className="grid grid-cols-2 sm:grid-cols-4 gap-4" > - - + + setStatusFilter(v as StatusFilter)}> - + - Pending + Open - + Done + + + Dismissed + All @@ -284,31 +283,37 @@ export default function TasksPage() { {filteredTasks.length === 0 ? ( ) : ( - {filteredTasks.map((task) => { - const isCompleted = isTaskCompleted(task); + {filteredTasks.map((taskWithMeeting) => { + const task = taskWithMeeting.task; + const isCompleted = task.status === 'done'; + const isDismissed = task.status === 'dismissed'; return ( {/* Checkbox */} @@ -329,11 +334,16 @@ export default function TasksPage() { 'mt-0.5 shrink-0 transition-colors', isCompleted ? 'text-success' - : 'text-muted-foreground hover:text-primary' + : isDismissed + ? 'text-muted-foreground' + : 'text-muted-foreground hover:text-primary' )} + disabled={updateTaskMutation.isPending} > {isCompleted ? ( + ) : isDismissed ? ( + ) : ( )} @@ -342,43 +352,50 @@ export default function TasksPage() { {/* Content */}

{task.text}

- {task.meetingTitle} + {taskWithMeeting.meeting_title} - {formatRelativeTime(task.meetingDate)} + {formatRelativeTime(taskWithMeeting.meeting_created_at)} - {task.assignee && ( - <> - - {task.assignee} - - )} {task.due_date && ( <> - - Due {formatDate(task.due_date)} - + Due {formatDate(task.due_date)} )}
- {/* Priority Badge */} + {/* Priority Badge & Actions */}
- - + + {task.status === 'open' && ( + + )} +

{entity.text}

{entity.confidence !== undefined && ( diff --git a/client/src/pages/meeting-detail/index.tsx b/client/src/pages/meeting-detail/index.tsx index 3e3ffb3..abed31a 100644 --- a/client/src/pages/meeting-detail/index.tsx +++ b/client/src/pages/meeting-detail/index.tsx @@ -4,9 +4,10 @@ * Displays a meeting's transcript, summary, entities, and playback controls. */ -import { useRef } from 'react'; +import { useRef, useState } from 'react'; import { useNavigate, useParams } from 'react-router-dom'; import { useVirtualizer } from '@tanstack/react-virtual'; +import { ChevronDown, ChevronRight, Sparkles } from 'lucide-react'; import { getAPI } from '@/api/interface'; import { isTauriEnvironment } from '@/api'; @@ -14,6 +15,12 @@ import type { ExportFormat } from '@/api/types'; import { ProcessingStatus } from '@/components/features/meetings'; import { SkeletonTranscript } from '@/components/ui/skeleton'; import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs'; +import { Button } from '@/components/ui/button'; +import { + Collapsible, + CollapsibleContent, + CollapsibleTrigger, +} from '@/components/ui/collapsible'; import { useGuardedMutation } from '@/hooks'; import { buildExportBlob, downloadBlob } from '@/lib/utils/download'; @@ -35,6 +42,7 @@ export default function MeetingDetailPage() { const { guard } = useGuardedMutation(); const transcriptScrollRef = useRef(null); const isTauri = isTauriEnvironment(); + const [isSummaryOpen, setIsSummaryOpen] = useState(true); const { meeting, @@ -173,10 +181,57 @@ export default function MeetingDetailPage() { )}
- {/* Transcript Panel */} -
-

Transcript

- {shouldVirtualizeTranscript ? ( + {/* Main Content Area: Summary + Transcript */} +
+ {/* Summary Section */} +
+ +
+ + + + + {!meeting.summary && !loading && ( + + )} +
+ + +
+ +
+
+
+
+ + {/* Transcript Section */} +
+

Transcript

+ {shouldVirtualizeTranscript ? (
)}
+
- {/* Summary Panel */} -
- + {/* Right Panel: Entities & Notes */} +
+ - - Summary - - - - void; + compact?: boolean; } -export function SummaryPanel({ summary, summaryMeta, onGenerateSummary }: SummaryPanelProps) { +export function SummaryPanel({ summary, summaryMeta, onGenerateSummary, compact }: SummaryPanelProps) { if (!summary) { + if (compact) { + return ( +
+ No summary available. Click "Generate Summary" to create one. +
+ ); + } return (

No summary generated yet

@@ -33,49 +41,61 @@ export function SummaryPanel({ summary, summaryMeta, onGenerateSummary }: Summar } return ( - <> -
-

Executive Summary

-

{summary.executive_summary}

-
- {summaryMeta.length > 0 && ( -
- {summaryMeta.map((meta) => ( - - {meta.label}: {meta.value} - - ))} +
+
+
+

Executive Summary

+

{summary.executive_summary}

+
+ + {summaryMeta.length > 0 && ( +
+ {summaryMeta.map((meta) => ( + + {meta.label}: + {meta.value} + + ))} +
+ )} + +
+

Key Points

+
    + {summary.key_points.map((kp) => ( +
  • + + {kp.text} +
  • + ))} +
- )} -
-

Key Points

-
    - {summary.key_points.map((kp) => ( -
  • - - {kp.text} -
  • - ))} -
+

Action Items

{summary.action_items.map((item) => ( - + -

{item.text}

+

{item.text}

{item.assignee && ( - {item.assignee} + + + {item.assignee} + )}
))} + {summary.action_items.length === 0 && ( +

No action items detected.

+ )}
- +
); } diff --git a/cupcake_tests/001_block_no_verify_deny.json b/cupcake_tests/001_block_no_verify_deny.json new file mode 100644 index 0000000..577fee6 --- /dev/null +++ b/cupcake_tests/001_block_no_verify_deny.json @@ -0,0 +1,9 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "bash", + "args": { + "command": "git commit --no-verify" + }, + "session_id": "test", + "cwd": "/tmp" +} \ No newline at end of file diff --git a/cupcake_tests/002_block_no_verify_allow.json b/cupcake_tests/002_block_no_verify_allow.json new file mode 100644 index 0000000..77425bc --- /dev/null +++ b/cupcake_tests/002_block_no_verify_allow.json @@ -0,0 +1,9 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "bash", + "args": { + "command": "git commit -m fix" + }, + "session_id": "test", + "cwd": "/tmp" +} \ No newline at end of file diff --git a/cupcake_tests/003_block_makefile_edit_deny.json b/cupcake_tests/003_block_makefile_edit_deny.json new file mode 100644 index 0000000..7a42eff --- /dev/null +++ b/cupcake_tests/003_block_makefile_edit_deny.json @@ -0,0 +1,10 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "edit", + "args": { + "file_path": "/home/trav/repos/noteflow/Makefile", + "new_string": "test:" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff --git a/cupcake_tests/004_block_linter_config_python_deny.json b/cupcake_tests/004_block_linter_config_python_deny.json new file mode 100644 index 0000000..798bd1e --- /dev/null +++ b/cupcake_tests/004_block_linter_config_python_deny.json @@ -0,0 +1,10 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "edit", + "args": { + "file_path": "/home/trav/repos/noteflow/pyproject.toml", + "new_string": "[tool.ruff]" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff --git a/cupcake_tests/005_block_linter_config_python_pyrightconfig_deny.json b/cupcake_tests/005_block_linter_config_python_pyrightconfig_deny.json new file mode 100644 index 
0000000..0747847 --- /dev/null +++ b/cupcake_tests/005_block_linter_config_python_pyrightconfig_deny.json @@ -0,0 +1,10 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "edit", + "args": { + "file_path": "/home/trav/repos/noteflow/pyrightconfig.json", + "new_string": "{}" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff --git a/cupcake_tests/006_block_linter_config_frontend_deny.json b/cupcake_tests/006_block_linter_config_frontend_deny.json new file mode 100644 index 0000000..9a80a3d --- /dev/null +++ b/cupcake_tests/006_block_linter_config_frontend_deny.json @@ -0,0 +1,10 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "edit", + "args": { + "file_path": "/home/trav/repos/noteflow/client/biome.json", + "new_string": "{}" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff --git a/cupcake_tests/007_block_linter_config_frontend_tsconfig_deny.json b/cupcake_tests/007_block_linter_config_frontend_tsconfig_deny.json new file mode 100644 index 0000000..082f528 --- /dev/null +++ b/cupcake_tests/007_block_linter_config_frontend_tsconfig_deny.json @@ -0,0 +1,10 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "edit", + "args": { + "file_path": "/home/trav/repos/noteflow/client/tsconfig.json", + "new_string": "{}" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff --git a/cupcake_tests/008_block_tests_quality_deny.json b/cupcake_tests/008_block_tests_quality_deny.json new file mode 100644 index 0000000..6dcfa31 --- /dev/null +++ b/cupcake_tests/008_block_tests_quality_deny.json @@ -0,0 +1,10 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "edit", + "args": { + "file_path": "/home/trav/repos/noteflow/tests/quality/test_smells.py", + "new_string": "# mod" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff --git a/cupcake_tests/009_warn_baselines_edit_deny.json 
b/cupcake_tests/009_warn_baselines_edit_deny.json new file mode 100644 index 0000000..26de7aa --- /dev/null +++ b/cupcake_tests/009_warn_baselines_edit_deny.json @@ -0,0 +1,10 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "edit", + "args": { + "file_path": "/home/trav/repos/noteflow/tests/quality/baselines.json", + "new_string": "{}" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff --git a/cupcake_tests/010_block_code_quality_test_edits_deny.json b/cupcake_tests/010_block_code_quality_test_edits_deny.json new file mode 100644 index 0000000..ef826f4 --- /dev/null +++ b/cupcake_tests/010_block_code_quality_test_edits_deny.json @@ -0,0 +1,10 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "edit", + "args": { + "file_path": "/home/trav/repos/noteflow/src/test/code-quality.test.ts", + "new_string": "//" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff --git a/cupcake_tests/011_block_code_quality_test_bash_deny.json b/cupcake_tests/011_block_code_quality_test_bash_deny.json new file mode 100644 index 0000000..03dd97f --- /dev/null +++ b/cupcake_tests/011_block_code_quality_test_bash_deny.json @@ -0,0 +1,9 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "bash", + "args": { + "command": "sed -i s/x/y/ src/test/code-quality.test.ts" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff --git a/cupcake_tests/012_block_biome_ignore_deny.json b/cupcake_tests/012_block_biome_ignore_deny.json new file mode 100644 index 0000000..8fbdd10 --- /dev/null +++ b/cupcake_tests/012_block_biome_ignore_deny.json @@ -0,0 +1,10 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "edit", + "args": { + "file_path": "/home/trav/repos/noteflow/client/src/App.tsx", + "new_string": "// biome-ignore lint: reason" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff --git 
a/cupcake_tests/013_block_biome_ignore_ts_ignore_deny.json b/cupcake_tests/013_block_biome_ignore_ts_ignore_deny.json new file mode 100644 index 0000000..97df205 --- /dev/null +++ b/cupcake_tests/013_block_biome_ignore_ts_ignore_deny.json @@ -0,0 +1,10 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "edit", + "args": { + "file_path": "/home/trav/repos/noteflow/client/src/App.tsx", + "new_string": "// @ts-ignore" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff --git a/cupcake_tests/014_block_biome_ignore_allow.json b/cupcake_tests/014_block_biome_ignore_allow.json new file mode 100644 index 0000000..d9435c2 --- /dev/null +++ b/cupcake_tests/014_block_biome_ignore_allow.json @@ -0,0 +1,10 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "edit", + "args": { + "file_path": "/home/trav/repos/noteflow/client/src/App.tsx", + "new_string": "const x = 1;" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff --git a/cupcake_tests/015_block_biome_ignore_bash_deny.json b/cupcake_tests/015_block_biome_ignore_bash_deny.json new file mode 100644 index 0000000..7567be7 --- /dev/null +++ b/cupcake_tests/015_block_biome_ignore_bash_deny.json @@ -0,0 +1,9 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "bash", + "args": { + "command": "echo // @ts-ignore >> client/src/App.tsx" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff --git a/cupcake_tests/016_block_magic_numbers_deny.json b/cupcake_tests/016_block_magic_numbers_deny.json new file mode 100644 index 0000000..fa06306 --- /dev/null +++ b/cupcake_tests/016_block_magic_numbers_deny.json @@ -0,0 +1,10 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "edit", + "args": { + "file_path": "/home/trav/repos/noteflow/src/noteflow/service.py", + "new_string": "timeout = 30" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff 
--git a/cupcake_tests/017_block_magic_numbers_constants_allow.json b/cupcake_tests/017_block_magic_numbers_constants_allow.json new file mode 100644 index 0000000..af964db --- /dev/null +++ b/cupcake_tests/017_block_magic_numbers_constants_allow.json @@ -0,0 +1,10 @@ +{ + "hook_event_name": "PreToolUse", + "tool": "edit", + "args": { + "file_path": "/home/trav/repos/noteflow/src/noteflow/constants.py", + "new_string": "TIMEOUT = 30" + }, + "session_id": "test", + "cwd": "/home/trav/repos/noteflow" +} \ No newline at end of file diff --git a/docs/plans/2026-01-21-bugfinder-design.md b/docs/plans/2026-01-21-bugfinder-design.md new file mode 100644 index 0000000..f38fa9e --- /dev/null +++ b/docs/plans/2026-01-21-bugfinder-design.md @@ -0,0 +1,66 @@ +
+Review Bugfinder feedback in docs/sprints/phase-ongoing/Bugfinder/rpt1-8 to identify root causes across +Analytics, Home, Meetings, People, Post Meeting, and Tasks pages, then outline ideal fixes that restore +data visibility, interaction, and persistence. +
+ +
+- analytics_reports (branch finding: treat rpt1-rpt3 as equally urgent) + - rpt1 Analytics - Meetings: totals and charts depend on meeting.segments; ListMeetings returns + include_segments=false and no summary, so word counts and speaker stats remain zero. Root cause in + src/noteflow/grpc/mixins/meeting/meeting_mixin.py#L197 and client/src/pages/Analytics.tsx#L75. + - rpt2 Analytics - Performance: metrics come only from server psutil snapshots and heuristic latency + (src/noteflow/infrastructure/metrics/collector.py, client/src/components/features/analytics/performance-tab.tsx). + No client/server toggle exists, and high resource usage likely reflects ongoing post-processing or + other server workloads rather than a UI issue. + - rpt3 Analytics - Speech: entity map and speech patterns rely on meeting.segments/words in + client/src/components/features/analytics/speech-analysis-tab.tsx. With empty segments, entities are + blank and speech metrics show zeros; heuristics are placeholders rather than NER output. +- home_meetings (branch finding: missing data wiring and core interactions) + - rpt4 Home Page: MeetingCard renders meeting.segments[0]?.text so all cards show "No transcript + available" when ListMeetings omits segments (client/src/components/features/meetings/meeting-card.tsx). + Tasks come from meeting.summary.action_items, but ListMeetings omits summary, so tasks disappear on + reload (client/src/pages/Home.tsx, client/src/pages/Tasks.tsx). The Ask AI button has no handler + (client/src/components/layout/app-sidebar.tsx). Home uses HOME_RECENT_MEETINGS_LIMIT=5 and slices to 6, + conflicting with the requested "last 3" (client/src/lib/constants/timing.ts, client/src/pages/Home.tsx). + Summary generation does not update meeting titles, and there is no calendar-link signal in meeting + payloads to gate that behavior. 
+ - rpt5 Meetings: UI calls listMeetings(limit=MEETINGS_PAGE_LIMIT) once with no offset or pagination UI, + even though the API returns total_count (client/src/pages/Meetings.tsx). +- people_post_meeting (branch finding: People list never populating due to data layer) + - rpt6 People: aggregateSpeakers uses meeting.segments and preferences-only speaker names + (client/src/lib/audio/speaker.ts, client/src/pages/People.tsx). With listMeetings returning empty + segments, speaker metrics are all zero. Speaker renames are local only; the renameSpeaker API exists + but is not used. + - rpt7 Post Meeting: transcript row clicks only trigger playback in Tauri; in web they only toggle + selection (client/src/pages/meeting-detail/use-playback.ts). Speaker renaming is hidden behind + double-click on SpeakerBadge and is local-only (client/src/components/common/badges/speaker-badge.tsx). + Summary tab is concise and relegated to the right panel; no dedicated view for call metrics or + background process status. Action items and entities are read-only. +- tasks_page (branch finding: task list empty) + - rpt8 Tasks: tasks are derived from meeting.summary.action_items, but summaries are not included in + ListMeetings, so lists are empty after refresh. Completion is stored in preferences only, and the + UI supports list view only (client/src/pages/Tasks.tsx). Backend already has TaskModel but no gRPC + endpoints, so edits/ownership/kanban are not supported. +
+ +
+- Selected strategy: build dedicated aggregate endpoints for analytics, speaker stats, and tasks. + - Add gRPC endpoints (e.g., GetAnalyticsOverview, ListSpeakerStats, ListTasks) with pagination and + time-series responses to replace client-side aggregation of segments/summary fields. + - Client Analytics, People, Home, and Tasks pages should consume these aggregates instead of listMeetings. +- Selected strategy: TaskModel CRUD as source of truth for action items. + - On summary persistence, create/update TaskModel rows linked to action_items for provenance. + - Add task CRUD endpoints (list/update/status/delete) to support owner, priority, due date, and kanban. + - Remove preferences-only completion state; sync status updates through the API. +- Selected strategy: move the LLM summary into the transcript panel and repurpose the right panel. + - Transcript pane shows verbose summary as the primary view; summary generation should target higher + verbosity templates by default. + - Right panel becomes call metrics (model/tokens/latency) plus processing status (summary/entities/ + diarization, background jobs) and editing surfaces for tasks/entities. +- Supporting fixes: wire Ask AI to a route or modal, add Meetings pagination using total_count, expose + renameSpeaker in the UI for persistent names, add Analytics client/server toggle for performance metrics, + and add calendar-link indicators so summary title updates only apply to non-calendar meetings. +- Transcript interaction: in web mode, highlight transcript rows on click and surface a desktop-playback + message until web playback is implemented. +
diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt1/Analytics - Meetings.md b/docs/sprints/phase-ongoing/Bugfinder/rpt1/Analytics - Meetings.md new file mode 100644 index 0000000..02a5651 --- /dev/null +++ b/docs/sprints/phase-ongoing/Bugfinder/rpt1/Analytics - Meetings.md @@ -0,0 +1,9 @@ +![pasted_image_04836a5d-3226-48bf-b3ec-527cd202fad0.png](file://C:\Users\PC\AppData\Roaming\CherryStudio\Data\Files\04836a5d-3226-48bf-b3ec-527cd202fad0.png) + +Issues + +1. The header cards for Total Words and Unique Speakers has always been 0 and never changed + +2. The graphs for Speaker Particiaption and Word Count Trends has never visualized any data + + diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt1/Analytics - Meetings.png b/docs/sprints/phase-ongoing/Bugfinder/rpt1/Analytics - Meetings.png new file mode 100644 index 0000000..bfd6ecf Binary files /dev/null and b/docs/sprints/phase-ongoing/Bugfinder/rpt1/Analytics - Meetings.png differ diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt2/Analytics - Performance.md b/docs/sprints/phase-ongoing/Bugfinder/rpt2/Analytics - Performance.md new file mode 100644 index 0000000..a580169 --- /dev/null +++ b/docs/sprints/phase-ongoing/Bugfinder/rpt2/Analytics - Performance.md @@ -0,0 +1,10 @@ +![pasted_image_fdffb3dd-d491-4e25-b16d-5c8e3e554d1b.png](file://C:\Users\PC\AppData\Roaming\CherryStudio\Data\Files\fdffb3dd-d491-4e25-b16d-5c8e3e554d1b.png) + +Issues + +1. The UI here is actually fine, however this is about 30 mins after my last recording and resources are still nearly fully allocated still - I suspect something to be hanging + + +Changes + +1. If the server and client are on differing IP addresses or devices, then there should be a switch or a toggle to view the Server or Client performance. 
\ No newline at end of file diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt2/Analytics - Performance.png b/docs/sprints/phase-ongoing/Bugfinder/rpt2/Analytics - Performance.png new file mode 100644 index 0000000..605a6e7 Binary files /dev/null and b/docs/sprints/phase-ongoing/Bugfinder/rpt2/Analytics - Performance.png differ diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt3/Analytics - Speech.md b/docs/sprints/phase-ongoing/Bugfinder/rpt3/Analytics - Speech.md new file mode 100644 index 0000000..dd97491 --- /dev/null +++ b/docs/sprints/phase-ongoing/Bugfinder/rpt3/Analytics - Speech.md @@ -0,0 +1,7 @@ +![pasted_image_9b688a19-5857-40fc-832a-02ce5ca149ef.png](file://C:\Users\PC\AppData\Roaming\CherryStudio\Data\Files\9b688a19-5857-40fc-832a-02ce5ca149ef.png) + +Issues + +1. Entity Word Map has always been blank + +2. Speech patters are either mocked data or incomplete \ No newline at end of file diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt3/Analytics - Speech.png b/docs/sprints/phase-ongoing/Bugfinder/rpt3/Analytics - Speech.png new file mode 100644 index 0000000..ba1a70d Binary files /dev/null and b/docs/sprints/phase-ongoing/Bugfinder/rpt3/Analytics - Speech.png differ diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt4/Home Page.md b/docs/sprints/phase-ongoing/Bugfinder/rpt4/Home Page.md new file mode 100644 index 0000000..4dfc71b --- /dev/null +++ b/docs/sprints/phase-ongoing/Bugfinder/rpt4/Home Page.md @@ -0,0 +1,16 @@ +![pasted_image_88eda805-636e-4f39-8f35-bbb772d66f6b.png](file://C:\Users\PC\AppData\Roaming\CherryStudio\Data\Files\88eda805-636e-4f39-8f35-bbb772d66f6b.png) + +Issues + +1. Every meeting says “No Transcript Available” no matter how long ago it finished processing + +2. Tasks and action items dont seem to be persisted + +3. Ask AI button does not seem to be working + + +Change requests + +1. Home screen should only show the last 3 recorded meetings, it currenly shows 5. + +2. 
Summarization task should also update the meeting title ONLY if the meeting isn’t connected to a calendar appointment \ No newline at end of file diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt4/Home Page.png b/docs/sprints/phase-ongoing/Bugfinder/rpt4/Home Page.png new file mode 100644 index 0000000..7316f9d Binary files /dev/null and b/docs/sprints/phase-ongoing/Bugfinder/rpt4/Home Page.png differ diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt5/Meetings.md b/docs/sprints/phase-ongoing/Bugfinder/rpt5/Meetings.md new file mode 100644 index 0000000..d50a7d0 --- /dev/null +++ b/docs/sprints/phase-ongoing/Bugfinder/rpt5/Meetings.md @@ -0,0 +1,5 @@ +![pasted_image_89fd9ba7-6eeb-4508-89b3-4e72f8cf5b99.png](file://C:\Users\PC\AppData\Roaming\CherryStudio\Data\Files\89fd9ba7-6eeb-4508-89b3-4e72f8cf5b99.png) + +Issues + +1. The past recordings displayed do not seem to be paginated and risk excessive resource consumption \ No newline at end of file diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt5/Meetings.png b/docs/sprints/phase-ongoing/Bugfinder/rpt5/Meetings.png new file mode 100644 index 0000000..fe54647 Binary files /dev/null and b/docs/sprints/phase-ongoing/Bugfinder/rpt5/Meetings.png differ diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt6/People.md b/docs/sprints/phase-ongoing/Bugfinder/rpt6/People.md new file mode 100644 index 0000000..e58d5c4 --- /dev/null +++ b/docs/sprints/phase-ongoing/Bugfinder/rpt6/People.md @@ -0,0 +1,7 @@ +![pasted_image_39c6141e-1ae5-44d8-9900-85fbd6755e99.png](file://C:\Users\PC\AppData\Roaming\CherryStudio\Data\Files\39c6141e-1ae5-44d8-9900-85fbd6755e99.png) + +Issues + +1. None of the diarized speakers identified in any of the meetings have ended up here. + +2. 
Metrics don't seem to be gathered or stored \ No newline at end of file diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt6/People.png b/docs/sprints/phase-ongoing/Bugfinder/rpt6/People.png new file mode 100644 index 0000000..238edd6 Binary files /dev/null and b/docs/sprints/phase-ongoing/Bugfinder/rpt6/People.png differ diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt7/Post Meeting.md b/docs/sprints/phase-ongoing/Bugfinder/rpt7/Post Meeting.md new file mode 100644 index 0000000..8e81aba --- /dev/null +++ b/docs/sprints/phase-ongoing/Bugfinder/rpt7/Post Meeting.md @@ -0,0 +1,18 @@ +![pasted_image_43050846-ab72-4fa6-b35f-067f01a67155.png](file://C:\Users\PC\AppData\Roaming\CherryStudio\Data\Files\43050846-ab72-4fa6-b35f-067f01a67155.png) + +Issues + +1. Clicking on a transcript line doesn’t seem to do anything + +2. No way to update speaker names + + +Changes + +1. The LLM Generated Summary should be significantly more verbose and be what’s displayed as a primary tab in the current transcript component. + +2. If the summary is moving to the transcript component, then executive summary should be replaced with call metrics, model/token usage, and any additional processes still running in the background. + +3. Should be able to edit or delete the tasks and action items, identify owners, criticality, and deadline. + + 1. 
similar capabilities should be extended to extracted entities \ No newline at end of file diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt7/Post Meeting.png b/docs/sprints/phase-ongoing/Bugfinder/rpt7/Post Meeting.png new file mode 100644 index 0000000..d61f0c5 Binary files /dev/null and b/docs/sprints/phase-ongoing/Bugfinder/rpt7/Post Meeting.png differ diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt8/Tasks.md b/docs/sprints/phase-ongoing/Bugfinder/rpt8/Tasks.md new file mode 100644 index 0000000..3b9648f --- /dev/null +++ b/docs/sprints/phase-ongoing/Bugfinder/rpt8/Tasks.md @@ -0,0 +1,12 @@ +![pasted_image_a4e73673-65d1-4e02-8a95-d1929974f342.png](file://C:\Users\PC\AppData\Roaming\CherryStudio\Data\Files\a4e73673-65d1-4e02-8a95-d1929974f342.png) + +Issues + +1. Tasks only show up when they are first extracted from a recording but never persisted past restarts. + + +Changes + +1. Should be able to display in List, Card, and Kanban views + +2. Tasks should display what meeting they came from and link to that post meeting summary \ No newline at end of file diff --git a/docs/sprints/phase-ongoing/Bugfinder/rpt8/Tasks.png b/docs/sprints/phase-ongoing/Bugfinder/rpt8/Tasks.png new file mode 100644 index 0000000..4c45e8f Binary files /dev/null and b/docs/sprints/phase-ongoing/Bugfinder/rpt8/Tasks.png differ diff --git a/docs/sprints/phase-ongoing/Bugfinder/strategy-b-implementation.md b/docs/sprints/phase-ongoing/Bugfinder/strategy-b-implementation.md new file mode 100644 index 0000000..84aad6d --- /dev/null +++ b/docs/sprints/phase-ongoing/Bugfinder/strategy-b-implementation.md @@ -0,0 +1,399 @@ +# Sprint Bugfinder Strategy B: Aggregates + Tasks + Summary Layout + +> **Size**: XL | **Owner**: TBD | **Prerequisites**: `docs/plans/2026-01-21-bugfinder-design.md` +> **Phase**: Ongoing - Bugfinder follow-ups + +--- + +## Open Issues & Prerequisites + +> ✅ **Review Date**: 2026-01-21 — Strategy B selected for aggregates, TaskModel CRUD, and summary layout. 
+ +### Blocking Issues + +| ID | Issue | Status | Resolution | +|----|-------|--------|------------| +| **B1** | None | ✅ | N/A | + +### Design Gaps to Address + +| ID | Gap | Resolution | +|----|-----|------------| +| G1 | Cache TTL and invalidation rules for analytics aggregates | Default to 60s TTL; invalidate on summary/segment writes if hook available | +| G2 | Task de-duplication key definition | Use meeting_id + normalized text; ignore case/punctuation | +| G3 | Summary verbosity default after layout move | Use existing template defaults, add client toggle in follow-up | + +### Prerequisite Verification + +| Prerequisite | Status | Notes | +|--------------|--------|-------| +| TaskModel exists in DB | ✅ | `src/noteflow/infrastructure/persistence/models/organization/task.py` | +| Action items persisted in summaries | ✅ | `src/noteflow/infrastructure/persistence/models/core/summary.py` | +| Meeting list supports project filters | ✅ | `src/noteflow/grpc/mixins/meeting/meeting_mixin.py` | + +--- + +## Validation Status (2026-01-22) + +### IMPLEMENTED + +| Component | Status | Notes | +|-----------|--------|-------| +| Analytics aggregate endpoints | ✅ Implemented | `GetAnalyticsOverview`, `ListSpeakerStats` gRPC methods + repositories | +| Task CRUD API | ✅ Implemented | Domain entity, service, repository, and gRPC endpoints (`ListTasks`, `UpdateTask`) | +| Analytics UI | ✅ Implemented | `Analytics.tsx` uses `getAnalyticsOverview` and `listSpeakerStats` | +| People page | ✅ Implemented | `People.tsx` uses server-backed `listSpeakerStats` endpoint | +| Tasks page | ✅ Implemented | `Tasks.tsx` uses server-backed `listTasks` and `updateTask` | + +### IMPLEMENTED (All Components Complete) + +**Summary layout in transcript panel** has been implemented: +- Summary now appears in a collapsible section above the transcript +- Right panel tabs reduced to Entities and Notes only +- Summary supports compact horizontal grid layout + +### PARTIALLY IMPLEMENTED + +| Component 
| Status | Notes | +|-----------|--------|-------| +| Task storage | ✅ Complete | TaskModel in DB, API implemented, task ingestion from summary action items via `_ingest_tasks_from_summary` in `_generation_mixin.py` | + +**Downstream impact**: Meeting Detail page (summary layout move). + +--- + +## Objective + +Provide server-backed analytics aggregates and task persistence so Analytics, People, and Tasks pages no +longer depend on loading meeting segments. Move the LLM summary into the transcript panel and repurpose +the right panel for metrics/status to align with Bugfinder feedback. + +--- + +## Key Decisions + +| Decision | Choice | Rationale | +|----------|--------|-----------| +| **Analytics data source** | Dedicated aggregate endpoints with in-memory caching | Scales better than listMeetings; avoids heavy payloads | +| **Task persistence** | TaskModel CRUD as source of truth | Enables edits, ownership, kanban, and sync across devices | +| **Summary layout** | Summary becomes primary content inside transcript panel | Matches requested UX priority and keeps metrics in sidebar | +| **Task dedupe** | Only new, deduped tasks on summary regen | Honors user directive; preserves user-edited tasks | + +--- + +## What Already Exists + +| Asset | Location | Implication | +|-------|----------|-------------| +| TaskModel schema + relationships | `src/noteflow/infrastructure/persistence/models/organization/task.py` | Reuse existing DB table and FK links | +| Action items in summaries | `src/noteflow/infrastructure/persistence/models/core/summary.py` | Seed tasks from action items | +| Summary save flow | `src/noteflow/grpc/mixins/summarization/_generation_mixin.py` | Hook task ingestion after summary persistence | +| Segment + word timing models | `src/noteflow/infrastructure/persistence/models/core/meeting.py` | Aggregate words/speakers for analytics | +| Usage aggregate query pattern | `src/noteflow/infrastructure/persistence/repositories/usage_event/_aggregations.py` | 
Reuse structure for analytics queries | +| Observability metrics endpoint | `src/noteflow/grpc/mixins/observability.py` | Co-locate performance data in analytics UI | + +--- + +## Scope + +| Task | Effort | Notes | +|------|--------|-------| +| **Domain Layer** | | | +| Add Task entity + TaskStatus enum | M | Align with DB statuses: open/done/dismissed | +| Add analytics DTOs (overview, speaker stats, speech metrics) | M | Plain dataclasses for service outputs | +| **Infrastructure Layer** | | | +| Build SqlAlchemyTaskRepository | L | CRUD, list filters (status, project_id(s), meeting_id) | +| Add analytics aggregation queries | L | Meeting trends, word counts, speaker stats, entity frequencies | +| Add index migration (tasks.status, tasks.meeting_id, segments.speaker_id) | M | Improve query performance | +| **Application Layer** | | | +| TaskService (CRUD + summary ingestion) | L | Create tasks from action items (dedupe) | +| AnalyticsService with TTL cache | L | Query aggregates, cache by workspace/project/date range | +| **API Layer** | | | +| Extend proto with analytics + task messages | L | New gRPC methods and enums | +| Add AnalyticsMixin + TaskMixin | L | Wire into `NoteFlowServicer` and stubs | +| Update Rust + TS generated clients | L | Regenerate stubs and adapters | +| **Client Layer** | | | +| Analytics page uses aggregate endpoints | M | Replace computeAnalytics with API data | +| People page uses speaker stats endpoint | M | Remove listMeetings dependency | +| Tasks page uses TaskModel data | L | Persist completion/status, keep project filters | +| Meeting detail layout: summary in transcript panel | L | Refactor transcript panel to include summary section | + +**Total Effort**: XL (1-2 days) + +--- + +## Domain Model + +### Task Entity + +```python +# src/noteflow/domain/entities/task.py + +class TaskStatus(Enum): + """Task lifecycle status.""" + + OPEN = "open" + DONE = "done" + DISMISSED = "dismissed" + + +@dataclass +class Task: + """User-managed 
task derived from an action item.""" + + id: UUID + workspace_id: UUID + meeting_id: UUID | None + action_item_id: int | None + text: str + status: TaskStatus = TaskStatus.OPEN + assignee_person_id: UUID | None = None + due_date: datetime | None = None + priority: int = 0 + completed_at: datetime | None = None + created_at: datetime = field(default_factory=utc_now) + updated_at: datetime = field(default_factory=utc_now) +``` + +**Constraints**: +- TaskStatus limited to open/done/dismissed (match DB constraint). +- Task dedupe key = meeting_id + normalized text (lowercase, trim, strip punctuation). + +--- + +## API Schema + +### Proto Additions (gRPC) + +```protobuf +enum TaskStatus { + TASK_STATUS_UNSPECIFIED = 0; + TASK_STATUS_OPEN = 1; + TASK_STATUS_DONE = 2; + TASK_STATUS_DISMISSED = 3; +} + +message Task { + string id = 1; + string meeting_id = 2; + int32 action_item_id = 3; + string text = 4; + TaskStatus status = 5; + string assignee_person_id = 6; + double due_date = 7; + int32 priority = 8; + double completed_at = 9; +} + +message TaskWithMeeting { + Task task = 1; + string meeting_title = 2; + double meeting_created_at = 3; + string project_id = 4; +} + +message ListTasksRequest { + repeated TaskStatus statuses = 1; + int32 limit = 2; + int32 offset = 3; + optional string project_id = 4; + repeated string project_ids = 5; + optional string meeting_id = 6; +} + +message ListTasksResponse { + repeated TaskWithMeeting tasks = 1; + int32 total_count = 2; +} + +message UpdateTaskRequest { + string task_id = 1; + string text = 2; + TaskStatus status = 3; + string assignee_person_id = 4; + double due_date = 5; + int32 priority = 6; +} + +message UpdateTaskResponse { + Task task = 1; +} + +message AnalyticsOverviewRequest { + double start_time = 1; + double end_time = 2; + optional string project_id = 3; + repeated string project_ids = 4; +} + +message DailyMeetingStats { + string date = 1; + int32 meetings = 2; + double total_duration = 3; + int32 word_count = 4; 
+} + +message AnalyticsOverviewResponse { + repeated DailyMeetingStats daily = 1; + int32 total_meetings = 2; + double total_duration = 3; + int32 total_words = 4; + int32 total_segments = 5; + int32 speaker_count = 6; +} + +message SpeakerStat { + string speaker_id = 1; + string display_name = 2; + double total_time = 3; + int32 segment_count = 4; + int32 meeting_count = 5; + double avg_confidence = 6; +} + +message ListSpeakerStatsRequest { + double start_time = 1; + double end_time = 2; + optional string project_id = 3; + repeated string project_ids = 4; +} + +message ListSpeakerStatsResponse { + repeated SpeakerStat speakers = 1; +} + +service NoteFlowService { + rpc ListTasks(ListTasksRequest) returns (ListTasksResponse); + rpc UpdateTask(UpdateTaskRequest) returns (UpdateTaskResponse); + rpc GetAnalyticsOverview(AnalyticsOverviewRequest) returns (AnalyticsOverviewResponse); + rpc ListSpeakerStats(ListSpeakerStatsRequest) returns (ListSpeakerStatsResponse); +} +``` + +--- + +## Migration Strategy + +### Phase 1: Schema +1. Add index on `noteflow.tasks.status` and `noteflow.tasks.meeting_id` for list filters. +2. Add index on `noteflow.segments.speaker_id` for speaker aggregates. + +### Phase 2: Backfill +1. Optional: backfill tasks from existing summaries by replaying summary action items per meeting. + +### Migration Risks + +| Risk | Mitigation | +|------|------------| +| Large segment tables slow aggregate queries | Use TTL cache + add indexes; consider materialized view later | +| Task dedupe false positives | Normalize text conservatively; log collisions | + +--- + +## Shared Types & Reuse Notes + +- **Aggregation pattern**: reuse SQL aggregation style from `src/noteflow/infrastructure/persistence/repositories/usage_event/_aggregations.py`. +- **Processing status UI**: reuse `ProcessingStatus` component in meeting detail sidebar. +- **Task priority badges**: reuse `client/src/components/common/PriorityBadge` and `client/src/types/task.ts`. 
+ +--- + +## UI Components (if applicable) + +### Meeting Summary Section + +```tsx +// client/src/pages/meeting-detail/summary-panel.tsx + +export function SummaryPanel({ summary, summaryMeta, onGenerateSummary }: SummaryPanelProps) { + // Render executive summary, key points, and action items inside transcript panel +} +``` + +--- + +## Deliverables + +### Backend + +**Domain Layer**: +- [x] `src/noteflow/domain/entities/task.py` — Task entity + TaskStatus enum + +**Infrastructure Layer**: +- [x] `src/noteflow/infrastructure/persistence/repositories/task_repo.py` — Task CRUD + list filters +- [x] `src/noteflow/infrastructure/persistence/repositories/analytics_repo.py` — aggregate queries +- [x] `src/noteflow/infrastructure/persistence/repositories/__init__.py` — export repositories + +**Application Layer**: +- [x] `src/noteflow/application/services/tasks/service.py` — CRUD + summary ingestion +- [x] `src/noteflow/application/services/analytics/service.py` — cached aggregates + +**API Layer**: +- [x] `src/noteflow/grpc/proto/noteflow.proto` — new messages + RPCs (TaskStatusProto, TaskProto, DailyMeetingStatsProto, SpeakerStatProto, etc.) 
+- [x] `src/noteflow/grpc/proto/noteflow_pb2.pyi` — Python type stubs for new proto types +- [x] `src/noteflow/grpc/mixins/analytics_mixin.py` — GetAnalyticsOverview + ListSpeakerStats +- [x] `src/noteflow/grpc/mixins/tasks.py` — ListTasks + UpdateTask +- [x] `src/noteflow/grpc/mixins/converters/_domain.py` — Task/Analytics proto converters +- [x] `src/noteflow/grpc/service.py` — register mixins (AnalyticsMixin, TasksMixin) + +**Migrations**: +- [x] `src/noteflow/infrastructure/persistence/migrations/versions/v6w7x8y9z0a1_add_task_analytics_indexes.py` — indexes for tasks.status, tasks.meeting_id, segments.speaker_id + +### Client + +- [x] `client/src/api/types/features/analytics.ts` — analytics request/response types +- [x] `client/src/api/types/features/tasks.ts` — task request/response types +- [x] `client/src/api/interface.ts` — new API methods +- [x] `client/src/api/adapters/tauri/sections/analytics.ts` — analytics endpoint bridge +- [x] `client/src/api/adapters/tauri/sections/tasks.ts` — task CRUD bridge +- [x] `client/src-tauri/src/commands/analytics.rs` — Rust Tauri commands +- [x] `client/src-tauri/src/commands/tasks.rs` — Rust Tauri commands +- [x] `client/src-tauri/src/grpc/client/analytics.rs` — Rust gRPC client methods +- [x] `client/src-tauri/src/grpc/client/tasks.rs` — Rust gRPC client methods +- [x] `client/src-tauri/src/grpc/types/analytics.rs` — Rust type definitions +- [x] `client/src-tauri/src/grpc/types/tasks.rs` — Rust type definitions +- [x] `client/src/pages/Analytics.tsx` — use aggregates (uses getAnalyticsOverview, listSpeakerStats) +- [x] `client/src/pages/People.tsx` — use speaker stats (uses listSpeakerStats endpoint) +- [x] `client/src/pages/Tasks.tsx` — use TaskModel data (uses listTasks, updateTask) +- [x] `client/src/pages/meeting-detail/index.tsx` — summary in collapsible section above transcript +- [x] `client/src/pages/meeting-detail/summary-panel.tsx` — added compact mode with horizontal grid layout + +--- + +## Test Strategy + 
+### Fixtures to extend or create + +- `tests/conftest.py`: add Task fixtures for task lists +- `tests/infrastructure/`: add repository fixtures for analytics aggregates + +### Parameterized tests + +- Task status transitions: open/done/dismissed +- Analytics filters: project_id vs project_ids vs none + +### Core test cases + +- **Domain**: TaskStatus normalization + completion timestamp logic +- **Service**: summary ingestion dedupe, analytics cache hits/misses +- **API**: ListTasks pagination + filters, GetAnalyticsOverview range queries +- **Integration**: task list after summary regenerate adds only new tasks + +--- + +## Quality Gates + +- [x] `pytest tests/application/test_task_service.py` passes (19 tests) +- [x] `pytest tests/application/test_analytics_service.py` passes (10 tests) +- [x] `pytest tests/grpc/test_tasks_mixin.py` passes (7 tests) +- [x] `pytest tests/grpc/test_analytics_mixin.py` passes (6 tests) +- [ ] `make quality` passes +- [x] No type suppression comments or loose typing introduced + +--- + +## Post-Sprint + +- [ ] Add kanban view for tasks and inline editing +- [ ] Add NER-based entity analytics (NamedEntityModel aggregates) +- [ ] Materialized analytics tables if cache misses become expensive diff --git a/repomix-output.md b/repomix-output.md index 3139a20..022b763 100644 --- a/repomix-output.md +++ b/repomix-output.md @@ -29,7 +29,7 @@ The content is organized as follows: ## Notes - Some files may have been excluded based on .gitignore rules and Repomix's configuration - Binary files are not included in this packed representation. 
Please refer to the Repository Structure section for a complete list of file paths, including binary files -- Only files matching these patterns are included: client/src/components +- Only files matching these patterns are included: .cupcake/policies/opencode - Files matching these patterns are excluded: **/*_pb2.py, **/*_pb2_grpc.py, **/*.pb2.py, **/*.pb2_grpc.py, **/*.pyi, **/*.wav, **/*.nfaudio, **/*.m4a, **/*.mp3, **/*.mp4, **/*.mov, **/*.avi, **/*.mkv, **/*.flv, **/*.wmv, **/*.webm, **/*.m3u8, **/noteflow.rs, **/noteflow_pb2.py, src/noteflow_pb2.py, client/src-tauri/src/grpc/noteflow.rs, src/noteflow/grpc/proto/noteflow_pb2.py, src/noteflow/grpc/proto/noteflow_pb2_grpc.py, src/noteflow/grpc/proto/noteflow_pb2.pyi, **/persistence/migrations/**, **/node_modules/**, **/target/**, **/gen/**, **/__pycache__/**, **/*.pyc, **/.pytest_cache/**, **/.mypy_cache/**, **/.ruff_cache/**, **/dist/**, **/build/**, **/.vite/**, **/coverage/**, **/htmlcov/**, **/playwright-report/**, **/test-results/**, uv.lock, **/Cargo.lock, **/package-lock.json, **/bun.lockb, **/yarn.lock, **/*.lock, **/*.lockb, **/*.png, **/*.jpg, **/*.jpeg, **/*.gif, **/*.ico, **/*.svg, **/*.icns, **/*.webp, **/*.xml, **/icons/**, **/public/**, client/app-icon.png, **/*.md, .benchmarks/**, noteflow-api-spec.json, scratch.md, repomix-output.md, **/logs/**, **/status_line.json - Files matching patterns in .gitignore are excluded - Files matching default ignore patterns are excluded @@ -40,25563 +40,3638 @@ The content is organized as follows: # Directory Structure ``` -client/ - src/ - components/ - analytics/ - analytics-card-title.tsx - analytics-utils.ts - log-entry-config.ts - log-entry.tsx - log-timeline.tsx - logs-tab.test.tsx - logs-tab.tsx - performance-tab.test.tsx - performance-tab.tsx - speech-analysis-tab.tsx - integration-config-panel/ - auth-config.tsx - calendar-config.tsx - email-config.tsx - index.tsx - oidc-config.tsx - pkm-config.tsx - shared.tsx - webhook-config.tsx - projects/ - 
ProjectList.tsx - ProjectMembersPanel.tsx - ProjectScopeFilter.tsx - ProjectSettingsPanel.tsx - ProjectSidebar.tsx - ProjectSwitcher.tsx - recording/ - audio-device-selector.test.tsx - audio-device-selector.tsx - audio-level-meter.test.tsx - audio-level-meter.tsx - buffering-indicator.test.tsx - buffering-indicator.tsx - confidence-indicator.test.tsx - confidence-indicator.tsx - idle-state.test.tsx - idle-state.tsx - index.test.ts - index.ts - listening-state.tsx - notes-panel.tsx - partial-text-display.tsx - recording-components.test.tsx - recording-header.test.tsx - recording-header.tsx - speaker-distribution.test.tsx - speaker-distribution.tsx - stat-card.test.tsx - stat-card.tsx - stats-content.tsx - stats-panel.tsx - transcript-segment-card.tsx - vad-indicator.test.tsx - vad-indicator.tsx - settings/ - advanced-local-ai-settings/ - _constants.ts - index.tsx - model-auth-section.tsx - resource-fit-panel.tsx - streaming-config-section.tsx - transcription-engine-section.tsx - integrations-section/ - custom-integration-dialog.tsx - helpers.ts - index.tsx - integration-item.tsx - types.ts - use-integration-handlers.ts - ai-config-hooks.ts - ai-config-models.ts - ai-config-section.tsx - audio-devices-section.tsx - cloud-ai-toggle.tsx - connection-diagnostics-panel.tsx - developer-options-section.tsx - export-ai-section.test.tsx - export-ai-section.tsx - index.ts - integrations-section.tsx - medium-label.tsx - ollama-status-card.tsx - provider-config-card.tsx - quick-actions-section.tsx - recording-app-policy-section.tsx - server-connection-section.tsx - summarization-settings-panel.tsx - summarization-template-creator.tsx - summarization-templates-card.tsx - summarization-templates-list.tsx - summarization-templates-manager.tsx - template-content-label.tsx - ui/ - sidebar/ - constants.ts - context.tsx - group.tsx - index.tsx - layout.tsx - menu.tsx - primitives.tsx - accordion.tsx - alert-dialog.tsx - alert.tsx - aspect-ratio.tsx - avatar.tsx - badge.tsx - 
breadcrumb.tsx - button.tsx - calendar.tsx - card.tsx - carousel.tsx - chart.tsx - checkbox.tsx - collapsible.tsx - command.tsx - confirmation-dialog.tsx - context-menu.tsx - dialog.tsx - drawer.tsx - dropdown-menu.test.tsx - dropdown-menu.tsx - form.tsx - hover-card.tsx - icon-circle.tsx - inline-label.tsx - input-otp.tsx - input.tsx - label.tsx - loading-button.tsx - menubar.tsx - navigation-menu.tsx - pagination.tsx - popover.tsx - progress.tsx - radio-group.tsx - resizable.test.tsx - resizable.tsx - scroll-area.tsx - search-icon.tsx - select.tsx - separator.tsx - sheet.tsx - skeleton.tsx - slider.tsx - sonner.tsx - status-badge.tsx - switch.tsx - table.tsx - tabs.tsx - textarea.tsx - toast.tsx - toaster.tsx - toggle-group.tsx - toggle.tsx - tooltip.tsx - ui-components.test.tsx - use-toast.ts - annotation-type-badge.tsx - api-mode-indicator.test.tsx - api-mode-indicator.tsx - app-layout.tsx - app-sidebar.tsx - calendar-connection-panel.tsx - calendar-events-panel.tsx - confirmation-dialog.tsx - connection-status.tsx - dev-profiler.tsx - empty-state.tsx - entity-highlight.test.tsx - entity-highlight.tsx - entity-management-panel.test.tsx - entity-management-panel.tsx - error-boundary.tsx - integration-config-panel.tsx - meeting-card.tsx - meeting-state-badge.tsx - NavLink.tsx - offline-banner.test.tsx - offline-banner.tsx - preferences-sync-bridge.tsx - preferences-sync-status.test.tsx - preferences-sync-status.tsx - priority-badge.tsx - processing-status.test.tsx - processing-status.tsx - secure-storage-recovery-dialog.tsx - server-switch-confirmation-dialog.tsx - simulation-confirmation-dialog.tsx - speaker-badge.test.tsx - speaker-badge.tsx - stats-card.tsx - sync-control-panel.tsx - sync-history-log.tsx - sync-status-indicator.tsx - tauri-event-listener.tsx - timestamped-notes-editor.test.tsx - timestamped-notes-editor.tsx - top-bar.tsx - upcoming-meetings.tsx - webhook-settings-panel.tsx - workspace-switcher.test.tsx - workspace-switcher.tsx +.cupcake/ + 
policies/ + opencode/ + ban_stdlib_logger.rego + block_assertion_roulette.rego + block_biome_ignore_bash.rego + block_biome_ignore.rego + block_broad_exception_handler.rego + block_code_quality_test_bash.rego + block_code_quality_test_edits.rego + block_code_quality_test_serena_plugin.rego + block_code_quality_test_serena.rego + block_datetime_now_fallback.rego + block_default_value_swallow.rego + block_duplicate_fixtures.rego + block_linter_config_frontend_bash.rego + block_linter_config_frontend.rego + block_linter_config_python_bash.rego + block_linter_config_python.rego + block_magic_numbers.rego + block_makefile_bash.rego + block_makefile_edit.rego + block_no_verify.rego + block_silent_none_return.rego + block_test_loops_conditionals.rego + block_tests_quality_bash.rego + block_tests_quality.rego + prevent_any_type.rego + prevent_type_suppression.rego + warn_baselines_edit_bash.rego + warn_baselines_edit.rego + warn_large_file.rego ``` # Files -## File: client/src/components/analytics/analytics-card-title.tsx -````typescript - 1: import type { ComponentPropsWithoutRef } from 'react'; - 2: import { CardTitle } from '@/components/ui/card'; - 3: import { flexLayout } from '@/lib/styles'; - 4: import { cn } from '@/lib/utils'; - 5: - 6: type AnalyticsCardTitleProps = ComponentPropsWithoutRef; - 7: - 8: export function AnalyticsCardTitle({ className, ...props }: AnalyticsCardTitleProps) { - 9: return ; -10: } -```` - -## File: client/src/components/analytics/analytics-utils.ts -````typescript - 1: export const SPEAKER_COLORS = [ - 2: 'hsl(var(--chart-1))', - 3: 'hsl(var(--chart-2))', - 4: 'hsl(var(--chart-3))', - 5: 'hsl(var(--chart-4))', - 6: 'hsl(var(--chart-5))', - 7: ]; - 8: - 9: export const SPEAKER_COLOR_CLASSES = [ -10: 'bg-[hsl(var(--chart-1))]', -11: 'bg-[hsl(var(--chart-2))]', -12: 'bg-[hsl(var(--chart-3))]', -13: 'bg-[hsl(var(--chart-4))]', -14: 'bg-[hsl(var(--chart-5))]', -15: ]; -16: -17: export function speakerLabel(entry: unknown): string { -18: if 
(!entry || typeof entry !== 'object') { -19: return ''; -20: } -21: const record = entry as Record; -22: const speakerId = typeof record.speakerId === 'string' ? record.speakerId : null; -23: const percentage = typeof record.percentage === 'number' ? record.percentage : null; -24: if (!speakerId || percentage === null) { -25: return ''; -26: } -27: return `${speakerId}: ${percentage.toFixed(1)}%`; -28: } -29: -30: export function wordCountTickLabel(value: unknown): string { -31: const numeric = typeof value === 'number' ? value : Number(value); -32: if (!Number.isFinite(numeric)) { -33: return ''; -34: } -35: return numeric >= 1000 ? `${(numeric / 1000).toFixed(1)}k` : `${numeric}`; -36: } -```` - -## File: client/src/components/analytics/log-entry-config.ts -````typescript - 1: import { AlertCircle, AlertTriangle, Bug, Info, type LucideIcon } from 'lucide-react'; - 2: import type { LogLevel } from '@/api/types'; - 3: - 4: export interface LevelConfig { - 5: icon: LucideIcon; - 6: color: string; - 7: bgColor: string; - 8: } - 9: -10: export const levelConfig: Record = { -11: info: { icon: Info, color: 'text-blue-500', bgColor: 'bg-blue-500/10' }, -12: warning: { icon: AlertTriangle, color: 'text-amber-500', bgColor: 'bg-amber-500/10' }, -13: error: { icon: AlertCircle, color: 'text-red-500', bgColor: 'bg-red-500/10' }, -14: debug: { icon: Bug, color: 'text-purple-500', bgColor: 'bg-purple-500/10' }, -15: }; -```` - -## File: client/src/components/analytics/log-entry.tsx -````typescript - 1: /** - 2: * Log entry component for displaying individual or grouped log entries. 
- 3: */ - 4: - 5: import { format } from 'date-fns'; - 6: import { ChevronDown } from 'lucide-react'; - 7: import type { LogLevel, LogSource } from '@/api/types'; - 8: import { Badge } from '@/components/ui/badge'; - 9: import { Button } from '@/components/ui/button'; - 10: import { Collapsible, CollapsibleContent, CollapsibleTrigger } from '@/components/ui/collapsible'; - 11: import { formatRelativeTimeMs } from '@/lib/format'; - 12: import { toFriendlyMessage } from '@/lib/log-messages'; - 13: import type { SummarizedLog } from '@/lib/log-summarizer'; - 14: import { cn } from '@/lib/utils'; - 15: import { levelConfig } from './log-entry-config'; - 16: - 17: type LogOrigin = 'client' | 'server'; - 18: type ViewMode = 'friendly' | 'technical'; - 19: - 20: export interface LogEntryData { - 21: id: string; - 22: timestamp: number; - 23: level: LogLevel; - 24: source: LogSource; - 25: message: string; - 26: details?: string; - 27: metadata?: Record; - 28: traceId?: string; - 29: spanId?: string; - 30: origin: LogOrigin; - 31: } - 32: - 33: const sourceColors: Record = { - 34: app: 'bg-chart-1/20 text-chart-1', - 35: api: 'bg-chart-2/20 text-chart-2', - 36: sync: 'bg-chart-3/20 text-chart-3', - 37: auth: 'bg-chart-4/20 text-chart-4', - 38: system: 'bg-chart-5/20 text-chart-5', - 39: }; - 40: - 41: export interface LogEntryProps { - 42: summarized: SummarizedLog; - 43: viewMode: ViewMode; - 44: isExpanded: boolean; - 45: onToggleExpanded: () => void; - 46: } - 47: - 48: export function LogEntry({ summarized, viewMode, isExpanded, onToggleExpanded }: LogEntryProps) { - 49: const {log} = summarized; - 50: const config = levelConfig[log.level]; - 51: const Icon = config.icon; - 52: const hasDetails = log.details || log.metadata || log.traceId || log.spanId; - 53: - 54: // Get display message based on view mode - 55: const displayMessage = - 56: viewMode === 'friendly' - 57: ? toFriendlyMessage(log.message, (log.metadata as Record) ?? 
{}) - 58: : log.message; - 59: - 60: // Get display timestamp based on view mode - 61: const displayTimestamp = - 62: viewMode === 'friendly' - 63: ? formatRelativeTimeMs(log.timestamp) - 64: : format(new Date(log.timestamp), 'HH:mm:ss.SSS'); - 65: - 66: return ( - 67: - 68:
- 75:
- 76:
- 77: - 78:
- 79:
- 80:
- 81: - 87: {displayTimestamp} - 88: - 89: {viewMode === 'technical' && ( - 90: <> - 91: - 92: {log.source} - 93: - 94: - 95: {log.origin} - 96: - 97: - 98: )} - 99: {summarized.isGroup && summarized.count > 1 && ( -100: -101: {summarized.count}x -102: -103: )} -104:
-105:

{displayMessage}

-106: {viewMode === 'friendly' && summarized.isGroup && summarized.count > 1 && ( -107:

{summarized.count} similar events

-108: )} -109:
-110: {(hasDetails || viewMode === 'friendly') && ( -111: -112: -117: -118: )} -119:
-120: -121: -122: -128: -129:
-130:
-131: ); -132: } -133: -134: interface LogEntryDetailsProps { -135: log: LogEntryData; -136: summarized: SummarizedLog; -137: viewMode: ViewMode; -138: sourceColors: Record; -139: } -140: -141: function LogEntryDetails({ log, summarized, viewMode, sourceColors }: LogEntryDetailsProps) { -142: return ( -143:
-144: {/* Technical details shown when expanded in friendly mode */} -145: {viewMode === 'friendly' && ( -146:
-147:

{log.message}

-148:
-149: -150: {log.source} -151: -152: -153: {log.origin} -154: -155: {format(new Date(log.timestamp), 'HH:mm:ss.SSS')} -156:
-157:
-158: )} -159: {(log.traceId || log.spanId) && ( -160:
-161: {log.traceId && ( -162: -163: trace {log.traceId} -164: -165: )} -166: {log.spanId && ( -167: -168: span {log.spanId} -169: -170: )} -171:
-172: )} -173: {log.details &&

{log.details}

} -174: {log.metadata && ( -175:
-176:           {JSON.stringify(log.metadata, null, 2)}
-177:         
-178: )} -179: {/* Show grouped logs if this is a group */} -180: {summarized.isGroup && summarized.groupedLogs && summarized.groupedLogs.length > 1 && ( -181:
-182:

All {summarized.count} events:

-183:
-184: {summarized.groupedLogs.map((groupedLog) => ( -185:
-186: {format(new Date(groupedLog.timestamp), 'HH:mm:ss.SSS')} - {groupedLog.message} -187:
-188: ))} -189:
-190:
-191: )} -192:
-193: ); -194: } -```` - -## File: client/src/components/analytics/log-timeline.tsx -````typescript - 1: /** - 2: * Timeline view for grouped logs. - 3: * - 4: * Displays log groups as collapsible cards with summary headers, - 5: * time gap indicators, and expandable log details. - 6: */ - 7: - 8: import { format } from 'date-fns'; - 9: import { - 10: AlertCircle, - 11: AlertTriangle, - 12: ChevronDown, - 13: ChevronRight, - 14: Clock, - 15: Folder, - 16: Layers, - 17: } from 'lucide-react'; - 18: import { useState } from 'react'; - 19: import { Badge } from '@/components/ui/badge'; - 20: import { Button } from '@/components/ui/button'; - 21: import { Card, CardContent, CardHeader} from '@/components/ui/card'; - 22: import { - 23: Collapsible, - 24: CollapsibleContent, - 25: CollapsibleTrigger, - 26: } from '@/components/ui/collapsible'; - 27: import { formatGap, type LogGroup } from '@/lib/log-groups'; - 28: import { isErrorGroup, isWarningGroup } from '@/lib/log-group-summarizer'; - 29: import { cn } from '@/lib/utils'; - 30: import { LogEntry as LogEntryComponent, type LogEntryData } from './log-entry'; - 31: import type { SummarizedLog } from '@/lib/log-summarizer'; - 32: - 33: /** Props for the LogTimeline component */ - 34: interface LogTimelineProps { - 35: /** Grouped logs to display */ - 36: readonly groups: readonly LogGroup[]; - 37: /** Current view mode */ - 38: readonly viewMode: 'friendly' | 'technical'; - 39: /** Maximum logs to show per group before truncation */ - 40: readonly maxLogsPerGroup?: number; - 41: /** Set of expanded log IDs */ - 42: readonly expandedLogs: ReadonlySet; - 43: /** Callback when a log is toggled */ - 44: readonly onToggleLog: (id: string) => void; - 45: } - 46: - 47: /** Props for a single timeline group */ - 48: interface TimelineGroupProps { - 49: readonly group: LogGroup; - 50: readonly viewMode: 'friendly' | 'technical'; - 51: readonly maxLogs: number; - 52: readonly expandedLogs: ReadonlySet; - 53: readonly 
onToggleLog: (id: string) => void; - 54: readonly isFirst: boolean; - 55: readonly gapFromPrevious: number | undefined; - 56: } - 57: - 58: /** Get icon for group type */ - 59: function getGroupIcon(group: LogGroup) { - 60: if (isErrorGroup(group.summary)) { - 61: return ; - 62: } - 63: if (isWarningGroup(group.summary)) { - 64: return ; - 65: } - 66: - 67: switch (group.groupType) { - 68: case 'meeting': - 69: return ; - 70: case 'operation': - 71: return ; - 72: case 'time': - 73: return ; - 74: default: - 75: return ; - 76: } - 77: } - 78: - 79: /** Get background color for group header based on status */ - 80: function getGroupHeaderClass(group: LogGroup): string { - 81: if (isErrorGroup(group.summary)) { - 82: return 'bg-red-50 dark:bg-red-950/30 border-red-200 dark:border-red-900'; - 83: } - 84: if (isWarningGroup(group.summary)) { - 85: return 'bg-yellow-50 dark:bg-yellow-950/30 border-yellow-200 dark:border-yellow-900'; - 86: } - 87: return 'bg-muted/50'; - 88: } - 89: - 90: /** Format timestamp for group header */ - 91: function groupTimeLabel(timestamp: number): string { - 92: return format(new Date(timestamp), 'HH:mm:ss'); - 93: } - 94: - 95: /** Time gap indicator between groups */ - 96: function TimeGapIndicator({ gapMs }: { readonly gapMs: number }) { - 97: return ( - 98:
- 99:
-100:
-101: -102: {formatGap(gapMs)} -103:
-104:
-105:
-106: ); -107: } -108: -109: /** Single timeline group component */ -110: function TimelineGroup({ -111: group, -112: viewMode, -113: maxLogs, -114: expandedLogs, -115: onToggleLog, -116: isFirst, -117: gapFromPrevious, -118: }: TimelineGroupProps) { -119: const [isExpanded, setIsExpanded] = useState(isFirst); -120: const logsToShow = group.logs.slice(0, maxLogs); -121: const hiddenCount = group.logs.length - logsToShow.length; -122: -123: const { summary } = group; -124: const hasErrors = summary.levelCounts.error > 0; -125: const hasWarnings = summary.levelCounts.warning > 0; -126: -127: return ( -128: <> -129: {gapFromPrevious !== undefined && gapFromPrevious > 60000 && ( -130: -131: )} -132: -133: -134: -135: -136: -137:
-138:
-139: {isExpanded ? ( -140: -141: ) : ( -142: -143: )} -144: {getGroupIcon(group)} -145:
-146: {group.label} -147: {summary.text} -148:
-149:
-150: -151:
-152: {/* Level badges */} -153: {hasErrors && ( -154: -155: {summary.levelCounts.error} error{summary.levelCounts.error !== 1 ? 's' : ''} -156: -157: )} -158: {hasWarnings && ( -159: -163: {summary.levelCounts.warning} warning -164: {summary.levelCounts.warning !== 1 ? 's' : ''} -165: -166: )} -167: -168: {/* Log count */} -169: -170: {group.logs.length} log{group.logs.length !== 1 ? 's' : ''} -171: -172: -173: {/* Time range */} -174: -175: {groupTimeLabel(group.endTime)} -176: {group.startTime !== group.endTime && ` - ${groupTimeLabel(group.startTime)}`} -177: -178:
-179:
-180:
-181:
-182: -183: -184: -185:
-186: {logsToShow.map((log) => { -187: const summarized: SummarizedLog = { -188: log, -189: count: 1, -190: isGroup: false, -191: groupedLogs: undefined, -192: }; -193: return ( -194: onToggleLog(log.id)} -200: /> -201: ); -202: })} -203: -204: {hiddenCount > 0 && ( -205: -213: )} -214:
-215:
-216:
-217:
-218:
-219: -220: ); -221: } -222: -223: /** -224: * Timeline view for displaying grouped logs. -225: * -226: * Renders log groups as collapsible cards with: -227: * - Summary headers showing group type and stats -228: * - Time gap indicators between groups -229: * - Expandable log entries within each group -230: * - Truncation with "N more..." for large groups -231: */ -232: export function LogTimeline({ -233: groups, -234: viewMode, -235: maxLogsPerGroup = 10, -236: expandedLogs, -237: onToggleLog, -238: }: LogTimelineProps) { -239: if (groups.length === 0) { -240: return null; -241: } -242: -243: return ( -244:
-245: {groups.map((group, index) => { -246: // Calculate gap from previous group -247: const previousGroup = index > 0 ? groups[index - 1] : undefined; -248: const gapFromPrevious = previousGroup -249: ? previousGroup.startTime - group.endTime -250: : undefined; -251: -252: return ( -253: -263: ); -264: })} -265:
-266: ); -267: } -```` - -## File: client/src/components/analytics/logs-tab.test.tsx -````typescript - 1: import { QueryClient, QueryClientProvider, notifyManager } from '@tanstack/react-query'; - 2: import { act, fireEvent, render, screen, waitFor } from '@testing-library/react'; - 3: import type { ReactNode } from 'react'; - 4: import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; - 5: import * as apiInterface from '@/api/interface'; - 6: import type { GetRecentLogsResponse, LogEntry } from '@/api/types'; - 7: import { addClientLog, clearClientLogs } from '@/lib/client-logs'; - 8: import { LogsTab } from './logs-tab'; - 9: - 10: // Mock the API module - 11: vi.mock('@/api/interface', () => ({ - 12: getAPI: vi.fn(), - 13: })); - 14: - 15: const clientLogState = vi.hoisted(() => ({ - 16: store: [] as Array<{ - 17: id: string; - 18: timestamp: number; - 19: level: string; - 20: source: string; - 21: message: string; - 22: details?: string; - 23: metadata?: Record; - 24: origin: 'client'; - 25: }>, - 26: listeners: new Set<(logs: Array<{ - 27: id: string; - 28: timestamp: number; - 29: level: string; - 30: source: string; - 31: message: string; - 32: details?: string; - 33: metadata?: Record; - 34: origin: 'client'; - 35: }>) => void>(), - 36: })); - 37: - 38: vi.mock('@/lib/client-logs', () => ({ - 39: getClientLogs: () => [...clientLogState.store], - 40: subscribeClientLogs: (listener: (logs: typeof clientLogState.store) => void) => { - 41: clientLogState.listeners.add(listener); - 42: act(() => listener([...clientLogState.store])); - 43: return () => clientLogState.listeners.delete(listener); - 44: }, - 45: addClientLog: ( - 46: entry: Omit<(typeof clientLogState.store)[number], 'id' | 'timestamp' | 'origin'> - 47: ) => { - 48: const next = { - 49: ...entry, - 50: id: `client-log-${Date.now()}-${Math.random().toString(16).slice(2, 8)}`, - 51: timestamp: Date.now(), - 52: origin: 'client' as const, - 53: }; - 54: 
clientLogState.store.unshift(next); - 55: clientLogState.store.splice(500); - 56: for (const listener of clientLogState.listeners) { - 57: act(() => listener([...clientLogState.store])); - 58: } - 59: }, - 60: clearClientLogs: () => { - 61: clientLogState.store.splice(0); - 62: for (const listener of clientLogState.listeners) { - 63: act(() => listener([...clientLogState.store])); - 64: } - 65: }, - 66: })); - 67: - 68: // Simplify Radix-based UI components to avoid act warnings in tests. - 69: vi.mock('@/components/ui/select', () => ({ - 70: Select: ({ children }: { children: ReactNode }) =>
{children}
, - 71: SelectTrigger: ({ children }: { children: ReactNode }) => ( - 72: - 73: ), - 74: SelectValue: ({ placeholder }: { placeholder?: string }) => {placeholder}, - 75: SelectContent: ({ children }: { children: ReactNode }) =>
{children}
, - 76: SelectItem: ({ children }: { children: ReactNode }) =>
{children}
, - 77: })); - 78: - 79: vi.mock('@/components/ui/scroll-area', () => ({ - 80: ScrollArea: ({ children }: { children: ReactNode }) =>
{children}
, - 81: })); - 82: - 83: vi.mock('@/components/ui/collapsible', () => ({ - 84: Collapsible: ({ children }: { children: ReactNode }) =>
{children}
, - 85: CollapsibleTrigger: ({ children }: { children: ReactNode }) => {children}, - 86: CollapsibleContent: ({ children }: { children: ReactNode }) =>
{children}
, - 87: })); - 88: - 89: // Mock date-fns format for deterministic output - 90: vi.mock('date-fns', async () => { - 91: const actual = await vi.importActual('date-fns'); - 92: return { - 93: ...actual, - 94: format: vi.fn((_date: Date, formatStr: string) => { - 95: if (formatStr === 'HH:mm:ss.SSS') { - 96: return '12:34:56.789'; - 97: } - 98: if (formatStr === 'HH:mm:ss') { - 99: return '12:34:56'; -100: } -101: if (formatStr === 'yyyy-MM-dd-HHmmss') { -102: return '2025-01-01-123456'; -103: } -104: return '2025-01-01'; -105: }), -106: }; -107: }); -108: -109: // Mock formatRelativeTimeMs for deterministic output -110: vi.mock('@/lib/format', async () => { -111: const actual = await vi.importActual('@/lib/format'); -112: return { -113: ...actual, -114: formatRelativeTimeMs: vi.fn(() => 'Just now'), -115: }; -116: }); -117: -118: // Helper to create QueryClient wrapper -119: function createWrapper() { -120: const queryClient = new QueryClient({ -121: defaultOptions: { -122: queries: { -123: retry: false, -124: gcTime: 0, -125: }, -126: }, -127: }); -128: return function Wrapper({ children }: { children: ReactNode }) { -129: return {children}; -130: }; -131: } -132: -133: notifyManager.setNotifyFunction((fn) => { -134: act(fn); -135: }); -136: notifyManager.setBatchNotifyFunction((fn) => { -137: act(() => { -138: fn(); -139: }); -140: }); -141: notifyManager.setScheduler((fn) => { -142: fn(); -143: }); -144: -145: async function renderLogsTab() { -146: const wrapper = createWrapper(); -147: await act(async () => { -148: render(, { wrapper }); -149: await Promise.resolve(); -150: }); -151: } -152: -153: function createMockLogEntry(overrides: Partial = {}): LogEntry { -154: return { -155: timestamp: '2025-01-01T12:34:56.789Z', -156: level: 'info', -157: source: 'app', -158: message: 'Test log message', -159: details: {}, -160: ...overrides, -161: }; -162: } -163: -164: describe('LogsTab', () => { -165: const mockAPI = { -166: getRecentLogs: vi.fn<() => Promise>(), 
-167: }; -168: -169: beforeEach(() => { -170: vi.mocked(apiInterface.getAPI).mockReturnValue( -171: mockAPI as unknown as ReturnType -172: ); -173: vi.clearAllMocks(); -174: clearClientLogs(); -175: }); -176: -177: afterEach(() => { -178: vi.unstubAllGlobals(); -179: clearClientLogs(); -180: }); -181: -182: describe('Loading State', () => { -183: it('shows loading state while fetching logs', async () => { -184: mockAPI.getRecentLogs.mockImplementation(() => new Promise(() => {})); -185: -186: await renderLogsTab(); -187: -188: await waitFor(() => { -189: expect(screen.getByText('Loading logs...')).toBeInTheDocument(); -190: }); -191: }); -192: }); -193: -194: describe('Empty State', () => { -195: it('shows empty state when no logs', async () => { -196: mockAPI.getRecentLogs.mockResolvedValue({ logs: [], total_count: 0 }); -197: -198: await renderLogsTab(); -199: -200: await waitFor(() => { -201: expect(screen.getByText('No logs found')).toBeInTheDocument(); -202: }); -203: }); -204: -205: it('suggests adjusting filters when filtered with no results', async () => { -206: mockAPI.getRecentLogs.mockResolvedValue({ logs: [], total_count: 0 }); -207: -208: await renderLogsTab(); -209: -210: await waitFor(() => { -211: expect(screen.getByText('No logs found')).toBeInTheDocument(); -212: }); -213: -214: // Type a search query to trigger filter message -215: const searchInput = screen.getByPlaceholderText('Search logs...'); -216: fireEvent.change(searchInput, { target: { value: 'nonexistent' } }); -217: -218: expect(screen.getByText('Try adjusting your filters')).toBeInTheDocument(); -219: }); -220: }); -221: -222: describe('Log Display', () => { -223: it('renders log entries from API response', async () => { -224: mockAPI.getRecentLogs.mockResolvedValue({ -225: logs: [ -226: createMockLogEntry({ message: 'First log' }), -227: createMockLogEntry({ message: 'Second log', level: 'error' }), -228: ], -229: total_count: 2, -230: }); -231: -232: await renderLogsTab(); -233: 
-234: await waitFor(() => { -235: // Messages may appear multiple times (in main view and expanded details) -236: expect(screen.getAllByText('First log').length).toBeGreaterThan(0); -237: expect(screen.getAllByText('Second log').length).toBeGreaterThan(0); -238: }); -239: }); -240: -241: it('displays log stats for each level', async () => { -242: mockAPI.getRecentLogs.mockResolvedValue({ -243: logs: [ -244: createMockLogEntry({ level: 'info' }), -245: createMockLogEntry({ level: 'info' }), -246: createMockLogEntry({ level: 'error' }), -247: createMockLogEntry({ level: 'warning' }), -248: ], -249: total_count: 4, -250: }); -251: -252: await renderLogsTab(); -253: -254: await waitFor(() => { -255: // Check that stats are rendered (cards with numbers) -256: const statCards = screen.getAllByText(/^[0-9]+$/); -257: expect(statCards.length).toBeGreaterThan(0); -258: }); -259: }); -260: -261: it('shows source badges for log entries', async () => { -262: mockAPI.getRecentLogs.mockResolvedValue({ -263: logs: [createMockLogEntry({ source: 'api', message: 'API log' })], -264: total_count: 1, -265: }); -266: -267: await renderLogsTab(); -268: -269: await waitFor(() => { -270: expect(screen.getByText('api')).toBeInTheDocument(); -271: }); -272: }); -273: -274: it('renders client logs alongside server logs', async () => { -275: mockAPI.getRecentLogs.mockResolvedValue({ logs: [], total_count: 0 }); -276: addClientLog({ -277: level: 'warning', -278: source: 'system', -279: message: 'Recording blocked by app policy', -280: metadata: { rule_id: 'zoom' }, -281: }); -282: -283: await renderLogsTab(); -284: -285: await waitFor(() => { -286: // Messages may appear multiple times (in main view and expanded details) -287: expect(screen.getAllByText('Recording blocked by app policy').length).toBeGreaterThan(0); -288: expect(screen.getAllByText('client').length).toBeGreaterThan(0); -289: }); -290: }); -291: }); -292: -293: describe('Filtering', () => { -294: it('calls API with level filter 
when selected', async () => { -295: mockAPI.getRecentLogs.mockResolvedValue({ logs: [], total_count: 0 }); -296: -297: await renderLogsTab(); -298: -299: await waitFor(() => { -300: expect(mockAPI.getRecentLogs).toHaveBeenCalled(); -301: }); -302: -303: // Initial call with no filters -304: expect(mockAPI.getRecentLogs).toHaveBeenCalledWith( -305: expect.objectContaining({ -306: level: undefined, -307: source: undefined, -308: }) -309: ); -310: }); -311: -312: it('filters logs by search query client-side', async () => { -313: mockAPI.getRecentLogs.mockResolvedValue({ -314: logs: [ -315: createMockLogEntry({ message: 'Connection established' }), -316: createMockLogEntry({ message: 'User logged in' }), -317: createMockLogEntry({ message: 'Connection closed' }), -318: ], -319: total_count: 3, -320: }); -321: -322: await renderLogsTab(); -323: -324: await waitFor(() => { -325: // Messages may appear multiple times (in main view and expanded details) -326: expect(screen.getAllByText('Connection established').length).toBeGreaterThan(0); -327: }); -328: -329: // Search for "Connection" -330: const searchInput = screen.getByPlaceholderText('Search logs...'); -331: fireEvent.change(searchInput, { target: { value: 'Connection' } }); -332: -333: expect(screen.getAllByText('Connection established').length).toBeGreaterThan(0); -334: expect(screen.getAllByText('Connection closed').length).toBeGreaterThan(0); -335: expect(screen.queryAllByText('User logged in')).toHaveLength(0); -336: }); -337: -338: it('filters logs by metadata values', async () => { -339: mockAPI.getRecentLogs.mockResolvedValue({ -340: logs: [ -341: createMockLogEntry({ message: 'Metadata log', details: { request_id: 'req-99' } }), -342: createMockLogEntry({ message: 'Other log' }), -343: ], -344: total_count: 2, -345: }); -346: -347: await renderLogsTab(); -348: -349: await waitFor(() => { -350: // Messages may appear multiple times (in main view and expanded details) -351: expect(screen.getAllByText('Metadata 
log').length).toBeGreaterThan(0); -352: }); -353: -354: const searchInput = screen.getByPlaceholderText('Search logs...'); -355: fireEvent.change(searchInput, { target: { value: 'req-99' } }); -356: -357: expect(screen.getAllByText('Metadata log').length).toBeGreaterThan(0); -358: expect(screen.queryAllByText('Other log')).toHaveLength(0); -359: }); -360: }); -361: -362: describe('Refresh', () => { -363: it('refetches logs when refresh button clicked', async () => { -364: mockAPI.getRecentLogs.mockResolvedValue({ logs: [], total_count: 0 }); -365: -366: await renderLogsTab(); -367: -368: await waitFor(() => { -369: expect(mockAPI.getRecentLogs).toHaveBeenCalledTimes(1); -370: }); -371: -372: const refreshButton = screen.getByTitle('Refresh logs'); -373: fireEvent.click(refreshButton); -374: -375: await waitFor(() => { -376: expect(mockAPI.getRecentLogs).toHaveBeenCalledTimes(2); -377: }); -378: }); -379: }); -380: -381: describe('Log Details', () => { -382: it('renders log with metadata that can be expanded', async () => { -383: mockAPI.getRecentLogs.mockResolvedValue({ -384: logs: [ -385: createMockLogEntry({ -386: message: 'Log with details', -387: details: { key: 'value' }, -388: }), -389: ], -390: total_count: 1, -391: }); -392: -393: await renderLogsTab(); -394: -395: await waitFor(() => { -396: // Messages may appear multiple times (in main view and expanded details) -397: expect(screen.getAllByText('Log with details').length).toBeGreaterThan(0); -398: }); -399: -400: // Verify the log entry is rendered - metadata expansion is a UI detail -401: // The component shows expand buttons for entries with metadata -402: const logEntries = screen.getAllByText('Log with details'); -403: expect(logEntries.length).toBeGreaterThan(0); -404: }); -405: -406: it('shows trace and span badges when correlation IDs are present', async () => { -407: mockAPI.getRecentLogs.mockResolvedValue({ -408: logs: [ -409: createMockLogEntry({ -410: message: 'Trace log', -411: trace_id: 
'trace-123', -412: span_id: 'span-456', -413: details: { request_id: 'req-99' }, -414: }), -415: ], -416: total_count: 1, -417: }); -418: -419: await renderLogsTab(); -420: -421: await waitFor(() => { -422: // Messages may appear multiple times (in main view and expanded details) -423: expect(screen.getAllByText('Trace log').length).toBeGreaterThan(0); -424: }); -425: -426: const toggleButton = screen.getByLabelText('Toggle log details'); -427: fireEvent.click(toggleButton); -428: -429: await waitFor(() => { -430: expect(screen.getAllByText(/trace-123/i).length).toBeGreaterThan(0); -431: expect(screen.getAllByText(/span-456/i).length).toBeGreaterThan(0); -432: expect(screen.getAllByText(/request_id/).length).toBeGreaterThan(0); -433: }); -434: -435: fireEvent.click(toggleButton); -436: }); -437: -438: it('handles logs without details', async () => { -439: mockAPI.getRecentLogs.mockResolvedValue({ -440: logs: [createMockLogEntry({ message: 'No details', details: undefined })], -441: total_count: 1, -442: }); -443: -444: await renderLogsTab(); -445: -446: await waitFor(() => { -447: // Messages may appear multiple times (in main view and expanded details) -448: expect(screen.getAllByText('No details').length).toBeGreaterThan(0); -449: }); -450: }); -451: }); -452: -453: describe('Export', () => { -454: it('exports logs and revokes the object URL', async () => { -455: const createObjectURL = vi.fn(() => 'blob:logs'); -456: const revokeObjectURL = vi.fn(); -457: const clickMock = vi -458: .spyOn(HTMLAnchorElement.prototype, 'click') -459: .mockImplementation(() => {}); -460: vi.stubGlobal('URL', { createObjectURL, revokeObjectURL }); -461: -462: mockAPI.getRecentLogs.mockResolvedValue({ -463: logs: [createMockLogEntry({ message: 'Export log' })], -464: total_count: 1, -465: }); -466: -467: await renderLogsTab(); -468: -469: await waitFor(() => { -470: // Messages may appear multiple times (in main view and expanded details) -471: expect(screen.getAllByText('Export 
log').length).toBeGreaterThan(0); -472: }); -473: -474: const exportButton = screen.getByTitle('Export logs'); -475: fireEvent.click(exportButton); -476: -477: expect(createObjectURL).toHaveBeenCalled(); -478: expect(revokeObjectURL).toHaveBeenCalledWith('blob:logs'); -479: clickMock.mockRestore(); -480: }); -481: }); -482: -483: describe('Footer', () => { -484: it('shows log count in footer', async () => { -485: // Use different messages to avoid summarization grouping -486: mockAPI.getRecentLogs.mockResolvedValue({ -487: logs: [ -488: createMockLogEntry({ message: 'First log' }), -489: createMockLogEntry({ message: 'Second log' }), -490: ], -491: total_count: 50, -492: }); -493: -494: await renderLogsTab(); -495: -496: await waitFor(() => { -497: expect(screen.getByText(/Showing 2 logs of 2 total/)).toBeInTheDocument(); -498: }); -499: }); -500: }); -501: }); -```` - -## File: client/src/components/analytics/logs-tab.tsx -````typescript - 1: import { useQuery } from '@tanstack/react-query'; - 2: import { format } from 'date-fns'; - 3: import { - 4: Clock, - 5: Download, - 6: Eye, - 7: FileText, - 8: Filter, - 9: Folder, - 10: Layers, - 11: List, - 12: RefreshCw, - 13: Search, - 14: Terminal, - 15: } from 'lucide-react'; - 16: import { useEffect, useMemo, useState } from 'react'; - 17: import { Timing } from '@/api/constants'; - 18: import { getAPI } from '@/api/interface'; - 19: import type { LogLevel as ApiLogLevel, LogSource as ApiLogSource } from '@/api/types'; - 20: import { LogEntry as LogEntryComponent, type LogEntryData } from '@/components/analytics/log-entry'; - 21: import { levelConfig } from '@/components/analytics/log-entry-config'; - 22: import { AnalyticsCardTitle } from '@/components/analytics/analytics-card-title'; - 23: import { LogTimeline } from '@/components/analytics/log-timeline'; - 24: import { Button } from '@/components/ui/button'; - 25: import { Card, CardContent, CardDescription, CardHeader } from '@/components/ui/card'; - 26: import { 
Input } from '@/components/ui/input'; - 27: import { ScrollArea } from '@/components/ui/scroll-area'; - 28: import { - 29: Select, - 30: SelectContent, - 31: SelectItem, - 32: SelectTrigger, - 33: SelectValue, - 34: } from '@/components/ui/select'; - 35: import { ToggleGroup, ToggleGroupItem } from '@/components/ui/toggle-group'; - 36: import { - 37: Tooltip, - 38: TooltipContent, - 39: TooltipProvider, - 40: TooltipTrigger, - 41: } from '@/components/ui/tooltip'; - 42: import { - 43: getClientLogs, - 44: subscribeClientLogs, - 45: type ClientLogEntry, - 46: } from '@/lib/client-logs'; - 47: import { convertLogEntry } from '@/lib/log-converters'; - 48: import { groupLogs, type GroupMode } from '@/lib/log-groups'; - 49: import { - 50: summarizeConsecutive, - 51: type SummarizableLog, - 52: type SummarizedLog, - 53: } from '@/lib/log-summarizer'; - 54: import { cardPadding, iconWithMargin } from '@/lib/styles'; - 55: import { cn } from '@/lib/utils'; - 56: - 57: type LogLevel = ApiLogLevel; - 58: type LogSource = ApiLogSource; - 59: type LogOrigin = 'client' | 'server'; - 60: type ViewMode = 'friendly' | 'technical'; - 61: - 62: const LOG_LEVELS: LogLevel[] = ['info', 'warning', 'error', 'debug']; - 63: - 64: export function LogsTab() { - 65: const [searchQuery, setSearchQuery] = useState(''); - 66: const [levelFilter, setLevelFilter] = useState('all'); - 67: const [sourceFilter, setSourceFilter] = useState('all'); - 68: const [originFilter, setOriginFilter] = useState('all'); - 69: const [expandedLogs, setExpandedLogs] = useState>(new Set()); - 70: const [clientLogs, setClientLogs] = useState(() => getClientLogs()); - 71: const [viewMode, setViewMode] = useState('friendly'); - 72: const [enableSummarization, setEnableSummarization] = useState(true); - 73: const [groupMode, setGroupMode] = useState('none'); - 74: - 75: useEffect(() => subscribeClientLogs(setClientLogs), []); - 76: - 77: // Fetch logs from backend - 78: const { - 79: data: logsResponse, - 80: 
isLoading, - 81: refetch, - 82: isRefetching, - 83: } = useQuery({ - 84: queryKey: ['logs', levelFilter, sourceFilter], - 85: queryFn: async () => { - 86: const api = getAPI(); - 87: return api.getRecentLogs({ - 88: limit: 500, - 89: level: levelFilter === 'all' ? undefined : levelFilter, - 90: source: sourceFilter === 'all' ? undefined : sourceFilter, - 91: }); - 92: }, - 93: refetchInterval: Timing.THIRTY_SECONDS_MS, - 94: }); - 95: - 96: const serverLogs = useMemo(() => { - 97: if (!logsResponse?.logs) { - 98: return []; - 99: } -100: return logsResponse.logs.map(convertLogEntry); -101: }, [logsResponse]); -102: -103: const mergedLogs = useMemo(() => { -104: const client = clientLogs.map((entry) => ({ -105: ...entry, -106: origin: 'client' as const, -107: })); -108: const combined = [...client, ...serverLogs]; -109: return combined.sort((a, b) => b.timestamp - a.timestamp); -110: }, [clientLogs, serverLogs]); -111: -112: // Client-side search filtering (level/source already filtered by API) -113: const filteredLogs = useMemo(() => { -114: const query = searchQuery.toLowerCase(); -115: return mergedLogs.filter((log) => { -116: if (originFilter !== 'all' && log.origin !== originFilter) { -117: return false; -118: } -119: if (levelFilter !== 'all' && log.level !== levelFilter) { -120: return false; -121: } -122: if (sourceFilter !== 'all' && log.source !== sourceFilter) { -123: return false; -124: } -125: if (query === '') { -126: return true; -127: } -128: const metadataText = log.metadata ? 
JSON.stringify(log.metadata).toLowerCase() : ''; -129: const correlationText = [log.traceId, log.spanId].filter(Boolean).join(' ').toLowerCase(); -130: return ( -131: log.message.toLowerCase().includes(query) || -132: log.details?.toLowerCase().includes(query) || -133: metadataText.includes(query) || -134: correlationText.includes(query) -135: ); -136: }); -137: }, [mergedLogs, searchQuery, originFilter, levelFilter, sourceFilter]); -138: -139: // Apply summarization when enabled -140: const summarizedLogs = useMemo(() => { -141: if (!enableSummarization) { -142: return filteredLogs.map((log) => ({ -143: log, -144: count: 1, -145: isGroup: false, -146: groupedLogs: undefined, -147: })); -148: } -149: return summarizeConsecutive(filteredLogs as SummarizableLog[]) as SummarizedLog[]; -150: }, [filteredLogs, enableSummarization]); -151: -152: // Group logs when in timeline mode -153: const logGroups = useMemo(() => { -154: if (groupMode === 'none') { -155: return []; -156: } -157: return groupLogs(filteredLogs, groupMode); -158: }, [filteredLogs, groupMode]); -159: -160: const logStats = useMemo>(() => { -161: return filteredLogs.reduce( -162: (stats, log) => { -163: stats[log.level]++; -164: return stats; -165: }, -166: { info: 0, warning: 0, error: 0, debug: 0 } -167: ); -168: }, [filteredLogs]); -169: -170: const toggleExpanded = (id: string) => { -171: setExpandedLogs((prev) => { -172: const next = new Set(prev); -173: if (next.has(id)) { -174: next.delete(id); -175: } else { -176: next.add(id); -177: } -178: return next; -179: }); -180: }; -181: -182: const handleRefresh = () => { -183: refetch(); -184: }; -185: -186: const exportLogs = () => { -187: const blob = new Blob([JSON.stringify(filteredLogs, null, 2)], { type: 'application/json' }); -188: const url = URL.createObjectURL(blob); -189: const a = document.createElement('a'); -190: a.href = url; -191: a.download = `logs-${format(new Date(), 'yyyy-MM-dd-HHmmss')}.json`; -192: a.click(); -193: 
URL.revokeObjectURL(url); -194: }; -195: -196: return ( -197:
-198: {/* Log Stats */} -199:
-200: {LOG_LEVELS.map((level) => { -201: const count = logStats[level]; -202: const config = levelConfig[level]; -203: const Icon = config.icon; -204: return ( -205: -206: -207:
-208: -209:
-210:
-211:

{count}

-212:

{level}

-213:
-214:
-215:
-216: ); -217: })} -218:
-219: -220: {/* Filters */} -221: -222: -223: -224: -225: Application Logs -226: -227: View and filter system and application logs -228: -229: -230:
-231:
-232: -233: setSearchQuery(e.target.value)} -237: className="pl-9" -238: /> -239:
-240:
-241: -257: -273: -286: -295: -298:
-299:
-300: -301: {/* View Mode Toggle */} -302:
-303:
-304: -305:
-306: View: -307: v && setViewMode(v as ViewMode)} -311: size="sm" -312: > -313: -314: -315: -316: -317: -318: -319: -320:

Friendly: Human-readable messages

-321:
-322:
-323: -324: -325: -326: -327: -328: -329: -330:

Technical: Raw log messages with IDs

-331:
-332:
-333:
-334:
-335: -336:
-337: -338: -339: -351: -352: -353:

-354: {enableSummarization -355: ? 'Showing grouped similar logs' -356: : 'Showing all individual logs'} -357:

-358:
-359:
-360:
-361: -362: {/* Group Mode Selector */} -363:
-364: Group: -365: v && setGroupMode(v as GroupMode)} -369: size="sm" -370: > -371: -372: -373: -374: -375: -376: -377: -378:

Flat list (no grouping)

-379:
-380:
-381: -382: -383: -384: -385: -386: -387: -388:

Group by meeting

-389:
-390:
-391: -392: -393: -394: -395: -396: -397: -398:

Group by time (5-minute gaps)

-399:
-400:
-401:
-402:
-403:
-404:
-405: -406: {groupMode !== 'none' ? ( -407: -408: {logGroups.length} group{logGroups.length !== 1 ? 's' : ''},{' '} -409: {filteredLogs.length} total logs -410: -411: ) : enableSummarization && summarizedLogs.some((s) => s.isGroup) ? ( -412: -413: {summarizedLogs.filter((s) => s.isGroup).length} groups,{' '} -414: {filteredLogs.length} total logs -415: -416: ) : null} -417:
-418: -419: {/* Log List */} -420: -421: {isLoading ? ( -422:
-423: -424:

Loading logs...

-425:
-426: ) : filteredLogs.length === 0 ? ( -427:
-428: -429:

No logs found

-430:

-431: {searchQuery || -432: levelFilter !== 'all' || -433: sourceFilter !== 'all' || -434: originFilter !== 'all' -435: ? 'Try adjusting your filters' -436: : 'Logs will appear here as events occur'} -437:

-438:
-439: ) : groupMode !== 'none' ? ( -440: -446: ) : ( -447:
-448: {summarizedLogs.map((summarized) => ( -449: } -452: viewMode={viewMode} -453: isExpanded={expandedLogs.has(summarized.log.id)} -454: onToggleExpanded={() => toggleExpanded(summarized.log.id)} -455: /> -456: ))} -457:
-458: )} -459:
-460: -461: {/* Footer */} -462:
-463: -464: Showing {summarizedLogs.length} -465: {enableSummarization && summarizedLogs.length !== filteredLogs.length -466: ? ` entries (${filteredLogs.length} logs)` -467: : ' logs'}{' '} -468: of {mergedLogs.length} total -469: -470: -471: {isRefetching ? 'Refreshing...' : `Last updated: ${format(new Date(), 'HH:mm:ss')}`} -472: -473:
-474:
-475:
-476:
-477: ); -478: } -```` - -## File: client/src/components/analytics/performance-tab.test.tsx -````typescript - 1: import { QueryClient, QueryClientProvider } from '@tanstack/react-query'; - 2: import { fireEvent, render, screen, waitFor } from '@testing-library/react'; - 3: import { beforeEach, describe, expect, it, vi } from 'vitest'; - 4: import * as apiInterface from '@/api/interface'; - 5: import type { GetPerformanceMetricsResponse, PerformanceMetricsPoint } from '@/api/types'; - 6: import { PerformanceTab } from './performance-tab'; - 7: - 8: // Mock the API module - 9: vi.mock('@/api/interface', () => ({ - 10: getAPI: vi.fn(), - 11: })); - 12: - 13: // Mock date-fns format for deterministic output - 14: vi.mock('date-fns', async () => { - 15: const actual = await vi.importActual('date-fns'); - 16: return { - 17: ...actual, - 18: format: vi.fn(() => '12:00'), - 19: }; - 20: }); - 21: - 22: // Mock recharts to avoid rendering issues in tests - 23: vi.mock('recharts', () => ({ - 24: AreaChart: ({ children }: { children: React.ReactNode }) => ( - 25: - 28: ), - 29: Area: () => null, - 30: LineChart: ({ children }: { children: React.ReactNode }) => ( - 31: - 34: ), - 35: Line: () => null, - 36: XAxis: () => null, - 37: YAxis: () => null, - 38: CartesianGrid: () => null, - 39: ResponsiveContainer: ({ children }: { children: React.ReactNode }) => ( - 40:
{children}
- 41: ), - 42: })); - 43: - 44: // Mock chart components - 45: vi.mock('@/components/ui/chart', () => ({ - 46: ChartContainer: ({ children }: { children: React.ReactNode }) => ( - 47:
{children}
- 48: ), - 49: ChartTooltip: () => null, - 50: ChartTooltipContent: () => null, - 51: })); - 52: - 53: // Helper to create QueryClient wrapper - 54: function createWrapper() { - 55: const queryClient = new QueryClient({ - 56: defaultOptions: { - 57: queries: { - 58: retry: false, - 59: gcTime: 0, - 60: }, - 61: }, - 62: }); - 63: return function Wrapper({ children }: { children: React.ReactNode }) { - 64: return {children}; - 65: }; - 66: } - 67: - 68: function createMockMetricsPoint( - 69: overrides: Partial = {} - 70: ): PerformanceMetricsPoint { - 71: return { - 72: timestamp: Date.now() / 1000, - 73: cpu_percent: 25.0, - 74: memory_percent: 50.0, - 75: memory_mb: 4096, - 76: disk_percent: 35.0, - 77: network_bytes_sent: 1024, - 78: network_bytes_recv: 2048, - 79: process_memory_mb: 256, - 80: active_connections: 5, - 81: ...overrides, - 82: }; - 83: } - 84: - 85: describe('PerformanceTab', () => { - 86: const mockAPI = { - 87: getPerformanceMetrics: vi.fn<() => Promise>(), - 88: }; - 89: - 90: beforeEach(() => { - 91: vi.mocked(apiInterface.getAPI).mockReturnValue( - 92: mockAPI as unknown as ReturnType - 93: ); - 94: vi.clearAllMocks(); - 95: - 96: // Mock navigator properties - 97: Object.defineProperty(navigator, 'platform', { - 98: value: 'TestPlatform', - 99: configurable: true, -100: }); -101: Object.defineProperty(navigator, 'hardwareConcurrency', { -102: value: 8, -103: configurable: true, -104: }); -105: Object.defineProperty(navigator, 'onLine', { -106: value: true, -107: configurable: true, -108: }); -109: Object.defineProperty(navigator, 'deviceMemory', { -110: value: 16, -111: configurable: true, -112: }); -113: }); -114: -115: describe('Loading State', () => { -116: it('shows loading state while fetching metrics', async () => { -117: mockAPI.getPerformanceMetrics.mockImplementation(() => new Promise(() => {})); -118: -119: render(, { wrapper: createWrapper() }); -120: -121: expect(screen.getByText('Loading...')).toBeInTheDocument(); -122: }); 
-123: }); -124: -125: describe('Health Score', () => { -126: it('displays system health section', async () => { -127: mockAPI.getPerformanceMetrics.mockResolvedValue({ -128: current: createMockMetricsPoint(), -129: history: [], -130: }); -131: -132: render(, { wrapper: createWrapper() }); -133: -134: await waitFor(() => { -135: expect(screen.getByText('System Health')).toBeInTheDocument(); -136: }); -137: }); -138: -139: it('shows healthy status for good metrics', async () => { -140: mockAPI.getPerformanceMetrics.mockResolvedValue({ -141: current: createMockMetricsPoint({ cpu_percent: 20, memory_percent: 30 }), -142: history: [createMockMetricsPoint({ cpu_percent: 20, memory_percent: 30 })], -143: }); -144: -145: render(, { wrapper: createWrapper() }); -146: -147: await waitFor(() => { -148: expect(screen.getByText('Healthy')).toBeInTheDocument(); -149: expect(screen.getByText('All systems are running optimally')).toBeInTheDocument(); -150: }); -151: }); -152: -153: it('shows moderate status for elevated metrics', async () => { -154: mockAPI.getPerformanceMetrics.mockResolvedValue({ -155: current: createMockMetricsPoint({ cpu_percent: 60, memory_percent: 70 }), -156: history: [createMockMetricsPoint({ cpu_percent: 60, memory_percent: 70 })], -157: }); -158: -159: render(, { wrapper: createWrapper() }); -160: -161: await waitFor(() => { -162: expect(screen.getByText('Moderate')).toBeInTheDocument(); -163: expect(screen.getByText('Some metrics could be improved')).toBeInTheDocument(); -164: }); -165: }); -166: -167: it('shows degraded status for high resource usage', async () => { -168: mockAPI.getPerformanceMetrics.mockResolvedValue({ -169: current: createMockMetricsPoint({ cpu_percent: 95, memory_percent: 95 }), -170: history: [createMockMetricsPoint({ cpu_percent: 95, memory_percent: 95 })], -171: }); -172: -173: render(, { wrapper: createWrapper() }); -174: -175: // With very high CPU and memory, health score drops - verify the component renders -176: await 
waitFor(() => { -177: expect(screen.getByText('System Health')).toBeInTheDocument(); -178: }); -179: -180: // The health score should be visible (a number in the gauge) -181: const healthGauge = screen.getByText('System Health'); -182: expect(healthGauge).toBeInTheDocument(); -183: }); -184: }); -185: -186: describe('Metric Cards', () => { -187: it('displays CPU usage metric', async () => { -188: mockAPI.getPerformanceMetrics.mockResolvedValue({ -189: current: createMockMetricsPoint({ cpu_percent: 45 }), -190: history: [createMockMetricsPoint({ cpu_percent: 45 })], -191: }); -192: -193: render(, { wrapper: createWrapper() }); -194: -195: await waitFor(() => { -196: expect(screen.getByText('CPU Usage')).toBeInTheDocument(); -197: expect(screen.getByText('45.0')).toBeInTheDocument(); -198: }); -199: }); -200: -201: it('displays memory usage metric', async () => { -202: mockAPI.getPerformanceMetrics.mockResolvedValue({ -203: current: createMockMetricsPoint({ memory_percent: 60 }), -204: history: [createMockMetricsPoint({ memory_percent: 60 })], -205: }); -206: -207: render(, { wrapper: createWrapper() }); -208: -209: await waitFor(() => { -210: expect(screen.getByText('Memory Usage')).toBeInTheDocument(); -211: }); -212: }); -213: -214: it('displays network latency metric', async () => { -215: mockAPI.getPerformanceMetrics.mockResolvedValue({ -216: current: createMockMetricsPoint(), -217: history: [createMockMetricsPoint()], -218: }); -219: -220: render(, { wrapper: createWrapper() }); -221: -222: await waitFor(() => { -223: expect(screen.getByText('Network Latency')).toBeInTheDocument(); -224: }); -225: }); -226: -227: it('displays frame rate metric', async () => { -228: mockAPI.getPerformanceMetrics.mockResolvedValue({ -229: current: createMockMetricsPoint(), -230: history: [createMockMetricsPoint()], -231: }); -232: -233: render(, { wrapper: createWrapper() }); -234: -235: await waitFor(() => { -236: expect(screen.getByText('Frame Rate')).toBeInTheDocument(); -237: 
}); -238: }); -239: }); -240: -241: describe('Charts', () => { -242: it('renders CPU and memory chart', async () => { -243: mockAPI.getPerformanceMetrics.mockResolvedValue({ -244: current: createMockMetricsPoint(), -245: history: [createMockMetricsPoint(), createMockMetricsPoint()], -246: }); -247: -248: render(, { wrapper: createWrapper() }); -249: -250: await waitFor(() => { -251: expect(screen.getByText('CPU & Memory Over Time')).toBeInTheDocument(); -252: }); -253: }); -254: -255: it('renders network and rendering chart', async () => { -256: mockAPI.getPerformanceMetrics.mockResolvedValue({ -257: current: createMockMetricsPoint(), -258: history: [createMockMetricsPoint(), createMockMetricsPoint()], -259: }); -260: -261: render(, { wrapper: createWrapper() }); -262: -263: await waitFor(() => { -264: expect(screen.getByText('Network & Rendering')).toBeInTheDocument(); -265: }); -266: }); -267: }); -268: -269: describe('System Information', () => { -270: it('displays system information section', async () => { -271: mockAPI.getPerformanceMetrics.mockResolvedValue({ -272: current: createMockMetricsPoint(), -273: history: [], -274: }); -275: -276: render(, { wrapper: createWrapper() }); -277: -278: await waitFor(() => { -279: expect(screen.getByText('System Information')).toBeInTheDocument(); -280: }); -281: }); -282: -283: it('shows platform information', async () => { -284: mockAPI.getPerformanceMetrics.mockResolvedValue({ -285: current: createMockMetricsPoint(), -286: history: [], -287: }); -288: -289: render(, { wrapper: createWrapper() }); -290: -291: await waitFor(() => { -292: expect(screen.getByText('Platform')).toBeInTheDocument(); -293: expect(screen.getByText('TestPlatform')).toBeInTheDocument(); -294: }); -295: }); -296: -297: it('shows hardware info', async () => { -298: mockAPI.getPerformanceMetrics.mockResolvedValue({ -299: current: createMockMetricsPoint(), -300: history: [], -301: }); -302: -303: render(, { wrapper: createWrapper() }); -304: -305: 
await waitFor(() => { -306: // System info section shows CPU cores label -307: expect(screen.getByText('CPU Cores')).toBeInTheDocument(); -308: }); -309: }); -310: -311: it('shows network status', async () => { -312: mockAPI.getPerformanceMetrics.mockResolvedValue({ -313: current: createMockMetricsPoint(), -314: history: [], -315: }); -316: -317: render(, { wrapper: createWrapper() }); -318: -319: await waitFor(() => { -320: // Network Status is shown in system info -321: expect(screen.getByText('Network Status')).toBeInTheDocument(); -322: }); -323: }); -324: }); -325: -326: describe('Refresh', () => { -327: it('refetches metrics when refresh button clicked', async () => { -328: mockAPI.getPerformanceMetrics.mockResolvedValue({ -329: current: createMockMetricsPoint(), -330: history: [], -331: }); -332: -333: render(, { wrapper: createWrapper() }); -334: -335: await waitFor(() => { -336: expect(mockAPI.getPerformanceMetrics).toHaveBeenCalledTimes(1); -337: }); -338: -339: // Wait for button to show "Refresh" (not "Loading...") -340: await waitFor(() => { -341: expect(screen.getByRole('button', { name: /refresh/i })).toBeInTheDocument(); -342: }); -343: -344: const refreshButton = screen.getByRole('button', { name: /refresh/i }); -345: fireEvent.click(refreshButton); -346: -347: await waitFor(() => { -348: expect(mockAPI.getPerformanceMetrics).toHaveBeenCalledTimes(2); -349: }); -350: }); -351: }); -352: }); -```` - -## File: client/src/components/analytics/performance-tab.tsx -````typescript - 1: import { useQuery } from '@tanstack/react-query'; - 2: import { format } from 'date-fns'; - 3: import { - 4: Activity, - 5: Cpu, - 6: Gauge, - 7: HardDrive, - 8: type LucideIcon, - 9: RefreshCw, - 10: Server, - 11: Wifi, - 12: } from 'lucide-react'; - 13: import { useMemo } from 'react'; - 14: import { Area, AreaChart, CartesianGrid, Line, LineChart, XAxis, YAxis } from 'recharts'; - 15: import { getAPI } from '@/api/interface'; - 16: import { METRICS_REFRESH_INTERVAL_MS } 
from '@/lib/timing-constants'; - 17: import type { PerformanceMetricsPoint } from '@/api/types'; - 18: import { Badge } from '@/components/ui/badge'; - 19: import { Button } from '@/components/ui/button'; - 20: import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/card'; - 21: import { ChartContainer, ChartTooltip, ChartTooltipContent } from '@/components/ui/chart'; - 22: import { chartStrokes, flexLayout, iconWithMargin, overflow, typography } from '@/lib/styles'; - 23: import { cn } from '@/lib/utils'; - 24: - 25: // Mapped performance metric for UI display - 26: interface PerformanceMetric { - 27: timestamp: number; - 28: cpu: number; - 29: memory: number; - 30: networkLatency: number; - 31: fps: number; - 32: } - 33: - 34: interface SystemInfo { - 35: platform: string; - 36: userAgent: string; - 37: language: string; - 38: cookiesEnabled: boolean; - 39: onLine: boolean; - 40: hardwareConcurrency: number; - 41: deviceMemory?: number; - 42: } - 43: - 44: // Convert API metric to UI display format - 45: function convertMetric(point: PerformanceMetricsPoint): PerformanceMetric { - 46: // Estimate network latency from active connections (more connections = higher latency) - 47: const estimatedLatency = 20 + point.active_connections * 3; - 48: return { - 49: timestamp: point.timestamp, - 50: cpu: point.cpu_percent, - 51: memory: point.memory_percent, - 52: networkLatency: estimatedLatency, - 53: fps: 60 - (point.cpu_percent > 80 ? 15 : point.cpu_percent > 50 ? 
5 : 0), // Estimate FPS from CPU load - 54: }; - 55: } - 56: - 57: function getSystemInfo(): SystemInfo { - 58: return { - 59: platform: navigator.platform || 'Unknown', - 60: userAgent: navigator.userAgent, - 61: language: navigator.language, - 62: cookiesEnabled: navigator.cookieEnabled, - 63: onLine: navigator.onLine, - 64: hardwareConcurrency: navigator.hardwareConcurrency || 1, - 65: deviceMemory: navigator.deviceMemory, - 66: }; - 67: } - 68: - 69: interface MetricCardProps { - 70: icon: LucideIcon; - 71: title: string; - 72: value: number; - 73: unit: string; - 74: status: 'good' | 'warning' | 'critical'; - 75: trend?: 'up' | 'down' | 'stable'; - 76: } - 77: - 78: function MetricCard({ icon: Icon, title, value, unit, status, trend }: MetricCardProps) { - 79: const statusColors = { - 80: good: 'text-green-500 bg-green-500/10', - 81: warning: 'text-amber-500 bg-amber-500/10', - 82: critical: 'text-red-500 bg-red-500/10', - 83: }; - 84: - 85: return ( - 86: - 87: - 88:
- 89:
- 90: - 91:
- 92: {trend && ( - 93: - 94: {trend === 'up' && '↑'} - 95: {trend === 'down' && '↓'} - 96: {trend === 'stable' && '→'} - 97: - 98: )} - 99:
-100:
-101:

{title}

-102:

-103: {value.toFixed(1)} -104: {unit} -105:

-106:
-107:
-108:
-109: ); -110: } -111: -112: const headerRowClass = flexLayout.rowBetween; -113: const titleRowClass = flexLayout.itemsGap2; -114: -115: export function PerformanceTab() { -116: const systemInfo = useMemo(() => getSystemInfo(), []); -117: -118: // Fetch metrics from backend -119: const { -120: data: metricsResponse, -121: isLoading, -122: refetch, -123: isRefetching, -124: } = useQuery({ -125: queryKey: ['performance-metrics'], -126: queryFn: async () => { -127: const api = getAPI(); -128: return api.getPerformanceMetrics({ history_minutes: 60 }); -129: }, -130: refetchInterval: METRICS_REFRESH_INTERVAL_MS, -131: }); -132: -133: const performanceData = useMemo(() => { -134: if (!metricsResponse?.history) { -135: return []; -136: } -137: return metricsResponse.history.map(convertMetric); -138: }, [metricsResponse]); -139: -140: const handleRefresh = () => { -141: refetch(); -142: }; -143: -144: const latestMetrics = useMemo(() => { -145: return ( -146: performanceData[performanceData.length - 1] || { -147: cpu: 0, -148: memory: 0, -149: networkLatency: 0, -150: fps: 60, -151: } -152: ); -153: }, [performanceData]); -154: -155: const chartData = performanceData.map((m) => ({ -156: ...m, -157: time: format(new Date(m.timestamp), 'HH:mm'), -158: })); -159: -160: const chartConfig = { -161: cpu: { label: 'CPU %', color: 'hsl(var(--chart-1))' }, -162: memory: { label: 'Memory %', color: 'hsl(var(--chart-2))' }, -163: networkLatency: { label: 'Latency (ms)', color: 'hsl(var(--chart-3))' }, -164: fps: { label: 'FPS', color: 'hsl(var(--chart-4))' }, -165: }; -166: const gridProps = { strokeDasharray: '3 3', className: chartStrokes.muted }; -167: const defaultTooltip = ; -168: -169: const getStatus = ( -170: value: number, -171: thresholds: [number, number] -172: ): 'good' | 'warning' | 'critical' => { -173: if (value < thresholds[0]) { -174: return 'good'; -175: } -176: if (value < thresholds[1]) { -177: return 'warning'; -178: } -179: return 'critical'; -180: }; -181: 
-182: const healthScore = useMemo(() => { -183: const cpuScore = Math.max(0, 100 - latestMetrics.cpu); -184: const memScore = Math.max(0, 100 - latestMetrics.memory); -185: const latencyScore = Math.max(0, 100 - latestMetrics.networkLatency / 2); -186: const fpsScore = (latestMetrics.fps / 60) * 100; -187: return (cpuScore + memScore + latencyScore + fpsScore) / 4; -188: }, [latestMetrics]); -189: -190: return ( -191:
-192: {/* Overall Health */} -193: -194: -195:
-196: -197: -198: System Health -199: -200: Overall application and system performance -201:
-202: -213:
-214: -215:
-216:
-217: -218: Health Score: {healthScore}% -219: -228: = 70 -238: ? 'text-green-500' -239: : healthScore >= 40 -240: ? 'text-amber-500' -241: : 'text-red-500' -242: } -243: /> -244: -245:
-246: {Math.round(healthScore)} -247:
-248:
-249:
-250:

-251: {healthScore >= 70 ? 'Healthy' : healthScore >= 40 ? 'Moderate' : 'Needs Attention'} -252:

-253:

-254: {healthScore >= 70 -255: ? 'All systems are running optimally' -256: : healthScore >= 40 -257: ? 'Some metrics could be improved' -258: : 'Performance issues detected'} -259:

-260:
-261: -262: {navigator.onLine ? 'Online' : 'Offline'} -263: -264: {systemInfo.hardwareConcurrency} cores -265: {systemInfo.deviceMemory && ( -266: {systemInfo.deviceMemory}GB RAM -267: )} -268:
-269:
-270:
-271:
-272:
-273: -274: {/* Metric Cards */} -275:
-276: -284: -292: -300: = 50 ? 'good' : latestMetrics.fps >= 30 ? 'warning' : 'critical' -307: } -308: trend="stable" -309: /> -310:
-311: -312: {/* Performance Charts */} -313:
-314: -315: -316: -317: -318: CPU & Memory Over Time -319: -320: Resource utilization trends -321: -322: -323:
-324: -325: -326: -327: -328: -329: -330: -331: -332: -333: -334: -335: -336: -337: -342: -347: -348: -356: -364: -365: -366:
-367:
-368:
-369: -370: -371: -372: -373: -374: Network & Rendering -375: -376: Latency and frame rate metrics -377: -378: -379:
-380: -381: -382: -383: -388: -389: -390: -398: -406: -407: -408:
-409:
-410:
-411:
-412: -413: {/* System Info */} -414: -415: -416: -417: -418: System Information -419: -420: Client environment details -421: -422: -423:
-424:
-425:

Platform

-426:

{systemInfo.platform}

-427:
-428:
-429:

Language

-430:

{systemInfo.language}

-431:
-432:
-433:

CPU Cores

-434:

{systemInfo.hardwareConcurrency}

-435:
-436: {systemInfo.deviceMemory && ( -437:
-438:

Device Memory

-439:

{systemInfo.deviceMemory} GB

-440:
-441: )} -442:
-443:

Cookies

-444:

-445: {systemInfo.cookiesEnabled ? 'Enabled' : 'Disabled'} -446:

-447:
-448:
-449:

Network Status

-450:

{systemInfo.onLine ? 'Online' : 'Offline'}

-451:
-452:
-453:
-454:
-455:
-456: ); -457: } -```` - -## File: client/src/components/analytics/speech-analysis-tab.tsx -````typescript - 1: import { AlertCircle, Brain, Hash, Lightbulb, MessageSquare, TrendingUp } from 'lucide-react'; - 2: import { useMemo } from 'react'; - 3: import type { Meeting } from '@/api/types'; - 4: import { AnalyticsCardTitle } from '@/components/analytics/analytics-card-title'; - 5: import { Badge } from '@/components/ui/badge'; - 6: import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/card'; - 7: import { Progress } from '@/components/ui/progress'; - 8: import { typography } from '@/lib/styles'; - 9: - 10: interface EntityData { - 11: text: string; - 12: type: 'topic' | 'action' | 'question' | 'keyword'; - 13: count: number; - 14: weight: number; - 15: } - 16: - 17: interface SpeechPattern { - 18: name: string; - 19: description: string; - 20: score: number; - 21: feedback: string; - 22: type: 'positive' | 'neutral' | 'improvement'; - 23: } - 24: - 25: const WORDS_PER_MINUTE_BASE = 60; - 26: const OPTIMAL_WPM_MIN = WORDS_PER_MINUTE_BASE * 2; - 27: const OPTIMAL_WPM_MAX = WORDS_PER_MINUTE_BASE * 3; - 28: const OPTIMAL_WPM_TARGET = (WORDS_PER_MINUTE_BASE * 5) / 2; - 29: - 30: function extractEntities(meetings: Meeting[]): EntityData[] { - 31: const entityMap = new Map(); - 32: - 33: // Common filler words to exclude - 34: const stopWords = new Set([ - 35: 'the', - 36: 'a', - 37: 'an', - 38: 'is', - 39: 'are', - 40: 'was', - 41: 'were', - 42: 'be', - 43: 'been', - 44: 'being', - 45: 'have', - 46: 'has', - 47: 'had', - 48: 'do', - 49: 'does', - 50: 'did', - 51: 'will', - 52: 'would', - 53: 'could', - 54: 'should', - 55: 'may', - 56: 'might', - 57: 'must', - 58: 'shall', - 59: 'can', - 60: 'need', - 61: 'dare', - 62: 'ought', - 63: 'used', - 64: 'to', - 65: 'of', - 66: 'in', - 67: 'for', - 68: 'on', - 69: 'with', - 70: 'at', - 71: 'by', - 72: 'from', - 73: 'as', - 74: 'into', - 75: 'through', - 76: 'during', - 77: 'before', - 78: 
'after', - 79: 'above', - 80: 'below', - 81: 'between', - 82: 'under', - 83: 'again', - 84: 'further', - 85: 'then', - 86: 'once', - 87: 'here', - 88: 'there', - 89: 'when', - 90: 'where', - 91: 'why', - 92: 'how', - 93: 'all', - 94: 'each', - 95: 'few', - 96: 'more', - 97: 'most', - 98: 'other', - 99: 'some', -100: 'such', -101: 'no', -102: 'nor', -103: 'not', -104: 'only', -105: 'own', -106: 'same', -107: 'so', -108: 'than', -109: 'too', -110: 'very', -111: 'just', -112: 'and', -113: 'but', -114: 'if', -115: 'or', -116: 'because', -117: 'until', -118: 'while', -119: 'although', -120: 'though', -121: 'after', -122: 'that', -123: 'this', -124: 'these', -125: 'those', -126: 'i', -127: 'you', -128: 'he', -129: 'she', -130: 'it', -131: 'we', -132: 'they', -133: 'what', -134: 'which', -135: 'who', -136: 'whom', -137: 'me', -138: 'him', -139: 'her', -140: 'us', -141: 'them', -142: 'my', -143: 'your', -144: 'his', -145: 'its', -146: 'our', -147: 'their', -148: 'mine', -149: 'yours', -150: 'hers', -151: 'ours', -152: 'theirs', -153: 'um', -154: 'uh', -155: 'like', -156: 'yeah', -157: 'okay', -158: 'ok', -159: 'right', -160: 'well', -161: 'so', -162: 'actually', -163: 'basically', -164: 'literally', -165: 'really', -166: 'very', -167: 'just', -168: ]); -169: -170: for (const meeting of meetings) { -171: for (const segment of meeting.segments) { -172: for (const wordTiming of segment.words) { -173: const text = wordTiming.word.toLowerCase().replace(/[^a-z0-9]/g, ''); -174: if (text.length < 3 || stopWords.has(text)) { -175: continue; -176: } -177: -178: const existing = entityMap.get(text); -179: if (existing) { -180: existing.count++; -181: } else { -182: // Determine type based on heuristics -183: let type: EntityData['type'] = 'keyword'; -184: if (text.endsWith('ing') || text.endsWith('tion')) { -185: type = 'action'; -186: } else if (text.length > 8) { -187: type = 'topic'; -188: } -189: -190: entityMap.set(text, { count: 1, type }); -191: } -192: } -193: } -194: } 
-195: -196: // Convert to array and calculate weights -197: const maxCount = Math.max(...Array.from(entityMap.values()).map((e) => e.count), 1); -198: -199: return Array.from(entityMap.entries()) -200: .map(([text, { count, type }]) => ({ -201: text, -202: type, -203: count, -204: weight: count / maxCount, -205: })) -206: .sort((a, b) => b.count - a.count) -207: .slice(0, 50); -208: } -209: -210: function analyzeSpeechPatterns(meetings: Meeting[]): SpeechPattern[] { -211: if (meetings.length === 0) { -212: return []; -213: } -214: -215: // Calculate various metrics -216: let totalWords = 0; -217: let totalDuration = 0; -218: let questionCount = 0; -219: let fillerWords = 0; -220: const fillerWordSet = new Set([ -221: 'um', -222: 'uh', -223: 'like', -224: 'you know', -225: 'basically', -226: 'actually', -227: 'literally', -228: 'right', -229: ]); -230: -231: const speakerWordCounts = new Map(); -232: const sentenceLengths: number[] = []; -233: -234: for (const meeting of meetings) { -235: totalDuration += meeting.duration_seconds; -236: -237: for (const segment of meeting.segments) { -238: const wordCount = segment.words.length; -239: totalWords += wordCount; -240: sentenceLengths.push(wordCount); -241: -242: speakerWordCounts.set( -243: segment.speaker_id, -244: (speakerWordCounts.get(segment.speaker_id) || 0) + wordCount -245: ); -246: -247: for (const wordTiming of segment.words) { -248: const text = wordTiming.word.toLowerCase(); -249: if (text.includes('?')) { -250: questionCount++; -251: } -252: if (fillerWordSet.has(text.replace(/[^a-z\s]/g, ''))) { -253: fillerWords++; -254: } -255: } -256: } -257: } -258: -259: const avgWordsPerMinute = totalDuration > 0 ? totalWords / (totalDuration / 60) : 0; -260: const avgSentenceLength = -261: sentenceLengths.length > 0 -262: ? sentenceLengths.reduce((a, b) => a + b, 0) / sentenceLengths.length -263: : 0; -264: const fillerRatio = totalWords > 0 ? 
(fillerWords / totalWords) * 100 : 0; -265: const questionRatio = totalWords > 0 ? (questionCount / totalWords) * 1000 : 0; // per 1000 words -266: -267: const patterns: SpeechPattern[] = [ -268: { -269: name: 'Speaking Pace', -270: description: `${Math.round(avgWordsPerMinute)} words per minute`, -271: score: Math.min(100, Math.max(0, 100 - Math.abs(avgWordsPerMinute - OPTIMAL_WPM_TARGET) / 2)), -272: feedback: -273: avgWordsPerMinute < OPTIMAL_WPM_MIN -274: ? 'Consider speaking slightly faster for better engagement' -275: : avgWordsPerMinute > OPTIMAL_WPM_MAX -276: ? 'Try slowing down to improve clarity' -277: : 'Your pace is in the optimal range', -278: type: -279: avgWordsPerMinute >= OPTIMAL_WPM_MIN && avgWordsPerMinute <= OPTIMAL_WPM_MAX -280: ? 'positive' -281: : 'improvement', -282: }, -283: { -284: name: 'Clarity Score', -285: description: `Avg ${avgSentenceLength.toFixed(1)} words per segment`, -286: score: Math.min(100, Math.max(0, 100 - Math.abs(avgSentenceLength - 15) * 3)), -287: feedback: -288: avgSentenceLength > 25 -289: ? 'Breaking up longer segments can improve clarity' -290: : avgSentenceLength < 8 -291: ? 'Consider expanding on points for better context' -292: : 'Your segment lengths support good comprehension', -293: type: avgSentenceLength >= 8 && avgSentenceLength <= 25 ? 'positive' : 'neutral', -294: }, -295: { -296: name: 'Filler Word Usage', -297: description: `${fillerRatio.toFixed(2)}% of words are fillers`, -298: score: Math.max(0, 100 - fillerRatio * 20), -299: feedback: -300: fillerRatio > 3 -301: ? 'Practice pausing instead of using filler words' -302: : fillerRatio > 1 -303: ? 'Moderate filler usage - room for improvement' -304: : 'Excellent - minimal filler word usage', -305: type: fillerRatio <= 1 ? 'positive' : fillerRatio <= 3 ? 
'neutral' : 'improvement', -306: }, -307: { -308: name: 'Engagement (Questions)', -309: description: `${questionRatio.toFixed(1)} questions per 1000 words`, -310: score: Math.min(100, questionRatio * 10), -311: feedback: -312: questionRatio < 2 -313: ? 'Try asking more questions to boost engagement' -314: : questionRatio > 10 -315: ? 'Good question frequency for interactive discussions' -316: : 'Balanced use of questions', -317: type: questionRatio >= 2 ? 'positive' : 'neutral', -318: }, -319: ]; -320: -321: return patterns; -322: } -323: -324: interface SpeechAnalysisTabProps { -325: meetings: Meeting[]; -326: } -327: -328: export function SpeechAnalysisTab({ meetings }: SpeechAnalysisTabProps) { -329: const entities = useMemo(() => extractEntities(meetings), [meetings]); -330: const patterns = useMemo(() => analyzeSpeechPatterns(meetings), [meetings]); -331: -332: const topEntities = entities.slice(0, 30); -333: const entityTypeColors: Record = { -334: topic: 'bg-chart-1/20 text-chart-1 border-chart-1/30', -335: action: 'bg-chart-2/20 text-chart-2 border-chart-2/30', -336: question: 'bg-chart-3/20 text-chart-3 border-chart-3/30', -337: keyword: 'bg-chart-4/20 text-chart-4 border-chart-4/30', -338: }; -339: -340: return ( -341:
-342: {/* Word Cloud / Entity Map */} -343: -344: -345: -346: -347: Entity Word Map -348: -349: -350: Most frequently mentioned words and phrases across all meetings -351: -352: -353: -354: {topEntities.length > 0 ? ( -355:
-356: {topEntities.map((entity) => { -357: const fontSize = 0.75 + entity.weight * 0.75; // 0.75rem to 1.5rem -358: return ( -359: -368: {entity.text} -369: ×{entity.count} -370: -371: ); -372: })} -373:
-374: ) : ( -375:
-376: -377:

No entity data available yet

-378:

Record some meetings to see extracted entities

-379:
-380: )} -381: -382: {/* Legend */} -383: {topEntities.length > 0 && ( -384:
-385:
-386:
-387: Topics -388:
-389:
-390:
-391: Actions -392:
-393:
-394:
-395: Keywords -396:
-397:
-398: )} -399: -400: -401: -402: {/* Speech Pattern Analysis */} -403: -404: -405: -406: -407: Speech Pattern Analysis -408: -409: Insights and feedback on your speaking patterns -410: -411: -412: {patterns.length > 0 ? ( -413:
-414: {patterns.map((pattern) => ( -415:
-416:
-417:
-418: {pattern.type === 'positive' && ( -419: -420: )} -421: {pattern.type === 'improvement' && ( -422: -423: )} -424: {pattern.type === 'neutral' && ( -425: -426: )} -427: {pattern.name} -428:
-429: {pattern.description} -430:
-431: -432:

-433: -434: {pattern.feedback} -435:

-436:
-437: ))} -438:
-439: ) : ( -440:
-441: -442:

No speech data to analyze

-443:

Record meetings to get personalized feedback

-444:
-445: )} -446:
-447:
-448: -449: {/* Top Keywords Table */} -450: {entities.length > 0 && ( -451: -452: -453: Top Keywords -454: Most used words ranked by frequency -455: -456: -457:
-458: {entities.slice(0, 20).map((entity, index) => ( -459:
-463: {index + 1}. -464: {entity.text} -465: {entity.count} -466:
-467: ))} -468:
-469:
-470:
-471: )} -472:
-473: ); -474: } -```` - -## File: client/src/components/integration-config-panel/auth-config.tsx -````typescript - 1: /** - 2: * OAuth/SSO authentication configuration. - 3: */ - 4: - 5: import { Globe, Key, Lock } from 'lucide-react'; - 6: - 7: import type { Integration } from '@/api/types'; - 8: import { Input } from '@/components/ui/input'; - 9: import { Label } from '@/components/ui/label'; -10: -11: import { configPanelContentStyles, Field, SecretInput } from './shared'; -12: -13: interface AuthConfigProps { -14: integration: Integration; -15: onUpdate: (config: Partial) => void; -16: showSecrets: Record; -17: toggleSecret: (key: string) => void; -18: } -19: -20: export function AuthConfig({ integration, onUpdate, showSecrets, toggleSecret }: AuthConfigProps) { -21: const config = integration.oauth_config || { -22: client_id: '', -23: client_secret: '', -24: redirect_uri: '', -25: scopes: [], -26: }; -27: -28: return ( -29:
-30:
-31: }> -32: onUpdate({ oauth_config: { ...config, client_id: e.target.value } })} -35: placeholder="Enter client ID" -36: /> -37: -38: onUpdate({ oauth_config: { ...config, client_secret: value } })} -42: placeholder="Enter client secret" -43: showSecret={showSecrets.client_secret ?? false} -44: onToggleSecret={() => toggleSecret('client_secret')} -45: icon={} -46: /> -47:
-48: }> -49: onUpdate({ oauth_config: { ...config, redirect_uri: e.target.value } })} -52: placeholder="https://your-app.com/auth/callback" -53: /> -54:

-55: Configure this URL in your OAuth provider's settings -56:

-57:
-58:
-59: -60: -63: onUpdate({ -64: oauth_config: { -65: ...config, -66: scopes: e.target.value -67: .split(',') -68: .map((s) => s.trim()) -69: .filter(Boolean), -70: }, -71: }) -72: } -73: placeholder="openid, email, profile" -74: /> -75:

Comma-separated list of OAuth scopes

-76:
-77:
-78: ); -79: } -```` - -## File: client/src/components/integration-config-panel/calendar-config.tsx -````typescript - 1: /** - 2: * Calendar integration configuration. - 3: */ - 4: - 5: import { Globe, Key, Lock } from 'lucide-react'; - 6: - 7: import type { Integration } from '@/api/types'; - 8: import { Badge } from '@/components/ui/badge'; - 9: import { Input } from '@/components/ui/input'; - 10: import { Label } from '@/components/ui/label'; - 11: import { - 12: Select, - 13: SelectContent, - 14: SelectItem, - 15: SelectTrigger, - 16: SelectValue, - 17: } from '@/components/ui/select'; - 18: import { Separator } from '@/components/ui/separator'; - 19: import { configPanelContentStyles, Field, SecretInput } from './shared'; - 20: - 21: interface CalendarConfigProps { - 22: integration: Integration; - 23: onUpdate: (config: Partial) => void; - 24: showSecrets: Record; - 25: toggleSecret: (key: string) => void; - 26: } - 27: - 28: export function CalendarConfig({ - 29: integration, - 30: onUpdate, - 31: showSecrets, - 32: toggleSecret, - 33: }: CalendarConfigProps) { - 34: const calConfig = integration.calendar_config || { - 35: sync_interval_minutes: 15, - 36: calendar_ids: [], - 37: }; - 38: const oauthConfig = integration.oauth_config || { - 39: client_id: '', - 40: client_secret: '', - 41: redirect_uri: '', - 42: scopes: [], - 43: }; - 44: - 45: return ( - 46:
- 47:
- 48: OAuth 2.0 - 49: Requires OAuth authentication - 50:
- 51:
- 52: }> - 53: - 56: onUpdate({ - 57: oauth_config: { ...oauthConfig, client_id: e.target.value }, - 58: }) - 59: } - 60: placeholder="Enter client ID" - 61: /> - 62: - 63: - 67: onUpdate({ oauth_config: { ...oauthConfig, client_secret: value } }) - 68: } - 69: placeholder="Enter client secret" - 70: showSecret={showSecrets.calendar_client_secret ?? false} - 71: onToggleSecret={() => toggleSecret('calendar_client_secret')} - 72: icon={} - 73: /> - 74:
- 75: }> - 76: - 79: onUpdate({ - 80: oauth_config: { ...oauthConfig, redirect_uri: e.target.value }, - 81: }) - 82: } - 83: placeholder="https://your-app.com/calendar/callback" - 84: /> - 85: - 86: - 87: - 88:
- 89: - 90: -108:
-109:
-110: -111: -114: onUpdate({ -115: calendar_config: { -116: ...calConfig, -117: calendar_ids: e.target.value -118: .split(',') -119: .map((s) => s.trim()) -120: .filter(Boolean), -121: }, -122: }) -123: } -124: placeholder="primary, work@example.com" -125: /> -126:

-127: Leave empty to sync all calendars, or specify calendar IDs -128:

-129:
-130:
-131: -132: -135: onUpdate({ -136: calendar_config: { ...calConfig, webhook_url: e.target.value }, -137: }) -138: } -139: placeholder="https://your-app.com/webhooks/calendar" -140: /> -141:

Receive real-time calendar updates

-142:
-143:
-144: ); -145: } -```` - -## File: client/src/components/integration-config-panel/email-config.tsx -````typescript - 1: /** - 2: * Email provider configuration. - 3: */ - 4: - 5: import { Key, Lock, Mail, Server } from 'lucide-react'; - 6: - 7: import type { Integration } from '@/api/types'; - 8: import { Input } from '@/components/ui/input'; - 9: import { Label } from '@/components/ui/label'; - 10: import { - 11: Select, - 12: SelectContent, - 13: SelectItem, - 14: SelectTrigger, - 15: SelectValue, - 16: } from '@/components/ui/select'; - 17: import { Separator } from '@/components/ui/separator'; - 18: import { Switch } from '@/components/ui/switch'; - 19: import { IntegrationDefaults } from '@/lib/config'; - 20: import { configPanelContentStyles, Field, SecretInput, TestButton } from './shared'; - 21: - 22: interface EmailConfigProps { - 23: integration: Integration; - 24: onUpdate: (config: Partial) => void; - 25: onTest?: () => void; - 26: isTesting: boolean; - 27: showSecrets: Record; - 28: toggleSecret: (key: string) => void; - 29: } - 30: - 31: export function EmailConfig({ - 32: integration, - 33: onUpdate, - 34: onTest, - 35: isTesting, - 36: showSecrets, - 37: toggleSecret, - 38: }: EmailConfigProps) { - 39: const config = integration.email_config || { - 40: provider_type: 'api' as const, - 41: api_key: '', - 42: from_email: '', - 43: from_name: '', - 44: }; - 45: - 46: return ( - 47:
- 48:
- 49: - 50: - 66:
- 67: - 68: {config.provider_type === 'api' ? ( - 69: onUpdate({ email_config: { ...config, api_key: value } })} - 73: placeholder="Enter your API key" - 74: showSecret={showSecrets.email_api_key ?? false} - 75: onToggleSecret={() => toggleSecret('email_api_key')} - 76: icon={} - 77: /> - 78: ) : ( - 79: <> - 80:
- 81: }> - 82: - 85: onUpdate({ - 86: email_config: { ...config, smtp_host: e.target.value }, - 87: }) - 88: } - 89: placeholder="smtp.example.com" - 90: /> - 91: - 92:
- 93: - 94: - 98: onUpdate({ - 99: email_config: { -100: ...config, -101: smtp_port: parseInt(e.target.value, 10) || IntegrationDefaults.SMTP_PORT, -102: }, -103: }) -104: } -105: placeholder={String(IntegrationDefaults.SMTP_PORT)} -106: /> -107:
-108:
-109:
-110:
-111: -112: -115: onUpdate({ -116: email_config: { ...config, smtp_username: e.target.value }, -117: }) -118: } -119: placeholder="username@example.com" -120: /> -121:
-122: onUpdate({ email_config: { ...config, smtp_password: value } })} -126: placeholder="SMTP password" -127: showSecret={showSecrets.smtp_password ?? false} -128: onToggleSecret={() => toggleSecret('smtp_password')} -129: icon={} -130: /> -131:
-132:
-133: -136: onUpdate({ -137: email_config: { ...config, smtp_secure: checked }, -138: }) -139: } -140: /> -141: -142:
-143: -144: )} -145: -146: -147: -148:
-149: }> -150: -154: onUpdate({ -155: email_config: { ...config, from_email: e.target.value }, -156: }) -157: } -158: placeholder="noreply@example.com" -159: /> -160: -161:
-162: -163: -166: onUpdate({ -167: email_config: { ...config, from_name: e.target.value }, -168: }) -169: } -170: placeholder="NoteFlow" -171: /> -172:
-173:
-174: -175: -176:
-177: ); -178: } -```` - -## File: client/src/components/integration-config-panel/index.tsx -````typescript - 1: /** - 2: * Integration Configuration Panel Component. - 3: * - 4: * Renders configuration forms based on integration type. - 5: * Split into separate components for maintainability. - 6: */ - 7: - 8: import { useState } from 'react'; - 9: - 10: import type { Integration } from '@/api/types'; - 11: - 12: import { AuthConfig } from './auth-config'; - 13: import { CalendarConfig } from './calendar-config'; - 14: import { EmailConfig } from './email-config'; - 15: import { OIDCConfig } from './oidc-config'; - 16: import { PKMConfig } from './pkm-config'; - 17: import { WebhookConfig } from './webhook-config'; - 18: - 19: export interface IntegrationConfigPanelProps { - 20: integration: Integration; - 21: onUpdate: (config: Partial) => void; - 22: onTest?: () => void; - 23: isTesting?: boolean; - 24: } - 25: - 26: export function IntegrationConfigPanel({ - 27: integration, - 28: onUpdate, - 29: onTest, - 30: isTesting = false, - 31: }: IntegrationConfigPanelProps) { - 32: const [showSecrets, setShowSecrets] = useState>({}); - 33: const toggleSecret = (key: string) => setShowSecrets((prev) => ({ ...prev, [key]: !prev[key] })); - 34: - 35: // OAuth/SSO Configuration - 36: if (integration.type === 'auth') { - 37: return ( - 38: - 44: ); - 45: } - 46: - 47: // Email Configuration - 48: if (integration.type === 'email') { - 49: return ( - 50: - 58: ); - 59: } - 60: - 61: // Calendar Configuration - 62: if (integration.type === 'calendar') { - 63: return ( - 64: - 70: ); - 71: } - 72: - 73: // PKM Configuration - 74: if (integration.type === 'pkm') { - 75: return ( - 76: - 82: ); - 83: } - 84: - 85: // Custom/Webhook Configuration - 86: if (integration.type === 'custom') { - 87: return ( - 88: - 96: ); - 97: } - 98: - 99: // OIDC Provider Configuration -100: if (integration.type === 'oidc') { -101: return ( -102: -110: ); -111: } -112: -113: return ( -114:
-115: No configuration options available for this integration. -116:
-117: ); -118: } -```` - -## File: client/src/components/integration-config-panel/oidc-config.tsx -````typescript - 1: /** - 2: * OIDC provider configuration. - 3: */ - 4: - 5: import { Globe, Key, Lock } from 'lucide-react'; - 6: - 7: import type { Integration } from '@/api/types'; - 8: import { Badge } from '@/components/ui/badge'; - 9: import { Input } from '@/components/ui/input'; - 10: import { Label } from '@/components/ui/label'; - 11: import { - 12: Select, - 13: SelectContent, - 14: SelectItem, - 15: SelectTrigger, - 16: SelectValue, - 17: } from '@/components/ui/select'; - 18: import { Switch } from '@/components/ui/switch'; - 19: import { formatTimestamp } from '@/lib/format'; - 20: - 21: import { Field, SecretInput, TestButton } from './shared'; - 22: - 23: interface OIDCConfigProps { - 24: integration: Integration; - 25: onUpdate: (config: Partial) => void; - 26: onTest?: () => void; - 27: isTesting: boolean; - 28: showSecrets: Record; - 29: toggleSecret: (key: string) => void; - 30: } - 31: - 32: export function OIDCConfig({ - 33: integration, - 34: onUpdate, - 35: onTest, - 36: isTesting, - 37: showSecrets, - 38: toggleSecret, - 39: }: OIDCConfigProps) { - 40: const config = integration.oidc_config || { - 41: preset: 'custom' as const, - 42: issuer_url: '', - 43: client_id: '', - 44: client_secret: '', - 45: scopes: ['openid', 'profile', 'email'], - 46: claim_mapping: { - 47: subject_claim: 'sub', - 48: email_claim: 'email', - 49: email_verified_claim: 'email_verified', - 50: name_claim: 'name', - 51: preferred_username_claim: 'preferred_username', - 52: groups_claim: 'groups', - 53: picture_claim: 'picture', - 54: }, - 55: require_email_verified: true, - 56: allowed_groups: [], - 57: }; - 58: - 59: return ( - 60:
- 61:
- 62: OIDC - 63: OpenID Connect Provider - 64:
- 65: - 66:
- 67: - 68: - 89:
- 90: - 91: }> - 92: onUpdate({ oidc_config: { ...config, issuer_url: e.target.value } })} - 95: placeholder="https://auth.example.com" - 96: /> - 97:

- 98: Base URL for OIDC discovery (/.well-known/openid-configuration) - 99:

-100:
-101: -102:
-103: }> -104: onUpdate({ oidc_config: { ...config, client_id: e.target.value } })} -107: placeholder="noteflow-client" -108: /> -109: -110: onUpdate({ oidc_config: { ...config, client_secret: value } })} -114: placeholder="Enter client secret" -115: showSecret={showSecrets.oidc_client_secret ?? false} -116: onToggleSecret={() => toggleSecret('oidc_client_secret')} -117: icon={} -118: /> -119:
-120: -121:
-122: -123: -126: onUpdate({ -127: oidc_config: { -128: ...config, -129: scopes: e.target.value -130: .split(',') -131: .map((s) => s.trim()) -132: .filter(Boolean), -133: }, -134: }) -135: } -136: placeholder="openid, profile, email, groups" -137: /> -138:

Comma-separated list of OAuth scopes

-139:
-140: -141:
-142: -145: onUpdate({ -146: oidc_config: { ...config, require_email_verified: checked }, -147: }) -148: } -149: /> -150: -151:
-152: -153: {config.discovery && ( -154:
-155:

Discovery Endpoints

-156:
-157:

-158: Authorization:{' '} -159: {config.discovery.authorization_endpoint} -160:

-161:

-162: Token:{' '} -163: {config.discovery.token_endpoint} -164:

-165: {config.discovery.userinfo_endpoint && ( -166:

-167: UserInfo:{' '} -168: {config.discovery.userinfo_endpoint} -169:

-170: )} -171:
-172: {config.discovery_refreshed_at && ( -173:

-174: Last refreshed: {formatTimestamp(config.discovery_refreshed_at)} -175:

-176: )} -177:
-178: )} -179: -180: -181:
-182: ); -183: } -```` - -## File: client/src/components/integration-config-panel/pkm-config.tsx -````typescript - 1: /** - 2: * Personal Knowledge Management (PKM) configuration. - 3: */ - 4: - 5: import { Database, FolderOpen, Key } from 'lucide-react'; - 6: - 7: import type { Integration } from '@/api/types'; - 8: import { Button } from '@/components/ui/button'; - 9: import { Input } from '@/components/ui/input'; - 10: import { Label } from '@/components/ui/label'; - 11: import { Switch } from '@/components/ui/switch'; - 12: import { EXTERNAL_LINK_REL } from '@/lib/styles'; - 13: import { Field, SecretInput } from './shared'; - 14: - 15: interface PKMConfigProps { - 16: integration: Integration; - 17: onUpdate: (config: Partial) => void; - 18: showSecrets: Record; - 19: toggleSecret: (key: string) => void; - 20: } - 21: - 22: export function PKMConfig({ integration, onUpdate, showSecrets, toggleSecret }: PKMConfigProps) { - 23: const config = integration.pkm_config || { api_key: '', workspace_id: '', sync_enabled: false }; - 24: const isNotion = integration.name.toLowerCase().includes('notion'); - 25: const isObsidian = integration.name.toLowerCase().includes('obsidian'); - 26: - 27: return ( - 28:
- 29: {isNotion && ( - 30: <> - 31: onUpdate({ pkm_config: { ...config, api_key: value } })} - 35: placeholder="secret_xxxxxxxxxxxxxxxx" - 36: showSecret={showSecrets.notion_token ?? false} - 37: onToggleSecret={() => toggleSecret('notion_token')} - 38: icon={} - 39: /> - 40:

- 41: Create an integration at{' '} - 42: - 48: notion.so/my-integrations - 49: - 50:

- 51: }> - 52: - 55: onUpdate({ - 56: pkm_config: { ...config, database_id: e.target.value }, - 57: }) - 58: } - 59: placeholder="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" - 60: /> - 61:

The ID from your Notion database URL

- 62:
- 63: - 64: )} - 65: - 66: {isObsidian && ( - 67: }> - 68:
- 69: - 72: onUpdate({ - 73: pkm_config: { ...config, vault_path: e.target.value }, - 74: }) - 75: } - 76: placeholder="/path/to/obsidian/vault" - 77: className="flex-1" - 78: /> - 79: - 82:
- 83:

Path to your Obsidian vault folder

- 84:
- 85: )} - 86: - 87: {!isNotion && !isObsidian && ( - 88: <> - 89: onUpdate({ pkm_config: { ...config, api_key: value } })} - 93: placeholder="Enter API key" - 94: showSecret={showSecrets.pkm_api_key ?? false} - 95: onToggleSecret={() => toggleSecret('pkm_api_key')} - 96: icon={} - 97: /> - 98:
- 99: -100: -103: onUpdate({ -104: pkm_config: { ...config, workspace_id: e.target.value }, -105: }) -106: } -107: placeholder="Enter workspace ID" -108: /> -109:
-110: -111: )} -112: -113:
-114: -117: onUpdate({ -118: pkm_config: { ...config, sync_enabled: checked }, -119: }) -120: } -121: /> -122: -123:
-124:
-125: ); -126: } -```` - -## File: client/src/components/integration-config-panel/shared.tsx -````typescript - 1: /** - 2: * Shared components for integration configuration panels. - 3: */ - 4: - 5: import { Eye, EyeOff, Loader2, RefreshCw } from 'lucide-react'; - 6: import type { ReactNode } from 'react'; - 7: - 8: import { Button } from '@/components/ui/button'; - 9: import { Input } from '@/components/ui/input'; - 10: import { Label } from '@/components/ui/label'; - 11: import { iconWithMargin, labelStyles } from '@/lib/styles'; - 12: - 13: /** Common container styles for config panel content sections. */ - 14: export const configPanelContentStyles = 'space-y-4 pt-2'; - 15: - 16: /** - 17: * Reusable form field wrapper with label and icon. - 18: */ - 19: export function Field({ - 20: label, - 21: icon, - 22: children, - 23: }: { - 24: label: string; - 25: icon?: ReactNode; - 26: children: ReactNode; - 27: }) { - 28: return ( - 29:
- 30: - 34: {children} - 35:
- 36: ); - 37: } - 38: - 39: /** - 40: * Secret input field with show/hide toggle. - 41: */ - 42: export function SecretInput({ - 43: label, - 44: value, - 45: onChange, - 46: placeholder, - 47: showSecret, - 48: onToggleSecret, - 49: icon, - 50: }: { - 51: label: string; - 52: value: string; - 53: onChange: (value: string) => void; - 54: placeholder: string; - 55: showSecret: boolean; - 56: onToggleSecret: () => void; - 57: icon?: ReactNode; - 58: }) { - 59: return ( - 60: - 61:
- 62: onChange(e.target.value)} - 66: placeholder={placeholder} - 67: className="pr-10" - 68: /> - 69: - 78:
- 79:
- 80: ); - 81: } - 82: - 83: /** - 84: * Test connection button. - 85: */ - 86: export function TestButton({ - 87: onTest, - 88: isTesting, - 89: label = 'Test Connection', - 90: Icon = RefreshCw, - 91: }: { - 92: onTest?: () => void; - 93: isTesting?: boolean; - 94: label?: string; - 95: Icon?: React.ElementType; - 96: }) { - 97: if (!onTest) { - 98: return null; - 99: } -100: return ( -101: -109: ); -110: } -```` - -## File: client/src/components/integration-config-panel/webhook-config.tsx -````typescript - 1: /** - 2: * Custom/Webhook integration configuration. - 3: */ - 4: - 5: import { Globe, Key } from 'lucide-react'; - 6: - 7: import type { Integration } from '@/api/types'; - 8: import { Input } from '@/components/ui/input'; - 9: import { Label } from '@/components/ui/label'; - 10: import { - 11: Select, - 12: SelectContent, - 13: SelectItem, - 14: SelectTrigger, - 15: SelectValue, - 16: } from '@/components/ui/select'; - 17: import { Field, SecretInput, TestButton } from './shared'; - 18: - 19: interface WebhookConfigProps { - 20: integration: Integration; - 21: onUpdate: (config: Partial) => void; - 22: onTest?: () => void; - 23: isTesting: boolean; - 24: showSecrets: Record; - 25: toggleSecret: (key: string) => void; - 26: } - 27: - 28: export function WebhookConfig({ - 29: integration, - 30: onUpdate, - 31: onTest, - 32: isTesting, - 33: showSecrets, - 34: toggleSecret, - 35: }: WebhookConfigProps) { - 36: const config = integration.webhook_config || { - 37: url: '', - 38: method: 'POST' as const, - 39: auth_type: 'none' as const, - 40: auth_value: '', - 41: }; - 42: - 43: return ( - 44:
- 45: }> - 46: - 49: onUpdate({ - 50: webhook_config: { ...config, url: e.target.value }, - 51: }) - 52: } - 53: placeholder="https://api.example.com/webhook" - 54: /> - 55: - 56:
- 57:
- 58: - 59: - 76:
- 77:
- 78: - 79: - 97:
- 98:
- 99: -100: {config.auth_type && config.auth_type !== 'none' && ( -101: onUpdate({ webhook_config: { ...config, auth_value: value } })} -111: placeholder={config.auth_type === 'basic' ? 'username:password' : 'Enter value'} -112: showSecret={showSecrets.webhook_auth ?? false} -113: onToggleSecret={() => toggleSecret('webhook_auth')} -114: icon={} -115: /> -116: )} -117: -118: -119:
-120: ); -121: } -```` - -## File: client/src/components/projects/ProjectList.tsx -````typescript - 1: // Project list component with create and manage actions - 2: - 3: import { Archive, FolderKanban, MoreHorizontal, Plus, Settings2, Trash2 } from 'lucide-react'; - 4: import { useMemo, useState } from 'react'; - 5: import { Link } from 'react-router-dom'; - 6: import { useProjects } from '@/contexts/project-state'; - 7: import { useWorkspace } from '@/contexts/workspace-state'; - 8: import { Button } from '@/components/ui/button'; - 9: import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card'; - 10: import { - 11: Dialog, - 12: DialogContent, - 13: DialogFooter, - 14: DialogHeader, - 15: DialogTitle, - 16: } from '@/components/ui/dialog'; - 17: import { - 18: DropdownMenu, - 19: DropdownMenuContent, - 20: DropdownMenuItem, - 21: DropdownMenuTrigger, - 22: } from '@/components/ui/dropdown-menu'; - 23: import { Input } from '@/components/ui/input'; - 24: import { Label } from '@/components/ui/label'; - 25: import { Switch } from '@/components/ui/switch'; - 26: import { Badge } from '@/components/ui/badge'; - 27: import { useGuardedMutation } from '@/hooks/use-guarded-mutation'; - 28: import { searchIcon } from '@/lib/styles'; - 29: import { cn } from '@/lib/utils'; - 30: - 31: export function ProjectList() { - 32: const { - 33: projects, - 34: activeProject, - 35: switchProject, - 36: createProject, - 37: archiveProject, - 38: restoreProject, - 39: deleteProject, - 40: isLoading, - 41: } = useProjects(); - 42: const { currentWorkspace } = useWorkspace(); - 43: const { guard } = useGuardedMutation(); - 44: const [searchQuery, setSearchQuery] = useState(''); - 45: const [showArchived, setShowArchived] = useState(false); - 46: const [createOpen, setCreateOpen] = useState(false); - 47: const [newName, setNewName] = useState(''); - 48: const [newSlug, setNewSlug] = useState(''); - 49: const [newDescription, setNewDescription] = useState(''); - 50: 
const closeDialog = () => setCreateOpen(false); - 51: - 52: const filteredProjects = useMemo(() => { - 53: const normalized = searchQuery.trim().toLowerCase(); - 54: return projects.filter((project) => { - 55: if (!showArchived && project.is_archived) { - 56: return false; - 57: } - 58: if (!normalized) { - 59: return true; - 60: } - 61: return ( - 62: project.name.toLowerCase().includes(normalized) || - 63: project.slug?.toLowerCase().includes(normalized) || - 64: project.description?.toLowerCase().includes(normalized) - 65: ); - 66: }); - 67: }, [projects, searchQuery, showArchived]); - 68: - 69: const handleCreate = async () => { - 70: if (!currentWorkspace) { - 71: return; - 72: } - 73: const name = newName.trim(); - 74: if (!name) { - 75: return; - 76: } - 77: const created = await guard(() => - 78: createProject({ - 79: workspace_id: currentWorkspace.id, - 80: name, - 81: slug: newSlug.trim() || undefined, - 82: description: newDescription.trim() || undefined, - 83: }) - 84: ); - 85: if (created) { - 86: setCreateOpen(false); - 87: setNewName(''); - 88: setNewSlug(''); - 89: setNewDescription(''); - 90: } - 91: }; - 92: - 93: const handleArchive = async (projectId: string) => { - 94: await guard(() => archiveProject(projectId), { - 95: title: 'Offline mode', - 96: message: 'Archiving projects requires an active server connection.', - 97: }); - 98: }; - 99: -100: const handleRestore = async (projectId: string) => { -101: await guard(() => restoreProject(projectId), { -102: title: 'Offline mode', -103: message: 'Restoring projects requires an active server connection.', -104: }); -105: }; -106: -107: const handleDelete = async (projectId: string) => { -108: if (!confirm('Delete this project? This cannot be undone.')) { -109: return; -110: } -111: await guard(() => deleteProject(projectId), { -112: title: 'Offline mode', -113: message: 'Deleting projects requires an active server connection.', -114: }); -115: }; -116: -117: return ( -118:
-119:
-120:
-121:

Projects

-122:

-123: Organize meetings and settings by project. -124:

-125:
-126: -130:
-131: -132:
-133:
-134: -135: setSearchQuery(event.target.value)} -139: className="pl-10" -140: /> -141:
-142:
-143: -148: -151:
-152:
-153: -154: {isLoading ? ( -155:
Loading projects...
-156: ) : ( -157:
-158: {filteredProjects.map((project) => { -159: const isActive = project.id === activeProject?.id; -160: return ( -161: -162: -163:
-164:
-165: {project.name} -166: {project.slug && ( -167:

/{project.slug}

-168: )} -169:
-170: -171: -172: -175: -176: -177: -178: -179: -180: Settings -181: -182: -183: {project.is_archived ? ( -184: handleRestore(project.id)} -186: className="gap-2" -187: > -188: -189: Restore -190: -191: ) : ( -192: handleArchive(project.id)} -194: className="gap-2" -195: disabled={project.is_default} -196: > -197: -198: Archive -199: -200: )} -201: handleDelete(project.id)} -203: className="gap-2 text-destructive focus:text-destructive" -204: disabled={project.is_default} -205: > -206: -207: Delete -208: -209: -210: -211:
-212:
-213: -214:

-215: {project.description || 'No description provided.'} -216:

-217:
-218: {project.is_default && Default} -219: {project.is_archived && Archived} -220: {isActive && Active} -221:
-222:
-223: -231: -234:
-235:
-236:
-237: ); -238: })} -239:
-240: )} -241: -242: -243: -244: -245: Create Project -246: -247:
-248:
-249: -252: setNewName(event.target.value)} -256: placeholder="e.g. Growth Experiments" -257: /> -258:
-259:
-260: -263: setNewSlug(event.target.value)} -267: placeholder="growth-experiments" -268: /> -269:
-270:
-271: -277: setNewDescription(event.target.value)} -281: placeholder="Short project description" -282: /> -283:
-284:
-285: -286: -289: -292: -293:
-294:
-295:
-296: ); -297: } -```` - -## File: client/src/components/projects/ProjectMembersPanel.tsx -````typescript - 1: // Project members management panel - 2: - 3: import { Trash2, UserPlus } from 'lucide-react'; - 4: import { useState } from 'react'; - 5: import { getAPI } from '@/api'; - 6: import type { ProjectMembership, ProjectRole } from '@/api/types'; - 7: import { useProjectMembers } from '@/hooks/use-project-members'; - 8: import { useGuardedMutation } from '@/hooks/use-guarded-mutation'; - 9: import { Button } from '@/components/ui/button'; - 10: import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card'; - 11: import { flexLayout } from '@/lib/styles'; - 12: import { - 13: Dialog, - 14: DialogContent, - 15: DialogFooter, - 16: DialogHeader, - 17: DialogTitle, - 18: } from '@/components/ui/dialog'; - 19: import { Input } from '@/components/ui/input'; - 20: import { Label } from '@/components/ui/label'; - 21: import { - 22: Select, - 23: SelectContent, - 24: SelectItem, - 25: SelectTrigger, - 26: SelectValue, - 27: } from '@/components/ui/select'; - 28: import { - 29: Table, - 30: TableBody, - 31: TableCell, - 32: TableHead, - 33: TableHeader, - 34: TableRow, - 35: } from '@/components/ui/table'; - 36: - 37: const roleOptions: ProjectRole[] = ['viewer', 'editor', 'admin']; - 38: - 39: export function ProjectMembersPanel({ projectId }: { projectId: string }) { - 40: const { members, isLoading, refresh } = useProjectMembers(projectId); - 41: const { guard } = useGuardedMutation(); - 42: const [dialogOpen, setDialogOpen] = useState(false); - 43: const [newUserId, setNewUserId] = useState(''); - 44: const [newRole, setNewRole] = useState('viewer'); - 45: const headerRowClass = flexLayout.rowBetween; - 46: const openDialog = () => setDialogOpen(true); - 47: const closeDialog = () => setDialogOpen(false); - 48: - 49: const handleAdd = async () => { - 50: const userId = newUserId.trim(); - 51: if (!userId) { - 52: return; - 53: } - 54: await 
guard(async () => { - 55: await getAPI().addProjectMember({ - 56: project_id: projectId, - 57: user_id: userId, - 58: role: newRole, - 59: }); - 60: await refresh(); - 61: setDialogOpen(false); - 62: setNewUserId(''); - 63: setNewRole('viewer'); - 64: }); - 65: }; - 66: - 67: const handleRoleChange = async (member: ProjectMembership, role: ProjectRole) => { - 68: if (member.role === role) { - 69: return; - 70: } - 71: await guard(async () => { - 72: await getAPI().updateProjectMemberRole({ - 73: project_id: projectId, - 74: user_id: member.user_id, - 75: role, - 76: }); - 77: await refresh(); - 78: }); - 79: }; - 80: - 81: const handleRemove = async (member: ProjectMembership) => { - 82: if (!confirm('Remove this member from the project?')) { - 83: return; - 84: } - 85: await guard(async () => { - 86: await getAPI().removeProjectMember({ - 87: project_id: projectId, - 88: user_id: member.user_id, - 89: }); - 90: await refresh(); - 91: }); - 92: }; - 93: - 94: return ( - 95: - 96: - 97: Members - 98: -102: -103: -104: {isLoading ? ( -105:
Loading members...
-106: ) : members.length === 0 ? ( -107:
No members yet.
-108: ) : ( -109: -110: -111: -112: User ID -113: Role -114: Actions -115: -116: -117: -118: {members.map((member) => ( -119: -120: {member.user_id} -121: -122: -137: -138: -139: -147: -148: -149: ))} -150: -151:
-152: )} -153:
-154: -155: -156: -157: -158: Add member -159: -160:
-161:
-162: -168: setNewUserId(event.target.value)} -172: placeholder="UUID of user" -173: /> -174:
-175:
-176: -179: -191:
-192:
-193: -194: -197: -200: -201:
-202:
-203:
-204: ); -205: } -```` - -## File: client/src/components/projects/ProjectScopeFilter.tsx -````typescript - 1: // Project scope filter controls shared between Meetings and Tasks pages. - 2: - 3: import type { Dispatch, SetStateAction } from 'react'; - 4: import { useMemo } from 'react'; - 5: import type { Project } from '@/api/types/projects'; - 6: import type { ProjectScope } from '@/api/types/requests'; - 7: import { Badge } from '@/components/ui/badge'; - 8: import { Button } from '@/components/ui/button'; - 9: import { Checkbox } from '@/components/ui/checkbox'; - 10: import { Label } from '@/components/ui/label'; - 11: import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover'; - 12: - 13: interface ProjectScopeFilterProps { - 14: activeProjects: Project[]; - 15: projectScope: ProjectScope; - 16: selectedProjectIds: string[]; - 17: onProjectScopeChange: (scope: ProjectScope) => void; - 18: onSelectedProjectIdsChange: Dispatch>; - 19: projectsLoading?: boolean; - 20: resolvedProjectId?: string | null; - 21: idPrefix: string; - 22: className?: string; - 23: } - 24: - 25: export function ProjectScopeFilter({ - 26: activeProjects, - 27: projectScope, - 28: selectedProjectIds, - 29: onProjectScopeChange, - 30: onSelectedProjectIdsChange, - 31: projectsLoading = false, - 32: resolvedProjectId, - 33: idPrefix, - 34: className = 'flex flex-wrap items-center gap-2', - 35: }: ProjectScopeFilterProps) { - 36: const selectedProjectNames = useMemo(() => { - 37: if (projectScope !== 'selected' || selectedProjectIds.length === 0) { - 38: return ''; - 39: } - 40: if (activeProjects.length > 0 && selectedProjectIds.length === activeProjects.length) { - 41: return 'All projects'; - 42: } - 43: const nameMap = new Map(activeProjects.map((project) => [project.id, project.name])); - 44: const names = selectedProjectIds - 45: .map((id) => nameMap.get(id)) - 46: .filter((name): name is string => Boolean(name)); - 47: const display = names.slice(0, 2).join(', 
'); - 48: const remaining = names.length - 2; - 49: return remaining > 0 ? `${display} +${remaining} more` : display; - 50: }, [projectScope, selectedProjectIds, activeProjects]); - 51: - 52: const setSelectedProjects = (ids: string[]) => { - 53: onProjectScopeChange('selected'); - 54: onSelectedProjectIdsChange(ids); - 55: }; - 56: - 57: const handleProjectToggle = (projectId: string, checked: boolean | string) => { - 58: onProjectScopeChange('selected'); - 59: onSelectedProjectIdsChange((prev) => { - 60: if (checked) { - 61: return prev.includes(projectId) ? prev : [...prev, projectId]; - 62: } - 63: return prev.filter((id) => id !== projectId); - 64: }); - 65: }; - 66: - 67: return ( - 68:
- 69: - 77: - 84: - 85: - 86: - 93: - 94: - 95:
- 96:
Select projects
- 97:
- 98: -105: -108:
-109:
-110: {activeProjects.map((project) => ( -111:
-112: handleProjectToggle(project.id, checked)} -116: /> -117: -120:
-121: ))} -122:
-123: {projectScope === 'selected' && selectedProjectIds.length === 0 && ( -124:

Select at least one project.

-125: )} -126:
-127:
-128:
-129: {projectScope === 'selected' && selectedProjectNames && ( -130:
-131: {selectedProjectNames} -132: {selectedProjectIds.length} -133:
-134: )} -135:
-136: ); -137: } -```` - -## File: client/src/components/projects/ProjectSettingsPanel.tsx -````typescript - 1: // Project settings editor panel - 2: - 3: import { useEffect, useMemo, useState } from 'react'; - 4: import { getAPI } from '@/api/interface'; - 5: import type { - 6: ExportFormat, - 7: Project, - 8: ProjectSettings, - 9: SummarizationTemplate, - 10: TriggerRules, - 11: } from '@/api/types'; - 12: import { useProjects } from '@/contexts/project-state'; - 13: import { useWorkspace } from '@/contexts/workspace-state'; - 14: import { Button } from '@/components/ui/button'; - 15: import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card'; - 16: import { Input } from '@/components/ui/input'; - 17: import { Label } from '@/components/ui/label'; - 18: import { - 19: Select, - 20: SelectContent, - 21: SelectItem, - 22: SelectTrigger, - 23: SelectValue, - 24: } from '@/components/ui/select'; - 25: import { Switch } from '@/components/ui/switch'; - 26: import { Textarea } from '@/components/ui/textarea'; - 27: import { useGuardedMutation } from '@/hooks/use-guarded-mutation'; - 28: - 29: type ToggleMode = 'inherit' | 'enabled' | 'disabled'; - 30: type FormatMode = 'inherit' | ExportFormat; - 31: - 32: const splitPatterns = (value: string): string[] => - 33: value - 34: .split(/[\n,]+/g) - 35: .map((item) => item.trim()) - 36: .filter(Boolean); - 37: - 38: const toMode = (value?: boolean): ToggleMode => - 39: value === undefined ? 'inherit' : value ? 
'enabled' : 'disabled'; - 40: - 41: const fromMode = (value: ToggleMode): boolean | undefined => { - 42: if (value === 'inherit') { - 43: return undefined; - 44: } - 45: return value === 'enabled'; - 46: }; - 47: - 48: export function ProjectSettingsPanel({ project }: { project: Project }) { - 49: const { updateProject } = useProjects(); - 50: const { guard } = useGuardedMutation(); - 51: const { currentWorkspace } = useWorkspace(); - 52: const [name, setName] = useState(project.name); - 53: const [slug, setSlug] = useState(project.slug ?? ''); - 54: const [description, setDescription] = useState(project.description ?? ''); - 55: - 56: const [exportFormat, setExportFormat] = useState('inherit'); - 57: const [includeAudio, setIncludeAudio] = useState('inherit'); - 58: const [includeTimestamps, setIncludeTimestamps] = useState('inherit'); - 59: const [autoStart, setAutoStart] = useState('inherit'); - 60: const [ragEnabled, setRagEnabled] = useState('inherit'); - 61: const [defaultTemplateId, setDefaultTemplateId] = useState<'inherit' | string>('inherit'); - 62: const [templates, setTemplates] = useState([]); - 63: const [templatesLoading, setTemplatesLoading] = useState(false); - 64: const [calendarPatterns, setCalendarPatterns] = useState(''); - 65: const [appPatterns, setAppPatterns] = useState(''); - 66: const [calendarInherit, setCalendarInherit] = useState(true); - 67: const [appInherit, setAppInherit] = useState(true); - 68: const [isSaving, setIsSaving] = useState(false); - 69: - 70: useEffect(() => { - 71: setName(project.name); - 72: setSlug(project.slug ?? ''); - 73: setDescription(project.description ?? ''); - 74: - 75: const exportRules = project.settings?.export_rules; - 76: setExportFormat(exportRules?.default_format ?? 
'inherit'); - 77: setIncludeAudio(toMode(exportRules?.include_audio)); - 78: setIncludeTimestamps(toMode(exportRules?.include_timestamps)); - 79: - 80: const triggerRules = project.settings?.trigger_rules; - 81: setAutoStart(toMode(triggerRules?.auto_start_enabled)); - 82: const calendar = triggerRules?.calendar_match_patterns; - 83: setCalendarInherit(calendar === undefined); - 84: setCalendarPatterns(calendar?.join('\n') ?? ''); - 85: const apps = triggerRules?.app_match_patterns; - 86: setAppInherit(apps === undefined); - 87: setAppPatterns(apps?.join('\n') ?? ''); - 88: - 89: setRagEnabled(toMode(project.settings?.rag_enabled)); - 90: setDefaultTemplateId(project.settings?.default_summarization_template ?? 'inherit'); - 91: }, [project]); - 92: - 93: useEffect(() => { - 94: if (!currentWorkspace) { - 95: setTemplates([]); - 96: return; - 97: } - 98: let isActive = true; - 99: const loadTemplates = async () => { -100: setTemplatesLoading(true); -101: try { -102: const response = await getAPI().listSummarizationTemplates({ -103: workspace_id: currentWorkspace.id, -104: include_system: true, -105: include_archived: false, -106: limit: 200, -107: offset: 0, -108: }); -109: if (isActive) { -110: setTemplates(response.templates); -111: } -112: } catch { -113: if (isActive) { -114: setTemplates([]); -115: } -116: } finally { -117: if (isActive) { -118: setTemplatesLoading(false); -119: } -120: } -121: }; -122: void loadTemplates(); -123: return () => { -124: isActive = false; -125: }; -126: }, [currentWorkspace]); -127: -128: const settings = useMemo(() => { -129: const export_rules: ProjectSettings['export_rules'] = {}; -130: if (exportFormat !== 'inherit') { -131: export_rules.default_format = exportFormat; -132: } -133: const includeAudioValue = fromMode(includeAudio); -134: if (includeAudioValue !== undefined) { -135: export_rules.include_audio = includeAudioValue; -136: } -137: const includeTsValue = fromMode(includeTimestamps); -138: if (includeTsValue !== 
undefined) { -139: export_rules.include_timestamps = includeTsValue; -140: } -141: -142: const trigger_rules: TriggerRules = {}; -143: const autoStartValue = fromMode(autoStart); -144: if (autoStartValue !== undefined) { -145: trigger_rules.auto_start_enabled = autoStartValue; -146: } -147: if (!calendarInherit) { -148: trigger_rules.calendar_match_patterns = splitPatterns(calendarPatterns); -149: } -150: if (!appInherit) { -151: trigger_rules.app_match_patterns = splitPatterns(appPatterns); -152: } -153: -154: const merged: ProjectSettings = {}; -155: if (Object.keys(export_rules).length) { -156: merged.export_rules = export_rules; -157: } -158: if (Object.keys(trigger_rules).length) { -159: merged.trigger_rules = trigger_rules; -160: } -161: -162: const ragValue = fromMode(ragEnabled); -163: if (ragValue !== undefined) { -164: merged.rag_enabled = ragValue; -165: } -166: if (defaultTemplateId !== 'inherit' && defaultTemplateId.trim()) { -167: merged.default_summarization_template = defaultTemplateId.trim(); -168: } -169: return merged; -170: }, [ -171: exportFormat, -172: includeAudio, -173: includeTimestamps, -174: autoStart, -175: calendarInherit, -176: calendarPatterns, -177: appInherit, -178: appPatterns, -179: ragEnabled, -180: defaultTemplateId, -181: ]); -182: -183: const handleSave = async () => { -184: const trimmedName = name.trim(); -185: if (!trimmedName) { -186: return; -187: } -188: setIsSaving(true); -189: try { -190: await guard(() => -191: updateProject({ -192: project_id: project.id, -193: name: trimmedName, -194: slug: slug.trim() || undefined, -195: description: description.trim() || undefined, -196: settings, -197: }) -198: ); -199: } finally { -200: setIsSaving(false); -201: } -202: }; -203: -204: const selectedTemplate = -205: defaultTemplateId !== 'inherit' -206: ? templates.find((template) => template.id === defaultTemplateId) ?? 
null -207: : null; -208: const isMissingTemplate = defaultTemplateId !== 'inherit' && !selectedTemplate; -209: -210: return ( -211:
-212: -213: -214: Basics -215: -216: -217:
-218: -221: setName(event.target.value)} -225: /> -226:
-227:
-228: -231: setSlug(event.target.value)} -235: placeholder="project-slug" -236: /> -237:
-238:
-239: -242: