{
"Stop": [
{
"hooks": [
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/stop-review-check.sh",
"timeout": 10
}
]
}
],
"PreCompact": [
{
"hooks": [
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/save-session-context.sh",
"timeout": 10
}
]
}
],
"PreToolUse": [
{
"hooks": [
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/pre-commit-gate.sh",
"timeout": 120
}
],
"matcher": "Bash"
}
],
"SessionEnd": [
{
"hooks": [
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/session-end-summary.sh",
"timeout": 5
}
]
}
],
"PostToolUse": [
{
"hooks": [
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/auto-lint.sh",
"timeout": 30
}
],
"matcher": "Write|Edit"
}
],
"SessionStart": [
{
"hooks": [
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/load-context.sh",
"timeout": 10
},
{
"type": "command",
"command": "node ${CLAUDE_PLUGIN_ROOT}/scripts/setup-statusline.js",
"timeout": 5
}
]
}
],
"SubagentStop": [
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate implementer agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) TDD red-green cycle verified: test files exist BEFORE implementation code, and agent output shows tests were run and FAILED before implementation was written, then PASSED after 2) Tests run and passing 3) Structured task notes written (files created, patterns used, decisions) 4) Task reported complete, not abandoned.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^implementer$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate fixer agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Fix Report section present with Finding ID, Status, Files Changed, What Changed, Tests fields 2) Tests were run after the fix (test results mentioned) 3) If tests failed after two attempts, changes were reverted 4) Only ONE finding was addressed (not multiple).\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^fixer$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate researcher agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field. The researcher runs in TWO modes -- validate based on which mode was used:\n\n**Phase research mode** (signal: 'Research complete: [N] tasks enriched'): 1) Research notes were written to TASKS.md (mentions updating research sections) 2) Completion signal present\n\n**Spec/quick research mode** (signal: structured research summary returned in final message, no TASKS.md write): 1) Output contains structured findings (project structure, patterns, reusable code) 2) No TASKS.md write expected -- findings are returned inline\n\nFor BOTH modes: No production code was written (no file creation or modification outside TASKS.md and memory files).\n\nExactly one mode must be detected. Respond {\"ok\": true} if ALL checks for the detected mode pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^researcher$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate bug-detector agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains a findings section with '## Bugs Detected' heading with severity-grouped subsections (### Critical, ### High, ### Medium), OR the 'No bugs detected.' message 2) Each finding has a file:line reference in backtick format (e.g. `file:line`) 3) A total count summary is present (e.g. '**Total: X critical, Y high, Z medium**') or 'No bugs detected.' was used 4) Only HIGH confidence findings are reported (no speculative or low-confidence issues) 5) No code modifications were made -- this is a read-only agent that must not create or edit any files 6) Each finding MUST include an Evidence field (trace path), Impact field (user-facing consequence), and Test Gap field.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^bug-detector$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate pattern-reviewer agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains '## Project Pattern Deviations' heading OR the 'No project pattern deviations found.' message 2) Each deviation shows an existing pattern vs current code comparison with '**Existing pattern:**' and '**This code:**' sub-items 3) file:line references are present in backtick format for each deviation 4) A total count is present (e.g. '**Total: X deviations**') or 'No project pattern deviations found.' was used 5) No code modifications were made -- this is a read-only agent that must not create or edit any files 6) Each finding MUST include an Evidence field (trace path), Impact field (user-facing consequence), and Test Gap field.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^pattern-reviewer$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate plan-compliance-reviewer agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains either '## Plan Compliance Findings' heading (code review mode) with categorized findings under '### Spec Gap' (SG-NNN), '### Cross-Phase Integration' (CI-NNN), '### Over-Scope' (OS-NNN), OR '## Plan Compliance Review: Phase' heading (plan review mode) with a '### Coverage Matrix' table showing COVERED/PARTIAL/NOT COVERED statuses 2) Appropriate categorization is present for the detected mode 3) A summary section is present with finding counts or coverage statistics 4) No code modifications were made other than requirements.md checkbox updates (which are expected in code review mode when marking covered requirements) 5) In code review mode, output MUST include an Acceptance Criteria checklist with explicit [x]/[ ] marks per criteria. Each finding MUST include Evidence, Impact, and Test Gap fields.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^plan-compliance-reviewer$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate stack-reviewer agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains '## Stack Best Practice Violations' heading with violations listed, OR 'No stack best practice violations found.' message, OR 'no stack skill loaded, skipping' early exit message 2) If violations are present, each violation references a specific stack rule category from the skill file 3) file:line references are present in backtick format for each violation 4) A total count is present (e.g. '**Total: X violations**') or one of the no-findings/skip messages was used 5) No code modifications were made -- this is a read-only agent that must not create or edit any files 6) Each finding MUST include an Evidence field (trace path), Impact field (user-facing consequence), and Test Gap field.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^stack-reviewer$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate laravel-inertia-vue-bug-detector agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains a findings section with '## Bugs Detected' heading with severity-grouped subsections (### Critical, ### High, ### Medium), OR the 'No bugs detected.' message 2) Each finding has a file:line reference in backtick format (e.g. `file:line`) 3) A total count summary is present (e.g. '**Total: X critical, Y high, Z medium**') or 'No bugs detected.' was used 4) Only HIGH confidence findings are reported (no speculative or low-confidence issues) 5) No code modifications were made -- this is a read-only agent that must not create or edit any files.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^laravel-inertia-vue-bug-detector$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate laravel-inertia-vue-pattern-reviewer agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains '## Project Pattern Deviations' heading OR the 'No project pattern deviations found.' message 2) Each deviation shows an existing pattern vs current code comparison with '**Existing pattern:**' and '**This code:**' sub-items 3) file:line references are present in backtick format for each deviation 4) A total count is present (e.g. '**Total: X deviations**') or 'No project pattern deviations found.' was used 5) No code modifications were made -- this is a read-only agent that must not create or edit any files.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^laravel-inertia-vue-pattern-reviewer$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate laravel-inertia-vue-implementer agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Test files created BEFORE implementation (TDD compliance) 2) Tests run and passing 3) Structured task notes written (files created, patterns used, decisions) 4) Task reported complete, not abandoned.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^laravel-inertia-vue-implementer$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate quick-implementer agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) TDD red-green cycle verified: test files exist BEFORE implementation code, and agent output shows tests were run and FAILED before implementation was written, then PASSED after 2) Tests run and passing 3) Structured task notes written (files created, patterns used, decisions) 4) Task reported complete with the exact signal: 'Task complete. [X] tests passing.'\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^quick-implementer$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate spec-reviewer agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains '## Spec Review' heading 2) A '**Status:**' line is present with either 'Approved' or 'Issues Found' 3) If status is 'Issues Found', at least one issue is listed under '**Issues (if any):**' with [Section]: format 4) No files were modified -- this is a read-only agent that must not create or edit any files.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^spec-reviewer$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate discuss-partner agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and determine which mode the agent ran in by checking which completion signal is present, then validate accordingly:\n\n**Write-notes mode** (signal: 'Discussion notes written: {path}'): 1) The completion signal 'Discussion notes written:' is present with a file path 2) Required headings are present in the notes (required headings present: ## Topic, ## Discussion Summary, ## Notes for Spec Creation) 3) No code was generated -- discussion notes contain prose analysis, no code blocks or implementation\n\n**Scan mode** (signal: 'Scan complete:' with file and pattern counts): 1) The completion signal 'Scan complete:' is present 2) Output contains '## Relevant Files' heading with file paths listed 3) Output contains '## Existing Patterns' heading with pattern descriptions 4) No files written -- scan mode is read-only, the agent must not create or modify any files\n\nExactly one mode must be detected. If neither signal is found, validation fails.\n\nRespond {\"ok\": true} if ALL checks for the detected mode pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^discuss-partner$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate security-auditor agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains findings using the audit finding format (### F-SEC-NNN prefix) with Severity, Category, File, Lines, Agent, Description, Evidence, Impact, Suggested Fix fields -- OR a 'No security issues found.' message 2) Each finding includes an Evidence section with actual code snippet 3) A '## Security Audit Summary' section is present at the end with files scanned count, finding counts by severity, and overall security posture rating 4) No code modifications were made -- this is a read-only agent that must not create or edit any files.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^security-auditor$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate error-handling-auditor agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains findings using the audit finding format (### F-ERR-NNN prefix) with Severity, Category, File, Lines, Agent, Description, Evidence, Impact, Suggested Fix fields -- OR a 'No error handling issues found.' message 2) Each finding specifies the failure mode: CRASH, SILENT FAILURE, DATA LOSS, or DEGRADED UX 3) A '## Error Handling Audit Summary' section is present at the end with finding counts and overall maturity rating 4) No code modifications were made -- this is a read-only agent.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^error-handling-auditor$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate database-auditor agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains findings using the audit finding format (### F-DB-NNN prefix) with Severity, Category, File, Lines, Agent, Description, Evidence, Impact, Suggested Fix fields -- OR a 'No database issues found.' message 2) A '## Database Audit Summary' section is present at the end with migration files scanned, model files scanned, N+1 patterns found, missing indexes count, and overall data layer health rating 3) No code modifications were made -- this is a read-only agent.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^database-auditor$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate architecture-auditor agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains findings using the audit finding format (### F-ARCH-NNN prefix) with Severity, Category, File, Lines, Agent, Description, Evidence, Impact, Suggested Fix fields -- OR a 'No architecture issues found.' message 2) A '## Architecture Audit Summary' section is present at the end with total files analyzed, god files count, duplicated patterns, separation of concerns violations, and overall architecture health rating 3) No code modifications were made -- this is a read-only agent.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^architecture-auditor$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate api-auditor agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains findings using the audit finding format (### F-API-NNN prefix) with Severity, Category, File, Lines, Agent, Description, Evidence, Impact, Suggested Fix fields -- OR a 'No API issues found.' message 2) An '## Endpoint Inventory' table is present listing discovered endpoints with Method, Path, Auth, Validation columns 3) A '## API Audit Summary' section is present at the end with total endpoints, endpoints without validation, and overall API health rating 4) No code modifications were made -- this is a read-only agent.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^api-auditor$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate frontend-auditor agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains findings using the audit finding format (### F-FE-NNN prefix) with Severity, Category, File, Lines, Agent, Description, Evidence, Impact, Suggested Fix fields -- OR a 'No frontend issues found.' message 2) A '## Frontend Audit Summary' section is present at the end with components scanned, missing loading/error states, memory leak risks, accessibility issues, and overall frontend health rating 3) No code modifications were made -- this is a read-only agent.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^frontend-auditor$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate performance-auditor agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains findings using the audit finding format (### F-PERF-NNN prefix) with Severity, Category, File, Lines, Agent, Description, Evidence, Impact, Suggested Fix fields -- OR a 'No performance issues found.' message 2) A '## Performance Audit Summary' section is present at the end with backend bottlenecks, frontend optimization issues, API performance issues, and overall performance health rating 3) No code modifications were made -- this is a read-only agent.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^performance-auditor$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate testing-auditor agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains findings using the audit finding format (### F-TEST-NNN prefix) with Severity, Category, File, Lines, Agent, Description, Evidence, Impact, Suggested Fix fields -- OR a 'No testing issues found.' message 2) A '## Test Coverage Map' table is present mapping areas to coverage status 3) A '## Testing Audit Summary' section is present at the end with test files found, test cases found, critical paths covered ratio, test suite pass rate, and overall test health rating 4) No code modifications were made -- this is a read-only agent.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^testing-auditor$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate audit-bug-detector agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains findings using the audit finding format (### F-BUG-NNN prefix) with Severity, Category, File, Lines, Agent, Description, Evidence, Impact, Suggested Fix fields -- OR a 'No cross-layer bugs found.' message 2) Each finding includes a Flow and Trace section showing the end-to-end path (Component → API → Controller → Service → DB) 3) A '## Bug Detection Summary' section is present at the end with critical flows traced, cross-layer bugs found, and overall flow integrity rating 4) No code modifications were made -- this is a read-only agent.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^audit-bug-detector$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate audit-finding-validator agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) Output contains validation results for each finding with '### Validation: F-' prefix, including Verdict (CONFIRMED/FALSE POSITIVE/NEEDS CONTEXT), Confidence (HIGH/MEDIUM), File verified, Evidence verified, and Reason fields 2) A '## Validation Summary' section is present at the end with total findings validated, counts per verdict, and false positive rate percentage 3) No code modifications were made -- this is a read-only agent that must not create or edit any files.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^audit-finding-validator$"
},
{
"hooks": [
{
"type": "prompt",
"prompt": "Validate audit-report-generator agent output. The agent's last message and metadata are in the JSON below:\n$ARGUMENTS\n\nExtract the last_assistant_message field and check: 1) The agent wrote AUDIT-REPORT.md (mentions writing or creating the file) 2) The agent wrote audit-findings.json (mentions writing or creating the JSON file) 3) The report includes an Executive Summary table with severity counts 4) The report includes a Risk Assessment section 5) The report includes Recommendations grouped by Immediate Actions, Short-term Actions, and Technical Debt.\n\nRespond {\"ok\": true} if ALL pass, or {\"ok\": false, \"reason\": \"<failure>\"} if ANY fail.",
"timeout": 30
}
],
"matcher": "^audit-report-generator$"
}
],
"SubagentStart": [
{
"hooks": [
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/scripts/inject-memory.sh",
"timeout": 5
}
],
"matcher": ".*"
}
]
}