{"name":"maxwellcalkin-sentinel-ai","owner":{"name":"ClaudePluginHub"},"plugins":[{"name":"maxwellcalkin-sentinel-ai","source":{"source":"github","repo":"maxwellcalkin/sentinel-ai"},"description":"Real-time safety scanning for LLM interactions. Detects prompt injection, PII leaks, harmful content, toxicity, obfuscation, secrets, and dangerous tool calls — 600-case benchmark at 100% accuracy with sub-millisecond latency.","version":"0.15.0","strict":true,"keywords":["safety","guardrails","security","prompt-injection","pii","scanning","secrets","toxicity","harmful-content","obfuscation"],"category":"deployment"}]}