Zero Script QA - Testing methodology without test scripts. Uses structured JSON logging and real-time Docker monitoring for verification. Triggers: zero script qa, log-based testing, docker logs, 제로 스크립트 QA, ゼロスクリプトQA, 零脚本QA
Installation:

```
/plugin marketplace add popup-studio-ai/bkit-claude-code
/plugin install bkit@bkit-marketplace
```

This skill inherits all available tools. When active, it can use any tool Claude has access to.
Zero Script QA is a methodology that verifies features through structured logs and real-time monitoring without writing test scripts.
- **Traditional**: Write test code → Execute → Check results → Maintain
- **Zero Script**: Build log infrastructure → Manual UX test → AI log analysis → Auto issue detection
```json
{
  "timestamp": "2026-01-08T10:30:00.000Z",
  "level": "INFO",
  "service": "api",
  "request_id": "req_abc123",
  "message": "API Request completed",
  "data": {
    "method": "POST",
    "path": "/api/users",
    "status": 200,
    "duration_ms": 45
  }
}
```
| Field | Type | Description |
|---|---|---|
| timestamp | ISO 8601 | Time of occurrence |
| level | string | DEBUG, INFO, WARNING, ERROR |
| service | string | Service name (api, web, worker, etc.) |
| request_id | string | Request tracking ID |
| message | string | Log message |
| data | object | Additional data (optional) |
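For reference, the same schema can be written down in code. A minimal sketch as a Python `TypedDict` (the `LogRecord` name is illustrative; `NotRequired` needs Python 3.11+ or `typing_extensions`):

```python
from typing import Any, NotRequired, TypedDict

class LogRecord(TypedDict):
    timestamp: str                     # ISO 8601, e.g. "2026-01-08T10:30:00.000Z"
    level: str                         # DEBUG | INFO | WARNING | ERROR
    service: str                       # api, web, worker, ...
    request_id: str                    # e.g. "req_abc123"
    message: str
    data: NotRequired[dict[str, Any]]  # optional extra payload
```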
| Environment | Minimum Level | Purpose |
|---|---|---|
| Local | DEBUG | Development and QA |
| Staging | DEBUG | QA and integration testing |
| Production | INFO | Operations monitoring |
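On the backend, this mapping can be driven by an environment variable; a minimal sketch, assuming the `LOG_LEVEL` variable set in the compose file shown later:

```python
import logging
import os

# DEBUG for local/staging, INFO for production (see table above)
level_name = os.environ.get("LOG_LEVEL", "INFO").upper()
logging.getLogger("api").setLevel(getattr(logging, level_name, logging.INFO))
```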
```
Client → API Gateway → Backend → Database
  ↓          ↓            ↓          ↓
req_abc    req_abc     req_abc    req_abc
```

Trackable with the same Request ID across all layers.
```typescript
// middleware.ts
import { v4 as uuidv4 } from 'uuid';

export function generateRequestId(): string {
  return `req_${uuidv4().slice(0, 8)}`;
}

// Propagate via header
headers['X-Request-ID'] = requestId;
```

```typescript
// API client: reuse the incoming ID or generate a new one
const requestId = headers['X-Request-ID'] || generateRequestId();

// Include in all logs
logger.info('Processing request', { request_id: requestId });

// Include in header when calling downstream services
await fetch(url, {
  headers: { 'X-Request-ID': requestId },
});
```
```python
# middleware/logging.py
import logging
import time
import uuid
import json
from fastapi import Request

class JsonFormatter(logging.Formatter):
    def format(self, record):
        log_record = {
            "timestamp": self.formatTime(record),
            "level": record.levelname,
            "service": "api",
            "request_id": getattr(record, 'request_id', 'N/A'),
            "message": record.getMessage(),
        }
        if hasattr(record, 'data'):
            log_record["data"] = record.data
        return json.dumps(log_record)

# Emit JSON logs to stdout so `docker compose logs` picks them up
logger = logging.getLogger("api")
_handler = logging.StreamHandler()
_handler.setFormatter(JsonFormatter())
logger.addHandler(_handler)
logger.setLevel(logging.DEBUG)

class LoggingMiddleware:
    async def __call__(self, request: Request, call_next):
        # Reuse the caller's Request ID or generate a new one
        request_id = request.headers.get('X-Request-ID', f'req_{uuid.uuid4().hex[:8]}')
        request.state.request_id = request_id
        start_time = time.time()

        # Request logging
        logger.info(
            "Request started",
            extra={
                'request_id': request_id,
                'data': {
                    'method': request.method,
                    'path': request.url.path,
                    'query': str(request.query_params)
                }
            }
        )

        response = await call_next(request)
        duration = (time.time() - start_time) * 1000

        # Response logging (including 200 OK!)
        logger.info(
            "Request completed",
            extra={
                'request_id': request_id,
                'data': {
                    'status': response.status_code,
                    'duration_ms': round(duration, 2)
                }
            }
        )

        response.headers['X-Request-ID'] = request_id
        return response
```
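Registering the middleware is one line; a minimal sketch, assuming a standard FastAPI app (`main.py` is illustrative):

```python
# main.py (illustrative wiring)
from fastapi import FastAPI
from middleware.logging import LoggingMiddleware

app = FastAPI()

# FastAPI's "http" middleware hook accepts any async callable with the
# (request, call_next) signature, including this class instance.
app.middleware("http")(LoggingMiddleware())
```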
```python
# services/user_service.py
# (`User` and `db` are the application's ORM model and session)
def create_user(data: dict, request_id: str):
    logger.info("Creating user", extra={
        'request_id': request_id,
        'data': {'email': data['email']}
    })
    # Business logic
    user = User(**data)
    db.add(user)
    db.commit()
    logger.info("User created", extra={
        'request_id': request_id,
        'data': {'user_id': user.id}
    })
    return user
```
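The Request ID set by the middleware reaches the service layer via `request.state`; a minimal sketch of a route handler (the path and handler name are illustrative):

```python
# routes/users.py (illustrative; `app` comes from main.py)
from fastapi import Request

@app.post("/api/users")
async def create_user_endpoint(request: Request, payload: dict):
    # request.state.request_id was set by LoggingMiddleware
    user = create_user(payload, request_id=request.state.request_id)
    return {"user_id": user.id}
```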
```typescript
// lib/logger.ts
type LogLevel = 'DEBUG' | 'INFO' | 'WARNING' | 'ERROR';

interface LogData {
  request_id?: string;
  [key: string]: any;
}

const LOG_LEVELS: Record<LogLevel, number> = {
  DEBUG: 0,
  INFO: 1,
  WARNING: 2,
  ERROR: 3,
};

const MIN_LEVEL = process.env.NODE_ENV === 'production' ? 'INFO' : 'DEBUG';

function log(level: LogLevel, message: string, data?: LogData) {
  if (LOG_LEVELS[level] < LOG_LEVELS[MIN_LEVEL]) return;
  const logEntry = {
    timestamp: new Date().toISOString(),
    level,
    service: 'web',
    request_id: data?.request_id || 'N/A',
    message,
    // request_id is lifted to the top level; keys set to undefined
    // are dropped by JSON.stringify
    data: data ? { ...data, request_id: undefined } : undefined,
  };
  console.log(JSON.stringify(logEntry));
}

export const logger = {
  debug: (msg: string, data?: LogData) => log('DEBUG', msg, data),
  info: (msg: string, data?: LogData) => log('INFO', msg, data),
  warning: (msg: string, data?: LogData) => log('WARNING', msg, data),
  error: (msg: string, data?: LogData) => log('ERROR', msg, data),
};
```
```typescript
// lib/api-client.ts
import { logger } from './logger';
import { v4 as uuidv4 } from 'uuid';

// Minimal error type so callers can catch API failures distinctly
class ApiError extends Error {}

export async function apiClient<T>(
  endpoint: string,
  options: RequestInit = {}
): Promise<T> {
  const requestId = `req_${uuidv4().slice(0, 8)}`;
  const startTime = Date.now();

  logger.info('API Request started', {
    request_id: requestId,
    method: options.method || 'GET',
    endpoint,
  });

  try {
    const response = await fetch(`/api${endpoint}`, {
      ...options,
      headers: {
        'Content-Type': 'application/json',
        'X-Request-ID': requestId,
        ...options.headers,
      },
    });
    const duration = Date.now() - startTime;
    const data = await response.json();

    // Log 200 OK too!
    logger.info('API Request completed', {
      request_id: requestId,
      status: response.status,
      duration_ms: duration,
    });

    if (!response.ok) {
      logger.error('API Request failed', {
        request_id: requestId,
        status: response.status,
        error: data.error,
      });
      throw new ApiError(data.error);
    }

    return data;
  } catch (error) {
    logger.error('API Request error', {
      request_id: requestId,
      error: error instanceof Error ? error.message : 'Unknown error',
    });
    throw error;
  }
}
```
```nginx
http {
  log_format json_combined escape=json '{'
    '"timestamp":"$time_iso8601",'
    '"level":"INFO",'
    '"service":"nginx",'
    '"request_id":"$http_x_request_id",'
    '"message":"HTTP Request",'
    '"data":{'
      '"remote_addr":"$remote_addr",'
      '"method":"$request_method",'
      '"uri":"$request_uri",'
      '"status":$status,'
      '"body_bytes_sent":$body_bytes_sent,'
      '"request_time":$request_time,'
      '"upstream_response_time":"$upstream_response_time",'
      '"http_referer":"$http_referer",'
      '"http_user_agent":"$http_user_agent"'
    '}'
  '}';

  access_log /var/log/nginx/access.log json_combined;
}
```
```yaml
version: '3.8'

services:
  api:
    build: ./backend
    environment:
      - LOG_LEVEL=DEBUG
      - LOG_FORMAT=json
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"

  web:
    build: ./frontend
    environment:
      - NODE_ENV=development
    depends_on:
      - api

  nginx:
    image: nginx:alpine
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf
    ports:
      - "80:80"
    depends_on:
      - api
      - web
```
```bash
# Stream all service logs
docker compose logs -f

# Specific service only
docker compose logs -f api

# Filter errors only
docker compose logs -f | grep '"level":"ERROR"'

# Track specific Request ID
docker compose logs -f | grep 'req_abc123'
```
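Beyond `grep`, the full trace for one request can be extracted programmatically. A minimal sketch, assuming `docker compose logs` output where each line carries a container prefix and a `|` before the JSON payload (the `trace.py` name is illustrative):

```python
#!/usr/bin/env python3
# trace.py (illustrative)
# Usage: docker compose logs --no-color | python trace.py req_abc123
import json
import sys

target = sys.argv[1]  # e.g. "req_abc123"
for line in sys.stdin:
    # docker compose prefixes each line with "<container> | "
    _, _, payload = line.partition("| ")
    try:
        record = json.loads(payload)
    except json.JSONDecodeError:
        continue  # skip non-JSON lines (startup banners, stack traces, ...)
    if record.get("request_id") == target:
        print(f'{record.get("timestamp")} [{record.get("service")}] {record.get("message")}')
```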
```bash
# Start development environment
docker compose up -d

# Start log monitoring (Claude Code monitors)
docker compose logs -f
```
User tests actual features in browser:
1. Sign up attempt
2. Login attempt
3. Use core features
4. Test edge cases
Claude Code in real-time:
1. Monitor log stream
2. Detect error patterns
3. Detect abnormal response times
4. Track entire flow via Request ID
5. Auto-document issues
# QA Issue Report
## Issues Found
### ISSUE-001: Insufficient error handling on login failure
- **Request ID**: req_abc123
- **Severity**: Medium
- **Reproduction path**: Login → Wrong password
- **Log**:
```json
{"level":"ERROR","message":"Login failed","data":{"error":"Invalid credentials"}}
```

---

## Issue Detection Patterns

### 1. Error Detection
```json
{"level":"ERROR","message":"..."}
```
→ Report immediately

### 2. Slow Response
```json
{"data":{"duration_ms":3000}}
```
→ Warn when `duration_ms` exceeds 1000 ms

### 3. Repeated Failures
3+ consecutive failures on the same endpoint
→ Report as a potential system issue

### 4. Server Errors
```json
{"data":{"status":500}}
```
→ Report 5xx errors immediately
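These rules are simple enough to encode directly. A minimal detector sketch over parsed log records (the thresholds mirror the rules above; it assumes failure records carry both `path` and `status` in `data`, which may require correlating request and response logs via `request_id`):

```python
# detector.py (illustrative): apply the detection patterns to parsed records
from collections import defaultdict

# Consecutive failure count per endpoint
consecutive_failures: dict = defaultdict(int)

def check(record: dict) -> list:
    issues = []
    data = record.get("data", {})
    status = data.get("status")

    if record.get("level") == "ERROR":
        issues.append("ERROR log -> report immediately")
    if data.get("duration_ms", 0) > 1000:
        issues.append(f"slow response: {data['duration_ms']} ms (> 1000 ms)")
    if isinstance(status, int) and status >= 500:
        issues.append(f"{status} server error -> report immediately")

    path = data.get("path")
    if path and isinstance(status, int):
        if status >= 400:
            consecutive_failures[path] += 1
            if consecutive_failures[path] >= 3:
                issues.append(f"{path}: 3+ consecutive failures -> potential system issue")
        else:
            consecutive_failures[path] = 0
    return issues
```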
| Phase | Zero Script QA Integration |
|---|---|
| Phase 4 (API) | API response logging verification |
| Phase 6 (UI) | Frontend logging verification |
| Phase 7 (Security) | Security event logging verification |
| Phase 8 (Review) | Log quality review |
| Phase 9 (Deployment) | Production log level configuration |
Based on bkamp.ai notification feature development:
| Cycle | Pass Rate | Bug Found | Fix Applied |
|---|---|---|---|
| 1st | 30% | DB schema mismatch | Schema migration |
| 2nd | 45% | NULL handling missing | Add null checks |
| 3rd | 55% | Routing error | Fix deeplinks |
| 4th | 65% | Type mismatch | Fix enum types |
| 5th | 70% | Calculation error | Fix count logic |
| 6th | 75% | Event missing | Add event triggers |
| 7th | 82% | Cache sync issue | Fix cache invalidation |
| 8th | 89% | Stable | Final polish |
```
┌─────────────────────────────────────────────────────────────┐
│                    Iterative Test Cycle                     │
├─────────────────────────────────────────────────────────────┤
│                                                             │
│  Cycle N:                                                   │
│  1. Run test script (E2E or manual)                         │
│  2. Claude monitors logs in real-time                       │
│  3. Record pass/fail results                                │
│  4. Claude identifies root cause of failures                │
│  5. Fix code immediately (hot reload)                       │
│  6. Document: Cycle N → Bug → Fix                           │
│                                                             │
│  Repeat until acceptable pass rate (>85%)                   │
│                                                             │
└─────────────────────────────────────────────────────────────┘
```
```bash
#!/bin/bash
# E2E Test Script Template

API_URL="http://localhost:8000"
TOKEN="your-test-token"

PASS_COUNT=0
FAIL_COUNT=0
SKIP_COUNT=0

GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[0;33m'
NC='\033[0m'

test_feature_action() {
  echo -n "Testing: Feature action... "
  response=$(curl -s -X POST "$API_URL/api/v1/feature/action" \
    -H "Authorization: Bearer $TOKEN" \
    -H "Content-Type: application/json" \
    -d '{"param": "value"}')

  if [[ "$response" == *"expected_result"* ]]; then
    echo -e "${GREEN}✅ PASS${NC}"
    ((PASS_COUNT++))
  else
    echo -e "${RED}❌ FAIL${NC}"
    echo "Response: $response"
    ((FAIL_COUNT++))
  fi
}

# Run all tests
test_feature_action
# ... more tests

# Summary
echo ""
echo "═══════════════════════════════════════"
echo "Test Results:"
echo -e "  ${GREEN}✅ PASS: $PASS_COUNT${NC}"
echo -e "  ${RED}❌ FAIL: $FAIL_COUNT${NC}"
echo -e "  ${YELLOW}⏭️ SKIP: $SKIP_COUNT${NC}"
echo "═══════════════════════════════════════"
```
# Feature Test Results - Cycle N
## Summary
- **Date**: YYYY-MM-DD
- **Feature**: {feature name}
- **Pass Rate**: N%
- **Tests**: X passed / Y total
## Results
| Test Case | Status | Notes |
|-----------|--------|-------|
| Test 1 | ✅ | |
| Test 2 | ❌ | {error description} |
| Test 3 | ⏭️ | {skip reason} |
## Bugs Found
### BUG-001: {Title}
- **Root Cause**: {description}
- **Fix**: {what was changed}
- **Files**: `path/to/file.py:123`
## Next Cycle Plan
- {what to test next}
This skill should be used when the user asks to "create a slash command", "add a command", "write a custom command", "define command arguments", "use command frontmatter", "organize commands", "create command with file references", "interactive command", "use AskUserQuestion in command", or needs guidance on slash command structure, YAML frontmatter fields, dynamic arguments, bash execution in commands, user interaction patterns, or command development best practices for Claude Code.
This skill should be used when the user asks to "create an agent", "add an agent", "write a subagent", "agent frontmatter", "when to use description", "agent examples", "agent tools", "agent colors", "autonomous agent", or needs guidance on agent structure, system prompts, triggering conditions, or agent development best practices for Claude Code plugins.
This skill should be used when the user asks to "create a hook", "add a PreToolUse/PostToolUse/Stop hook", "validate tool use", "implement prompt-based hooks", "use ${CLAUDE_PLUGIN_ROOT}", "set up event-driven automation", "block dangerous commands", or mentions hook events (PreToolUse, PostToolUse, Stop, SubagentStop, SessionStart, SessionEnd, UserPromptSubmit, PreCompact, Notification). Provides comprehensive guidance for creating and implementing Claude Code plugin hooks with focus on advanced prompt-based hooks API.