Provides patterns for AI-first apps using LLMs for classification, extraction, generation, and reasoning. Includes TypeScript LLM client with Anthropic/Zod, prompt templates, project structure, and LLM testing strategies.
npx claudepluginhub joshuarweaver/cascade-code-languages-misc-2 --plugin alinaqi-claude-bootstrap
For AI-first applications where LLMs handle logical operations.
LLM for logic, code for plumbing.
Use LLMs for:
- Judgment calls: classification, extraction, generation, and reasoning over fuzzy natural-language input.

Use traditional code for:
- Plumbing: routing, persistence, orchestration, and validating LLM output; anything that must behave deterministically (see the sketch below).
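As a tiny illustration of that split, the LLM makes the fuzzy call and ordinary code branches on its result. The handler name, file location, and return values here are hypothetical; classifyTicket is the service sketched further down:

// Sketch only: LLM for the judgment, plain code for the deterministic routing around it.
import { classifyTicket } from './core/services/ticket';

export async function handleIncomingTicket(ticket: string): Promise<string> {
  const { category } = await classifyTicket(ticket); // LLM: classification (judgment)
  // Traditional code: deterministic branching on the validated result.
  switch (category) {
    case 'support':  return 'queued for support';
    case 'sales':    return 'forwarded to sales';
    case 'feedback': return 'logged as feedback';
    default:         return 'sent to triage';
  }
}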
project/
├── src/
│   ├── core/
│   │   ├── prompts/           # Prompt templates
│   │   │   ├── classify.ts
│   │   │   └── extract.ts
│   │   ├── llm/               # LLM client and utilities
│   │   │   ├── client.ts      # LLM client wrapper
│   │   │   ├── schemas.ts     # Response schemas (Zod)
│   │   │   └── index.ts
│   │   └── services/          # Business logic using LLM
│   ├── infra/
│   └── ...
├── tests/
│   ├── unit/
│   ├── integration/
│   └── llm/                   # LLM-specific tests
│       ├── fixtures/          # Saved responses for deterministic tests
│       ├── evals/             # Evaluation test suites
│       └── mocks/             # Mock LLM responses
└── _project_specs/
    └── prompts/               # Prompt specifications
// core/llm/client.ts
import Anthropic from '@anthropic-ai/sdk';
import { z } from 'zod';

const client = new Anthropic();

interface LLMCallOptions<T> {
  prompt: string;
  schema: z.ZodSchema<T>;
  model?: string;
  maxTokens?: number;
}

// Generic typed call: send a prompt, parse the JSON reply, validate it against a Zod schema.
export async function llmCall<T>({
  prompt,
  schema,
  model = 'claude-sonnet-4-20250514',
  maxTokens = 1024,
}: LLMCallOptions<T>): Promise<T> {
  const response = await client.messages.create({
    model,
    max_tokens: maxTokens,
    messages: [{ role: 'user', content: prompt }],
  });

  const text = response.content[0].type === 'text'
    ? response.content[0].text
    : '';

  // Parse and validate response (prompts must request JSON-only output, or JSON.parse will throw)
  const parsed = JSON.parse(text);
  return schema.parse(parsed);
}
// core/llm/schemas.ts
import { z } from 'zod';

export const ClassificationSchema = z.object({
  category: z.enum(['support', 'sales', 'feedback', 'other']),
  confidence: z.number().min(0).max(1),
  reasoning: z.string(),
});

export type Classification = z.infer<typeof ClassificationSchema>;
// core/prompts/classify.ts
export function classifyTicketPrompt(ticket: string): string {
  return `Classify this support ticket into one of these categories:
- support: Technical issues or help requests
- sales: Pricing, plans, or purchase inquiries
- feedback: Suggestions or complaints
- other: Anything else

Respond with JSON:
{
  "category": "...",
  "confidence": 0.0-1.0,
  "reasoning": "brief explanation"
}

Ticket:
${ticket}`;
}
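The tests below import classifyTicket from core/services/ticket, which isn't shown above. A minimal sketch of that service, with the file path and function name taken from the test imports, just wires the prompt, schema, and client together:

// core/services/ticket.ts (sketch; LLM handles the judgment, this function is plumbing)
import { llmCall } from '../llm/client';
import { ClassificationSchema, type Classification } from '../llm/schemas';
import { classifyTicketPrompt } from '../prompts/classify';

export async function classifyTicket(ticket: string): Promise<Classification> {
  return llmCall({
    prompt: classifyTicketPrompt(ticket),
    schema: ClassificationSchema,
  });
}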
// core/prompts/index.ts
// Versioning assumes classify.ts exports explicit variants (e.g. classifyTicketPromptV1, classifyTicketPromptV2).
import { classifyTicketPromptV1, classifyTicketPromptV2 } from './classify';

export const PROMPTS = {
  classify: {
    v1: classifyTicketPromptV1,
    v2: classifyTicketPromptV2, // improved accuracy
    current: classifyTicketPromptV2,
  },
} as const;
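Call sites then resolve the prompt through the registry rather than importing a specific version, so bumping current is a one-line change. A usage sketch (paths assume the caller lives in core/services/):

// Sketch: always use whichever prompt version is marked current.
import { PROMPTS } from '../prompts';
import { llmCall } from '../llm/client';
import { ClassificationSchema } from '../llm/schemas';

export async function classifyWithCurrentPrompt(ticket: string) {
  return llmCall({
    prompt: PROMPTS.classify.current(ticket),
    schema: ClassificationSchema,
  });
}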
// tests/llm/mocks/classify.mock.ts
export const mockClassifyResponse = {
  category: 'support',
  confidence: 0.95,
  reasoning: 'User is asking for help with login',
};
// tests/unit/services/ticket.test.ts
import { describe, it, expect, vi } from 'vitest';
import { classifyTicket } from '../../../src/core/services/ticket';
import { mockClassifyResponse } from '../../llm/mocks/classify.mock';

// Mock the LLM client so unit tests never hit the network
vi.mock('../../../src/core/llm/client', () => ({
  llmCall: vi.fn().mockResolvedValue(mockClassifyResponse),
}));

describe('classifyTicket', () => {
  it('returns classification for ticket', async () => {
    const result = await classifyTicket('I cannot log in');
    expect(result.category).toBe('support');
    expect(result.confidence).toBeGreaterThan(0.9);
  });
});
// tests/llm/fixtures/classify.fixtures.json
{
  "support_ticket": {
    "input": "I can't reset my password",
    "expected_category": "support",
    "raw_response": "{\"category\":\"support\",\"confidence\":0.98,\"reasoning\":\"Password reset is a support issue\"}"
  }
}
// tests/llm/classify.fixture.test.ts
import { describe, it, expect } from 'vitest';
// JSON import requires "resolveJsonModule": true in tsconfig.json
import fixtures from './fixtures/classify.fixtures.json';
import { ClassificationSchema } from '../../src/core/llm/schemas';

describe('Classification Response Parsing', () => {
  Object.entries(fixtures).forEach(([name, fixture]) => {
    it(`parses ${name} correctly`, () => {
      const parsed = JSON.parse(fixture.raw_response);
      const result = ClassificationSchema.parse(parsed);
      expect(result.category).toBe(fixture.expected_category);
    });
  });
});
// tests/llm/evals/classify.eval.test.ts
import { describe, it, expect } from 'vitest';
import { classifyTicket } from '../../../src/core/services/ticket';

const TEST_CASES = [
  { input: 'How much does the pro plan cost?', expected: 'sales' },
  { input: 'The app crashes when I click save', expected: 'support' },
  { input: 'You should add dark mode', expected: 'feedback' },
  { input: 'What time is it in Tokyo?', expected: 'other' },
];

describe('Classification Accuracy (Eval)', () => {
  // Skip in regular CI, run nightly
  const runEvals = process.env.RUN_LLM_EVALS === 'true';

  // With only four cases, >90% effectively means all four must pass; grow the set over time.
  it.skipIf(!runEvals)('achieves >90% accuracy on test set', async () => {
    let correct = 0;
    for (const testCase of TEST_CASES) {
      const result = await classifyTicket(testCase.input);
      if (result.category === testCase.expected) correct++;
    }
    const accuracy = correct / TEST_CASES.length;
    expect(accuracy).toBeGreaterThan(0.9);
  }, 60000); // 60s timeout for LLM calls
});
# .github/workflows/quality.yml (add to existing)
jobs:
  quality:
    steps:
      # ... existing steps ...
      - name: Run Tests (with LLM mocks)
        run: npm run test:coverage

  llm-evals:
    runs-on: ubuntu-latest
    # Run nightly or on-demand
    if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
    steps:
      - uses: actions/checkout@v4
      - name: Setup Node
        uses: actions/setup-node@v4
        with:
          node-version: '20'
      - name: Install dependencies
        run: npm ci
      - name: Run LLM Evals
        run: npm run test:evals
        env:
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          RUN_LLM_EVALS: 'true'
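The workflow assumes two npm scripts, test:coverage and test:evals, which are not defined above. One possible sketch, assuming Vitest as the runner (per the tests earlier):

// package.json (scripts assumed by the workflow; a sketch, not the only way to split them)
{
  "scripts": {
    "test:coverage": "vitest run --coverage --exclude 'tests/llm/evals/**'",
    "test:evals": "vitest run tests/llm/evals"
  }
}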
// core/llm/client.ts - add tracking
interface LLMMetrics {
  model: string;
  inputTokens: number;
  outputTokens: number;
  latencyMs: number;
  cost: number;
}

export async function llmCallWithMetrics<T>({
  prompt,
  schema,
  model = 'claude-sonnet-4-20250514',
  maxTokens = 1024,
}: LLMCallOptions<T>): Promise<{ result: T; metrics: LLMMetrics }> {
  const start = Date.now();

  const response = await client.messages.create({
    model,
    max_tokens: maxTokens,
    messages: [{ role: 'user', content: prompt }],
  });

  const text = response.content[0].type === 'text' ? response.content[0].text : '';
  const result = schema.parse(JSON.parse(text));

  const metrics: LLMMetrics = {
    model,
    inputTokens: response.usage.input_tokens,
    outputTokens: response.usage.output_tokens,
    latencyMs: Date.now() - start,
    cost: calculateCost(response.usage, model), // per-model pricing helper, sketched below
  };

  // Log or send to monitoring
  console.log('[LLM]', metrics);

  return { result, metrics };
}
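calculateCost is referenced but not defined above. A minimal sketch, assuming you maintain your own per-model price table; the rates below are placeholders, not authoritative pricing:

// core/llm/cost.ts (sketch; keep the table in sync with the provider's published rates)

// Structural type matching the usage object returned by the Messages API.
interface TokenUsage {
  input_tokens: number;
  output_tokens: number;
}

// USD per million tokens - placeholder numbers, check the official pricing page.
const PRICING: Record<string, { inputPerMTok: number; outputPerMTok: number }> = {
  'claude-sonnet-4-20250514': { inputPerMTok: 3, outputPerMTok: 15 },
};

export function calculateCost(usage: TokenUsage, model: string): number {
  const price = PRICING[model];
  if (!price) return 0; // unknown model: report zero rather than guessing
  return (
    (usage.input_tokens / 1_000_000) * price.inputPerMTok +
    (usage.output_tokens / 1_000_000) * price.outputPerMTok
  );
}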