Quality testing patterns with pytest for FastAPI projects. **TDD-first approach**.
From the python plugin. Install with: `npx claudepluginhub ruslan-korneev/claude-plugins --plugin python`. This skill uses the workspace's default tool permissions.
Bundled files: examples/conftest.py, references/fastapi-testing.md, references/fixtures.md, references/mocking.md. Related skill: designs and optimizes AI agent action spaces, tool definitions, observation formats, error recovery, and context for higher task completion rates.
Enables AI agents to execute x402 payments with per-task budgets, spending controls, and non-custodial wallets via MCP tools. Use when agents pay for APIs, services, or other agents.
Compares coding agents like Claude Code and Aider on custom YAML-defined codebase tasks using git worktrees, measuring pass rate, cost, time, and consistency.
Quality testing patterns with pytest for FastAPI projects. TDD-first approach.
Use this skill when the user:
# ✅ Good — the name describes what is done, under which condition,
# and what outcome is expected.  (Bodies added as `...` so the snippet
# is valid, importable Python.)
async def test_create_user_with_valid_data_returns_user_with_id():
    ...


async def test_create_user_with_duplicate_email_raises_conflict_error():
    ...


async def test_get_user_by_id_when_not_exists_raises_not_found():
    ...


# ❌ Bad — the name says nothing about the scenario or expectation
async def test_create_user():
    ...


async def test_user():
    ...


async def test_1():
    ...
# ✅ Good — one test = one scenario
async def test_create_user_returns_user_with_id(service):
    # `service` is a fixture; `user_data` is presumably module-level test
    # data — TODO confirm against the full example file.
    result = await service.create(user_data)
    assert result.id is not None


async def test_create_user_saves_email(service):
    result = await service.create(user_data)
    assert result.email == user_data["email"]


# ❌ Bad — tests multiple things; one failing assert hides the others
async def test_create_user(service):
    result = await service.create(user_data)
    assert result.id is not None
    assert result.email == user_data["email"]
    assert result.created_at is not None
    assert await service.get(result.id) == result
async def test_create_order_calculates_total(service):
    """AAA pattern demo: Arrange, Act, Assert — one clear phase per block."""
    # Arrange — prepare data
    items = [
        {"product_id": 1, "quantity": 2, "price": 100},
        {"product_id": 2, "quantity": 1, "price": 50},
    ]
    # Act — execute action
    order = await service.create(items=items)
    # Assert — verify result (2*100 + 1*50 = 250)
    assert order.total == 250
Tests should not depend on each other:
# ✅ Good — each test is independent
@pytest.fixture
async def session(engine):
    """Session bound to an outer transaction that is rolled back per test."""
    async with engine.connect() as conn:
        await conn.begin()
        async with AsyncSession(bind=conn) as session:
            yield session
        await conn.rollback()  # Rollback after each test
# ❌ Bad — shared state: module-level mutable variable couples the tests
_created_user = None


async def test_create_user(service):
    global _created_user
    _created_user = await service.create(data)


async def test_get_user(service):
    user = await service.get(_created_user.id)  # Depends on previous test!
# ✅ Use parametrize for similar checks
# Each tuple becomes its own pytest case, reported separately on failure.
@pytest.mark.parametrize("email,is_valid", [
    ("user@example.com", True),
    ("user@subdomain.example.com", True),
    ("invalid", False),
    ("@example.com", False),
    ("user@", False),
])
def test_email_validation(email: str, is_valid: bool):
    assert validate_email(email) == is_valid
# ❌ Do NOT use parametrize for different scenarios
# Better to have separate tests with clear names
async def test_create_user_success(service):
    ...


async def test_create_user_duplicate_email(service):
    ...
# pyproject.toml
[tool.pytest.ini_options]
asyncio_mode = "auto"                            # every async test runs under asyncio, no marker needed
asyncio_default_fixture_loop_scope = "function"  # fresh event loop per test function
@pytest.fixture
async def session() -> AsyncGenerator[AsyncSession, None]:
    """Plain DB session fixture (no per-test transaction rollback)."""
    async with async_session_maker() as session:
        yield session
@pytest.fixture
async def client(session: AsyncSession) -> AsyncGenerator[AsyncClient, None]:
    # NOTE(review): `session` is requested but not wired into the app here —
    # presumably the dependency override happens elsewhere; confirm.
    async with AsyncClient(transport=ASGITransport(app=app)) as client:
        yield client
More details: ${CLAUDE_PLUGIN_ROOT}/skills/pytest-patterns/references/fastapi-testing.md
from httpx import ASGITransport, AsyncClient
async def test_create_user_endpoint(client: AsyncClient):
    """End-to-end check of the create-user endpoint via the ASGI transport."""
    response = await client.post(
        "/api/v1/users",
        json={"email": "test@example.com", "name": "Test"},
    )
    assert response.status_code == 201
    assert response.json()["id"] is not None
@pytest.fixture
async def client(session: AsyncSession):
    """HTTP client whose app resolves get_session to the test `session`.

    The override removal is placed in a ``finally`` block so that a failure
    while constructing or closing the client cannot leak the override into
    unrelated tests.
    """
    def override_session():
        yield session

    app.dependency_overrides[get_session] = override_session
    try:
        async with AsyncClient(transport=ASGITransport(app=app)) as client:
            yield client
    finally:
        app.dependency_overrides.clear()
More details: ${CLAUDE_PLUGIN_ROOT}/skills/pytest-patterns/references/fixtures.md
# tests/conftest.py
@pytest.fixture(scope="session")
def engine():
    """One async engine for the whole test session (connection pooling)."""
    return create_async_engine(TEST_DATABASE_URL)
@pytest.fixture
async def session(engine) -> AsyncGenerator[AsyncSession, None]:
    """Per-test session inside an outer transaction that is always rolled back.

    Fixes two teardown gaps in the previous version: the session is now a
    context manager (so it is reliably closed), and the rollback runs in a
    ``finally`` block (so isolation holds even if closing the session raises).
    """
    async with engine.connect() as conn:
        await conn.begin()
        try:
            async with AsyncSession(bind=conn) as session:
                yield session
        finally:
            await conn.rollback()  # undo everything the test did
@pytest.fixture
async def client(session) -> AsyncGenerator[AsyncClient, None]:
    """ASGI test client with get_session overridden to the test session."""
    app.dependency_overrides[get_session] = lambda: session
    try:
        async with AsyncClient(transport=ASGITransport(app=app)) as c:
            yield c
    finally:
        # Always restore overrides so one failing test can't poison the rest.
        app.dependency_overrides.clear()
Use real database connection, not mocks. Test database is created specifically for tests.
# ✅ Good — real test database
@pytest.fixture(scope="session")
def engine():
    """Engine pointed at a dedicated, throwaway test database."""
    return create_async_engine("postgresql+asyncpg://test:test@localhost:5432/test_db")


@pytest.fixture
async def session(engine) -> AsyncGenerator[AsyncSession, None]:
    """Session wrapped in an outer transaction for per-test isolation."""
    async with engine.connect() as conn:
        await conn.begin()
        async with AsyncSession(bind=conn) as session:
            yield session
        await conn.rollback()  # Isolation via transactions
# ❌ Bad — mock database
@pytest.fixture
def mock_session():
    return AsyncMock(spec=AsyncSession)  # Doesn't test real SQL!
Each worker gets its own DB: gw{N}_test_{dbname}
def get_test_db_name(worker_id: str, base_name: str = "myapp") -> str:
    """gw0 → gw0_test_myapp, master → test_myapp"""
    # The master (non-xdist) run gets no worker prefix.
    prefix = "" if worker_id == "master" else f"{worker_id}_"
    return f"{prefix}test_{base_name}"
@pytest.fixture(scope="session")
def database_url(worker_id: str) -> str:
    """Per-xdist-worker database URL so parallel workers never share a DB."""
    name = get_test_db_name(worker_id)
    return f"postgresql+asyncpg://test:test@localhost:5432/{name}"
More details: ${CLAUDE_PLUGIN_ROOT}/skills/pytest-patterns/references/fixtures.md
More details: ${CLAUDE_PLUGIN_ROOT}/skills/pytest-patterns/references/mocking.md
Also mock non-deterministic sources (datetime.now(), random, uuid). Import the async mock helper with: `from unittest.mock import AsyncMock`.
@pytest.fixture
def mock_email_service():
    """Mock EXTERNAL email service (SendGrid, AWS SES)."""
    service = AsyncMock()
    # Dotted-name kwarg configures the child mock's awaited return value.
    service.configure_mock(**{"send.return_value": True})
    return service
@pytest.fixture
def mock_payment_gateway():
    """Mock EXTERNAL payment gateway."""
    gateway = AsyncMock()
    gateway.configure_mock(
        **{"charge.return_value": {"status": "success", "transaction_id": "123"}}
    )
    return gateway
Branch coverage — one of the most valuable metrics, not just line coverage.
def process(value: int | None) -> str:
if value is not None:
return f"Value: {value}"
return "No value"
# Line coverage 100% with one test:
def test_process():
assert process(42) == "Value: 42"
# But branch `return "No value"` is NOT tested!
# Branch coverage requires both paths:
def test_process_with_value():
assert process(42) == "Value: 42"
def test_process_with_none():
assert process(None) == "No value"
# pyproject.toml
[tool.coverage.run]
source = ["src"]
branch = true  # IMPORTANT: enable branch coverage
[tool.coverage.report]
fail_under = 80      # fail the run below 80% coverage
show_missing = true  # print uncovered line numbers in the terminal report
exclude_lines = [
    "pragma: no cover",
    "if TYPE_CHECKING:",
    "raise NotImplementedError",
]
# Run tests with coverage
pytest --cov=src --cov-branch --cov-report=term-missing
# HTML report for detailed analysis
pytest --cov=src --cov-branch --cov-report=html
# Open htmlcov/index.html — shows uncovered branches
Commands: `/test:first <feature>` — create test BEFORE implementation (TDD); `/test:fixture <name>` — create a fixture; `/test:mock <dependency>` — create a mock; `test-reviewer` agent — coverage and quality analysis.