Python testing best practices with pytest. Covers unit, integration, async tests, mocking, fixtures. Triggers: "напиши тесты", "write tests", "add tests", "test coverage", "pytest"
/plugin marketplace add timequity/vibe-coder
/plugin install vibe-coder@vibe-coder

This skill inherits all available tools. When active, it can use any tool Claude has access to.
agents/python-test-writer.md
references/async-testing.md
references/fixtures.md
references/mocking.md

Modern Python testing with pytest ecosystem.
ALWAYS use:
- pytest — test runner
- pytest-cov — coverage
- pytest-asyncio — async tests
- pytest-mock — mocking (wraps unittest.mock)
- hypothesis — property-based testing (when appropriate; see the sketch below)
- respx / pytest-httpx — HTTP mocking for httpx
- aioresponses — HTTP mocking for aiohttp

NEVER use:
- unittest style (use pytest native)
- nose (deprecated)
- mock standalone (use pytest-mock)

uv add --dev pytest pytest-cov pytest-asyncio pytest-mock
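Property-based testing with hypothesis complements the example-based tests shown below; a minimal sketch, assuming the `add` function from `mypackage.calculator` used in the examples:

```python
from hypothesis import given, strategies as st

from mypackage.calculator import add


@given(st.integers(), st.integers())
def test_add_is_commutative(a, b):
    # Property: argument order must not change the result
    assert add(a, b) == add(b, a)
```

hypothesis generates many random inputs per run and shrinks any failing case to a minimal counterexample.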
[tool.pytest.ini_options]
testpaths = ["tests"]
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "function"
addopts = [
"-ra",
"-q",
"--strict-markers",
"--strict-config",
]
markers = [
"slow: marks tests as slow",
"integration: marks tests as integration tests",
]
[tool.coverage.run]
source = ["src"]
branch = true
[tool.coverage.report]
exclude_lines = [
"pragma: no cover",
"if TYPE_CHECKING:",
"raise NotImplementedError",
]
project/
├── src/
│ └── mypackage/
│ ├── __init__.py
│ └── service.py
└── tests/
├── conftest.py # Shared fixtures
├── unit/
│ └── test_service.py
└── integration/
└── test_api.py
# tests/unit/test_calculator.py
import pytest
from mypackage.calculator import add, divide
def test_add_positive_numbers():
assert add(2, 3) == 5
def test_add_negative_numbers():
assert add(-1, -1) == -2
def test_divide_by_zero_raises():
with pytest.raises(ZeroDivisionError):
divide(1, 0)
import pytest
@pytest.mark.parametrize("input,expected", [
("hello", "HELLO"),
("World", "WORLD"),
("", ""),
("123", "123"),
])
def test_uppercase(input, expected):
assert input.upper() == expected
@pytest.mark.parametrize("a,b,expected", [
(1, 2, 3),
(0, 0, 0),
(-1, 1, 0),
])
def test_add(a, b, expected):
assert add(a, b) == expected
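For readable test IDs and per-case marks, individual cases can be wrapped in pytest.param; a sketch reusing the same `add` function:

```python
import pytest

from mypackage.calculator import add


@pytest.mark.parametrize("a,b,expected", [
    pytest.param(1, 2, 3, id="small"),
    pytest.param(10**6, 10**6, 2_000_000, id="large"),
    pytest.param(0.1, 0.2, 0.3, id="floats", marks=pytest.mark.xfail(reason="float rounding")),
])
def test_add_with_ids(a, b, expected):
    assert add(a, b) == expected
```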
# tests/conftest.py
import pytest
from mypackage.database import Database
@pytest.fixture
def sample_user():
"""Simple data fixture."""
return {"id": 1, "name": "Test User", "email": "test@example.com"}
@pytest.fixture
def db():
"""Setup/teardown fixture."""
database = Database(":memory:")
database.connect()
yield database
database.disconnect()
@pytest.fixture(scope="module")
def expensive_resource():
"""Shared across module (use sparingly)."""
resource = create_expensive_resource()
yield resource
resource.cleanup()
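When several tests need the same object with small variations, a factory fixture avoids defining one fixture per variant; a sketch (make_user and the role field are illustrative, not from the codebase):

```python
import pytest


@pytest.fixture
def make_user():
    """Factory fixture: each test builds a user with its own overrides."""
    def _make(**overrides):
        user = {"id": 1, "name": "Test User", "email": "test@example.com"}
        user.update(overrides)
        return user
    return _make


def test_admin_user(make_user):
    admin = make_user(name="Admin", role="admin")
    assert admin["role"] == "admin"
```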
import pytest
from mypackage.api import fetch_user
# With asyncio_mode = "auto", no decorator needed
async def test_fetch_user():
user = await fetch_user(1)
assert user["id"] == 1
# Async fixture
@pytest.fixture
async def async_client():
async with AsyncClient() as client:
yield client
async def test_with_async_client(async_client):
response = await async_client.get("/users")
assert response.status_code == 200
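The config above sets asyncio_mode = "auto"; if a project keeps pytest-asyncio's default "strict" mode instead, async tests must carry the marker explicitly. A sketch, assuming the same fetch_user:

```python
import pytest

from mypackage.api import fetch_user


@pytest.mark.asyncio
async def test_fetch_user_explicit():
    # Required only in strict mode; redundant (but harmless) in auto mode
    user = await fetch_user(1)
    assert user["id"] == 1
```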
from unittest.mock import AsyncMock
import pytest
def test_send_email(mocker):
"""Mock external service."""
mock_send = mocker.patch("mypackage.email.send_email")
mock_send.return_value = True
result = notify_user("test@example.com", "Hello")
assert result is True
mock_send.assert_called_once_with("test@example.com", "Hello")
async def test_external_api(mocker):
"""Mock async function."""
mock_fetch = mocker.patch(
"mypackage.client.fetch_data",
new_callable=AsyncMock,
return_value={"data": "mocked"}
)
result = await process_data()
assert result["data"] == "mocked"
mock_fetch.assert_awaited_once()
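aioresponses (listed above for aiohttp clients) works as a context manager that intercepts outgoing requests; a minimal sketch, with the URL and payload as placeholder assumptions:

```python
import aiohttp
from aioresponses import aioresponses


async def test_aiohttp_call():
    with aioresponses() as mocked:
        # Register the fake response before the request is made
        mocked.get("https://api.example.com/items", payload={"items": []})
        async with aiohttp.ClientSession() as session:
            async with session.get("https://api.example.com/items") as response:
                data = await response.json()
    assert data == {"items": []}
```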
import pytest
import httpx
import respx
@respx.mock
async def test_api_call():
respx.get("https://api.example.com/users/1").respond(
json={"id": 1, "name": "John"}
)
async with httpx.AsyncClient() as client:
response = await client.get("https://api.example.com/users/1")
assert response.json()["name"] == "John"
# Or as fixture
@pytest.fixture
def mock_api():
with respx.mock:
yield respx
async def test_with_fixture(mock_api):
mock_api.get("https://api.example.com/data").respond(json={"ok": True})
# ... test code
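respx routes also record traffic, so a test can assert the call actually happened; a sketch reusing the endpoint above:

```python
@respx.mock
async def test_api_call_is_recorded():
    route = respx.get("https://api.example.com/users/1")
    route.respond(json={"id": 1, "name": "John"})
    async with httpx.AsyncClient() as client:
        await client.get("https://api.example.com/users/1")
    assert route.called
    assert route.call_count == 1
```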
import pytest
from mypackage.validator import validate_email
def test_invalid_email_raises():
with pytest.raises(ValueError) as exc_info:
validate_email("not-an-email")
assert "Invalid email format" in str(exc_info.value)
def test_specific_exception_attributes():
with pytest.raises(ValidationError) as exc_info:
validate_input({"bad": "data"})
assert exc_info.value.field == "email"
assert exc_info.value.code == "required"
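For simple message checks, pytest.raises accepts a match= regex, which is terser than inspecting exc_info:

```python
def test_invalid_email_message_match():
    # match= performs a regex search against str(exception)
    with pytest.raises(ValueError, match="Invalid email"):
        validate_email("not-an-email")
```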
import pytest
@pytest.mark.slow
def test_complex_calculation():
"""Run with: pytest -m slow"""
result = heavy_computation()
assert result is not None
@pytest.mark.integration
async def test_database_connection():
"""Run with: pytest -m integration"""
async with get_connection() as conn:
assert await conn.ping()
@pytest.mark.skip(reason="Not implemented yet")
def test_future_feature():
pass
@pytest.mark.skipif(sys.platform == "win32", reason="Unix only")
def test_unix_specific():
pass
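xfail documents known failures without hiding them the way skip does; a sketch (parse_config is a hypothetical function):

```python
@pytest.mark.xfail(reason="Known parser bug", strict=True)
def test_known_parser_bug():
    # strict=True turns an unexpected pass into a test failure
    assert parse_config("broken input") == {}
```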
# Run all tests
pytest
# With coverage
pytest --cov --cov-report=term-missing
# Specific file/test
pytest tests/unit/test_service.py
pytest tests/unit/test_service.py::test_specific_function
# By marker
pytest -m "not slow"
pytest -m integration
# Verbose with print output
pytest -v -s
# Stop on first failure
pytest -x
# Run last failed
pytest --lf
# Parallel (requires pytest-xdist)
pytest -n auto
# Terminal report
pytest --cov=src --cov-report=term-missing
# HTML report
pytest --cov=src --cov-report=html
open htmlcov/index.html
# Fail if below threshold
pytest --cov=src --cov-fail-under=80
- Name tests test_<what>_<condition>_<expected>
- Use pytest.raises for exception testing
- Avoid time.sleep() in tests

Use the python-test-writer agent for generating comprehensive tests:
Task: python-test-writer
Prompt: Write tests for src/mypackage/service.py