Provides Python testing strategies using pytest: TDD cycle, fixtures, mocking, parametrization, assertions, exception handling, and coverage measurement.
Comprehensive testing strategies for Python applications using pytest, TDD methodology, and best practices.
Always follow the TDD cycle:
# Step 1: Write failing test (RED)
def test_add_numbers():
    result = add(2, 3)
    assert result == 5

# Step 2: Write minimal implementation (GREEN)
def add(a, b):
    return a + b

# Step 3: Refactor if needed (REFACTOR)
Use pytest --cov to measure coverage:

pytest --cov=mypackage --cov-report=term-missing --cov-report=html
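To fail the build when coverage drops too low, pytest-cov also accepts a threshold flag; a minimal sketch (the 80% figure is an arbitrary example):

# Fail the run if total coverage is below 80%
pytest --cov=mypackage --cov-fail-under=80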
import pytest

def test_addition():
    """Test basic addition."""
    assert 2 + 2 == 4

def test_string_uppercase():
    """Test string uppercasing."""
    text = "hello"
    assert text.upper() == "HELLO"

def test_list_append():
    """Test list append."""
    items = [1, 2, 3]
    items.append(4)
    assert 4 in items
    assert len(items) == 4
# Equality
assert result == expected
# Inequality
assert result != unexpected
# Truthiness
assert result # Truthy
assert not result # Falsy
assert result is True # Exactly True
assert result is False # Exactly False
assert result is None # Exactly None
# Membership
assert item in collection
assert item not in collection
# Comparisons
assert result > 0
assert 0 <= result <= 100
# Type checking
assert isinstance(result, str)
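Floating-point results rarely compare equal with ==; pytest's built-in pytest.approx handles tolerant comparison:

# Approximate comparison (avoids binary rounding surprises)
assert 0.1 + 0.2 == pytest.approx(0.3)
assert [0.1, 0.2] == pytest.approx([0.1, 0.2])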
# Exception testing (preferred approach)
with pytest.raises(ValueError):
    raise ValueError("error message")

# Check exception message
with pytest.raises(ValueError, match="invalid input"):
    raise ValueError("invalid input provided")

# Check exception attributes
with pytest.raises(ValueError) as exc_info:
    raise ValueError("error message")
assert str(exc_info.value) == "error message"
import pytest
@pytest.fixture
def sample_data():
    """Fixture providing sample data."""
    return {"name": "Alice", "age": 30}

def test_sample_data(sample_data):
    """Test using the fixture."""
    assert sample_data["name"] == "Alice"
    assert sample_data["age"] == 30
@pytest.fixture
def database():
    """Fixture with setup and teardown."""
    # Setup
    db = Database(":memory:")
    db.create_tables()
    db.insert_test_data()
    yield db  # Provide to test
    # Teardown
    db.close()

def test_database_query(database):
    """Test database operations."""
    result = database.query("SELECT * FROM users")
    assert len(result) > 0
import os

# Function scope (default) - runs for each test
@pytest.fixture
def temp_file():
    with open("temp.txt", "w") as f:
        yield f
    os.remove("temp.txt")

# Module scope - runs once per module
@pytest.fixture(scope="module")
def module_db():
    db = Database(":memory:")
    db.create_tables()
    yield db
    db.close()

# Session scope - runs once per test session
@pytest.fixture(scope="session")
def shared_resource():
    resource = ExpensiveResource()
    yield resource
    resource.cleanup()
@pytest.fixture(params=[1, 2, 3])
def number(request):
    """Parameterized fixture."""
    return request.param

def test_numbers(number):
    """Test runs 3 times, once for each parameter."""
    assert number > 0
@pytest.fixture
def user():
    return User(id=1, name="Alice")

@pytest.fixture
def admin():
    return User(id=2, name="Admin", role="admin")

def test_user_admin_interaction(user, admin):
    """Test using multiple fixtures."""
    assert admin.can_manage(user)
@pytest.fixture(autouse=True)
def reset_config():
    """Automatically runs before every test."""
    Config.reset()
    yield
    Config.cleanup()

def test_without_fixture_call():
    # reset_config runs automatically
    assert Config.get_setting("debug") is False
# tests/conftest.py
import pytest

@pytest.fixture
def client():
    """Shared fixture for all tests."""
    app = create_app(testing=True)
    with app.test_client() as client:
        yield client

@pytest.fixture
def auth_headers(client):
    """Generate auth headers for API testing."""
    response = client.post("/api/login", json={
        "username": "test",
        "password": "test"
    })
    token = response.json["token"]
    return {"Authorization": f"Bearer {token}"}
@pytest.mark.parametrize("input,expected", [
    ("hello", "HELLO"),
    ("world", "WORLD"),
    ("PyThOn", "PYTHON"),
])
def test_uppercase(input, expected):
    """Test runs 3 times with different inputs."""
    assert input.upper() == expected
@pytest.mark.parametrize("a,b,expected", [
    (2, 3, 5),
    (0, 0, 0),
    (-1, 1, 0),
    (100, 200, 300),
])
def test_add(a, b, expected):
    """Test addition with multiple inputs."""
    assert add(a, b) == expected
@pytest.mark.parametrize("input,expected", [
    ("valid@email.com", True),
    ("invalid", False),
    ("@no-domain.com", False),
], ids=["valid-email", "missing-at", "missing-local-part"])
def test_email_validation(input, expected):
    """Test email validation with readable test IDs."""
    assert is_valid_email(input) is expected
@pytest.fixture(params=["sqlite", "postgresql", "mysql"])
def db(request):
    """Test against multiple database backends."""
    if request.param == "sqlite":
        return Database(":memory:")
    elif request.param == "postgresql":
        return Database("postgresql://localhost/test")
    elif request.param == "mysql":
        return Database("mysql://localhost/test")

def test_database_operations(db):
    """Test runs 3 times, once for each database."""
    result = db.query("SELECT 1")
    assert result is not None
import time
import requests

# Mark slow tests
@pytest.mark.slow
def test_slow_operation():
    time.sleep(5)

# Mark integration tests
@pytest.mark.integration
def test_api_integration():
    response = requests.get("https://api.example.com")
    assert response.status_code == 200

# Mark unit tests
@pytest.mark.unit
def test_unit_logic():
    assert calculate(2, 3) == 5
# Run only fast tests
pytest -m "not slow"
# Run only integration tests
pytest -m integration
# Run integration or slow tests
pytest -m "integration or slow"
# Run tests marked as unit but not slow
pytest -m "unit and not slow"
[pytest]
markers =
    slow: marks tests as slow
    integration: marks tests as integration tests
    unit: marks tests as unit tests
    django: marks tests as requiring Django
from unittest.mock import patch, Mock, mock_open, PropertyMock
@patch("mypackage.external_api_call")
def test_with_mock(api_call_mock):
"""Test with mocked external API."""
api_call_mock.return_value = {"status": "success"}
result = my_function()
api_call_mock.assert_called_once()
assert result["status"] == "success"
@patch("mypackage.Database.connect")
def test_database_connection(connect_mock):
"""Test with mocked database connection."""
connect_mock.return_value = MockConnection()
db = Database()
db.connect()
connect_mock.assert_called_once_with("localhost")
@patch("mypackage.api_call")
def test_api_error_handling(api_call_mock):
"""Test error handling with mocked exception."""
api_call_mock.side_effect = ConnectionError("Network error")
with pytest.raises(ConnectionError):
api_call()
api_call_mock.assert_called_once()
@patch("builtins.open", new_callable=mock_open)
def test_file_reading(mock_file):
"""Test file reading with mocked open."""
mock_file.return_value.read.return_value = "file content"
result = read_file("test.txt")
mock_file.assert_called_once_with("test.txt", "r")
assert result == "file content"
@patch("mypackage.DBConnection", autospec=True)
def test_autospec(db_mock):
"""Test with autospec to catch API misuse."""
db = db_mock.return_value
db.query("SELECT * FROM users")
# This would fail if DBConnection doesn't have query method
db_mock.assert_called_once()
class TestUserService:
    @patch("mypackage.UserRepository")
    def test_create_user(self, repo_mock):
        """Test user creation with mocked repository."""
        repo_mock.return_value.save.return_value = User(id=1, name="Alice")
        service = UserService(repo_mock.return_value)
        user = service.create_user(name="Alice")
        assert user.name == "Alice"
        repo_mock.return_value.save.assert_called_once()
@pytest.fixture
def mock_config():
    """Create a mock with a property."""
    config = Mock()
    type(config).debug = PropertyMock(return_value=True)
    type(config).api_key = PropertyMock(return_value="test-key")
    return config

def test_with_mock_config(mock_config):
    """Test with mocked config properties."""
    assert mock_config.debug is True
    assert mock_config.api_key == "test-key"
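For simple attribute or environment patching, pytest's built-in monkeypatch fixture is a lighter alternative to unittest.mock and undoes every change automatically; a brief sketch (get_api_key is a hypothetical function that reads the variable):

def test_reads_api_key(monkeypatch):
    """monkeypatch.setenv is reverted after the test finishes."""
    monkeypatch.setenv("API_KEY", "test-key")
    assert get_api_key() == "test-key"  # hypothetical helper under test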
import pytest
# Requires the pytest-asyncio plugin
@pytest.mark.asyncio
async def test_async_function():
    """Test async function."""
    result = await async_add(2, 3)
    assert result == 5

@pytest.mark.asyncio
async def test_async_with_fixture(async_client):
    """Test async with async fixture."""
    response = await async_client.get("/api/users")
    assert response.status_code == 200
@pytest.fixture
async def async_client():
    """Async fixture providing async test client."""
    app = create_app()
    async with app.test_client() as client:
        yield client

@pytest.mark.asyncio
async def test_api_endpoint(async_client):
    """Test using async fixture."""
    response = await async_client.get("/api/data")
    assert response.status_code == 200
@pytest.mark.asyncio
@patch("mypackage.async_api_call")
async def test_async_mock(api_call_mock):
    """Test async function with mock."""
    api_call_mock.return_value = {"status": "ok"}
    result = await my_async_function()
    api_call_mock.assert_awaited_once()
    assert result["status"] == "ok"
def test_divide_by_zero():
    """Test that dividing by zero raises ZeroDivisionError."""
    with pytest.raises(ZeroDivisionError):
        divide(10, 0)

def test_custom_exception():
    """Test custom exception with message."""
    with pytest.raises(ValueError, match="invalid input"):
        validate_input("invalid")

def test_exception_with_details():
    """Test exception with custom attributes."""
    with pytest.raises(CustomError) as exc_info:
        raise CustomError("error", code=400)
    assert exc_info.value.code == 400
    assert "error" in str(exc_info.value)
import tempfile
import os

def test_file_processing():
    """Test file processing with temp file."""
    with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt') as f:
        f.write("test content")
        temp_path = f.name
    try:
        result = process_file(temp_path)
        assert result == "processed: test content"
    finally:
        os.unlink(temp_path)
def test_with_tmp_path(tmp_path):
    """Test using pytest's built-in temp path fixture."""
    test_file = tmp_path / "test.txt"
    test_file.write_text("hello world")
    result = process_file(str(test_file))
    assert result == "hello world"
    # tmp_path automatically cleaned up
def test_with_tmpdir(tmpdir):
    """Test using pytest's older py.path-based tmpdir fixture; prefer tmp_path in new code."""
    test_file = tmpdir.join("test.txt")
    test_file.write("data")
    result = process_file(str(test_file))
    assert result == "data"
tests/
├── conftest.py              # Shared fixtures
├── __init__.py
├── unit/                    # Unit tests
│   ├── __init__.py
│   ├── test_models.py
│   ├── test_utils.py
│   └── test_services.py
├── integration/             # Integration tests
│   ├── __init__.py
│   ├── test_api.py
│   └── test_database.py
└── e2e/                     # End-to-end tests
    ├── __init__.py
    └── test_user_flow.py
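With this layout, each layer can be run on its own by pointing pytest at the directory:

# Run one layer at a time
pytest tests/unit
pytest tests/integration
pytest tests/e2e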
class TestUserService:
    """Group related tests in a class."""

    @pytest.fixture(autouse=True)
    def setup(self):
        """Setup runs before each test in this class."""
        self.service = UserService()

    def test_create_user(self):
        """Test user creation."""
        user = self.service.create_user("Alice")
        assert user.name == "Alice"

    def test_delete_user(self):
        """Test user deletion."""
        user = User(id=1, name="Bob")
        self.service.delete_user(user)
        assert not self.service.user_exists(1)
Prefer descriptive test names that state the scenario and the expected outcome, such as test_user_login_with_invalid_credentials_fails; assert on exceptions with pytest.raises; and share setup through @pytest.fixture rather than ad-hoc helpers.
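A quick sketch of those conventions together (login_user and InvalidCredentialsError are hypothetical names):

def test_user_login_with_invalid_credentials_fails():
    """The name states both the scenario and the expected outcome."""
    with pytest.raises(InvalidCredentialsError):   # hypothetical exception
        login_user("alice", password="wrong")      # hypothetical helper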
@pytest.fixture
def client():
    app = create_app(testing=True)
    return app.test_client()

def test_get_user(client):
    response = client.get("/api/users/1")
    assert response.status_code == 200
    assert response.json["id"] == 1

def test_create_user(client):
    response = client.post("/api/users", json={
        "name": "Alice",
        "email": "alice@example.com"
    })
    assert response.status_code == 201
    assert response.json["name"] == "Alice"
@pytest.fixture
def db_session():
    """Create a test database session."""
    session = Session(bind=engine)
    session.begin_nested()
    yield session
    session.rollback()
    session.close()

def test_create_user(db_session):
    user = User(name="Alice", email="alice@example.com")
    db_session.add(user)
    db_session.commit()
    retrieved = db_session.query(User).filter_by(name="Alice").first()
    assert retrieved.email == "alice@example.com"
class TestCalculator:
    @pytest.fixture
    def calculator(self):
        return Calculator()

    def test_add(self, calculator):
        assert calculator.add(2, 3) == 5

    def test_divide_by_zero(self, calculator):
        with pytest.raises(ZeroDivisionError):
            calculator.divide(10, 0)
[pytest]
testpaths = tests
python_files = test_*.py
python_classes = Test*
python_functions = test_*
addopts =
    --strict-markers
    --disable-warnings
    --cov=mypackage
    --cov-report=term-missing
    --cov-report=html
markers =
    slow: marks tests as slow
    integration: marks tests as integration tests
    unit: marks tests as unit tests
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
addopts = [
    "--strict-markers",
    "--cov=mypackage",
    "--cov-report=term-missing",
    "--cov-report=html",
]
markers = [
    "slow: marks tests as slow",
    "integration: marks tests as integration tests",
    "unit: marks tests as unit tests",
]
# Run all tests
pytest
# Run specific file
pytest tests/test_utils.py
# Run specific test
pytest tests/test_utils.py::test_function
# Run with verbose output
pytest -v
# Run with coverage
pytest --cov=mypackage --cov-report=html
# Run only fast tests
pytest -m "not slow"
# Run until first failure
pytest -x
# Run and stop on N failures
pytest --maxfail=3
# Run last failed tests
pytest --lf
# Run tests with pattern
pytest -k "test_user"
# Run with debugger on failure
pytest --pdb
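Large suites can also be split across CPU cores with the pytest-xdist plugin (a separate install):

# Run tests in parallel on all available cores (requires pytest-xdist)
pytest -n auto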
| Pattern | Usage |
|---|---|
| `pytest.raises()` | Test expected exceptions |
| `@pytest.fixture()` | Create reusable test fixtures |
| `@pytest.mark.parametrize()` | Run tests with multiple inputs |
| `@pytest.mark.slow` | Mark slow tests |
| `pytest -m "not slow"` | Skip slow tests |
| `@patch()` | Mock functions and classes |
| `tmp_path` fixture | Automatic temp directory |
| `pytest --cov` | Generate coverage report |
| `assert` | Simple and readable assertions |
Remember: Tests are code too. Keep them clean, readable, and maintainable. Good tests catch bugs; great tests prevent them.