From sciagent-skills
Analyzes biological data with scikit-bio: sequence manipulation (DNA/RNA/protein), alignments, phylogenetic trees (NJ/UPGMA), diversity (Shannon/UniFrac), ordination (PCoA), stats (PERMANOVA), formats (FASTA/BIOM). For microbiomes/ecology.
npx claudepluginhub jaechang-hits/sciagent-skills --plugin sciagent-skills

This skill uses the workspace's default tool permissions.
scikit-bio is a comprehensive Python library for biological data analysis, spanning sequence manipulation, alignment, phylogenetics, microbial ecology, and multivariate statistics. It provides specialized data structures (DNA, RNA, Protein, DistanceMatrix, TreeNode, TabularMSA) that integrate with the broader Python scientific stack.
Processes biological sequences, alignments, phylogenetic trees, diversity metrics (alpha/beta, UniFrac), ordination (PCoA), PERMANOVA, and formats like FASTA/Newick for microbiome analysis using scikit-bio.
Processes biological sequences, alignments, phylogenetic trees, diversity metrics (UniFrac), ordination (PCoA), PERMANOVA, and formats (FASTA/Newick) using scikit-bio.
Analyzes alignments (FASTA/PHYLIP/Nexus) and trees (Newick): computes treeness, RCV, parsimony sites, evolutionary rate, DVMC, tree length, GC content using PhyKIT, Biopython, DendroPy. Builds NJ/UPGMA/parsimony trees, Robinson-Foulds distances, Mann-Whitney U tests.
Share bugs, ideas, or general feedback.
scikit-bio is a comprehensive Python library for biological data analysis, spanning sequence manipulation, alignment, phylogenetics, microbial ecology, and multivariate statistics. It provides specialized data structures (DNA, RNA, Protein, DistanceMatrix, TreeNode, TabularMSA) that integrate with the broader Python scientific stack.
pip install scikit-bio
# Optional: pip install biom-format — HDF5 BIOM table support
# Optional: pip install matplotlib seaborn — visualization
import skbio
from skbio.diversity import alpha_diversity, beta_diversity
from skbio.stats.distance import permanova
from skbio.stats.ordination import pcoa
import numpy as np

# Toy OTU abundance table: one row per sample, one column per feature.
abundances = np.array([[10, 20, 30], [15, 25, 5], [5, 10, 40], [20, 5, 15]])
sample_ids = ['S1', 'S2', 'S3', 'S4']
grouping = ['control', 'control', 'treatment', 'treatment']

# Within-sample (alpha) diversity.
shannon = alpha_diversity('shannon', abundances, ids=sample_ids)
print(f"Shannon diversity: {shannon.values}") # [1.09, 1.04, 0.94, 1.03]

# Between-sample (beta) diversity, then ordination and a group test.
bc_dm = beta_diversity('braycurtis', abundances, ids=sample_ids)
pcoa_results = pcoa(bc_dm)
results = permanova(bc_dm, grouping, permutations=999)
print(f"PERMANOVA p-value: {results['p-value']}")
from skbio import DNA, RNA, Protein

# Build a sequence and walk it through the central dogma.
dna = DNA('ATCGATCGATCG', metadata={'id': 'gene1', 'description': 'test'})
rna = dna.transcribe()
rc = dna.reverse_complement()
protein = rna.translate()
print(f"DNA: {dna}, RC: {rc}, Protein: {protein}")

# Regex-based motif search plus k-mer counting.
motif_positions = dna.find_with_regex('ATG.{3}')
kmer_freqs = dna.kmer_frequencies(k=3)
first_three = dict(list(kmer_freqs.items())[:3])
print(f"3-mer frequencies: {first_three}")

# Simple composition / property queries.
print(f"Has degenerates: {dna.has_degenerates()}")
print(f"GC content: {dna.gc_content():.2f}")
degapped = dna.degap() # Strip gap characters

# Three metadata layers: whole-sequence, per-position, and interval.
dna = DNA('ATCGATCG', metadata={'id': 'seq1'},
          positional_metadata={'quality': [30, 35, 40, 38, 32, 36, 34, 33]})
dna.interval_metadata.add([(0, 4)], metadata={'type': 'promoter'})
print(f"Quality scores: {list(dna.positional_metadata['quality'])}")
# Pairwise and multiple sequence alignment with scikit-bio.
from skbio import DNA
from skbio.alignment import local_pairwise_align_ssw, TabularMSA
# Pairwise local alignment (Smith-Waterman via SSW)
# NOTE(review): local_pairwise_align_ssw is deprecated in recent scikit-bio
# releases — confirm it exists in the installed version.
seq1 = DNA('ACTCGATCGATCGATCGATCG')
seq2 = DNA('ATCGATCGATCGATCGATCGA')
# Returns the aligned pair, the alignment score, and per-sequence
# (start, end) coordinates of the aligned region.
alignment, score, start_end = local_pairwise_align_ssw(seq1, seq2)
print(f"Score: {score}, Positions: {start_end}")
# Multiple sequence alignment from file
# assumes 'alignment.fasta' holds equal-length, pre-aligned DNA records
msa = TabularMSA.read('alignment.fasta', constructor=DNA)
consensus = msa.consensus()
conservation = msa.conservation()
print(f"Consensus: {consensus[:20]}, Conservation: {conservation[:5]}")
# Distance-based tree construction and basic tree operations.
from skbio import TreeNode, DistanceMatrix
from skbio.tree import nj, upgma
# Build tree from distance matrix
data = [[0, 5, 9, 9], [5, 0, 10, 10], [9, 10, 0, 8], [9, 10, 8, 0]]
dm = DistanceMatrix(data, ids=['A', 'B', 'C', 'D'])
tree = nj(dm)  # neighbor joining
print(tree.ascii_art())
# Tree operations
subtree = tree.shear(['A', 'B', 'C']) # Prune to subset
tips = [node.name for node in tree.tips()]
lca = tree.lowest_common_ancestor(['A', 'B'])
print(f"Tips: {tips}, LCA children: {len(list(lca.children))}")
# Tree comparison
# NOTE(review): skbio.tree.upgma requires a recent scikit-bio — confirm version.
tree2 = upgma(dm)
rf_dist = tree.compare_rfd(tree2)  # Robinson-Foulds (topology) distance
# NOTE(review): confirm cophenetic_matrix() exists in the installed release;
# older versions expose tip_tip_distances() instead.
cophenetic_dm = tree.cophenetic_matrix()
print(f"Robinson-Foulds distance: {rf_dist}")
# Alpha and beta diversity, non-phylogenetic and phylogenetic.
# Fix: removed the duplicate `from skbio.diversity import ...` that re-imported
# names already brought in on the first line; restored lost loop indentation.
from skbio.diversity import alpha_diversity, beta_diversity
import numpy as np

# Counts must be integer abundances (samples x features), not proportions.
counts = np.array([[10, 20, 30, 0], [15, 25, 5, 10], [5, 10, 40, 2]])
sample_ids = ['S1', 'S2', 'S3']

# Alpha diversity (multiple non-phylogenetic metrics).
for metric in ['shannon', 'simpson', 'observed_otus', 'pielou_e']:
    alpha = alpha_diversity(metric, counts, ids=sample_ids)
    print(f"{metric}: {alpha.values.round(3)}")

# Beta diversity: quantitative (Bray-Curtis) and presence/absence (Jaccard).
bc_dm = beta_diversity('braycurtis', counts, ids=sample_ids)
jaccard_dm = beta_diversity('jaccard', counts, ids=sample_ids)
print(f"Bray-Curtis S1-S2: {bc_dm['S1', 'S2']:.3f}")

# Phylogenetic metrics additionally need a tree plus the feature IDs that map
# count columns onto tree tips.
# NOTE(review): `tree` and `feature_ids` must already be defined by the caller
# (e.g. tree = TreeNode.read('phylogeny.nwk')) — they are not set in this snippet.
faith_pd = alpha_diversity('faith_pd', counts, ids=sample_ids,
                           tree=tree, otu_ids=feature_ids)
unifrac_dm = beta_diversity('unweighted_unifrac', counts,
                            ids=sample_ids, tree=tree, otu_ids=feature_ids)
w_unifrac_dm = beta_diversity('weighted_unifrac', counts,
                              ids=sample_ids, tree=tree, otu_ids=feature_ids)
print(f"Faith PD: {faith_pd.values}")
# Ordination: unconstrained (PCoA) and constrained (CCA).
from skbio.stats.ordination import pcoa, cca
# PCoA from distance matrix
# NOTE(review): `bc_dm` is the Bray-Curtis DistanceMatrix built in an earlier
# snippet — it must already be in scope here.
pcoa_results = pcoa(bc_dm)
pc1 = pcoa_results.samples['PC1']
pc2 = pcoa_results.samples['PC2']
prop = pcoa_results.proportion_explained
print(f"PC1 explains {prop.iloc[0]:.1%}, PC2 explains {prop.iloc[1]:.1%}")
# CCA with environmental variables (constrained ordination)
# species_matrix: samples × species counts
# env_matrix: samples × environmental variables
# NOTE(review): species_matrix / env_matrix are placeholders — define them
# before running this snippet.
cca_results = cca(species_matrix, env_matrix)
biplot_scores = cca_results.biplot_scores
print(f"Biplot scores shape: {biplot_scores.shape}")
# Save/load ordination results
pcoa_results.write('pcoa_results.txt')
# NOTE(review): the next line needs `import skbio`, which this snippet omits.
loaded = skbio.OrdinationResults.read('pcoa_results.txt')
from skbio.stats.distance import permanova, anosim, permdisp, mantel

# Group labels for the three samples in `bc_dm` (built in an earlier snippet).
grouping = ['control', 'control', 'treatment']

# PERMANOVA: does community composition differ between groups?
perm_res = permanova(bc_dm, grouping, permutations=999)
print(f"PERMANOVA: F={perm_res['test statistic']:.3f}, "
      f"p={perm_res['p-value']:.4f}")

# ANOSIM: rank-based alternative group test.
anosim_res = anosim(bc_dm, grouping, permutations=999)
print(f"ANOSIM: R={anosim_res['test statistic']:.3f}, "
      f"p={anosim_res['p-value']:.4f}")

# PERMDISP: checks homogeneity of within-group dispersions.
dispersion_res = permdisp(bc_dm, grouping, permutations=999)

# Mantel test: correlation between two distance matrices over the same IDs.
r, p_value, n = mantel(bc_dm, jaccard_dm, method='pearson', permutations=999)
print(f"Mantel: r={r:.3f}, p={p_value:.4f}")
import skbio

# Read single objects from common formats.
seq = skbio.DNA.read('input.fasta', format='fasta')
tree = skbio.TreeNode.read('tree.nwk')
dm = skbio.DistanceMatrix.read('distances.txt')

# Memory-efficient streaming for large files (generator — one record at a time).
# Fix: use a distinct loop variable; the original reused `seq`, so the later
# seq.write() wrote the LAST record of large.fasta instead of input.fasta's.
for record in skbio.io.read('large.fasta', format='fasta', constructor=skbio.DNA):
    print(f"{record.metadata['id']}: {len(record)} bp")

# Write and convert formats.
seq.write('output.fasta', format='fasta')
seqs = list(skbio.io.read('input.fastq', format='fastq', constructor=skbio.DNA))
skbio.io.write(seqs, format='fasta', into='output.fasta')
from skbio import DistanceMatrix
import numpy as np

# Build a symmetric, hollow distance matrix from a square array.
matrix = np.array([[0, 1, 2], [1, 0, 3], [2, 3, 0]])
dm = DistanceMatrix(matrix, ids=['A', 'B', 'C'])

# Lookup by ID pair, subset, and export.
print(f"A-B distance: {dm['A', 'B']}")
subset = dm.filter(['A', 'C'])
condensed = dm.condensed_form() # scipy-compatible condensed vector
df = dm.to_data_frame()
print(f"Shape: {df.shape}")
| Metric Type | Non-Phylogenetic | Phylogenetic (requires tree) |
|---|---|---|
| Alpha diversity | Shannon, Simpson, observed_otus, Pielou's evenness | Faith's PD |
| Beta diversity | Bray-Curtis, Jaccard | Unweighted UniFrac, Weighted UniFrac |
Diversity functions require integer abundance counts, not relative frequencies. If you have proportions, multiply back:
# Wrong: relative abundance
# counts = np.array([0.1, 0.5, 0.4])
# Correct: integer counts
# (scale proportions back to whole numbers before calling diversity functions)
counts = np.array([10, 50, 40])
# End-to-end microbiome workflow: load data -> alpha/beta diversity ->
# ordination -> significance testing.
import skbio
from skbio import TreeNode
from skbio.diversity import alpha_diversity, beta_diversity
from skbio.stats.ordination import pcoa
from skbio.stats.distance import permanova
import numpy as np
# 1. Load data
# NOTE(review): assumes otu_table.tsv is all-numeric after one header row
# (no sample-ID column) — confirm the file layout.
counts = np.loadtxt('otu_table.tsv', delimiter='\t', dtype=int, skiprows=1)
sample_ids = ['S1', 'S2', 'S3', 'S4', 'S5', 'S6']
feature_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4']
tree = TreeNode.read('phylogeny.nwk')
grouping = ['control', 'control', 'control', 'treatment', 'treatment', 'treatment']
# 2. Alpha diversity
shannon = alpha_diversity('shannon', counts, ids=sample_ids)
# Faith's PD is phylogenetic: needs the tree plus feature-to-tip IDs.
faith = alpha_diversity('faith_pd', counts, ids=sample_ids,
tree=tree, otu_ids=feature_ids)
# First three samples are controls, last three treatment (see `grouping`).
print(f"Mean Shannon - Control: {shannon[:3].mean():.2f}, "
f"Treatment: {shannon[3:].mean():.2f}")
# 3. Beta diversity + PCoA
unifrac_dm = beta_diversity('unweighted_unifrac', counts,
ids=sample_ids, tree=tree, otu_ids=feature_ids)
pcoa_results = pcoa(unifrac_dm)
print(f"PC1: {pcoa_results.proportion_explained.iloc[0]:.1%} variance")
# 4. Statistical testing
results = permanova(unifrac_dm, grouping, permutations=999)
print(f"PERMANOVA p={results['p-value']:.4f}")
# Build a neighbor-joining tree from pairwise alignment-derived distances.
# Fix: the original called skbio.io.read without importing the top-level
# `skbio` package (NameError); also restored lost loop indentation.
import skbio
from skbio import DNA, DistanceMatrix
from skbio.tree import nj
from skbio.alignment import local_pairwise_align_ssw
import numpy as np

# 1. Read sequences
seqs = list(skbio.io.read('sequences.fasta', format='fasta', constructor=DNA))
n = len(seqs)
print(f"Loaded {n} sequences")

# 2. Compute pairwise distances from local-alignment scores.
# NOTE(review): SSW match scores can exceed sequence length, so
# 1 - score/max_len may go negative for highly similar sequences —
# confirm the scoring scheme before treating these as true distances.
dist_data = np.zeros((n, n))
for i in range(n):
    for j in range(i + 1, n):
        _, score, _ = local_pairwise_align_ssw(seqs[i], seqs[j])
        max_len = max(len(seqs[i]), len(seqs[j]))
        dist_data[i, j] = dist_data[j, i] = 1 - (score / max_len)

# 3. Build tree (falls back to positional IDs when a record lacks 'id').
ids = [s.metadata.get('id', f'seq{i}') for i, s in enumerate(seqs)]
dm = DistanceMatrix(dist_data, ids=ids)
tree = nj(dm)
print(tree.ascii_art())

# 4. Persist and analyze the tree.
tree.write('output.nwk', format='newick')
cophenetic = tree.cophenetic_matrix()
print(f"Cophenetic matrix shape: {cophenetic.shape}")
- Stream sequences with skbio.io.read (with constructor=DNA)
- Access per-base quality via positional_metadata['quality']
- Locate motifs with find_with_regex() and sequence slicing, e.g. find_with_regex('ATG.{3}')
- Translate via dna.transcribe().translate()
- Write/convert formats with skbio.io.write()

| Parameter | Function | Default | Effect |
|---|---|---|---|
metric | alpha/beta_diversity | Required | Diversity metric name (e.g., 'shannon', 'braycurtis') |
permutations | permanova/anosim/mantel | 999 | Number of permutations; higher = more precise p-values |
tree | alpha/beta_diversity | None | Required for phylogenetic metrics (Faith PD, UniFrac) |
otu_ids | alpha/beta_diversity | None | Feature IDs mapping counts to tree tips |
method | mantel | 'pearson' | Correlation method: 'pearson' or 'spearman' |
constructor | io.read | None | Sequence class for parsing (DNA, RNA, Protein) |
format | io.read/write | Auto | File format (fasta, fastq, newick, etc.) |
genetic_code | translate | 1 | NCBI genetic code (1=standard, 11=bacterial) |
k | kmer_frequencies | Required | k-mer length for frequency calculation |
- skbio.io.read() returns a generator; avoid list() on millions of sequences
- Phylogenetic metrics need tree and otu_ids; prune the tree to the feature set with tree.shear(feature_ids)

from skbio.diversity import subsample_counts, alpha_diversity
import numpy as np
# Rarefy to minimum depth
# (subsample_counts / alpha_diversity are imported on the preceding line;
# `counts` and `sample_ids` come from an earlier snippet)
min_depth = min(counts.sum(axis=1))
# NOTE(review): subsampling is random — seed NumPy's RNG first if the
# rarefied table must be reproducible.
rarefied = np.array([subsample_counts(row, n=min_depth) for row in counts])
print(f"Rarefied to {min_depth} counts per sample")
# Calculate diversity on rarefied data
shannon = alpha_diversity('shannon', rarefied, ids=sample_ids)
from skbio.diversity import partial_beta_diversity
import itertools

# Restrict the computation to chosen sample pairs — much cheaper than a
# full matrix when only a few comparisons matter.
pairs = list(itertools.combinations(sample_ids[:10], 2))
partial_dm = partial_beta_diversity(
    'braycurtis', counts, ids=sample_ids, id_pairs=pairs)
print(f"Computed {len(pairs)} pairwise distances")
# BIOM feature-table handling via skbio's Table wrapper.
# NOTE(review): skbio.Table needs a recent scikit-bio plus the optional
# biom-format dependency — confirm both are installed.
from skbio import Table
import numpy as np
# Read BIOM table
table = Table.read('feature_table.biom')
# BIOM convention: rows are observations (features), columns are samples.
print(f"Samples: {table.shape[1]}, Features: {table.shape[0]}")
# Filter low-abundance features
filtered = table.filter(lambda row, id_, md: row.sum() > 10, axis='observation')
# Convert to pandas
df = table.to_dataframe()
# Normalize to relative abundance
# (divide each sample column by its total so columns sum to 1)
rel_abundance = df.div(df.sum(axis=0), axis=1)
| Problem | Cause | Solution |
|---|---|---|
ValueError: Ids must be unique | Duplicate IDs in DistanceMatrix/sequences | Deduplicate IDs: ids = list(dict.fromkeys(ids)) |
ValueError: Counts must be integers | Relative abundance passed to diversity | Use integer counts; multiply back: (proportions * 1000).astype(int) |
| Memory error on large FASTA | Loading all sequences at once | Use generator: for seq in skbio.io.read(...) |
| Tree tip / OTU ID mismatch | Phylogenetic diversity fails | Prune tree: tree = tree.shear(feature_ids) |
| PERMANOVA p=0.001 but groups overlap in PCoA | Significant dispersion difference | Run permdisp() to check; PERMANOVA tests location AND dispersion |
| Wrong translation | Default genetic code (standard) used for bacteria | Set genetic_code=11 for bacterial/archaeal sequences |
| Slow NJ tree construction | O(n³) for large n | Use GME or BME for >1000 taxa: from skbio.tree import gme |
references/extended_api.md — Extended API reference covering advanced alignment parameters (SSW, gap penalties, CIGAR), tree construction algorithms (NJ vs UPGMA vs GME/BME), BIOM table manipulation, protein embeddings, and integration patterns with QIIME 2, pandas, and scikit-learn.

Not migrated from original: the original's single api_reference.md (749 lines) was condensed into references/extended_api.md with focus on capabilities not covered inline. Omitted content: basic examples duplicating Core API, verbose troubleshooting (covered in the Troubleshooting table above).