Sheaf neural network coordination via graph Laplacians for distributed multi-agent systems.
From asi-skills (npx claude plugin hub, plurigrid/asi). This skill uses the workspace's default tool permissions.
Research references: research/2005.12798.pdf, research/2206.08702.pdf, research/2501.19207.pdf, research/2504.02049.pdf, research/2507.00647.pdf, research/barbero22a.pdf, research/gebhartom_sheafnn.pdf, research/jakobhansen_learningsheaves.pdf

Guides Next.js Cache Components and Partial Prerendering (PPR) with cacheComponents enabled. Implements 'use cache', cacheLife(), cacheTag(), revalidateTag(), static/dynamic optimization, and cache debugging.
Migrates code, prompts, and API calls from Claude Sonnet 4.0/4.5 or Opus 4.1 to Opus 4.5, updating model strings on Anthropic, AWS, GCP, Azure platforms.
Facilitates interactive brainstorming sessions using diverse creative techniques and ideation methods. Activates when users say 'help me brainstorm' or 'help me ideate'.
Trit: 0 (ERGODIC - coordinator) Color: Green (#26D826)
Implements sheaf neural network coordination using graph Laplacians for:
The sheaf Laplacian generalizes the graph Laplacian by associating vector spaces to nodes and linear maps to edges:
L_F = D^\top D
where D is the coboundary operator:
(Df)_e = F_{e,t} f_t - F_{e,s} f_s
F_{e,v} : F(v) → F(e) (restriction maps)
Sheaf diffusion for consensus:
\frac{dx}{dt} = -L_F x
At equilibrium: L_F x = 0 (harmonic sections)
For directed graphs with cooperative behavior:
L_{in} = D_{in}^\top D_{in} (gathering information)
L_{out} = D_{out}^\top D_{out} (conveying information)
import torch
import torch.nn as nn
class SheafLaplacian(nn.Module):
    """Learnable sheaf Laplacian for graph coordination.

    Associates a ``stalk_dim``-dimensional vector space (stalk) to every
    node and a learnable pair of restriction maps (F_{e,s}, F_{e,t}) to
    every edge.  The sheaf Laplacian is L_F = D^T D, where the coboundary
    operator is (Df)_e = F_{e,t} f_t - F_{e,s} f_s.

    Args:
        num_nodes: number of nodes in the graph.
        stalk_dim: dimension of the stalk at each node.
        edge_index: (2, num_edges) integer tensor of (source, target) pairs.
    """

    def __init__(self, num_nodes, stalk_dim, edge_index):
        super().__init__()
        self.num_nodes = num_nodes
        self.stalk_dim = stalk_dim
        self.edge_index = edge_index
        # Learnable restriction maps F_{e,v}: slot [e, 0] acts on the
        # source stalk, slot [e, 1] on the target stalk.
        num_edges = edge_index.shape[1]
        self.restriction_maps = nn.Parameter(
            torch.randn(num_edges, 2, stalk_dim, stalk_dim)
        )

    def build_laplacian(self):
        """Construct the dense sheaf Laplacian from the restriction maps.

        Returns:
            Symmetric PSD tensor of shape
            (num_nodes * stalk_dim, num_nodes * stalk_dim), assembled
            edge by edge as L_F = D^T D.
        """
        d = self.stalk_dim
        L = torch.zeros(self.num_nodes * d, self.num_nodes * d)
        for e in range(self.edge_index.shape[1]):
            s = int(self.edge_index[0, e])
            t = int(self.edge_index[1, e])
            F_es = self.restriction_maps[e, 0]  # Source restriction
            F_et = self.restriction_maps[e, 1]  # Target restriction
            si = slice(s * d, (s + 1) * d)
            ti = slice(t * d, (t + 1) * d)
            # Edge contribution to D^T D: each endpoint's diagonal block
            # gains F^T F; the off-diagonal blocks lose the cross terms.
            L[si, si] += F_es.T @ F_es
            L[ti, ti] += F_et.T @ F_et
            L[si, ti] -= F_es.T @ F_et
            L[ti, si] -= F_et.T @ F_es
        return L

    def diffuse(self, x, steps=10, dt=0.1):
        """Run explicit-Euler sheaf diffusion dx/dt = -L_F x for consensus.

        Args:
            x: state tensor.  Either the stacked form of shape
               (num_nodes * stalk_dim,) / (num_nodes * stalk_dim, k), or a
               (num_nodes, stalk_dim) per-node layout, which is flattened
               for diffusion and reshaped back on return.
            steps: number of Euler steps.
            dt: step size; stability requires dt < 2 / lambda_max(L_F).

        Returns:
            Diffused state with the same shape as the input.
        """
        L = self.build_laplacian()
        # Accept (num_nodes, stalk_dim) layout as a convenience; the check
        # on shape[0] keeps the original stacked semantics unambiguous.
        per_node = (
            x.dim() == 2
            and x.shape == (self.num_nodes, self.stalk_dim)
            and x.shape[0] != self.num_nodes * self.stalk_dim
        )
        state = x.reshape(-1) if per_node else x
        for _ in range(steps):
            state = state - dt * (L @ state)
        if per_node:
            return state.reshape(self.num_nodes, self.stalk_dim)
        return state

    def harmonic_extension(self, boundary_values, boundary_mask):
        """Extend boundary values to a harmonic section of the sheaf.

        Solves L_ii x_i = -L_ib x_b for the interior (unclamped) entries,
        so that the full vector satisfies (L_F x)_interior = 0.

        Args:
            boundary_values: one value per True entry of ``boundary_mask``
                (flattened ordering).
            boundary_mask: boolean mask over the num_nodes * stalk_dim
                entries (any shape that flattens to that length) marking
                clamped entries.

        Returns:
            1-D tensor of length num_nodes * stalk_dim containing the
            harmonic extension; clamped entries keep their given values.
        """
        L = self.build_laplacian()
        mask = boundary_mask.reshape(-1).bool()
        interior = ~mask
        x = torch.zeros(self.num_nodes * self.stalk_dim, dtype=L.dtype)
        x[mask] = boundary_values.reshape(-1).to(L.dtype)
        if interior.any():
            L_ii = L[interior][:, interior]
            rhs = -L[interior][:, mask] @ x[mask]
            # lstsq tolerates a singular interior block (e.g. disconnected
            # interior nodes), returning a least-squares extension.
            x[interior] = torch.linalg.lstsq(
                L_ii, rhs.unsqueeze(1)
            ).solution.squeeze(1)
        return x
class CooperativeSheafNN(nn.Module):
    """Cooperative SNN with in/out degree control.

    Encodes node features, diffuses them through a learnable sheaf
    Laplacian, and mixes the raw and diffused signals via two scalar
    gates before decoding.
    """

    def __init__(self, in_dim, hidden_dim, out_dim, edge_index):
        super().__init__()
        node_count = edge_index.max() + 1
        self.sheaf = SheafLaplacian(
            num_nodes=node_count,
            stalk_dim=hidden_dim,
            edge_index=edge_index,
        )
        self.encoder = nn.Linear(in_dim, hidden_dim)
        self.decoder = nn.Linear(hidden_dim, out_dim)
        # Cooperative gates: trade off local state (gather) against the
        # diffused neighbourhood signal (convey).
        self.gather_gate = nn.Parameter(torch.ones(1))
        self.convey_gate = nn.Parameter(torch.ones(1))

    def forward(self, x, edge_index):
        encoded = self.encoder(x)
        # Cooperative diffusion over the learned sheaf structure.
        diffused = self.sheaf.diffuse(encoded)
        # Gated mixture of the raw encoding and its diffused counterpart.
        mixed = self.gather_gate * encoded + self.convey_gate * diffused
        return self.decoder(mixed)
using Catlab, Catlab.CategoricalAlgebra
# Define sheaf schema
# Schema for a cellular sheaf on a graph: a graph (V, E, src, tgt)
# decorated with stalk data on both vertices and edges.
@present SchSheaf(FreeSchema) begin
    V::Ob   # Vertices (nodes)
    E::Ob   # Edges
    src::Hom(E, V)   # edge source vertex
    tgt::Hom(E, V)   # edge target vertex
    # Stalks as vector spaces, stored as attribute data on the schema
    F_V::AttrType   # Stalk at vertex
    F_E::AttrType   # Stalk at edge
    stalk_v::Attr(V, F_V)   # assigns each vertex its stalk datum
    stalk_e::Attr(E, F_E)   # assigns each edge its stalk datum
end

# Acset type generated from the schema; instances are sheaf-decorated graphs.
@acset_type SheafGraph(SchSheaf)
"""
    build_sheaf_laplacian(sg::SheafGraph, restrictions)

Assemble the dense sheaf Laplacian `L_F = D^T D` from per-edge
restriction maps.

# Arguments
- `sg`: sheaf-decorated graph exposing parts `:V`, `:E` and
  morphisms `:src`, `:tgt`.
- `restrictions`: indexable collection with `restrictions[e] = (F_s, F_t)`,
  the `d×d` source/target restriction maps of edge `e`.

Returns an `(n*d) × (n*d)` symmetric positive semidefinite matrix,
where `n` is the vertex count and `d` the stalk dimension.
"""
function build_sheaf_laplacian(sg::SheafGraph, restrictions)
    n = nparts(sg, :V)
    # restrictions[e] is a (F_s, F_t) pair, so index into the pair before
    # taking size (size() on the tuple itself would be a MethodError).
    d = size(first(restrictions)[1], 1)  # Stalk dimension
    L = zeros(n * d, n * d)
    for e in parts(sg, :E)
        s = sg[e, :src]
        t = sg[e, :tgt]
        F_s, F_t = restrictions[e]
        # Block index ranges of vertices s and t in the stacked layout.
        si, ti = (s-1)*d+1:s*d, (t-1)*d+1:t*d
        # Edge contribution to D^T D: F^T F on the diagonal blocks,
        # minus the cross terms off the diagonal.
        L[si, si] += F_s' * F_s
        L[ti, ti] += F_t' * F_t
        L[si, ti] -= F_s' * F_t
        L[ti, si] -= F_t' * F_s
    end
    return L
end
"""
    sheaf_diffusion(L, x0; steps=100, dt=0.01)

Integrate `dx/dt = -L x` with explicit Euler steps, driving `x0`
toward a harmonic section of the sheaf Laplacian `L`.
"""
function sheaf_diffusion(L, x0; steps=100, dt=0.01)
    state = copy(x0)
    for _ in 1:steps
        state = state .- dt .* (L * state)
    end
    return state
end
This skill participates in balanced triads:
sheaf-cohomology (-1) ⊗ sheaf-laplacian-coordination (0) ⊗ forward-forward-learning (+1) = 0 ✓
persistent-homology (-1) ⊗ sheaf-laplacian-coordination (0) ⊗ gay-mcp (+1) = 0 ✓
# Agents reach consensus via sheaf diffusion.
# NOTE(review): the keyword names below (num_agents, topology) do not match
# SheafLaplacian.__init__(num_nodes, stalk_dim, edge_index) as defined
# earlier in this file, and ring_graph is undefined here — illustrative
# pseudocode; confirm the intended constructor signature.
agents = SheafLaplacian(num_agents=5, stalk_dim=16, topology=ring_graph)
initial_beliefs = torch.randn(5, 16)
consensus = agents.diffuse(initial_beliefs, steps=50)
# All agents now have aligned beliefs in harmonic section
# Handle graphs where connected nodes have different labels
# NOTE(review): `data` is not defined in this snippet — presumably a
# PyG-style graph object exposing an `edge_index` attribute; confirm.
model = CooperativeSheafNN(in_dim=32, hidden_dim=64, out_dim=10, edge_index=data.edge_index)
# Sheaf structure allows nodes to maintain distinct representations
# while still communicating through restriction maps
# Decentralized optimization via sheaf Laplacian flow
def distributed_optimize(local_gradients, topology):
    """Combine local gradients into a global one via harmonic extension.

    NOTE(review): illustrative pseudocode — ``SheafLaplacian(topology)``
    and ``harmonic_extension(local_gradients)`` do not match the
    signatures defined earlier in this file
    (``num_nodes, stalk_dim, edge_index`` and
    ``boundary_values, boundary_mask`` respectively); confirm the
    intended API before using.
    """
    sheaf = SheafLaplacian(topology)
    # Average gradients via harmonic extension
    global_gradient = sheaf.harmonic_extension(local_gradients)
    return global_gradient
;; In parallel_color_fork.clj
(defn sheaf-coordinate-forks
  "Coordinate parallel color forks via sheaf diffusion"
  [forks topology]
  (let [laplacian (build-sheaf-laplacian forks topology)
        colors    (map :color forks)
        consensus (sheaf-diffuse laplacian colors)]
    ;; Pair each fork with its consensus color, preserving fork order.
    (mapv (fn [fork color] (assoc fork :coordinated-color color))
          forks
          consensus)))
- sheaf-cohomology — Čech cohomology for local-to-global verification
- open-games — Compositional game theory for agent coordination
- acsets-algebraic-databases — Functorial databases underlying sheaf structure
- forward-forward-learning — Local learning complementing sheaf diffusion

@article{hansen2020sheaf,
title={Sheaf Neural Networks},
author={Hansen, Jakob and Gebhart, Thomas},
journal={arXiv:2012.06333},
year={2020}
}
@article{bodnar2022neural,
title={Neural Sheaf Diffusion},
author={Bodnar, Cristian and Di Giovanni, Francesco and others},
journal={arXiv:2202.04579},
year={2022}
}
@article{ribeiro2025cooperative,
title={Cooperative Sheaf Neural Networks},
author={Ribeiro, André and others},
journal={arXiv:2507.00647},
year={2025}
}
This skill connects to the K-Dense-AI/claude-scientific-skills ecosystem:
category-theory: 139 citations in bib.duckdb

This skill maps to Cat# = Comod(P) as a bicomodule in the equipment structure:
Trit: -1 (MINUS)
Home: Prof
Poly Op: ⊗
Kan Role: Adj
Color: #FF6B6B
The skill participates in triads satisfying:
(-1) + (0) + (+1) ≡ 0 (mod 3)
This ensures compositional coherence in the Cat# equipment structure.