npx claudepluginhub krzysztofsurdy/code-virtuoso --plugin playbooks-virtuoso

This skill is limited to using the following tools:
Complete reference for the LangChain ecosystem — models, agents, tools, retrieval, memory, middleware, streaming, multi-agent orchestration, LangGraph workflows, Deep Agents, and provider integrations for Python 3.10+.
references/agents.md, references/deep-agents.md, references/integrations.md, references/langgraph-advanced.md, references/langgraph-core.md, references/langgraph-state.md, references/memory.md, references/messages.md, references/middleware.md, references/models.md, references/multi-agent.md, references/providers.md, references/retrieval.md, references/runtime.md, references/streaming.md, references/testing.md, references/tools.md

Provides a quick reference for LangChain 1.0 core concepts such as Agents, Tools, Memory, Middleware, and runtime context. Useful for creating AI agents, defining tools, managing memory, or integrating with models.
Designs LLM applications using LangChain 1.x and LangGraph for agents, state management, memory, tool integration, and workflows.
Builds production-grade AI agents with LangChain 0.1+ and LangGraph, including ReAct agents, multi-agent orchestration, memory systems, RAG pipelines, and observability via LangSmith.
Share bugs, ideas, or general feedback.
Complete reference for the LangChain ecosystem — models, agents, tools, retrieval, memory, middleware, streaming, multi-agent orchestration, LangGraph workflows, Deep Agents, and provider integrations for Python 3.10+.
# Minimal tool-calling agent (LangChain 1.0).
# NOTE(review): in LangChain 1.0 `create_agent` is exported by
# `langchain.agents`; `langgraph.prebuilt` exports `create_react_agent`
# instead — the original import was from `langgraph.prebuilt`. Confirm
# against the installed versions.
from langchain.chat_models import init_chat_model
from langchain.agents import create_agent

# Provider-agnostic initialization from a "provider:model" spec string.
model = init_chat_model("anthropic:claude-sonnet-4-20250514")


def get_weather(city: str) -> str:
    """Get weather for a city."""
    # Stub implementation for the example — always sunny.
    return f"Sunny, 72F in {city}"


# The plain function is passed as a tool; create_agent infers the tool
# schema from the signature and docstring.
agent = create_agent(model, [get_weather])
response = agent.invoke(
    {"messages": [{"role": "user", "content": "What's the weather in SF?"}]}
)
from pydantic import BaseModel


class SearchQuery(BaseModel):
    """Schema for type-safe structured output from the model."""

    query: str  # free-text search string
    year: int   # year the query refers to


# NOTE(review): relies on `model` defined earlier in this file.
# with_structured_output() constrains responses to the SearchQuery schema.
structured_model = model.with_structured_output(SearchQuery)
result = structured_model.invoke("Who won the World Cup in 2022?")
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain_core.vectorstores import InMemoryVectorStore

# Minimal RAG ingestion pipeline: load a web page, split it into
# ~1000-character chunks, embed the chunks into an in-memory vector
# store, and expose the store as a retriever.
loader = WebBaseLoader("https://example.com")
docs = loader.load()
splitter = RecursiveCharacterTextSplitter(chunk_size=1000)
chunks = splitter.split_documents(docs)
embeddings = OpenAIEmbeddings()
vector_store = InMemoryVectorStore.from_documents(chunks, embeddings)
retriever_tool = vector_store.as_retriever()
# Supervisor pattern: two specialist agents plus a routing supervisor.
# NOTE(review): `langgraph.prebuilt` exports `create_react_agent`, not
# `create_agent` — confirm against the installed langgraph version.
from langgraph.prebuilt import create_agent
# NOTE(review): `lookup_billing` and `check_status` are assumed to be tool
# functions defined elsewhere in this file — confirm.
billing_agent = create_agent(model, [lookup_billing], name="billing")
tech_agent = create_agent(model, [check_status], name="tech_support")
# NOTE(review): passing agents directly in the tools list assumes the
# installed `create_agent` accepts sub-agents as tools; several versions
# require wrapping each agent as a tool (or using handoffs) first — verify.
supervisor = create_agent(
model,
[billing_agent, tech_agent],
prompt="Route to the appropriate specialist."
)
from typing import TypedDict

from langgraph.graph import StateGraph, START, END


class FlowState(TypedDict, total=False):
    """Shared state passed between graph nodes."""

    # NOTE(review): field set is a guess — adapt to whatever
    # process_fn / review_fn actually read and write.
    messages: list


# NOTE(review): the original passed plain `dict` as the state schema;
# StateGraph requires an annotated schema (TypedDict / dataclass /
# pydantic model), so a bare `dict` fails at construction time.
graph = StateGraph(FlowState)
graph.add_node("process", process_fn)  # process_fn defined elsewhere
graph.add_node("review", review_fn)    # review_fn defined elsewhere
graph.add_edge(START, "process")       # entry point
graph.add_edge("process", "review")
graph.add_edge("review", END)          # terminal edge
app = graph.compile()
# Token-level streaming: stream_mode="messages" yields message chunks as
# the model produces them. (Original had the loop body unindented, which
# is a SyntaxError — restored here.)
for chunk in agent.stream(
    {"messages": [{"role": "user", "content": "Hello"}]},
    stream_mode="messages",
):
    print(chunk)
- init_chat_model() for provider-agnostic model initialization
- create_agent over building custom agent loops
- with_structured_output() for type-safe LLM responses
- stream_mode="messages" for token-level streaming to frontends