Apply production-ready LangChain SDK patterns for chains, agents, and memory. Use when implementing LangChain integrations, refactoring code, or establishing team coding standards for LangChain applications. Trigger with phrases like "langchain SDK patterns", "langchain best practices", "langchain code patterns", "idiomatic langchain", "langchain architecture".
/plugin marketplace add jeremylongshore/claude-code-plugins-plus-skills
/plugin install langchain-pack@claude-code-plugins-plus

This skill is limited to using the following tools:
Production-ready patterns for LangChain applications including LCEL chains, structured output, and error handling.
langchain-install-auth setup

from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate


class SentimentResult(BaseModel):
    """Structured output for sentiment analysis."""

    sentiment: str = Field(description="positive, negative, or neutral")
    confidence: float = Field(description="Confidence score 0-1")
    reasoning: str = Field(description="Brief explanation")


# Bind the Pydantic schema to the model so every response is parsed
# into a typed SentimentResult instead of raw text.
model = ChatOpenAI(model="gpt-4o-mini")
structured_model = model.with_structured_output(SentimentResult)

sentiment_prompt = ChatPromptTemplate.from_template(
    "Analyze the sentiment of: {text}"
)

# LCEL pipeline: prompt -> structured model.
chain = sentiment_prompt | structured_model

result: SentimentResult = chain.invoke({"text": "I love LangChain!"})
print(f"Sentiment: {result.sentiment} ({result.confidence})")
from langchain_openai import ChatOpenAI
from langchain_anthropic import ChatAnthropic
from langchain_core.runnables import RunnableWithFallbacks

# Prefer GPT-4o; if it errors, the same call is retried transparently
# against Claude — callers never see the failover.
openai_model = ChatOpenAI(model="gpt-4o")
claude_model = ChatAnthropic(model="claude-3-5-sonnet-20241022")

robust_llm = openai_model.with_fallbacks([claude_model])
response = robust_llm.invoke("Hello!")
import asyncio
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

llm = ChatOpenAI(model="gpt-4o-mini")
prompt = ChatPromptTemplate.from_template("Summarize: {text}")
chain = prompt | llm


async def process_batch(texts: list[str]) -> list:
    """Process multiple texts concurrently."""
    payloads = [{"text": text} for text in texts]
    # abatch fans requests out concurrently; cap in-flight calls at five.
    return await chain.abatch(payloads, config={"max_concurrency": 5})


# Usage
results = asyncio.run(process_batch(["text1", "text2", "text3"]))
from langchain_openai import ChatOpenAI
from langchain_core.callbacks import StreamingStdOutCallbackHandler

# The stdout callback echoes each token the moment it arrives.
llm = ChatOpenAI(
    model="gpt-4o-mini",
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],
)

for chunk in llm.stream("Tell me a story"):
    # Each chunk holds partial content; the callback already prints it,
    # so nothing more is needed here.
    pass
from langchain_openai import ChatOpenAI
from langchain_core.globals import set_llm_cache
from langchain_community.cache import SQLiteCache

# Persist responses in SQLite so repeated identical prompts skip the API.
set_llm_cache(SQLiteCache(database_path=".langchain_cache.db"))

llm = ChatOpenAI(model="gpt-4o-mini")

response1 = llm.invoke("What is 2+2?")  # first call hits the API
response2 = llm.invoke("What is 2+2?")  # identical call served from cache
from langchain_core.exceptions import OutputParserException
from openai import RateLimitError, APIError
def safe_invoke(chain, input_data, max_retries=3):
    """Invoke *chain* with retry/backoff and graceful error handling.

    Args:
        chain: Any runnable exposing ``invoke(input_data)``.
        input_data: Payload forwarded to ``chain.invoke``.
        max_retries: Total attempts before a RateLimitError propagates.

    Returns:
        The chain's result, or an ``{"error": ..., "raw": ...}`` dict when
        the model's output could not be parsed.

    Raises:
        ValueError: If ``max_retries`` is less than 1.
        RateLimitError: Once all retry attempts are exhausted.
        RuntimeError: Wrapping any other API error (original chained).
    """
    # Bug fix: `time` was used below but never imported anywhere in the file.
    import time

    if max_retries < 1:
        # Previously this fell off the loop and silently returned None.
        raise ValueError("max_retries must be >= 1")
    for attempt in range(max_retries):
        try:
            return chain.invoke(input_data)
        except RateLimitError:
            if attempt < max_retries - 1:
                # Exponential backoff: 1s, 2s, 4s, ...
                time.sleep(2 ** attempt)
                continue
            raise
        except OutputParserException as e:
            # Surface parse failures as data instead of crashing the caller.
            return {"error": str(e), "raw": e.llm_output}
        except APIError as e:
            # Preserve the original exception chain for debugging.
            raise RuntimeError(f"API error: {e}") from e
Proceed to langchain-core-workflow-a for chains and prompts workflow.
This skill should be used when the user asks to "create a hookify rule", "write a hook rule", "configure hookify", "add a hookify rule", or needs guidance on hookify rule syntax and patterns.
Create distinctive, production-grade frontend interfaces with high design quality. Use this skill when the user asks to build web components, pages, or applications. Generates creative, polished code that avoids generic AI aesthetics.