Introduction
As AI applications grow more complex, single-agent systems often struggle with intricate, multi-step tasks. Multi-agent systems address this by breaking complex problems down across specialized, collaborating agents. LangGraph, LangChain's framework for building stateful, multi-actor applications, provides powerful patterns for orchestrating these agent workflows.
What is LangGraph?
LangGraph is a library for building stateful, multi-actor applications with LLMs. It extends LangChain's capabilities by providing the following (a minimal sketch appears after the list):
- State management across multiple agents
- Graph-based workflow orchestration
- Conditional routing between agents
- Human-in-the-loop interactions
- Parallel execution capabilities
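To make these building blocks concrete, here is a minimal single-node graph: the shared state is a TypedDict, the node is a plain function that returns only the fields it updates, and the compiled graph is invoked like any runnable. This is a sketch only; the ChatOpenAI model is an assumption, and any LangChain chat model can take its place.

from typing import TypedDict
from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI  # assumed provider; any LangChain chat model works

llm = ChatOpenAI(model="gpt-4o-mini")

class QAState(TypedDict):
    question: str
    answer: str

def answer_node(state: QAState):
    # Nodes read the shared state and return only the keys they update
    return {"answer": llm.invoke(state["question"]).content}

graph = StateGraph(QAState)
graph.add_node("answer", answer_node)
graph.add_edge("answer", END)
graph.set_entry_point("answer")
app = graph.compile()

print(app.invoke({"question": "What is LangGraph?"})["answer"])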
Core Multi-Agent Patterns
1. Sequential Agent Pattern
In the most basic pattern, agents work in a predefined sequence, each building on the previous agent's output.
from langgraph.graph import StateGraph, END
from typing import TypedDict

# "llm" below is the shared chat model instance created in the sketch above
class AgentState(TypedDict):
    input: str
    research_output: str
    analysis_output: str
    final_output: str
def research_agent(state: AgentState):
    # Research agent gathers information on the input topic
    research_result = llm.invoke(f"Research the topic: {state['input']}").content
    return {"research_output": research_result}

def analysis_agent(state: AgentState):
    # Analysis agent processes the research output
    analysis = llm.invoke(f"Analyze this research: {state['research_output']}").content
    return {"analysis_output": analysis}

def writer_agent(state: AgentState):
    # Writer agent turns the analysis into the final report
    final_text = llm.invoke(f"Write a report based on: {state['analysis_output']}").content
    return {"final_output": final_text}
# Build the graph
workflow = StateGraph(AgentState)
workflow.add_node("research", research_agent)
workflow.add_node("analysis", analysis_agent)
workflow.add_node("writer", writer_agent)
workflow.add_edge("research", "analysis")
workflow.add_edge("analysis", "writer")
workflow.add_edge("writer", END)
workflow.set_entry_point("research")
app = workflow.compile()
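Once compiled, the whole pipeline runs with a single call; the topic below is just an illustrative input.

result = app.invoke({"input": "the impact of edge computing on IoT security"})
print(result["final_output"])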
2. Supervisor Pattern
A supervisor agent coordinates and delegates tasks to specialized worker agents based on the input type or complexity.
# The routing decision is stored on the state, so it needs its own field
class SupervisorState(TypedDict):
    input: str
    next_agent: str
    output: str  # written by the worker agents

def supervisor_agent(state: SupervisorState):
    supervisor_prompt = """
    You are a supervisor managing specialized agents:
    - code_agent: Handles programming tasks
    - math_agent: Solves mathematical problems
    - research_agent: Conducts research
    Analyze the input and route to the appropriate agent.
    Input: {input}
    Respond with: code_agent, math_agent, research_agent, or FINISH
    """
    response = llm.invoke(supervisor_prompt.format(input=state["input"]))
    return {"next_agent": response.content.strip()}
def route_to_agent(state: SupervisorState):
    next_agent = state.get("next_agent", "")
    if next_agent == "FINISH":
        return END
    return next_agent
# Graph with conditional routing; code_specialist, math_specialist, and
# research_specialist are assumed to be worker node functions defined elsewhere
workflow = StateGraph(SupervisorState)
workflow.add_node("supervisor", supervisor_agent)
workflow.add_node("code_agent", code_specialist)
workflow.add_node("math_agent", math_specialist)
workflow.add_node("research_agent", research_specialist)
workflow.add_conditional_edges(
    "supervisor",
    route_to_agent,
    {
        "code_agent": "code_agent",
        "math_agent": "math_agent",
        "research_agent": "research_agent",
        END: END
    }
)
workflow.set_entry_point("supervisor")3. Collaborative Review Pattern
3. Collaborative Review Pattern
Multiple agents work on the same task and review each other's work to improve quality.
import operator
from typing import Annotated

class ReviewState(TypedDict):
    input: str
    draft: str
    # The reviewers run in parallel, so their updates are merged with a reducer
    reviews: Annotated[list, operator.add]
    final_output: str

def initial_draft_agent(state: ReviewState):
    draft = llm.invoke(f"Write an initial draft for: {state['input']}").content
    return {"draft": draft}

def reviewer_agent_1(state: ReviewState):
    review = llm.invoke(f"Review for clarity: {state['draft']}").content
    return {"reviews": [{"type": "clarity", "feedback": review}]}

def reviewer_agent_2(state: ReviewState):
    review = llm.invoke(f"Review for accuracy: {state['draft']}").content
    return {"reviews": [{"type": "accuracy", "feedback": review}]}

def revision_agent(state: ReviewState):
    all_reviews = "\n".join(r["feedback"] for r in state["reviews"])
    revised = llm.invoke(
        f"Revise based on reviews:\nOriginal: {state['draft']}\nReviews: {all_reviews}"
    ).content
    return {"final_output": revised}
# Parallel review pattern: the draft fans out to both reviewers, then fans back in
workflow = StateGraph(ReviewState)
workflow.add_node("draft", initial_draft_agent)
workflow.add_node("review_1", reviewer_agent_1)
workflow.add_node("review_2", reviewer_agent_2)
workflow.add_node("revise", revision_agent)
workflow.add_edge("draft", "review_1")
workflow.add_edge("draft", "review_2")
workflow.add_edge(["review_1", "review_2"], "revise")4. Hierarchical Agent Pattern
4. Hierarchical Agent Pattern
Agents are organized in a hierarchy where higher-level agents delegate to lower-level specialists.
class HierarchicalState(TypedDict):
    task: str
    subtasks: list
    results: dict
    final_output: str
def task_planner(state: HierarchicalState):
    """High-level agent that breaks down complex tasks"""
    subtasks = llm.invoke(f"Break down this task into subtasks: {state['task']}").content
    return {"subtasks": subtasks.split('\n')}

def research_specialist(state: HierarchicalState):
    """Specialized agent for research tasks"""
    research_tasks = [task for task in state['subtasks'] if 'research' in task.lower()]
    results = state.get('results', {})
    for task in research_tasks:
        result = llm.invoke(f"Research: {task}").content
        results[task] = result
    return {"results": results}

def analysis_specialist(state: HierarchicalState):
    """Specialized agent for analysis tasks"""
    analysis_tasks = [task for task in state['subtasks'] if 'analyze' in task.lower()]
    results = state.get('results', {})
    for task in analysis_tasks:
        result = llm.invoke(f"Analyze: {task}").content
        results[task] = result
    return {"results": results}
5. Human-in-the-Loop Pattern
This pattern incorporates human feedback and approval at critical decision points.
# Depending on your langgraph version, from_conn_string() may need to be used as a context manager
from langgraph.checkpoint.sqlite import SqliteSaver
class HumanReviewState(TypedDict, total=False):
    input: str
    draft: str
    needs_review: bool
    review_content: str
    human_approved: bool
    human_feedback: str
    final_output: str

def human_review_node(state: HumanReviewState):
    """Surface the current draft for human review"""
    return {"needs_review": True, "review_content": state["draft"]}

def process_human_feedback(state: HumanReviewState):
    """Apply the human's decision and continue"""
    if state.get("human_approved"):
        return {"final_output": state["draft"]}
    # Otherwise revise the draft using the feedback and queue another review
    revised = llm.invoke(
        f"Revise this draft based on feedback.\nDraft: {state['draft']}\nFeedback: {state['human_feedback']}"
    ).content
    return {"draft": revised, "needs_review": True}
# Create the graph with checkpointing so execution can pause for human interaction
memory = SqliteSaver.from_conn_string(":memory:")

workflow = StateGraph(HumanReviewState)
workflow.add_node("create_draft", draft_agent)  # draft_agent: assumed to write state["draft"]
workflow.add_node("human_review", human_review_node)
workflow.add_node("process_feedback", process_human_feedback)
workflow.add_edge("create_draft", "human_review")
workflow.add_edge("human_review", "process_feedback")
workflow.add_edge("process_feedback", END)  # in practice you might loop back to human_review until approved
workflow.set_entry_point("create_draft")

# Pause before processing feedback so a human can inspect and update the state
app = workflow.compile(checkpointer=memory, interrupt_before=["process_feedback"])
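Because the graph is checkpointed, a run can be started, paused at the interrupt, updated with the human's verdict, and then resumed. A rough usage sketch; the thread id and feedback text are illustrative:

config = {"configurable": {"thread_id": "review-1"}}

# First call runs until the interrupt before "process_feedback"
app.invoke({"input": "Draft a product announcement"}, config)

# A human inspects the draft, and their verdict is written back into the state
app.update_state(config, {"human_approved": False, "human_feedback": "Shorten the intro."})

# Invoking with None as the input resumes the paused run from the checkpoint
final_state = app.invoke(None, config)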