Basic LangGraph Agent
# agent.py
# A minimal LangGraph agent served over HTTP with LangServe + FastAPI.
from fastapi import FastAPI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_agent_executor
from langserve import add_routes

# Prompt: system instruction, then the conversation, then the agent scratchpad.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant"),
        MessagesPlaceholder(variable_name="messages"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)

# Deterministic model (temperature 0).
model = ChatOpenAI(temperature=0)

# Map the executor's state into the prompt variables; the scratchpad starts empty.
_inputs = {
    "messages": lambda state: state["messages"],
    "agent_scratchpad": lambda state: [],
}
agent = _inputs | prompt | model

# No tools for this basic agent.
app = create_agent_executor(agent, [])

fastapi_app = FastAPI(title="LangGraph Agent")
add_routes(fastapi_app, app, path="/chat")

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(fastapi_app, host="localhost", port=2024)
Run the script, then access the agent at http://localhost:2024/chat (graph ID: `chat`).
Human-in-the-Loop Example
# agent.py
# Human-in-the-loop email agent: defines the write_email tool, the prompt,
# and the tool-bound model used by the pipeline below.
from fastapi import FastAPI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
# NOTE(review): ToolInvocation is referenced by handle_interrupt below; it was
# previously used without an import (NameError) — confirm its location for the
# installed langgraph version.
from langgraph.prebuilt import ToolInvocation, create_agent_executor
# `interrupt` is exported from langgraph.types, not langgraph.prebuilt.
from langgraph.types import interrupt
from langserve import add_routes


@tool
def write_email(subject: str, body: str, to: str):
    """Draft an email to `to` with the given `subject` and `body`."""
    return f"Draft email to {to} with subject {subject} sent."


tools = [write_email]

# Prompt used by the agent pipeline below; it was referenced but never
# defined in the original script.
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant"),
    MessagesPlaceholder(variable_name="messages"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])

# Bind the tool schema to the model so it can emit write_email tool calls.
model = ChatOpenAI(model="gpt-4-turbo-preview").bind_tools(tools)
def handle_interrupt(state):
    """Pause the graph for human review before a write_email tool call runs.

    If the last message's content is a list containing a write_email
    ToolInvocation, raise an interrupt carrying that invocation's args;
    otherwise pass the messages through unchanged.
    """
    messages = state["messages"]
    content = messages[-1].content
    # NOTE(review): this assumes tool invocations arrive inside the message
    # content list — confirm against the message format produced upstream.
    if isinstance(content, list):
        pending = next(
            (
                item
                for item in content
                if isinstance(item, ToolInvocation) and item.name == "write_email"
            ),
            None,
        )
        if pending is not None:
            return interrupt(messages, {"type": "interrupt", "args": {"type": "response", "studio": pending.args}})
    return {"messages": messages}
# Pipeline: map state into prompt variables, format, call the model, then
# route the result through the human-in-the-loop interrupt handler.
_inputs = {
    "messages": lambda state: state["messages"],
    "agent_scratchpad": lambda state: [],
}
agent = _inputs | prompt | model | handle_interrupt

app = create_agent_executor(agent, tools)

fastapi_app = FastAPI(title="LangGraph Agent")
add_routes(fastapi_app, app, path="/email_agent")

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(fastapi_app, host="localhost", port=2024)
Run the script, then access the agent at http://localhost:2024/email_agent (graph ID: `email_agent`).