Use Agent Memory with LangGraph
In this article, you will learn how to connect Agent Memory to LangGraph in two ways:
- a prebuilt ReAct agent that calls a memory-search tool when needed;
- a custom flow built with StateGraph(MessagesState).
Tip: For package setup, see Get Started with Agent Memory. If you need a local Oracle AI Database for this example, see Run Oracle AI Database Locally.
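If your environment is not set up yet, an install along these lines is typical (package names are assumptions based on the imports below; the linked guide is authoritative):
pip install oracleagentmemory langgraph langchain langchain-openai oracledb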
Agent Memory Setup
Start by configuring the Agent Memory client, a LangGraph chat model, and a reusable search_memory tool. The Agent Memory client also uses its own LLM to periodically extract durable memories from recent thread messages, while the LangGraph model handles agent responses and tool use.
from typing import Annotated
from langchain.agents import create_agent
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, MessagesState, StateGraph
from oracleagentmemory.core.embedders.embedder import Embedder
from oracleagentmemory.core.llms.llm import Llm
from oracleagentmemory.core.oracleagentmemory import OracleAgentMemory
embedder = Embedder(
model="YOUR_EMBEDDING_MODEL",
api_base="YOUR_EMBEDDING_API_BASE",
api_key="YOUR_EMBEDDING_API_KEY",
)
llm = Llm(
model="gpt-4.1-mini",
api_key="YOUR_OPENAI_API_KEY",
)
db_pool = ... #an oracledb connection or connection pool
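#For example, one illustrative way to create the pool (placeholder values,
#adjust for your environment):
#import oracledb
#db_pool = oracledb.create_pool(
#    user="YOUR_DB_USER",
#    password="YOUR_DB_PASSWORD",
#    dsn="localhost/FREEPDB1",
#)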
langgraph_llm = ChatOpenAI(
model="gpt-4.1-mini",
api_key="YOUR_OPENAI_API_KEY",
)
#Keep these identifiers stable for the same assistant and end user so memory
#is scoped consistently across threads and sessions.
agent_id = "support_agent"
user_id = "user_123"
agent_memory = OracleAgentMemory(
connection=db_pool,
embedder=embedder,
llm=llm,
)
@tool
def search_memory(
query: Annotated[str, "Question to search in Oracle Agent Memory"],
) -> Annotated[str, "Top matching durable memory content"]:
"""Search Oracle Agent Memory for durable user facts relevant to the current request."""
results = agent_memory.search(
query=query,
user_id=user_id,
agent_id=agent_id,
max_results=3,
record_types=["memory"],
)
if not results:
return "No relevant memory found."
return "\n".join(result.content for result in results)
def _latest_user_message(state: MessagesState) -> str:
for message in reversed(state["messages"]):
if getattr(message, "type", None) == "human":
return str(message.content)
if getattr(message, "role", None) == "user":
return str(message.content)
return ""
def _build_memory_context(query: str) -> str:
results = agent_memory.search(
query=query,
user_id=user_id,
agent_id=agent_id,
max_results=3,
record_types=["memory"],
)
memory_context = "\n".join(f"- {result.content}" for result in results)
return memory_context or "- No relevant memory found."
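You can smoke-test the tool outside of any agent before wiring it in. A minimal sketch (the query string is illustrative):
print(search_memory.invoke({"query": "What do we know about the user?"}))
#Prints "No relevant memory found." until durable memories have been stored.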
Prebuilt ReAct Agent
LangChain provides a prebuilt ReAct-style agent on top of the LangGraph runtime. You can expose search_memory as one of its tools and let the agent decide when durable memory should be queried.
Configure the Prebuilt Agent
react_agent = create_agent(
model=langgraph_llm,
tools=[search_memory],
system_prompt=(
"You are a support agent. When the user asks about durable facts from "
"prior sessions, call the search_memory tool before answering."
),
)
react_memory_thread = agent_memory.create_thread(
thread_id="langgraph_react_memory_demo",
user_id=user_id,
agent_id=agent_id,
)
Persist User Context After a Prebuilt ReAct Session
After the first execution completes, append the exchanged messages to Agent Memory and store any durable fact that should be reused later.
react_session_1 = react_agent.invoke(
{
"messages": [
HumanMessage(
content="I am John, a Python developer and I need help debugging a payment service."
)
]
}
)
react_assistant_reply = react_session_1["messages"][-1].content
print(react_assistant_reply)
#I can help with that. What error are you seeing?
#add_messages will add messages to the DB and extract memories automatically
react_memory_thread.add_messages(
[
{
"role": "user",
"content": "I am John, a Python developer and I need help debugging a payment service.",
},
{
"role": "assistant",
"content": react_assistant_reply,
},
]
)
#add_memory adds memory to the DB
react_memory_thread.add_memory("The user is John, a Python developer.")
Reuse Memory in a New Prebuilt ReAct Session
When a later execution starts, reopen the same Agent Memory thread and let the prebuilt agent call search_memory before answering.
react_memory_thread = agent_memory.get_thread("langgraph_react_memory_demo")
react_session_2 = react_agent.invoke(
{
"messages": [
HumanMessage(content="Who am I?")
]
}
)
react_remembered_reply = react_session_2["messages"][-1].content
print(react_remembered_reply)
Output:
The user is John, a Python developer.
Custom Flow
If you need tighter control over the orchestration, build a custom LangGraph flow and inject Agent Memory results directly into your model node.
Configure the Custom Flow
def call_model(state: MessagesState):
query = _latest_user_message(state)
memory_context = _build_memory_context(query)
response = langgraph_llm.invoke(
[
SystemMessage(
content=(
"You are a support agent. Use the durable memory below when it is "
"relevant to the current user request.\n\n"
f"Durable memory:\n{memory_context}"
)
),
*state["messages"],
]
)
return {"messages": [response]}
builder = StateGraph(MessagesState)
builder.add_node("call_model", call_model)
builder.add_edge(START, "call_model")
builder.add_edge("call_model", END)
flow_graph = builder.compile()
flow_memory_thread = agent_memory.create_thread(
thread_id="langgraph_flow_memory_demo",
user_id=user_id,
agent_id=agent_id,
)
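To sanity-check the topology before running it, you can render the compiled graph. A minimal sketch (draw_ascii additionally requires the grandalf package):
print(flow_graph.get_graph().draw_ascii())
#Expected shape: __start__ -> call_model -> __end__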
Persist User Context After a Flow Session
After the first flow execution completes, append the exchanged messages to Agent Memory and store any durable fact that should be reused later.
flow_session_1 = flow_graph.invoke(
{
"messages": [
HumanMessage(
content="I am John, a Python developer and I need help debugging a payment service."
)
]
}
)
flow_assistant_reply = flow_session_1["messages"][-1].content
print(flow_assistant_reply)
#I can help with that. What error are you seeing?
flow_memory_thread.add_messages(
[
{
"role": "user",
"content": "I am John, a Python developer and I need help debugging a payment service.",
},
{
"role": "assistant",
"content": flow_assistant_reply,
},
]
)
flow_memory_thread.add_memory("The user is John, a Python developer.")
Reuse Memory in a New Flow Session
When a later flow execution starts, reopen the same Agent Memory thread and let the graph search durable memory to answer with prior user context.
flow_memory_thread = agent_memory.get_thread("langgraph_flow_memory_demo")
flow_session_2 = flow_graph.invoke(
{
"messages": [
HumanMessage(content="Who am I?")
]
}
)
flow_remembered_reply = flow_session_2["messages"][-1].content
print(flow_remembered_reply)
Output:
The user is John, a Python developer.
Disable Automatic Extraction
If you only want to persist messages and add durable memories manually, create the Agent Memory client with extract_memories=False and write the memory rows yourself.
manual_agent_memory = OracleAgentMemory(
connection=db_pool,
embedder=embedder,
extract_memories=False,
)
manual_memory_thread = manual_agent_memory.create_thread(
thread_id="langgraph_manual_memory_demo",
user_id=user_id,
agent_id=agent_id,
)
manual_memory_thread.add_messages(
[
{
"role": "user",
"content": "Please remember that I prefer concise code reviews.",
},
{
"role": "assistant",
"content": "Understood. I will keep responses concise.",
},
]
)
manual_memory_thread.add_memory("The user prefers concise code reviews.")
Conclusion
In this article, we learned how to connect Agent Memory to LangGraph with either a prebuilt ReAct agent or a custom StateGraph(MessagesState) flow, how to persist thread messages after each session, and how to reuse durable memory across later executions.
Tip: Having learned how to integrate Agent Memory with LangGraph, you may also be interested in Integrate Agent Memory with WayFlow.
Full Code
#Copyright © 2026 Oracle and/or its affiliates.
#isort:skip_file
#fmt: off
#Agent Memory Code Example - Integration with LangGraph
#-------------------------------------------------------
#How to use:
#Create a new Python virtual environment and install the latest oracleagentmemory version.
#You can now run the script
#1. As a Python file:
#```bash
#python integration_with_langgraph.py
#```
#2. As a Notebook (in VSCode):
#When viewing the file,
#- press the keys Ctrl + Enter to run the selected cell
#- or Shift + Enter to run the selected cell and move to the cell below
##Configure Oracle Memory and LangGraph setup
#%%
from typing import Annotated
from langchain.agents import create_agent
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, MessagesState, StateGraph
from oracleagentmemory.core.embedders.embedder import Embedder
from oracleagentmemory.core.llms.llm import Llm
from oracleagentmemory.core.oracleagentmemory import OracleAgentMemory
embedder = Embedder(
model="YOUR_EMBEDDING_MODEL",
api_base="YOUR_EMBEDDING_API_BASE",
api_key="YOUR_EMBEDDING_API_KEY",
)
llm = Llm(
model="gpt-4.1-mini",
api_key="YOUR_OPENAI_API_KEY",
)
db_pool = ... #an oracledb connection or connection pool
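#For example, one illustrative way to create the pool (placeholder values,
#adjust for your environment):
#import oracledb
#db_pool = oracledb.create_pool(
#    user="YOUR_DB_USER",
#    password="YOUR_DB_PASSWORD",
#    dsn="localhost/FREEPDB1",
#)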
langgraph_llm = ChatOpenAI(
model="gpt-4.1-mini",
api_key="YOUR_OPENAI_API_KEY",
)
#Keep these identifiers stable for the same assistant and end user so memory
#is scoped consistently across threads and sessions.
agent_id = "support_agent"
user_id = "user_123"
agent_memory = OracleAgentMemory(
connection=db_pool,
embedder=embedder,
llm=llm,
)
@tool
def search_memory(
query: Annotated[str, "Question to search in Oracle Agent Memory"],
) -> Annotated[str, "Top matching durable memory content"]:
"""Search Oracle Agent Memory for durable user facts relevant to the current request."""
results = agent_memory.search(
query=query,
user_id=user_id,
agent_id=agent_id,
max_results=3,
record_types=["memory"],
)
if not results:
return "No relevant memory found."
return "\n".join(result.content for result in results)
def _latest_user_message(state: MessagesState) -> str:
for message in reversed(state["messages"]):
if getattr(message, "type", None) == "human":
return str(message.content)
if getattr(message, "role", None) == "user":
return str(message.content)
return ""
def _build_memory_context(query: str) -> str:
results = agent_memory.search(
query=query,
user_id=user_id,
agent_id=agent_id,
max_results=3,
record_types=["memory"],
)
memory_context = "\n".join(f"- {result.content}" for result in results)
return memory_context or "- No relevant memory found."
##Configure a prebuilt LangGraph ReAct agent
#%%
react_agent = create_agent(
model=langgraph_llm,
tools=[search_memory],
system_prompt=(
"You are a support agent. When the user asks about durable facts from "
"prior sessions, call the search_memory tool before answering."
),
)
react_memory_thread = agent_memory.create_thread(
thread_id="langgraph_react_memory_demo",
user_id=user_id,
agent_id=agent_id,
)
##Persist user context after a prebuilt ReAct session
#%%
react_session_1 = react_agent.invoke(
{
"messages": [
HumanMessage(
content="I am John, a Python developer and I need help debugging a payment service."
)
]
}
)
react_assistant_reply = react_session_1["messages"][-1].content
print(react_assistant_reply)
#I can help with that. What error are you seeing?
react_memory_thread.add_messages(
[
{
"role": "user",
"content": "I am John, a Python developer and I need help debugging a payment service.",
},
{
"role": "assistant",
"content": react_assistant_reply,
},
]
)
react_memory_thread.add_memory("The user is John, a Python developer.")
##Reuse memory in a new prebuilt ReAct session
#%%
react_memory_thread = agent_memory.get_thread("langgraph_react_memory_demo")
react_session_2 = react_agent.invoke(
{
"messages": [
HumanMessage(content="Who am I?")
]
}
)
react_remembered_reply = react_session_2["messages"][-1].content
print(react_remembered_reply)
#The user is John, a Python developer.
##Configure a custom LangGraph flow
#%%
def call_model(state: MessagesState):
query = _latest_user_message(state)
memory_context = _build_memory_context(query)
response = langgraph_llm.invoke(
[
SystemMessage(
content=(
"You are a support agent. Use the durable memory below when it is "
"relevant to the current user request.\n\n"
f"Durable memory:\n{memory_context}"
)
),
*state["messages"],
]
)
return {"messages": [response]}
builder = StateGraph(MessagesState)
builder.add_node("call_model", call_model)
builder.add_edge(START, "call_model")
builder.add_edge("call_model", END)
flow_graph = builder.compile()
flow_memory_thread = agent_memory.create_thread(
thread_id="langgraph_flow_memory_demo",
user_id=user_id,
agent_id=agent_id,
)
##Persist user context after a flow session
#%%
flow_session_1 = flow_graph.invoke(
{
"messages": [
HumanMessage(
content="I am John, a Python developer and I need help debugging a payment service."
)
]
}
)
flow_assistant_reply = flow_session_1["messages"][-1].content
print(flow_assistant_reply)
#I can help with that. What error are you seeing?
flow_memory_thread.add_messages(
[
{
"role": "user",
"content": "I am John, a Python developer and I need help debugging a payment service.",
},
{
"role": "assistant",
"content": flow_assistant_reply,
},
]
)
flow_memory_thread.add_memory("The user is John, a Python developer.")
##Reuse memory in a new flow session
#%%
flow_memory_thread = agent_memory.get_thread("langgraph_flow_memory_demo")
flow_session_2 = flow_graph.invoke(
{
"messages": [
HumanMessage(content="Who am I?")
]
}
)
flow_remembered_reply = flow_session_2["messages"][-1].content
print(flow_remembered_reply)
#The user is John, a Python developer.
##Disable automatic memory extraction
#%%
manual_agent_memory = OracleAgentMemory(
connection=db_pool,
embedder=embedder,
extract_memories=False,
)
manual_memory_thread = manual_agent_memory.create_thread(
thread_id="langgraph_manual_memory_demo",
user_id=user_id,
agent_id=agent_id,
)
manual_memory_thread.add_messages(
[
{
"role": "user",
"content": "Please remember that I prefer concise code reviews.",
},
{
"role": "assistant",
"content": "Understood. I will keep responses concise.",
},
]
)
manual_memory_thread.add_memory("The user prefers concise code reviews.")