Utiliser la mémoire de l'agent avec LangGraph
Dans cet article, vous apprendrez à connecter la mémoire de l'agent à LangGraph de deux façons :
- un agent ReAct prédéfini qui appelle un outil de recherche de mémoire si nécessaire ;
- un flux personnalisé créé avec
StateGraph(MessagesState).
À savoir : Pour la configuration des packages, reportez-vous à Introduction à la mémoire de l'agent. Si vous avez besoin d'une instance Oracle AI Database locale pour cet exemple, reportez-vous à Exécution locale d'Oracle AI Database.
Configuration de la mémoire de l'agent
Commencez par configurer le client de mémoire d'agent, un modèle de discussion LangGraph et un outil search_memory réutilisable. Le client de mémoire d'agent utilise également son propre LLM pour extraire périodiquement les mémoires durables des messages de thread récents, tandis que le modèle LangGraph gère les réponses d'agent et l'utilisation des outils.
from typing import Annotated
from langchain.agents import create_agent
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, MessagesState, StateGraph
from oracleagentmemory.core.embedders.embedder import Embedder
from oracleagentmemory.core.llms.llm import Llm
from oracleagentmemory.core.oracleagentmemory import OracleAgentMemory
# Embedding model used by agent memory for vector search over stored records.
embedder = Embedder(
    model="YOUR_EMBEDDING_MODEL",
    api_base="YOUR_EMBEDDING_API_BASE",
    api_key="YOUR_EMBEDDING_API_KEY",
)
# LLM used internally by the memory client to extract durable memories
# from recent thread messages.
llm = Llm(
    model="gpt-4.1-mini",
    api_key="YOUR_OPENAI_API_KEY",
)
db_pool = ...  # an oracledb connection or connection pool
# Separate chat model driving the LangGraph agent itself (replies and tool use).
langgraph_llm = ChatOpenAI(
    model="gpt-4.1-mini",
    api_key="YOUR_OPENAI_API_KEY",
)
# Keep these identifiers stable for the same assistant and end user so memory
# is scoped consistently across threads and sessions.
agent_id = "support_agent"
user_id = "user_123"
# Agent memory client backed by the Oracle database connection.
agent_memory = OracleAgentMemory(
    connection=db_pool,
    embedder=embedder,
    llm=llm,
)
@tool
def search_memory(
    query: Annotated[str, "Question to search in Oracle Agent Memory"],
) -> Annotated[str, "Top matching durable memory content"]:
    """Search Oracle Agent Memory for durable user facts relevant to the current request."""
    # Restrict the lookup to durable "memory" records scoped to this user/agent pair.
    matches = agent_memory.search(
        query=query,
        user_id=user_id,
        agent_id=agent_id,
        max_results=3,
        record_types=["memory"],
    )
    if matches:
        return "\n".join(match.content for match in matches)
    return "No relevant memory found."
def _latest_user_message(state: MessagesState) -> str:
    """Return the content of the most recent user message in *state*, or "" if none."""
    for msg in reversed(state["messages"]):
        # LangChain message objects expose type == "human"; dict-like message
        # objects expose role == "user" instead.
        from_user = (
            getattr(msg, "type", None) == "human"
            or getattr(msg, "role", None) == "user"
        )
        if from_user:
            return str(msg.content)
    return ""
def _build_memory_context(query: str) -> str:
    """Format the top durable-memory hits for *query* as a bulleted text block."""
    hits = agent_memory.search(
        query=query,
        user_id=user_id,
        agent_id=agent_id,
        max_results=3,
        record_types=["memory"],
    )
    bullets = [f"- {hit.content}" for hit in hits]
    if not bullets:
        return "- No relevant memory found."
    return "\n".join(bullets)
Agent ReAct prédéfini
LangChain fournit un agent prédéfini de type ReAct s'appuyant sur le runtime LangGraph. Vous pouvez exposer search_memory comme l'un de ses outils et laisser l'agent décider du moment où interroger la mémoire durable.
Configuration de l'agent prédéfini
# Prebuilt ReAct agent: the model decides when to call the search_memory tool.
react_agent = create_agent(
    model=langgraph_llm,
    tools=[search_memory],
    system_prompt=(
        "You are a support agent. When the user asks about durable facts from "
        "prior sessions, call the search_memory tool before answering."
    ),
)
# Memory thread that persists this conversation under the user/agent scope.
react_memory_thread = agent_memory.create_thread(
    thread_id="langgraph_react_memory_demo",
    user_id=user_id,
    agent_id=agent_id,
)
Conserver le contexte utilisateur après une session de l'agent ReAct prédéfini
Une fois la première exécution terminée, ajoutez les messages échangés à la mémoire de l'agent et stockez tout fait durable qui doit être réutilisé ultérieurement.
# First session: the user introduces themselves.
react_session_1 = react_agent.invoke(
    {
        "messages": [
            HumanMessage(
                content="I am John, a Python developer and I need help debugging a payment service."
            )
        ]
    }
)
react_assistant_reply = react_session_1["messages"][-1].content
print(react_assistant_reply)
# Example output: I can help with that. What error are you seeing?
# add_messages will add messages to the DB and extract memories automatically
react_memory_thread.add_messages(
    [
        {
            "role": "user",
            "content": "I am John, a Python developer and I need help debugging a payment service.",
        },
        {
            "role": "assistant",
            "content": react_assistant_reply,
        },
    ]
)
# add_memory adds an explicit durable memory record to the DB
react_memory_thread.add_memory("The user is John, a Python developer.")
Réutiliser la mémoire dans une nouvelle session de l'agent ReAct prédéfini
Lorsqu'une exécution ultérieure démarre, rouvrez le même thread de mémoire d'agent et laissez l'agent prédéfini appeler search_memory avant de répondre.
# Later session: reopen the same memory thread so the scope stays consistent.
react_memory_thread = agent_memory.get_thread("langgraph_react_memory_demo")
react_session_2 = react_agent.invoke(
    {
        "messages": [
            HumanMessage(content="Who am I?")
        ]
    }
)
react_remembered_reply = react_session_2["messages"][-1].content
print(react_remembered_reply)
Sortie :
The user is John, a Python developer.
Flux personnalisé
Si vous avez besoin d'un contrôle plus strict sur l'orchestration, créez un flux LangGraph personnalisé et injectez les résultats de la mémoire d'agent directement dans votre nœud de modèle.
Configurer le flux personnalisé
def call_model(state: MessagesState):
    """Model node: prepend durable-memory context as a system message, then invoke the LLM."""
    from langchain_core.messages import SystemMessage

    durable = _build_memory_context(_latest_user_message(state))
    system_prompt = (
        "You are a support agent. Use the durable memory below when it is "
        "relevant to the current user request.\n\n"
        f"Durable memory:\n{durable}"
    )
    prompt_messages = [SystemMessage(content=system_prompt), *state["messages"]]
    return {"messages": [langgraph_llm.invoke(prompt_messages)]}
# Single-node graph: START -> call_model -> END.
builder = StateGraph(MessagesState)
builder.add_node("call_model", call_model)
builder.add_edge(START, "call_model")
builder.add_edge("call_model", END)
flow_graph = builder.compile()
# Memory thread that persists the custom-flow conversation.
flow_memory_thread = agent_memory.create_thread(
    thread_id="langgraph_flow_memory_demo",
    user_id=user_id,
    agent_id=agent_id,
)
Persistance du contexte utilisateur après une session de flux
Une fois la première exécution de flux terminée, ajoutez les messages échangés à la mémoire de l'agent et stockez tout fait durable qui doit être réutilisé ultérieurement.
# First flow session: the user introduces themselves.
flow_session_1 = flow_graph.invoke(
    {
        "messages": [
            HumanMessage(
                content="I am John, a Python developer and I need help debugging a payment service."
            )
        ]
    }
)
flow_assistant_reply = flow_session_1["messages"][-1].content
print(flow_assistant_reply)
# Example output: I can help with that. What error are you seeing?
# Persist the exchange; memories are extracted automatically by default.
flow_memory_thread.add_messages(
    [
        {
            "role": "user",
            "content": "I am John, a Python developer and I need help debugging a payment service.",
        },
        {
            "role": "assistant",
            "content": flow_assistant_reply,
        },
    ]
)
# Also store an explicit durable memory record.
flow_memory_thread.add_memory("The user is John, a Python developer.")
Réutiliser la mémoire dans une nouvelle session de flux
Lorsqu'une exécution de flux ultérieure démarre, rouvrez le même thread de mémoire d'agent et laissez le graphe rechercher dans la mémoire durable le contexte utilisateur des sessions précédentes.
# Later flow session: reopen the same memory thread before invoking the graph.
flow_memory_thread = agent_memory.get_thread("langgraph_flow_memory_demo")
flow_session_2 = flow_graph.invoke(
    {
        "messages": [
            HumanMessage(content="Who am I?")
        ]
    }
)
flow_remembered_reply = flow_session_2["messages"][-1].content
print(flow_remembered_reply)
Sortie :
The user is John, a Python developer.
Désactiver l'extraction automatique
Si vous souhaitez uniquement rendre persistants les messages et ajouter manuellement des mémoires durables, créez le client de mémoire d'agent avec extract_memories=False et écrivez les lignes de mémoire vous-même.
# With extract_memories=False the client only persists messages; no llm is
# passed because automatic memory extraction is disabled.
manual_agent_memory = OracleAgentMemory(
    connection=db_pool,
    embedder=embedder,
    extract_memories=False,
)
manual_memory_thread = manual_agent_memory.create_thread(
    thread_id="langgraph_manual_memory_demo",
    user_id=user_id,
    agent_id=agent_id,
)
manual_memory_thread.add_messages(
    [
        {
            "role": "user",
            "content": "Please remember that I prefer concise code reviews.",
        },
        {
            "role": "assistant",
            "content": "Understood. I will keep responses concise.",
        },
    ]
)
# Durable memories must now be written explicitly.
manual_memory_thread.add_memory("The user prefers concise code reviews.")
Conclusion
Dans cet article, nous avons appris à connecter la mémoire d'agent à LangGraph à l'aide d'un agent ReAct prédéfini ou d'un flux StateGraph(MessagesState) personnalisé, à persister les messages de thread après chaque session et à réutiliser la mémoire durable lors des exécutions ultérieures.
À savoir : Après avoir appris à intégrer la mémoire d'agent à LangGraph, vous pouvez également être intéressé par Intégrer la mémoire d'agent à WayFlow.
Code complet
#Copyright © 2026 Oracle and/or its affiliates.
#isort:skip_file
#fmt: off
#Agent Memory Code Example - Integration with LangGraph
#-------------------------------------------------------
#How to use:
#Create a new Python virtual environment and install the latest oracleagentmemory version.
#You can now run the script
#1. As a Python file:
#```bash
#python integration_with_langgraph.py
#```
#2. As a Notebook (in VSCode):
#When viewing the file,
#- press the keys Ctrl + Enter to run the selected cell
#- or Shift + Enter to run the selected cell and move to the cell below
##Configure Oracle Memory and LangGraph setup
#%%
from typing import Annotated
from langchain.agents import create_agent
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, MessagesState, StateGraph
from oracleagentmemory.core.embedders.embedder import Embedder
from oracleagentmemory.core.llms.llm import Llm
from oracleagentmemory.core.oracleagentmemory import OracleAgentMemory
# Embedding model used by agent memory for vector search over stored records.
embedder = Embedder(
    model="YOUR_EMBEDDING_MODEL",
    api_base="YOUR_EMBEDDING_API_BASE",
    api_key="YOUR_EMBEDDING_API_KEY",
)
# LLM used internally by the memory client to extract durable memories
# from recent thread messages.
llm = Llm(
    model="gpt-4.1-mini",
    api_key="YOUR_OPENAI_API_KEY",
)
db_pool = ...  # an oracledb connection or connection pool
# Separate chat model driving the LangGraph agent itself (replies and tool use).
langgraph_llm = ChatOpenAI(
    model="gpt-4.1-mini",
    api_key="YOUR_OPENAI_API_KEY",
)
# Keep these identifiers stable for the same assistant and end user so memory
# is scoped consistently across threads and sessions.
agent_id = "support_agent"
user_id = "user_123"
# Agent memory client backed by the Oracle database connection.
agent_memory = OracleAgentMemory(
    connection=db_pool,
    embedder=embedder,
    llm=llm,
)
@tool
def search_memory(
    query: Annotated[str, "Question to search in Oracle Agent Memory"],
) -> Annotated[str, "Top matching durable memory content"]:
    """Search Oracle Agent Memory for durable user facts relevant to the current request."""
    # Restrict the lookup to durable "memory" records scoped to this user/agent pair.
    matches = agent_memory.search(
        query=query,
        user_id=user_id,
        agent_id=agent_id,
        max_results=3,
        record_types=["memory"],
    )
    if matches:
        return "\n".join(match.content for match in matches)
    return "No relevant memory found."
def _latest_user_message(state: MessagesState) -> str:
    """Return the content of the most recent user message in *state*, or "" if none."""
    for msg in reversed(state["messages"]):
        # LangChain message objects expose type == "human"; dict-like message
        # objects expose role == "user" instead.
        from_user = (
            getattr(msg, "type", None) == "human"
            or getattr(msg, "role", None) == "user"
        )
        if from_user:
            return str(msg.content)
    return ""
def _build_memory_context(query: str) -> str:
    """Format the top durable-memory hits for *query* as a bulleted text block."""
    hits = agent_memory.search(
        query=query,
        user_id=user_id,
        agent_id=agent_id,
        max_results=3,
        record_types=["memory"],
    )
    bullets = [f"- {hit.content}" for hit in hits]
    if not bullets:
        return "- No relevant memory found."
    return "\n".join(bullets)
## Configure a prebuilt LangGraph ReAct agent
#%%
# Prebuilt ReAct agent: the model decides when to call the search_memory tool.
react_agent = create_agent(
    model=langgraph_llm,
    tools=[search_memory],
    system_prompt=(
        "You are a support agent. When the user asks about durable facts from "
        "prior sessions, call the search_memory tool before answering."
    ),
)
# Memory thread that persists this conversation under the user/agent scope.
react_memory_thread = agent_memory.create_thread(
    thread_id="langgraph_react_memory_demo",
    user_id=user_id,
    agent_id=agent_id,
)
## Persist user context after a prebuilt ReAct session
#%%
# First session: the user introduces themselves.
react_session_1 = react_agent.invoke(
    {
        "messages": [
            HumanMessage(
                content="I am John, a Python developer and I need help debugging a payment service."
            )
        ]
    }
)
react_assistant_reply = react_session_1["messages"][-1].content
print(react_assistant_reply)
# Example output: I can help with that. What error are you seeing?
# add_messages persists the exchange and extracts memories automatically.
react_memory_thread.add_messages(
    [
        {
            "role": "user",
            "content": "I am John, a Python developer and I need help debugging a payment service.",
        },
        {
            "role": "assistant",
            "content": react_assistant_reply,
        },
    ]
)
# add_memory adds an explicit durable memory record to the DB.
react_memory_thread.add_memory("The user is John, a Python developer.")
## Reuse memory in a new prebuilt ReAct session
#%%
# Later session: reopen the same memory thread so the scope stays consistent.
react_memory_thread = agent_memory.get_thread("langgraph_react_memory_demo")
react_session_2 = react_agent.invoke(
    {
        "messages": [
            HumanMessage(content="Who am I?")
        ]
    }
)
react_remembered_reply = react_session_2["messages"][-1].content
print(react_remembered_reply)
# Example output: The user is John, a Python developer.
##Configure a custom LangGraph flow
#%%
def call_model(state: MessagesState):
    """Model node: prepend durable-memory context as a system message, then invoke the LLM."""
    from langchain_core.messages import SystemMessage

    durable = _build_memory_context(_latest_user_message(state))
    system_prompt = (
        "You are a support agent. Use the durable memory below when it is "
        "relevant to the current user request.\n\n"
        f"Durable memory:\n{durable}"
    )
    prompt_messages = [SystemMessage(content=system_prompt), *state["messages"]]
    return {"messages": [langgraph_llm.invoke(prompt_messages)]}
# Single-node graph: START -> call_model -> END.
builder = StateGraph(MessagesState)
builder.add_node("call_model", call_model)
builder.add_edge(START, "call_model")
builder.add_edge("call_model", END)
flow_graph = builder.compile()
# Memory thread that persists the custom-flow conversation.
flow_memory_thread = agent_memory.create_thread(
    thread_id="langgraph_flow_memory_demo",
    user_id=user_id,
    agent_id=agent_id,
)
## Persist user context after a flow session
#%%
# First flow session: the user introduces themselves.
flow_session_1 = flow_graph.invoke(
    {
        "messages": [
            HumanMessage(
                content="I am John, a Python developer and I need help debugging a payment service."
            )
        ]
    }
)
flow_assistant_reply = flow_session_1["messages"][-1].content
print(flow_assistant_reply)
# Example output: I can help with that. What error are you seeing?
# Persist the exchange; memories are extracted automatically by default.
flow_memory_thread.add_messages(
    [
        {
            "role": "user",
            "content": "I am John, a Python developer and I need help debugging a payment service.",
        },
        {
            "role": "assistant",
            "content": flow_assistant_reply,
        },
    ]
)
# Also store an explicit durable memory record.
flow_memory_thread.add_memory("The user is John, a Python developer.")
## Reuse memory in a new flow session
#%%
# Later flow session: reopen the same memory thread before invoking the graph.
flow_memory_thread = agent_memory.get_thread("langgraph_flow_memory_demo")
flow_session_2 = flow_graph.invoke(
    {
        "messages": [
            HumanMessage(content="Who am I?")
        ]
    }
)
flow_remembered_reply = flow_session_2["messages"][-1].content
print(flow_remembered_reply)
# Example output: The user is John, a Python developer.
## Disable automatic memory extraction
#%%
# With extract_memories=False the client only persists messages; no llm is
# passed because automatic memory extraction is disabled.
manual_agent_memory = OracleAgentMemory(
    connection=db_pool,
    embedder=embedder,
    extract_memories=False,
)
manual_memory_thread = manual_agent_memory.create_thread(
    thread_id="langgraph_manual_memory_demo",
    user_id=user_id,
    agent_id=agent_id,
)
manual_memory_thread.add_messages(
    [
        {
            "role": "user",
            "content": "Please remember that I prefer concise code reviews.",
        },
        {
            "role": "assistant",
            "content": "Understood. I will keep responses concise.",
        },
    ]
)
# Durable memories must now be written explicitly.
manual_memory_thread.add_memory("The user prefers concise code reviews.")