使用 LangGraph 的代理程式記憶體

在本文中,您將瞭解如何以兩種方式將「代理程式記憶體」連線到 LangGraph:使用預建的 ReAct 代理程式,以及建立自訂的 StateGraph 流程。

秘訣:如需設定套裝程式,請參閱開始使用代理程式記憶體。如果此範例需要本機 Oracle AI Database,請參閱在本機執行 Oracle AI Database。

代理程式記憶體設定

從設定「代理程式記憶體」從屬端、LangGraph 交談模型以及可重複使用的 search_memory 工具開始。「代理程式記憶體」從屬端也會使用自己的 LLM,定期從最近的繫線訊息擷取持久的記憶體,而 LangGraph 模型則負責處理代理程式回應和工具使用。

from typing import Annotated

from langchain.agents import create_agent
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, MessagesState, StateGraph

from oracleagentmemory.core.embedders.embedder import Embedder
from oracleagentmemory.core.llms.llm import Llm
from oracleagentmemory.core.oracleagentmemory import OracleAgentMemory

#Embedding model used by Agent Memory to vectorize messages and memories
#for semantic search. Replace the placeholders with real credentials.
embedder = Embedder(
    model="YOUR_EMBEDDING_MODEL",
    api_base="YOUR_EMBEDDING_API_BASE",
    api_key="YOUR_EMBEDDING_API_KEY",
)
#LLM used internally by Agent Memory to extract durable memories from
#recent thread messages (separate from the LangGraph chat model below).
llm = Llm(
    model="gpt-4.1-mini",
    api_key="YOUR_OPENAI_API_KEY",
)
db_pool = ...  #an oracledb connection or connection pool
#Chat model that drives the LangGraph agent's responses and tool calls.
langgraph_llm = ChatOpenAI(
    model="gpt-4.1-mini",
    api_key="YOUR_OPENAI_API_KEY",
)


#Keep these identifiers stable for the same assistant and end user so memory
#is scoped consistently across threads and sessions.
agent_id = "support_agent"
user_id = "user_123"
#Agent Memory client: persists thread messages and extracted memories in
#the Oracle database behind db_pool.
agent_memory = OracleAgentMemory(
    connection=db_pool,
    embedder=embedder,
    llm=llm,
)


@tool
def search_memory(
    query: Annotated[str, "Question to search in Oracle Agent Memory"],
) -> Annotated[str, "Top matching durable memory content"]:
    """Search Oracle Agent Memory for durable user facts relevant to the current request."""
    #Scope the lookup to this agent/user pair and durable "memory" records only.
    matches = agent_memory.search(
        query=query,
        user_id=user_id,
        agent_id=agent_id,
        max_results=3,
        record_types=["memory"],
    )
    if matches:
        return "\n".join(match.content for match in matches)
    return "No relevant memory found."


def _latest_user_message(state: MessagesState) -> str:
    for message in reversed(state["messages"]):
        if getattr(message, "type", None) == "human":
            return str(message.content)
        if getattr(message, "role", None) == "user":
            return str(message.content)
    return ""


def _build_memory_context(query: str) -> str:
    """Format the top durable-memory hits as a bulleted block for the system prompt."""
    hits = agent_memory.search(
        query=query,
        user_id=user_id,
        agent_id=agent_id,
        max_results=3,
        record_types=["memory"],
    )
    if not hits:
        return "- No relevant memory found."
    return "\n".join(f"- {hit.content}" for hit in hits)

預建的 ReAct 專員

LangChain 在 LangGraph 執行階段上提供預先建立的 ReAct 樣式代理程式。您可以將 search_memory 顯示為其中一種工具,讓代理程式決定何時應該查詢持久性記憶體。

設定預建代理程式

#Prebuilt ReAct-style agent: the model decides when to call search_memory.
react_agent = create_agent(
    model=langgraph_llm,
    tools=[search_memory],
    system_prompt=(
        "You are a support agent. When the user asks about durable facts from "
        "prior sessions, call the search_memory tool before answering."
    ),
)
#Agent Memory thread that will persist this conversation's messages.
react_memory_thread = agent_memory.create_thread(
    thread_id="langgraph_react_memory_demo",
    user_id=user_id,
    agent_id=agent_id,
)

在預先建置的 ReAct 階段作業之後保存使用者相關資訊環境

第一次執行完成後,將交換的訊息附加至「代理程式記憶體」,並儲存任何稍後應重複使用的持久性事實。

#First ReAct session: the user introduces themselves.
react_session_1 = react_agent.invoke(
    {
        "messages": [
            HumanMessage(
                content="I am John, a Python developer and I need help debugging a payment service."
            )
        ]
    }
)
#The last message in the returned state is the assistant's reply.
react_assistant_reply = react_session_1["messages"][-1].content

print(react_assistant_reply)
#I can help with that. What error are you seeing?

#add_messages will add messages to the DB and extract memories automatically
react_memory_thread.add_messages(
    [
        {
            "role": "user",
            "content": "I am John, a Python developer and I need help debugging a payment service.",
        },
        {
            "role": "assistant",
            "content": react_assistant_reply,
        },
    ]
)
#add_memory adds memory to the DB
react_memory_thread.add_memory("The user is John, a Python developer.")

在新的預建反應階段作業重複使用記憶體

當稍後執行開始時,請重新開啟相同的「代理程式記憶體」繫線,然後讓預建的代理程式在回答之前呼叫 search_memory

#Reopen the same thread in a later run; durable memories persist across sessions.
react_memory_thread = agent_memory.get_thread("langgraph_react_memory_demo")

#The agent can now answer from durable memory via the search_memory tool.
react_session_2 = react_agent.invoke(
    {
        "messages": [
            HumanMessage(content="Who am I?")
        ]
    }
)
react_remembered_reply = react_session_2["messages"][-1].content

print(react_remembered_reply)

輸出:

The user is John, a Python developer.

自訂流程

如果您需要對協調流程進行更嚴格的控制,請建立自訂的 LangGraph 流程,然後將「代理程式記憶體」結果直接插入您的模型節點中。

設定自訂流程

def call_model(state: MessagesState):
    """LangGraph node: answer the conversation with durable memory injected into the system prompt."""
    from langchain_core.messages import SystemMessage

    user_query = _latest_user_message(state)
    durable_context = _build_memory_context(user_query)
    system_text = (
        "You are a support agent. Use the durable memory below when it is "
        "relevant to the current user request.\n\n"
        f"Durable memory:\n{durable_context}"
    )
    #System message first, then the full conversation history.
    prompt = [SystemMessage(content=system_text)] + list(state["messages"])
    result = langgraph_llm.invoke(prompt)
    #MessagesState appends returned messages to the running conversation.
    return {"messages": [result]}


#Single-node graph: START -> call_model -> END.
builder = StateGraph(MessagesState)
builder.add_node("call_model", call_model)
builder.add_edge(START, "call_model")
builder.add_edge("call_model", END)
flow_graph = builder.compile()
#Separate Agent Memory thread for the custom-flow demo.
flow_memory_thread = agent_memory.create_thread(
    thread_id="langgraph_flow_memory_demo",
    user_id=user_id,
    agent_id=agent_id,
)

在流程階段作業之後保存使用者相關資訊環境

第一個流程執行完成後,將交換的訊息附加至「代理程式記憶體」,並儲存任何稍後應重複使用的持久性事實。

#First flow session: the user introduces themselves.
flow_session_1 = flow_graph.invoke(
    {
        "messages": [
            HumanMessage(
                content="I am John, a Python developer and I need help debugging a payment service."
            )
        ]
    }
)
#The last message in the returned state is the assistant's reply.
flow_assistant_reply = flow_session_1["messages"][-1].content

print(flow_assistant_reply)
#I can help with that. What error are you seeing?

#Persist the exchange; the client extracts durable memories automatically.
flow_memory_thread.add_messages(
    [
        {
            "role": "user",
            "content": "I am John, a Python developer and I need help debugging a payment service.",
        },
        {
            "role": "assistant",
            "content": flow_assistant_reply,
        },
    ]
)
#Explicitly store a durable fact as well.
flow_memory_thread.add_memory("The user is John, a Python developer.")

在新流程階段作業重複使用記憶體

當後續的流程執行開始時,請重新開啟相同的「代理程式記憶體」繫線,並讓圖表搜尋持續的記憶體,以回答先前的使用者相關資訊環境。

#Reopen the same thread in a later run; durable memories persist across sessions.
flow_memory_thread = agent_memory.get_thread("langgraph_flow_memory_demo")

#call_model searches durable memory before answering, so the graph can
#recall prior-session context.
flow_session_2 = flow_graph.invoke(
    {
        "messages": [
            HumanMessage(content="Who am I?")
        ]
    }
)
flow_remembered_reply = flow_session_2["messages"][-1].content

print(flow_remembered_reply)

輸出:

The user is John, a Python developer.

停用自動擷取

如果您只想保留訊息並手動新增持久記憶體,請使用 extract_memories=False 建立「代理程式記憶體」從屬端,然後自行寫入記憶體資料列。

#Client with automatic memory extraction disabled: messages are still stored,
#but durable memories must be written manually via add_memory. No llm is
#needed because nothing is extracted.
manual_agent_memory = OracleAgentMemory(
    connection=db_pool,
    embedder=embedder,
    extract_memories=False,
)
manual_memory_thread = manual_agent_memory.create_thread(
    thread_id="langgraph_manual_memory_demo",
    user_id=user_id,
    agent_id=agent_id,
)
#Stores the exchange only; no memories are extracted from it.
manual_memory_thread.add_messages(
    [
        {
            "role": "user",
            "content": "Please remember that I prefer concise code reviews.",
        },
        {
            "role": "assistant",
            "content": "Understood. I will keep responses concise.",
        },
    ]
)
#Manually written durable memory row.
manual_memory_thread.add_memory("The user prefers concise code reviews.")

結論

在本文中,我們學會如何使用預先建立的 ReAct 代理程式或自訂 StateGraph(MessagesState) 流程將代理程式記憶體連線至 LangGraph、在每個階段作業後保留執行緒訊息,以及在後續執行中重複使用持久記憶體。

提示:學習如何整合「代理程式記憶體」與 LangGraph 之後,您可能也對整合代理程式記憶體與 WayFlow 有興趣。

完整程式碼

#Copyright © 2026 Oracle and/or its affiliates.
#isort:skip_file
#fmt: off
#Agent Memory Code Example - Integration with LangGraph
#-------------------------------------------------------

#How to use:
#Create a new Python virtual environment and install the latest oracleagentmemory version.

#You can now run the script
#1. As a Python file:
#```bash
#python integration_with_langgraph.py
#```
#2. As a Notebook (in VSCode):
#When viewing the file,
#- press the keys Ctrl + Enter to run the selected cell
#- or Shift + Enter to run the selected cell and move to the cell below


##Configure Oracle Memory and LangGraph setup

#%%
from typing import Annotated

from langchain.agents import create_agent
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, MessagesState, StateGraph

from oracleagentmemory.core.embedders.embedder import Embedder
from oracleagentmemory.core.llms.llm import Llm
from oracleagentmemory.core.oracleagentmemory import OracleAgentMemory

#Embedding model used by Agent Memory to vectorize messages and memories
#for semantic search. Replace the placeholders with real credentials.
embedder = Embedder(
    model="YOUR_EMBEDDING_MODEL",
    api_base="YOUR_EMBEDDING_API_BASE",
    api_key="YOUR_EMBEDDING_API_KEY",
)
#LLM used internally by Agent Memory to extract durable memories from
#recent thread messages (separate from the LangGraph chat model below).
llm = Llm(
    model="gpt-4.1-mini",
    api_key="YOUR_OPENAI_API_KEY",
)
db_pool = ...  #an oracledb connection or connection pool
#Chat model that drives the LangGraph agent's responses and tool calls.
langgraph_llm = ChatOpenAI(
    model="gpt-4.1-mini",
    api_key="YOUR_OPENAI_API_KEY",
)


#Keep these identifiers stable for the same assistant and end user so memory
#is scoped consistently across threads and sessions.
agent_id = "support_agent"
user_id = "user_123"
#Agent Memory client: persists thread messages and extracted memories in
#the Oracle database behind db_pool.
agent_memory = OracleAgentMemory(
    connection=db_pool,
    embedder=embedder,
    llm=llm,
)


@tool
def search_memory(
    query: Annotated[str, "Question to search in Oracle Agent Memory"],
) -> Annotated[str, "Top matching durable memory content"]:
    """Search Oracle Agent Memory for durable user facts relevant to the current request."""
    #Scope the lookup to this agent/user pair and durable "memory" records only.
    matches = agent_memory.search(
        query=query,
        user_id=user_id,
        agent_id=agent_id,
        max_results=3,
        record_types=["memory"],
    )
    if matches:
        return "\n".join(match.content for match in matches)
    return "No relevant memory found."


def _latest_user_message(state: MessagesState) -> str:
    for message in reversed(state["messages"]):
        if getattr(message, "type", None) == "human":
            return str(message.content)
        if getattr(message, "role", None) == "user":
            return str(message.content)
    return ""


def _build_memory_context(query: str) -> str:
    """Format the top durable-memory hits as a bulleted block for the system prompt."""
    hits = agent_memory.search(
        query=query,
        user_id=user_id,
        agent_id=agent_id,
        max_results=3,
        record_types=["memory"],
    )
    if not hits:
        return "- No relevant memory found."
    return "\n".join(f"- {hit.content}" for hit in hits)


##Configure a prebuilt LangGraph ReAct agent

#%%
#Prebuilt ReAct-style agent: the model decides when to call search_memory.
react_agent = create_agent(
    model=langgraph_llm,
    tools=[search_memory],
    system_prompt=(
        "You are a support agent. When the user asks about durable facts from "
        "prior sessions, call the search_memory tool before answering."
    ),
)
#Agent Memory thread that will persist this conversation's messages.
react_memory_thread = agent_memory.create_thread(
    thread_id="langgraph_react_memory_demo",
    user_id=user_id,
    agent_id=agent_id,
)


##Persist user context after a prebuilt ReAct session

#%%
#First ReAct session: the user introduces themselves.
react_session_1 = react_agent.invoke(
    {
        "messages": [
            HumanMessage(
                content="I am John, a Python developer and I need help debugging a payment service."
            )
        ]
    }
)
#The last message in the returned state is the assistant's reply.
react_assistant_reply = react_session_1["messages"][-1].content

print(react_assistant_reply)
#I can help with that. What error are you seeing?

#Persist the exchange; the client extracts durable memories automatically.
react_memory_thread.add_messages(
    [
        {
            "role": "user",
            "content": "I am John, a Python developer and I need help debugging a payment service.",
        },
        {
            "role": "assistant",
            "content": react_assistant_reply,
        },
    ]
)
#Explicitly store a durable fact as well.
react_memory_thread.add_memory("The user is John, a Python developer.")


##Reuse memory in a new prebuilt ReAct session

#%%
#Reopen the same thread in a later run; durable memories persist across sessions.
react_memory_thread = agent_memory.get_thread("langgraph_react_memory_demo")

#The agent can now answer from durable memory via the search_memory tool.
react_session_2 = react_agent.invoke(
    {
        "messages": [
            HumanMessage(content="Who am I?")
        ]
    }
)
react_remembered_reply = react_session_2["messages"][-1].content

print(react_remembered_reply)
#The user is John, a Python developer.


##Configure a custom LangGraph flow

#%%
def call_model(state: MessagesState):
    """LangGraph node: answer the conversation with durable memory injected into the system prompt."""
    from langchain_core.messages import SystemMessage

    user_query = _latest_user_message(state)
    durable_context = _build_memory_context(user_query)
    system_text = (
        "You are a support agent. Use the durable memory below when it is "
        "relevant to the current user request.\n\n"
        f"Durable memory:\n{durable_context}"
    )
    #System message first, then the full conversation history.
    prompt = [SystemMessage(content=system_text)] + list(state["messages"])
    result = langgraph_llm.invoke(prompt)
    #MessagesState appends returned messages to the running conversation.
    return {"messages": [result]}


#Single-node graph: START -> call_model -> END.
builder = StateGraph(MessagesState)
builder.add_node("call_model", call_model)
builder.add_edge(START, "call_model")
builder.add_edge("call_model", END)
flow_graph = builder.compile()
#Separate Agent Memory thread for the custom-flow demo.
flow_memory_thread = agent_memory.create_thread(
    thread_id="langgraph_flow_memory_demo",
    user_id=user_id,
    agent_id=agent_id,
)


##Persist user context after a flow session

#%%
#First flow session: the user introduces themselves.
flow_session_1 = flow_graph.invoke(
    {
        "messages": [
            HumanMessage(
                content="I am John, a Python developer and I need help debugging a payment service."
            )
        ]
    }
)
#The last message in the returned state is the assistant's reply.
flow_assistant_reply = flow_session_1["messages"][-1].content

print(flow_assistant_reply)
#I can help with that. What error are you seeing?

#Persist the exchange; the client extracts durable memories automatically.
flow_memory_thread.add_messages(
    [
        {
            "role": "user",
            "content": "I am John, a Python developer and I need help debugging a payment service.",
        },
        {
            "role": "assistant",
            "content": flow_assistant_reply,
        },
    ]
)
#Explicitly store a durable fact as well.
flow_memory_thread.add_memory("The user is John, a Python developer.")


##Reuse memory in a new flow session

#%%
#Reopen the same thread in a later run; durable memories persist across sessions.
flow_memory_thread = agent_memory.get_thread("langgraph_flow_memory_demo")

#call_model searches durable memory before answering, so the graph can
#recall prior-session context.
flow_session_2 = flow_graph.invoke(
    {
        "messages": [
            HumanMessage(content="Who am I?")
        ]
    }
)
flow_remembered_reply = flow_session_2["messages"][-1].content

print(flow_remembered_reply)
#The user is John, a Python developer.


##Disable automatic memory extraction

#%%
#Client with automatic memory extraction disabled: messages are still stored,
#but durable memories must be written manually via add_memory. No llm is
#needed because nothing is extracted.
manual_agent_memory = OracleAgentMemory(
    connection=db_pool,
    embedder=embedder,
    extract_memories=False,
)
manual_memory_thread = manual_agent_memory.create_thread(
    thread_id="langgraph_manual_memory_demo",
    user_id=user_id,
    agent_id=agent_id,
)
#Stores the exchange only; no memories are extracted from it.
manual_memory_thread.add_messages(
    [
        {
            "role": "user",
            "content": "Please remember that I prefer concise code reviews.",
        },
        {
            "role": "assistant",
            "content": "Understood. I will keep responses concise.",
        },
    ]
)
#Manually written durable memory row.
manual_memory_thread.add_memory("The user prefers concise code reviews.")