Overview
LangGraph lets you build stateful, multi-step agent workflows as directed graphs. By integrating Cognis as dedicated graph nodes, you can add persistent memory retrieval and storage as first-class steps in your agent pipeline. What you’ll build: a support chatbot with a retrieve → respond → store graph that automatically recalls customer history and persists new interactions.
Integration pattern: Graph nodes — dedicated nodes for memory retrieval and storage in explicit control flow.
Prerequisites
- Hosted (lyzr-adk)
- Open Source (lyzr-cognis)
pip install lyzr-adk langgraph langchain-openai
export LYZR_API_KEY="your-lyzr-api-key"
export OPENAI_API_KEY="your-openai-api-key"
pip install lyzr-cognis langgraph langchain-openai
export GEMINI_API_KEY="your-gemini-key"
export OPENAI_API_KEY="your-openai-api-key"
Quick Start
- Hosted (lyzr-adk)
- Open Source (lyzr-cognis)
from langgraph.graph import StateGraph, END
from lyzr import Cognis, CognisMessage

cog = Cognis()


def retrieve(state):
    """Pull the memories most relevant to the incoming message."""
    hits = cog.search(query=state["user_input"], owner_id=state["owner_id"], limit=5)
    bullets = [f"- {hit.content}" for hit in hits]
    return {"memory_context": "\n".join(bullets)}


def respond(state):
    """Produce the assistant reply (plug your LLM call in here)."""
    # Your LLM call here, using state["memory_context"]
    return {"response": "..."}


def store(state):
    """Write the completed user/assistant exchange back into Cognis."""
    turn = [
        CognisMessage(role="user", content=state["user_input"]),
        CognisMessage(role="assistant", content=state["response"]),
    ]
    cog.add(messages=turn, owner_id=state["owner_id"])
    return {}


# Wire the three nodes into a linear retrieve → respond → store pipeline.
graph = StateGraph(dict)
for name, node in (("retrieve", retrieve), ("respond", respond), ("store", store)):
    graph.add_node(name, node)
graph.set_entry_point("retrieve")
graph.add_edge("retrieve", "respond")
graph.add_edge("respond", "store")
graph.add_edge("store", END)
app = graph.compile()
from langgraph.graph import StateGraph, END
from cognis import Cognis

m = Cognis(owner_id="default_user")


def retrieve(state):
    """Look up memories relevant to the new user message."""
    found = m.search(state["user_input"], owner_id=state.get("owner_id"), limit=5)
    lines = [f"- {item['content']}" for item in found["results"]]
    return {"memory_context": "\n".join(lines)}


def respond(state):
    """Produce the assistant reply (plug your LLM call in here)."""
    # Your LLM call here, using state["memory_context"]
    return {"response": "..."}


def store(state):
    """Persist the completed user/assistant exchange."""
    exchange = [
        {"role": "user", "content": state["user_input"]},
        {"role": "assistant", "content": state["response"]},
    ]
    m.add(exchange, owner_id=state.get("owner_id"))
    return {}


# Wire the three nodes into a linear retrieve → respond → store pipeline.
graph = StateGraph(dict)
for name, node in (("retrieve", retrieve), ("respond", respond), ("store", store)):
    graph.add_node(name, node)
graph.set_entry_point("retrieve")
graph.add_edge("retrieve", "respond")
graph.add_edge("respond", "store")
graph.add_edge("store", END)
app = graph.compile()
Complete Example: Support Chatbot
Step 1: Define the State
- Hosted (lyzr-adk)
- Open Source (lyzr-cognis)
import operator
from typing import Annotated, List, TypedDict

from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, END
from lyzr import Cognis, CognisMessage

# Shared clients used by every graph node below.
cog = Cognis()
llm = ChatOpenAI(model="gpt-4o")


class ChatState(TypedDict):
    """State object passed between graph nodes."""

    messages: Annotated[List, operator.add]  # transcript; updates are append-merged
    user_input: str       # latest user message
    owner_id: str         # customer identity for memory scoping
    session_id: str       # conversation session
    memory_context: str   # retrieved history, filled by the retrieve node
    response: str         # assistant reply, filled by the respond node
import operator
from typing import Annotated, List, TypedDict

from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, END
from cognis import Cognis

# Shared clients used by every graph node below.
m = Cognis(owner_id="default_user", agent_id="support_langgraph")
llm = ChatOpenAI(model="gpt-4o")


class ChatState(TypedDict):
    """State object passed between graph nodes."""

    messages: Annotated[List, operator.add]  # transcript; updates are append-merged
    user_input: str       # latest user message
    owner_id: str         # customer identity for memory scoping
    session_id: str       # conversation session
    memory_context: str   # retrieved history, filled by the retrieve node
    response: str         # assistant reply, filled by the respond node
Step 2: Define Graph Nodes
- Hosted (lyzr-adk)
- Open Source (lyzr-cognis)
def retrieve_context(state: ChatState) -> dict:
    """Search Cognis for relevant customer history."""
    hits = cog.search(query=state["user_input"], owner_id=state["owner_id"], limit=5)
    if not hits:
        return {"memory_context": "No previous interactions found."}
    # One bullet per recalled memory.
    return {"memory_context": "\n".join(f"- {hit.content}" for hit in hits)}
def generate_response(state: ChatState) -> dict:
    """Generate a response using the LLM with memory context."""
    # Recalled history is injected through the system prompt.
    prompt = [
        SystemMessage(
            content=(
                "You are a helpful product support agent. Use the customer's "
                f"history to provide personalized assistance.\n\n"
                f"Customer history:\n{state['memory_context']}"
            )
        ),
        *state["messages"],
        HumanMessage(content=state["user_input"]),
    ]
    answer = llm.invoke(prompt)
    # Append both sides of this turn to the transcript (operator.add merge).
    turn = [HumanMessage(content=state["user_input"]), AIMessage(content=answer.content)]
    return {"response": answer.content, "messages": turn}
def store_memory(state: ChatState) -> dict:
    """Persist the interaction in Cognis."""
    exchange = [
        CognisMessage(role="user", content=state["user_input"]),
        CognisMessage(role="assistant", content=state["response"]),
    ]
    cog.add(
        messages=exchange,
        owner_id=state["owner_id"],
        session_id=state["session_id"],
        agent_id="support_langgraph",
    )
    # Storage has no state to report back.
    return {}
def retrieve_context(state: ChatState) -> dict:
    """Search Cognis for relevant customer history."""
    found = m.search(state["user_input"], owner_id=state["owner_id"], limit=5)
    records = found["results"]
    if not records:
        return {"memory_context": "No previous interactions found."}
    # One bullet per recalled memory.
    return {"memory_context": "\n".join(f"- {rec['content']}" for rec in records)}
def generate_response(state: ChatState) -> dict:
    """Generate a response using the LLM with memory context."""
    # Recalled history is injected through the system prompt.
    prompt = [
        SystemMessage(
            content=(
                "You are a helpful product support agent. Use the customer's "
                f"history to provide personalized assistance.\n\n"
                f"Customer history:\n{state['memory_context']}"
            )
        ),
        *state["messages"],
        HumanMessage(content=state["user_input"]),
    ]
    answer = llm.invoke(prompt)
    # Append both sides of this turn to the transcript (operator.add merge).
    turn = [HumanMessage(content=state["user_input"]), AIMessage(content=answer.content)]
    return {"response": answer.content, "messages": turn}
def store_memory(state: ChatState) -> dict:
    """Persist the interaction in Cognis."""
    exchange = [
        {"role": "user", "content": state["user_input"]},
        {"role": "assistant", "content": state["response"]},
    ]
    m.add(exchange, owner_id=state["owner_id"], session_id=state["session_id"])
    # Storage has no state to report back.
    return {}
Step 3: Add Conditional Edges
Skip storing trivial messages (greetings, single-word replies):def should_store(state: ChatState) -> str:
"""Route: store meaningful messages, skip trivial ones."""
trivial = {"hi", "hello", "hey", "thanks", "bye", "ok"}
if state["user_input"].strip().lower() in trivial:
return "skip"
return "store"
Step 4: Build and Compile the Graph
graph = StateGraph(ChatState)
graph.add_node("retrieve", retrieve_context)
graph.add_node("respond", generate_response)
graph.add_node("store", store_memory)

graph.set_entry_point("retrieve")
graph.add_edge("retrieve", "respond")
# Persist only meaningful turns; trivial ones end the run right after responding.
graph.add_conditional_edges("respond", should_store, {"store": "store", "skip": END})
graph.add_edge("store", END)

app = graph.compile()
Step 5: Run Conversations
def run_turn(prior_messages, text, session_id):
    """Invoke the compiled graph for one user turn and print the reply."""
    out = app.invoke({
        "messages": prior_messages,
        "user_input": text,
        "owner_id": "customer_42",
        "session_id": session_id,
        "memory_context": "",
        "response": "",
    })
    print(out["response"])
    return out


# First interaction
result = run_turn([], "I'm having trouble with my Pro subscription.", "support_session_1")

# Second interaction — memory recalls the subscription issue
result = run_turn(
    result["messages"],
    "I tried logging out and back in but it still doesn't work.",
    "support_session_1",
)

# New session — Cognis remembers the customer
result = run_turn([], "Hi, I had issues last time. Is everything fixed?", "support_session_2")
# Will reference the previous Pro subscription issue
Advanced Patterns
Using Context API in a Graph Node
- Hosted (lyzr-adk)
- Open Source (lyzr-cognis)
def retrieve_context_v2(state: ChatState) -> dict:
    """Use cog.context() for server-assembled context."""
    assembled = cog.context(
        current_messages=[CognisMessage(role="user", content=state["user_input"])],
        owner_id=state["owner_id"],
        session_id=state["session_id"],
        enable_long_term_memory=True,
        cross_session=True,  # recall memories from other sessions of this owner
    )
    # The assembled context object is stringified for the prompt.
    return {"memory_context": str(assembled)}
def retrieve_context_v2(state: ChatState) -> dict:
    """Use get_context for assembled context."""
    bundle = m.get_context(
        messages=[{"role": "user", "content": state["user_input"]}],
        owner_id=state["owner_id"],
        session_id=state["session_id"],
    )
    # get_context returns a dict; the ready-to-use prompt text lives here.
    return {"memory_context": bundle["context_string"]}
Async Graph Nodes
Async methods (`aadd`, `asearch`) are hosted-only. Open-source Cognis is sync only.

# Hosted only
async def retrieve_context_async(state: ChatState) -> dict:
    """Async memory lookup via the hosted SDK."""
    hits = await cog.asearch(query=state["user_input"], owner_id=state["owner_id"], limit=5)
    if not hits:
        return {"memory_context": ""}
    return {"memory_context": "\n".join(f"- {hit.content}" for hit in hits)}
async def store_memory_async(state: ChatState) -> dict:
    """Async persistence of the finished exchange via the hosted SDK."""
    exchange = [
        CognisMessage(role="user", content=state["user_input"]),
        CognisMessage(role="assistant", content=state["response"]),
    ]
    await cog.aadd(
        messages=exchange,
        owner_id=state["owner_id"],
        session_id=state["session_id"],
    )
    return {}
Next Steps
Cognis + LangChain
Simpler LCEL chain integration
Cognis + Agno
Tool-based memory for autonomous agents