# Examples

from yantrikdb import YantrikDB
from sentence_transformers import SentenceTransformer

db = YantrikDB("./my_app.db")
db.set_embedder(SentenceTransformer("all-MiniLM-L6-v2"))

# Store memories
db.record(text="User prefers dark mode", importance=0.8, domain="preference")
db.record(text="Project deadline is March 30", importance=0.9, domain="work")
db.record(text="Alice is the team lead", importance=0.7, domain="people")

# Recall relevant memories
results = db.recall(query="who leads the team?", top_k=3)
for r in results:
    print(f"[{r['score']:.2f}] {r['text']}")
# [1.45] Alice is the team lead

# Create relationships
db.relate("Alice", "Backend Team", "leads")
db.relate("Alice", "Acme Corp", "works_at")
db.relate("Backend Team", "FastAPI", "uses")

# Query connections
edges = db.get_edges("Alice")
# [{'src': 'Alice', 'dst': 'Backend Team', 'rel_type': 'leads'},
#  {'src': 'Alice', 'dst': 'Acme Corp', 'rel_type': 'works_at'}]

# Get entity intelligence
profile = db.entity_profile("Alice")

# Detect contradictions, consolidate redundant memories, mine patterns
result = db.think()
print(f"Consolidated: {result['consolidation_count']}")
print(f"Conflicts found: {result['conflicts_found']}")

# Check for stale important memories
stale = db.stale(days=30)
for m in stale:
    print(f"Needs review: {m['text']}")

# Check upcoming deadlines
upcoming = db.upcoming(days=7)
for m in upcoming:
    print(f"Due soon: {m['text']}")

# Record a strategy
db.record_procedural(
    text="For FastAPI auth, use OAuth2PasswordBearer with JWT tokens",
    domain="work",
)

# Later, surface relevant procedures
# (returns strategies ranked by effectiveness)
procedures = db.surface_procedural(query="how to add authentication")

# Reinforce what worked
db.reinforce_procedural(rid=procedures[0]["rid"], outcome=0.9)

The MCP server gives any MCP-compatible agent persistent memory with zero code changes. Just configure and go.

~/.claude/mcp.json
{
  "mcpServers": {
    "yantrikdb": {
      "command": "yantrikdb-mcp"
    }
  }
}

Now your agent automatically remembers, recalls, and flags contradictions — for example:

You: We decided to use PostgreSQL for the new service
Agent: [auto-remembers: "Decision: use PostgreSQL for new service", importance: 0.8]
// ... 3 weeks later, new conversation ...
You: What database did we pick for the new service?
Agent: [auto-recalls] You decided on PostgreSQL for the new service.
// Session 1
You: I'm Alex, backend engineer. We're building a task manager with FastAPI.
Agent: [stores: name, role, project, stack]
// Session 2 (days later)
You: Can you help me add auth to the API?
Agent: [recalls: Alex, FastAPI project, task manager]
Sure Alex! For your FastAPI task manager, I'd recommend
OAuth2PasswordBearer with JWT tokens...
// Session 1
You: We're using PostgreSQL for the database
// Session 5
You: We switched to MySQL last week
Agent: [think() detects conflict]
I notice a contradiction — you previously said PostgreSQL,
but now you're saying MySQL. Which is current?

Cortex wraps YantrikDB with personality traits, bond evolution, and context assembly for OpenClaw agents.

from yantrik_memory import YantrikMemory

mem = YantrikMemory()

# One call does everything:
# - Recalls relevant memories
# - Evolves personality traits
# - Updates relationship bond
# - Assembles full LLM context
context = mem.process_turn(
    agent_id="assistant",
    user_id="alex",
    message="That's really helpful, thanks! Keep it concise.",
)
print(context["traits"])
# {'humor': 0.5, 'empathy': 0.5, 'helpfulness': 0.55,
#  'conciseness': 0.55, ...}
print(context["bond"])
# {'level': 'acquaintance', 'score': 0.08,
#  'interaction_count': 4}
print(context["personality_guidance"])
# "Be more helpfulness in responses. Be more conciseness in responses."

# Traits evolve per (agent, user) pair: user A likes humor
mem.evolve_traits("assistant", "alex", "haha that's hilarious!")
traits_a = mem.get_traits("assistant", "alex")
# {'humor': 0.55, ...}

# User B prefers seriousness
mem.evolve_traits("assistant", "jordan", "please be serious")
traits_b = mem.get_traits("assistant", "jordan")
# {'humor': 0.43, ...} (negative signals weighted 33% heavier)

# Bond evolution: first interaction
bond = mem.update_bond("assistant", "alex", "hello!")
# {'level': 'stranger', 'score': 0.02, 'milestones': ['First interaction']}
# After 25 interactions:
# {'level': 'familiar', 'score': 0.38,
#  'milestones': ['First interaction', 'Getting acquainted',
#                 'Building rapport', 'Familiar face']}
# After 100+ interactions:
# {'level': 'trusted', 'score': 0.78,
#  'milestones': [..., 'Trusted partner']}
Turn 1:
Alex: I'm a backend engineer building a task manager with FastAPI.
Agent: Hi Alex. What API help do you need?
[stored: name, role, project, stack]
Turn 2:
Alex: I always use PostgreSQL for prod, SQLite for dev.
Agent: [recalls FastAPI project, provides SQLAlchemy setup
with exactly those backends]
[stored: database preferences]
Turn 3:
Alex: You're really helpful! What do you know about me?
Agent: Backend engineer, building task manager, FastAPI,
PostgreSQL for prod, SQLite for dev.
[traits: helpfulness → 0.55]
[bond: stranger → acquaintance]

Terminal window
# Each project gets its own memory
# YANTRIKDB_DB_PATH selects the database file the MCP server opens,
# so running one server per project isolates their memories.
YANTRIKDB_DB_PATH=./project-a/memory.db yantrikdb-mcp
YANTRIKDB_DB_PATH=./project-b/memory.db yantrikdb-mcp
from yantrik_memory import YantrikMemory

# Opt in to at-rest encryption when constructing the store.
mem = YantrikMemory({"encryption_enabled": True})

# Sensitive data encrypted at rest with Fernet
mem.remember(
    agent_id="assistant",
    content="API key: sk-1234567890",
    memory_kind="secret",
    content_encrypted=True,
)
# Operational snapshot of the store — counts suitable for monitoring.
db = YantrikDB("./memory.db")

# Stats for monitoring
snapshot = db.stats()
for label, key in (
    ("Active", "active_memories"),
    ("Entities", "entities"),
    ("Conflicts", "open_conflicts"),
):
    print(f"{label}: {snapshot[key]}")