Skip to content

Commit 11f3712

Browse files
frederikb96 authored and claude committed
Add configurable default for infer parameter
Implements a configurable default value for the infer parameter across memory operations, allowing users to control whether LLM processing is enabled by default.

Configuration:
- Added default_infer field to mem0 MemoryConfig (default: True)
- Added default_infer to OpenMemory API configuration schema
- Configuration UI toggle in settings page under "Default Memory Processing Settings"
- Loaded from database configuration with proper None/False handling

API Changes:
- MCP add_memories: infer parameter now optional (Optional[bool])
- REST create_memory: infer parameter now optional with Field documentation
- When infer=None, applies memory_client.config.default_infer
- Updated tool descriptions to document infer parameter behavior

Behavior:
- infer=True: LLM extracts semantic facts and deduplicates
- infer=False: Stores exact verbatim text without transformation
- infer=None: Uses configured default_infer value (default: True)

This allows users to set their preferred default behavior while still being able to override on a per-call basis.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <[email protected]>
1 parent 639d26e commit 11f3712

File tree

6 files changed

+87
-17
lines changed

6 files changed

+87
-17
lines changed

mem0/configs/base.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,10 @@ class MemoryConfig(BaseModel):
6464
description="Custom prompt for the update memory",
6565
default=None,
6666
)
67+
default_infer: bool = Field(
68+
description="Default value for infer when not specified in API call",
69+
default=True
70+
)
6771

6872

6973
class AzureConfig(BaseModel):

openmemory/api/app/mcp_server.py

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
import json
2121
import logging
2222
import uuid
23+
from typing import Optional
2324

2425
from app.database import SessionLocal
2526
from app.models import Memory, MemoryAccessLog, MemoryState, MemoryStatusHistory
@@ -57,8 +58,8 @@ def get_memory_client_safe():
5758
# Initialize SSE transport
5859
sse = SseServerTransport("/mcp/messages/")
5960

60-
@mcp.tool(description="Add a new memory. This method is called everytime the user informs anything about themselves, their preferences, or anything that has any relevant information which can be useful in the future conversation. This can also be called when the user asks you to remember something.")
61-
async def add_memories(text: str) -> str:
61+
@mcp.tool(description="Add a new memory. This method is called everytime the user informs anything about themselves, their preferences, or anything that has any relevant information which can be useful in the future conversation. This can also be called when the user asks you to remember something. The 'infer' parameter controls processing: True (default) = LLM extracts semantic facts and deduplicates; False = stores exact verbatim text without transformation.")
62+
async def add_memories(text: str, infer: Optional[bool] = None) -> str:
6263
uid = user_id_var.get(None)
6364
client_name = client_name_var.get(None)
6465

@@ -82,12 +83,17 @@ async def add_memories(text: str) -> str:
8283
if not app.is_active:
8384
return f"Error: App {app.name} is currently paused on OpenMemory. Cannot create new memories."
8485

86+
# Apply default from config if not specified
87+
infer_value = infer if infer is not None else memory_client.config.default_infer
88+
8589
response = memory_client.add(text,
8690
user_id=uid,
8791
metadata={
88-
"source_app": "openmemory",
89-
"mcp_client": client_name,
90-
})
92+
"source_app": "openmemory",
93+
"mcp_client": client_name
94+
},
95+
infer=infer_value
96+
)
9197

9298
# Process the response and update database
9399
if isinstance(response, dict) and 'results' in response:

openmemory/api/app/routers/config.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,12 @@ class Mem0Config(BaseModel):
4141
llm: Optional[LLMProvider] = None
4242
embedder: Optional[EmbedderProvider] = None
4343
vector_store: Optional[VectorStoreProvider] = None
44+
default_infer: Optional[bool] = Field(
45+
None,
46+
description="Default value for infer parameter when not specified in API/MCP calls. "
47+
"When True: enables LLM fact extraction and deduplication. "
48+
"When False: stores verbatim text without transformation."
49+
)
4450

4551
class ConfigSchema(BaseModel):
4652
openmemory: Optional[OpenMemoryConfig] = None
@@ -69,7 +75,8 @@ def get_default_configuration():
6975
"api_key": "env:OPENAI_API_KEY"
7076
}
7177
},
72-
"vector_store": None
78+
"vector_store": None,
79+
"default_infer": True
7380
}
7481
}
7582

openmemory/api/app/routers/memories.py

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
from fastapi import APIRouter, Depends, HTTPException, Query
2121
from fastapi_pagination import Page, Params
2222
from fastapi_pagination.ext.sqlalchemy import paginate as sqlalchemy_paginate
23-
from pydantic import BaseModel
23+
from pydantic import BaseModel, Field
2424
from sqlalchemy import func
2525
from sqlalchemy.orm import Session, joinedload
2626

@@ -213,7 +213,13 @@ class CreateMemoryRequest(BaseModel):
213213
user_id: str
214214
text: str
215215
metadata: dict = {}
216-
infer: bool = True
216+
infer: Optional[bool] = Field(
217+
None,
218+
description="Enable LLM processing for fact extraction and deduplication. "
219+
"When True: content is analyzed and transformed into semantic facts. "
220+
"When False: stores exact verbatim text without transformation. "
221+
"When None: uses default from server configuration (default_infer)."
222+
)
217223
app: str = "openmemory"
218224

219225

@@ -254,6 +260,9 @@ async def create_memory(
254260
"error": str(client_error)
255261
}
256262

263+
# Apply default from config if not specified
264+
infer_value = request.infer if request.infer is not None else memory_client.config.default_infer
265+
257266
# Try to save to Qdrant via memory_client
258267
try:
259268
qdrant_response = memory_client.add(
@@ -263,7 +272,7 @@ async def create_memory(
263272
"source_app": "openmemory",
264273
"mcp_client": request.app,
265274
},
266-
infer=request.infer
275+
infer=infer_value
267276
)
268277

269278
# Log the response for debugging

openmemory/api/app/utils/memory.py

Lines changed: 19 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -304,10 +304,11 @@ def get_memory_client(custom_instructions: str = None):
304304
try:
305305
# Start with default configuration
306306
config = get_default_memory_config()
307-
308-
# Variable to track custom instructions
307+
308+
# Variables to track custom prompts and defaults from database
309309
db_custom_instructions = None
310-
310+
db_default_infer = None
311+
311312
# Load configuration from database
312313
try:
313314
db = SessionLocal()
@@ -316,14 +317,19 @@ def get_memory_client(custom_instructions: str = None):
316317
if db_config:
317318
json_config = db_config.value
318319

319-
# Extract custom instructions from openmemory settings
320-
if "openmemory" in json_config and "custom_instructions" in json_config["openmemory"]:
321-
db_custom_instructions = json_config["openmemory"]["custom_instructions"]
322-
320+
# Extract custom prompts from openmemory settings
321+
if "openmemory" in json_config:
322+
if "custom_instructions" in json_config["openmemory"]:
323+
db_custom_instructions = json_config["openmemory"]["custom_instructions"]
324+
323325
# Override defaults with configurations from the database
324326
if "mem0" in json_config:
325327
mem0_config = json_config["mem0"]
326-
328+
329+
# Extract default flags from mem0 config
330+
if "default_infer" in mem0_config:
331+
db_default_infer = mem0_config["default_infer"]
332+
327333
# Update LLM configuration if available
328334
if "llm" in mem0_config and mem0_config["llm"] is not None:
329335
config["llm"] = mem0_config["llm"]
@@ -357,6 +363,11 @@ def get_memory_client(custom_instructions: str = None):
357363
if instructions_to_use:
358364
config["custom_fact_extraction_prompt"] = instructions_to_use
359365

366+
# Use database value for default_infer
367+
# Note: Must use 'is not None' check to properly handle False value
368+
if db_default_infer is not None:
369+
config["default_infer"] = db_default_infer
370+
360371
# ALWAYS parse environment variables in the final config
361372
# This ensures that even default config values like "env:OPENAI_API_KEY" get parsed
362373
print("Parsing environment variables in final config...")

openmemory/ui/components/form-view.tsx

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,16 @@ export function FormView({ settings, onChange }: FormViewProps) {
9797
})
9898
}
9999

100+
const handleMem0ConfigChange = (key: string, value: any) => {
101+
onChange({
102+
...settings,
103+
mem0: {
104+
...settings.mem0,
105+
[key]: value,
106+
},
107+
})
108+
}
109+
100110
const needsLlmApiKey = settings.mem0?.llm?.provider?.toLowerCase() !== "ollama"
101111
const needsEmbedderApiKey = settings.mem0?.embedder?.provider?.toLowerCase() !== "ollama"
102112
const isLlmOllama = settings.mem0?.llm?.provider?.toLowerCase() === "ollama"
@@ -352,6 +362,29 @@ export function FormView({ settings, onChange }: FormViewProps) {
352362
</CardContent>
353363
</Card>
354364

365+
{/* Default Memory Processing Settings */}
366+
<Card>
367+
<CardHeader>
368+
<CardTitle>Default Memory Processing Settings</CardTitle>
369+
<CardDescription>Configure default behavior for memory operations</CardDescription>
370+
</CardHeader>
371+
<CardContent className="space-y-6">
372+
<div className="flex items-center justify-between">
373+
<div className="space-y-0.5 flex-1">
374+
<Label htmlFor="default-infer">Default Infer</Label>
375+
<p className="text-xs text-muted-foreground">
376+
Enable LLM processing (extraction & deduplication) by default
377+
</p>
378+
</div>
379+
<Switch
380+
id="default-infer"
381+
checked={settings.mem0?.default_infer !== false}
382+
onCheckedChange={(checked) => handleMem0ConfigChange("default_infer", checked)}
383+
/>
384+
</div>
385+
</CardContent>
386+
</Card>
387+
355388
{/* Backup (Export / Import) */}
356389
<Card>
357390
<CardHeader>

0 commit comments

Comments (0)