diff --git a/mem0/configs/base.py b/mem0/configs/base.py
index dd0dd9df4b..3c993842df 100644
--- a/mem0/configs/base.py
+++ b/mem0/configs/base.py
@@ -64,6 +64,10 @@ class MemoryConfig(BaseModel):
         description="Custom prompt for the update memory",
         default=None,
     )
+    default_infer: bool = Field(
+        description="Default value for infer when not specified in API call",
+        default=True
+    )
 
 
 class AzureConfig(BaseModel):
diff --git a/openmemory/api/app/mcp_server.py b/openmemory/api/app/mcp_server.py
index 459aaf760a..4c730eca30 100644
--- a/openmemory/api/app/mcp_server.py
+++ b/openmemory/api/app/mcp_server.py
@@ -20,6 +20,7 @@
 import json
 import logging
 import uuid
+from typing import Optional
 
 from app.database import SessionLocal
 from app.models import Memory, MemoryAccessLog, MemoryState, MemoryStatusHistory
@@ -57,8 +58,8 @@ def get_memory_client_safe():
 # Initialize SSE transport
 sse = SseServerTransport("/mcp/messages/")
 
-@mcp.tool(description="Add a new memory. This method is called everytime the user informs anything about themselves, their preferences, or anything that has any relevant information which can be useful in the future conversation. This can also be called when the user asks you to remember something.")
-async def add_memories(text: str) -> str:
+@mcp.tool(description="Add a new memory. This method is called every time the user shares anything about themselves, their preferences, or anything that has any relevant information which can be useful in the future conversation. This can also be called when the user asks you to remember something. The 'infer' parameter controls processing: True (default) = LLM extracts semantic facts and deduplicates; False = stores exact verbatim text without transformation.")
+async def add_memories(text: str, infer: Optional[bool] = None) -> str:
     uid = user_id_var.get(None)
     client_name = client_name_var.get(None)
 
@@ -82,12 +83,17 @@ async def add_memories(text: str) -> str:
     if not app.is_active:
         return f"Error: App {app.name} is currently paused on OpenMemory. Cannot create new memories."
 
+    # Apply default from config if not specified
+    infer_value = infer if infer is not None else memory_client.config.default_infer
+
     response = memory_client.add(text,
                                  user_id=uid,
                                  metadata={
-                                    "source_app": "openmemory",
-                                    "mcp_client": client_name,
-                                 })
+                                    "source_app": "openmemory",
+                                    "mcp_client": client_name
+                                 },
+                                 infer=infer_value
+                                 )
 
     # Process the response and update database
     if isinstance(response, dict) and 'results' in response:
diff --git a/openmemory/api/app/routers/config.py b/openmemory/api/app/routers/config.py
index d55cf7829a..b395342b26 100644
--- a/openmemory/api/app/routers/config.py
+++ b/openmemory/api/app/routers/config.py
@@ -41,6 +41,12 @@ class Mem0Config(BaseModel):
     llm: Optional[LLMProvider] = None
     embedder: Optional[EmbedderProvider] = None
     vector_store: Optional[VectorStoreProvider] = None
+    default_infer: Optional[bool] = Field(
+        None,
+        description="Default value for infer parameter when not specified in API/MCP calls. "
+                    "When True: enables LLM fact extraction and deduplication. "
+                    "When False: stores verbatim text without transformation."
+    )
 
 class ConfigSchema(BaseModel):
     openmemory: Optional[OpenMemoryConfig] = None
@@ -69,7 +75,8 @@ def get_default_configuration():
                     "api_key": "env:OPENAI_API_KEY"
                 }
             },
-            "vector_store": None
+            "vector_store": None,
+            "default_infer": True
         }
     }
 
@@ -154,7 +161,11 @@ async def update_configuration(config: ConfigSchema, db: Session = Depends(get_db)):
         # Update mem0 settings
         updated_config["mem0"] = config.mem0.dict(exclude_none=True)
-
+
+    # Save the configuration to database
+    save_config_to_db(db, updated_config)
+    reset_memory_client()
+
     return updated_config
 
 @router.patch("/", response_model=ConfigSchema)
 async def patch_configuration(config_update: ConfigSchema, db: Session = Depends(get_db)):
diff --git a/openmemory/api/app/routers/memories.py b/openmemory/api/app/routers/memories.py
index 3af7bbe13b..5b0b3db31a 100644
--- a/openmemory/api/app/routers/memories.py
+++ b/openmemory/api/app/routers/memories.py
@@ -20,7 +20,7 @@
 from fastapi import APIRouter, Depends, HTTPException, Query
 from fastapi_pagination import Page, Params
 from fastapi_pagination.ext.sqlalchemy import paginate as sqlalchemy_paginate
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
 from sqlalchemy import func
 from sqlalchemy.orm import Session, joinedload
 
@@ -213,7 +213,13 @@ class CreateMemoryRequest(BaseModel):
     user_id: str
     text: str
     metadata: dict = {}
-    infer: bool = True
+    infer: Optional[bool] = Field(
+        None,
+        description="Enable LLM processing for fact extraction and deduplication. "
+                    "When True: content is analyzed and transformed into semantic facts. "
+                    "When False: stores exact verbatim text without transformation. "
+                    "When None: uses default from server configuration (default_infer)."
+    )
     app: str = "openmemory"
 
 
@@ -254,6 +260,9 @@ async def create_memory(
             "error": str(client_error)
         }
 
+    # Apply default from config if not specified
+    infer_value = request.infer if request.infer is not None else memory_client.config.default_infer
+
     # Try to save to Qdrant via memory_client
     try:
         qdrant_response = memory_client.add(
@@ -263,7 +272,7 @@ async def create_memory(
                 "source_app": "openmemory",
                 "mcp_client": request.app,
             },
-            infer=request.infer
+            infer=infer_value
         )
 
         # Log the response for debugging
diff --git a/openmemory/api/app/utils/memory.py b/openmemory/api/app/utils/memory.py
index a4f557fe69..f5589fc59d 100644
--- a/openmemory/api/app/utils/memory.py
+++ b/openmemory/api/app/utils/memory.py
@@ -304,10 +304,11 @@ def get_memory_client(custom_instructions: str = None):
     try:
         # Start with default configuration
         config = get_default_memory_config()
-
-        # Variable to track custom instructions
+
+        # Variables to track custom prompts and defaults from database
         db_custom_instructions = None
-
+        db_default_infer = None
+
         # Load configuration from database
         try:
             db = SessionLocal()
@@ -316,14 +317,19 @@ def get_memory_client(custom_instructions: str = None):
             if db_config:
                 json_config = db_config.value
 
-                # Extract custom instructions from openmemory settings
-                if "openmemory" in json_config and "custom_instructions" in json_config["openmemory"]:
-                    db_custom_instructions = json_config["openmemory"]["custom_instructions"]
-
+                # Extract custom prompts from openmemory settings
+                if "openmemory" in json_config:
+                    if "custom_instructions" in json_config["openmemory"]:
+                        db_custom_instructions = json_config["openmemory"]["custom_instructions"]
+
                 # Override defaults with configurations from the database
                 if "mem0" in json_config:
                     mem0_config = json_config["mem0"]
-
+
+                    # Extract default flags from mem0 config
+                    if "default_infer" in mem0_config:
+                        db_default_infer = mem0_config["default_infer"]
+
                     # Update LLM configuration if available
                     if "llm" in mem0_config and mem0_config["llm"] is not None:
                         config["llm"] = mem0_config["llm"]
@@ -357,6 +363,11 @@ def get_memory_client(custom_instructions: str = None):
         if instructions_to_use:
             config["custom_fact_extraction_prompt"] = instructions_to_use
 
+        # Use database value for default_infer
+        # Note: Must use 'is not None' check to properly handle False value
+        if db_default_infer is not None:
+            config["default_infer"] = db_default_infer
+
         # ALWAYS parse environment variables in the final config
         # This ensures that even default config values like "env:OPENAI_API_KEY" get parsed
         print("Parsing environment variables in final config...")
diff --git a/openmemory/ui/components/form-view.tsx b/openmemory/ui/components/form-view.tsx
index 4e8baba163..29d6585a68 100644
--- a/openmemory/ui/components/form-view.tsx
+++ b/openmemory/ui/components/form-view.tsx
@@ -97,6 +97,16 @@ export function FormView({ settings, onChange }: FormViewProps) {
     })
   }
 
+  const handleMem0ConfigChange = (key: string, value: any) => {
+    onChange({
+      ...settings,
+      mem0: {
+        ...settings.mem0,
+        [key]: value,
+      },
+    })
+  }
+
   const needsLlmApiKey = settings.mem0?.llm?.provider?.toLowerCase() !== "ollama"
   const needsEmbedderApiKey = settings.mem0?.embedder?.provider?.toLowerCase() !== "ollama"
   const isLlmOllama = settings.mem0?.llm?.provider?.toLowerCase() === "ollama"
@@ -352,6 +362,29 @@ export function FormView({ settings, onChange }: FormViewProps) {
           </CardContent>
         </Card>
 
+        {/* Default Memory Processing Settings */}
+        <Card>
+          <CardHeader>
+            <CardTitle>Default Memory Processing Settings</CardTitle>
+            <CardDescription>Configure default behavior for memory operations</CardDescription>
+          </CardHeader>
+          <CardContent>
+            <div className="flex items-center justify-between">
+              <div className="space-y-0.5">
+                <Label htmlFor="default-infer">Default Infer</Label>
+                <p className="text-sm text-muted-foreground">
+                  Enable LLM processing (extraction & deduplication) by default
+                </p>
+              </div>
+              <Switch
+                id="default-infer"
+                checked={settings.mem0?.default_infer ?? true}
+                onCheckedChange={(checked) => handleMem0ConfigChange("default_infer", checked)}
+              />
+            </div>
+          </CardContent>
+        </Card>
+
         {/* Backup (Export / Import) */}
         <Card>
           <CardHeader>
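Both call sites resolve the effective flag the same way: an explicit per-call value always wins, and the server-wide default only fills the gap when the caller passes nothing. A minimal sketch of that precedence rule, using a hypothetical resolve_infer helper that mirrors the inline expressions added in mcp_server.py and routers/memories.py:

    from typing import Optional

    def resolve_infer(request_infer: Optional[bool], default_infer: bool = True) -> bool:
        # The 'is not None' comparison matters: a plain truthiness test would
        # silently turn an explicit infer=False into the configured default.
        return request_infer if request_infer is not None else default_infer

    assert resolve_infer(None, default_infer=True) is True     # falls back to config
    assert resolve_infer(None, default_infer=False) is False   # honours default_infer=False
    assert resolve_infer(False, default_infer=True) is False   # explicit override wins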
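From a client's perspective, the per-request override and the new server-wide default compose as below. A hypothetical sketch with the requests library, assuming the API is served on localhost:8765 with the memories router mounted under /api/v1/memories and the config router under /api/v1/config (neither prefix appears in this diff); the payload fields match CreateMemoryRequest above:

    import requests

    MEMORIES = "http://localhost:8765/api/v1/memories/"
    CONFIG = "http://localhost:8765/api/v1/config/"

    # 'infer' omitted: the server substitutes mem0.default_infer from the stored config.
    requests.post(MEMORIES, json={"user_id": "alice", "text": "I prefer dark mode"})

    # Explicit infer=False: the text is stored verbatim, skipping LLM fact
    # extraction and deduplication regardless of the configured default.
    requests.post(MEMORIES, json={"user_id": "alice",
                                  "text": "Raw note: prefers dark mode, vim keybindings",
                                  "infer": False})

    # Flip the server-wide default via the config router's PATCH endpoint
    # (shown in the diff); get_memory_client then picks up the stored value.
    requests.patch(CONFIG, json={"mem0": {"default_infer": False}})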