
Commit be5a3b2

Merge pull request #7 from BLTC-520/smolagents-branch
until 2.2
2 parents: 0ae62ae + 8fe73f6

39 files changed (+21140 -174 lines)

.gitignore

Lines changed: 20 additions & 0 deletions
@@ -53,6 +53,25 @@ Thumbs.db
 *.swp
 *.swo
 
+
+# Python Virtual Environments
+backend/venv/
+**/venv/
+**/__pycache__/
+*.pyc
+*.pyo
+
+chroma_db/
+./chroma_db/
+**/chroma_db/
+
+# Service management scripts
+start-all-services.sh
+stop-all-services.sh
+
+# Artifacts dependencies
+artifacts/.deps/npm/
+=======
 # --------------------
 # Service management scripts
 start-all-services.sh
@@ -61,3 +80,4 @@ stop-all-services.sh
 # --------------------
 # Misc
 package-lock.json
+

CLAUDE.md

Lines changed: 3 additions & 1 deletion
@@ -190,4 +190,6 @@ Press Ctrl+C to stop all services.
 - **Environment variables**: Check console logs in `lib/supabase.ts` for missing env vars
 - **Route guard issues**: Check `profiles.is_admin` field in Supabase for role problems
 - **Hook debugging**: Add console.logs to custom hooks to trace state changes
-- **Supabase issues**: Check browser Network tab for failed API requests
+- **Supabase issues**: Check browser Network tab for failed API requests
+- @backend/smolagents-service/spec/ is the specification for building @backend/smolagents-service/; always refer to this folder.
+- We work through @backend/smolagents-service/spec/tasks.md task by task, and every task goes through tests before we commit to git.
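
The test-before-commit rule above can be exercised with a check like the following. This is a minimal sketch, not part of the commit: it assumes pytest is installed and that the new citizen_analysis_agent module added later in this diff is importable from the test's path; the test file name is hypothetical.

# test_agent_config.py - hypothetical test file; sketches the "test every task
# before committing" workflow described above, using pytest's monkeypatch fixture.
from citizen_analysis_agent import AgentConfig


def test_agent_config_reads_environment(monkeypatch):
    # Override the environment variables that AgentConfig.from_env() reads.
    monkeypatch.setenv("AGENT_MODEL_NAME", "gpt-4o-mini")
    monkeypatch.setenv("AGENT_TEMPERATURE", "0.3")
    monkeypatch.setenv("AGENT_MAX_TOKENS", "1000")
    monkeypatch.setenv("AGENT_TIMEOUT", "15")

    config = AgentConfig.from_env()

    assert config.model_name == "gpt-4o-mini"
    assert config.temperature == 0.3
    assert config.max_tokens == 1000
    assert config.timeout == 15
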
Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
"""
Smolagents-based multi-agent analysis system for citizen eligibility analysis.
"""

from .citizen_analysis_agent import CitizenAnalysisAgent

__all__ = ["CitizenAnalysisAgent"]
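
For reference, with this __init__.py callers import the agent through the package rather than the module. A sketch only; the package's import name is not shown in this diff, so `agents` below is a placeholder:

# `agents` is a placeholder for whatever package directory holds this __init__.py.
from agents import CitizenAnalysisAgent  # __all__ also limits `from agents import *` to this name
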
Lines changed: 293 additions & 0 deletions
@@ -0,0 +1,293 @@
"""
CitizenAnalysisAgent - Basic implementation using smolagents framework.

This agent performs eligibility analysis for government subsidy applications
using a configurable LLM backend and various analysis tools.
"""

from typing import Dict, Any, List, Optional, Callable
import os
from dataclasses import dataclass
from datetime import datetime

from smolagents import CodeAgent, LiteLLMModel, Tool


@dataclass
class AgentConfig:
    """Configuration class for CitizenAnalysisAgent"""
    model_name: str = "gpt-4o-mini"
    temperature: float = 0.1
    max_tokens: int = 2000
    timeout: int = 30
    api_key: Optional[str] = None

    @classmethod
    def from_env(cls) -> "AgentConfig":
        """Create configuration from environment variables"""
        return cls(
            model_name=os.getenv("AGENT_MODEL_NAME", "gpt-4o-mini"),
            temperature=float(os.getenv("AGENT_TEMPERATURE", "0.2")),
            max_tokens=int(os.getenv("AGENT_MAX_TOKENS", "50000")),
            timeout=int(os.getenv("AGENT_TIMEOUT", "30")),
            api_key=os.getenv("OPENAI_API_KEY"),
        )


class CitizenAnalysisAgent(CodeAgent):
    """
    Basic implementation of CitizenAnalysisAgent using smolagents framework.

    This agent analyzes citizen eligibility for government subsidies using
    a configurable LLM backend and extensible tool system.
    """

    def __init__(
        self,
        config: Optional[AgentConfig] = None,
        tools: Optional[List[Tool]] = None,
        step_callbacks: Optional[List[Callable]] = None
    ):
        """
        Initialize the CitizenAnalysisAgent.

        Args:
            config: Agent configuration (uses environment defaults if None)
            tools: List of tools to make available to the agent
            step_callbacks: List of callback functions for step monitoring
        """
        self.config = config or AgentConfig.from_env()

        # Initialize the LiteLLM model with configuration
        model = LiteLLMModel(
            model_id=self.config.model_name,
            temperature=self.config.temperature,
            max_tokens=self.config.max_tokens,
            timeout=self.config.timeout
        )

        # Add path for imports
        import sys
        current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        if current_dir not in sys.path:
            sys.path.insert(0, current_dir)

        # Create validation tool
        validation_tool = None
        try:
            from tools.citizen_data_validation_tool import CitizenDataValidationTool
            validation_tool = CitizenDataValidationTool(enable_audit_logging=True)
            print(f"✓ Validation tool created: {type(validation_tool)}")
            print(f"✓ Tool name: {getattr(validation_tool, 'name', 'MISSING NAME')}")

        except Exception as e:
            print(f"Could not create validation tool: {e}")

        # Convert tools to proper format for smolagents (list of BaseTool instances)
        from smolagents.agents import BaseTool

        agent_tools = []

        # Add user-provided tools
        if tools:
            if isinstance(tools, list):
                for tool in tools:
                    if isinstance(tool, BaseTool):
                        agent_tools.append(tool)
                        print(f"✓ Added user tool: {getattr(tool, 'name', 'unnamed')}")
                    else:
                        print(f"Warning: Tool is not BaseTool instance: {type(tool)}")
            else:
                print(f"Warning: tools should be a list, got {type(tools)}")

        # Add validation tool
        if validation_tool:
            if isinstance(validation_tool, BaseTool):
                agent_tools.append(validation_tool)
                print(f"✓ Added validation tool: {validation_tool.name}")
            else:
                print(f"Warning: Validation tool is not BaseTool: {type(validation_tool)}")
                print(f"Validation tool MRO: {type(validation_tool).__mro__}")

        print(f"Final tools list: {[getattr(tool, 'name', 'unnamed') for tool in agent_tools]}")

        # Set up environment for LiteLLM if needed
        if self.config.api_key:
            os.environ["OPENAI_API_KEY"] = self.config.api_key

        # Initialize parent CodeAgent with tools as list
        super().__init__(
            tools=agent_tools,
            model=model,
            planning_interval=5,
            stream_outputs=False
        )

        # Debug the result
        print(f"After init - tools type: {type(self.tools)}")
        if hasattr(self, 'tools'):
            if isinstance(self.tools, dict):
                print(f"Tools dict keys: {list(self.tools.keys())}")
                for key, value in self.tools.items():
                    print(f"  {key}: {type(value)}")
            elif isinstance(self.tools, list):
                print(f"Tools list length: {len(self.tools)}")
                for i, tool in enumerate(self.tools):
                    print(f"  Tool {i}: {type(tool)} - {getattr(tool, 'name', 'no name')}")
            else:
                print(f"Tools content: {self.tools}")

        # Store step callbacks for future plan review implementation
        self.step_callbacks = step_callbacks or []

        # Initialize basic metadata
        self.created_at = datetime.now()
        self.analysis_count = 0

    def run(
        self,
        citizen_data: Dict[str, Any],
        query: str = "Analyze this citizen's eligibility for government subsidies",
        reset: bool = True
    ) -> Dict[str, Any]:
        """
        Basic run method for citizen analysis.

        Args:
            citizen_data: Dictionary containing citizen information
            query: Analysis query/prompt
            reset: Whether to reset agent memory (True for new analysis)

        Returns:
            Dictionary containing analysis results
        """
        try:
            # Increment analysis counter
            self.analysis_count += 1

            # Prepare the analysis prompt
            analysis_prompt = self._prepare_analysis_prompt(citizen_data, query)

            # Run the agent analysis
            result = super().run(analysis_prompt, reset=reset)

            # Format and return results
            return {
                "status": "completed",
                "analysis_id": f"analysis_{self.analysis_count}_{int(datetime.now().timestamp())}",
                "citizen_id": citizen_data.get("citizen_id", "unknown"),
                "raw_result": result,
                "processed_at": datetime.now().isoformat(),
                "model_used": self.config.model_name,
                "tools_used": [tool.__class__.__name__ for tool in self.tools]
            }

        except Exception as e:
            return {
                "status": "error",
                "error": str(e),
                "error_type": type(e).__name__,
                "analysis_id": f"error_{self.analysis_count}_{int(datetime.now().timestamp())}",
                "processed_at": datetime.now().isoformat()
            }

    def _prepare_analysis_prompt(self, citizen_data: Dict[str, Any], query: str) -> str:
        """
        Prepare the analysis prompt for the LLM.

        Args:
            citizen_data: Citizen information dictionary
            query: Analysis query

        Returns:
            Formatted prompt string
        """
        # Basic prompt template - will be enhanced in later phases
        prompt_template = f"""
You are an expert analyst for government subsidy eligibility assessment.

CITIZEN DATA:
{self._format_citizen_data(citizen_data)}

ANALYSIS REQUEST:
{query}

Please analyze the citizen's eligibility based on the provided data. Consider:
1. Income levels and financial need
2. Demographic factors
3. Geographic location
4. Family composition
5. Any special circumstances

Provide a comprehensive analysis with reasoning and recommendations.
"""
        return prompt_template.strip()

    def _format_citizen_data(self, citizen_data: Dict[str, Any]) -> str:
        """
        Format citizen data for prompt inclusion.

        Args:
            citizen_data: Raw citizen data dictionary

        Returns:
            Formatted string representation
        """
        formatted_lines = []
        for key, value in citizen_data.items():
            formatted_lines.append(f"- {key.replace('_', ' ').title()}: {value}")

        return "\n".join(formatted_lines)

    def get_agent_info(self) -> Dict[str, Any]:
        """
        Get information about the agent's current state.

        Returns:
            Dictionary with agent metadata and statistics
        """
        return {
            "agent_type": "CitizenAnalysisAgent",
            "model_name": self.config.model_name,
            "created_at": self.created_at.isoformat(),
            "analysis_count": self.analysis_count,
            "tools_count": len(self.tools),
            "tool_names": [tool.__class__.__name__ for tool in self.tools],
            "config": {
                "temperature": self.config.temperature,
                "max_tokens": self.config.max_tokens,
                "timeout": self.config.timeout
            }
        }

    def test_configuration(self) -> Dict[str, Any]:
        """
        Test the agent configuration and model connectivity.

        Returns:
            Dictionary with test results
        """
        try:
            # Test basic LLM connectivity using proper LiteLLMModel message format
            test_messages = [
                {"role": "user", "content": [{"type": "text", "text": "Hello, please respond with 'Configuration test successful'"}]}
            ]

            test_response = self.model(test_messages)

            return {
                "status": "success",
                "message": "Agent configuration test successful",
                "model_response": str(test_response),
                "model_name": self.config.model_name,
                "timestamp": datetime.now().isoformat()
            }

        except Exception as e:
            return {
                "status": "error",
                "message": "Agent configuration test failed",
                "error": str(e),
                "error_type": type(e).__name__,
                "timestamp": datetime.now().isoformat()
            }
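
A quick usage sketch for the new agent class, not part of the commit: it assumes smolagents and LiteLLM are installed, OPENAI_API_KEY is set, the module above is importable as citizen_analysis_agent, the imports made inside __init__ resolve, and that run() issues a real LLM call. The citizen fields below are illustrative, not a fixed schema.

# Usage sketch only; module path, key names, and values are examples.
from citizen_analysis_agent import AgentConfig, CitizenAnalysisAgent

config = AgentConfig(model_name="gpt-4o-mini", temperature=0.1, max_tokens=2000)
agent = CitizenAnalysisAgent(config=config)

# Illustrative citizen record; keys are formatted into the prompt by _format_citizen_data().
citizen = {
    "citizen_id": "C-001",
    "monthly_income": 1500,
    "household_size": 4,
    "employment_status": "part-time",
}

result = agent.run(citizen, query="Assess this citizen's eligibility for income-based subsidies")
print(result["status"], result.get("analysis_id"))
print(agent.get_agent_info())
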
