@dataclass
class Task:
    """A single unit of work in the SAGE execution loop.

    Attributes:
        id: Unique task identifier (e.g. "task_1").
        description: Human-readable statement of what to do.
        priority: Execution priority, 1 (low) to 5 (high).
        status: Current lifecycle state; starts as PENDING.
        dependencies: Ids of tasks that must be COMPLETED before this one
            runs; a None default is normalized to an empty list.
        result: Output text of execution, or an error message on failure.
    """
    id: str
    description: str
    priority: int
    status: TaskStatus = TaskStatus.PENDING
    dependencies: Optional[List[str]] = None
    result: Optional[str] = None

    def __post_init__(self):
        # Normalize here instead of using a mutable default argument,
        # so each instance gets its own dependency list.
        if self.dependencies is None:
            self.dependencies = []
class SAGEAgent:
    """Self-Adaptive Goal-oriented Execution AI agent.

    Drives the SAGE loop against a Gemini generative model:
      S - self_assess:           evaluate current state and capabilities
      A - adaptive_plan:         decompose the goal into prioritized tasks
      G - execute_goal_oriented: execute each task with focused attention
      E - integrate_experience:  learn from outcomes and update memory
    """

    def __init__(self, api_key: str, model_name: str = "gemini-1.5-flash"):
        genai.configure(api_key=api_key)
        self.model = genai.GenerativeModel(model_name)
        self.memory: List[Dict[str, Any]] = []   # experience records from past executions
        self.tasks: Dict[str, Task] = {}         # task_id -> Task, accumulated across iterations
        self.context: Dict[str, Any] = {}        # rolling context injected into prompts
        self.iteration_count = 0

    @staticmethod
    def _parse_json_response(response) -> Any:
        """Parse a model response as JSON, stripping a markdown code fence.

        Raises json.JSONDecodeError (or IndexError on a malformed fence);
        each caller supplies its own fallback value.
        """
        text = response.text.strip()
        if text.startswith('```'):
            # Drop the opening fence and an optional "json" language tag.
            text = text.split('```')[1]
            if text.startswith('json'):
                text = text[4:]
            text = text.strip()
        return json.loads(text)

    def self_assess(self, goal: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """S: Self-Assessment - evaluate current state and capabilities.

        Returns a dict with keys progress_score, resources, gaps, risks and
        recommendations; on any model/parsing failure a conservative default
        is returned so the cycle can continue.
        """
        assessment_prompt = f"""
You are an AI agent conducting self-assessment. Respond ONLY with valid JSON, no extra text.
GOAL: {goal}
CONTEXT: {json.dumps(context, indent=2)}
TASKS_PROCESSED: {len(self.tasks)}
Provide assessment as JSON with these exact keys:
{{
  "progress_score": <integer 0-100>,
  "resources": ["list of available resources"],
  "gaps": ["list of knowledge gaps"],
  "risks": ["list of potential risks"],
  "recommendations": ["list of next steps"]
}}
"""
        response = self.model.generate_content(assessment_prompt)
        try:
            return self._parse_json_response(response)
        except Exception as e:
            print(f"Assessment parsing error: {e}")
            # Conservative fallback keeps the SAGE cycle moving.
            return {
                "progress_score": 25,
                "resources": ["AI capabilities", "Internet knowledge"],
                "gaps": ["Specific domain expertise", "Real-time data"],
                "risks": ["Information accuracy", "Scope complexity"],
                "recommendations": ["Break down into smaller tasks", "Focus on research first"],
            }

    def adaptive_plan(self, goal: str, assessment: Dict[str, Any]) -> List[Task]:
        """A: Adaptive Planning - create dynamic, context-aware task decomposition.

        Asks the model for 3-4 tasks as a JSON array and converts them to
        Task objects; falls back to a fixed research plan on parse errors.
        """
        planning_prompt = f"""
You are an AI task planner. Respond ONLY with a valid JSON array, no extra text.
MAIN_GOAL: {goal}
ASSESSMENT: {json.dumps(assessment, indent=2)}
Create 3-4 actionable tasks as JSON array:
[
  {{
    "id": "task_1",
    "description": "Clear, specific task description",
    "priority": 5,
    "dependencies": []
  }},
  {{
    "id": "task_2",
    "description": "Another specific task",
    "priority": 4,
    "dependencies": ["task_1"]
  }}
]
Each task must have: id (string), description (string), priority (1-5), dependencies (array of strings)
"""
        response = self.model.generate_content(planning_prompt)
        try:
            task_data = self._parse_json_response(response)
            return [
                Task(
                    id=info.get('id', f'task_{i + 1}'),
                    description=info.get('description', 'Undefined task'),
                    priority=info.get('priority', 3),
                    dependencies=info.get('dependencies', []),
                )
                for i, info in enumerate(task_data)
            ]
        except Exception as e:
            print(f"Planning parsing error: {e}")
            # Fallback plan so the cycle still makes progress.
            return [
                Task(id="research_1", description="Research sustainable urban gardening basics", priority=5),
                Task(id="research_2", description="Identify space-efficient growing methods", priority=4),
                Task(id="compile_1", description="Organize findings into structured guide", priority=3,
                     dependencies=["research_1", "research_2"]),
            ]

    def execute_goal_oriented(self, task: Task) -> str:
        """G: Goal-oriented Execution - execute a specific task with focused attention.

        Returns the model's free-text execution output; exceptions from the
        API propagate to the caller (execute_sage_cycle marks the task FAILED).
        """
        execution_prompt = f"""
GOAL-ORIENTED EXECUTION:
Task: {task.description}
Priority: {task.priority}
Context: {json.dumps(self.context, indent=2)}
Execute this task step-by-step:
1. Break down the task into concrete actions
2. Execute each action methodically
3. Validate outcomes at each step
4. Provide comprehensive output
Focus on practical, actionable results. Be specific and thorough.
"""
        response = self.model.generate_content(execution_prompt)
        return response.text.strip()

    def integrate_experience(self, task: Task, result: str, success: bool) -> Dict[str, Any]:
        """E: Experience Integration - learn from outcomes and update knowledge.

        Appends the experience record (tagged with task_id and timestamp) to
        self.memory and returns it; a default record is used on parse errors.
        """
        integration_prompt = f"""
You are learning from task execution. Respond ONLY with valid JSON, no extra text.
TASK: {task.description}
RESULT: {result[:200]}...
SUCCESS: {success}
Provide learning insights as JSON:
{{
  "learnings": ["key insight 1", "key insight 2"],
  "patterns": ["pattern observed 1", "pattern observed 2"],
  "adjustments": ["adjustment for future 1", "adjustment for future 2"],
  "confidence_boost": <integer between -10 and 10>
}}
"""
        response = self.model.generate_content(integration_prompt)
        try:
            experience = self._parse_json_response(response)
        except Exception as e:
            print(f"Experience parsing error: {e}")
            experience = {
                "learnings": [f"Completed task: {task.description}"],
                "patterns": ["Task execution follows planned approach"],
                "adjustments": ["Continue systematic approach"],
                "confidence_boost": 5 if success else -2,
            }
        # Tag and store the record regardless of whether parsing succeeded.
        experience['task_id'] = task.id
        experience['timestamp'] = time.time()
        self.memory.append(experience)
        return experience

    def execute_sage_cycle(self, goal: str, max_iterations: int = 3) -> Dict[str, Any]:
        """Execute the full SAGE cycle for goal achievement.

        Runs up to max_iterations assess/plan/execute/integrate rounds,
        stopping early once the self-assessed progress score reaches 90.
        Returns a summary dict with per-iteration details and final_status
        ("achieved" or "in_progress").
        """
        print(f"🎯 Starting SAGE cycle for goal: {goal}")
        results: Dict[str, Any] = {"goal": goal, "iterations": [], "final_status": "unknown"}
        for iteration in range(max_iterations):
            self.iteration_count += 1
            print(f"\n🔄 SAGE Iteration {iteration + 1}")

            print("📊 Self-Assessment...")
            assessment = self.self_assess(goal, self.context)
            print(f"Progress Score: {assessment.get('progress_score', 0)}/100")

            print("🗺️ Adaptive Planning...")
            tasks = self.adaptive_plan(goal, assessment)
            print(f"Generated {len(tasks)} tasks")

            print("⚡ Goal-oriented Execution...")
            iteration_results = []
            # Highest-priority tasks first; tasks with unmet dependencies are skipped.
            for task in sorted(tasks, key=lambda t: t.priority, reverse=True):
                if not self._dependencies_met(task):
                    continue
                print(f"  Executing: {task.description}")
                task.status = TaskStatus.IN_PROGRESS
                try:
                    task.result = self.execute_goal_oriented(task)
                    task.status = TaskStatus.COMPLETED
                    success = True
                    print(f"  ✅ Completed: {task.id}")
                except Exception as e:
                    task.status = TaskStatus.FAILED
                    task.result = f"Error: {str(e)}"
                    success = False
                    print(f"  ❌ Failed: {task.id}")
                experience = self.integrate_experience(task, task.result, success)
                self.tasks[task.id] = task
                iteration_results.append({
                    "task": asdict(task),
                    "experience": experience,
                })

            self._update_context(iteration_results)
            results["iterations"].append({
                "iteration": iteration + 1,
                "assessment": assessment,
                "tasks_generated": len(tasks),
                # asdict() keeps the enum member as-is, so compare against the
                # enum rather than a string (robust for str and non-str enums).
                "tasks_completed": sum(
                    1 for r in iteration_results if r["task"]["status"] == TaskStatus.COMPLETED
                ),
                "results": iteration_results,
            })
            if assessment.get('progress_score', 0) >= 90:
                results["final_status"] = "achieved"
                print("🎉 Goal achieved!")
                break
        if results["final_status"] == "unknown":
            results["final_status"] = "in_progress"
        return results

    def _dependencies_met(self, task: Task) -> bool:
        """Return True iff every dependency id refers to a COMPLETED task."""
        return all(
            dep_id in self.tasks and self.tasks[dep_id].status == TaskStatus.COMPLETED
            for dep_id in task.dependencies
        )

    def _update_context(self, results: List[Dict[str, Any]]):
        """Update agent context from this iteration's execution results.

        success_rate is the completed fraction of tasks attempted this
        iteration (0 when nothing was attempted).
        """
        completed = [r for r in results if r["task"]["status"] == TaskStatus.COMPLETED]
        self.context.update({
            "completed_tasks": len(completed),
            "total_tasks": len(self.tasks),
            "success_rate": len(completed) / len(results) if results else 0,
            "last_update": time.time(),
        })