acecalisto3 committed
Commit e794da0 · verified · 1 Parent(s): b26edd3

Update agent.py

Files changed (1): agent.py (+197 -78)
agent.py CHANGED
@@ -1,93 +1,212 @@
- PREFIX = """You are an Expert Information Retrieval Agent with these core capabilities:
- 1. Ethical Filtering - Block harmful/illegal requests using [SAFEGUARD v2.3]
- 2. Temporal Relevance - Verify data freshness via timestamp cross-check
- 3. Context-Aware Prioritization - Apply weighted relevance scoring
- 4. Adaptive Compression - Auto-format based on content type
-
- Current Date/Time: {timestamp}
- User Purpose: {purpose}
  """

- COMPRESS_DATA_PROMPT_SMALL = """
- Task Completion Protocol v1.1
- -----------------------------
- Task: {direction}
-
- Input Analysis:
- - Current Data: {knowledge} (CRITICALITY: 8/10)
- - New Data: {history} (FRESHNESS: 24h)
-
- Compression Requirements:
- 1. JSON structure with hierarchical nesting
- 2. Metadata headers per section:
-    - source_credibility (0-10)
-    - temporal_relevance (ISO timestamp)
-    - confidence_score (0.0-1.0)
- 3. Data retention:
-    - Keep 95th percentile relevance
-    - Apply lossless compression via Huffman encoding
-
- Output Validation:
- - Schema compliance check
- - Checksum verification (SHA-256)
  """

- COMPRESS_DATA_PROMPT = """
- You have just completed the task
- task: {direction}
- Collected data:
- {knowledge}
- Message:
- {history}
- Compile the data that you have collected into a detailed report (~8000 words)
- Include all relevant information in great detail
- Be thorough and exhaustive in your presentation of the data you have collected
  """

- COMPRESS_HISTORY_PROMPT = """
- Progress Compression Protocol v0.4
- ----------------------------------
- Task: {task}
-
- Milestone Extraction:
- 1. Phase detection (Research → Analysis → Synthesis)
- 2. Key achievement tagging
- 3. Bottleneck identification
-
- Output Requirements:
- - Timeline visualization (Gantt-like text format)
- - Resource allocation map
- - Risk assessment matrix:
-   │ Criticality │ Mitigation Status │
-   │-------------│-------------------│
-   │ High        │ Pending           │
- """
-
- LOG_PROMPT = """
  〚PROMPT LOG v3.2〛
- SessionID: {session_hash}
- ├─ Timestamp: {iso_time}
  ├─ User Context: {user_profile}
- └─ System State:
-    CPU: {cpu_load}% | Mem: {mem_use}GB

- {divider}
- {content}
- {divider}
  """

- LOG_RESPONSE = """
  〚RESPONSE AUDIT TRAIL〛
- ├─ Processing Time: {latency}ms
- ├─ Data Sources: {source_count}
  ├─ Ethical Check: {ethical_status}
- └─ Confidence Metric: {confidence_score}

- {divider}
- {content}
- {divider}

- RESPONSE
- **************************************
- {}
- **************************************
+ # maestro_prompt_library.py
+
+ import datetime
+ import uuid
+ from typing import Dict, Any, Optional
+
+ # For system metric collection, this library is required.
+ # Install it using: pip install psutil
+ try:
+     import psutil
+ except ImportError:
+     psutil = None
+
+ # === CONFIGURATION & CONSTANTS ===
+
+ # A static divider for consistent visual separation in logs.
+ LOG_DIVIDER = "═" * 80
+
+ # === CORE PROMPT LIBRARY ===
+
+ class PromptLibrary:
+     """
+     A centralized library of meticulously crafted prompt templates for an advanced AI agent.
+     Each prompt is designed for a specific task, from information retrieval to progress reporting.
+     """
+
+     # --- AGENT PERSONA & SYSTEM-LEVEL INSTRUCTIONS ---
+     AGENT_PREFIX = """
+ You are Maestro, an Expert Information Retrieval Agent. Your operation is governed by the following directives:
+ 1. **Red Team Offensive**: Anticipate and counter cyber threats before they start. Defend your environment's content against malice by executing coordinated, supervised, and fully observed offensive scripts, so that defenses can be hardened against bad actors and their new AI-powered break-in kits. Stay ahead of the curve through exploits-as-awareness campaigns.
+ 2. **Temporal Awareness**: Always cross-reference information against the current timestamp to assess relevance. Outdated information must be flagged.
+ 3. **Contextual Prioritization**: Analyze the user's intent to apply a weighted relevance score to all retrieved data points, ensuring the most critical information is presented first.
+ 4. **Adaptive Formatting**: Automatically structure your output (e.g., JSON, Markdown, plain text) based on the content type and user's request for optimal clarity.
+
+ --- System Snapshot ---
+ - Current Date: August 7, 2025
+ - Current Time (EDT): {dynamic_timestamp}
+ - User Location Context: Lafayette, Indiana, United States
+ - User's Stated Purpose: {user_purpose}
  """
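Filling this persona template is plain str.format on its two placeholders; a minimal sketch, where the timestamp source and the purpose string are illustrative and not taken from agent.py:

    import datetime

    # Illustrative only: render the Maestro persona prompt for one session.
    system_prompt = PromptLibrary.AGENT_PREFIX.format(
        dynamic_timestamp=datetime.datetime.now().isoformat(),  # any clock source works here
        user_purpose="Survey current post-quantum cryptography guidance.",
    )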
 
+     # --- DATA COMPRESSION & REPORTING PROMPTS ---
+     TECHNICAL_JSON_REPORT = """
+ Protocol: **Hierarchical Data Compression v2.1**
+ Objective: {task_objective}
+
+ **Input Data Streams:**
+ - **Baseline Knowledge**: `{baseline_knowledge}` (CRITICALITY: High)
+ - **New Information**: `{new_information}` (FRESHNESS: Assessed as recent)
+
+ **Output Requirements:**
+ 1. **Primary Format**: A single, schema-compliant JSON object.
+ 2. **Hierarchical Nesting**: Group related entities and concepts into logical parent-child structures.
+ 3. **Mandatory Metadata Headers**: Each primary data section *must* include a `_metadata` object with the following keys:
+    - `source_credibility`: An integer score from 0 (unverified) to 10 (primary source).
+    - `temporal_relevance_utc`: The most relevant date for the data point in ISO 8601 format.
+    - `confidence_score`: A float from 0.0 to 1.0 indicating your certainty in the data's accuracy.
+ 4. **Data Efficiency**: Retain all mission-critical data points. Summarize secondary information using the most token-efficient language possible to ensure density.
+
+ **Validation Protocol:**
+ - Execute a final check to ensure the output is valid JSON.
+ - Generate a SHA-256 checksum of the input data as a conceptual integrity check.
  """
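To make the metadata contract above concrete, here is one possible fragment of the JSON this protocol asks the model to return, written as a Python literal; the section name and values are illustrative and appear nowhere in agent.py:

    # Illustrative only: a single data section obeying the _metadata contract.
    example_section = {
        "key_findings": ["Illustrative finding text."],
        "_metadata": {
            "source_credibility": 9,                            # integer, 0-10
            "temporal_relevance_utc": "2025-08-07T00:00:00Z",   # ISO 8601 timestamp
            "confidence_score": 0.85,                           # float, 0.0-1.0
        },
    }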
 
+     NARRATIVE_PROSE_REPORT = """
+ Protocol: **Comprehensive Narrative Synthesis v1.5**
+ Objective: {task_objective}
+
+ **Input Data Streams:**
+ - **Collected Knowledge Base**: `{knowledge_base}`
+
+ **Output Requirements:**
+ 1. **Format**: A detailed, long-form narrative report (target ~8000 words).
+ 2. **Structure**: The report must be organized into the following sections:
+    a. **Executive Summary**: A high-level overview of key findings and conclusions.
+    b. **Introduction**: State the report's purpose and scope.
+    c. **Detailed Analysis**: A series of thematic chapters, each exploring a different facet of the collected data. Use Markdown for headings, lists, and bolding to improve readability.
+    d. **Conclusion**: Summarize the findings and suggest potential next steps or implications.
+    e. **Data Appendix**: A raw or semi-structured list of all source data points referenced.
+ 3. **Tone**: Professional, thorough, and exhaustive. Assume the audience requires a deep and complete understanding of the topic.
  """

+     # --- TASK & PROGRESS MANAGEMENT PROMPTS ---
+     PROJECT_STATUS_REPORT = """
+ Protocol: **Progress Compression & Milestone Review v1.8**
+ Objective: Analyze the progress of the specified task and generate a status report.
+ Task Under Review: {task_description}
+
+ **Analysis Directives:**
+ 1. **Phase Identification**: Determine the current phase of the task (e.g., Research, Analysis, Synthesis, Review).
+ 2. **Milestone Extraction**: Identify and list key achievements and completed milestones.
+ 3. **Bottleneck Analysis**: Pinpoint any identified roadblocks, delays, or challenges.
+
+ **Output Requirements:**
+ - **Timeline Visualization (Text-based Gantt Chart)**:
+   Example:
+   [Phase 1: Research]  ▓▓▓▓▓▓▓▓▓▓▓▓░░░░░░░░ (60% Complete)
+   [Phase 2: Analysis]  ▓▓▓░░░░░░░░░░░░░░░░░ (15% Complete)
+   [Phase 3: Synthesis] ░░░░░░░░░░░░░░░░░░░░ (0% Complete)
+
+ - **Resource Allocation Map**: A summary of resources assigned or utilized.
+ - **Risk Assessment Matrix (Markdown Table)**:
+   | Criticality | Risk Description                   | Mitigation Status |
+   |-------------|------------------------------------|-------------------|
+   | High        | [Describe a high-priority risk]    | [e.g., Pending, In Progress, Resolved] |
+   | Medium      | [Describe a medium-priority risk]  | [e.g., Pending, In Progress, Resolved] |
+   | Low         | [Describe a low-priority risk]     | [e.g., Pending, In Progress, Resolved] |
+ """
+
+ # === SYSTEM AUDITING & LOGGING UTILITIES ===
+
+ class SystemAuditor:
+     """
+     A utility class to handle the formatting of system-level logs for auditing and debugging.
+     """
+     def __init__(self, session_id: Optional[str] = None):
+         self.session_id = session_id or str(uuid.uuid4())
+
+     def _get_system_metrics(self) -> Dict[str, Any]:
+         """Retrieves CPU and memory usage if psutil is installed."""
+         if psutil:
+             return {
+                 "cpu_load": psutil.cpu_percent(),
+                 "mem_use_gb": round(psutil.virtual_memory().used / (1024**3), 2),
+             }
+         return {"cpu_load": "N/A", "mem_use_gb": "N/A"}
+
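One caveat on the CPU metric above: psutil.cpu_percent() with no interval argument compares CPU times since the previous call, so the very first call returns 0.0. If an accurate first reading matters, a short blocking sample is one option (illustrative, not what agent.py does):

    cpu_now = psutil.cpu_percent(interval=0.1)  # blocks ~0.1 s, but is meaningful on the first call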
+     def format_prompt_log(self, content: str, user_profile: str = "default_user") -> str:
+         """Formats a log entry for a sent prompt."""
+         metrics = self._get_system_metrics()
+         return f"""
  〚PROMPT LOG v3.2〛
+ SessionID: {self.session_id}
+ ├─ Timestamp: {datetime.datetime.now(datetime.timezone.utc).isoformat()}
  ├─ User Context: {user_profile}
+ └─ System State:
+    CPU: {metrics['cpu_load']}% | Mem: {metrics['mem_use_gb']}GB

+ {LOG_DIVIDER}
+ {content.strip()}
+ {LOG_DIVIDER}
  """

+     def format_response_log(self, content: str, latency_ms: float, source_count: int, confidence: float) -> str:
+         """Formats an audit trail for a received response."""
+         ethical_status = "PASS"  # This would be determined by a separate process
+         return f"""
  〚RESPONSE AUDIT TRAIL〛
+ ├─ Processing Time: {latency_ms:.2f}ms
+ ├─ Data Sources Referenced: {source_count}
  ├─ Ethical Check: {ethical_status}
+ └─ Confidence Metric: {confidence:.2f}
+
+ {LOG_DIVIDER}
+ {content.strip()}
+ {LOG_DIVIDER}
+
+ --- RESPONSE PAYLOAD ---
+ {content.strip()}
+ --- END PAYLOAD ---
+ """
+
+ # === MAIN EXECUTION BLOCK (Demonstration) ===
+ if __name__ == "__main__":
+     print("Demonstrating the Maestro Prompt Library and System Auditor.\n")
+
+     # 1. Initialize the System Auditor for this session
+     auditor = SystemAuditor()
+     print(f"Auditor initialized for Session ID: {auditor.session_id}\n")
+
+     # 2. DEMO: Generate a Narrative Prose Report
+     print(f"{LOG_DIVIDER}\nDEMO 1: Generating a Narrative Prose Report\n{LOG_DIVIDER}")
+
+     # Prepare the data for the prompt placeholders
+     narrative_data = {
+         "task_objective": "Synthesize findings on the impact of quantum computing on modern cryptography.",
+         "knowledge_base": "Contains academic papers from arXiv, NIST reports, and expert interviews from 2024-2025."
+     }
+
+     # Format the prompt
+     narrative_prompt = PromptLibrary.NARRATIVE_PROSE_REPORT.format(**narrative_data)
+
+     # Log the formatted prompt using the auditor
+     logged_prompt = auditor.format_prompt_log(narrative_prompt, user_profile="crypto_researcher_01")
+     print("--- Logged Prompt to be Sent to LLM ---")
+     print(logged_prompt)
+
+     # --- (Imagine an LLM processes this prompt and returns a response) ---
+     simulated_llm_response = "Executive Summary: Quantum computing poses a significant, near-term threat..."
+     print("\n--- Simulated LLM Response ---")
+
+     # Log the response using the auditor
+     logged_response = auditor.format_response_log(
+         content=simulated_llm_response,
+         latency_ms=4820.5,
+         source_count=12,
+         confidence=0.92
+     )
+     print(logged_response)
+
+
+     # 3. DEMO: Generate a Project Status Report
+     print(f"\n{LOG_DIVIDER}\nDEMO 2: Generating a Project Status Report\n{LOG_DIVIDER}")

+     status_data = {
+         "task_description": "Q3-2025 Market Analysis for AI-driven agricultural sensors."
+     }
+     status_prompt = PromptLibrary.PROJECT_STATUS_REPORT.format(**status_data)

+     logged_status_prompt = auditor.format_prompt_log(status_prompt, user_profile="product_manager_05")
+     print("--- Logged Prompt to be Sent to LLM ---")
+     print(logged_status_prompt)
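The demonstration block exercises the narrative and status templates; a parallel sketch for TECHNICAL_JSON_REPORT would follow the same pattern. The objective and data-stream strings below are illustrative, not part of the commit:

    # Illustrative only: same format-then-log pattern as DEMO 1 and DEMO 2.
    json_report_data = {
        "task_objective": "Compress findings on lattice-based cryptography into a structured brief.",
        "baseline_knowledge": "Prior survey notes already held by the agent.",
        "new_information": "Articles retrieved during the current session.",
    }
    json_prompt = PromptLibrary.TECHNICAL_JSON_REPORT.format(**json_report_data)
    logged_json_prompt = auditor.format_prompt_log(json_prompt, user_profile="analyst_02")
    print(logged_json_prompt)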