Mehedi2 committed
Commit f73d5ba · verified · 1 Parent(s): 7ce5da4

Update app.py

Files changed (1)
  1. app.py +330 -355
app.py CHANGED
@@ -1,11 +1,11 @@
  import os
- import json
  import requests
- from typing import Dict, Any, List, Optional
  import gradio as gr
- import pandas as pd

- # Configuration
  OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY") or os.getenv("my_key")

  class OpenRouterLLM:
@@ -14,15 +14,17 @@ class OpenRouterLLM:
  self.model = model
  self.base_url = "https://openrouter.ai/api/v1/chat/completions"

- def __call__(self, prompt: str, max_tokens: int = 2000, temperature: float = 0.1) -> str:
  if not self.api_key or not self.api_key.startswith('sk-or-v1-'):
- return "Error: Invalid OpenRouter API key"

  headers = {
  "Authorization": f"Bearer {self.api_key}",
  "Content-Type": "application/json",
  "HTTP-Referer": "https://huggingface.co/spaces/Mehedi2/Gaia-Test-Agent",
- "X-Title": "GAIA Test Agent"
  }

  payload = {
@@ -30,14 +32,7 @@ class OpenRouterLLM:
  "messages": [
  {
  "role": "system",
- "content": """You are an advanced AI assistant designed for the GAIA benchmark. You excel at:
- - Complex reasoning and multi-step problem solving
- - Mathematical calculations and logical analysis
- - Research and fact-finding
- - File analysis and data interpretation
- - Providing precise, unambiguous answers
-
- Always think step-by-step and provide clear reasoning for your answers."""
  },
  {
  "role": "user",
@@ -50,418 +45,398 @@ Always think step-by-step and provide clear reasoning for your answers."""
  }

  try:
- response = requests.post(self.base_url, headers=headers, json=payload, timeout=60)
-
- if response.status_code == 200:
- result = response.json()
  return result["choices"][0]["message"]["content"].strip()
  else:
- return f"API Error: {response.status_code} - {response.text[:200]}"
-
  except Exception as e:
- return f"Error: {str(e)}"

- class GAIATestAgent:
- """GAIA Benchmark Test Agent with enhanced capabilities"""
-
- def __init__(self, api_key: str, model: str = "deepseek/deepseek-v3.1-terminus"):
- self.llm = OpenRouterLLM(api_key, model)
- self.api_key = api_key
-
- def solve_gaia_question(self, question: str, file_content: Optional[str] = None, level: int = 1) -> str:
- """Main method to solve GAIA benchmark questions"""
-
- # Build context
- context_parts = [f"Question: {question}"]
-
- if file_content:
- context_parts.append(f"File content provided: {file_content[:2000]}")
-
- context_parts.append(f"Question Level: {level} (1=Basic, 2=Intermediate, 3=Advanced)")
-
- # Create comprehensive solving prompt
- main_prompt = f"""
- You are solving a GAIA benchmark question. These questions test advanced AI capabilities and require careful reasoning.
-
- {chr(10).join(context_parts)}
-
- Approach this systematically:
-
- 1. **Understanding**: What exactly is the question asking for?
- 2. **Analysis**: What information do I have and what do I need to find?
- 3. **Strategy**: What approach should I take to solve this?
- 4. **Execution**: Work through the problem step by step
- 5. **Verification**: Does my answer make sense?
- 6. **Final Answer**: Provide a clear, precise answer
-
- For GAIA questions:
- - Be extremely precise and factual
- - Show your reasoning clearly
- - For numerical answers, provide exact numbers
- - For factual answers, be concise but complete
- - If you need to make calculations, show your work
-
- Think carefully and solve step by step:
  """

- return self.llm(main_prompt, max_tokens=2000, temperature=0.1)
-
- # Sample GAIA-style questions for testing
- SAMPLE_QUESTIONS = [
- {
- "task_id": "math_001",
- "Question": "What is the sum of all prime numbers less than 20?",
- "Level": 1,
- "Final answer": "77",
- "explanation": "Primes < 20: 2, 3, 5, 7, 11, 13, 17, 19. Sum = 77"
- },
- {
- "task_id": "logic_001",
- "Question": "If all roses are flowers and some flowers fade quickly, can we conclude that some roses fade quickly?",
- "Level": 2,
- "Final answer": "No",
- "explanation": "This is a logical fallacy - we cannot conclude that some roses fade quickly from the given premises"
- },
- {
- "task_id": "calc_001",
- "Question": "A rectangle has a perimeter of 24 cm and an area of 32 cm². What is the length of its longer side?",
- "Level": 2,
- "Final answer": "8",
- "explanation": "Solving the system: 2(l+w)=24 and l×w=32 gives l=8, w=4"
- },
- {
- "task_id": "reasoning_001",
- "Question": "In a sequence where each term is the sum of the two preceding terms, if the 5th term is 13 and the 6th term is 21, what is the 4th term?",
- "Level": 2,
- "Final answer": "8",
- "explanation": "Working backwards: F(4) + F(5) = F(6), so F(4) = 21 - 13 = 8"
- },
- {
- "task_id": "wordplay_001",
- "Question": "What common English word becomes shorter when you add two letters to it?",
- "Level": 2,
- "Final answer": "Short",
- "explanation": "The word 'short' becomes 'shorter' when you add 'er'"
- }
- ]

- def evaluate_answer(agent_answer: str, correct_answer: str, question: str) -> Dict[str, Any]:
- """Evaluate agent answer against correct answer"""
-
- # Normalize answers
- agent_norm = agent_answer.strip().lower()
- correct_norm = correct_answer.strip().lower()
-
- # Check for exact match
- exact_match = agent_norm == correct_norm
-
- # Check if correct answer is contained in agent response
- contains_answer = correct_norm in agent_norm
-
- # For numerical answers, try to extract numbers
- import re
- agent_numbers = re.findall(r'-?\d+\.?\d*', agent_answer)
- correct_numbers = re.findall(r'-?\d+\.?\d*', correct_answer)
-
- numerical_match = False
- if agent_numbers and correct_numbers:
- try:
- agent_num = float(agent_numbers[-1]) # Take last number found
- correct_num = float(correct_numbers[0])
- numerical_match = abs(agent_num - correct_num) < 0.001
- except:
- pass
-
- # Determine if answer is correct
- is_correct = exact_match or contains_answer or numerical_match
-
- return {
- "is_correct": is_correct,
- "exact_match": exact_match,
- "contains_answer": contains_answer,
- "numerical_match": numerical_match,
- "confidence": "high" if exact_match else "medium" if contains_answer or numerical_match else "low"
- }
-
- def test_single_question(question, expected_answer, level, api_key):
- """Test a single question"""
- if not api_key:
- return "❌ Please provide your OpenRouter API key", "", "❌ No API key"
-
- if not question.strip():
- return "❌ Please provide a question", "", "❌ No question"
-
- agent = GAIATestAgent(api_key)
- response = agent.solve_gaia_question(question, level=level)
-
- if expected_answer.strip():
- eval_result = evaluate_answer(response, expected_answer, question)
- status = f"✅ Correct ({eval_result['confidence']} confidence)" if eval_result['is_correct'] else "❌ Incorrect"
- else:
- status = "⚠️ No expected answer provided"
-
- return response, expected_answer, status
- def test_sample_questions(api_key):
- """Test on predefined sample questions"""
- if not api_key:
- return "❌ Please provide your OpenRouter API key", pd.DataFrame()
-
- agent = GAIATestAgent(api_key)
- results = []
-
- total_questions = len(SAMPLE_QUESTIONS)
- correct_count = 0
-
- for i, q in enumerate(SAMPLE_QUESTIONS):
- response = agent.solve_gaia_question(q["Question"], level=q["Level"])
- eval_result = evaluate_answer(response, q["Final answer"], q["Question"])
-
- if eval_result["is_correct"]:
- correct_count += 1
-
- # Truncate long responses for display
- display_response = response[:150] + "..." if len(response) > 150 else response
- display_question = q["Question"][:80] + "..." if len(q["Question"]) > 80 else q["Question"]
-
- results.append({
- "ID": q["task_id"],
- "Question": display_question,
- "Level": q["Level"],
- "Expected": q["Final answer"],
- "Agent Answer": display_response,
- "Status": "✅" if eval_result["is_correct"] else "❌",
- "Confidence": eval_result["confidence"]
- })

- accuracy = (correct_count / total_questions) * 100

- # Create summary
- summary = f"""
- ## 📊 Test Results Summary
-
- ### Overall Performance
- - **Total Questions Tested**: {total_questions}
- - **Correct Answers**: {correct_count}
- - **Accuracy**: {accuracy:.1f}%

- ### By Level
- - **Level 1**: {sum(1 for q in SAMPLE_QUESTIONS if q['Level'] == 1)} questions
- - **Level 2**: {sum(1 for q in SAMPLE_QUESTIONS if q['Level'] == 2)} questions
- - **Level 3**: {sum(1 for q in SAMPLE_QUESTIONS if q['Level'] == 3)} questions

- ### Performance Analysis
- - **High Confidence Correct**: {sum(1 for r in results if r['Status'] == '✅' and r['Confidence'] == 'high')}
- - **Medium Confidence Correct**: {sum(1 for r in results if r['Status'] == '✅' and r['Confidence'] == 'medium')}
- - **Incorrect Answers**: {len(results) - correct_count}

- {('🎉 Excellent performance! Ready for GAIA submission.' if accuracy >= 80 else
- '👍 Good performance! Consider fine-tuning for better results.' if accuracy >= 60 else
- '⚠️ Performance needs improvement. Review failed cases.')}
  """

- results_df = pd.DataFrame(results)

- return summary, results_df
-
- def generate_submission_template(api_key):
- """Generate GAIA submission template and instructions"""
-
- submission_instructions = """
- # 🏆 GAIA Benchmark Submission Guide
-
- ## Step 1: Access the GAIA Dataset
- 1. Go to: https://huggingface.co/datasets/gaia-benchmark/GAIA
- 2. Accept the dataset conditions (required to prevent data leakage)
- 3. Load the dataset using: `datasets.load_dataset("gaia-benchmark/GAIA", "2023_all")`
-
- ## Step 2: Generate Predictions
- Your submission file should be a JSON Lines (.jsonl) file:
-
- ```json
- {"task_id": "validation_001", "model_answer": "Your precise answer"}
- {"task_id": "validation_002", "model_answer": "42"}
- {"task_id": "validation_003", "model_answer": "The answer is Paris"}
- ```
-
- ## Step 3: Submit to Leaderboard
- 1. Go to: https://huggingface.co/spaces/gaia-benchmark/leaderboard
- 2. Follow the submission instructions
- 3. Upload your .jsonl predictions file
- 4. Wait for evaluation results
-
- ## ⚠️ Important Notes:
- - Only submit predictions for TEST set (not validation)
- - Answers must be extracted cleanly (no reasoning text)
- - Review GAIA paper for detailed guidelines: https://arxiv.org/abs/2311.12983
- """
-
- # Create sample submission content
- sample_submission = [
- {"task_id": "sample_001", "model_answer": "77"},
- {"task_id": "sample_002", "model_answer": "No"},
- {"task_id": "sample_003", "model_answer": "8"}
- ]
-
- submission_content = "\\n".join([json.dumps(item) for item in sample_submission])
-
- return submission_instructions, submission_content
- # Create the Gradio interface
- def create_gaia_interface():
-
- with gr.Blocks(title="🧪 GAIA Test Agent by Mehedi", theme=gr.themes.Soft()) as demo:

  gr.HTML("""
- <div style="text-align: center; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 30px; border-radius: 15px; margin-bottom: 25px; box-shadow: 0 4px 15px rgba(0,0,0,0.1);">
- <h1>🧪 GAIA Benchmark Test Agent</h1>
- <p><strong>Advanced AI Testing for General AI Intelligence Assessment</strong></p>
- <p>Test your AI agent on complex reasoning tasks • Powered by DeepSeek V3.1 Terminus</p>
  </div>
  """)

- # API Key input
  with gr.Row():
- api_key_input = gr.Textbox(
- label="🔑 OpenRouter API Key",
- placeholder="sk-or-v1-your-openrouter-api-key-here",
- type="password",
- value=OPENROUTER_API_KEY or "",
- info="Required for AI agent functionality. Get yours at openrouter.ai"
- )
-
- # Main tabs
- with gr.Tabs():
-
- # Single Question Testing
- with gr.Tab("🎯 Single Question Test"):
- gr.Markdown("### Test individual GAIA-style questions")

  with gr.Row():
- with gr.Column():
- question_input = gr.Textbox(
- label="Question",
- placeholder="Enter your GAIA-style question here...",
- lines=4,
- info="Enter complex reasoning questions similar to GAIA benchmark"
- )
-
- with gr.Row():
- expected_answer_input = gr.Textbox(
- label="Expected Answer (Optional)",
- placeholder="Expected answer for comparison...",
- info="Provide the correct answer to evaluate performance"
- )
- level_input = gr.Slider(
- minimum=1, maximum=3, value=1, step=1,
- label="Difficulty Level",
- info="1=Basic, 2=Intermediate, 3=Advanced"
- )
-
- test_single_btn = gr.Button("🧪 Test Question", variant="primary")
-
- with gr.Column():
- agent_response_output = gr.Textbox(
- label="🤖 Agent Response",
- lines=12,
- show_copy_button=True,
- info="Complete reasoning and answer from the AI agent"
- )
-
- with gr.Row():
- expected_display = gr.Textbox(label="Expected", interactive=False)
- result_status_output = gr.Textbox(label="Evaluation", interactive=False)
-
- # Sample Questions Battery Test
- with gr.Tab("📝 Sample Questions Test"):
- gr.Markdown("### Test on curated GAIA-style questions")
-
- with gr.Column():
- gr.Markdown("""
- **Sample Question Types:**
- - 🔢 Mathematical reasoning and calculations
- - 🧠 Logical reasoning and inference
- - 🔍 Multi-step problem solving
- - 📊 Data analysis and interpretation
- - 🎯 Precision and accuracy testing
- """)
-
- test_samples_btn = gr.Button("🧪 Run Full Test Battery", variant="primary")
-
- test_summary_output = gr.Markdown()
-
- test_results_output = gr.Dataframe(
- label="📊 Detailed Test Results",
- wrap=True
  )
-
- # Submission Guidelines
- with gr.Tab("📤 GAIA Submission"):
- gr.Markdown("### Official GAIA benchmark submission guide")

- generate_guide_btn = gr.Button("📋 Generate Submission Guide", variant="primary")

- submission_guide_output = gr.Markdown()

- with gr.Accordion("📄 Sample Submission File", open=False):
- submission_sample_output = gr.Code(language="json", label="sample_submission.jsonl")
- # Information footer
- gr.Markdown("""
- ---
- ### 📖 About GAIA Benchmark

- **GAIA (General AI Intelligence Assessment)** evaluates AI systems on complex, real-world questions requiring:
- - Multi-step reasoning and planning
- - Tool usage and external knowledge integration
- - Mathematical calculations and logical inference
- - File analysis and multi-modal understanding

- ### 🔗 Official Resources
- - **🏆 Leaderboard**: [GAIA Benchmark Leaderboard](https://huggingface.co/spaces/gaia-benchmark/leaderboard)
- - **📚 Dataset**: [GAIA Dataset](https://huggingface.co/datasets/gaia-benchmark/GAIA)
- - **📄 Research Paper**: [GAIA: A Benchmark for General AI Assistants](https://arxiv.org/abs/2311.12983)

- ---
- **⚡ Powered by**: DeepSeek V3.1 Terminus via OpenRouter | **🛠️ Built by**: Mehedi | **🎯 Purpose**: GAIA Benchmark Testing
  """)
  # Event handlers
- test_single_btn.click(
- fn=test_single_question,
- inputs=[question_input, expected_answer_input, level_input, api_key_input],
- outputs=[agent_response_output, expected_display, result_status_output]
  )

- test_samples_btn.click(
- fn=test_sample_questions,
- inputs=[api_key_input],
- outputs=[test_summary_output, test_results_output]
  )

- generate_guide_btn.click(
- fn=generate_submission_template,
- inputs=[api_key_input],
- outputs=[submission_guide_output, submission_sample_output]
  )

- return demo
- # Launch the application
  if __name__ == "__main__":
- demo = create_gaia_interface()

  # Check if running on Hugging Face Spaces
  if os.getenv("SPACE_ID"):
- demo.launch(
  server_name="0.0.0.0",
  server_port=7860,
- show_api=False,
- share=False
  )
  else:
- # Local development
- demo.launch(share=True, show_api=False, debug=True)

  import os
  import requests
+ import json
  import gradio as gr
+ from typing import Dict, Any, Optional

+ # Set your OpenRouter API key as environment variable
+ # For Hugging Face Spaces, you'll add this as a secret
  OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY") or os.getenv("my_key")

  class OpenRouterLLM:

  self.model = model
  self.base_url = "https://openrouter.ai/api/v1/chat/completions"

+ def __call__(self, prompt: str, max_tokens: int = 1000, temperature: float = 0.3) -> str:
+ """Make API call to OpenRouter with DeepSeek V3.1 Terminus"""
+
  if not self.api_key or not self.api_key.startswith('sk-or-v1-'):
+ return "Error: Invalid OpenRouter API key. Please configure your API key."

  headers = {
  "Authorization": f"Bearer {self.api_key}",
  "Content-Type": "application/json",
  "HTTP-Referer": "https://huggingface.co/spaces/Mehedi2/Gaia-Test-Agent",
+ "X-Title": "AI Navigation Agent"
  }

  payload = {

  "messages": [
  {
  "role": "system",
+ "content": "You are a helpful navigation assistant. Provide clear, concise, and user-friendly route summaries."
  },
  {
  "role": "user",

  }

  try:
+ response = requests.post(
+ self.base_url,
+ headers=headers,
+ json=payload,
+ timeout=30
+ )
+
+ if response.status_code == 401:
+ return "❌ Error: Invalid API key or unauthorized."
+ elif response.status_code == 402:
+ return "❌ Error: Insufficient credits in OpenRouter account."
+ elif response.status_code == 429:
+ return "❌ Error: Rate limit exceeded. Please wait and try again."
+ elif response.status_code != 200:
+ return f"❌ Error: HTTP {response.status_code} - {response.text[:200]}"

+ result = response.json()
+
+ if "choices" in result and len(result["choices"]) > 0:
  return result["choices"][0]["message"]["content"].strip()
  else:
+ return "❌ Error: No response content received."
+
+ except requests.exceptions.Timeout:
+ return "❌ Error: Request timeout. Please try again."
+ except requests.exceptions.RequestException as e:
+ return f"❌ Error calling OpenRouter API: {str(e)}"
  except Exception as e:
+ return f"Error: {str(e)}"
+ def fetch_route_from_osrm(origin: str, destination: str) -> str:
+ """Fetch route from OSRM API"""

+ try:
+ # Validate coordinates
+ origin_parts = origin.split(',')
+ dest_parts = destination.split(',')
+
+ if len(origin_parts) != 2 or len(dest_parts) != 2:
+ return "❌ Error: Coordinates must be in 'longitude,latitude' format"
+
+ # Parse coordinates
+ float(origin_parts[0]), float(origin_parts[1])
+ float(dest_parts[0]), float(dest_parts[1])
+
+ except (ValueError, IndexError):
+ return "❌ Error: Invalid coordinate format"

+ url = f"http://router.project-osrm.org/route/v1/driving/{origin};{destination}"
+ params = {
+ "overview": "false",
+ "steps": "true",
+ "geometries": "geojson"
+ }
+
+ try:
+ response = requests.get(url, params=params, timeout=15)
+ response.raise_for_status()
+ data = response.json()

+ if not data.get("routes") or len(data["routes"]) == 0:
+ return "❌ No route found between the specified locations."

+ route = data["routes"][0]
+ total_distance_km = route.get("distance", 0) / 1000
+ total_duration_min = route.get("duration", 0) / 60

+ # Process turn-by-turn instructions
+ instructions = []
+ step_number = 1

+ for leg in route["legs"]:
+ for step in leg["steps"]:
+ maneuver = step.get("maneuver", {})
+ step_type = maneuver.get("type", "continue")
+ modifier = maneuver.get("modifier", "")
+ road_name = step.get("name", "")
+ distance_m = step.get("distance", 0)
+
+ if distance_m < 10:
+ continue
+
+ instruction = f"{step_number}. "
+
+ if step_type == "depart":
+ direction = "Start your journey"
+ if modifier:
+ direction += f" heading {modifier}"
+ if road_name:
+ direction += f" on {road_name}"
+
+ elif step_type == "arrive":
+ instruction += "🎯 You have arrived at your destination!"
+ instructions.append(instruction)
+ break
+
+ elif step_type == "turn":
+ direction = f"Turn {modifier}" if modifier else "Turn"
+ if road_name:
+ direction += f" onto {road_name}"
+
+ elif step_type == "merge":
+ direction = f"Merge {modifier}" if modifier else "Merge"
+ if road_name:
+ direction += f" onto {road_name}"
+
+ elif step_type == "continue":
+ direction = "Continue straight"
+ if road_name:
+ direction += f" on {road_name}"
+
+ else:
+ direction = f"{step_type.replace('_', ' ').title()}"
+ if modifier:
+ direction += f" {modifier}"
+ if road_name:
+ direction += f" on {road_name}"
+
+ if distance_m >= 100:
+ if distance_m >= 1000:
+ direction += f" for {distance_m/1000:.1f} km"
+ else:
+ direction += f" for {distance_m:.0f} meters"
+
+ instruction += direction
+ instructions.append(instruction)
+ step_number += 1
+
+ route_summary = f"""📍 ROUTE SUMMARY
+ 📊 Distance: {total_distance_km:.1f} km
+ ⏱️ Estimated Time: {total_duration_min:.0f} minutes
+ 🛣️ From: {origin} → To: {destination}

+ 🧭 TURN-BY-TURN DIRECTIONS:
+ {chr(10).join(instructions)}

+ 💡 Total Steps: {len(instructions)}
  """

+ return route_summary.strip()
+
+ except Exception as e:
+ return f"❌ Error fetching route: {str(e)}"
+ def navigate_with_ai(origin_lat, origin_lon, dest_lat, dest_lon, progress=gr.Progress()):
+ """Main navigation function for Gradio interface"""

+ progress(0, desc="Starting navigation...")

+ # Validate inputs
+ try:
+ origin_lat = float(origin_lat)
+ origin_lon = float(origin_lon)
+ dest_lat = float(dest_lat)
+ dest_lon = float(dest_lon)
+ except (ValueError, TypeError):
+ return "❌ Error: Please enter valid numeric coordinates."

+ # Check coordinate ranges
+ if not (-90 <= origin_lat <= 90) or not (-180 <= origin_lon <= 180):
+ return "❌ Error: Origin coordinates out of valid range."
+ if not (-90 <= dest_lat <= 90) or not (-180 <= dest_lon <= 180):
+ return "❌ Error: Destination coordinates out of valid range."

+ # Format coordinates
+ origin = f"{origin_lon},{origin_lat}"
+ destination = f"{dest_lon},{dest_lat}"

+ progress(0.3, desc="Fetching route data...")

+ # Get route from OSRM
+ raw_route = fetch_route_from_osrm(origin, destination)

+ if raw_route.startswith("❌"):
+ return raw_route

+ progress(0.7, desc="Generating AI summary...")

+ # Check if API key is available
+ if not OPENROUTER_API_KEY:
+ return f"""⚠️ Warning: No API key configured. Showing raw route data:

+ {raw_route}
+
+ To get AI-enhanced summaries, please configure your OpenRouter API key in the Space settings."""

+ # Generate AI summary
+ llm = OpenRouterLLM(api_key=OPENROUTER_API_KEY, model="deepseek/deepseek-v3.1-terminus")

+ prompt = f"""
+ Analyze this route information and create a helpful navigation summary:

+ {raw_route}

+ Please provide:
+ 1. A brief overview of the journey
+ 2. Simplified directions with key landmarks
+ 3. Any important notes about the route
+ 4. Travel tips if relevant

+ Format your response to be clear and easy to follow.
  """

+ progress(0.9, desc="Finalizing response...")

+ ai_summary = llm(prompt, max_tokens=1200, temperature=0.2)

+ progress(1.0, desc="Complete!")
+
+ return ai_summary
+ # Predefined location examples
+ LOCATION_EXAMPLES = {
+ "Dhaka, Bangladesh": (23.8103, 90.4125),
+ "Chittagong, Bangladesh": (22.3569, 91.7832),
+ "London, UK": (51.5074, -0.1278),
+ "New York, USA": (40.7128, -74.0060),
+ "Paris, France": (48.8566, 2.3522),
+ "Tokyo, Japan": (35.6762, 139.6503),
+ "Sydney, Australia": (-33.8688, 151.2093)
+ }

+ def set_example_location(location_name, is_destination=False):
+ """Set example location coordinates"""
+ if location_name in LOCATION_EXAMPLES:
+ lat, lon = LOCATION_EXAMPLES[location_name]
+ return lat, lon
+ return None, None
+ # Create Gradio interface
+ def create_gradio_app():
+ with gr.Blocks(
+ title="🗺️ AI Navigation Agent",
+ theme=gr.themes.Soft(),
+ css="""
+ .main-header {
+ text-align: center;
+ background: linear-gradient(90deg, #667eea 0%, #764ba2 100%);
+ color: white;
+ padding: 20px;
+ border-radius: 10px;
+ margin-bottom: 20px;
+ }
+ """
+ ) as app:

  gr.HTML("""
+ <div class="main-header">
+ <h1>🗺️ AI Navigation Agent</h1>
+ <p>Get AI-powered route planning with DeepSeek V3.1 Terminus</p>
  </div>
  """)

  with gr.Row():
+ with gr.Column():
+ gr.Markdown("### 📍 Origin (Starting Point)")

  with gr.Row():
+ origin_lat = gr.Number(
+ label="Latitude",
+ placeholder="e.g., 23.8103",
+ value=23.8103,
+ precision=6
+ )
+ origin_lon = gr.Number(
+ label="Longitude",
+ placeholder="e.g., 90.4125",
+ value=90.4125,
+ precision=6
  )

+ origin_examples = gr.Dropdown(
+ choices=list(LOCATION_EXAMPLES.keys()),
+ label="Or choose a preset location",
+ value=None
+ )

+ with gr.Column():
+ gr.Markdown("### 🎯 Destination (End Point)")

+ with gr.Row():
+ dest_lat = gr.Number(
+ label="Latitude",
+ placeholder="e.g., 22.3569",
+ value=22.3569,
+ precision=6
+ )
+ dest_lon = gr.Number(
+ label="Longitude",
+ placeholder="e.g., 91.7832",
+ value=91.7832,
+ precision=6
+ )
+
+ dest_examples = gr.Dropdown(
+ choices=list(LOCATION_EXAMPLES.keys()),
+ label="Or choose a preset location",
+ value=None
+ )

+ with gr.Row():
+ clear_btn = gr.Button("🗑️ Clear", variant="secondary")
+ navigate_btn = gr.Button("🧭 Get Navigation", variant="primary", size="lg")
+
+ with gr.Row():
+ output = gr.Textbox(
+ label="🗺️ Navigation Result",
+ lines=20,
+ placeholder="Your navigation instructions will appear here...",
+ show_copy_button=True
+ )

+ gr.Markdown("""
+ ### 💡 How to Use:
+ 1. Enter latitude and longitude coordinates for origin and destination
+ 2. Or use the dropdown to select preset locations
+ 3. Click "Get Navigation" to generate AI-powered route instructions
+ 4. The system uses OSRM for routing and DeepSeek V3.1 Terminus for summaries

+ ### 📝 Coordinate Format:
+ - Latitude: -90 to 90 (North/South)
+ - Longitude: -180 to 180 (East/West)
+ - Example: Dhaka is at 23.8103, 90.4125

+ ### 🔧 Features:
+ - Real-time route calculation
+ - AI-enhanced navigation summaries
+ - Distance and time estimates
+ - Turn-by-turn directions
  """)
  # Event handlers
+ def set_origin_example(location):
+ if location:
+ lat, lon = set_example_location(location)
+ return lat, lon
+ return gr.update(), gr.update()
+
+ def set_dest_example(location):
+ if location:
+ lat, lon = set_example_location(location)
+ return lat, lon
+ return gr.update(), gr.update()
+
+ def clear_all():
+ return "", "", "", "", None, None, ""
+
+ # Wire up events
+ origin_examples.change(
+ fn=set_origin_example,
+ inputs=[origin_examples],
+ outputs=[origin_lat, origin_lon]
  )

+ dest_examples.change(
+ fn=set_dest_example,
+ inputs=[dest_examples],
+ outputs=[dest_lat, dest_lon]
  )

+ navigate_btn.click(
+ fn=navigate_with_ai,
+ inputs=[origin_lat, origin_lon, dest_lat, dest_lon],
+ outputs=[output],
+ show_progress=True
+ )
+
+ clear_btn.click(
+ fn=clear_all,
+ outputs=[origin_lat, origin_lon, dest_lat, dest_lon, origin_examples, dest_examples, output]
  )

+ return app
+ # Launch the app
  if __name__ == "__main__":
+ app = create_gradio_app()

  # Check if running on Hugging Face Spaces
  if os.getenv("SPACE_ID"):
+ # Running on HF Spaces
+ app.launch(
  server_name="0.0.0.0",
  server_port=7860,
+ show_api=False
  )
  else:
+ # Running locally
+ app.launch(
+ share=True,
+ show_api=False
+ )
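The updated app.py chains two external services: the public OSRM demo server for driving routes and OpenRouter's chat-completions endpoint (model `deepseek/deepseek-v3.1-terminus`) for the natural-language summary. The sketch below is not part of the commit; it is a minimal standalone illustration of that pipeline, assuming the demo server at router.project-osrm.org is reachable and that `OPENROUTER_API_KEY` is set. The helper names `get_route` and `summarize` are invented for the example; the URL pattern, payload shape, and JSON fields (`routes`, `distance`, `duration`, `choices`) mirror what `fetch_route_from_osrm` and `OpenRouterLLM.__call__` use above.

```python
# Minimal standalone sketch (not part of the commit): fetch a driving route from
# the public OSRM demo server, then ask OpenRouter to summarize it.
# Helper names here (get_route, summarize) are illustrative, not from app.py.
import os
import requests

def get_route(origin_lon_lat: str, dest_lon_lat: str) -> dict:
    # OSRM expects "longitude,latitude" pairs, the same format fetch_route_from_osrm builds
    url = f"http://router.project-osrm.org/route/v1/driving/{origin_lon_lat};{dest_lon_lat}"
    resp = requests.get(url, params={"overview": "false", "steps": "true"}, timeout=15)
    resp.raise_for_status()
    route = resp.json()["routes"][0]  # distance in meters, duration in seconds
    return {"distance_km": route["distance"] / 1000, "duration_min": route["duration"] / 60}

def summarize(route: dict, api_key: str) -> str:
    # Same chat-completions endpoint and message shape that OpenRouterLLM.__call__ uses
    resp = requests.post(
        "https://openrouter.ai/api/v1/chat/completions",
        headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"},
        json={
            "model": "deepseek/deepseek-v3.1-terminus",
            "messages": [
                {"role": "system", "content": "You are a helpful navigation assistant."},
                {"role": "user", "content": f"Summarize this driving route: {route}"},
            ],
        },
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()["choices"][0]["message"]["content"].strip()

if __name__ == "__main__":
    # Dhaka -> Chittagong, the default coordinates from LOCATION_EXAMPLES
    route = get_route("90.4125,23.8103", "91.7832,22.3569")
    print(route)
    print(summarize(route, os.getenv("OPENROUTER_API_KEY", "")))
```

Run as-is it should print the raw distance/time figures followed by a short model-written summary, which is the same flow `navigate_with_ai` wraps behind the Gradio UI.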