Commit · 88604b7
Parent(s): 3591b35
fyp
Files changed:
- app/ai/agent/nodes/listing_collect.py (+54 -148)
- app/ai/tools/casual_chat_tool.py (+123 -116)
- app/ai/tools/listing_tool.py (+2 -1)
app/ai/agent/nodes/listing_collect.py
CHANGED
@@ -1,6 +1,6 @@
 # app/ai/agent/nodes/listing_collect.py
 """
-Dynamic
+Dynamic listing collection with proper example flow and null handling
 """

 import json
@@ -12,7 +12,11 @@ from langchain_core.messages import SystemMessage, HumanMessage

 from app.ai.agent.state import AgentState, FlowState
 from app.ai.agent.validators import JSONValidator
-from app.ai.
+from app.ai.tools.listing_tool import (
+    extract_listing_fields_smart,
+    decide_next_listing_action,
+    generate_listing_example
+)
 from app.config import settings

 logger = get_logger(__name__)
@@ -22,7 +26,7 @@ llm = ChatOpenAI(
     api_key=settings.DEEPSEEK_API_KEY,
     base_url=settings.DEEPSEEK_BASE_URL,
     model="deepseek-chat",
-    temperature=0.7,
+    temperature=0.7,
 )

 async def generate_contextual_question(state: AgentState, next_field: str = None) -> str:
@@ -199,148 +203,9 @@ async def handle_intent_switch(state: AgentState, new_intent: str) -> AgentState

     return state

-async def extract_listing_fields_smart(user_message: str, user_role: str, current_fields: Dict = None) -> Dict:
-    """
-    Smart field extraction that understands context, corrections, and partial info
-    """
-
-    logger.info("Smart field extraction",
-                msg_len=len(user_message),
-                current_fields=list(current_fields.keys()) if current_fields else [])
-
-    context = f"\nCurrently saved: {json.dumps(current_fields, indent=2)}" if current_fields else ""
-
-    prompt = f"""Extract property information from this user message. Be smart about context and corrections.
-
-User role: {user_role}
-User message: "{user_message}"{context}
-
-Extract these fields (set to null if not mentioned, extract corrections if present):
-- location: City/area name or null
-- bedrooms: Number or null (handle "3", "three", "3bed")
-- bathrooms: Number or null (handle "2", "two", "2bath")
-- price: Amount or null (handle "50k", "50,000", "50000")
-- price_type: "monthly", "yearly", "weekly", "daily", "nightly" or null
-- amenities: List or [] (wifi, parking, furnished, ac, etc.)
-- requirements: Text or null
-
-Be smart about:
-- Corrections: "actually it's 3 bedrooms" → update bedrooms to 3
-- Partial info: "50k" when expecting price → extract price: 50000
-- Context: Use conversation history to understand
-
-Return ONLY valid JSON with extracted fields."""
-
-    try:
-        response = await llm.ainvoke([
-            SystemMessage(content="You are a smart field extractor. Understand context and corrections."),
-            HumanMessage(content=prompt)
-        ])
-
-        # Extract JSON from response
-        json_match = re.search(r'\{.*\}', response.content, re.DOTALL)
-        if json_match:
-            result = json.loads(json_match.group())
-            logger.info("Smart extraction successful", extracted=list(result.keys()))
-            return result
-
-        return {}
-
-    except Exception as e:
-        logger.error("Smart extraction failed", exc_info=e)
-        return {}
-
-async def decide_next_listing_action(state: AgentState) -> Dict:
-    """
-    AI decides what to do next based on current conversation context
-    """
-
-    provided = state.provided_fields
-    missing = state.missing_required_fields
-    user_msg = state.last_user_message
-
-    prompt = f"""You are Aida managing a property listing conversation. Decide next action.
-
-Current state:
-- Provided fields: {json.dumps(provided, indent=2)}
-- Missing required: {missing}
-- User just said: "{user_msg}"
-
-Available actions:
-1. "ask_missing" - Ask for next missing required field
-2. "ask_optional" - Ask about amenities/requirements (when required complete)
-3. "show_draft" - All required fields complete, show preview
-4. "acknowledge" - Acknowledge what user said, then continue
-5. "clarify" - Need clarification on what user meant
-
-Consider:
-- If missing required fields → "ask_missing"
-- If all required complete → "ask_optional" or "show_draft"
-- If user provided info → "acknowledge" then continue
-- If unclear → "clarify"
-
-Return ONLY valid JSON:
-{{
-    "action": "ask_missing|ask_optional|show_draft|acknowledge|clarify",
-    "reasoning": "why this action",
-    "next_field": "field to ask about (if ask_missing)",
-    "acknowledgment": "what to acknowledge (if acknowledge)"
-}}"""
-
-    try:
-        response = await llm.ainvoke([
-            SystemMessage(content="Make smart conversation flow decisions for property listing."),
-            HumanMessage(content=prompt)
-        ])
-
-        # Extract JSON
-        json_match = re.search(r'\{.*\}', response.content, re.DOTALL)
-        if json_match:
-            result = json.loads(json_match.group())
-
-            logger.info("AI flow decision",
-                        action=result["action"],
-                        reasoning=result["reasoning"])
-
-            return result
-
-        # Fallback decision
-        if missing:
-            return {
-                "action": "ask_missing",
-                "reasoning": "Fallback - ask missing field",
-                "next_field": missing[0],
-                "acknowledgment": ""
-            }
-        else:
-            return {
-                "action": "show_draft",
-                "reasoning": "Fallback - show draft",
-                "next_field": None,
-                "acknowledgment": ""
-            }
-
-    except Exception as e:
-        logger.error("Flow decision failed", exc_info=e)
-        # Safe fallback
-        if missing:
-            return {
-                "action": "ask_missing",
-                "reasoning": "Exception fallback - ask missing field",
-                "next_field": missing[0],
-                "acknowledgment": ""
-            }
-        else:
-            return {
-                "action": "show_draft",
-                "reasoning": "Exception fallback - show draft",
-                "next_field": None,
-                "acknowledgment": ""
-            }
-
 async def listing_collect_handler(state: AgentState) -> AgentState:
     """
-    Dynamic listing collection with smart
+    Dynamic listing collection with smart example flow and proper validation
     """

     logger.info("Dynamic listing collection",
@@ -361,7 +226,18 @@ async def listing_collect_handler(state: AgentState) -> AgentState:
         # Switch to new intent
         return await handle_intent_switch(state, intent_check["detected_intent"])

-    # 📝 Step 2:
+    # 📝 Step 2: Check if this is the initial state (no fields provided yet)
+    if not state.provided_fields and state.last_user_message in ["i want to list", "i want to list a property", "list my property"]:
+        logger.info("Initial listing request - showing example first")
+
+        # Show random example
+        example = await generate_listing_example("en", state.user_role)
+        state.temp_data["response_text"] = f"Great! 🏠 Here's an example of how you could describe it:\n\n\"{example}\"\n\nNow tell me about your property."
+        state.temp_data["action"] = "show_example"
+
+        return state  # Stay in listing_collect, wait for actual details
+
+    # 📝 Step 3: Extract fields from current message
     if intent_check["extracted_fields"]:
         # Use extracted fields from intent check
         extracted = intent_check["extracted_fields"]
@@ -380,8 +256,28 @@ async def listing_collect_handler(state: AgentState) -> AgentState:
         state.update_listing_progress(field, value)
         logger.info("Field updated", field=field, value=str(value)[:50])

-    # 🎯 Step
-
+    # 🎯 Step 4: Check if we have actual data to proceed
+    required_fields = ["location", "bedrooms", "bathrooms", "price", "price_type"]
+    has_any_real_data = any(
+        state.provided_fields.get(f) is not None
+        for f in required_fields
+    )
+
+    if not has_any_real_data:
+        # Still no real data - ask naturally for first field
+        question = await generate_contextual_question(state, "location")
+        state.temp_data["response_text"] = question
+        state.temp_data["action"] = "asking_first_field"
+        state.current_asking_for = "location"
+        return state
+
+    # 🎯 Step 5: AI decides next action based on context
+    decision = await decide_next_listing_action({
+        "provided_fields": state.provided_fields,
+        "missing_required_fields": state.missing_required_fields,
+        "last_user_message": state.last_user_message,
+        "user_role": state.user_role
+    })

     logger.info("AI decided next action",
                 action=decision["action"],
@@ -401,8 +297,18 @@ async def listing_collect_handler(state: AgentState) -> AgentState:
         state.temp_data["action"] = "asking_optional"

     elif decision["action"] == "show_draft":
-
-        state.
+        # Check we actually have required fields before showing draft
+        missing_required = [f for f in required_fields if state.provided_fields.get(f) is None]
+        if missing_required:
+            # Still missing required fields - ask for next one
+            question = await generate_contextual_question(state, missing_required[0])
+            state.temp_data["response_text"] = question
+            state.temp_data["action"] = "asking_field"
+            state.current_asking_for = missing_required[0]
+        else:
+            # All required fields complete - show draft
+            state.temp_data["response_text"] = "Perfect! Let me create your listing preview..."
+            state.temp_data["action"] = "all_fields_collected"

     elif decision["action"] == "acknowledge":
         # Acknowledge what they said and continue
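Note on the Step 4 guard above: a required field only counts as provided when its value is non-null, so placeholder entries such as {"location": None} no longer skip ahead to the AI decision step. A minimal standalone sketch of that check (the sample dicts are illustrative, not taken from the repo; the helper wrapper is added here just for testing):

    # Step 4 guard in isolation: None values do not count as real data.
    required_fields = ["location", "bedrooms", "bathrooms", "price", "price_type"]

    def has_any_real_data(provided_fields: dict) -> bool:
        return any(provided_fields.get(f) is not None for f in required_fields)

    assert not has_any_real_data({})                                   # nothing collected yet
    assert not has_any_real_data({"location": None, "amenities": []})  # nulls are ignored
    assert has_any_real_data({"location": "Lekki, Lagos"})             # one real field is enough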
app/ai/tools/casual_chat_tool.py
CHANGED
@@ -1,163 +1,170 @@
-# app/ai/
-
+# app/ai/agent/nodes/casual_chat.py
+"""
+Node: Handle casual conversation with proper state transitions
+FIXED: Transitions to IDLE instead of COMPLETE to allow conversation continuation
+"""

-from typing import Dict, Optional
 from structlog import get_logger
 from langchain_openai import ChatOpenAI
-from langchain_core.
-from langchain_core.runnables import RunnablePassthrough
+from langchain_core.messages import SystemMessage, HumanMessage

-from app.
+from app.ai.agent.state import AgentState, FlowState
+from app.ai.agent.validators import ResponseValidator
 from app.ai.prompts.system_prompt import get_system_prompt
+from app.config import settings

 logger = get_logger(__name__)

-
-# INITIALIZE LLM
+# Initialize LLM for casual conversation
 llm = ChatOpenAI(
     api_key=settings.DEEPSEEK_API_KEY,
     base_url=settings.DEEPSEEK_BASE_URL,
     model="deepseek-chat",
-    temperature=0.8,
-    max_tokens=500,
+    temperature=0.8,  # Higher temp for more natural conversation
 )

-
-
-
-
-
-
-
-
-
-
-
-
-
+def build_conversation_context(state: AgentState) -> str:
+    """
+    Build conversation context from history.
+
+    Args:
+        state: Agent state with conversation history
+
+    Returns:
+        Formatted conversation history string
+    """
+
+    messages = state.conversation_history[-6:]  # Last 6 messages for context
+
+    if not messages:
+        return "(New conversation)"

     formatted = []
-    for msg in
-        role = "
-        content = msg
+    for msg in messages:
+        role = "User" if msg["role"] == "user" else "Aida"
+        content = msg["content"]
         formatted.append(f"{role}: {content}")

     return "\n".join(formatted)

-
-# CASUAL CHAT TOOL
-async def process_casual_chat(
-    user_message: str,
-    user_id: str,
-    user_role: str = "renter",
-    conversation_history: list = None,
-) -> Dict:
+async def casual_chat_handler(state: AgentState) -> AgentState:
     """
-
+    Handle casual conversation with proper state transitions.
+
+    FIXED: Transitions to IDLE instead of COMPLETE to allow conversation continuation

     Args:
-
-        user_id: User ID
-        user_role: User's role (landlord or renter)
-        conversation_history: Previous messages in conversation
+        state: Agent state

     Returns:
-
-        "success": bool,
-        "action": "casual_chat",
-        "reply": str (Aida's response),
-        "state": dict
-    }
+        Updated state
     """

     logger.info(
-        "
-        user_id=user_id,
-
+        "Handling casual chat",
+        user_id=state.user_id,
+        message=state.last_user_message[:50]
     )

     try:
-        #
-
-
-
-
-        chain = CASUAL_CHAT_PROMPT | llm
-
-        #
-
-
-            "input": user_message,
-            "chat_history": chat_history
-        }
-
-        response = await chain.ainvoke(context)
-
-
-
+        # ============================================================
+        # STEP 1: Build conversation context
+        # ============================================================
+
+        conv_context = build_conversation_context(state)
+
+        logger.info("Conversation context built", context_len=len(conv_context))
+
+        # ============================================================
+        # STEP 2: Get system prompt
+        # ============================================================
+
+        system_prompt = get_system_prompt(user_role=state.user_role)
+
+        logger.info("System prompt loaded", user_role=state.user_role)
+
+        # ============================================================
+        # STEP 3: Build chat prompt with context
+        # ============================================================
+
+        chat_prompt = f"""{system_prompt}
+
+CONVERSATION HISTORY:
+{conv_context}
+
+CURRENT USER MESSAGE: {state.last_user_message}
+
+Respond naturally and helpfully. Keep your response conversational and friendly (2-3 sentences max)."""
+
+        # ============================================================
+        # STEP 4: Call LLM for response
+        # ============================================================
+
+        response = await llm.ainvoke([
+            SystemMessage(content="You are AIDA, a warm and helpful real estate AI assistant. Respond naturally to user questions."),
+            HumanMessage(content=chat_prompt)
+        ])
+
+        response_text = response.content if hasattr(response, 'content') else str(response)
+
+        logger.info("LLM response generated", response_len=len(response_text))
+
+        # ============================================================
+        # STEP 5: Validate response
+        # ============================================================
+
+        is_valid, cleaned_text, error = ResponseValidator.validate_response_text(response_text)
+
+        if not is_valid:
+            logger.warning("Response validation failed", error=error)
+            # Use fallback
+            cleaned_text = "I'm here to help with real estate questions. What would you like to know?"
         else:
-
+            # Sanitize
+            cleaned_text = ResponseValidator.sanitize_response(cleaned_text)
+
+        logger.info("Response validated", text_len=len(cleaned_text))
+
+        # ============================================================
+        # STEP 6: Store in state
+        # ============================================================
+
+        state.temp_data["response_text"] = cleaned_text
+        state.temp_data["action"] = "casual_chat"
+
+        logger.info("Response stored in state", user_id=state.user_id)
+
+        # ============================================================
+        # STEP 7: ✅ FIXED - Transition to IDLE (not COMPLETE!)
+        # ============================================================
+
+        success, error = state.transition_to(FlowState.IDLE, reason="Casual chat completed, ready for next interaction")
+
+        if not success:
+            logger.error("Transition to IDLE failed", error=error)
+            state.set_error(error, should_retry=False)
+            return state

         logger.info(
-            "Casual chat
-
+            "Casual chat completed",
+            user_id=state.user_id,
+            steps=state.steps_taken
         )

-        return
-            "success": True,
-            "action": "casual_chat",
-            "reply": aida_reply,
-            "state": {
-                "status": "chatting",
-                "last_message_type": "casual_chat",
-            }
-        }
+        return state

     except Exception as e:
         logger.error("Casual chat error", exc_info=e)
+        error_msg = f"Chat error: {str(e)}"

-
-
-
-            "success": False,
-            "action": "casual_chat",
-            "reply": fallback_reply,
-            "state": {
-                "status": "chatting",
-                "error": str(e),
-            }
-        }
-
-
-# TEST
-async def test():
-    """Test the LangChain chat"""
-
-    test_messages = [
-        "Hi, how are you?",
-        "Who created you?",
-        "What's the weather like?",
-        "Can you help me list my apartment?",
-    ]
-
-    print("\nTesting LangChain Casual Chat\n" + "="*70 + "\n")
-
-    for message in test_messages:
-        print(f"User: {message}")
-
-
-
-
-
-
-
-
-
-if __name__ == "__main__":
-    import asyncio
-    asyncio.run(test())
+        # Set fallback response
+        state.temp_data["response_text"] = "Sorry, I had a moment there! What were you saying?"
+        state.temp_data["action"] = "casual_chat"
+
+        # Try to recover
+        if state.set_error(error_msg, should_retry=True):
+            state.transition_to(FlowState.IDLE, reason="Chat with error recovery")
+        else:
+            state.transition_to(FlowState.ERROR, reason="Casual chat error")
+
+        return state
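Since build_conversation_context reads only conversation_history, its formatting can be sanity-checked without the full agent. A sketch with a stand-in state (the SimpleNamespace stand-in and sample messages are assumptions for illustration):

    from types import SimpleNamespace

    # Stand-in for AgentState: only conversation_history is read by the helper.
    state = SimpleNamespace(conversation_history=[
        {"role": "user", "content": "Hi Aida"},
        {"role": "assistant", "content": "Hello! How can I help with your property search?"},
    ])

    # Mirrors build_conversation_context: 6-message window, User/Aida labels.
    messages = state.conversation_history[-6:]
    lines = [
        f"{'User' if m['role'] == 'user' else 'Aida'}: {m['content']}"
        for m in messages
    ] or ["(New conversation)"]
    print("\n".join(lines))
    # User: Hi Aida
    # Aida: Hello! How can I help with your property search?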
app/ai/tools/listing_tool.py
CHANGED
@@ -1,5 +1,5 @@
 # app/ai/tools/listing_tool.py
-# FINAL VERSION:
+# FINAL VERSION: Complete dynamic listing tool with null safety and smart extraction

 import json
 import re
@@ -106,6 +106,7 @@ async def auto_detect_listing_type(price_type: str, user_role: str, user_message

 # ---------- STEP 4: AUTO-DETECT CURRENCY ----------
 async def get_currency_for_location(location: str) -> str:
+    # CRITICAL FIX: Handle None location
     if location is None:
         logger.warning("Location is None, defaulting to NGN")
         return "NGN"