# pixal_agent_full.py import os import datetime import gradio as gr import requests from typing import Optional, List from langchain.llms.base import LLM from langchain.agents import initialize_agent, AgentType,load_tools from langchain.agents import AgentExecutor, create_structured_chat_agent from langchain.tools import Tool from langchain_experimental.tools.python.tool import PythonREPLTool import queue from typing import Any, Dict import gradio as gr from langchain.callbacks.base import BaseCallbackHandler from langchain.tools import YouTubeSearchTool as YTS # 2. 커스텀 콜백 핸들러 # github_model_llm.py """ GitHub Models API 기반 LLM 래퍼 (LangChain LLM 호환) - OpenAI-style chat completions 호환 - function calling (OPENAI_MULTI_FUNCTIONS) 지원: functions, function_call 전달 가능 - system prompt (system_prompt) 지원 - 옵션: temperature, max_tokens, top_p 등 전달 - raw response 반환 메서드 포함 """ from typing import Optional, List, Dict, Any import os import time import json import requests from requests.adapters import HTTPAdapter, Retry from langchain.llms.base import LLM class GitHubModelLLM(LLM): model: str endpoint: str token: Optional[str] system_prompt: Optional[str] def __init__( self, model: str = "openai/gpt-4.1", token: Optional[str] = os.environ["token"], endpoint: str = "https://models.github.ai/inference", system_prompt: Optional[str] = "너는 PIXAL(Primary Interactive X-ternal Assistant with multi Language)이야.너의 개발자는 정성윤 이라는 6학년 파이썬 프로그래머야.", request_timeout: float = 30.0, max_retries: int = 2, backoff_factor: float = 0.3, **kwargs, ): """ Args: model: 모델 이름 (예: "openai/gpt-4.1") token: GitHub Models API 토큰 (Bearer). 환경변수 GITHUB_TOKEN / token 사용 가능 as fallback. endpoint: API endpoint (기본: https://models.github.ai/inference) system_prompt: (선택) system role 메시지로 항상 앞에 붙임 request_timeout: 요청 타임아웃 (초) max_retries: 네트워크 재시도 횟수 backoff_factor: 재시도 지수 보정 kwargs: LangChain LLM 부모에 전달할 추가 인자 """ super().__init__(**kwargs) self.model = model self.endpoint = endpoint.rstrip("/") self.token = token or os.getenv("GITHUB_TOKEN") or os.getenv("token") self.system_prompt = system_prompt self.request_timeout = request_timeout # requests 세션 + 재시도 설정 self.session = requests.Session() retries = Retry(total=max_retries, backoff_factor=backoff_factor, status_forcelist=[429, 500, 502, 503, 504], allowed_methods=["POST", "GET"]) self.session.mount("https://", HTTPAdapter(max_retries=retries)) self.session.headers.update({ "Content-Type": "application/json" }) if self.token: self.session.headers.update({"Authorization": f"Bearer {self.token}"}) @property def _llm_type(self) -> str: return "github_models_api" # ---------- 편의 internal helper ---------- def _build_messages(self, prompt: str, extra_messages: Optional[List[Dict[str, Any]]] = None) -> List[Dict[str, Any]]: """ messages 배열 생성: system (optional) + extra_messages (if any) + user prompt extra_messages: 이미 role keys로 구성된 메시지 리스트 (예: conversation history) """ msgs: List[Dict[str, Any]] = [] if self.system_prompt: msgs.append({"role": "system", "content": self.system_prompt}) if extra_messages: # ensure format: list of {"role":..,"content":..} msgs.extend(extra_messages) msgs.append({"role": "user", "content": prompt}) return msgs def _post_chat(self, body: Dict[str, Any]) -> Dict[str, Any]: url = f"{self.endpoint}/chat/completions" # ensure Authorization present if "Authorization" not in self.session.headers and not self.token: raise ValueError("GitHub Models token not set. Provide token param or set GITHUB_TOKEN env var.") resp = self.session.post(url, json=body, timeout=self.request_timeout) try: resp.raise_for_status() except requests.HTTPError as e: # try to surface JSON error if present content = resp.text try: j = resp.json() content = json.dumps(j, ensure_ascii=False, indent=2) except Exception: pass raise RuntimeError(f"GitHub Models API error: {e} - {content}") return resp.json() # ---------- LangChain LLM interface ---------- def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str: """ LangChain LLM `_call` 구현 (동기). Supports kwargs: - functions: list[dict] (function schemas) - function_call: "auto" | {"name": "..."} | etc. - messages: list[dict] (if you want to pass full conversation instead of prompt) - temperature, top_p, max_tokens, n, stream, etc. Returns: assistant content (string). If function_call is returned by model, returns the 'content' if present, otherwise returns function_call object as JSON string (so caller can parse). """ # support passing full messages via kwargs['messages'] messages = None extra_messages = None if "messages" in kwargs and isinstance(kwargs["messages"], list): messages = kwargs.pop("messages") else: # optionally allow 'history' or 'extra_messages' extra_messages = kwargs.pop("extra_messages", None) if messages is None: messages = self._build_messages(prompt, extra_messages=extra_messages) body: Dict[str, Any] = { "model": self.model, "messages": messages, } # pass optional top-level params (temperature, max_tokens, etc.) from kwargs for opt in ["temperature", "top_p", "max_tokens", "n", "stream", "presence_penalty", "frequency_penalty"]: if opt in kwargs: body[opt] = kwargs.pop(opt) # pass function-calling related keys verbatim if provided if "functions" in kwargs: body["functions"] = kwargs.pop("functions") if "function_call" in kwargs: body["function_call"] = kwargs.pop("function_call") # include stop if present if stop: body["stop"] = stop # send request raw = self._post_chat(body) # save raw for caller if needed self._last_raw = raw # parse assistant message choices = raw.get("choices") or [] if not choices: return "" message_obj = choices[0].get("message", {}) # if assistant returned a function_call, include that info if "function_call" in message_obj: # return function_call as JSON string so agent/tool orchestrator can parse it # but if content also exists, prefer content func = message_obj["function_call"] # sometimes content may be absent; return structured JSON string return json.dumps({"function_call": func}, ensure_ascii=False) # otherwise return assistant content return message_obj.get("content", "") or "" # optional: expose raw response getter def last_raw_response(self) -> Optional[Dict[str, Any]]: return getattr(self, "_last_raw", None) # optional: provide a convenience chat method to get full message object def chat_completions(self, prompt: str, messages: Optional[List[Dict[str, Any]]] = None, **kwargs) -> Dict[str, Any]: """ Directly call chat completions and return full parsed JSON response. - If `messages` provided, it's used as the full messages array (system/user/assistant roles as needed) - else uses prompt + system_prompt to construct messages. """ if messages is None: messages = self._build_messages(prompt) body: Dict[str, Any] = {"model": self.model, "messages": messages} for opt in ["temperature", "top_p", "max_tokens", "n", "stream"]: if opt in kwargs: body[opt] = kwargs.pop(opt) if "functions" in kwargs: body["functions"] = kwargs.pop("functions") if "function_call" in kwargs: body["function_call"] = kwargs.pop("function_call") raw = self._post_chat(body) self._last_raw = raw return raw from langchain_community.retrievers import WikipediaRetriever from langchain.tools.retriever import create_retriever_tool retriever = WikipediaRetriever(lang="ko",top_k_results=10) wiki=Tool(func=retriever.get_relevant_documents,name="WIKI SEARCH",description="위키백과에서 필요한 정보를 불러옵니다.결괴를 검증하여 사용하시오.") # ────────────────────────────── # ✅ GitHub Models LLM # ────────────────────────────── ''' class GitHubModelLLM(LLM): model: str = "openai/gpt-4.1" endpoint: str = "https://models.github.ai/inference" token: Optional[str] = None @property def _llm_type(self) -> str: return "github_models_api" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: if not self.token: raise ValueError("GitHub API token이 필요합니다.") headers = { "Authorization": "Bearer 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", "Content-Type": "application/json", } body = {"model": self.model, "messages": [{"role": "user", "content": prompt}]} resp = requests.post(f"{self.endpoint}/chat/completions", json=body, headers=headers) if resp.status_code != 200: raise ValueError(f"API 오류: {resp.status_code} - {resp.text}") return resp.json()["choices"][0]["message"]["content"] ''' # ────────────────────────────── # ✅ LLM 설정 # ────────────────────────────── token = os.getenv("GITHUB_TOKEN") or os.getenv("token") if not token: print("⚠️ GitHub Token이 필요합니다. 예: setx GITHUB_TOKEN your_token") llm = GitHubModelLLM() # ────────────────────────────── # ✅ LangChain 기본 도구 불러오기 # ────────────────────────────── tools = load_tools( ["ddg-search", "requests_all", "llm-math"], llm=llm,allow_dangerous_tools=True )+[YTS()]+[wiki] # ────────────────────────────── # ✅ Python 실행 도구 (LangChain 내장) # ────────────────────────────── python_tool = PythonREPLTool() tools.append(Tool(name="python_repl", func=python_tool.run, description="Python 코드를 실행합니다.")) from langchain import hub prompt=hub.pull("hwchase17/structured-chat-agent") # ────────────────────────────── # ✅ 파일 도구 # ────────────────────────────── # ────────────────────────────── # ✅ 정확한 한국 시간 함수 (Asia/Seoul) # ────────────────────────────── import requests from datetime import datetime from zoneinfo import ZoneInfo def time_now(_=""): try: # 정확한 UTC 시각을 외부 API에서 가져옴 resp = requests.get("https://timeapi.io/api/Time/current/zone?timeZone=Asia/Seoul", timeout=5) if resp.status_code == 200: data = resp.json() dt = data["dateTime"].split(".")[0].replace("T", " ") return f"현재 시각: {dt} (Asia/Seoul, 서버 기준 NTP 동기화)" else: # API 실패 시 로컬 시스템 시각으로 대체 tz = ZoneInfo("Asia/Seoul") now = datetime.now(tz) return f"현재 시각(로컬): {now.strftime('%Y-%m-%d %H:%M:%S')} (Asia/Seoul)" except Exception as e: tz = ZoneInfo("Asia/Seoul") now = datetime.now(tz) return f"현재 시각(백업): {now.strftime('%Y-%m-%d %H:%M:%S')} (Asia/Seoul, 오류: {e})" # ────────────────────────────── # ✅ 도구 등록 # ────────────────────────────── tools.extend([Tool(name="time_now", func=time_now, description="현재 시간을 반환합니다.")]) from langchain.memory import ConversationBufferMemory as MEM from langchain.agents.agent_toolkits import FileManagementToolkit as FMT tools.extend(FMT(root_dir=str(os.getcwd())).get_tools()) # ────────────────────────────── # ✅ Agent 초기화 # ────────────────────────────── mem=MEM() agent=initialize_agent(tools,llm,agent=AgentType.OPENAI_MULTI_FUNCTIONS,verbose=True,memory=mem) #agent = create_structured_chat_agent(llm, tools, prompt) #agent= AgentExecutor(agent=agent, tools=tools,memory=mem) # ────────────────────────────── # ✅ Gradio UI # ────────────────────────────── def chat(message, history): try: response = agent.invoke(message) except Exception as e: response = f"⚠️ 오류: {e}" history = history + [(message, response)] return history,history with gr.Blocks(theme=gr.themes.Soft(), title="PIXAL Assistant (LangChain + GitHub LLM)") as demo: gr.Markdown(""" ## 🤖 PIXAL Assistant **LangChain 기반 멀티툴 에이전트** 🧰 DuckDuckGo · Wikipedia · Math · Requests · Python REPL · File · Time """) chatbot = gr.Chatbot(label="PIXAL 대화", height=600) msg = gr.Textbox(label="메시지", placeholder="명령 또는 질문을 입력하세요...") clear = gr.Button("초기화") msg.submit(chat, [msg, chatbot], [chatbot, chatbot]) clear.click(lambda: None, None, chatbot, queue=False) if __name__ == "__main__": demo.launch()