Files
deepagent/deepagents_sourcecode/libs/deepagents-cli/deepagents_cli/tools.py
HyunjunJeon 9cb01f4abe project init
2025-12-31 11:32:36 +09:00

184 lines
6.0 KiB
Python

"""CLI 에이전트를 위한 사용자 정의 도구."""
from typing import Any, Literal
import requests # type: ignore
from markdownify import markdownify # type: ignore
from tavily import TavilyClient # type: ignore
from deepagents_cli.config import settings
# Initialize Tavily client if API key is available
tavily_client = TavilyClient(api_key=settings.tavily_api_key) if settings.has_tavily else None
def http_request(
    url: str,
    method: str = "GET",
    headers: dict[str, str] | None = None,
    data: str | dict | None = None,
    params: dict[str, str] | None = None,
    timeout: int = 30,
) -> dict[str, Any]:
    """Sends an HTTP request to an API or web service.

    Args:
        url: The URL to target
        method: HTTP method (GET, POST, PUT, DELETE, etc.); case-insensitive
        headers: HTTP headers to include
        data: Request body data (a dict is sent as JSON, a string as a raw body)
        params: URL query parameters
        timeout: Request timeout in seconds

    Returns:
        Dictionary containing status_code, headers, and content. ``content`` is
        the parsed JSON body when the response is valid JSON, otherwise the raw
        text. ``success`` is True for status codes below 400. On any request
        failure, ``success`` is False, ``status_code`` is 0, and ``content``
        holds an error message.
    """
    try:
        kwargs: dict[str, Any] = {"url": url, "method": method.upper(), "timeout": timeout}
        if headers:
            kwargs["headers"] = headers
        if params:
            kwargs["params"] = params
        # `is not None` (not truthiness) so explicit empty bodies ("" or {})
        # are still transmitted instead of being silently dropped.
        if data is not None:
            if isinstance(data, dict):
                kwargs["json"] = data
            else:
                kwargs["data"] = data
        response = requests.request(**kwargs)
        try:
            content = response.json()
        # response.json() raises a ValueError subclass on non-JSON bodies;
        # the previous bare `except:` also swallowed KeyboardInterrupt etc.
        except ValueError:
            content = response.text
        return {
            "success": response.status_code < 400,
            "status_code": response.status_code,
            "headers": dict(response.headers),
            "content": content,
            "url": response.url,
        }
    except requests.exceptions.Timeout:
        return {
            "success": False,
            "status_code": 0,
            "headers": {},
            "content": f"{timeout}초 후 요청 시간이 초과되었습니다",
            "url": url,
        }
    except requests.exceptions.RequestException as e:
        return {
            "success": False,
            "status_code": 0,
            "headers": {},
            "content": f"요청 오류: {e!s}",
            "url": url,
        }
    # Last-resort guard (e.g. bad argument types) so the tool always returns
    # a structured result rather than crashing the agent loop.
    except Exception as e:
        return {
            "success": False,
            "status_code": 0,
            "headers": {},
            "content": f"요청 생성 오류: {e!s}",
            "url": url,
        }
def web_search(
    query: str,
    max_results: int = 5,
    topic: Literal["general", "news", "finance"] = "general",
    include_raw_content: bool = False,
):
    """Performs a web search using Tavily for current information and documents.

    This tool searches the web and returns relevant results. After receiving
    results, you should synthesize the information into a natural response
    that helps the user.

    Args:
        query: The search query (specific and detailed)
        max_results: Number of results to return (default: 5)
        topic: The topic type of the search - "general" for most queries, "news" for current events
        include_raw_content: Include full page content (Warning: uses more tokens)

    Returns:
        Dictionary containing:
        - results: List of search results, each containing:
            - title: Page title
            - url: Page URL
            - content: Relevant snippet from the page
            - score: Relevance score (0-1)
        - query: Original search query

    IMPORTANT: After using this tool:
    1. Read the 'content' field of each result
    2. Extract relevant information that answers the user's question
    3. Synthesize this into a clear, natural language response
    4. Cite sources by mentioning the page title or URL
    5. Do NOT show raw JSON to the user - always provide a formatted response
    """
    # Guard: the module-level client is None when no API key was configured.
    if tavily_client is None:
        return {
            "error": "Tavily API 키가 구성되지 않았습니다. TAVILY_API_KEY 환경 변수를 설정하십시오.",
            "query": query,
        }
    search_options = {
        "max_results": max_results,
        "include_raw_content": include_raw_content,
        "topic": topic,
    }
    try:
        return tavily_client.search(query, **search_options)
    except Exception as exc:
        # Best-effort: surface any SDK/network failure as a structured error
        # dict so the agent can react instead of crashing.
        return {"error": f"웹 검색 오류: {exc!s}", "query": query}
def fetch_url(url: str, timeout: int = 30) -> dict[str, Any]:
    """Fetches content from a URL and converts HTML to markdown format.

    This tool fetches web page content and converts it to clean markdown text,
    making it easier to read and process HTML content. After receiving markdown,
    you should synthesize the information into a natural response that helps the user.

    Args:
        url: The URL to fetch (must be a valid HTTP/HTTPS URL)
        timeout: Request timeout in seconds (default: 30)

    Returns:
        Dictionary containing:
        - success: Whether the request was successful
        - url: Final URL after redirects (the input URL on failure)
        - markdown_content: The page content converted to markdown
        - status_code: HTTP status code
        - content_length: Length of markdown content (in characters)
        On failure the dict contains ``success: False``, ``error`` and ``url``.

    IMPORTANT: After using this tool:
    1. Read the markdown_content
    2. Extract relevant information that answers the user's question
    3. Synthesize this into a clear, natural language response
    4. Do NOT show raw markdown to the user unless specifically requested
    """
    try:
        response = requests.get(
            url,
            timeout=timeout,
            # Some sites reject requests without a browser-like User-Agent.
            headers={"User-Agent": "Mozilla/5.0 (compatible; DeepAgents/1.0)"},
        )
        # Raise for 4xx/5xx so HTTP errors take the error path below.
        response.raise_for_status()
        # Convert HTML content to markdown
        markdown_content = markdownify(response.text)
        return {
            # Fix: the docstring promised a `success` key, but the payload
            # never included it; callers can now branch on it uniformly.
            "success": True,
            "url": str(response.url),
            "markdown_content": markdown_content,
            "status_code": response.status_code,
            "content_length": len(markdown_content),
        }
    except Exception as e:
        # Best-effort: return a structured error instead of raising.
        return {"success": False, "error": f"URL 가져오기 오류: {e!s}", "url": url}