Files
deepagent/deepagents_sourcecode/libs/deepagents-cli/deepagents_cli/config.py
HyunjunJeon af5fbfabec 문서 추가: Context Engineering 문서 추가 및 deepagents_sourcecode 한국어 번역
- Context_Engineering.md: 에이전트 컨텍스트 엔지니어링 개념 정리 문서 추가
- Context_Engineering_Research.ipynb: 연구 노트북 업데이트
- deepagents_sourcecode/: docstring과 주석을 한국어로 번역
2026-01-11 17:55:52 +09:00

529 lines
18 KiB
Python

"""CLI 설정/상수/모델 생성 관련 로직입니다.
Configuration, constants, and model creation for the CLI.
"""
import os
import re
import sys
import uuid
from dataclasses import dataclass
from pathlib import Path
import dotenv
from rich.console import Console
from deepagents_cli._version import __version__
dotenv.load_dotenv()
# CRITICAL: Override LANGSMITH_PROJECT to route agent traces to separate project
# LangSmith reads LANGSMITH_PROJECT at invocation time, so we override it here
# and preserve the user's original value for shell commands
_deepagents_project = os.environ.get("DEEPAGENTS_LANGSMITH_PROJECT")
_original_langsmith_project = os.environ.get("LANGSMITH_PROJECT")
if _deepagents_project:
# Override LANGSMITH_PROJECT for agent traces
os.environ["LANGSMITH_PROJECT"] = _deepagents_project
# Now safe to import LangChain modules
from langchain_core.language_models import BaseChatModel # noqa: E402
# Color scheme (hex colors used in Rich markup throughout the CLI)
COLORS = {
    "primary": "#10b981",
    "dim": "#6b7280",
    "user": "#ffffff",
    "agent": "#10b981",
    "thinking": "#34d399",
    "tool": "#fbbf24",
}

# ASCII art banner, tagged with the installed package version.
DEEP_AGENTS_ASCII = f"""
██████╗ ███████╗ ███████╗ ██████╗
██╔══██╗ ██╔════╝ ██╔════╝ ██╔══██╗
██║ ██║ █████╗ █████╗ ██████╔╝
██║ ██║ ██╔══╝ ██╔══╝ ██╔═══╝
██████╔╝ ███████╗ ███████╗ ██║
╚═════╝ ╚══════╝ ╚══════╝ ╚═╝
█████╗ ██████╗ ███████╗ ███╗ ██╗ ████████╗ ███████╗
██╔══██╗ ██╔════╝ ██╔════╝ ████╗ ██║ ╚══██╔══╝ ██╔════╝
███████║ ██║ ███╗ █████╗ ██╔██╗ ██║ ██║ ███████╗
██╔══██║ ██║ ██║ ██╔══╝ ██║╚██╗██║ ██║ ╚════██║
██║ ██║ ╚██████╔╝ ███████╗ ██║ ╚████║ ██║ ███████║
╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ╚═══╝ ╚═╝ ╚══════╝
v{__version__}
"""

# Interactive commands and their help descriptions
COMMANDS = {
    "clear": "Clear screen and reset conversation",
    "help": "Show help information",
    "tokens": "Show token usage for current session",
    "quit": "Exit the CLI",
    "exit": "Exit the CLI",
}

# Maximum argument length for display
MAX_ARG_LENGTH = 150

# Agent configuration (recursion_limit presumably caps graph steps — set high on purpose)
config = {"recursion_limit": 1000}

# Shared Rich console instance; automatic syntax highlighting disabled.
console = Console(highlight=False)
def _find_project_root(start_path: Path | None = None) -> Path | None:
"""Find the project root by looking for .git directory.
Walks up the directory tree from start_path (or cwd) looking for a .git
directory, which indicates the project root.
Args:
start_path: Directory to start searching from. Defaults to current working directory.
Returns:
Path to the project root if found, None otherwise.
"""
current = Path(start_path or Path.cwd()).resolve()
# Walk up the directory tree
for parent in [current, *list(current.parents)]:
git_dir = parent / ".git"
if git_dir.exists():
return parent
return None
def _find_project_agent_md(project_root: Path) -> list[Path]:
"""Find project-specific AGENTS.md file(s).
Checks two locations and returns ALL that exist:
1. project_root/.deepagents/AGENTS.md
2. project_root/AGENTS.md
Both files will be loaded and combined if both exist.
Args:
project_root: Path to the project root directory.
Returns:
List of paths to project AGENTS.md files (may contain 0, 1, or 2 paths).
"""
paths = []
# Prefer the `.deepagents/AGENTS.md` location when present.
deepagents_md = project_root / ".deepagents" / "AGENTS.md"
if deepagents_md.exists():
paths.append(deepagents_md)
# Also look for a repository-root `AGENTS.md` (and include both if both exist).
root_md = project_root / "AGENTS.md"
if root_md.exists():
paths.append(root_md)
return paths
@dataclass
class Settings:
"""Global settings and environment detection for deepagents-cli.
This class is initialized once at startup and provides access to:
- Available models and API keys
- Current project information
- Tool availability (e.g., Tavily)
- File system paths
Attributes:
project_root: Current project root directory (if in a git project)
openai_api_key: OpenAI API key if available
anthropic_api_key: Anthropic API key if available
tavily_api_key: Tavily API key if available
deepagents_langchain_project: LangSmith project name for deepagents agent tracing
user_langchain_project: Original LANGSMITH_PROJECT from environment (for user code)
"""
# API keys
openai_api_key: str | None
anthropic_api_key: str | None
google_api_key: str | None
tavily_api_key: str | None
# LangSmith configuration
deepagents_langchain_project: str | None # For deepagents agent tracing
user_langchain_project: str | None # Original LANGSMITH_PROJECT for user code
# Model configuration
model_name: str | None = None # Currently active model name
model_provider: str | None = None # Provider (openai, anthropic, google)
# Project information
project_root: Path | None = None
@classmethod
def from_environment(cls, *, start_path: Path | None = None) -> "Settings":
"""Create settings by detecting the current environment.
Args:
start_path: Directory to start project detection from (defaults to cwd)
Returns:
Settings instance with detected configuration
"""
# Detect API keys
openai_key = os.environ.get("OPENAI_API_KEY")
anthropic_key = os.environ.get("ANTHROPIC_API_KEY")
google_key = os.environ.get("GOOGLE_API_KEY")
tavily_key = os.environ.get("TAVILY_API_KEY")
# Detect LangSmith configuration
# DEEPAGENTS_LANGSMITH_PROJECT: Project for deepagents agent tracing
# user_langchain_project: User's ORIGINAL LANGSMITH_PROJECT (before override)
# Note: LANGSMITH_PROJECT was already overridden at module import time (above)
# so we use the saved original value, not the current os.environ value
deepagents_langchain_project = os.environ.get("DEEPAGENTS_LANGSMITH_PROJECT")
user_langchain_project = _original_langsmith_project # Use saved original!
# Detect project
project_root = _find_project_root(start_path)
return cls(
openai_api_key=openai_key,
anthropic_api_key=anthropic_key,
google_api_key=google_key,
tavily_api_key=tavily_key,
deepagents_langchain_project=deepagents_langchain_project,
user_langchain_project=user_langchain_project,
project_root=project_root,
)
@property
def has_openai(self) -> bool:
"""Check if OpenAI API key is configured."""
return self.openai_api_key is not None
@property
def has_anthropic(self) -> bool:
"""Check if Anthropic API key is configured."""
return self.anthropic_api_key is not None
@property
def has_google(self) -> bool:
"""Check if Google API key is configured."""
return self.google_api_key is not None
@property
def has_tavily(self) -> bool:
"""Check if Tavily API key is configured."""
return self.tavily_api_key is not None
@property
def has_deepagents_langchain_project(self) -> bool:
"""Check if deepagents LangChain project name is configured."""
return self.deepagents_langchain_project is not None
@property
def has_project(self) -> bool:
"""Check if currently in a git project."""
return self.project_root is not None
@property
def user_deepagents_dir(self) -> Path:
"""Get the base user-level .deepagents directory.
Returns:
Path to ~/.deepagents
"""
return Path.home() / ".deepagents"
def get_user_agent_md_path(self, agent_name: str) -> Path:
"""Get user-level AGENTS.md path for a specific agent.
Returns path regardless of whether the file exists.
Args:
agent_name: Name of the agent
Returns:
Path to ~/.deepagents/{agent_name}/AGENTS.md
"""
return Path.home() / ".deepagents" / agent_name / "AGENTS.md"
def get_project_agent_md_path(self) -> Path | None:
"""Get project-level AGENTS.md path.
Returns path regardless of whether the file exists.
Returns:
Path to {project_root}/.deepagents/AGENTS.md, or None if not in a project
"""
if not self.project_root:
return None
return self.project_root / ".deepagents" / "AGENTS.md"
@staticmethod
def _is_valid_agent_name(agent_name: str) -> bool:
"""Validate prevent invalid filesystem paths and security issues."""
if not agent_name or not agent_name.strip():
return False
# Allow only alphanumeric, hyphens, underscores, and whitespace
return bool(re.match(r"^[a-zA-Z0-9_\-\s]+$", agent_name))
def get_agent_dir(self, agent_name: str) -> Path:
"""Get the global agent directory path.
Args:
agent_name: Name of the agent
Returns:
Path to ~/.deepagents/{agent_name}
"""
if not self._is_valid_agent_name(agent_name):
msg = (
f"Invalid agent name: {agent_name!r}. "
"Agent names can only contain letters, numbers, hyphens, underscores, and spaces."
)
raise ValueError(msg)
return Path.home() / ".deepagents" / agent_name
def ensure_agent_dir(self, agent_name: str) -> Path:
"""Ensure the global agent directory exists and return its path.
Args:
agent_name: Name of the agent
Returns:
Path to ~/.deepagents/{agent_name}
"""
if not self._is_valid_agent_name(agent_name):
msg = (
f"Invalid agent name: {agent_name!r}. "
"Agent names can only contain letters, numbers, hyphens, underscores, and spaces."
)
raise ValueError(msg)
agent_dir = self.get_agent_dir(agent_name)
agent_dir.mkdir(parents=True, exist_ok=True)
return agent_dir
def ensure_project_deepagents_dir(self) -> Path | None:
"""Ensure the project .deepagents directory exists and return its path.
Returns:
Path to project .deepagents directory, or None if not in a project
"""
if not self.project_root:
return None
project_deepagents_dir = self.project_root / ".deepagents"
project_deepagents_dir.mkdir(parents=True, exist_ok=True)
return project_deepagents_dir
def get_user_skills_dir(self, agent_name: str) -> Path:
"""Get user-level skills directory path for a specific agent.
Args:
agent_name: Name of the agent
Returns:
Path to ~/.deepagents/{agent_name}/skills/
"""
return self.get_agent_dir(agent_name) / "skills"
def ensure_user_skills_dir(self, agent_name: str) -> Path:
"""Ensure user-level skills directory exists and return its path.
Args:
agent_name: Name of the agent
Returns:
Path to ~/.deepagents/{agent_name}/skills/
"""
skills_dir = self.get_user_skills_dir(agent_name)
skills_dir.mkdir(parents=True, exist_ok=True)
return skills_dir
def get_project_skills_dir(self) -> Path | None:
"""Get project-level skills directory path.
Returns:
Path to {project_root}/.deepagents/skills/, or None if not in a project
"""
if not self.project_root:
return None
return self.project_root / ".deepagents" / "skills"
def ensure_project_skills_dir(self) -> Path | None:
"""Ensure project-level skills directory exists and return its path.
Returns:
Path to {project_root}/.deepagents/skills/, or None if not in a project
"""
if not self.project_root:
return None
skills_dir = self.get_project_skills_dir()
skills_dir.mkdir(parents=True, exist_ok=True)
return skills_dir
# Global settings instance (initialized once, at import time — after the
# LANGSMITH_PROJECT override above has already been applied)
settings = Settings.from_environment()
class SessionState:
    """Holds mutable session state (auto-approve mode, etc)."""

    def __init__(self, *, auto_approve: bool = False, no_splash: bool = False) -> None:
        """Initialize the session state.

        Args:
            auto_approve: Whether tool calls are approved automatically.
            no_splash: Whether to hide the splash screen at startup.
        """
        self.auto_approve = auto_approve
        self.no_splash = no_splash
        # Exit-hint UI bookkeeping; both are populated elsewhere by the CLI loop.
        # NOTE(review): exit_hint_until looks like a display deadline — confirm.
        self.exit_hint_until: float | None = None
        self.exit_hint_handle = None
        # Fresh random conversation thread id for this session.
        self.thread_id = str(uuid.uuid4())

    def toggle_auto_approve(self) -> bool:
        """Flip the auto-approve flag and return the new value."""
        flipped = not self.auto_approve
        self.auto_approve = flipped
        return flipped
def get_default_coding_instructions() -> str:
    """Get the default coding agent instructions.

    These are the immutable base instructions that cannot be modified by the agent.
    Long-term memory (AGENTS.md) is handled separately by the middleware.

    Returns:
        Contents of the bundled ``default_agent_prompt.md`` file.
    """
    default_prompt_path = Path(__file__).parent / "default_agent_prompt.md"
    # Explicit encoding: read_text() otherwise uses the locale's preferred
    # encoding, which can mis-decode the bundled markdown on some systems.
    return default_prompt_path.read_text(encoding="utf-8")
def _detect_provider(model_name: str) -> str | None:
"""Auto-detect provider from model name.
Args:
model_name: Model name to detect provider from
Returns:
Provider name (openai, anthropic, google) or None if can't detect
"""
model_lower = model_name.lower()
if any(x in model_lower for x in ["gpt", "o1", "o3"]):
return "openai"
if "claude" in model_lower:
return "anthropic"
if "gemini" in model_lower:
return "google"
return None
def create_model(model_name_override: str | None = None) -> BaseChatModel:
    """Create the appropriate model based on available API keys.

    Uses the global settings instance to determine which model to create.

    Args:
        model_name_override: Optional model name to use instead of environment variable

    Returns:
        ChatModel instance (OpenAI, Anthropic, or Google)

    Raises:
        SystemExit: If no API key is configured or model provider can't be determined.
    """
    # Provider -> (availability flag, required env var name). Replaces three
    # near-identical elif branches that only differed in these two values.
    provider_requirements = {
        "openai": (settings.has_openai, "OPENAI_API_KEY"),
        "anthropic": (settings.has_anthropic, "ANTHROPIC_API_KEY"),
        "google": (settings.has_google, "GOOGLE_API_KEY"),
    }

    # Determine provider and model
    if model_name_override:
        # Use provided model, auto-detect provider
        provider = _detect_provider(model_name_override)
        if not provider:
            console.print(
                "[bold red]Error:[/bold red] Could not detect provider from model name: "
                f"{model_name_override}"
            )
            console.print("\nSupported model name patterns:")
            console.print(" - OpenAI: gpt-*, o1-*, o3-*")
            console.print(" - Anthropic: claude-*")
            console.print(" - Google: gemini-*")
            sys.exit(1)
        # Check if API key for detected provider is available
        available, env_key = provider_requirements[provider]
        if not available:
            console.print(
                "[bold red]Error:[/bold red] Model "
                f"'{model_name_override}' requires {env_key}"
            )
            sys.exit(1)
        model_name = model_name_override
    # Use environment variable defaults, detect provider by API key priority
    elif settings.has_openai:
        provider = "openai"
        model_name = os.environ.get("OPENAI_MODEL", "gpt-5-mini")
    elif settings.has_anthropic:
        provider = "anthropic"
        model_name = os.environ.get("ANTHROPIC_MODEL", "claude-sonnet-4-5-20250929")
    elif settings.has_google:
        provider = "google"
        model_name = os.environ.get("GOOGLE_MODEL", "gemini-3-pro-preview")
    else:
        console.print("[bold red]Error:[/bold red] No API key configured.")
        console.print("\nPlease set one of the following environment variables:")
        console.print(" - OPENAI_API_KEY (for OpenAI models like gpt-5-mini)")
        console.print(" - ANTHROPIC_API_KEY (for Claude models)")
        console.print(" - GOOGLE_API_KEY (for Google Gemini models)")
        console.print("\nExample:")
        console.print(" export OPENAI_API_KEY=your_api_key_here")
        console.print("\nOr add it to your .env file.")
        sys.exit(1)

    # Store model info in settings for display
    settings.model_name = model_name
    settings.model_provider = provider

    # Create and return the model (provider packages imported lazily so only
    # the selected provider's dependency is required at runtime)
    if provider == "openai":
        from langchain_openai import ChatOpenAI

        return ChatOpenAI(model=model_name)
    if provider == "anthropic":
        from langchain_anthropic import ChatAnthropic

        return ChatAnthropic(
            model_name=model_name,
            max_tokens=20_000,  # type: ignore[arg-type]
        )
    if provider == "google":
        from langchain_google_genai import ChatGoogleGenerativeAI

        return ChatGoogleGenerativeAI(
            model=model_name,
            temperature=0,
            max_tokens=None,
        )
    # Unreachable with the providers above, but kept as a guard.
    msg = f"Unsupported model provider: {provider}"
    raise RuntimeError(msg)